Best Python code snippet using lemoncheesecake
qqzone.py
Source:qqzone.py
# Scrape QQ Zone blog posts ("日志"): log in, collect every post URL from
# the list pages, then save each post's text to a .txt file and each
# inline image to a .jpg file.
#
# NOTE(review): uses the find_element_by_* API removed in Selenium 4.3 and
# the deprecated switch_to_frame — requires an old Selenium release; verify
# the pinned version before running.
from selenium import webdriver
from selenium.common.exceptions import NoSuchElementException
import time, random, requests
import re

# WARNING: hard-coded account credentials — move to environment variables
# or a config file before reusing this script.
QQ_USER = '46420820'
QQ_PASSWORD = '87937339'


def _pause():
    """Sleep a random 2-6 seconds between actions to look less bot-like."""
    time.sleep(random.randint(2, 6))


def login(br):
    """Log into QQ Zone through the account/password form in the login iframe."""
    br.get('https://user.qzone.qq.com/' + QQ_USER + '/infocenter')
    time.sleep(5)
    br.switch_to_frame('login_frame')
    _pause()
    # Switch from QR-code login to the classic account/password form.
    br.find_element_by_id('switcher_plogin').click()
    _pause()
    br.find_element_by_id('u').send_keys(QQ_USER)
    _pause()
    br.find_element_by_id('p').send_keys(QQ_PASSWORD)
    _pause()
    br.find_element_by_id('login_button').click()
    _pause()
    br.switch_to_default_content()


def collect_log_urls(br):
    """Walk all blog list pages and return the deduplicated post URLs."""
    input('pause')  # manual checkpoint: solve any captcha before continuing
    br.find_element_by_xpath("//a[@title='日志']").click()
    _pause()
    br.switch_to_frame('tblog')
    _pause()
    log_urls = []
    while True:
        time.sleep(random.randint(1, 2))
        anchors = br.find_element_by_id('listArea').find_elements_by_tag_name('a')
        for anchor in anchors:
            href = anchor.get_attribute('href')
            if href.startswith('http'):
                log_urls.append(href)
        try:
            # "下一页" = "next page"; the link disappears on the last page.
            br.find_element_by_partial_link_text('下一页').click()
        except NoSuchElementException:  # was a bare except: — last page reached
            break
    return list(set(log_urls))


def validateTitle(title):
    """Replace characters that are illegal in filenames with underscores."""
    rstr = r"[\/\\\:\*\?\"\<\>\|]"  # / \ : * ? " < > |
    return re.sub(rstr, "_", title)


def download_logs(br, log_urls):
    """Save each post's text and images to disk; return the URLs that failed."""
    failed_urls = []
    for index, url in enumerate(log_urls):
        _pause()
        br.get(url)
        if index == 0:
            input('pause:')  # manual checkpoint on the very first post
        br.switch_to_frame('tblog')
        title = br.find_element_by_class_name('blog_tit_detail').text
        content = br.find_element_by_id('blogDetailDiv')
        try:
            with open(validateTitle(title) + '.txt', 'w', encoding='utf-8') as f:
                f.write(content.text)
        except OSError:  # was a bare except:
            print(url, 'failed!')
            failed_urls.append(url)
        # Download each inline image. (Fixed: the inner loop used to shadow
        # the outer `index` variable.)
        for img_index, img in enumerate(content.find_elements_by_tag_name('img')):
            _pause()
            img_url = img.get_attribute('src')
            if img_url and img_url.startswith('http'):
                try:
                    name = ''.join((validateTitle(title), '_', str(img_index), '.jpg'))
                    with open(name, 'wb') as f:
                        f.write(requests.get(img_url).content)
                except (OSError, requests.RequestException):  # was a bare except:
                    print('img failed!')
    return failed_urls


def main():
    br = webdriver.Firefox()
    login(br)
    log_urls = collect_log_urls(br)
    with open('url.txt', 'w') as f:
        for url in log_urls:
            f.write(url)
            f.write('\n')
    failed_urls = download_logs(br, log_urls)
    # Fixed: the original wrote the stale loop variable `url` instead of
    # each failed URL, and ran every URL together with no separator.
    with open('failed_urls.txt', 'w', encoding='utf-8') as f:
        for u in failed_urls:
            f.write(u)
            f.write('\n')


if __name__ == '__main__':
    main()
download_log.py
Source:download_log.py
...8 Get logfile URLs from html log file9 Args:10 filepath(str): html log filepath (You can download from Tenho site.)11 Returns:12 log_urls(list): list of log urls13 """14 log_cols = ["start_time", "play_time", "game_type", "url", "player"]15 df = pd.read_csv(filepath, sep='|', names=log_cols)16 # å麻ã«çµãè¾¼ã17 df = df[df.game_type.str.startswith(' å')]18 # logã®ãã¦ã³ãã¼ãå
URLãåãæã19 log_urls = df["url"].str[10:-9]20 # ãã¦ã³ãã¼ãã§ããå½¢å¼ã«URLãå¤æï¼ãã®ã¾ã¾ã ã¨è¦³æ¦ãã¼ã¸ã®ãªã³ã¯ã«ãªã£ã¦ããï¼21 log_urls = log_urls.str.replace("\?log=", "log/?")22 # ãªã¹ãå½¢å¼ã«å¤æ23 log_urls = log_urls.to_list()24 return log_urls25def download_one_log(url, save_path):26 with open(save_path, "wb") as f:...
log_analyzer.py
Source:log_analyzer.py
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Turn parsed access-log data into a per-URL timing report.

``log_urls`` is expected to be a dict with two keys:
  - ``"errors"``: number of log rows that failed to parse (int)
  - ``"urls_times"``: mapping of URL -> list of request times
"""

from statistics import median


def analyze_log(log_urls, max_errors_percent, report_size):
    """Validate the parse-error rate, then build the report list.

    Raises:
        Exception: if the share of unparseable rows exceeds
            ``max_errors_percent``.
    """
    errors_percent = calc_errors_percent(log_urls)
    if errors_percent > max_errors_percent:
        msg = "Log errors limit exceeded. " \
              "Errors percent: {}, " \
              "max errors percent (from config): {}"
        raise Exception(
            msg.format(errors_percent, max_errors_percent)
        )

    report_list = prepare_report_list(log_urls, report_size)
    return report_list


def calc_log_rows_count(log_urls):
    """Return the total number of log rows (parsed rows plus errors)."""
    good_rows_count = sum(len(u) for u in log_urls["urls_times"].values())
    return log_urls["errors"] + good_rows_count


def calc_sum_request_time(log_urls):
    """Return the total request time summed over all URLs."""
    return sum(
        sum(url_times) for url_times in log_urls["urls_times"].values()
    )


def calc_errors_percent(log_urls):
    """Return the share of unparseable rows, in percent, rounded to 3 dp."""
    errors_percent = log_urls["errors"] * 100 / calc_log_rows_count(log_urls)
    errors_percent = round(errors_percent, 3)
    return errors_percent


def prepare_report_list(log_urls, report_size):
    """Build per-URL stats sorted by total time, truncated to report_size.

    Each entry carries count/time aggregates (percentages are relative to
    the whole log), all rounded to 3 decimal places.
    """
    urls_report = []

    log_rows_count = calc_log_rows_count(log_urls)
    sum_request_time = calc_sum_request_time(log_urls)

    for url, url_times in log_urls["urls_times"].items():
        url_info = dict()

        url_info["url"] = url
        url_info["count"] = len(url_times)
        url_info["count_perc"] = round(
            url_info["count"] * 100 / log_rows_count, 3
        )

        url_info["time_sum"] = round(sum(url_times), 3)
        url_info["time_perc"] = round(
            url_info["time_sum"] * 100 / sum_request_time, 3
        )
        url_info["time_avg"] = round(
            url_info["time_sum"] / url_info["count"], 3
        )
        url_info["time_max"] = round(max(url_times), 3)
        url_info["time_med"] = round(median(url_times), 3)

        urls_report.append(url_info)

    urls_report = sorted(
        urls_report, key=lambda u: u["time_sum"], reverse=True
    )
    urls_report = urls_report[:report_size]
    # NOTE(review): the aggregator cut this fragment off right after the
    # slice; analyze_log returns this function's result, so the truncated
    # report list is what the caller expects back.
    return urls_report
...
Learn to execute automation testing from scratch with the LambdaTest Learning Hub. Right from setting up the prerequisites to running your first automation test, to following best practices and diving deeper into advanced test scenarios, the LambdaTest Learning Hub compiles step-by-step guides to help you become proficient with different test automation frameworks, e.g. Selenium, Cypress, and TestNG.
You can also refer to the video tutorials on the LambdaTest YouTube channel for step-by-step demonstrations from industry experts.
Get 100 automation test minutes FREE!