Best Python code snippet using Airtest
product_page_storer.py
Source: product_page_storer.py
from crawler_classes import get_libraries
from crawler_classes import product_page_extractor

class store_product_pages:

    def __init__(self, save_folder_name, specials_file_name, source_file_name, find_cheap_url):
        self.save_folder_name = save_folder_name
        self.specials_file_name = specials_file_name
        self.source_file_name = source_file_name
        self.find_cheap_url = find_cheap_url
        self.initialize_review_xpath()
        self.initialize_html_base_tag()
        self.initialize_chrome_driver()
        self.initialize_save_path_location()

    def initialize_review_xpath(self):
        self.review_xpath_1 = None
        self.review_xpath_2 = None
        self.review_xpath_3 = None

    def initialize_html_base_tag(self):
        self.html_base_tag = None

    def initialize_chrome_driver(self):
        self.chrome_driver = None

    def initialize_save_path_location(self):
        self.save_path_location = None

    def set_review_xpath(self, review_xpath_1, review_xpath_2, review_xpath_3):
        self.review_xpath_1 = review_xpath_1
        self.review_xpath_2 = review_xpath_2
        self.review_xpath_3 = review_xpath_3

    def set_html_base_tag(self, product_listing_page_link):
        url_scheme, url_netlock = self.get_url_scheme_and_netlock(product_listing_page_link)
        self.html_base_tag = f'<base href="{url_scheme}://{url_netlock}"/>'

    def set_chrome_driver(self, chrome_driver):
        self.chrome_driver = chrome_driver

    def set_save_path_location(self, save_path_location):
        self.save_path_location = save_path_location

    def get_file_safe_name(self, product_link):
        file_safe_name = ''
        for char in product_link:
            if char.isalnum():
                file_safe_name = file_safe_name + char
            else:
                file_safe_name = file_safe_name + '-'
        return file_safe_name

    def get_domain_name(self, url):
        url_results = get_libraries.extract(url)
        domain_name = url_results.domain
        return domain_name

    def remove_leading_slash(self, url_part):
        return url_part[1:]

    def get_url_path(self, url):
        url_result = get_libraries.urlparse(url)
        url_path = url_result.path
        url_path = self.remove_leading_slash(url_path)
        return url_path

    def get_url_scheme_and_netlock(self, url):
        url_result = get_libraries.urlparse(url)
        url_scheme = url_result.scheme
        url_netlock = url_result.netloc
        return url_scheme, url_netlock

    def get_url_path_query_fragment(self, url):
        url_result = get_libraries.urlparse(url)
        path_query_fragment = ''
        if (url_result.path != ''):
            path_query_fragment = path_query_fragment + url_result.path
        if (url_result.query != ''):
            path_query_fragment = path_query_fragment + '?'
            path_query_fragment = path_query_fragment + url_result.query
        if (url_result.fragment != ''):
            path_query_fragment = path_query_fragment + '#'
            path_query_fragment = path_query_fragment + url_result.fragment
        path_query_fragment = self.remove_leading_slash(path_query_fragment)
        return path_query_fragment

    def get_save_path_location(self, product_listing_page_link):
        current_path = get_libraries.os.path.dirname(get_libraries.os.path.realpath(__file__))
        parent_path = get_libraries.os.path.dirname(current_path)
        child_path_first = get_libraries.os.path.join(parent_path, self.save_folder_name)
        if not get_libraries.os.path.isdir(child_path_first):
            get_libraries.os.mkdir(child_path_first)
        domain_name = self.get_domain_name(product_listing_page_link)
        child_path_second = get_libraries.os.path.join(child_path_first, domain_name)
        if not get_libraries.os.path.isdir(child_path_second):
            get_libraries.os.mkdir(child_path_second)
        unsafe_category_name = self.get_url_path_query_fragment(product_listing_page_link)
        category_name = self.get_file_safe_name(unsafe_category_name)
        final_path = get_libraries.os.path.join(child_path_second, category_name)
        if not get_libraries.os.path.isdir(final_path):
            get_libraries.os.mkdir(final_path)
        return final_path, domain_name, category_name

    def add_html_file_extension(self, file):
        html_file = file + '.html'
        return html_file

    def join_path_components(self, component_one, component_two):
        combined_component = get_libraries.os.path.join(component_one, component_two)
        return combined_component

    def get_parsed_html(self, html):
        parsed_html_soup_object = get_libraries.BeautifulSoup(html, 'html.parser')
        parsed_html = str(parsed_html_soup_object)
        return parsed_html

    def remove_old_html_file(self, path):
        if get_libraries.os.path.exists(path):
            get_libraries.os.remove(path)

    def clean_raw_html(self, raw_html, insert_base_tag):
        html_object = get_libraries.BeautifulSoup(raw_html, 'html.parser')
        if insert_base_tag:
            html_base_tag_object = get_libraries.BeautifulSoup(self.html_base_tag, 'html.parser').base
            html_object.insert(0, html_base_tag_object)
        html = str(html_object)
        return html

    def create_new_html_file(self, path, raw_html, insert_base_tag = True):
        html = self.clean_raw_html(raw_html, insert_base_tag)
        file = get_libraries.codecs.open(path, 'w', 'utf-8')
        file.write(html)
        file.close()

    def save_product_listing_page_file(self, product_listing_page_source):
        product_listing_page_source_path = self.join_path_components(self.save_path_location, self.source_file_name)
        self.remove_old_html_file(product_listing_page_source_path)
        self.create_new_html_file(product_listing_page_source_path, product_listing_page_source)

    def get_product_html(self, product_link):
        product_page = product_page_extractor.extract_page_html(product_link, self.review_xpath_1, self.review_xpath_2,
                                                                self.review_xpath_3, self.chrome_driver)
        product_page_html = product_page.get_html()
        return product_page_html

    def save_product_file(self, product_link):
        product_link_path = self.get_url_path(product_link)
        product_link_path_safe_name = self.get_file_safe_name(product_link_path)
        product_link_file_name = self.add_html_file_extension(product_link_path_safe_name)
        product_path = self.join_path_components(self.save_path_location, product_link_file_name)
        self.remove_old_html_file(product_path)
        product_page_html = self.get_product_html(product_link)
        self.create_new_html_file(product_path, product_page_html)

    def iterate_over_html_files(self, save_path_location):
        html_file_names = []
        directory = get_libraries.os.fsencode(save_path_location)
        for file in get_libraries.os.listdir(directory):
            file_name = get_libraries.os.fsdecode(file)
            if file_name.endswith('.html'):
                html_file_names.append(file_name)
        return html_file_names

    def get_domain_and_suffix_name(self, save_path_location):
        suffix = get_libraries.Path(save_path_location)
        domain = get_libraries.Path(suffix.parent)
        domain_name = domain.name
        suffix_name = suffix.name
        return domain_name, suffix_name

    def join_url_components(self, url_one, url_two, slash = ''):
        combined_url = get_libraries.urljoin(url_one, url_two) + slash
        return combined_url

    def create_specials_url(self, domain_name, suffix_name, html_file_name):
        specials_url = self.join_url_components(self.find_cheap_url, domain_name, '/')
        specials_url = self.join_url_components(specials_url, suffix_name, '/')
        specials_url = self.join_url_components(specials_url, html_file_name)
        return specials_url

    def get_specials_html(self):
        specials_html = ""
        domain_name, suffix_name = self.get_domain_and_suffix_name(self.save_path_location)
        html_file_names = self.iterate_over_html_files(self.save_path_location)
        for html_file_name in html_file_names:
            html_elements = ""
            specials_url = self.create_specials_url(domain_name, suffix_name, html_file_name)
            if html_file_name == self.specials_file_name:
                continue
            elif html_file_name == self.source_file_name:
                html_elements = f"<a href='{specials_url}'>{html_file_name}</a><br/>"
            else:
                html_elements = f"<a class ='fc_products' href='{specials_url}'>{html_file_name}</a><br/>"
            specials_html = specials_html + html_elements
        return specials_html

    def save_specials_file(self):
        specials_path = self.join_path_components(self.save_path_location, self.specials_file_name)
        specials_html = self.get_specials_html()
        self.remove_old_html_file(specials_path)
        self.create_new_html_file(specials_path, specials_html, False)

    def save_html_files(self, product_listing_page_link, product_listing_page_source, product_links, review_xpath_1,
                        review_xpath_2, review_xpath_3, chrome_driver):
        try:
            save_path_location, domain_name, category_name = self.get_save_path_location(product_listing_page_link)
            self.set_review_xpath(review_xpath_1, review_xpath_2, review_xpath_3)
            self.set_html_base_tag(product_listing_page_link)
            self.set_chrome_driver(chrome_driver)
            self.set_save_path_location(save_path_location)
            self.save_product_listing_page_file(product_listing_page_source)
            for product_link in product_links:
                self.save_product_file(product_link)
            self.save_specials_file()
            return save_path_location, domain_name, category_name
        except Exception as exception:
            print(exception)
...
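A minimal sketch of how store_product_pages might be driven follows. The folder and file names, the example URLs, the import path, and the Selenium Chrome driver setup are assumptions for illustration only; only the constructor and save_html_files signatures come from the module above.

# Hypothetical usage sketch for store_product_pages (names, URLs, and paths are illustrative).
from selenium import webdriver
from product_page_storer import store_product_pages  # assumed import path

chrome_driver = webdriver.Chrome()  # assumes chromedriver is available on PATH
storer = store_product_pages(
    save_folder_name='saved_pages',        # assumed folder name
    specials_file_name='specials.html',    # assumed file name
    source_file_name='source.html',        # assumed file name
    find_cheap_url='https://example.com/') # assumed base URL
result = storer.save_html_files(
    product_listing_page_link='https://shop.example.com/category/widgets',
    product_listing_page_source='<html>...</html>',  # page source captured elsewhere
    product_links=['https://shop.example.com/product/1'],
    review_xpath_1=None, review_xpath_2=None, review_xpath_3=None,
    chrome_driver=chrome_driver)
chrome_driver.quit()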
get_libraries.py
Source: get_libraries.py
...
import prov.model
import uuid

# Gets list of public libraries in Massachusetts from public libraries site and geocodes location for each one based on Google Maps.
# For some reason, Truro Public Library didn't have a location in Google Maps, so I had to manually look it up and add it.
class get_libraries(dml.Algorithm):
    contributor = 'emilymo'
    reads = []
    writes = ['d.libs']

    @staticmethod
    def execute(trial = False):
        startTime = datetime.datetime.now()

        client = dml.pymongo.MongoClient()  # TO FIX: AUTH??
        d = client.d

        site = 'https://publiclibraries.com/state/massachusetts/'
        pg = requests.get(site)
        sp = bs(pg.content, "lxml")
...
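Stripped of the dml/MongoDB provenance plumbing, the scraping step in execute() reduces to a requests + BeautifulSoup fetch. The sketch below assumes only those two libraries and makes no claim about the page's current markup; the table-row selector is an assumption.

# Minimal sketch of the fetch-and-parse step (no provenance or MongoDB writes).
import requests
from bs4 import BeautifulSoup

site = 'https://publiclibraries.com/state/massachusetts/'
page = requests.get(site)
soup = BeautifulSoup(page.content, 'html.parser')  # 'lxml' also works if that parser is installed
# Assumed structure: library listings appear as table rows; inspect the page before relying on this.
rows = soup.find_all('tr')
print(f'Fetched {len(rows)} table rows from {site}')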
test_Cleanup.py
Source: test_Cleanup.py
...
        self.me = os.path.dirname(os.path.realpath(__file__))

    def test_Schematic(self):
        main(["--file", self.me + "/inputs/cleanup_test01.sch", "--out", self.me + "/inputs/cleanup_test01.out.sch"])
        ef = EagleFile.from_file(self.me + "/inputs/cleanup_test01.out.sch")
        self.assertEqual(From(ef).get_libraries().count(), 6, "Wrong number of libraries")
        self.assertEqual(From(ef).get_libraries().get_packages().count(), 3, "Wrong number of packages")
        self.assertEqual(From(ef).get_libraries().get_symbols().count(), 10, "Wrong number of symbols")
        self.assertEqual(From(ef).get_libraries().get_devicesets().count(), 9, "Wrong number of devicesets")
        self.assertEqual(From(ef).get_libraries().get_devicesets().get_devices().count(), 9, "Wrong number of devices")

    def test_Board(self):
        main(["--file", self.me + "/inputs/cleanup_test01.brd", "--out", self.me + "/inputs/cleanup_test01.out.brd"])
        ef = EagleFile.from_file(self.me + "/inputs/cleanup_test01.out.brd")
        self.assertEqual(From(ef).get_libraries().count(), 1, "Wrong number of libraries")
        self.assertEqual(From(ef).get_libraries().get_packages().count(), 3, "Wrong number of packages")
        self.assertEqual(From(ef).get_libraries().get_symbols().count(), 0, "Wrong number of symbols")
        self.assertEqual(From(ef).get_libraries().get_devicesets().count(), 0, "Wrong number of devicesets")
        self.assertEqual(From(ef).get_libraries().get_devicesets().get_devices().count(), 0, "Wrong number of devices")

    def test_Library(self):
        main(["--file", self.me + "/inputs/cleanup_test01.lbr", "--out", self.me + "/inputs/cleanup_test01.out.lbr"])
        ef = EagleFile.from_file(self.me + "/inputs/cleanup_test01.out.lbr")
        self.assertEqual(From(ef).get_library().get_packages().count(), 35, "Wrong number of packages")
        self.assertEqual(From(ef).get_library().get_symbols().count(), 97, "Wrong number of symbols")
        self.assertEqual(From(ef).get_library().get_devicesets().count(), 96, "Wrong number of devicesets")
        self.assertEqual(From(ef).get_library().get_devicesets().get_devices().count(), 101, "Wrong number of devices")
...
setup.py
Source: setup.py
...
import numpy as np

setup(
    cmdclass = {'build_ext': build_ext},
    ext_modules = [Extension("definitions", ["definitions.pyx"],
                             libraries=cython_gsl.get_libraries() + ["gmp", "mpfr"],
                             library_dirs=[cython_gsl.get_library_dir()],
                             include_dirs=[np.get_include(), cython_gsl.get_include()],
                             )]
)
setup(
    include_dirs = [cython_gsl.get_include()],
    cmdclass = {'build_ext': build_ext},
    ext_modules = [Extension("froutines", ["froutines.pyx"],
                             libraries=cython_gsl.get_libraries() + ["gmp", "mpfr"],
                             library_dirs=[cython_gsl.get_library_dir()],
                             include_dirs=[np.get_include(), cython_gsl.get_include()],
                             )]
)
setup(
    cmdclass = {'build_ext': build_ext},
    ext_modules = [Extension("approx_routines", ["approx_routines.pyx"],
                             libraries=cython_gsl.get_libraries() + ["gmp", "mpfr"],
                             library_dirs=[cython_gsl.get_library_dir()],
                             include_dirs=[np.get_include(), cython_gsl.get_include()],
                             )]
)
setup(
    cmdclass = {'build_ext': build_ext},
    ext_modules = [Extension("misc", ["misc.pyx"],
                             libraries=cython_gsl.get_libraries(),
                             library_dirs=[cython_gsl.get_library_dir()],
                             include_dirs=[np.get_include(), cython_gsl.get_include()])
    ])
setup(
    cmdclass = {'build_ext': build_ext},
    ext_modules = [Extension("eaamodel", ["eaamodel.pyx"],
                             libraries=cython_gsl.get_libraries() + ["gmp", "mpfr"],
                             library_dirs=[cython_gsl.get_library_dir()],
                             include_dirs=[np.get_include(), cython_gsl.get_include()],
                             )]
...
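Since each setup() call above repeats the same build configuration, the extensions could plausibly be declared in a single call. The sketch below reuses only the helpers already shown (cython_gsl, numpy, build_ext, Extension); the import lines are assumptions, because the snippet's own imports are truncated.

# Hypothetical consolidated setup(): one ext_modules list instead of five setup() calls.
from distutils.core import setup          # assumed import, matching typical cython_gsl usage
from distutils.extension import Extension
from Cython.Distutils import build_ext
import cython_gsl
import numpy as np

common = dict(
    library_dirs=[cython_gsl.get_library_dir()],
    include_dirs=[np.get_include(), cython_gsl.get_include()],
)
setup(
    cmdclass={'build_ext': build_ext},
    ext_modules=[
        Extension(name, [name + '.pyx'],
                  libraries=cython_gsl.get_libraries() + ['gmp', 'mpfr'], **common)
        for name in ('definitions', 'froutines', 'approx_routines', 'eaamodel')
    ] + [Extension('misc', ['misc.pyx'], libraries=cython_gsl.get_libraries(), **common)],
)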