Best Python code snippet using tempest_python
Get_LCSH_VIAF.py
Source:Get_LCSH_VIAF.py
...11import sys12from threading import *13from time import sleep1415def test_web_error(url):16 '''17 Given a url, this function tests 404, 200 error.18 Return 0: broken link19 Return 1: link works20 '''21 try:22 conn = urllib.request.urlopen(url)23 except urllib.error.HTTPError as e:24 return 025 except urllib.error.URLError as e:26 return 027 else:28 return 12930def test_html_error(item_folder_name, oclc_number):31 '''32 This functions tests if the RDF file has HTML error.33 If no: return None34 If yes: return the OCLC number35 '''36 with open(item_folder_name + '/' + oclc_number + '.rdf', 'r', encoding="utf-8") as f:37 data = f.read()38 if data[:6] == "<html>":39 return oclc_number40 else:41 return None4243def read_oclc_number_txt(filename):44 '''45 This function reads a .txt (oclc number), returns a list of 46 strings of OCLC numbers47 '''48 with open(filename) as f:49 content = f.readlines()50 # remove whitespace characters like `\n` at the end of each line51 content = [x.strip() for x in content] 52 return content535455def get_oclc_records(filename):56 '''57 This function reads a .txt file and returns a dictionary with key as the 58 OCLC number and values as a dictionary. 
5960 The output is the input of 61 chunk_oclc_dictionary(dic, SIZE=6).62 '''63 oclc_number_list = read_oclc_number_txt(filename)64 d = {}65 for i in oclc_number_list:66 d[i] = {}67 return d6869def chunk_oclc_dictionary(dic, SIZE=6):70 '''71 This function splits the dictionary we get from get_oclc_records into72 a list of 6 dictionaries (6 threads)73 '''74 i = itertools.cycle(range(SIZE))75 split = [dict() for _ in range(SIZE)]76 for k, v in dic.items():77 split[next(i)][k] = v78 return split7980def download_rdf_per_dic(dic, item_folder_name, all_item_names):81 '''82 Download the rdf of all oclc numbers in the input dictionary83 with timeout function8485 Reference: https://stackoverflow.com/questions/32763720/timeout-a-file86 -download-with-python-urllib87 '''8889 initial_download_url = "http://www.worldcat.org/oclc/"90 for key in dic:91 if key not in all_item_names:92 source_url = initial_download_url + key + ".rdf"93 local_path = item_folder_name + "/" +key + ".rdf"94 # Make the actual request, set the timeout for no data to 10 seconds 95 # and enable streaming responses so we don't have to keep the large 96 # files in memory97 request = requests.get(source_url, timeout=10, stream=True)98 # Open the output file and make sure we write in binary mode99 with open(local_path, 'wb') as fh:100 # Walk through the request response in chunks of 1024 * 1024 101 # bytes, so 1MiB102 for chunk in request.iter_content(1024 * 1024):103 # Write the chunk to the file104 fh.write(chunk)105 all_item_names.append(key)106 sleep(1)107108109def download_all_rdf(l, item_folder_name):110 '''111 Given a list of all oclc numbers, this function download it dictionary 112 by dictionary and save the files in the folder user provided.113 '''114 pattern = glob.glob(item_folder_name + "/*.rdf")115 all_item_names = [i.lstrip("RDF\\").rstrip(".rdf") for i in pattern]116 thread_pool = []117 for i in range(len(l)):118 thread_pool.append(Thread(target=download_rdf_per_dic, args=(l[i],item_folder_name, 
all_item_names, )))119 for i in thread_pool:120 i.start()121 for i in thread_pool:122 i.join()123124def parse_one_rdf(oclc_number, all_subs, item_folder_name, sub_folder_name,125 c_report, s_report):126 '''127 Given an OCLC number (string), this function return its url of128 creator/contributor/subject LCSH, if any.129 Input:130 oclc_number: a string of digits131 all_subs: the names of all subject .rdf files we have already132 downloaded133 sub_folder_name: the address of the folder storing subject .rdf134 item_folder_name: the address of the folder storing item.rdf135 c_report: boolean, save report of creator/contributor or not136 s_report: boolean, save report of subject or not137 output:138 dic: a dictionary, key is oclc number, value is a dictionary, which has139 key as the one of the value 'creator'/ 'contributor'/'subject'140 and value as either ['type', linked data of creator/contributor] or141 a list of subjects142 one_sub_report:a report of subject that will later be written as a143 .txt later, a list of strings144 one_cre_report:a report of creator that will later be written as a145 .txt later, a list of strings146 one_con_report:a report of contributor that will later be written as a147 .txt later, a list of strings148 '''149 dic = {}150 graph = rdflib.Graph()151 graph.open("store", create=True)152 graph.parse(item_folder_name + "/" + oclc_number + '.rdf')153 record_url = "http://www.worldcat.org/oclc/" + oclc_number154155 # get_creator156 cre_qres = graph.query(157 """SELECT ?creator158 WHERE {159 ?record_url schema:creator ?creator .160 }""")161 if cre_qres:162 dic, one_cre_report = c_dictionary_get(oclc_number, cre_qres,163 "creator", dic, c_report)164 else: # if c_report == False165 one_cre_report = []166167 # get_contributor168 con_qres = graph.query(169 """SELECT ?contributor170 WHERE {171 ?record_url schema:contributor ?contributor .172 }""")173 if con_qres:174 dic, one_con_report = c_dictionary_get(oclc_number, con_qres,175 "contributor", dic, 
c_report)176 else:177 one_con_report = []178179 # get_subject180 sub_qres = graph.query(181 """SELECT ?sub182 WHERE {183 ?record_url schema:about ?sub .184 }""")185 if sub_qres:186 dic, all_subs, one_sub_report = s_dictionary_get(sub_qres,187 oclc_number, dic, all_subs, sub_folder_name, s_report)188 else:189 one_sub_report = []190191 return dic, all_subs, one_sub_report, one_cre_report, one_con_report192193def c_dictionary_get(oclc_number, query_result, key, dic, c_report):194 '''195 Get the VIAF link and type of the creator/contributor.196 Input:197 oclc number: a string of digits198 query_result: the SPARQL query result from the function, parse_one_rdf199 key: 'creator' or 'contributor'200 dic: a dictionary, key is oclc number, value is a dictionary, which has201 key as the one of the value 'creator'/ 'contributor'/'subject'202 and value as either ['type', linked data of creator/contributor] or203 a list of subjects204 c_report: boolean, save report of creator/contributor or not205 Output:206 dic:a dictionary, key is oclc number, value is a dictionary, which has207 key as the one of the value 'creator'/ 'contributor'/'subject'208 and value as either ['type', linked data of creator/contributor] or209 a list of subjects210 report: a report that will later be written as a .txt later, a list of211 strings212 '''213 report = []214 if c_report: # for report215 report.append("****************************************************")216 l1 = "For OCLC number " + oclc_number + " :"217 report.append(l1)218219 for row, in query_result:220 link = str(row)221 # talked with MJ on 3/5, good enough linked data222 if 'http://experiment.worldcat.org' in link:223 if c_report: # for report224 report.append("Experiment in the URL, pass")225226 else:227 if test_web_error(link) == 0: # link not works228 if c_report: # for report229 l2 = 'The link is invalid ' + link230 report.append(l2)231 pass232 else:233 if c_report: # for report234 l3 = 'Start tracking the following subject URLs in 
linked data ' + link235 report.append(l3)236237 graph = rdflib.Graph()238 graph.open("store", create=True)239 graph.parse('RDF/' + oclc_number + '.rdf')240 # Build and execute the query241 q_pre = sparql.prepareQuery((242 """SELECT ?value243 WHERE {244 ?viaf rdf:type ?value .245 }"""))246 viaf_url = rdflib.URIRef(str(row))247 qres_type = graph.query(q_pre, initBindings={'viaf': viaf_url})248249 for my_type, in qres_type:250 viaf_type = str(my_type)251252 if c_report: # for report253 l4 = 'Type of the link, ' + link + ' is ' + viaf_type254 report.append(l4)255 if key not in dic:256 dic[key] = [[link, viaf_type]]257 else:258 dic[key].append([link, viaf_type])259 return dic, report260261def s_dictionary_get(query_result, oclc_number, dic, all_subs, sub_folder_name,262 s_report):263 '''264 Get the subject linked data, including LCSHs, geonames, and LC name265 authorities. Because we need to download FAST.rdf to query the LCSHs,266 we use all_subs to keep track of the name of FAST files we have downloaded267 Input:268 query_result: the SPARQL query result from the function, parse_one_rdf269 oclc number: a string of digits270 key: 'creator' or 'contributor'271 dic: a dictionary, key is oclc number, value is a dictionary, which has272 key as the one of the value 'creator'/ 'contributor'/'subject'273 and value as either ['type', linked data of creator/contributor] or274 a list of subjects275 all_subs: a list of all the names of downloaded subject.rdf276 sub_folder_name: the address of the folder storing subject .rdf277 s_report: boolean, save report of subject or not278 Output:279 dic:a dictionary, key is oclc number, value is a dictionary, which has280 key as the one of the value 'creator'/ 'contributor'/'subject'281 and value as either ['type', linked data of creator/contributor] or282 a list of subjects283 all_subs: a list of all the names of downloaded subject.rdf284 report: a report that will later be written as a .txt later, a list of285 strings286 '''287 r_lst = [] 
# a list of all subject linked data for one OCLC288 report = [] # a list of strings289290 if s_report: # for report291 report.append("*****************************************************")292 l0 = "For OCLC number " + oclc_number + " :"293 report.append(l0)294295 for link, in query_result:296 sub_ori = str(link)297 if s_report:298 report.append("")299 l2 = "Start tracking the following subject URLs " + str(link)300 report.append(l2)301302 if 'http://experiment.worldcat.org' in sub_ori or 'http://dewey.info' in sub_ori:303 if s_report: # for report304 l8 = "experiment or dewey is in the linked data " + sub_ori + " , PASS"305 report.append(l8)306 pass307308 else:309 if test_web_error(sub_ori) == 0: # test if link is valid310 if s_report:311 l1 = 'The URL is broken, PASS'312 report.append(l1)313 pass314315 else:316 # we don't download work id files317 if "http://www.worldcat.org/oclc/" in sub_ori:318 if s_report: # for report319 report.append("www.worldcat.org/oclc/ found in the link, pass")320321 elif "http://id.loc.gov/authorities/subjects" in sub_ori:322 if sub_ori not in r_lst:323 r_lst.append(sub_ori)
...
test_container_staticweb.py
Source:test_container_staticweb.py
...87 self.assertIn(self.object_name, body)88 css = '<link rel="stylesheet" type="text/css" href="listings.css" />'89 self.assertIn(css, body)90 @test.attr('gate')91 def test_web_error(self):92 headers = {'web-listings': 'true',93 'web-error': self.object_name}94 self.container_client.update_container_metadata(95 self.container_name, metadata=headers)96 # Create object to return when requested object not found97 object_name_404 = "404" + self.object_name98 object_data_404 = data_utils.arbitrary_string()99 self.object_client.create_object(self.container_name,100 object_name_404,101 object_data_404)102 # Request non-existing object103 resp, body = self.custom_object_client.get_object(self.container_name,104 "notexisting")105 self.assertEqual(resp['status'], '404')...
test_02_errors.py
Source:test_02_errors.py
...18 assert 'deliberate' in str(excinfo.value)19 def test_cli_error_2(self, cli):20 result = cli.invoke('error', catch_exceptions=True)21 assert result.exception22 def test_web_error(self, web):23 with pytest.raises(Exception) as excinfo:24 # TODO: Show what this looks like when not caught25 web.get('/error')26 assert 'deliberate' in str(excinfo.value)27 def test_web_error2(self):28 app = flog.app.create_app()29 web = app.test_client()30 resp = web.get('/error')...
Learn to execute automation testing from scratch with the LambdaTest Learning Hub. Right from setting up the prerequisites to run your first automation test, to following best practices and diving deeper into advanced test scenarios, the LambdaTest Learning Hub compiles a list of step-by-step guides to help you become proficient with different test automation frameworks, e.g., Selenium, Cypress, TestNG, etc.
You can also refer to the video tutorials on the LambdaTest YouTube channel to get step-by-step demonstrations from industry experts.
Get 100 automation test minutes FREE!