Best Python code snippet using selene_python
acv_xml_parser.py
Source:acv_xml_parser.py
"""Parse JaCoCo XML coverage reports and summarise them as CSV files.

Every sub-directory of ``reports_folder`` is expected to contain JaCoCo XML
reports.  For each report one CSV row of coverage percentages is produced,
and a CSV file named after the sub-directory is written to the current
working directory.
"""
import xml.etree.ElementTree as ET
import csv
import os

# Root folder containing one sub-directory per analysed app/run.
reports_folder = "/path/to/reports/"

# Header row of the generated CSV files.
csv_fields = ["package_name", "instruction_coverage_percent",
              "method_coverage_percent", "class_coverage_percent",
              "total_instr", "total_method", "total_class"]

# Rows accumulated by get_coverage() for the directory being processed.
csv_rows = []

# Template for a coverage accumulator; shallow-copied per counter type.
coverage_dict = {
    "covered": 0,
    "missed": 0,
    "covered_total": 0,
    "missed_total": 0,
}


def increment_cov_dict(cov_dict):
    """Fold the per-package counters into the running totals and reset them."""
    cov_dict["covered_total"] += cov_dict["covered"]
    cov_dict["missed_total"] += cov_dict["missed"]
    cov_dict["covered"] = 0
    cov_dict["missed"] = 0


def _percent(covered, total):
    """Return covered/total as a percentage rounded to 3 decimals.

    Returns 0.0 for an empty total (bug fix: the original raised
    ZeroDivisionError on reports with no instructions/methods/classes).
    """
    return round((covered / total) * 100, 3) if total else 0.0


def get_coverage(package_name, dir_name, base_folder=None):
    """Parse one JaCoCo XML report and append a summary row to ``csv_rows``.

    :param package_name: file name of the XML report inside ``dir_name``.
    :param dir_name: sub-directory of ``base_folder`` holding the report.
    :param base_folder: root of the reports tree; defaults to the module
        level ``reports_folder`` (kept for backward compatibility).
    """
    base = reports_folder if base_folder is None else base_folder
    instr = coverage_dict.copy()
    method = coverage_dict.copy()
    tree = ET.parse(os.path.join(base, dir_name, package_name))
    root = tree.getroot()
    class_missed = 0  # classes whose METHOD counter reports zero covered
    class_total = 0
    for package in root.findall("package"):
        for class_el in package.findall("class"):
            class_total += 1
            covered_methods = 0
            for counter in class_el.findall("counter"):
                if counter.get("type") == "INSTRUCTION":
                    instr["covered"] += int(counter.get("covered"))
                    instr["missed"] += int(counter.get("missed"))
                elif counter.get("type") == "METHOD":
                    method["covered"] += int(counter.get("covered"))
                    method["missed"] += int(counter.get("missed"))
                    # a class counts as covered when at least one method ran
                    covered_methods += int(counter.get("covered"))
            if covered_methods == 0:
                class_missed += 1
        increment_cov_dict(instr)
        increment_cov_dict(method)
    total_instr = instr["covered_total"] + instr["missed_total"]
    total_method = method["covered_total"] + method["missed_total"]
    csv_rows.append([package_name,
                     _percent(instr["covered_total"], total_instr),
                     _percent(method["covered_total"], total_method),
                     _percent(class_total - class_missed, class_total),
                     total_instr,
                     total_method,
                     class_total])


if __name__ == "__main__":
    for dirname in os.listdir(reports_folder):
        if os.path.isdir(os.path.join(reports_folder, dirname)):
            for filename in os.listdir(os.path.join(reports_folder, dirname)):
                get_coverage(filename, dirname)
            # NOTE: the CSV is written to the CWD under the bare directory
            # name, as in the original script.  newline='' is required for
            # csv.writer to emit correct line endings on all platforms.
            with open(dirname, 'w', newline='') as f:
                writer = csv.writer(f)
                writer.writerow(csv_fields)
                writer.writerows(csv_rows)
            # bug fix: start each directory's CSV from a clean slate instead
            # of accumulating rows from previously processed directories
            csv_rows.clear()
load_analyse_data_module.py
Source:load_analyse_data_module.py
1"""2------------------------------------------------------------------------3This module is responsible for loading, analysing and splitting the data4------------------------------------------------------------------------5"""67import os8import matplotlib.pyplot as plt9from utils_classes import FoldersUtils, DataUtils, TimeSeriesPlots10from reports_class import StatsReports11import config12import warnings13warnings.filterwarnings("ignore")1415# Extract the generated paths by the create_dirs_module.py16sub_dirs = FoldersUtils.unpickle_file('sub_dirs_list')17data_folder = sub_dirs.get('Data')18models_folder = sub_dirs.get('Models')19graphics_folder = sub_dirs.get('Graphics')20reports_folder = sub_dirs.get('Reports')2122def main():23 """24 Functionalities to be executed by the module.25 """26 # Loads dataset inside the "Data" folder27 DATASET = DataUtils.get_data_file(folder = data_folder)2829 # Checks any missing dates or values and if finds any, fills30 # the dates and then the missing values through linear interpolation31 print("\n> Checking any missing dates or values...")32 DATASET = DataUtils.check_missing_dates_and_values(DATASET)3334 print(35 f'\n> Generating a report with descriptive statistics of the time series in {reports_folder}')36 StatsReports.general_stats(DATASET, report_name='0. TS descriptive stats', out_folder=reports_folder)3738 print(f'\n> Saving the time plot to {graphics_folder}')39 TimeSeriesPlots.time_plot(DATASET, out_folder=graphics_folder)4041 print(f'> Saving the ACF and PACF plots to {graphics_folder}')42 TimeSeriesPlots.acf_plot(DATASET, 35, out_folder=graphics_folder)43 TimeSeriesPlots.pacf_plot(DATASET, 35, out_folder=graphics_folder)4445 print(f'\n> Creating a KPSS test report in {reports_folder}')46 StatsReports.kpss_(DATASET, significance=0.05, report_name='1. 
KPSS report', out_folder=reports_folder)4748 print('\n> Spliting the time series dataset in a training and a test set based on the split point provided in the config.py file')49 train, test = DataUtils.train_test_split(DATASET, split_point=config.SPLIT_POINT)5051 DataUtils.train_test_to_csv(train, test, out_folder=data_folder) 52 print(f'> Train and test sets saved to {data_folder}')535455if __name__ == '__main__':5657 main()5859
...
biomodel_reports.py
Source:biomodel_reports.py
1"""2Creates SBML reports with sbmlutils for all curated3models in the latest biomodel release.4Models are 31th Biomodels Release5https://www.ebi.ac.uk/biomodels/content/news/biomodels-release-26th-june-20176"""7import os8from pprint import pprint9from sbmlutils.report import sbmlreport10def model_reports(biomodels_folder, reports_folder):11 """ Create sbmlreports for all biomodels.12 :return:13 """14 #if not os.path.exists(reports_folder):15 # os.mkdir(reports_folder)16 # get all SBML files17 sbml_paths = []18 for f in os.listdir(biomodels_folder):19 if f.endswith('.xml'):20 f_path = os.path.join(biomodels_folder, f)21 if os.path.isfile(f_path):22 sbml_paths.append(f_path)23 pprint(sbml_paths)24 sbmlreport.create_reports(sorted(sbml_paths)[:10], output_dir=reports_folder,25 validate=False)26if __name__ == "__main__":27 biomodels_folder = "/home/mkoenig/biomodels/releases/R31_2017-06-26/curated"28 reports_folder = "/home/mkoenig/biomodels/reports"29 model_reports(biomodels_folder=biomodels_folder,...
Learn to execute automation testing from scratch with the LambdaTest Learning Hub — right from setting up the prerequisites and running your first automation test, to following best practices and diving deeper into advanced test scenarios. The LambdaTest Learning Hub compiles step-by-step guides to help you become proficient with different test automation frameworks, e.g. Selenium, Cypress, TestNG, etc.
You can also refer to the video tutorials on the LambdaTest YouTube channel to get step-by-step demonstrations from industry experts.
Get 100 automation test minutes FREE!