Best Python code snippet using slash
benchmark.py
Source: benchmark.py
...21 """22 for tool in conf.Tools:23 config = ConfigParser.ConfigParser()24 config.read(os.path.join(BASE_DIRECTORY, "solutions", tool, "solution.ini"))25 set_working_directory("solutions", tool)26 if skip_tests:27 subprocess.check_call(config.get('build', 'skipTests'), shell=True)28 else:29 subprocess.check_call(config.get('build', 'default'), shell=True)30def benchmark(conf, dbuser, dbpwd):31 """32 Runs measurements33 """34 header = os.path.join(BASE_DIRECTORY, "output", "header.csv")35 result_file = os.path.join(BASE_DIRECTORY, "output", "res_"+ FILE_NAME + ".csv")36 if os.path.exists(result_file):37 os.remove(result_file)38 shutil.copy(header, result_file)39 os.environ['MODE'] = conf.Mode40 os.environ['RUNS'] = str(conf.Runs)41 os.environ['DBUSER'] = dbuser42 os.environ['DBPSW'] = dbpwd43 for tool in conf.Tools:44 config = ConfigParser.ConfigParser()45 config.read(os.path.join(BASE_DIRECTORY, "solutions", tool, "solution.ini"))46 set_working_directory("solutions", tool)47 os.environ['TOOL'] = tool48 for iScenario, scenario in enumerate(conf.Scenario):49 os.environ['SCENARIO'] = scenario50 procedureCall = conf.ProcedureCall51 os.environ['PROCEDURECALL'] = procedureCall52 try:53 for iTestCase, testCase in enumerate(conf.TestCases):54 user, role = testCase.split(',',1);55 if user == 'lid{0}':56 user = user.format(int(scenario[3:]))57 os.environ['USER'] = user58 else:59 os.environ['USER'] = user60 os.environ['ROLE'] = role61 for r in range(0, conf.Runs):62 os.environ['RUNINDEX'] = str(r)63 print("Running benchmark: tool = {0}, runIndex = {1}, scenario = {2}, procedure = {3}, user = {4}, role = {5}".format(tool, str(r), scenario, procedureCall, user, role))64 65 # instead of subprocess.check_output()66 # to enforce timeout before Python 3.7.567 # and kill sub-processes to avoid interference68 # https://stackoverflow.com/a/3695542069 with subprocess.Popen(config.get('run', 'cmd'), shell=True, stdout=subprocess.PIPE,70 start_new_session=True) as process:71 try:72 
stdout, stderr = process.communicate(timeout=conf.Timeout)73 return_code = process.poll()74 if return_code:75 raise subprocess.CalledProcessError(return_code, process.args,76 output=stdout, stderr=stderr)77 except subprocess.TimeoutExpired:78 os.killpg(process.pid, signal.SIGINT) # send signal to the process group79 raise80 with open(result_file, "ab") as file:81 file.write(stdout)82 except subprocess.TimeoutExpired as e:83 print("Program reached the timeout set ({0} seconds). The command we executed was '{1}'".format(e.timeout, e.cmd))84def clean_dir(*path):85 dir = os.path.join(BASE_DIRECTORY, *path)86 if os.path.exists(dir):87 shutil.rmtree(dir)88 os.mkdir(dir)89def set_working_directory(*path):90 dir = os.path.join(BASE_DIRECTORY, *path)91 os.chdir(dir)92def visualize():93 """94 Visualizes the benchmark results95 """96 clean_dir("diagrams")97 set_working_directory("reporting2")98 subprocess.call(["Rscript", "-e", "rmarkdown::render('report.Rmd', output_format=rmarkdown::pdf_document())"])99def check_results():100 """101 Checks the benchmark results102 """103 clean_dir("results")104 set_working_directory("reporting")105 subprocess.call(["Rscript", "check_results.R"])106if __name__ == "__main__":107 parser = argparse.ArgumentParser()108 parser.add_argument("-b", "--build",109 help="build the project",110 action="store_true")111 parser.add_argument("-m", "--measure",112 help="run the benchmark",113 action="store_true")114 parser.add_argument("-s", "--skip-tests",115 help="skip tests",116 action="store_true")117 parser.add_argument("-v", "--visualize",118 help="create visualizations",119 action="store_true")120 # parser.add_argument("-c", "--check",121 # help="check results",122 # action="store_true")123 parser.add_argument("-t", "--test",124 help="run test",125 action="store_true")126 parser.add_argument("-d", "--debug",127 help="set debug to true",128 action="store_true")129 parser.add_argument('-c', '--testcase', 130 action='store', 131 dest='testcase', 132 
help='The testcase filename.')133 parser.add_argument('-u', '--database-user', 134 action='store', 135 dest='username', 136 help='The database username.')137 parser.add_argument('-p', '--database-pwd', 138 action='store', 139 dest='password', 140 help='The database password.')141 args = parser.parse_args()142 FILE_NAME = args.testcase143 set_working_directory("config")144 with open("config_"+FILE_NAME+".json", "r") as config_file:145 config = json.load(config_file, object_hook=JSONObject)146 # if there are no args, execute a full sequence147 # with the test and the visualization/reporting148 no_args = all(not val for val in vars(args).values())149 if args.debug:150 os.environ['DEBUG'] = 'true'151 if args.build or args.test or no_args:152 build(config, args.skip_tests and not args.test)153 if args.measure or no_args:154 benchmark(config, args.username, args.password)155 if args.visualize or no_args:156 visualize()157 # if args.check or no_args:...
run.py
Source: run.py
...21 """22 for tool in conf.Tools:23 config = ConfigParser.ConfigParser()24 config.read(os.path.join(BASE_DIRECTORY, "solutions", tool, "solution.ini"))25 set_working_directory("solutions", tool)26 if skip_tests:27 subprocess.check_call(config.get('build', 'skipTests'), shell=True)28 else:29 subprocess.check_call(config.get('build', 'default'), shell=True)30def generate(conf):31 """32 Generates additional change sequences33 """34 clean_dir("changes")35 set_working_directory("generator")36 generator_bin = os.path.join("bin", "Release", "Generator.exe")37 models_dir = os.path.join(BASE_DIRECTORY, "models")38 for change_set in conf.ChangeSets:39 full_change_path = os.path.abspath(os.path.join(BASE_DIRECTORY, "changes", change_set))40 subprocess.check_call([generator_bin,41 "--cim", os.path.join(models_dir, "CIM_DCIM.xmi"),42 "--cosem", os.path.join(models_dir, "COSEM.xmi"),43 "--substation", os.path.join(models_dir, "Substandard.xmi"),44 "--cimOut", os.path.join(full_change_path, "CIM_DCIM"),45 "--cosemOut", os.path.join(full_change_path, "COSEM"),46 "--substationOut", os.path.join(full_change_path, "Substandard"),47 "-n", str(conf.SequenceLength), "-d", str(conf.Sequences)])48def benchmark(conf):49 """50 Runs measurements51 """52 header = os.path.join(BASE_DIRECTORY, "output", "header.csv")53 result_file = os.path.join(BASE_DIRECTORY, "output", "output.csv")54 if os.path.exists(result_file):55 os.remove(result_file)56 shutil.copy(header, result_file)57 os.environ['Sequences'] = str(conf.Sequences)58 os.environ['SequenceLength'] = str(conf.SequenceLength)59 os.environ['Runs'] = str(conf.Runs)60 for r in range(0, conf.Runs):61 os.environ['RunIndex'] = str(r)62 for tool in conf.Tools:63 config = ConfigParser.ConfigParser()64 config.read(os.path.join(BASE_DIRECTORY, "solutions", tool, "solution.ini"))65 set_working_directory("solutions", tool)66 os.environ['Tool'] = tool67 for change_set in conf.ChangeSets:68 full_change_path = 
os.path.abspath(os.path.join(BASE_DIRECTORY, "changes", change_set))69 os.environ['ChangeSet'] = change_set70 os.environ['ChangePath'] = full_change_path71 for view in conf.Views:72 os.environ['View'] = view73 print("Running benchmark: tool = " + tool + ", change set = " + change_set +74 ", view = " + view)75 try:76 output = subprocess.check_output(config.get('run', view), shell=True)77 with open(result_file, "ab") as file:78 file.write(output)79 except CalledProcessError as e:80 print("Program exited with error")81def clean_dir(*path):82 dir = os.path.join(BASE_DIRECTORY, *path)83 if os.path.exists(dir):84 shutil.rmtree(dir)85 os.mkdir(dir)86def set_working_directory(*path):87 dir = os.path.join(BASE_DIRECTORY, *path)88 os.chdir(dir)89def visualize():90 """91 Visualizes the benchmark results92 """93 clean_dir("diagrams")94 set_working_directory("reporting")95 subprocess.call(["Rscript", "visualize.R", os.path.join(BASE_DIRECTORY, "config", "reporting.json")])96def extract_results():97 """98 Extracts the benchmark results99 """100 clean_dir("results")101 set_working_directory("reporting")102 subprocess.call(["Rscript", "extract_results.R"])103if __name__ == "__main__":104 parser = argparse.ArgumentParser()105 parser.add_argument("-b", "--build",106 help="build the project",107 action="store_true")108 parser.add_argument("-g", "--generate",109 help="generate models",110 action="store_true")111 parser.add_argument("-m", "--measure",112 help="run the benchmark",113 action="store_true")114 parser.add_argument("-s", "--skip-tests",115 help="skip JUNIT tests",116 action="store_true")117 parser.add_argument("-v", "--visualize",118 help="create visualizations",119 action="store_true")120 parser.add_argument("-e", "--extract",121 help="extract results",122 action="store_true")123 parser.add_argument("-t", "--test",124 help="run test",125 action="store_true")126 args = parser.parse_args()127 set_working_directory("config")128 with open("config.json", "r") as config_file:129 
config = json.load(config_file, object_hook = JSONObject)130 if args.build:131 build(config, args.skip_tests)132 if args.generate:133 generate(config)134 if args.measure:135 benchmark(config)136 if args.test:137 build(config, False)138 if args.visualize:139 visualize()140 if args.extract:141 extract_results()...
Learn to execute automation testing from scratch with the LambdaTest Learning Hub. Right from setting up the prerequisites to running your first automation test, to following best practices and diving deeper into advanced test scenarios, the LambdaTest Learning Hub compiles a list of step-by-step guides to help you become proficient with different test automation frameworks, such as Selenium, Cypress, and TestNG.
You can also refer to the video tutorials on the LambdaTest YouTube channel to get step-by-step demonstrations from industry experts.
Get 100 automation test minutes for FREE!