subunit_trace.py (source: the stestr package)
# ... (earlier module code elided: the show_outcome(), print_fails(),
# get_duration(), and total_seconds() helpers)

# Imports used by the functions shown below (from the module header):
import argparse
import functools
import os
import re
import sys

import pbr.version
import subunit
import testtools

from stestr import results

# Maps a worker number to the list of per-test result dicts collected
# by show_outcome() while the stream is processed.
RESULTS = {}


def count_tests(key, value):
    count = 0
    for k, v in RESULTS.items():
        for item in v:
            if key in item:
                if re.search(value, item[key]):
                    count += 1
    return count


def get_stuck_in_progress():
    key = 'status'
    match = re.compile('^inprogress$')
    in_progress = []
    for k, v in RESULTS.items():
        for item in v:
            if key in item:
                if match.search(item[key]):
                    in_progress.append(item['id'])
    return in_progress


def run_time():
    runtime = 0.0
    for k, v in RESULTS.items():
        for test in v:
            test_dur = get_duration(test['timestamps']).strip('s')
            # NOTE(toabctl): get_duration() can return an empty string
            # which leads to a ValueError when casting to float
            if test_dur:
                runtime += float(test_dur)
    return runtime


def worker_stats(worker):
    tests = RESULTS[worker]
    num_tests = len(tests)
    stop_time = tests[-1]['timestamps'][1]
    start_time = tests[0]['timestamps'][0]
    if not start_time or not stop_time:
        delta = 'N/A'
    else:
        delta = stop_time - start_time
    return num_tests, str(delta)


def print_summary(stream, elapsed_time):
    stream.write("\n======\nTotals\n======\n")
    stream.write("Ran: {} tests in {:.4f} sec.\n".format(
        count_tests('status', '.*'), total_seconds(elapsed_time)))
    stream.write(" - Passed: %s\n" % count_tests('status', '^success$'))
    stream.write(" - Skipped: %s\n" % count_tests('status', '^skip$'))
    stream.write(" - Expected Fail: %s\n" % count_tests('status', '^xfail$'))
    stream.write(" - Unexpected Success: %s\n" % count_tests('status',
                                                             '^uxsuccess$'))
    stream.write(" - Failed: %s\n" % count_tests('status', '^fail$'))
    stream.write("Sum of execute time for each test: %.4f sec.\n" % run_time())
    # we could have no results, especially as we filter out the process-codes
    if RESULTS:
        stream.write("\n==============\nWorker Balance\n==============\n")
        for w in range(max(RESULTS.keys()) + 1):
            if w not in RESULTS:
                stream.write(
                    " - WARNING: missing Worker %s!\n" % w)
            else:
                num, time = worker_stats(w)
                out_str = " - Worker {} ({} tests) => {}".format(w, num, time)
                if time.isdigit():
                    out_str += 's'
                out_str += '\n'
                stream.write(out_str)


__version__ = pbr.version.VersionInfo('stestr').version_string()


def parse_args():
    parser = argparse.ArgumentParser()
    parser.add_argument('--version', action='version',
                        version='%s' % __version__)
    parser.add_argument('--no-failure-debug', '-n', action='store_true',
                        dest='print_failures', help='Disable printing failure '
                        'debug information in realtime')
    parser.add_argument('--fails', '-f', action='store_true',
                        dest='post_fails', help='Print failure debug '
                        'information after the stream is processed')
    parser.add_argument('--failonly', action='store_true',
                        dest='failonly', help="Don't print success items",
                        default=(
                            os.environ.get('TRACE_FAILONLY', False)
                            is not False))
    parser.add_argument('--abbreviate', '-a', action='store_true',
                        dest='abbreviate', help='Print one character status '
                        'for each test')
    parser.add_argument('--perc-diff', '-d', action='store_true',
                        dest='enable_diff',
                        help="Print percent change in run time on each test")
    parser.add_argument('--diff-threshold', '-t', dest='threshold',
                        help="Threshold to use for displaying percent change "
                             "from the avg run time. If one is not specified "
                             "the percent change will always be displayed")
    parser.add_argument('--no-summary', action='store_true',
                        help="Don't print the summary of the test run after "
                             "it completes")
    parser.add_argument('--color', action='store_true',
                        help="Print results with colors")
    return parser.parse_args()


def trace(stdin, stdout, print_failures=False, failonly=False,
          enable_diff=False, abbreviate=False, color=False, post_fails=False,
          no_summary=False, suppress_attachments=False, all_attachments=False,
          show_binary_attachments=False):
    stream = subunit.ByteStreamToStreamResult(
        stdin, non_subunit_name='stdout')
    outcomes = testtools.StreamToDict(
        functools.partial(show_outcome, stdout,
                          print_failures=print_failures,
                          failonly=failonly,
                          enable_diff=enable_diff,
                          abbreviate=abbreviate,
                          enable_color=color,
                          suppress_attachments=suppress_attachments,
                          all_attachments=all_attachments,
                          show_binary_attachments=show_binary_attachments))
    summary = testtools.StreamSummary()
    result = testtools.CopyStreamResult([outcomes, summary])
    result = testtools.StreamResultRouter(result)
    cat = subunit.test_results.CatFiles(stdout)
    result.add_rule(cat, 'test_id', test_id=None)
    result.startTestRun()
    try:
        stream.run(result)
    finally:
        result.stopTestRun()
    start_times = []
    stop_times = []
    for worker in RESULTS:
        start_times += [
            x['timestamps'][0] for x in RESULTS[worker] if
            x['timestamps'][0] is not None]
        stop_times += [
            x['timestamps'][1] for x in RESULTS[worker] if
            x['timestamps'][1] is not None]
    if not start_times:
        print("The test run didn't actually run any tests", file=sys.stderr)
        return 1
    start_time = min(start_times)
    stop_time = max(stop_times)
    elapsed_time = stop_time - start_time
    if count_tests('status', '.*') == 0:
        print("The test run didn't actually run any tests", file=sys.stderr)
        return 1
    if post_fails:
        print_fails(stdout)
    if not no_summary:
        print_summary(stdout, elapsed_time)
    # NOTE(mtreinish): Ideally this should live in testtools streamSummary
    # this is just in place until the behavior lands there (if it ever does)
    if count_tests('status', '^success$') == 0:
        print("\nNo tests were successful during the run", file=sys.stderr)
        return 1
    in_progress = get_stuck_in_progress()
    if in_progress:
        print("\nThe following tests exited without returning a status\n"
              "and likely segfaulted or crashed Python:", file=sys.stderr)
        for test in in_progress:
            print("\n\t* %s" % test, file=sys.stderr)
        return 1
    return 0 if results.wasSuccessful(summary) else 1


def main():
    args = parse_args()
    exit(trace(sys.stdin, sys.stdout, args.print_failures, args.failonly,
               args.enable_diff, args.abbreviate, args.color, args.post_fails,
               args.no_summary))


if __name__ == '__main__':
    main()
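Because trace() just consumes a subunit v2 byte stream and returns a process exit code, it can be exercised without a real test runner. The sketch below is illustrative rather than part of the module: it fabricates a one-test stream in memory with python-subunit's StreamResultToBytes and pipes it through trace(). The test id demo.test_ok is made up, and the worker-0 tag stands in for the tag stestr's scheduler normally attaches, which the worker-balance summary relies on.

# A minimal sketch, assuming stestr and python-subunit are installed.
import io
import sys
from datetime import datetime, timezone

from subunit.v2 import StreamResultToBytes
from stestr import subunit_trace

# Encode a subunit v2 stream with a single passing test into a buffer.
buf = io.BytesIO()
out = StreamResultToBytes(buf)
out.startTestRun()
out.status(test_id='demo.test_ok', test_status='inprogress',
           test_tags=['worker-0'], timestamp=datetime.now(timezone.utc))
out.status(test_id='demo.test_ok', test_status='success',
           test_tags=['worker-0'], timestamp=datetime.now(timezone.utc))
out.stopTestRun()
buf.seek(0)

# trace() prints the per-test lines and summary to the second argument
# and returns the code main() would pass to exit(): 0 here, since the
# only test passed.
exit_code = subunit_trace.trace(buf, sys.stdout)
print("exit code:", exit_code)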