test_results.py
Source: test_results.py
# Software License Agreement (BSD License)
#
# Copyright (c) 2012, Willow Garage, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
#  * Redistributions of source code must retain the above copyright
#    notice, this list of conditions and the following disclaimer.
#  * Redistributions in binary form must reproduce the above
#    copyright notice, this list of conditions and the following
#    disclaimer in the documentation and/or other materials provided
#    with the distribution.
#  * Neither the name of Willow Garage, Inc. nor the names of its
#    contributors may be used to endorse or promote products derived
#    from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.

from __future__ import print_function

import errno
import os
import sys

from xml.etree.ElementTree import ElementTree, ParseError

from catkin.tidy_xml import tidy_xml


def remove_junit_result(filename):
    # if result file exists remove it before test execution
    if os.path.exists(filename):
        os.remove(filename)
    # if placeholder (indicating previous failure) exists remove it before test execution
    missing_filename = _get_missing_junit_result_filename(filename)
    if os.path.exists(missing_filename):
        os.remove(missing_filename)


def ensure_junit_result_exist(filename):
    if os.path.exists(filename):
        # if result file exists ensure that it contains valid xml
        tree = None
        try:
            tree = ElementTree(None, filename)
        except ParseError:
            # print('Invalid XML in result file "%s"' % filename)
            tidy_xml(filename)
            try:
                tree = ElementTree(None, filename)
            except ParseError as e:
                print("Invalid XML in result file '%s' (even after trying to tidy it): %s" % (filename, str(e)), file=sys.stderr)
                return False
        if tree:
            _, num_errors, num_failures = read_junit(filename)
            if num_errors or num_failures:
                return False
    else:
        # if result file does not exist create placeholder which indicates failure
        missing_filename = _get_missing_junit_result_filename(filename)
        print("Cannot find results, writing failure results to '%s'" % missing_filename, file=sys.stderr)
        # create folder if necessary
        if not os.path.exists(os.path.dirname(filename)):
            try:
                os.makedirs(os.path.dirname(filename))
            except OSError as e:
                # catch case where folder has been created in the mean time
                if e.errno != errno.EEXIST:
                    raise
        with open(missing_filename, 'w') as f:
            data = {'test': os.path.basename(filename), 'test_file': filename}
            f.write('''<?xml version="1.0" encoding="UTF-8"?>
<testsuite tests="1" failures="1" time="1" errors="0" name="%(test)s">
  <testcase name="test_ran" status="run" time="1" classname="Results">
    <failure message="Unable to find test results for %(test)s, test did not run.\nExpected results in %(test_file)s" type=""/>
  </testcase>
</testsuite>''' % data)
        return False
    return True


def _get_missing_junit_result_filename(filename):
    return os.path.join(os.path.dirname(filename), 'MISSING-%s' % os.path.basename(filename))


def read_junit(filename):
    """Same as `read_junit2` except it doesn't return num_skipped."""
    num_tests, num_errors, num_failures, _ = read_junit2(filename)
    return (num_tests, num_errors, num_failures)


def read_junit2(filename):
    """
    Parses an xml file expected to follow junit/gtest conventions,
    see http://code.google.com/p/googletest/wiki/AdvancedGuide#Generating_an_XML_Report

    :param filename: str junit xml file name
    :returns: num_tests, num_errors, num_failures, num_skipped
    :raises ParseError: if xml is not well-formed
    :raises IOError: if filename does not exist
    """
    tree = ElementTree()
    root = tree.parse(filename)
    num_tests = int(root.attrib['tests'])
    num_errors = int(root.attrib['errors'])
    num_failures = int(root.attrib['failures'])
    num_skipped = int(root.get('skip', '0')) + int(root.get('disabled', '0'))
    return (num_tests, num_errors, num_failures, num_skipped)


def test_results(test_results_dir, show_verbose=False, show_all=False):
    """Same as `test_results2` except the returned values don't include num_skipped."""
    results = {}
    results2 = test_results2(
        test_results_dir, show_verbose=show_verbose, show_all=show_all)
    for name, values in results2.items():
        num_tests, num_errors, num_failures, _ = values
        results[name] = (num_tests, num_errors, num_failures)
    return results


def test_results2(test_results_dir, show_verbose=False, show_all=False):
    '''
    Collects test results by parsing all xml files in the given path,
    attempting to interpret them as junit results.

    :param test_results_dir: str foldername
    :param show_verbose: bool show output for tests which had errors or failed
    :returns: dict {rel_path: (num_tests, num_errors, num_failures, num_skipped)}
    '''
    results = {}
    for dirpath, dirnames, filenames in os.walk(test_results_dir):
        # do not recurse into folders starting with a dot
        dirnames[:] = [d for d in dirnames if not d.startswith('.')]
        for filename in [f for f in filenames if f.endswith('.xml')]:
            filename_abs = os.path.join(dirpath, filename)
            name = filename_abs[len(test_results_dir) + 1:]
            try:
                num_tests, num_errors, num_failures, num_skipped = read_junit2(filename_abs)
            except Exception as e:
                if show_all:
                    print('Skipping "%s": %s' % (name, str(e)))
                continue
            results[name] = (num_tests, num_errors, num_failures, num_skipped)
            if show_verbose and (num_errors + num_failures > 0):
                print("Full test results for '%s'" % (name))
                print('-------------------------------------------------')
                with open(filename_abs, 'r') as f:
                    print(f.read())
                print('-------------------------------------------------')
    return results


def aggregate_results(results, callback_per_result=None):
    """Same as `aggregate_results2` except it doesn't return num_skipped."""
    callback = None
    if callback_per_result is not None:
        def callback(name, num_tests, num_errors, num_failures, num_skipped):
            callback_per_result(name, num_tests, num_errors, num_failures)
    sum_tests, sum_errors, sum_failures, _ = aggregate_results2(
        results, callback_per_result=callback)
    return (sum_tests, sum_errors, sum_failures)


def aggregate_results2(results, callback_per_result=None):
    """
    Aggregate results.

    :param results: dict as from test_results()
    :returns: tuple (num_tests, num_errors, num_failures, num_skipped)
    """
    sum_tests = sum_errors = sum_failures = sum_skipped = 0
    for name in sorted(results.keys()):
        (num_tests, num_errors, num_failures, num_skipped) = results[name]
        sum_tests += num_tests
        sum_errors += num_errors
        sum_failures += num_failures
        sum_skipped += num_skipped
        if callback_per_result:
            callback_per_result(
                name, num_tests, num_errors, num_failures, num_skipped)
    return sum_tests, sum_errors, sum_failures, sum_skipped


def print_summary(results, show_stable=False, show_unstable=True):
    """Same as `print_summary2` except it doesn't print skipped tests."""
    print_summary2(
        results, show_stable=show_stable, show_unstable=show_unstable,
        print_skipped=False)


def print_summary2(results, show_stable=False, show_unstable=True, print_skipped=True):
    """
    Print a summary to stdout.

    :param results: dict as from test_results()
    :param show_stable: print tests without failures extra
    :param show_unstable: print tests with failures extra
    :param print_skipped: include skipped tests in output
    """
    def callback(name, num_tests, num_errors, num_failures, num_skipped):
        if show_stable and not num_errors and not num_failures and not num_skipped:
            print('%s: %d tests' % (name, num_tests))
        if show_unstable and (num_errors or num_failures or num_skipped):
            msg = '{}: {} tests, {} errors, {} failures'
            msg_args = [name, num_tests, num_errors, num_failures]
            if print_skipped:
                msg += ', {} skipped'
                msg_args.append(num_skipped)
            print(msg.format(*msg_args))
    sum_tests, sum_errors, sum_failures, sum_skipped = aggregate_results2(results, callback)
    msg = 'Summary: {} tests, {} errors, {} failures'
    msg_args = [sum_tests, sum_errors, sum_failures]
    if print_skipped:
        msg += ', {} skipped'
        msg_args.append(sum_skipped)
...
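
For context, read_junit2 above only inspects a few attributes on the root <testsuite> element: tests, errors, failures, plus skip and disabled, which it adds together as the skipped count. Below is a minimal, self-contained sketch of that behaviour; the import path catkin.test_results is assumed (the file above only shows it lives next to catkin.tidy_xml), and the temporary file name is purely illustrative.

import os
import tempfile

from catkin.test_results import read_junit2  # assumed import path

# Illustrative JUnit-style report: only the <testsuite> root attributes matter here.
xml = ('<?xml version="1.0" encoding="UTF-8"?>\n'
       '<testsuite name="demo" tests="5" errors="1" failures="2" disabled="1">\n'
       '</testsuite>\n')
path = os.path.join(tempfile.mkdtemp(), 'demo.xml')
with open(path, 'w') as f:
    f.write(xml)

# 'skip' defaults to 0 and 'disabled' is 1, so num_skipped == 1
print(read_junit2(path))  # expected: (5, 1, 2, 1)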
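And a minimal sketch of how the collection, aggregation, and summary functions defined above could be chained together. Again the import path catkin.test_results is an assumption, and build/test_results is a hypothetical folder of JUnit-style XML files, not something this snippet states.

from catkin.test_results import (  # assumed import path
    aggregate_results2, print_summary2, test_results2)

# Walk a hypothetical results folder and parse every *.xml file it contains.
results = test_results2('build/test_results', show_verbose=True)
num_tests, num_errors, num_failures, num_skipped = aggregate_results2(results)
print_summary2(results, show_stable=False, show_unstable=True)

# A caller could derive an exit code from the aggregate, for example:
exit_code = 0 if (num_errors + num_failures) == 0 else 1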