How to use the test_subtest_error method in unittest-xml-reporting

Best Python code snippets using unittest-xml-reporting
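Before looking at the full snippets below, here is a minimal, self-contained sketch of the pattern they exercise: a test method that raises inside self.subTest(), run with unittest-xml-reporting's XMLTestRunner so the failing subtests are recorded in a JUnit-style XML report. The class name, loop bounds, and output directory are illustrative, not taken from the snippets.

import unittest

import xmlrunner  # provided by the unittest-xml-reporting package


class Case(unittest.TestCase):
    def test_subtest_error(self):
        for i in range(3):
            with self.subTest(i=i):
                # Raising an exception (rather than failing an assertion)
                # records an error, not a failure, for this subtest.
                raise RuntimeError(i)


if __name__ == "__main__":
    # Writes JUnit-style XML under ./test-reports/ instead of plain text output.
    unittest.main(testRunner=xmlrunner.XMLTestRunner(output="test-reports"))

Running this module reports one error per raising subtest, which is the behavior the functional tests below assert on.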

test_subtests.py

Source: test_subtests.py (GitHub)


import os
import tempfile
from xml.etree import ElementTree as ET

from nose2.tests._common import FunctionalTestCase


def read_report(path):
    with open(path, encoding="utf-8") as f:
        return ET.parse(f).getroot()


class TestSubtests(FunctionalTestCase):
    def test_success(self):
        proc = self.runIn(
            "scenario/subtests", "-v", "test_subtests.Case.test_subtest_success"
        )
        self.assertTestRunOutputMatches(proc, stderr="Ran 1 test")
        self.assertTestRunOutputMatches(proc, stderr="test_subtest_success")
        self.assertTestRunOutputMatches(proc, stderr="OK")
        self.assertEqual(proc.poll(), 0)

    def test_failure(self):
        proc = self.runIn(
            "scenario/subtests", "-v", "test_subtests.Case.test_subtest_failure"
        )
        self.assertTestRunOutputMatches(proc, stderr="Ran 1 test")
        self.assertTestRunOutputMatches(proc, stderr=r"test_subtest_failure.*\(i=1\)")
        self.assertTestRunOutputMatches(proc, stderr=r"test_subtest_failure.*\(i=3\)")
        self.assertTestRunOutputMatches(proc, stderr=r"test_subtest_failure.*\(i=5\)")
        self.assertTestRunOutputMatches(proc, stderr=r"FAILED \(failures=3\)")
        self.assertEqual(proc.poll(), 1)

    def test_error(self):
        proc = self.runIn(
            "scenario/subtests", "-v", "test_subtests.Case.test_subtest_error"
        )
        self.assertTestRunOutputMatches(proc, stderr="Ran 1 test")
        self.assertTestRunOutputMatches(proc, stderr=r"test_subtest_error.*\(i=0\)")
        self.assertTestRunOutputMatches(proc, stderr="RuntimeError: 0")
        self.assertTestRunOutputMatches(proc, stderr=r"test_subtest_error.*\(i=1\)")
        self.assertTestRunOutputMatches(proc, stderr="RuntimeError: 1")
        self.assertTestRunOutputMatches(proc, stderr=r"test_subtest_error.*\(i=2\)")
        self.assertTestRunOutputMatches(proc, stderr="RuntimeError: 2")
        self.assertTestRunOutputMatches(proc, stderr=r"FAILED \(errors=3\)")
        self.assertEqual(proc.poll(), 1)

    def test_expected_failure(self):
        proc = self.runIn(
            "scenario/subtests",
            "-v",
            "test_subtests.Case.test_subtest_expected_failure",
        )
        self.assertTestRunOutputMatches(proc, stderr="Ran 1 test")
        self.assertTestRunOutputMatches(
            proc, stderr="test_subtest_expected_failure.*expected failure"
        )
        self.assertTestRunOutputMatches(proc, stderr=r"OK \(expected failures=1\)")
        self.assertEqual(proc.poll(), 0)

    def test_message(self):
        proc = self.runIn(
            "scenario/subtests", "-v", "test_subtests.Case.test_subtest_message"
        )
        self.assertTestRunOutputMatches(proc, stderr="Ran 1 test")
        self.assertTestRunOutputMatches(
            proc, stderr=r"test_subtest_message.*\[msg\] \(i=1\)"
        )
        self.assertTestRunOutputMatches(
            proc, stderr=r"test_subtest_message.*\[msg\] \(i=3\)"
        )
        self.assertTestRunOutputMatches(
            proc, stderr=r"test_subtest_message.*\[msg\] \(i=5\)"
        )
        self.assertTestRunOutputMatches(proc, stderr=r"FAILED \(failures=3\)")
        self.assertEqual(proc.poll(), 1)

    def test_all(self):
        proc = self.runIn("scenario/subtests", "-v", "test_subtests")
        self.assertTestRunOutputMatches(proc, stderr="Ran 5 tests")
        self.assertTestRunOutputMatches(proc, stderr=r"test_subtest_failure.*\(i=1\)")
        self.assertTestRunOutputMatches(proc, stderr=r"test_subtest_failure.*\(i=3\)")
        self.assertTestRunOutputMatches(proc, stderr=r"test_subtest_failure.*\(i=5\)")
        self.assertTestRunOutputMatches(
            proc, stderr=r"test_subtest_message.*\[msg\] \(i=1\)"
        )
        self.assertTestRunOutputMatches(
            proc, stderr=r"test_subtest_message.*\[msg\] \(i=3\)"
        )
        self.assertTestRunOutputMatches(
            proc, stderr=r"test_subtest_message.*\[msg\] \(i=5\)"
        )
        self.assertTestRunOutputMatches(proc, stderr=r"test_subtest_error.*\(i=0\)")
        self.assertTestRunOutputMatches(proc, stderr=r"test_subtest_error.*\(i=1\)")
        self.assertTestRunOutputMatches(proc, stderr=r"test_subtest_error.*\(i=2\)")
        self.assertTestRunOutputMatches(
            proc, stderr=r"FAILED \(failures=6, errors=3, expected failures=1\)"
        )
        self.assertEqual(proc.poll(), 1)


class TestSubtestsMultiprocess(FunctionalTestCase):
    def test_success(self):
        proc = self.runIn(
            "scenario/subtests",
            "--plugin=nose2.plugins.mp",
            "--processes=2",
            "-v",
            "test_subtests.Case.test_subtest_success",
        )
        self.assertTestRunOutputMatches(proc, stderr="Ran 1 test")
        self.assertTestRunOutputMatches(proc, stderr="test_subtest_success")
        self.assertTestRunOutputMatches(proc, stderr="OK")
        self.assertEqual(proc.poll(), 0)

    def test_failure(self):
        proc = self.runIn(
            "scenario/subtests",
            "--plugin=nose2.plugins.mp",
            "--processes=2",
            "-v",
            "test_subtests.Case.test_subtest_failure",
        )
        self.assertTestRunOutputMatches(proc, stderr="Ran 1 test")
        self.assertTestRunOutputMatches(proc, stderr=r"test_subtest_failure.*\(i=1\)")
        self.assertTestRunOutputMatches(proc, stderr=r"test_subtest_failure.*\(i=3\)")
        self.assertTestRunOutputMatches(proc, stderr=r"test_subtest_failure.*\(i=5\)")
        self.assertTestRunOutputMatches(proc, stderr=r"FAILED \(failures=3\)")
        self.assertEqual(proc.poll(), 1)

    def test_error(self):
        proc = self.runIn(
            "scenario/subtests",
            "--plugin=nose2.plugins.mp",
            "--processes=2",
            "-v",
            "test_subtests.Case.test_subtest_error",
        )
        self.assertTestRunOutputMatches(proc, stderr="Ran 1 test")
        self.assertTestRunOutputMatches(proc, stderr=r"test_subtest_error.*\(i=0\)")
        self.assertTestRunOutputMatches(proc, stderr="RuntimeError: 0")
        self.assertTestRunOutputMatches(proc, stderr=r"test_subtest_error.*\(i=1\)")
        self.assertTestRunOutputMatches(proc, stderr="RuntimeError: 1")
        self.assertTestRunOutputMatches(proc, stderr=r"test_subtest_error.*\(i=2\)")
        self.assertTestRunOutputMatches(proc, stderr="RuntimeError: 2")
        self.assertTestRunOutputMatches(proc, stderr=r"FAILED \(errors=3\)")
        self.assertEqual(proc.poll(), 1)

    def test_expected_failure(self):
        proc = self.runIn(
            "scenario/subtests",
            "--plugin=nose2.plugins.mp",
            "--processes=2",
            "-v",
            "test_subtests.Case.test_subtest_expected_failure",
        )
        self.assertTestRunOutputMatches(proc, stderr="Ran 1 test")
        self.assertTestRunOutputMatches(
            proc, stderr="test_subtest_expected_failure.*expected failure"
        )
        self.assertTestRunOutputMatches(proc, stderr=r"OK \(expected failures=1\)")
        self.assertEqual(proc.poll(), 0)

    def test_message(self):
        proc = self.runIn(
            "scenario/subtests",
            "--plugin=nose2.plugins.mp",
            "--processes=2",
            "-v",
            "test_subtests.Case.test_subtest_message",
        )
        self.assertTestRunOutputMatches(proc, stderr="Ran 1 test")
        self.assertTestRunOutputMatches(
            proc, stderr=r"test_subtest_message.*\[msg\] \(i=1\)"
        )
        self.assertTestRunOutputMatches(
            proc, stderr=r"test_subtest_message.*\[msg\] \(i=3\)"
        )
        self.assertTestRunOutputMatches(
            proc, stderr=r"test_subtest_message.*\[msg\] \(i=5\)"
        )
        self.assertTestRunOutputMatches(proc, stderr=r"FAILED \(failures=3\)")
        self.assertEqual(proc.poll(), 1)

    def test_all(self):
        proc = self.runIn(
            "scenario/subtests",
            "--plugin=nose2.plugins.mp",
            "--processes=2",
            "-v",
            "test_subtests",
        )
        self.assertTestRunOutputMatches(proc, stderr="Ran 5 tests")
        self.assertTestRunOutputMatches(proc, stderr=r"test_subtest_failure.*\(i=1\)")
        self.assertTestRunOutputMatches(proc, stderr=r"test_subtest_failure.*\(i=3\)")
        self.assertTestRunOutputMatches(proc, stderr=r"test_subtest_failure.*\(i=5\)")
        self.assertTestRunOutputMatches(
            proc, stderr=r"test_subtest_message.*\[msg\] \(i=1\)"
        )
        self.assertTestRunOutputMatches(
            proc, stderr=r"test_subtest_message.*\[msg\] \(i=3\)"
        )
        self.assertTestRunOutputMatches(
            proc, stderr=r"test_subtest_message.*\[msg\] \(i=5\)"
        )
        self.assertTestRunOutputMatches(proc, stderr=r"test_subtest_error.*\(i=0\)")
        self.assertTestRunOutputMatches(proc, stderr=r"test_subtest_error.*\(i=1\)")
        self.assertTestRunOutputMatches(proc, stderr=r"test_subtest_error.*\(i=2\)")
        self.assertTestRunOutputMatches(
            proc, stderr=r"FAILED \(failures=6, errors=3, expected failures=1\)"
        )
        self.assertEqual(proc.poll(), 1)


class TestSubtestsJunitXml(FunctionalTestCase):
    def setUp(self):
        super().setUp()
        tmp = tempfile.NamedTemporaryFile(delete=False)
        tmp.close()
        self.junit_report = tmp.name

    def tearDown(self):
        os.remove(self.junit_report)

    def test_success(self):
        proc = self.runIn(
            "scenario/subtests",
            "--plugin=nose2.plugins.junitxml",
            "--junit-xml",
            f"--junit-xml-path={self.junit_report}",
            "-v",
            "test_subtests.Case.test_subtest_success",
        )
        self.assertTestRunOutputMatches(proc, stderr="Ran 1 test")
        self.assertTestRunOutputMatches(proc, stderr="OK")
        self.assertEqual(proc.poll(), 0)
        tree = read_report(self.junit_report)
        self.assertEqual(tree.get("tests"), "1")
        self.assertEqual(tree.get("failures"), "0")
        self.assertEqual(tree.get("errors"), "0")
        self.assertEqual(tree.get("skipped"), "0")
        self.assertEqual(len(tree.findall("testcase")), 1)
        for test_case in tree.findall("testcase"):
            self.assertEqual(test_case.get("name"), "test_subtest_success")

    def test_failure(self):
        proc = self.runIn(
            "scenario/subtests",
            "--plugin=nose2.plugins.junitxml",
            "--junit-xml",
            f"--junit-xml-path={self.junit_report}",
            "-v",
            "test_subtests.Case.test_subtest_failure",
        )
        self.assertTestRunOutputMatches(proc, stderr="Ran 1 test")
        self.assertTestRunOutputMatches(proc, stderr="FAILED")
        self.assertEqual(proc.poll(), 1)
        tree = read_report(self.junit_report)
        self.assertEqual(tree.get("tests"), "1")
        self.assertEqual(tree.get("failures"), "3")
        self.assertEqual(tree.get("errors"), "0")
        self.assertEqual(tree.get("skipped"), "0")
        self.assertEqual(len(tree.findall("testcase")), 3)
        for index, test_case in enumerate(tree.findall("testcase")):
            self.assertEqual(
                test_case.get("name"),
                f"test_subtest_failure (i={index * 2 + 1})",
            )

    def test_error(self):
        proc = self.runIn(
            "scenario/subtests",
            "--plugin=nose2.plugins.junitxml",
            "--junit-xml",
            f"--junit-xml-path={self.junit_report}",
            "-v",
            "test_subtests.Case.test_subtest_error",
        )
        self.assertTestRunOutputMatches(proc, stderr="Ran 1 test")
        self.assertTestRunOutputMatches(proc, stderr="FAILED")
        self.assertEqual(proc.poll(), 1)
        tree = read_report(self.junit_report)
        self.assertEqual(tree.get("tests"), "1")
        self.assertEqual(tree.get("failures"), "0")
        self.assertEqual(tree.get("errors"), "3")
        self.assertEqual(tree.get("skipped"), "0")
        self.assertEqual(len(tree.findall("testcase")), 3)
        for index, test_case in enumerate(tree.findall("testcase")):
            self.assertEqual(test_case.get("name"), f"test_subtest_error (i={index})")

    def test_expected_failure(self):
        proc = self.runIn(
            "scenario/subtests",
            "--plugin=nose2.plugins.junitxml",
            "--junit-xml",
            f"--junit-xml-path={self.junit_report}",
            "-v",
            "test_subtests.Case.test_subtest_expected_failure",
        )
        self.assertTestRunOutputMatches(proc, stderr="Ran 1 test")
        self.assertTestRunOutputMatches(proc, stderr="OK")
        self.assertEqual(proc.poll(), 0)
        tree = read_report(self.junit_report)
        self.assertEqual(tree.get("tests"), "1")
        self.assertEqual(tree.get("failures"), "0")
        self.assertEqual(tree.get("errors"), "0")
        self.assertEqual(tree.get("skipped"), "1")
        self.assertEqual(len(tree.findall("testcase")), 1)
        for test_case in tree.findall("testcase"):
            self.assertEqual(test_case.get("name"), "test_subtest_expected_failure")

    def test_message(self):
        proc = self.runIn(
            "scenario/subtests",
            "--plugin=nose2.plugins.junitxml",
            "--junit-xml",
            f"--junit-xml-path={self.junit_report}",
            "-v",
            "test_subtests.Case.test_subtest_message",
        )
        self.assertTestRunOutputMatches(proc, stderr="Ran 1 test")
        self.assertTestRunOutputMatches(proc, stderr="FAILED")
        self.assertEqual(proc.poll(), 1)
        tree = read_report(self.junit_report)
        self.assertEqual(tree.get("tests"), "1")
        self.assertEqual(tree.get("failures"), "3")
        self.assertEqual(tree.get("errors"), "0")
        self.assertEqual(tree.get("skipped"), "0")
        self.assertEqual(len(tree.findall("testcase")), 3)
        for index, test_case in enumerate(tree.findall("testcase")):
            self.assertEqual(
                test_case.get("name"),
                f"test_subtest_message [msg] (i={index * 2 + 1})",
            )

    def test_all(self):
        proc = self.runIn(
            "scenario/subtests",
            "--plugin=nose2.plugins.junitxml",
            "--junit-xml",
            f"--junit-xml-path={self.junit_report}",
            "-v",
            "test_subtests",
        )
        self.assertTestRunOutputMatches(proc, stderr="Ran 5 tests")
        self.assertTestRunOutputMatches(proc, stderr="FAILED")
        self.assertEqual(proc.poll(), 1)
        tree = read_report(self.junit_report)
        self.assertEqual(tree.get("tests"), "5")
        self.assertEqual(tree.get("failures"), "6")
        self.assertEqual(tree.get("errors"), "3")
        self.assertEqual(tree.get("skipped"), "1")
        self.assertEqual(len(tree.findall("testcase")), 11)


class TestSubtestsFailFast(FunctionalTestCase):
    def test_failure(self):
        proc = self.runIn(
            "scenario/subtests", "-v", "test_subtests.Case.test_subtest_failure"
        )
        self.assertTestRunOutputMatches(proc, stderr="Ran 1 test")
        self.assertTestRunOutputMatches(proc, stderr=r"test_subtest_failure.*\(i=1\)")
        self.assertTestRunOutputMatches(proc, stderr=r"test_subtest_failure.*\(i=3\)")
        self.assertTestRunOutputMatches(proc, stderr=r"test_subtest_failure.*\(i=5\)")
        self.assertTestRunOutputMatches(proc, stderr=r"FAILED \(failures=3\)")
        self.assertEqual(proc.poll(), 1)

    def test_failfast(self):
        proc = self.runIn(
            "scenario/subtests",
            "--fail-fast",
            "-v",
            "test_subtests.Case.test_subtest_failure",
        )
        self.assertTestRunOutputMatches(proc, stderr="Ran 1 test")
        self.assertTestRunOutputMatches(proc, stderr=r"test_subtest_failure.*\(i=1\)")
        self.assertTestRunOutputMatches(proc, stderr=r"FAILED \(failures=1\)")
        ...
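The functional tests above run against a fixture module under scenario/subtests that is not reproduced on this page. A plausible sketch of that fixture, inferred from the output the tests assert on (method names come from the snippet; loop bounds, messages, and assertions are guesses), looks like this:

import unittest


class Case(unittest.TestCase):
    def test_subtest_success(self):
        for i in range(3):
            with self.subTest(i=i):
                pass  # every subtest passes

    def test_subtest_failure(self):
        for i in range(6):
            with self.subTest(i=i):
                self.assertEqual(i % 2, 0)  # fails for i=1, 3, 5

    def test_subtest_error(self):
        for i in range(3):
            with self.subTest(i=i):
                raise RuntimeError(i)  # errors for i=0, 1, 2

    def test_subtest_message(self):
        for i in range(6):
            with self.subTest("msg", i=i):
                self.assertEqual(i % 2, 0)  # failures are labelled [msg] (i=...)

    @unittest.expectedFailure
    def test_subtest_expected_failure(self):
        for i in range(3):
            with self.subTest(i=i):
                self.assertLess(i, 1)  # at least one subtest fails, as expected

With a fixture like this, test_subtest_error produces one error per subtest, which matches what the assertions above check for: RuntimeError: 0 through RuntimeError: 2 and FAILED (errors=3).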


check_runner.py

Source: check_runner.py (GitHub)


import mpi_test_runner as mtr
import unittest as ut


class Simple(mtr.MPITestCase):
    def test_pass(self):
        pass

    def test_fail(self):
        self.fail()

    def test_fail_0(self):
        from mpi4py import MPI
        if MPI.COMM_WORLD.rank == 0:
            self.fail()

    def test_fail_1(self):
        from mpi4py import MPI
        if MPI.COMM_WORLD.rank == 1:
            self.fail()

    def test_fail_nonzero(self):
        from mpi4py import MPI
        if MPI.COMM_WORLD.rank != 0:
            self.fail()

    def test_error(self):
        raise ValueError

    def test_error_nonzero(self):
        from mpi4py import MPI
        if MPI.COMM_WORLD.rank != 0:
            raise ValueError

    def test_valid_error(self):
        with self.assertRaises(ValueError):
            raise ValueError

    def test_mixed(self):
        from mpi4py import MPI
        if MPI.COMM_WORLD.rank < 2:
            self.fail()
        else:
            raise ValueError

    def test_subtest_fail(self):
        with self.subTest(sub='test'):
            self.fail()

    def test_subtest_double(self):
        with self.subTest(sub='error'):
            raise ValueError
        with self.subTest(sub='fail'):
            self.fail()

    def test_subtest_error(self):
        with self.subTest(sub='test'):
            raise ValueError

    @ut.expectedFailure
    def test_expected_fail(self):
        self.fail()

    @ut.expectedFailure
    def test_unexpected_success(self):
        pass


if __name__ == '__main__':
    import argparse

    parser = argparse.ArgumentParser(description='Check MPI test runner behavior.')
    parser.add_argument('name', nargs='?', default=None,
                        help='Glob expression to specify specific test cases')
    parser.add_argument('-f', '--failfast', action='store_true',
                        help='Stop the tests on first failure.')
    parser.add_argument('-v', '--verbose', choices=[0, 1, 2], default=1, type=int,
                        help='Level of detail to show')
    args = parser.parse_args()
    ...
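The snippet is cut off right after parse_args(). A hypothetical continuation, shown with plain unittest objects purely for illustration (the real script presumably hands these options to an MPI-aware runner from mpi_test_runner), might look like:

    # Hypothetical continuation, not part of the original snippet.
    loader = ut.defaultTestLoader
    if args.name:
        # unittest's loader takes dotted names such as
        # "__main__.Simple.test_subtest_error"; true glob matching would need
        # extra handling, presumably provided by mpi_test_runner itself.
        suite = loader.loadTestsFromName(args.name)
    else:
        suite = loader.loadTestsFromTestCase(Simple)
    runner = ut.TextTestRunner(verbosity=args.verbose, failfast=args.failfast)
    result = runner.run(suite)
    raise SystemExit(0 if result.wasSuccessful() else 1)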


Automation Testing Tutorials

Learn to execute automation testing from scratch with the LambdaTest Learning Hub, from setting up the prerequisites and running your first automation test to following best practices and diving deeper into advanced test scenarios. The LambdaTest Learning Hubs compile step-by-step guides to help you become proficient with different test automation frameworks such as Selenium, Cypress, and TestNG.


YouTube

You can also refer to the video tutorials on the LambdaTest YouTube channel for step-by-step demonstrations from industry experts.

Run unittest-xml-reporting automation tests on the LambdaTest cloud grid

Perform automation testing on 3000+ real desktop and mobile devices online.

Try LambdaTest now and get 100 minutes of automation testing free!

