How to use the verify_env_results method in LISA

The Python code below, taken from LISA's own unit tests, shows the verify_env_results helper in use.

Source: test_lisa_runner.py (GitHub)
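verify_env_results is an assertion helper defined on the runner test class in test_lisa_runner.py (it is not part of lisa's public API): after a run, it compares the environment names the MockPlatform recorded as prepared, deployed, and deleted against the lists a test expects. A minimal sketch of the call pattern, lifted from the excerpt below; generate_env_runbook, generate_runner, and _run_all_tests are helpers defined elsewhere in the LISA test suite:

        # Build a runbook with one remote environment, run all cases through
        # the mock platform, then assert which environments were touched.
        env_runbook = generate_env_runbook(is_single_env=True, remote=True)
        runner = generate_runner(env_runbook)
        test_results = self._run_all_tests(runner)
        self.verify_env_results(
            expected_prepared=["customized_0"],       # envs the platform prepared
            expected_deployed_envs=["customized_0"],  # envs actually deployed
            expected_deleted_envs=["customized_0"],   # envs cleaned up afterwards
            runner=runner,
        )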


...
        test_testsuite.generate_cases_metadata()
        env_runbook = generate_env_runbook(is_single_env=True, remote=True)
        runner = generate_runner(env_runbook)
        test_results = self._run_all_tests(runner)
        self.verify_env_results(
            expected_prepared=["customized_0"],
            expected_deployed_envs=["customized_0"],
            expected_deleted_envs=["customized_0"],
            runner=runner,
        )
        self.verify_test_results(
            expected_test_order=["mock_ut1", "mock_ut2", "mock_ut3"],
            expected_envs=["", "customized_0", "customized_0"],
            expected_status=[TestStatus.SKIPPED, TestStatus.PASSED, TestStatus.PASSED],
            expected_message=[self.__skipped_no_env, "", ""],
            test_results=test_results,
        )

    def test_fit_a_bigger_env(self) -> None:
        # similar with test_fit_a_predefined_env, but predefined 2 nodes,
        # it doesn't equal to any case req, but reusable for all cases.
        test_testsuite.generate_cases_metadata()
        env_runbook = generate_env_runbook(is_single_env=True, local=True, remote=True)
        runner = generate_runner(env_runbook)
        test_results = self._run_all_tests(runner)
        self.verify_env_results(
            expected_prepared=["customized_0"],
            expected_deployed_envs=["customized_0"],
            expected_deleted_envs=["customized_0"],
            runner=runner,
        )
        self.verify_test_results(
            expected_test_order=["mock_ut1", "mock_ut2", "mock_ut3"],
            expected_envs=["customized_0", "customized_0", "customized_0"],
            expected_status=[TestStatus.PASSED, TestStatus.PASSED, TestStatus.PASSED],
            expected_message=["", "", ""],
            test_results=test_results,
        )

    def test_case_new_env_run_only_1_needed_customized(self) -> None:
        # same predefined env as test_fit_a_bigger_env,
        # but all case want to run on a new env
        test_testsuite.generate_cases_metadata()
        env_runbook = generate_env_runbook(is_single_env=True, local=True, remote=True)
        runner = generate_runner(env_runbook, case_use_new_env=True)
        test_results = self._run_all_tests(runner)
        self.verify_env_results(
            expected_prepared=["customized_0"],
            expected_deployed_envs=["customized_0"],
            expected_deleted_envs=["customized_0"],
            runner=runner,
        )
        self.verify_test_results(
            expected_test_order=["mock_ut1", "mock_ut2", "mock_ut3"],
            expected_envs=["customized_0", "", ""],
            expected_status=[TestStatus.PASSED, TestStatus.SKIPPED, TestStatus.SKIPPED],
            expected_message=["", self.__skipped_no_env, self.__skipped_no_env],
            test_results=test_results,
        )

    def test_case_new_env_run_only_1_needed_generated(self) -> None:
        # same predefined env as test_fit_a_bigger_env,
        # but all case want to run on a new env
        test_testsuite.generate_cases_metadata()
        env_runbook = generate_env_runbook()
        runner = generate_runner(env_runbook, case_use_new_env=True, times=2)
        test_results = self._run_all_tests(runner)
        self.verify_env_results(
            expected_prepared=[
                "generated_0",
                "generated_1",
                "generated_2",
                "generated_3",
                "generated_4",
                "generated_5",
            ],
            expected_deployed_envs=[
                "generated_0",
                "generated_1",
                "generated_2",
                "generated_3",
                "generated_4",
                "generated_5",
            ],
            expected_deleted_envs=[
                "generated_0",
                "generated_1",
                "generated_2",
                "generated_3",
                "generated_4",
                "generated_5",
            ],
            runner=runner,
        )
        self.verify_test_results(
            expected_test_order=[
                "mock_ut1",
                "mock_ut1",
                "mock_ut2",
                "mock_ut2",
                "mock_ut3",
                "mock_ut3",
            ],
            expected_envs=[
                "generated_0",
                "generated_1",
                "generated_2",
                "generated_3",
                "generated_4",
                "generated_5",
            ],
            expected_status=[
                TestStatus.PASSED,
                TestStatus.PASSED,
                TestStatus.PASSED,
                TestStatus.PASSED,
                TestStatus.PASSED,
                TestStatus.PASSED,
            ],
            expected_message=["", "", "", "", "", ""],
            test_results=test_results,
        )

    def test_no_needed_env(self) -> None:
        # two 1 node env predefined, but only customized_0 go to deploy
        # no cases assigned to customized_1, as fit cases run on customized_0 already
        test_testsuite.generate_cases_metadata()
        env_runbook = generate_env_runbook(local=True, remote=True)
        runner = generate_runner(env_runbook)
        test_results = self._run_all_tests(runner)
        self.verify_env_results(
            expected_prepared=[
                "customized_0",
                "customized_1",
            ],
            expected_deployed_envs=["customized_0"],
            expected_deleted_envs=["customized_0"],
            runner=runner,
        )
        self.verify_test_results(
            expected_test_order=["mock_ut1", "mock_ut2", "mock_ut3"],
            expected_envs=["", "customized_0", "customized_0"],
            expected_status=[TestStatus.SKIPPED, TestStatus.PASSED, TestStatus.PASSED],
            expected_message=[self.__skipped_no_env, "", ""],
            test_results=test_results,
        )

    def test_deploy_no_more_resource(self) -> None:
        # platform may see no more resource, like no azure quota.
        # cases skipped due to this.
        # In future, will add retry on wait more resource.
        platform_schema = test_platform.MockPlatformSchema()
        platform_schema.wait_more_resource_error = True
        test_testsuite.generate_cases_metadata()
        env_runbook = generate_env_runbook(is_single_env=True, local=True)
        runner = generate_runner(env_runbook, platform_schema=platform_schema)
        test_results = self._run_all_tests(runner)
        self.verify_env_results(
            expected_prepared=["customized_0"],
            expected_deployed_envs=[],
            expected_deleted_envs=["customized_0"],
            runner=runner,
        )
        no_awaitable_resource_message = "deployment skipped: awaitable resource"
        no_more_resource_message = "no available environment"
        self.verify_test_results(
            expected_test_order=["mock_ut1", "mock_ut2", "mock_ut3"],
            expected_envs=["", "customized_0", ""],
            expected_status=[
                TestStatus.SKIPPED,
                TestStatus.SKIPPED,
                TestStatus.SKIPPED,
            ],
            expected_message=[
                no_more_resource_message,
                no_awaitable_resource_message,
                no_more_resource_message,
            ],
            test_results=test_results,
        )

    def test_skipped_on_suite_failure(self) -> None:
        # First two tests were skipped because the setup is made to fail.
        test_testsuite.fail_on_before_suite = True
        test_testsuite.generate_cases_metadata()
        env_runbook = generate_env_runbook(is_single_env=True, local=True, remote=True)
        runner = generate_runner(env_runbook)
        test_results = self._run_all_tests(runner)
        self.verify_env_results(
            expected_prepared=["customized_0"],
            expected_deployed_envs=["customized_0"],
            expected_deleted_envs=["customized_0"],
            runner=runner,
        )
        before_suite_failed = "before_suite: failed"
        self.verify_test_results(
            expected_test_order=["mock_ut1", "mock_ut2", "mock_ut3"],
            expected_envs=["customized_0", "customized_0", "customized_0"],
            expected_status=[
                TestStatus.SKIPPED,
                TestStatus.SKIPPED,
                TestStatus.PASSED,
            ],
            expected_message=[before_suite_failed, before_suite_failed, ""],
            test_results=test_results,
        )

    def test_env_failed_not_prepared_env(self) -> None:
        # test env not prepared, so test cases cannot find an env to run
        platform_schema = test_platform.MockPlatformSchema()
        platform_schema.return_prepared = False
        test_testsuite.generate_cases_metadata()
        runner = generate_runner(None, platform_schema=platform_schema)
        test_results = self._run_all_tests(runner)
        self.verify_env_results(
            expected_prepared=[
                "generated_0",
                "generated_1",
                "generated_2",
            ],
            expected_deployed_envs=[],
            expected_deleted_envs=[],
            runner=runner,
        )
        no_available_env = (
            "deployment failed. LisaException: no capability found for environment: "
        )
        self.verify_test_results(
            expected_test_order=["mock_ut1", "mock_ut2", "mock_ut3"],
            expected_envs=[
                "generated_0",
                "generated_1",
                "generated_2",
            ],
            expected_status=[
                TestStatus.FAILED,
                TestStatus.FAILED,
                TestStatus.FAILED,
            ],
            expected_message=[
                no_available_env,
                no_available_env,
                no_available_env,
            ],
            test_results=test_results,
        )

    def test_env_failed_more_failed_env_on_prepare(self) -> None:
        # test env not prepared, so test cases cannot find an env to run
        platform_schema = test_platform.MockPlatformSchema()
        platform_schema.return_prepared = False
        env_runbook = generate_env_runbook(is_single_env=True, local=True, remote=True)
        runner = generate_runner(env_runbook, platform_schema=platform_schema)
        with self.assertRaises(LisaException) as cm:
            _ = self._run_all_tests(runner)
        self.assertIn(
            "There are no remaining test results to run, ",
            str(cm.exception),
        )

    def test_env_deploy_failed(self) -> None:
        # env prepared, but deployment failed, so cases failed
        platform_schema = test_platform.MockPlatformSchema()
        platform_schema.deployed_status = EnvironmentStatus.Prepared
        test_testsuite.generate_cases_metadata()
        env_runbook = generate_env_runbook()
        runner = generate_runner(env_runbook, platform_schema=platform_schema)
        test_results = self._run_all_tests(runner)
        self.verify_env_results(
            expected_prepared=[
                "generated_0",
                "generated_1",
                "generated_2",
            ],
            expected_deployed_envs=[
                "generated_0",
                "generated_1",
                "generated_2",
            ],
            expected_deleted_envs=[
                "generated_0",
                "generated_1",
                "generated_2",
            ],
            runner=runner,
        )
        no_available_env = (
            "deployment failed. LisaException: "
            "expected status is EnvironmentStatus.Prepared"
        )
        self.verify_test_results(
            expected_test_order=["mock_ut1", "mock_ut2", "mock_ut3"],
            expected_envs=["generated_0", "generated_1", "generated_2"],
            expected_status=[
                TestStatus.FAILED,
                TestStatus.FAILED,
                TestStatus.FAILED,
            ],
            expected_message=[no_available_env, no_available_env, no_available_env],
            test_results=test_results,
        )

    def test_env_skipped_no_case(self) -> None:
        # no case found, as not call generate_case_metadata
        # in this case, not deploy any env
        env_runbook = generate_env_runbook(is_single_env=True, remote=True)
        runner = generate_runner(env_runbook)
        test_results = self._run_all_tests(runner)
        # still prepare predefined, but not deploy
        self.verify_env_results(
            expected_prepared=["customized_0"],
            expected_deployed_envs=[],
            expected_deleted_envs=[],
            runner=runner,
        )
        self.verify_test_results(
            expected_test_order=[],
            expected_envs=[],
            expected_status=[],
            expected_message=[],
            test_results=test_results,
        )

    def verify_test_results(
        self,
        expected_test_order: List[str],
        expected_envs: List[str],
        expected_status: List[TestStatus],
        expected_message: List[str],
        test_results: Union[List[TestResultMessage], List[TestResult]],
    ) -> None:
        test_names: List[str] = []
        env_names: List[str] = []
        for test_result in test_results:
            if isinstance(test_result, TestResult):
                test_names.append(test_result.runtime_data.metadata.name)
                env_names.append(
                    test_result.environment.name
                    if test_result.environment is not None
                    else ""
                )
            else:
                assert isinstance(test_result, TestResultMessage)
                test_names.append(test_result.full_name.split(".")[1])
                env_names.append(test_result.information.get("environment", ""))
        self.assertListEqual(
            expected_test_order,
            test_names,
            "test order inconsistent",
        )
        self.assertListEqual(
            expected_envs,
            env_names,
            "test env inconsistent",
        )
        self.assertListEqual(
            expected_status,
            [x.status for x in test_results],
            "test result inconsistent",
        )
        # compare it's begin with
        actual_messages = [
            test_results[index].message[0 : len(expected)]
            for index, expected in enumerate(expected_message)
        ]
        self.assertListEqual(
            expected_message,
            actual_messages,
            "test message inconsistent",
        )

    def verify_env_results(
        self,
        expected_prepared: List[str],
        expected_deployed_envs: List[str],
        expected_deleted_envs: List[str],
        runner: LisaRunner,
    ) -> None:
        platform = cast(test_platform.MockPlatform, runner.platform)
        platform_test_data = platform.test_data
        self.assertListEqual(
            expected_prepared,
            list(platform_test_data.prepared_envs),
            "prepared envs inconsistent",
        )
        self.assertListEqual(...
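The excerpt is cut off inside verify_env_results after the prepared-envs assertion. Judging only from the method's parameters and the assertion pattern shown above, the remainder presumably compares the deployed and deleted environment lists in the same way; the platform_test_data attribute names below are assumptions, not verbatim source:

        # Hedged sketch of how verify_env_results likely continues; the
        # deployed_envs/deleted_envs attribute names are inferred, not quoted.
        self.assertListEqual(
            expected_deployed_envs,
            list(platform_test_data.deployed_envs),
            "deployed envs inconsistent",
        )
        self.assertListEqual(
            expected_deleted_envs,
            list(platform_test_data.deleted_envs),
            "deleted envs inconsistent",
        )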
