Best Python code snippets using unittest-xml-reporting
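For context, unittest-xml-reporting plugs its XMLTestRunner into the standard unittest machinery. A minimal sketch of that basic usage is shown below; the test class and the 'test-reports' output directory are illustrative, not taken from the snippets on this page.

import unittest
import xmlrunner  # provided by the unittest-xml-reporting package


class ExampleTest(unittest.TestCase):
    def test_pass(self):
        self.assertTrue(True)


if __name__ == '__main__':
    # Writes a JUnit-style XML report into ./test-reports/ (illustrative path)
    unittest.main(testRunner=xmlrunner.XMLTestRunner(output='test-reports'))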
Source: django_test.py
...
    def _override_settings(self, **kwargs):
        # see django.test.utils.override_settings
        for key, new_value in kwargs.items():
            setattr(self.override, key, new_value)

    def _check_runner(self, runner):
        suite = runner.build_suite(test_labels=['app2', 'app'])
        test_ids = [test.id() for test in suite]
        self.assertEqual(test_ids, [
            'app2.tests.DummyTestCase.test_pass',
            'app.tests.DummyTestCase.test_negative_comment1',
            'app.tests.DummyTestCase.test_negative_comment2',
            'app.tests.DummyTestCase.test_pass',
        ])
        suite = runner.build_suite(test_labels=[])
        test_ids = [test.id() for test in suite]
        self.assertEqual(set(test_ids), set([
            'app.tests.DummyTestCase.test_pass',
            'app.tests.DummyTestCase.test_negative_comment1',
            'app.tests.DummyTestCase.test_negative_comment2',
            'app2.tests.DummyTestCase.test_pass',
        ]))

    def test_django_runner(self):
        runner_class = get_runner(settings)
        runner = runner_class()
        self._check_runner(runner)

    def test_django_xmlrunner(self):
        self._override_settings(
            TEST_RUNNER='xmlrunner.extra.djangotestrunner.XMLTestRunner')
        runner_class = get_runner(settings)
        runner = runner_class()
        self._check_runner(runner)

    def test_django_verbose(self):
        self._override_settings(
            TEST_OUTPUT_VERBOSE=True,
            TEST_RUNNER='xmlrunner.extra.djangotestrunner.XMLTestRunner')
        runner_class = get_runner(settings)
        runner = runner_class()
        self._check_runner(runner)

    def test_django_single_report(self):
        self._override_settings(
            TEST_OUTPUT_DIR=self.tmpdir,
            TEST_OUTPUT_FILE_NAME='results.xml',
            TEST_OUTPUT_VERBOSE=0,
            TEST_RUNNER='xmlrunner.extra.djangotestrunner.XMLTestRunner')
        apps.populate(settings.INSTALLED_APPS)
        runner_class = get_runner(settings)
        runner = runner_class()
        suite = runner.build_suite()
        runner.run_suite(suite)
        expected_file = path.join(self.tmpdir, 'results.xml')
        self.assertTrue(path.exists(expected_file),
                        'did not generate xml report where expected.')
...
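The Django settings the tests above override (TEST_RUNNER, TEST_OUTPUT_DIR, TEST_OUTPUT_FILE_NAME, TEST_OUTPUT_VERBOSE) would normally live in a project's settings module. A minimal sketch is below; the setting names come straight from the snippet, while the directory name and values are illustrative.

# settings.py (sketch) -- route Django's test command through unittest-xml-reporting
TEST_RUNNER = 'xmlrunner.extra.djangotestrunner.XMLTestRunner'
TEST_OUTPUT_DIR = 'test-reports'        # illustrative directory for the XML output
TEST_OUTPUT_FILE_NAME = 'results.xml'   # combine results into a single report file
TEST_OUTPUT_VERBOSE = 0                 # same switch toggled in test_django_verbose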
Source: test_runner.py
...
    print()


@docker_test
def test_run_default_registry(runner, dockerenv_local, runner_test_images):
    img = DockerImage(IMAGE_NAME)
    _check_runner(runner, img, dockerenv_local)


@docker_test
def test_run_remote_registry(runner, dockerenv_remote, runner_test_images):
    img = DockerImage(IMAGE_NAME, repository=REPOSITORY_NAME, registry=dockerenv_remote.registry)
    _check_runner(runner, img, dockerenv_remote)


@docker_test
def test_run_local_image_name_that_will_never_exist(runner, dockerenv_local):
    img = DockerImage('ebonite_image_name_that_will_never_exist')
    with pytest.raises(HTTPError):
        _check_runner(runner, img, dockerenv_local)


@docker_test
@pytest.mark.parametrize('detach', [True, False])
def test_run_local_fail_inside_container(runner, dockerenv_remote, detach, runner_test_images):
    img = DockerImage(BROKEN_IMAGE_NAME,
                      repository=REPOSITORY_NAME, registry=dockerenv_remote.registry)
    with pytest.raises(DockerRunnerException):
        _check_runner(runner, img, dockerenv_remote, detach=detach, rm=True)


@docker_test
def test_instance_creation_with_kwargs(runner, dockerenv_remote):
    runner = DockerRunner()
    kwargs = {'key': 'val', 'host': '', 'int_key': 1, 'port_mapping': {8000: 8000}}
    instance = runner.create_instance('instance', **kwargs)
    assert 'port_mapping' not in instance.params
    assert instance.port_mapping == {8000: 8000}
    kwargs = {'key': 'val', 'host': '', 'int_key': 1}
    instance = runner.create_instance('instance_2', **kwargs)
    assert instance.port_mapping == {}


def _check_runner(runner, img, env: DockerEnv, **kwargs):
    instance = DockerContainer(CONTAINER_NAME, port_mapping={80: None})
    runner = runner(env, img, CONTAINER_NAME)
    assert not runner.is_running(instance, env)
    runner.run(instance, img, env, **kwargs)
    time.sleep(.1)
    assert runner.is_running(instance, env)
    runner.stop(instance, env)
    time.sleep(.1)
...
Source: test_deploy.py
...
@docker_test
def test_run_default_registry(
    dockerenv_local, _test_images, model_meta_saved_single
):
    _check_runner(IMAGE_NAME, dockerenv_local, model_meta_saved_single)


@docker_test
def test_run_remote_registry(
    dockerenv_remote, _test_images, model_meta_saved_single
):
    _check_runner(IMAGE_NAME, dockerenv_remote, model_meta_saved_single)


@docker_test
def test_run_local_image_name_that_will_never_exist(
    dockerenv_local, model_meta_saved_single
):
    with pytest.raises(HTTPError):
        _check_runner(
            "mlem_image_name_that_will_never_exist",
            dockerenv_local,
            model_meta_saved_single,
        )


@docker_test
def test_run_local_fail_inside_container(
    dockerenv_remote, _test_images, model_meta_saved_single
):
    with pytest.raises(DeploymentError):
        _check_runner(
            f"{dockerenv_remote.registry.get_host()}/{REPOSITORY_NAME}/{BROKEN_IMAGE_NAME}",
            dockerenv_remote,
            model_meta_saved_single,
        )


def _check_runner(img, env: DockerEnv, model):
    with tempfile.TemporaryDirectory() as tmpdir:
        instance = DockerContainer(
            container_name=CONTAINER_NAME,
            port_mapping={80: 8008},
            state=DockerContainerState(image=DockerImage(name=img)),
            server=FastAPIServer(),
            model_link=model.make_link(),
            env_link=env.make_link(),
            rm=False,
        )
        instance.update_model_hash(model)
        instance.dump(os.path.join(tmpdir, "deploy"))
        assert env.get_status(instance) == DeployStatus.NOT_DEPLOYED
        env.deploy(instance)
...