Best Python code snippet using SeleniumBase
test_basic_features.py
Source: test_basic_features.py

from test_junkie.constants import TestCategory, DecoratorType
from test_junkie.runner import Runner
from tests.QualityManager import QualityManager
from tests.junkie_suites.BasicSuite import BasicSuite
from tests.junkie_suites.ExecutionSquence import ExecutionSequence1, ExecutionSequence2, ExecutionSequence3, \
    ExecutionSequence4
from tests.junkie_suites.ParametersSuite import ParametersSuite

runner = Runner([BasicSuite])
runner.run()
results = runner.get_executed_suites()
tests = results[0].get_test_objects()

def test_class_metrics():
    metrics = results[0].metrics.get_metrics()
    QualityManager.check_class_metrics(metrics,
                                        expected_status="fail",
                                        expected_beforeclass_exception_count=1,
                                        expected_beforeclass_exception_object=None,
                                        expected_beforeclass_performance_count=1,
                                        expected_afterclass_exception_count=1,
                                        expected_afterclass_exception_object=None,
                                        expected_afterclass_performance_count=1,
                                        expected_beforetest_exception_count=8,
                                        expected_beforetest_exception_object=None,
                                        expected_beforetest_performance_count=8,
                                        expected_aftertest_exception_count=8,
                                        expected_aftertest_exception_object=None,
                                        expected_aftertest_performance_count=8)

def test_failure():
    tested = False
    for test in tests:
        if test.get_function_name() == "failure":
            metrics = test.metrics.get_metrics()["None"]["None"]
            QualityManager.check_test_metrics(metrics,
                                              expected_status="fail",
                                              expected_exception=AssertionError)
            tested = True
    if not tested:
        raise Exception("Test did not run")

def test_error():
    tested = False
    for test in tests:
        if test.get_function_name() == "error":
            metrics = test.metrics.get_metrics()["None"]["None"]
            QualityManager.check_test_metrics(metrics,
                                              expected_status="error",
                                              expected_exception=Exception)
            tested = True
    if not tested:
        raise Exception("Test did not run")

def test_skip():
    tested = False
    for test in tests:
        if test.get_function_name() == "skip":
            metrics = test.metrics.get_metrics()["None"]["None"]
            QualityManager.check_test_metrics(metrics,
                                              expected_status="skip")
            tested = True
    if not tested:
        raise Exception("Test did not run")

def test_skip_function():
    tested = False
    for test in tests:
        if test.get_function_name() == "skip_function":
            metrics = test.metrics.get_metrics()["None"]["None"]
            QualityManager.check_test_metrics(metrics,
                                              expected_status="skip")
            tested = True
    if not tested:
        raise Exception("Test did not run")

def test_retry():
    tested = False
    for test in tests:
        if test.get_function_name() == "retry":
            metrics = test.metrics.get_metrics()["None"]["None"]
            QualityManager.check_test_metrics(metrics,
                                              expected_status="fail",
                                              expected_exception=AssertionError,
                                              expected_retry_count=2,
                                              expected_performance_count=2,
                                              expected_exception_count=2)
            tested = True
    if not tested:
        raise Exception("Test did not run")

def test_parameters():
    tested = False
    for test in tests:
        if test.get_function_name() == "parameters":
            properties = test.metrics.get_metrics()["None"]
            for param, metrics in properties.items():
                QualityManager.check_test_metrics(metrics,
                                                  expected_status="success",
                                                  expected_param=param)
                tested = True
    if not tested:
        raise Exception("Test did not run")

def test_parameters_plus_plus():
    runner = Runner([ParametersSuite])
    aggregator = runner.run()
    metrics = aggregator.get_basic_report()["tests"]
    assert metrics[TestCategory.SUCCESS] == 36
    assert metrics[TestCategory.IGNORE] == 4
    suites = runner.get_executed_suites()
    metrics = suites[0].metrics.get_metrics()
    QualityManager.check_class_metrics(metrics, expected_status="fail")

def test_execution_sequence1():
    runner = Runner([ExecutionSequence1])
    aggregator = runner.run()
    metrics = aggregator.get_basic_report()["tests"]
    assert metrics[TestCategory.SUCCESS] == 0
    assert metrics[TestCategory.FAIL] == 6
    suites = runner.get_executed_suites()
    metrics = suites[0].metrics.get_metrics()
    QualityManager.check_class_metrics(metrics, expected_status="fail",
                                       expected_retry_count=2,
                                       expected_beforetest_exception_count=20,
                                       expected_beforetest_performance_count=20,
                                       expected_beforetest_exception_object=AssertionError,
                                       expected_aftertest_exception_count=20,
                                       expected_aftertest_performance_count=20,
                                       expected_aftertest_exception_object=None
                                       )
    for test in suites[0].get_test_objects():
        test_metrics = test.metrics.get_metrics()
        for class_param, class_param_data in test_metrics.items():
            for param, param_data in class_param_data.items():
                expected_value = 4 if param_data["param"] is not None else 2
                assert len(param_data[DecoratorType.BEFORE_TEST]["exceptions"]) == expected_value
                assert len(param_data[DecoratorType.BEFORE_TEST]["tracebacks"]) == expected_value
                assert len(param_data[DecoratorType.BEFORE_TEST]["performance"]) == expected_value

def test_execution_sequence2():
    runner = Runner([ExecutionSequence2])
    aggregator = runner.run()
    metrics = aggregator.get_basic_report()["tests"]
    assert metrics[TestCategory.SUCCESS] == 0
    assert metrics[TestCategory.FAIL] == 6
    suites = runner.get_executed_suites()
    metrics = suites[0].metrics.get_metrics()
    QualityManager.check_class_metrics(metrics, expected_status="fail",
                                       expected_retry_count=2,
                                       expected_beforetest_exception_count=20,
                                       expected_beforetest_performance_count=20,
                                       expected_beforetest_exception_object=None,
                                       expected_aftertest_exception_count=20,
                                       expected_aftertest_performance_count=20,
                                       expected_aftertest_exception_object=AssertionError)
    for test in suites[0].get_test_objects():
        test_metrics = test.metrics.get_metrics()
        for class_param, class_param_data in test_metrics.items():
            for param, param_data in class_param_data.items():
                expected_value = 4 if param_data["param"] is not None else 2
                assert len(param_data[DecoratorType.AFTER_TEST]["exceptions"]) == expected_value
                assert len(param_data[DecoratorType.AFTER_TEST]["tracebacks"]) == expected_value
                assert len(param_data[DecoratorType.AFTER_TEST]["performance"]) == expected_value

def test_execution_sequence3():
    runner = Runner([ExecutionSequence3])
    aggregator = runner.run()
    metrics = aggregator.get_basic_report()["tests"]
    assert metrics[TestCategory.SUCCESS] == 5
    assert metrics[TestCategory.FAIL] == 1
    suites = runner.get_executed_suites()
    metrics = suites[0].metrics.get_metrics()
    QualityManager.check_class_metrics(metrics, expected_status="fail",
                                       expected_retry_count=2,
                                       expected_beforetest_exception_count=7,
                                       expected_beforetest_performance_count=7,
                                       expected_beforetest_exception_object=None,
                                       expected_aftertest_exception_count=7,
                                       expected_aftertest_performance_count=7,
                                       expected_aftertest_exception_object=None)
    for test in suites[0].get_test_objects():
        test_metrics = test.metrics.get_metrics()
        for class_param, class_param_data in test_metrics.items():
            for param, param_data in class_param_data.items():
                status = test.get_status(class_param, param)
                expected_value = 2 if status == TestCategory.FAIL else 1
                assert len(param_data[DecoratorType.AFTER_TEST]["exceptions"]) == expected_value
                assert len(param_data[DecoratorType.AFTER_TEST]["tracebacks"]) == expected_value
                assert len(param_data[DecoratorType.AFTER_TEST]["performance"]) == expected_value
                assert len(param_data[DecoratorType.BEFORE_TEST]["exceptions"]) == expected_value
                assert len(param_data[DecoratorType.BEFORE_TEST]["tracebacks"]) == expected_value
                assert len(param_data[DecoratorType.BEFORE_TEST]["performance"]) == expected_value

def test_execution_sequence4():
    runner = Runner([ExecutionSequence4])
    aggregator = runner.run()
    metrics = aggregator.get_basic_report()["tests"]
    assert metrics[TestCategory.SUCCESS] == 6
    assert metrics[TestCategory.FAIL] == 0
    suites = runner.get_executed_suites()
    metrics = suites[0].metrics.get_metrics()
    QualityManager.check_class_metrics(metrics, expected_status="success")
    for test in suites[0].get_test_objects():
        test_metrics = test.metrics.get_metrics()
        for class_param, class_param_data in test_metrics.items():
            for param, param_data in class_param_data.items():
                expected_value = 0
                assert len(param_data[DecoratorType.AFTER_TEST]["exceptions"]) == expected_value
                assert len(param_data[DecoratorType.AFTER_TEST]["tracebacks"]) == expected_value
                assert len(param_data[DecoratorType.AFTER_TEST]["performance"]) == expected_value
                assert len(param_data[DecoratorType.BEFORE_TEST]["exceptions"]) == expected_value
                assert len(param_data[DecoratorType.BEFORE_TEST]["tracebacks"]) == expected_value
...
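The suites this harness exercises (BasicSuite, ParametersSuite, the ExecutionSequence classes) live elsewhere in the repository. For orientation, here is a minimal sketch of what a suite looks like in test-junkie's decorator style; the class name and test bodies are illustrative placeholders, not the actual BasicSuite, and the retry argument is shown only as an assumption about how the "retry" behaviour above is configured.

# Illustrative sketch of a test-junkie suite; not the real BasicSuite.
from test_junkie.decorators import Suite, test, beforeClass, beforeTest, afterTest, afterClass
from test_junkie.runner import Runner

@Suite()
class SketchSuite:  # hypothetical name for illustration

    @beforeClass()
    def before_class(self):
        pass  # runs once before any test in the suite

    @beforeTest()
    def before_test(self):
        pass  # runs before every test; exceptions raised here land in the beforeTest metrics

    @test()
    def passes(self):
        assert True

    @test(retry=2)
    def flaky(self):
        assert False  # retried once more, then reported with status "fail"

    @afterTest()
    def after_test(self):
        pass

    @afterClass()
    def after_class(self):
        pass

if __name__ == "__main__":
    runner = Runner([SketchSuite])
    runner.run()
    # Same metrics access pattern as the harness above.
    print(runner.get_executed_suites()[0].metrics.get_metrics()["status"])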
QualityManager.py
Source: QualityManager.py

class QualityManager:

    @staticmethod
    def check_class_metrics(metrics,
                            expected_retry_count=1,
                            expected_status="success",
                            expected_runtime=0,
                            expected_afterclass_exception_count=0,
                            expected_beforeclass_exception_count=0,
                            expected_aftertest_exception_count=0,
                            expected_beforetest_exception_count=0,
                            expected_afterclass_exception_object=None,
                            expected_beforeclass_exception_object=None,
                            expected_aftertest_exception_object=None,
                            expected_beforetest_exception_object=None,
                            expected_afterclass_performance_count=0,
                            expected_beforeclass_performance_count=0,
                            expected_aftertest_performance_count=0,
                            expected_beforetest_performance_count=0,
                            expected_afterclass_performance_time=0,
                            expected_beforeclass_performance_time=0,
                            expected_aftertest_performance_time=0,
                            expected_beforetest_performance_time=0):
        assert metrics["retry"] == expected_retry_count, \
            "Expected retry count: {} Actual retry count: {}".format(expected_retry_count, metrics["retry"])
        assert metrics["status"] == expected_status
        assert metrics["runtime"] >= expected_runtime
        assert len(metrics["afterClass"]["exceptions"]) == expected_afterclass_exception_count
        for i in metrics["afterClass"]["exceptions"]:
            assert type(i) == type(expected_afterclass_exception_object) \
                if not isinstance(expected_afterclass_exception_object, type) else expected_afterclass_exception_object
        assert len(metrics["afterClass"]["performance"]) == expected_afterclass_performance_count
        for i in metrics["afterClass"]["performance"]:
            assert i >= expected_afterclass_performance_time
        assert len(metrics["beforeClass"]["exceptions"]) == expected_beforeclass_exception_count
        for i in metrics["beforeClass"]["exceptions"]:
            assert type(i) == type(expected_beforeclass_exception_object) \
                if not isinstance(expected_beforeclass_exception_object, type) else expected_beforeclass_exception_object
        assert len(metrics["beforeClass"]["performance"]) == expected_beforeclass_performance_count
        for i in metrics["beforeClass"]["performance"]:
            assert i >= expected_beforeclass_performance_time
        assert len(metrics["afterTest"]["exceptions"]) == expected_aftertest_exception_count, \
            "Expected: {} Actual: {}".format(expected_aftertest_exception_count,
                                             len(metrics["afterTest"]["exceptions"]))
        for i in metrics["afterTest"]["exceptions"]:
            assert type(i) == type(expected_aftertest_exception_object) \
                if not isinstance(expected_aftertest_exception_object, type) else expected_aftertest_exception_object
        assert len(metrics["afterTest"]["performance"]) == expected_aftertest_performance_count, \
            "Expected: {} Actual: {}".format(expected_aftertest_performance_count,
                                             len(metrics["afterTest"]["performance"]))
        for i in metrics["afterTest"]["performance"]:
            assert i >= expected_aftertest_performance_time
        assert len(metrics["beforeTest"]["exceptions"]) == expected_beforetest_exception_count, \
            "Expected: {} Actual: {}".format(expected_beforetest_exception_count,
                                             len(metrics["beforeTest"]["exceptions"]))
        for i in metrics["beforeTest"]["exceptions"]:
            assert type(i) == type(expected_beforetest_exception_object) \
                if not isinstance(expected_beforetest_exception_object, type) else expected_beforetest_exception_object
        assert len(metrics["beforeTest"]["performance"]) == expected_beforetest_performance_count, \
            "Expected: {} Actual: {}".format(expected_beforetest_performance_count,
                                             len(metrics["beforeTest"]["performance"]))
        for i in metrics["beforeTest"]["performance"]:
            assert i >= expected_beforetest_performance_time

    @staticmethod
    def check_test_metrics(metrics,
                           expected_retry_count=1,
                           expected_status="success",
                           expected_param=None,
                           expected_class_param=None,
                           expected_exception_count=1,
                           expected_exception=None,
                           expected_performance_count=1,
                           expected_performance=0):
        assert metrics["status"] == expected_status, \
            "Expected status: {} Actual Status: {}".format(expected_status, metrics["status"])
        assert metrics["retry"] == expected_retry_count, \
            "Expected retry: {} Actual: {}".format(expected_retry_count, metrics["retry"])
        assert str(metrics["param"]) == str(expected_param)
        assert str(metrics["class_param"]) == str(expected_class_param)
        assert len(metrics["exceptions"]) == expected_exception_count
        for i in metrics["exceptions"]:
            assert type(i) == type(expected_exception) \
                if not isinstance(expected_exception, type) else expected_exception
        assert len(metrics["performance"]) == expected_performance_count
        for i in metrics["performance"]:
...
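check_class_metrics walks a dictionary produced by suite.metrics.get_metrics(). As a rough illustration of the shape it expects, here is a hand-built dict that satisfies the default expectations; every value is made up for the example, and real dictionaries always come from the runner.

from tests.QualityManager import QualityManager

# Illustrative, hand-built metrics dict; real ones come from
# runner.get_executed_suites()[0].metrics.get_metrics().
sample_class_metrics = {
    "retry": 1,
    "status": "success",
    "runtime": 0.42,
    "beforeClass": {"exceptions": [], "performance": []},
    "afterClass": {"exceptions": [], "performance": []},
    "beforeTest": {"exceptions": [], "performance": []},
    "afterTest": {"exceptions": [], "performance": []},
}

# The defaults expect status "success", retry count 1 and zero lifecycle
# exception/performance entries, so this call passes silently.
QualityManager.check_class_metrics(sample_class_metrics)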
interceptors-test.js
Source: interceptors-test.js

...
  store.Model('User', function(){
    should.not.exist(this.myInterceptor);

    this.beforeTest(function(){
      this.should.be.equal(phil);
      return false;
    });
  });

  var User = store.Model('User');
  phil = new User();

  it('has the right scope', function(done){
    phil.callInterceptors('beforeTest', function(){
      done();
    });
  });

});


describe('call (with params)', function(){
  var store = new Store();
  var phil;

  store.addInterceptor('beforeTest');

  store.Model('User', function(){
    should.not.exist(this.myInterceptor);

    this.beforeTest(function(arg1, arg2){
      arg1.should.be.equal('A');
      arg2.should.be.equal('B');
      return false;
    });
  });

  var User = store.Model('User');
  phil = new User();

  it('gets the right params', function(done){
    phil.callInterceptors('beforeTest', ['A', 'B'], function(result){
      result.should.be.false;
      done();
    });
  });

});


describe('call (with params and async)', function(){
  var store = new Store();
  var phil;

  store.addInterceptor('beforeTest');

  store.Model('User', function(){
    should.not.exist(this.myInterceptor);

    this.beforeTest(function(arg1, next){
      arg1.should.be.equal('A');
      next(false)
    });
  });

  var User = store.Model('User');
  phil = new User();

  it('gets the right params', function(done){
    phil.callInterceptors('beforeTest', ['A'], function(){
      done();
    });
  });

});


describe('call (with multiple interceptors: false)', function(){
  var store = new Store();
  var phil;

  store.addInterceptor('beforeTest');

  store.Model('User', function(){
    this.beforeTest(function(arg1, next){
      next(false)
    });

    this.beforeTest(function(){
      return true;
    });
  });

  var User = store.Model('User');
  phil = new User();

  it('is false', function(done){
    phil.callInterceptors('beforeTest', ['A'], function(result){
      result.should.be.false;
      done();
    });
  });
...
test_install.py
Source: test_install.py

...
import utilsphwrt
import master_transcoder

TEST_FOLDER = "./test folder/"
TEST_PATH = os.path.join(TEST_FOLDER, "Plex Transcoder")

def beforeTest():
    utilsphwrt.DEBUG = True
    if not os.path.exists(TEST_FOLDER):
        os.makedirs(TEST_FOLDER)
    # create fake origin transcoder
    with io.FileIO(TEST_PATH, "w") as file:
        file.write("Hello!")
        file.close()
    # remove new transcoder
    if os.path.exists(utilsphwrt.getNewTranscoderPath()):
        os.remove(utilsphwrt.getNewTranscoderPath())

class TestInstall(TestCase):
    """Unit test class to test other methods in the app."""

    def test_valid_install(self):
        beforeTest()
        self.assertTrue(master_transcoder.install_phwrt())
        self.assertTrue(os.path.exists(utilsphwrt.getNewTranscoderPath()))
        self.assertTrue(os.path.exists(utilsphwrt.getOriginalTranscoderPath()))
        self.assertTrue(os.path.exists(utilsphwrt.getPHWRTTranscoderPath()))

    def test_valid_uninstall(self):
        beforeTest()
        master_transcoder.install_phwrt()
        self.assertTrue(master_transcoder.uninstall_phwrt())
        self.assertFalse(os.path.exists(utilsphwrt.getNewTranscoderPath()))
        self.assertTrue(os.path.exists(utilsphwrt.getOriginalTranscoderPath()))
        self.assertTrue(os.path.exists(utilsphwrt.getPHWRTTranscoderPath()))

    def test_install_on_install(self):
        beforeTest()
        self.assertTrue(master_transcoder.install_phwrt())
        self.assertFalse(master_transcoder.install_phwrt())

    def test_uninstall_before_install(self):
        beforeTest()
...
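This snippet calls the module-level beforeTest() helper by hand at the top of every test. Under unittest the same setup would more commonly live in setUp(), which the framework invokes automatically before each test method. A minimal sketch of that arrangement, assuming the same module (beforeTest, master_transcoder, utilsphwrt and os as imported above):

from unittest import TestCase

class TestInstallWithSetUp(TestCase):
    """Same idea, but letting unittest run the setup automatically."""

    def setUp(self):
        # Reuses the module-level helper defined in the snippet above.
        beforeTest()

    def test_valid_install(self):
        self.assertTrue(master_transcoder.install_phwrt())
        self.assertTrue(os.path.exists(utilsphwrt.getNewTranscoderPath()))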