Best Python code snippet using autotest_python
Source: test_importer.py
...
                                 profiler.name)
        if not os.path.exists(full_path):
            logging.info("Removing %s", profiler.name)
            _log_or_execute(repr(profiler), profiler.delete)


def db_clean_all(autotest_dir):
    """
    Remove all tests from autotest_web - very destructive.

    This function is invoked when -C is supplied on the command line.
    It removes ALL tests from the database.

    :param autotest_dir: prepended to path strings
                         (see global_config.ini, COMMON, autotest_top_path).
    """
    for test in models.Test.objects.all():
        logging.info("Removing %s", test.path)
        _log_or_execute(repr(test), test.delete)

    # Find profilers that are no longer present
    for profiler in models.Profiler.objects.all():
        logging.info("Removing %s", profiler.name)
        _log_or_execute(repr(profiler), profiler.delete)


def update_profilers_in_db(profilers, description='NA',
                           add_noncompliant=False):
    """
    Add only profilers to the database from the filesystem.

    This function is invoked when -p is supplied on the command line.
    It only adds profilers to the database - it does not delete any.
    Profilers are formatted slightly differently than tests.

    :param profilers: list of profilers found in the file system.
    :param description: fallback text used when a profiler has no docstring.
    :param add_noncompliant: attempt to add profilers with invalid control
                             files.
    """
    for profiler in profilers:
        name = os.path.basename(profiler)
        if name.endswith('.py'):
            name = name[:-3]
        if not profilers[profiler]:
            if add_noncompliant:
                doc = description
            else:
                logging.warn("Skipping %s, missing docstring", profiler)
                continue
        else:
            doc = profilers[profiler]
        model = models.Profiler.objects.get_or_create(name=name)[0]
        model.description = doc
        _log_or_execute(repr(model), model.save)


def update_tests_in_db(tests, dry_run=False, add_experimental=False,
                       add_noncompliant=False, autotest_dir=None):
    """
    Scan through all tests and add them to the database.

    This function is invoked when -t is supplied and by update_all().
    When test code is discovered in the file system, new tests may be added.

    :param tests: list of tests found in the filesystem.
    :param dry_run: not used at this time.
    :param add_experimental: add tests with the experimental attribute set.
    :param add_noncompliant: attempt to add tests with invalid control files.
    :param autotest_dir: prepended to path strings
                         (see global_config.ini, COMMON, autotest_top_path).
    """
    site_set_attributes_module = utils.import_site_module(
        __file__, 'autotest.utils.site_test_importer_attributes')

    for test in tests:
        # If the test path is not inside the base test dir, the subsequent
        # test load will fail, so notify the user right away instead.
        if not test.startswith(autotest_dir):
            raise Exception('Test path %s not in %s, did you forget to '
                            'use -z option?' % (test, autotest_dir))
        new_test = models.Test.objects.get_or_create(
            path=test.replace(autotest_dir, '').lstrip('/'))[0]
        logging.info("Processing %s", new_test.path)

        # Set the test's attributes
        data = tests[test]
        _set_attributes_clean(new_test, data)

        # Custom attribute update
        if site_set_attributes_module:
            site_set_attributes_module._set_attributes_custom(new_test, data)

        # This only takes place if --add-noncompliant is provided on the CLI
        if not new_test.name:
            test_new_test = test.split('/')
            if test_new_test[-1] == 'control':
                new_test.name = test_new_test[-2]
            else:
                control_name = "%s:%s"
                control_name %= (test_new_test[-2],
                                 test_new_test[-1])
                new_test.name = control_name.replace('control.', '')

        # Experimental check
        if not add_experimental and new_test.experimental:
            continue

        _log_or_execute(repr(new_test), new_test.save)
        add_label_dependencies(new_test)

        # Save TestParameter entries
        for para_name in data.test_parameters:
            test_parameter = models.TestParameter.objects.get_or_create(
                test=new_test, name=para_name)[0]
            test_parameter.save()


def _set_attributes_clean(test, data):
    """
    First pass: set the attributes of the Test object from the file system.

    :param test: a test object to be populated for the database.
    :param data: object with test data from the file system.
    """
    test_type = {'client': 1,
                 'server': 2}
    test_time = {'short': 1,
                 'medium': 2,
                 'long': 3}

    test.test_type = test_type[data.test_type.lower()]
    test.test_time = test_time[data.time.lower()]

    string_attributes = ('name', 'author', 'test_class', 'test_category',
                         'sync_count')
    for attribute in string_attributes:
        setattr(test, attribute, getattr(data, attribute))

    test.description = data.doc
    test.dependencies = ", ".join(data.dependencies)

    int_attributes = ('experimental', 'run_verify')
    for attribute in int_attributes:
        setattr(test, attribute, int(getattr(data, attribute)))


def add_label_dependencies(test):
    """
    Add the proper many-to-many relationships from the DEPENDENCIES field.

    :param test: test object for the database.
    """
    # Clear out old relationships
    _log_or_execute(repr(test), test.dependency_labels.clear,
                    subject='clear dependencies from')

    for label_name in test.dependencies.split(','):
        label_name = label_name.strip().lower()
        if not label_name:
            continue
        try:
            label = models.Label.objects.get(name=label_name)
        except models.Label.DoesNotExist:
            log_dependency_not_found(label_name)
            continue
        _log_or_execute(repr(label), test.dependency_labels.add, label,
                        subject='add dependency to %s' % test.name)


def log_dependency_not_found(label_name):
    """
    Log (only once) that a dependency label was not found in the database.

    :param label_name: label name taken from a test's dependencies.
    """
    if label_name in DEPENDENCIES_NOT_FOUND:
        return
    logging.info("Dependency %s not found", label_name)
    DEPENDENCIES_NOT_FOUND.add(label_name)


def get_tests_from_fs(parent_dir, control_pattern, add_noncompliant=False):
    """
    Find control files in the file system and load a dict with their info.

    :param parent_dir: directory to search recursively.
    :param control_pattern: name format of control file.
    :param add_noncompliant: ignore control file parse errors.
    :return: dictionary of the form: tests[file_path] = parsed_object
    """
    tests = {}
    profilers = False
    if 'client/profilers' in parent_dir:
        profilers = True

    for dir in [parent_dir]:
        files = recursive_walk(dir, control_pattern)
        for file in files:
            if '__init__.py' in file or '.svn' in file:
                continue
            if not profilers:
                if not add_noncompliant:
                    try:
                        found_test = control_data.parse_control(
                            file, raise_warnings=True)
                        tests[file] = found_test
                    except control_data.ControlVariableException as e:
                        logging.warn("Skipping %s\n%s", file, e)
                    except Exception as e:
                        logging.error("Bad %s\n%s", file, e)
                else:
                    found_test = control_data.parse_control(file)
                    tests[file] = found_test
            else:
                tests[file] = compiler.parseFile(file).doc
    return tests


def recursive_walk(path, wildcard):
    """
    Recursively go through a directory.

    This function is invoked by get_tests_from_fs().

    :param path: base directory to start the search.
    :param wildcard: name format to match.
    :return: a list of files that match wildcard.
    """
    files = []
    directories = [path]
    while len(directories) > 0:
        directory = directories.pop()
        for name in os.listdir(directory):
            fullpath = os.path.join(directory, name)
            if os.path.isfile(fullpath):
                # Keep the file if its name matches the control file pattern
                if re.search(wildcard, name):
                    files.append(fullpath)
            elif os.path.isdir(fullpath):
                directories.append(fullpath)
    return files


def _log_or_execute(content, func, *args, **kwargs):
    """
    Log a message if dry_run is enabled, otherwise execute the given function.

    Relies on the DRY_RUN global variable.

    :param content: the actual log message.
    :param func: function to execute if dry_run is not enabled.
    :param subject: (optional) the type of log being written; defaults to
                    the name of the provided function.
    """
    subject = kwargs.get('subject', func.__name__)
    if DRY_RUN:
        logging.info("Would %s: %s", subject, content)
    else:
        func(*args)


def _create_whitelist_set(whitelist_path):
    """
    Create a set with the contents of a whitelist file for membership testing.

    :param whitelist_path: full path to the whitelist file.
    :return: set with one entry per line, whitespace stripped.
    """
    f = open(whitelist_path, 'r')
    whitelist_set = set([line.strip() for line in f])
    f.close()
    return whitelist_set


def update_from_whitelist(whitelist_set, add_experimental, add_noncompliant,
                          autotest_dir):
    """
    Scan through all tests in the whitelist and add them to the database.

    This function is invoked when -w is supplied.

    :param whitelist_set: set of tests in full-path form from a whitelist.
    :param add_experimental: add tests with the experimental attribute set.
    :param add_noncompliant: attempt to add tests with invalid control files.
    :param autotest_dir: prepended to path strings
                         (see global_config.ini, COMMON, autotest_top_path).
    """
    tests = {}
    profilers = {}
    for file_path in whitelist_set:
        if file_path.find('client/profilers') == -1:
            try:
                found_test = control_data.parse_control(file_path,
                                                        raise_warnings=True)
                tests[file_path] = found_test
            except control_data.ControlVariableException as e:
                logging.warn("Skipping %s\n%s", file_path, e)
        else:
            profilers[file_path] = compiler.parseFile(file_path).doc

    if len(tests) > 0:
        update_tests_in_db(tests, add_experimental=add_experimental,
                           add_noncompliant=add_noncompliant,
                           autotest_dir=autotest_dir)
    if len(profilers) > 0:
        update_profilers_in_db(profilers, add_noncompliant=add_noncompliant,
                               description='NA')


def main(argv):
    """Main function"""
    global DRY_RUN

    parser = optparse.OptionParser()
    parser.add_option('-c', '--db-clean-tests',
                      dest='clean_tests', action='store_true',
                      default=False,
                      help='Clean client and server tests with invalid '
                           'control files')
    parser.add_option('-C', '--db-clear-all-tests',
                      dest='clear_all_tests', action='store_true',
                      default=False,
                      help='Clear ALL client and server tests')
    parser.add_option('-d', '--dry-run',
                      dest='dry_run', action='store_true', default=False,
                      help='Dry run for operation')
    parser.add_option('-A', '--add-all',
                      dest='add_all', action='store_true',
                      default=False,
                      help='Add site_tests, tests, and test_suites')
    parser.add_option('-S', '--add-samples',
                      dest='add_samples', action='store_true',
                      default=False,
                      help='Add samples.')
    parser.add_option('-E', '--add-experimental',
                      dest='add_experimental', action='store_true',
                      default=True,
                      help='Add experimental tests to frontend')
    parser.add_option('-N', '--add-noncompliant',
                      dest='add_noncompliant', action='store_true',
                      default=False,
                      help='Add non-compliant tests (i.e. tests that do not '
                           'define all required control variables)')
    parser.add_option('-p', '--profile-dir', dest='profile_dir',
                      help='Directory to recursively check for profiles')
    parser.add_option('-t', '--tests-dir', dest='tests_dir',
                      help='Directory to recursively check for control.*')
    parser.add_option('-r', '--control-pattern', dest='control_pattern',
                      default='^control.*',
                      help='The pattern to look for in directories for '
                           'control files')
    parser.add_option('-v', '--verbose',
                      dest='verbose', action='store_true', default=False,
                      help='Run in verbose mode')
    parser.add_option('-w', '--whitelist-file', dest='whitelist_file',
                      help='Filename for list of test names that must match')
    parser.add_option('-z', '--autotest-dir', dest='autotest_dir',
                      default=os.path.join(os.path.dirname(__file__), '..'),
                      help='Autotest directory root, or base test directory')
    options, args = parser.parse_args()

    logging_manager.configure_logging(TestImporterLoggingConfig(),
                                      verbose=options.verbose)

    DRY_RUN = options.dry_run
    if DRY_RUN:
        logging.getLogger().setLevel(logging.WARN)

    # Make sure autotest_dir is an absolute path
    options.autotest_dir = os.path.abspath(options.autotest_dir)

    if len(args) > 0:
        logging.error("Invalid option(s) provided: %s", args)
        parser.print_help()
        return 1

    if options.verbose:
        logging.getLogger().setLevel(logging.DEBUG)

    if len(argv) == 1 or (len(argv) == 2 and options.verbose):
        update_all(options.autotest_dir, options.add_noncompliant,
                   options.add_experimental)
        db_clean_broken(options.autotest_dir)
        return 0

    if options.clear_all_tests:
        if (options.clean_tests or options.add_all or options.add_samples or
                options.add_noncompliant):
            logging.error(
                "Can only pass --autotest-dir, --dry-run and --verbose with "
                "--db-clear-all-tests")
            return 1
        db_clean_all(options.autotest_dir)

    whitelist_set = None
    if options.whitelist_file:
        if options.add_all:
            logging.error("Cannot pass both --add-all and --whitelist-file")
            return 1
        whitelist_path = os.path.abspath(options.whitelist_file)
        if not os.path.isfile(whitelist_path):
            logging.error("--whitelist-file (%s) not found", whitelist_path)
            return 1
        logging.info("Using whitelist file %s", whitelist_path)
        whitelist_set = _create_whitelist_set(whitelist_path)
        update_from_whitelist(whitelist_set,
                              add_experimental=options.add_experimental,
                              add_noncompliant=options.add_noncompliant,
...
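The helper worth calling out is _log_or_execute(): every destructive operation in the importer (test.delete, model.save, test.dependency_labels.clear) is passed to it as an uncalled function, so the --dry-run flag can report what would change without touching the database. Below is a minimal, self-contained sketch of that dispatch pattern; the Record class, the sample name, and the log wording are hypothetical stand-ins for the Django model objects used above, not part of the autotest API.

# Sketch of the dry-run dispatch pattern (illustrative names only).
import logging

logging.basicConfig(level=logging.INFO)

DRY_RUN = True  # module-level switch, mirroring the global used by the importer


def log_or_execute(content, func, *args, **kwargs):
    # Log what *would* happen under dry-run; otherwise actually call func.
    subject = kwargs.get('subject', func.__name__)
    if DRY_RUN:
        logging.info("Would %s: %s", subject, content)
    else:
        func(*args)


class Record(object):
    # Hypothetical stand-in for a Django model instance with a delete() method.
    def __init__(self, name):
        self.name = name

    def delete(self):
        logging.info("Deleted %s", self.name)

    def __repr__(self):
        return "<Record %s>" % self.name


record = Record("tests/sleeptest")
log_or_execute(repr(record), record.delete)   # logs "Would delete: <Record ...>"

DRY_RUN = False
log_or_execute(repr(record), record.delete)   # now actually calls delete()

Passing the bound method itself rather than its result is what lets the helper defer or skip the side effect, which is why the code above always hands _log_or_execute an uncalled callable plus the arguments it would need.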