Best Python code snippet using pytest-django
Source: runner.py
from importlib import import_module
import os
from optparse import make_option
import unittest
from unittest import TestSuite, defaultTestLoader
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
from django.test import SimpleTestCase, TestCase
from django.test.utils import setup_test_environment, teardown_test_environment


class DiscoverRunner(object):
    """
    A Django test runner that uses unittest2 test discovery.
    """
    test_suite = TestSuite
    test_runner = unittest.TextTestRunner
    test_loader = defaultTestLoader
    reorder_by = (TestCase, SimpleTestCase)
    option_list = (
        make_option('-t', '--top-level-directory',
            action='store', dest='top_level', default=None,
            help='Top level of project for unittest discovery.'),
        make_option('-p', '--pattern', action='store', dest='pattern',
            default="test*.py",
            help='The test matching pattern. Defaults to test*.py.'),
    )

    def __init__(self, pattern=None, top_level=None,
                 verbosity=1, interactive=True, failfast=False,
                 **kwargs):
        self.pattern = pattern
        self.top_level = top_level
        self.verbosity = verbosity
        self.interactive = interactive
        self.failfast = failfast

    def setup_test_environment(self, **kwargs):
        setup_test_environment()
        settings.DEBUG = False
        unittest.installHandler()

    def build_suite(self, test_labels=None, extra_tests=None, **kwargs):
        suite = self.test_suite()
        test_labels = test_labels or ['.']
        extra_tests = extra_tests or []
        discover_kwargs = {}
        if self.pattern is not None:
            discover_kwargs['pattern'] = self.pattern
        if self.top_level is not None:
            discover_kwargs['top_level_dir'] = self.top_level
        for label in test_labels:
            kwargs = discover_kwargs.copy()
            tests = None
            label_as_path = os.path.abspath(label)
            # if a module, or "module.ClassName[.method_name]", just run those
            if not os.path.exists(label_as_path):
                tests = self.test_loader.loadTestsFromName(label)
            elif os.path.isdir(label_as_path) and not self.top_level:
                # Try to be a bit smarter than unittest about finding the
                # default top-level for a given directory path, to avoid
                # breaking relative imports. (Unittest's default is to set
                # top-level equal to the path, which means relative imports
                # will result in "Attempted relative import in non-package.").
                # We'd be happy to skip this and require dotted module paths
                # (which don't cause this problem) instead of file paths (which
                # do), but in the case of a directory in the cwd, which would
                # be equally valid if considered as a top-level module or as a
                # directory path, unittest unfortunately prefers the latter.
                top_level = label_as_path
                while True:
                    init_py = os.path.join(top_level, '__init__.py')
                    if os.path.exists(init_py):
                        try_next = os.path.dirname(top_level)
                        if try_next == top_level:
                            # __init__.py all the way down? give up.
                            break
                        top_level = try_next
                        continue
                    break
                kwargs['top_level_dir'] = top_level
            if not (tests and tests.countTestCases()) and is_discoverable(label):
                # Try discovery if path is a package or directory
                tests = self.test_loader.discover(start_dir=label, **kwargs)
                # Make unittest forget the top-level dir it calculated from this
                # run, to support running tests from two different top-levels.
                self.test_loader._top_level_dir = None
            suite.addTests(tests)
        for test in extra_tests:
            suite.addTest(test)
        return reorder_suite(suite, self.reorder_by)

    def setup_databases(self, **kwargs):
        return setup_databases(self.verbosity, self.interactive, **kwargs)

    def run_suite(self, suite, **kwargs):
        return self.test_runner(
            verbosity=self.verbosity,
            failfast=self.failfast,
        ).run(suite)

    def teardown_databases(self, old_config, **kwargs):
        """
        Destroys all the non-mirror databases.
        """
        old_names, mirrors = old_config
        for connection, old_name, destroy in old_names:
            if destroy:
                connection.creation.destroy_test_db(old_name, self.verbosity)

    def teardown_test_environment(self, **kwargs):
        unittest.removeHandler()
        teardown_test_environment()

    def suite_result(self, suite, result, **kwargs):
        return len(result.failures) + len(result.errors)

    def run_tests(self, test_labels, extra_tests=None, **kwargs):
        """
        Run the unit tests for all the test labels in the provided list.
        Test labels should be dotted Python paths to test modules, test
        classes, or test methods.
        A list of 'extra' tests may also be provided; these tests
        will be added to the test suite.
        Returns the number of tests that failed.
        """
        self.setup_test_environment()
        suite = self.build_suite(test_labels, extra_tests)
        old_config = self.setup_databases()
        result = self.run_suite(suite)
        self.teardown_databases(old_config)
        self.teardown_test_environment()
        return self.suite_result(suite, result)


def is_discoverable(label):
    """
    Check if a test label points to a python package or file directory.
    Relative labels like "." and ".." are seen as directories.
    """
    try:
        mod = import_module(label)
    except (ImportError, TypeError):
        pass
    else:
        return hasattr(mod, '__path__')
    return os.path.isdir(os.path.abspath(label))


def dependency_ordered(test_databases, dependencies):
    """
    Reorder test_databases into an order that honors the dependencies
    described in TEST[DEPENDENCIES].
    """
    ordered_test_databases = []
    resolved_databases = set()
    # Maps db signature to dependencies of all its aliases
    dependencies_map = {}
    # sanity check - no DB can depend on its own alias
    for sig, (_, aliases) in test_databases:
        all_deps = set()
        for alias in aliases:
            all_deps.update(dependencies.get(alias, []))
        if not all_deps.isdisjoint(aliases):
            raise ImproperlyConfigured(
                "Circular dependency: databases %r depend on each other, "
                "but are aliases." % aliases)
        dependencies_map[sig] = all_deps
    while test_databases:
        changed = False
        deferred = []
        # Try to find a DB that has all its dependencies met
        for signature, (db_name, aliases) in test_databases:
            if dependencies_map[signature].issubset(resolved_databases):
                resolved_databases.update(aliases)
                ordered_test_databases.append((signature, (db_name, aliases)))
                changed = True
            else:
                deferred.append((signature, (db_name, aliases)))
        if not changed:
            raise ImproperlyConfigured(
                "Circular dependency in TEST[DEPENDENCIES]")
        test_databases = deferred
    return ordered_test_databases


def reorder_suite(suite, classes):
    """
    Reorders a test suite by test type.
    `classes` is a sequence of types.
    All tests of type classes[0] are placed first, then tests of type
    classes[1], etc. Tests with no match in classes are placed last.
    """
    class_count = len(classes)
    suite_class = type(suite)
    bins = [suite_class() for i in range(class_count + 1)]
    partition_suite(suite, classes, bins)
    for i in range(class_count):
        bins[0].addTests(bins[i + 1])
    return bins[0]


def partition_suite(suite, classes, bins):
    """
    Partitions a test suite by test type.
    classes is a sequence of types.
    bins is a sequence of TestSuites, one more than classes.
    Tests of type classes[i] are added to bins[i];
    tests with no match found in classes are placed in bins[-1].
    """
    suite_class = type(suite)
    for test in suite:
        if isinstance(test, suite_class):
            partition_suite(test, classes, bins)
        else:
            for i in range(len(classes)):
                if isinstance(test, classes[i]):
                    bins[i].addTest(test)
                    break
            else:
                bins[-1].addTest(test)


def setup_databases(verbosity, interactive, **kwargs):
    from django.db import connections, DEFAULT_DB_ALIAS
    # First pass -- work out which databases actually need to be created,
    # and which ones are test mirrors or duplicate entries in DATABASES
    mirrored_aliases = {}
    test_databases = {}
    dependencies = {}
    default_sig = connections[DEFAULT_DB_ALIAS].creation.test_db_signature()
    for alias in connections:
        connection = connections[alias]
        test_settings = connection.settings_dict['TEST']
        if test_settings['MIRROR']:
            # If the database is marked as a test mirror, save
            # the alias.
            mirrored_aliases[alias] = test_settings['MIRROR']
        else:
            # Store a tuple with DB parameters that uniquely identify it.
            # If we have two aliases with the same values for that tuple,
            # we only need to create the test database once.
            item = test_databases.setdefault(
                connection.creation.test_db_signature(),
                (connection.settings_dict['NAME'], set())
            )
            item[1].add(alias)
            if 'DEPENDENCIES' in test_settings:
                dependencies[alias] = test_settings['DEPENDENCIES']
            else:
                if alias != DEFAULT_DB_ALIAS and connection.creation.test_db_signature() != default_sig:
                    dependencies[alias] = test_settings.get('DEPENDENCIES', [DEFAULT_DB_ALIAS])
    # Second pass -- actually create the databases.
    old_names = []
    mirrors = []
    for signature, (db_name, aliases) in dependency_ordered(
            test_databases.items(), dependencies):
        test_db_name = None
        # Actually create the database for the first connection
        for alias in aliases:
            connection = connections[alias]
            if test_db_name is None:
                test_db_name = connection.creation.create_test_db(
                    verbosity,
                    autoclobber=not interactive,
                    serialize=connection.settings_dict.get("TEST", {}).get("SERIALIZE", True),
                )
                destroy = True
            else:
                connection.settings_dict['NAME'] = test_db_name
                destroy = False
            old_names.append((connection, db_name, destroy))
    for alias, mirror_alias in mirrored_aliases.items():
        mirrors.append((alias, connections[alias].settings_dict['NAME']))
        connections[alias].settings_dict['NAME'] = (
            connections[mirror_alias].settings_dict['NAME'])
...
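The runner above is normally invoked through Django's test management command, but it can also be driven directly from a script. The following is a minimal sketch of calling DiscoverRunner.run_tests programmatically, assuming a Django version that provides django.setup(); the settings module 'myproject.settings' and the test label 'myapp.tests' are hypothetical placeholders, not part of the source above.

import os
import django
from django.test.runner import DiscoverRunner

# Hypothetical settings module; replace with your project's own.
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'myproject.settings')
django.setup()

# pattern/verbosity/interactive/failfast mirror the __init__ arguments shown above.
runner = DiscoverRunner(pattern='test*.py', verbosity=1, interactive=False)
failures = runner.run_tests(['myapp.tests'])  # returns the number of failed tests
raise SystemExit(bool(failures))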
Source: data_tp.py
def dataframe2numeric_tp():
    import os
    import pandas as pd
    from systool import data
    file_path = os.path.dirname(os.path.abspath(__file__))
    df = pd.read_csv(os.path.join(file_path, r'test_databases\test_dataframe2numeric.csv'))
    if df.shape[1] == 1:
        df = pd.read_csv(os.path.join(file_path, r'test_databases\test_dataframe2numeric.csv'), sep=";")
    else:
        pass
    df = data.dataframe2numeric(df, col_dt_preffix='H_', col_td_preffix='TD_')
    datetime_cols = [col for col in df.columns if col.startswith('H_')]
    timedelta_cols = [col for col in df.columns if col.startswith('TD_')]
    other_cols = [col for col in df.columns if col.find('H_') == -1 and col.find('TD_') == -1]
    datetime = 0
    timedelta = 0
    other = 0
    for col in datetime_cols:
        if str(df[col].dtype) != 'datetime64[ns]':
            continue
        else:
            datetime += 1
    for col in timedelta_cols:
        if str(df[col].dtype) != 'timedelta64[ns]':
            continue
        else:
            timedelta += 1
    for col in other_cols:
        if str(df[col].dtype) != 'int64' and str(df[col].dtype) != 'float64':
            continue
        else:
            other += 1
    assert datetime == 5
    assert timedelta == 3
    assert other == 18


def openfile_tp():
    import os
    from systool import data
    file_path = os.path.dirname(os.path.abspath(__file__))
    df1 = data.open_file(file_path, r'test_databases\test_openfile.csv', usa=True)
    df2 = data.open_file(file_path, r'test_databases\test_openfile.xlsx', usa=True)
    df3 = data.open_file(file_path, r'test_databases\test_openfile.shp', usa=True)
    df4 = data.open_file(file_path, r'test_databases\test_openfile.parquet', usa=True)
    df5 = data.open_file(file_path, r'test_databases\test_openfile.txt', usa=True)
    assert df1.equals(df2)
    assert df2.equals(df4)
    assert df4.equals(df5)
    assert df5.equals(df3.iloc[:, :-1])
    assert str(type(df3)) == "<class 'geopandas.geodataframe.GeoDataFrame'>"


def savefile_tp():
    import os
    import geopandas as gpd
    from systool import data
    file_path = os.path.dirname(os.path.abspath(__file__))
    geodf = gpd.read_file(os.path.join(file_path, r'test_databases\test_openfile.shp'))
    df = geodf.iloc[:, :-1]
    extensions = ['csv', 'parquet', 'xlsx']
    for extension in extensions:
        data.save_file(df, os.path.join(file_path, r'test_databases\\'), ext=extension)
    data.save_file(geodf, os.path.join(file_path, r'test_databases\\'))
    extensions_ver = ['csv', 'parquet', 'xlsx', 'shp']
    vers = []
    for extension in extensions_ver:
        ver = os.path.isfile(os.path.join(file_path, rf'test_databases\test.{extension}'))
        vers.append(ver)
    assert vers[0]
    assert vers[1]
    assert vers[2]
    assert vers[3]


def getcols_tp():
    import os
    import pandas as pd
    from systool import data
    file_path = os.path.dirname(os.path.abspath(__file__))
    right = pd.read_excel(os.path.join(file_path, r'test_databases\test_getcols_right.xlsx'), sheet_name='Sheet1')
    left = pd.read_excel(os.path.join(file_path, r'test_databases\test_getcols_left.xlsx'), sheet_name='Sheet1')
    merged = pd.read_excel(os.path.join(file_path, r'test_databases\test_getcols_merged.xlsx'), sheet_name='Sheet1')
    df = data.get_col(left, ['D', 'E'], right, 'C', 'C')
    assert df.equals(merged)


def getmaskisin_tp():
    import os
    import pandas as pd
    from systool import data
    file_path = os.path.dirname(os.path.abspath(__file__))
    df = pd.read_excel(os.path.join(file_path, r'test_databases\test_getmaskisin.xlsx'),
                       sheet_name='Sheet1')
    mask1 = data.get_mask_isin(df, ['A', 'B'], [(1000, 1), (1001, 1)])
    mask1 = df[mask1]
    mask2 = df[((df['A'] == 1000) & (df['B'] == 1)) |
               ((df['A'] == 1001) & (df['B'] == 1))]
    assert mask1.equals(mask2)


def removeduplicatessafe_tp():
    import os
    import pandas as pd
    from systool import data
    file_path = os.path.dirname(os.path.abspath(__file__))
    df = pd.read_excel(os.path.join(file_path, r'test_databases\test_removeduplicatessafe.xlsx'), sheet_name='Sheet1')
    df = data.remove_duplicate_safe(df, ['A', 'B'])
    df = df.reset_index()
    df = df.drop(columns='index')
    df_expected = pd.read_excel(os.path.join(file_path, r'test_databases\test_removeduplicatessafe_expected.xlsx'),
                                sheet_name='Sheet1')
    assert df.equals(df_expected)


def flattenhierarchicalcol_tp():
    import os
    import pandas as pd
    from systool import data
    file_path = os.path.dirname(os.path.abspath(__file__))
    df = pd.read_excel(os.path.join(file_path, r'test_databases\test_flattenhierarchical_col.xlsx'),
                       sheet_name='Sheet1')
    df_badge = pd.read_csv(os.path.join(file_path, r'test_databases\test_flattenhierarchical_col_badge.csv'))
    df_badge.index = df_badge['B']
    df_badge.drop(columns=['B'], inplace=True)
    df = df.pivot_table(index='B', columns='A').swaplevel(axis=1).sort_index(1)
    mapper = {}
    for column in df.columns:
        mapper[column] = data.flatten_hierarchical_col(column)
    df.columns = df.columns.map(mapper)
...
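The helpers in data_tp.py end in _tp rather than the test_ prefix that pytest collects by default, so pytest would not pick them up on its own. Below is a minimal, hypothetical wrapper module (here called test_data.py) that exposes some of them to pytest; it assumes data_tp.py is importable from the test directory and that the test_databases fixture files referenced above exist.

# test_data.py -- hypothetical pytest wrappers around the _tp helpers above.
from data_tp import (
    dataframe2numeric_tp,
    openfile_tp,
    getcols_tp,
    getmaskisin_tp,
)


def test_dataframe2numeric():
    dataframe2numeric_tp()  # asserts internally on the converted dtypes


def test_openfile():
    openfile_tp()  # asserts every supported format loads to an equal frame


def test_getcols():
    getcols_tp()


def test_getmaskisin():
    getmaskisin_tp()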