Best Python code snippet using sure_python
DK_Numerical.py
Source: DK_Numerical.py
1"""2Collection of algorithms (geared towards abstract math and numerical problems)34Picked up from MA and now continued from Cooling Singapore. Oct 20195"""67# This control flag is used to skip sections if their prerequisite modules are not available, so that DK_Numerical8# can be imported more flexibly with regards to the environment.910_control = {section: True for section in ('NetworkX', 'Pandas', 'numpy')}1112try:13 import networkx as nx14except ModuleNotFoundError:15 _control['NetworkX'] = False1617try:18 import pandas as pd19except ModuleNotFoundError:20 _control['Pandas'] = False2122try:23 import numpy as np24except ModuleNotFoundError:25 _control['numpy'] = False2627import warnings282930# -------------------------------------------- NetworkX ------------------------------------------------ #31if _control['NetworkX']:32 def get_treeroot(tree, guess=None):33 """34 Finds the root node of a NetworkX DiGraph tree.3536 Args:37 tree: NetworkX DiGraph tree38 guess: Best bet for which node is the root. The closer this node is to the root, the faster the search.3940 Returns:41 Root node of tree.42 """4344 # Step 1: Param checks45 if not isinstance(tree, nx.DiGraph):46 raise TypeError("Must pass a NetworkX Digraph to parameter 'tree.'")4748 if not nx.is_tree(tree):49 raise ValueError("Passed graph is not a tree.")5051 if guess is None:52 guess = list(tree.nodes)[0]53 else:54 if guess not in tree:55 warnings.warn("The guessed root node is not in the tree.")56 guess = list(tree.nodes)[0]575859 # Step 2: Follow predecessors until you find the root60 root = guess61 counter = 162 n_nodes = len(tree)6364 # This is a redundant loop, but Iterator Protocol + NetworkX guarantees that this is unnecessary.65 while counter <= n_nodes:66 try:67 root = next(tree.predecessors(root))68 counter += 169 except StopIteration: # raised for the root node (no predecessors)70 break717273 return root747576 def compare_trees(T1, T2):77 """78 Returns true if the trees are IDENTICAL (isomorphism is necessary but insufficient), and False otherwise.7980 Two trees are identical iff:81 1) same set of nodes82 2) same set of branches83 3) same branch params84 (for now, assumed no node params)8586 DEVELOPER'S CORNER:87 I learned that dictionaries can be tested for equality via ==. There's also talk on StackOverflow about this88 equality test holding for nested dicts (and other nesting combinations with other data structures).8990 """91 if not isinstance(T1, nx.DiGraph) or not isinstance(T2, nx.DiGraph):92 raise TypeError("This function only accepts NetworkX DiGraphs.")9394 if not nx.faster_could_be_isomorphic(T1, T2):95 return False9697 # Check 1: Compare nodes98 if set(T1) != set(T2):99 return False100101 # Check 2: Compare branches102 br1 = set(br for br in T1.edges())103 br2 = set(br for br in T2.edges())104105 if br1 != br2:106 return False107108 # Check 3: Compare branch parameters (final test)109 return all([T1.edges[up, down] == T2.edges[up, down] for up, down in br1])110111112# -------------------------------------------- Pandas ------------------------------------------------ #113if _control['Pandas']:114 def empty_df_with_dtypes(col_dtype: dict):115 """Initializes a pandas DataFrame with NO ROWS, but with col labels AND datatypes. 
The DataFrame constructor only116 lets you specify one datatype for all the columns, so this function fills that gap.117118 Args:119 col_dtype: as dictionary of 'column label':'numpy dtype' pairs120121 Note: This solution is adapted from the one proposed in stackoverflow,122 https://stackoverflow.com/questions/36462257/create-empty-dataframe-in-pandas-specifying-column-types/48374031#48374031123124125 Returns:126 Empty DataFrame, with columns + dtype specified.127128 """129 if not isinstance(col_dtype, dict):130 raise TypeError("Pls. pass a dictionary of 'column label':'numpy dtype' pairs.")131132 if len(col_dtype) == 0:133 raise RuntimeError("Requested table has no columns.")134135 df = pd.DataFrame()136137 for c, d in col_dtype.items():138 df[c] = pd.Series(dtype=d)139140 return df141142143 def is_xymonotonic(x, y, slope='pos', getdf=False, as_assertion=False):144 """Checks if the given relationship <x, y> is monotonic in the specified direction ('pos' or 'neg' slope). A145 monotonic line is one whose slope is consistently either non-neg or non-pos. Pass vectors (iterables) to x146 and y. x has to be strictly increasing, whereas y can have repeating subsequent points (flat slope).147148 Returns a tuple (bool, list):149 Bool is True if monotonic in the specified direction.150151 List is empty if bool is True. Otherwise, contains the indices (0-indexed range is assigned to the data)152153 if getdf is True, then a third item is returned as a pandas DataFrame of vectors x and y, with column154 names as 'x' and 'y'.155156 To assert monotonicity:157 assert is_xymonotonic(x, y, slope='pos')[0]158159 To assert monotinicity internally160 is_xymonotonic(x, y, slope='pos', as_assertion=True)161162 """163164 # ----------------------------------------------------------------------- Checks165 length = len(x)166 if length != len(y):167 raise ValueError("Vectors x and y have to be of the same length.")168 if length < 3:169 raise ValueError("Vectors x and y must have at least 3 points.")170171172173 # ----------------------------------------------------------------------- Convert to Series174 # x2, y2 is the same data but shifted one index forward wrt x1 and y1 -- old implementation175 x = pd.Series(data=(pt for pt in x))176 # x2 = pd.Series(data=x1.values, index=pd.RangeIndex(-1, length-1))177178 y = pd.Series(data=(pt for pt in y))179 # y2 = pd.Series(data=y1.values, index=pd.RangeIndex(-1, length-1))180181 # ----------------------------------------------------------------------- Calc dx and dy182 # dx = x2-x1183 dx = x.diff()184 dx = dx.loc[dx.notna()]185186 # dy = y2-y1187 dy = y.diff()188 dy = dy.loc[dy.notna()]189190 # Check that all dx > 0191 Lf_invalid_dx = dx <= 0192 if Lf_invalid_dx.any():193 # Collect indeces of points (+next point) where dx <= 0194 invalid_idx = Lf_invalid_dx.loc[Lf_invalid_dx].index195 invalid_idx_withadj = pd.Index(set(invalid_idx).union(invalid_idx+1))196197 print("x vector can only be increasing. Pls. 
view the slice: \n{}".format(x.loc[invalid_idx_withadj]))198 raise ValueError(invalid_idx_withadj)199200 # ----------------------------------------------------------------------- Check if monotonic201 sign = {'pos': 1}.get(slope, -1)202 # sign * dy must be >= 0 for all entries to fulfill the specified monotonicity203 Lf_failed = sign * dy < 0204205 returned_value = (not any(Lf_failed), list(Lf_failed.loc[Lf_failed].index))206207 if as_assertion:208 assert returned_value[0], returned_value[1]209210 if getdf:211 returned_value += (pd.DataFrame({'x': x, 'y': y}), )212 return returned_value213214215 def sortxy(x, y, ascending=True):216 """Given vectors x and y (as iterables that are ordered and 1:1), sortxy() sorts these vectors in ascending217 or descending order (via param 'ascending'), and returns them in a DataFrame with a range index. No check is218 done if x has repeating values."""219 # todo consider keeping the original index, if ever necessary220221 # ----------------------------------------------------------------------- Checks222 length = len(x)223 if length != len(y):224 raise ValueError("Vectors x and y have to be of the same length.")225226 # ----------------------------------------------------------------------- Convert to Series227 # Range index here stores the mapping x --> y228 Serx = pd.Series(data=(pt for pt in x))229 Sery = pd.Series(data=(pt for pt in y))230231 # ----------------------------------------------------------------------- Sort and return df232 Serx_sorted = Serx.sort_values(ascending=ascending)233234 return pd.DataFrame({235 'x': Serx_sorted.values,236 'y': Sery.loc[Serx_sorted.index].values,237 })238239240# -------------------------------------------- numpy ------------------------------------------------ #241if _control['numpy']:242 def clip(f, lb=None, ub=None):243 """Return a clipped version of callable f(). This is the counterpart of numpy.clip(),244 which only works on explicit arrays, for callables.245246 lb and ub follow the same requirements as in their counterparts in numpy.clip().247 """248 if ub < lb:249 raise ValueError("ub < lb")250 return lambda x: np.clip(f(x), lb, ub)251252253 def apply_f_every_n(arr, n, func=np.sum, res_dtype='f8'):254 """Apply function func() to every n elements of array. The length of array arr must be a multiple of n,255 but this is not checked (last operation would fall short of elements w/o raising an exception).256257258 sample usage:259 # Downscale the resolution by half, while getting the mean of the data points260 apply_f_every_n(ser.values, 2, np.mean)261262 # You can use this via DataFrame.apply(). Here, we are changing a 24-h, half-hourly dataset into an hourly one.263 df_24h_hh.apply(apply_f_every_n, axis=0, args=(2, np.mean))264 """265 start_idxs = np.arange(0, arr.shape[0], n)266267 return np.fromiter((func(arr[idx:idx + n]) for idx in start_idxs), dtype=res_dtype)268269270# -------------------------------------------- Uncategorized Python ------------------------------------------------ #271def get_dictvals(mydict):272 """This function recursively gets all non-dict items in a hierarchical dict data structure. 
The order of the items have no intended meaning.273274 e.g.275 myheir = {276 0: '0.0',277 1: '0.1',278 2: {0: '1.0', 1: '1.1'},279 3: {0: '1.2', 1: {0: '2.0', 1: '2.1', 2: {0: '3.0'}, 3:'2.2'}},280 4: {0: '1.3', 1: '1.4'},281 5: '0.2',282 }283284 get_dictvals(myheir)285 >> ['0.0', '0.1', '1.0', '1.1', '1.2', '2.0', '2.1', '3.0', '2.2', '1.3', '1.4', '0.2']286287 """288 values = []289290 for val in mydict.values():291 if isinstance(val, dict):292 values.extend(get_dictvals(val))293 else:294 values.append(val)295 return values296297298299# -------------------------------------------- Tuple Arithmetic ------------------------------------------------ #300# FOR DEVELOPMENT:301# 1) Sequences of tuples should be contained in sets.302#303304def totuple(item):305 if isinstance(item, (str, int)):306 return tuple([item])307 else:308 return tuple(item)309310311def tupsum(*addends, strAsOne=True):312 """313 Performs a tuple addition on an indefinite number of tuples. Arguments must be tuple-coercibles (they must be314 iterables or iterators), or scalars (i.e. tuple([arg]) succeeds).315316 Returns:317 A tuple of the tuple sum of the addends.318319 DEVELOPER'S CORNER:320 Tuples support concatenation, ie (1) + (0) = (1,0), which is really what tupple addition is in the context of321 the Cartesian product.322323 tuple() returns an empty tuple, which guarantees that 'sum' will support concatenation.324325 If an addend is not a tuple, tuple() coerces it (same elements).326 If an addend cannot be coerced directly, then it is expected to be a scalar (usu. numeric or string),327 and thus containing it via tuple([a]) should work.328 """329 sum = tuple()330331 for a in addends:332 if type(a) is str and strAsOne:333 a = tuple([a])334 try:335 sum = sum + tuple(a)336 except TypeError:337 sum = sum + tuple([a])338339 return sum340341342def tupadd(a, b):343 """344 Performs a tuple addition of a and b.345346 Args: a and b can either be 1) scalars (i.e. single coordinate) or 2) elementary tuples347348 Any argument that is a tuple (assumed elementary; i.e. all its elements are scalars) is unpacked as can be seen349 below.350351 As far as the author knows, this function cannot be extended to an arbitrary number of addends (by taking a352 var-positional parameter), because the statement a = *a is not allowed. To sum an indefinite number of addends,353 one way to achieve this is by looping every addend pair, much like how tuppi works.354355 Returns:356 The tuple sum of a and b (GUARANTEED elementary tuple if a and b are elementary tuples or scalars).357 """358 if type(a) == tuple and type(b) == tuple:359 return tuple([*a, *b])360 elif type(a) == tuple:361 return tuple([*a, b])362 elif type(b) == tuple:363 return tuple([a, *b])364 else:365 return tuple([a, b])366367368def tuppi(*factors):369 """370 Performs a Cartesian product of all of its factors.371372 Args:373 *factors: An iterable of tuple-coercibles. If you want a 1-dim tuple, pass it as (a,) and not a.374375 Returns:376 The Cartesian product as a tuple.377378 DEVELOPER'S CORNER:379 If the factors are all elementary tuples or scalars, the product is GUARANTEED to be an elementary tuple.380 Even if after every iteration of the for factor .. loop, product is contained in a tuple, this container is381 "opened up" by the same generator expression come next iteration. 
The container is opened and new (and more)382 elements are repacked in it.383384 """385 # Require that factors is an iterable of tuple-coercibles386 tup_factors = [tuple(i) for i in factors]387388 # init product to 1st factor * 1389 product = tup_factors[0]390391 for factor in tup_factors[1:]:392 product = tuple(tupadd(a, b) for a in product for b in factor)393 # RHS is precisely the def'n of the Cartesian Product, product x factor394395 return product396397398def tupsumproduct(tree):399 """400 This function performs a sum of products.401402 Args:403 tree: dictionary of {iterable of parents : iterable of children}404405 Returns:406 The tuple sum of products as a tuple, i.e., sum( keys x values )407408409 DEVELOPER'S CORNER:410 This generator expression will iterate through all its for clauses, before stringing them together.411 As tupadd() is guaranteed to return elementary tuples (so long as arguments are elementary tuples or scalars),412 the generator produces a sequence of elementary tuples, which are then finally packed in the tuple() constructor.413 I could have used a list(), but using tuple() is safe as tuple(any_tuple) == any_tuple. The external414 container type can be any sequence. I just chose tuples for both this function and tuppi so that the415 containers are immutable, if you want to use them directlty.416417 """418 prm_tuples = {totuple(key): totuple(val) for key, val in tree.items()}419 return tuple(tupadd(parent, child) for key, val in prm_tuples.items() for parent in key for child in val)420 # SUM (of heterogenous key:val mappings) PRODUCTS (key x val)421422423
...
core.py
Source: core.py
...84 header = "given\nX = %s\n and\nY = %s\n%s" % params85 return yellow(header).strip()86 def get_assertion(self, X, Y):87 return AssertionError(self.get_header(X, Y, self))88 def as_assertion(self, X, Y):89 raise self.get_assertion(X, Y)90class DeepComparison(object):91 def __init__(self, X, Y, epsilon=None, parent=None):92 self.operands = X, Y93 self.epsilon = epsilon94 self.parent = parent95 self._context = None96 def is_simple(self, obj):97 return isinstance(obj, (98 string_types, integer_types99 ))100 def compare_complex_stuff(self, X, Y):101 kind = type(X)102 mapping = {103 float: self.compare_floats,104 dict: self.compare_dicts,105 list: self.compare_iterables,106 tuple: self.compare_iterables,107 }108 return mapping.get(kind, self.compare_generic)(X, Y)109 def compare_generic(self, X, Y):110 c = self.get_context()111 if X == Y:112 return True113 else:114 m = 'X%s != Y%s' % (red(c.current_X_keys), green(c.current_Y_keys))115 return DeepExplanation(m)116 def compare_floats(self, X, Y):117 c = self.get_context()118 if self.epsilon is None:119 return self.compare_generic(X, Y)120 if abs(X - Y) <= self.epsilon:121 return True122 else:123 m = 'X%s±%s != Y%s±%s' % (red(c.current_X_keys), self.epsilon, green(c.current_Y_keys), self.epsilon)124 return DeepExplanation(m)125 def compare_dicts(self, X, Y):126 c = self.get_context()127 x_keys = list(sorted(X.keys()))128 y_keys = list(sorted(Y.keys()))129 diff_x = list(set(x_keys).difference(set(y_keys)))130 diff_y = list(set(y_keys).difference(set(x_keys)))131 if diff_x:132 msg = "X%s has the key %%r whereas Y%s does not" % (133 red(c.current_X_keys),134 green(c.current_Y_keys),135 ) % diff_x[0]136 return DeepExplanation(msg)137 elif diff_y:138 msg = "X%s does not have the key %%r whereas Y%s has it" % (139 red(c.current_X_keys),140 green(c.current_Y_keys),141 ) % diff_y[0]142 return DeepExplanation(msg)143 elif X == Y:144 return True145 else:146 for key_X, key_Y in zip(x_keys, y_keys):147 self.key_X = key_X148 self.key_Y = key_Y149 value_X = X[key_X]150 value_Y = Y[key_Y]151 child = DeepComparison(152 value_X,153 value_Y,154 epsilon=self.epsilon,155 parent=self,156 ).compare()157 if isinstance(child, DeepExplanation):158 return child159 def get_context(self):160 if self._context:161 return self._context162 X_keys = []163 Y_keys = []164 comp = self165 while comp.parent:166 X_keys.insert(0, comp.parent.key_X)167 Y_keys.insert(0, comp.parent.key_Y)168 comp = comp.parent169 def get_keys(i):170 if not i:171 return ''172 return '[%s]' % ']['.join(map(safe_repr, i))173 class ComparisonContext:174 current_X_keys = get_keys(X_keys)175 current_Y_keys = get_keys(Y_keys)176 parent = comp177 self._context = ComparisonContext()178 return self._context179 def compare_iterables(self, X, Y):180 len_X, len_Y = map(len, (X, Y))181 if len_X > len_Y:182 msg = "X has %d items whereas Y has only %d" % (len_X, len_Y)183 return DeepExplanation(msg)184 elif len_X < len_Y:185 msg = "Y has %d items whereas X has only %d" % (len_Y, len_X)186 return DeepExplanation(msg)187 elif X == Y:188 return True189 else:190 for i, (value_X, value_Y) in enumerate(zip(X, Y)):191 self.key_X = self.key_Y = i192 child = DeepComparison(193 value_X,194 value_Y,195 epsilon=self.epsilon,196 parent=self,197 ).compare()198 if isinstance(child, DeepExplanation):199 return child200 def compare(self):201 X, Y = self.operands202 if isinstance(X, mock._CallList):203 X = list(X)204 if isinstance(Y, mock._CallList):205 X = list(Y)206 c = self.get_context()207 if self.is_simple(X) and 
self.is_simple(Y): # both simple208 if X == Y:209 return True210 c = self.get_context()211 m = "X%s is %%r whereas Y%s is %%r"212 msg = m % (red(c.current_X_keys), green(c.current_Y_keys)) % (X, Y)213 return DeepExplanation(msg)214 elif type(X) is not type(Y): # different types215 xname, yname = map(lambda _: type(_).__name__, (X, Y))216 msg = 'X%s is a %%s and Y%s is a %%s instead' % (217 red(c.current_X_keys),218 green(c.current_Y_keys),219 ) % (xname, yname)220 exp = DeepExplanation(msg)221 else:222 exp = self.compare_complex_stuff(X, Y)223 if isinstance(exp, DeepExplanation):224 original_X, original_Y = c.parent.operands225 raise exp.as_assertion(original_X, original_Y)226 return exp227 def explanation(self):228 return self._explanation229def _get_file_name(func):230 try:231 name = inspect.getfile(func)232 except AttributeError:233 name = get_function_code(func).co_filename234 return os.path.abspath(name)235def _get_line_number(func):236 try:237 return inspect.getlineno(func)238 except AttributeError:239 return get_function_code(func).co_firstlineno...
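A minimal sketch of how the DeepComparison class excerpted above might be driven directly; the import path sure.core and the nested dicts are assumptions for illustration, and in everyday use the comparison runs behind sure's assertion helpers rather than being called by hand. An epsilon argument switches float comparison to approximate equality.

from sure.core import DeepComparison

X = {'name': 'pump-1', 'params': {'flow': 0.5, 'stages': 2}}
Y = {'name': 'pump-1', 'params': {'flow': 0.5, 'stages': 2}}

# Identical nested structures compare clean
assert DeepComparison(X, Y).compare() is True

# A mismatch deep inside the structure raises an AssertionError whose message
# names the offending key path (X['params']['flow'] vs Y['params']['flow'])
Y['params']['flow'] = 0.7
try:
    DeepComparison(X, Y).compare()
except AssertionError as error:
    print(error)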