Best Python code snippet using tempest_python
fast.py
Source:fast.py
"""
Basically a parser that is faster, because it tries to parse only parts and if
anything changes, it only reparses the changed parts. But because it's not
finished (and still not working as I want), I won't document it any further.
"""
import re
from itertools import chain

from jedi._compatibility import use_metaclass
from jedi import settings
from jedi.parser import Parser
from jedi.parser import tree
from jedi import cache
from jedi import debug
from jedi.parser.tokenize import (source_tokens, NEWLINE,
                                  ENDMARKER, INDENT, DEDENT)

FLOWS = 'if', 'else', 'elif', 'while', 'with', 'try', 'except', 'finally', 'for'


class FastModule(tree.Module):
    """A module that aggregates several partially-parsed sub-modules."""
    type = 'file_input'

    def __init__(self, module_path):
        super(FastModule, self).__init__([])
        self.modules = []
        self.reset_caches()
        self.names_dict = {}
        self.path = module_path

    def reset_caches(self):
        """Drop all collected sub-modules and the memoized used-names dict."""
        self.modules = []
        try:
            del self._used_names  # Remove the used names cache.
        except AttributeError:
            pass  # It was never used.

    @property
    @cache.underscore_memoization
    def used_names(self):
        return MergedNamesDict([m.used_names for m in self.modules])

    @property
    def global_names(self):
        return [name for m in self.modules for name in m.global_names]

    @property
    def error_statement_stacks(self):
        return [e for m in self.modules for e in m.error_statement_stacks]

    def __repr__(self):
        return "<fast.%s: %s@%s-%s>" % (type(self).__name__, self.name,
                                        self.start_pos[0], self.end_pos[0])

    # To avoid issues with the `parser.Parser`, we need setters that do
    # nothing, because pickle comes along and sets those values.
    @global_names.setter
    def global_names(self, value):
        pass

    @error_statement_stacks.setter
    def error_statement_stacks(self, value):
        pass

    @used_names.setter
    def used_names(self, value):
        pass


class MergedNamesDict(object):
    """Read-only view that merges several names dicts into one mapping."""
    def __init__(self, dicts):
        self.dicts = dicts

    def __iter__(self):
        return iter(set(key for dct in self.dicts for key in dct))

    def __getitem__(self, value):
        return list(chain.from_iterable(dct.get(value, []) for dct in self.dicts))

    def items(self):
        dct = {}
        for d in self.dicts:
            for key, values in d.items():
                try:
                    dct_values = dct[key]
                    dct_values += values
                except KeyError:
                    dct[key] = list(values)
        return dct.items()

    def values(self):
        lst = []
        for dct in self.dicts:
            lst += dct.values()
        return lst


class CachedFastParser(type):
    """ This is a metaclass for caching `FastParser`. """
    def __call__(self, grammar, source, module_path=None):
        if not settings.fast_parser:
            return Parser(grammar, source, module_path)

        pi = cache.parser_cache.get(module_path, None)
        if pi is None or isinstance(pi.parser, Parser):
            p = super(CachedFastParser, self).__call__(grammar, source, module_path)
        else:
            p = pi.parser  # pi is a `cache.ParserCacheItem`
            p.update(source)
        return p


class ParserNode(object):
    """One cached parser result plus its position in the node tree."""
    def __init__(self, fast_module, parser, source):
        self._fast_module = fast_module
        self.parent = None
        self._node_children = []

        self.source = source
        self.hash = hash(source)
        self.parser = parser

        try:
            # With fast_parser we have either 1 subscope or only statements.
            self._content_scope = parser.module.subscopes[0]
        except IndexError:
            self._content_scope = parser.module
        else:
            self._rewrite_last_newline()

        # We need to be able to reset the original children of a parser.
        self._old_children = list(self._content_scope.children)

    def _rewrite_last_newline(self):
        """
        The ENDMARKER can contain a newline in the prefix. However this prefix
        really belongs to the function - respectively to the next function or
        parser node. If we don't rewrite that newline, we end up with a newline
        in the wrong position, i.e. at the end of the file instead of in the
        middle.
        """
        c = self._content_scope.children
        if tree.is_node(c[-1], 'suite'):  # In a simple_stmt there's no DEDENT.
            end_marker = self.parser.module.children[-1]
            # Set the DEDENT prefix instead of the ENDMARKER.
            c[-1].children[-1].prefix = end_marker.prefix
            end_marker.prefix = ''

    def __repr__(self):
        module = self.parser.module
        try:
            return '<%s: %s-%s>' % (type(self).__name__, module.start_pos, module.end_pos)
        except IndexError:
            # There's no module yet.
            return '<%s: empty>' % type(self).__name__

    def reset_node(self):
        """
        Removes changes that were applied in this class.
        """
        self._node_children = []
        scope = self._content_scope
        scope.children = list(self._old_children)
        try:
            # This works if it's a MergedNamesDict.
            # We are correcting it, because the MergedNamesDicts are artificial
            # and can change after closing a node.
            scope.names_dict = scope.names_dict.dicts[0]
        except AttributeError:
            pass

    def close(self):
        """
        Closes the current parser node. This means that after this no further
        nodes should be added anymore.
        """
        # We only need to replace the dict if multiple dictionaries are used:
        if self._node_children:
            dcts = [n.parser.module.names_dict for n in self._node_children]
            # Need to insert the own node as well.
            dcts.insert(0, self._content_scope.names_dict)
            self._content_scope.names_dict = MergedNamesDict(dcts)

    def parent_until_indent(self, indent=None):
        """Close this node (and ancestors) until one with a smaller indent."""
        if (indent is None or self._indent >= indent) and self.parent is not None:
            self.close()
            return self.parent.parent_until_indent(indent)
        return self

    @property
    def _indent(self):
        if not self.parent:
            return 0
        return self.parser.module.children[0].start_pos[1]

    def add_node(self, node, line_offset):
        """Adding a node means adding a node that was already added earlier"""
        # Changing the line offsets is very important, because if they don't
        # fit, all the start_pos values will be wrong.
        m = node.parser.module
        node.parser.position_modifier.line = line_offset
        self._fast_module.modules.append(m)
        node.parent = self
        self._node_children.append(node)

        # Insert parser objects into current structure. We only need to set the
        # parents and children in a good way.
        scope = self._content_scope
        for child in m.children:
            child.parent = scope
            scope.children.append(child)

        return node

    def all_sub_nodes(self):
        """
        Returns all nodes including nested ones.
        """
        for n in self._node_children:
            yield n
            for y in n.all_sub_nodes():
                yield y

    @cache.underscore_memoization  # Should only happen once!
    def remove_last_newline(self):
        self.parser.remove_last_newline()


class FastParser(use_metaclass(CachedFastParser)):
    _FLOWS_NEED_SPACE = 'if', 'elif', 'while', 'with', 'except', 'for'
    _FLOWS_NEED_COLON = 'else', 'try', 'except', 'finally'
    _keyword_re = re.compile('^[ \t]*(def |class |@|(?:%s)|(?:%s)\s*:)'
                             % ('|'.join(_FLOWS_NEED_SPACE),
                                '|'.join(_FLOWS_NEED_COLON)))

    def __init__(self, grammar, source, module_path=None):
        # set values like `tree.Module`.
        self._grammar = grammar
        self.module_path = module_path
        self._reset_caches()
        self.update(source)

    def _reset_caches(self):
        self.module = FastModule(self.module_path)
        self.current_node = ParserNode(self.module, self, '')

    def update(self, source):
        # For testing purposes: It is important that the number of parsers used
        # can be minimized. With these variables we can test against that.
        self.number_parsers_used = 0
        self.number_of_splits = 0
        self.number_of_misses = 0
        self.module.reset_caches()
        try:
            self._parse(source)
        except:
            # FastParser is cached, be careful with exceptions.
            self._reset_caches()
            raise

    def _split_parts(self, source):
        """
        Split the source code into different parts. This makes it possible to
        parse each part separately and therefore cache parts of the file and
        not everything.
        """
        def gen_part():
            text = ''.join(current_lines)
            del current_lines[:]
            self.number_of_splits += 1
            return text

        def just_newlines(current_lines):
            for line in current_lines:
                line = line.lstrip('\t \n\r')
                if line and line[0] != '#':
                    return False
            return True

        # Split only new lines. Distinction between \r\n is the tokenizer's
        # job.
        # It seems like there's no problem with form feed characters here,
        # because we're not counting lines.
        self._lines = source.splitlines(True)
        current_lines = []
        is_decorator = False
        # Use -1, because that indent is always smaller than any other.
        indent_list = [-1, 0]
        new_indent = False
        parentheses_level = 0
        flow_indent = None
        previous_line = None
        # All things within flows are simply being ignored.
        for i, l in enumerate(self._lines):
            # Handle backslash newline escaping.
            if l.endswith('\\\n') or l.endswith('\\\r\n'):
                if previous_line is not None:
                    previous_line += l
                else:
                    previous_line = l
                continue
            if previous_line is not None:
                l = previous_line + l
                previous_line = None

            # check for dedents
            s = l.lstrip('\t \n\r')
            indent = len(l) - len(s)
            if not s or s[0] == '#':
                current_lines.append(l)  # Just ignore comments and blank lines
                continue

            if new_indent:
                if indent > indent_list[-2]:
                    # Set the actual indent, not just the random old indent + 1.
                    indent_list[-1] = indent
                new_indent = False

            while indent <= indent_list[-2]:  # -> dedent
                indent_list.pop()
                # This automatically resets the flow_indent if there was a
                # dedent or a flow just on one line (with one simple_stmt).
                new_indent = False
                if flow_indent is None and current_lines and not parentheses_level:
                    yield gen_part()
                flow_indent = None

            # Check lines for functions/classes and split the code there.
            if flow_indent is None:
                m = self._keyword_re.match(l)
                if m:
                    # Strip whitespace and colon from flows as a check.
                    if m.group(1).strip(' \t\r\n:') in FLOWS:
                        if not parentheses_level:
                            flow_indent = indent
                    else:
                        if not is_decorator and not just_newlines(current_lines):
                            yield gen_part()
                        is_decorator = '@' == m.group(1)
                        if not is_decorator:
                            parentheses_level = 0
                            # The new indent needs to be higher
                            indent_list.append(indent + 1)
                            new_indent = True
                elif is_decorator:
                    is_decorator = False

            parentheses_level = \
                max(0, (l.count('(') + l.count('[') + l.count('{')
                        - l.count(')') - l.count(']') - l.count('}')))

            current_lines.append(l)
        if current_lines:
            yield gen_part()

    def _parse(self, source):
        """ :type source: str """
        added_newline = False
        if not source or source[-1] != '\n':
            # To be compatible with Pythons grammar, we need a newline at the
            # end. The parser would handle it, but since the fast parser abuses
            # the normal parser in various ways, we need to care for this
            # ourselves.
            source += '\n'
            added_newline = True

        next_line_offset = line_offset = 0
        start = 0
        nodes = list(self.current_node.all_sub_nodes())
        # Now we can reset the node, because we have all the old nodes.
        self.current_node.reset_node()
        last_end_line = 1

        for code_part in self._split_parts(source):
            next_line_offset += code_part.count('\n')
            # If the last code part parsed isn't equal to the current end_pos,
            # we know that the parser went further (`def` start in a
            # docstring). So just parse the next part.
            if line_offset + 1 == last_end_line:
                self.current_node = self._get_node(code_part, source[start:],
                                                   line_offset, nodes)
            else:
                # Means that some lines where not fully parsed. Parse it now.
                # This is a very rare case. Should only happen with very
                # strange code bits.
                self.number_of_misses += 1
                while last_end_line < next_line_offset + 1:
                    line_offset = last_end_line - 1
                    # We could calculate the src in a more complicated way to
                    # make caching here possible as well. However, this is
                    # complicated and error-prone. Since this is not very often
                    # called - just ignore it.
                    src = ''.join(self._lines[line_offset:])
                    self.current_node = self._get_node(code_part, src,
                                                       line_offset, nodes)
                    last_end_line = self.current_node.parser.module.end_pos[0]

                debug.dbg('While parsing %s, line %s slowed down the fast parser.',
                          self.module_path, line_offset + 1)

            line_offset = next_line_offset
            start += len(code_part)

            last_end_line = self.current_node.parser.module.end_pos[0]

        if added_newline:
            self.current_node.remove_last_newline()
        # Now that the for loop is finished, we still want to close all nodes.
        self.current_node = self.current_node.parent_until_indent()
        self.current_node.close()

        debug.dbg('Parsed %s, with %s parsers in %s splits.'
                  % (self.module_path, self.number_parsers_used,
                     self.number_of_splits))

    def _get_node(self, source, parser_code, line_offset, nodes):
        """
        Side effect: Alters the list of nodes.
        """
        indent = len(source) - len(source.lstrip('\t '))
        self.current_node = self.current_node.parent_until_indent(indent)

        h = hash(source)
        for index, node in enumerate(nodes):
            if node.hash == h and node.source == source:
                node.reset_node()
                nodes.remove(node)
                break
        else:
            tokenizer = FastTokenizer(parser_code)
            self.number_parsers_used += 1
            p = Parser(self._grammar, parser_code, self.module_path, tokenizer=tokenizer)

            end = line_offset + p.module.end_pos[0]
            used_lines = self._lines[line_offset:end - 1]
            code_part_actually_used = ''.join(used_lines)
            node = ParserNode(self.module, p, code_part_actually_used)

        self.current_node.add_node(node, line_offset)
        return node


class FastTokenizer(object):
    """
    Breaks when certain conditions are met, i.e. a new function or class opens.
    """
    def __init__(self, source):
        self.source = source
        self._gen = source_tokens(source)
        self._closed = False

        # fast parser options
        self.current = self.previous = NEWLINE, '', (0, 0)
        self._in_flow = False
        self._is_decorator = False
        self._first_stmt = True
        self._parentheses_level = 0
        self._indent_counter = 0
        self._flow_indent_counter = 0
        self._returned_endmarker = False
        self._expect_indent = False

    def __iter__(self):
        return self

    def next(self):
        """ Python 2 Compatibility """
        return self.__next__()

    def __next__(self):
        if self._closed:
            return self._finish_dedents()

        typ, value, start_pos, prefix = current = next(self._gen)
        if typ == ENDMARKER:
            self._closed = True
            self._returned_endmarker = True
            return current

        self.previous = self.current
        self.current = current

        if typ == INDENT:
            self._indent_counter += 1
            if not self._expect_indent and not self._first_stmt and not self._in_flow:
                # This does not mean that there is an actual flow, it means
                # that the INDENT is syntactically wrong.
                self._flow_indent_counter = self._indent_counter - 1
                self._in_flow = True
            self._expect_indent = False
        elif typ == DEDENT:
            self._indent_counter -= 1
            if self._in_flow:
                if self._indent_counter == self._flow_indent_counter:
                    self._in_flow = False
            else:
                self._closed = True
            return current

        if value in ('def', 'class') and self._parentheses_level \
                and re.search(r'\n[ \t]*\Z', prefix):
            # Account for the fact that an open parentheses before a function
            # will reset the parentheses counter, but new lines before will
            # still be ignored. So check the prefix.

            # TODO what about flow parentheses counter resets in the tokenizer?
            self._parentheses_level = 0
            return self._close()

        # Parentheses ignore the indentation rules. The other three stand for
        # new lines.
        if self.previous[0] in (NEWLINE, INDENT, DEDENT) \
                and not self._parentheses_level and typ not in (INDENT, DEDENT):
            if not self._in_flow:
                if value in FLOWS:
                    self._flow_indent_counter = self._indent_counter
                    self._first_stmt = False
                elif value in ('def', 'class', '@'):
                    # The values here are exactly the same check as in
                    # _split_parts, but this time with tokenize and therefore
                    # precise.
                    if not self._first_stmt and not self._is_decorator:
                        return self._close()

                    self._is_decorator = '@' == value
                    if not self._is_decorator:
                        self._first_stmt = False
                        self._expect_indent = True
                elif self._expect_indent:
                    return self._close()
                else:
                    self._first_stmt = False

        if value in '([{' and value:
            self._parentheses_level += 1
        elif value in ')]}' and value:
            # Ignore closing parentheses, because they are all
            # irrelevant for the indentation.
            self._parentheses_level = max(self._parentheses_level - 1, 0)
        return current

    def _close(self):
        if self._first_stmt:
            # Continue like nothing has happened, because we want to enter
            # the first class/function.
            if self.current[1] != '@':
                self._first_stmt = False
            return self.current
        else:
            self._closed = True
            return self._finish_dedents()

    def _finish_dedents(self):
        if self._indent_counter:
            self._indent_counter -= 1
            return DEDENT, '', self.current[2], ''
        elif not self._returned_endmarker:
            self._returned_endmarker = True
            return ENDMARKER, '', self.current[2], self._get_prefix()
        else:
            raise StopIteration

    def _get_prefix(self):
        """
        We're using the current prefix for the endmarker to not lose any
        information. However we care about "lost" lines. The prefix of the
        current line (indent) will always be included in the current line.
        """
        cur = self.current
        while cur[0] == DEDENT:
            cur = next(self._gen)
        prefix = cur[3]
        # \Z for the end of the string. $ is bugged, because it has the
        # same behavior with or without re.MULTILINE.
        # NOTE(review): the final statement is cut off in the scraped source;
        # restored from the comment above (strip the last, unterminated line
        # from the prefix) — confirm against the upstream jedi project.
        return re.sub(r'[^\n]+\Z', '', prefix)
help.py
Source:help.py
import inspect
import logging

import autobot

LOG = logging.getLogger(__name__)


class HelpPlugin(autobot.Plugin):
    """Collects docstrings from all loaded plugins and answers 'help'."""

    def __init__(self, factory):
        super().__init__(factory)
        # 'events': the autobot.event module; 'plugins': name -> _PluginDoc.
        self.docs = {
            'events': None,
            'plugins': {}
        }

    @autobot.subscribe_to(autobot.event.ALL_PLUGINS_LOADED)
    def _load_events(self, context, event_args):
        self.docs['events'] = autobot.event

    @autobot.subscribe_to(autobot.event.ALL_PLUGINS_LOADED)
    def _load_plugin_docs(self, context, event_args):
        self.docs['plugins'] = {}
        plugin_classes = event_args['plugins']
        LOG.debug('Loading help for classes: %s', plugin_classes.keys())
        for plugin_class in plugin_classes.values():
            docs = _PluginDoc(plugin_class)
            if docs:
                self.docs['plugins'][docs.plugin_name] = docs

    @autobot.respond_to(r'^(H|h)elp')
    def print_user_help(self, message):
        msg = 'General Information\n'
        for plugin in self.docs['plugins'].values():
            msg += '%s\n%s' % (
                plugin.plugin_name,
                plugin.plugin_help
            )
            if plugin.method_help:
                msg += '\nMethods:\n'
                for m_help in plugin.method_help:
                    msg += '%s\nPatterns: %s\n%s' % (
                        m_help['name'],
                        ', '.join(m_help['patterns']),
                        m_help['help']
                    )
        message.reply(msg)

    @autobot.respond_to(r'^dev(eloper)? help')
    def print_developer_help(self, message):
        message.reply(repr(self.docs))


class _PluginDoc(object):
    """Extracted help text (plugin docstring + decorated-method docs)."""

    def __init__(self, cls):
        self.plugin_name = cls.__class__.__name__.replace('Plugin', '')
        self.plugin_help = cls.__doc__
        if self.plugin_help:
            self.plugin_help = self.plugin_help.strip()
        self.method_help = self._parse_methods(cls.__class__)

    def _parse_methods(self, cls):
        method_help = []
        for _, method in inspect.getmembers(cls, predicate=inspect.isfunction):
            # Checking for _is_decorator means only including decorated methods
            if method.__doc__ and hasattr(method, '_is_decorator'):
                LOG.debug('Found method %s with help text!', method.__name__)
                # Find all patterns that executes the method
                objs = method._callback_objects
                matchers = [m for m in objs if isinstance(m, autobot.Matcher)]
                patterns = [m.pattern for m in matchers if method == m._func]
                method_help.append({
                    'name': method.__name__,
                    'patterns': patterns,
                    'help': method.__doc__.strip()})
        return method_help

    def __repr__(self):
        repr_dict = {
            'plugin_name': self.plugin_name,
            'plugin_help': self.plugin_help,
            'method_help': self.method_help
        }
        return repr(repr_dict)

    def __bool__(self):
        # NOTE(review): the body of __bool__ is truncated in the scraped
        # source; restored as "truthy when any help content exists", which is
        # what the `if docs:` check in _load_plugin_docs appears to rely on —
        # confirm against the original project.
        return bool(self.plugin_help or self.method_help)
Learn to execute automation testing from scratch with the LambdaTest Learning Hub. Right from setting up the prerequisites to running your first automation test, to following best practices and diving deeper into advanced test scenarios, the LambdaTest Learning Hub compiles a list of step-by-step guides to help you become proficient with different test automation frameworks, e.g. Selenium, Cypress, TestNG, etc.
You can also refer to the video tutorials on the LambdaTest YouTube channel to get step-by-step demonstrations from industry experts.
Get 100 automation test minutes FREE!