Best Python code snippet using avocado_python
runtime.py
Source: runtime.py
from copy import deepcopy
from enum import Enum
from itertools import chain

from avocado.core.dispatcher import TestPreDispatcher
from avocado.core.nrunner.task import Task
from avocado.core.test_id import TestID
from avocado.core.varianter import dump_variant


class RuntimeTaskStatus(Enum):
    WAIT_DEPENDENCIES = "WAITING DEPENDENCIES"
    WAIT = "WAITING"
    FINISHED = "FINISHED"
    TIMEOUT = "FINISHED TIMEOUT"
    IN_CACHE = "FINISHED IN CACHE"
    FAILFAST = "FINISHED FAILFAST"
    FAIL_TRIAGE = "FINISHED WITH FAILURE ON TRIAGE"
    FAIL_START = "FINISHED FAILING TO START"
    STARTED = "STARTED"

    @staticmethod
    def finished_statuses():
        return [
            status
            for _, status in RuntimeTaskStatus.__members__.items()
            if "FINISHED" in status.value
        ]


class RuntimeTask:
    """Task with extra status information on its life cycle status.

    The :class:`avocado.core.nrunner.Task` class contains information
    that is necessary to describe its persistence and execution by itself.
    This class wraps a :class:`avocado.core.nrunner.Task`, with extra
    information about its execution by a spawner within a state machine.
    """

    def __init__(self, task):
        """Instantiates a new RuntimeTask.

        :param task: The task to keep additional information about
        :type task: :class:`avocado.core.nrunner.Task`
        """
        #: The :class:`avocado.core.nrunner.Task`
        self.task = task
        #: Additional descriptive information about the task status
        self.status = None
        #: Information about task result when it is finished
        self.result = None
        #: Timeout limit for the completion of the task execution
        self.execution_timeout = None
        #: A handle that may be set by a spawner, and that may be
        #: spawner implementation specific, to keep track of the task
        #: execution. This may be a PID, a container ID, a FQDN+PID
        #: etc.
        self.spawner_handle = None
        #: The result of the spawning of a Task
        self.spawning_result = None
        self.dependencies = []

    def __repr__(self):
        if self.status is None:
            return f'<RuntimeTask Task Identifier: "{self.task.identifier}">'
        else:
            return (
                f'<RuntimeTask Task Identifier: "{self.task.identifier}" '
                f'Status: "{self.status}">'
            )

    def __hash__(self):
        return hash(self.task.identifier)

    def __eq__(self, other):
        if isinstance(other, RuntimeTask):
            return hash(self) == hash(other)
        return False

    def are_dependencies_finished(self):
        for dependency in self.dependencies:
            if dependency.status not in RuntimeTaskStatus.finished_statuses():
                return False
        return True

    def get_finished_dependencies(self):
        """Returns all dependencies which already finished."""
        return [
            dep
            for dep in self.dependencies
            if dep.status in RuntimeTaskStatus.finished_statuses()
        ]

    def can_run(self):
        if not self.are_dependencies_finished():
            return False
        for dependency in self.dependencies:
            if dependency.result != "pass":
                return False
        return True

    @classmethod
    def from_runnable(
        cls,
        runnable,
        no_digits,
        index,
        variant,
        test_suite_name=None,
        status_server_uri=None,
        job_id=None,
    ):
        """Creates runtime task for test from runnable

        :param runnable: the "description" of what the task should run.
        :type runnable: :class:`avocado.core.nrunner.Runnable`
        :param no_digits: number of digits of the test uid
        :type no_digits: int
        :param index: index of tests inside test suite
        :type index: int
        :param test_suite_name: test suite name which this test is related to
        :type test_suite_name: str
        :param status_server_uri: the URIs for the status servers that this
                                  task should send updates to.
        :type status_server_uri: list
        :param job_id: the ID of the job, for authenticating messages that get
                       sent to the destination job's status server and will
                       make it into the job's results.
        :type job_id: str
        :returns: RuntimeTask of the test from runnable
        """
        # create test ID
        if test_suite_name:
            prefix = f"{test_suite_name}-{index}"
        else:
            prefix = index
        test_id = TestID(prefix, runnable.identifier, variant, no_digits)
        # inject variant on runnable
        runnable.variant = dump_variant(variant)
        # handles the test task
        task = Task(
            runnable, identifier=test_id, status_uris=status_server_uri, job_id=job_id
        )
        return cls(task)


class PreRuntimeTask(RuntimeTask):
    @classmethod
    def from_runnable(
        cls, pre_runnable, status_server_uri=None, job_id=None
    ):  # pylint: disable=W0221
        """Creates runtime task for pre_test plugin from runnable

        :param pre_runnable: the "description" of what the task should run.
        :type pre_runnable: :class:`avocado.core.nrunner.Runnable`
        :param status_server_uri: the URIs for the status servers that this
                                  task should send updates to.
        :type status_server_uri: list
        :param job_id: the ID of the job, for authenticating messages that get
                       sent to the destination job's status server and will
                       make it into the job's results.
        :type job_id: str
        :returns: RuntimeTask of the test from runnable
        """
        name = f'{pre_runnable.kind}-{pre_runnable.kwargs.get("name")}'
        prefix = 0
        # the human UI works with TestID objects, so we need to
        # use it to name Task
        task_id = TestID(prefix, name)
        # creates the dependency task
        task = Task(
            pre_runnable,
            identifier=task_id,
            status_uris=status_server_uri,
            category="pre_test",
            job_id=job_id,
        )
        return cls(task)

    @classmethod
    def get_pre_tasks_from_runnable(cls, runnable, status_server_uri=None, job_id=None):
        """Creates runtime tasks for preTest task from runnable

        :param runnable: the "description" of what the task should run.
        :type runnable: :class:`avocado.core.nrunner.Runnable`
        :param status_server_uri: the URIs for the status servers that this
                                  task should send updates to.
        :type status_server_uri: list
        :param job_id: the ID of the job, for authenticating messages that get
                       sent to the destination job's status server and will
                       make it into the job's results.
        :type job_id: str
        :returns: Pre RuntimeTasks of the dependencies from runnable
        :rtype: list
        """
        pre_runnables = list(
            chain.from_iterable(
                TestPreDispatcher().map_method_with_return(
                    "pre_test_runnables", runnable
                )
            )
        )
        pre_test_tasks = []
        for pre_runnable in pre_runnables:
            pre_task = cls.from_runnable(pre_runnable, status_server_uri, job_id)
            pre_test_tasks.append(pre_task)
        return pre_test_tasks


class RuntimeTaskGraph:
    """Graph representing dependencies between runtime tasks."""

    def __init__(self, tests, test_suite_name, status_server_uri, job_id):
        """Instantiates a new RuntimeTaskGraph.

        From the list of tests, it will create runtime tasks and connect them
        inside the graph by their dependencies.

        :param tests: variants of runnables from test suite
        :type tests: list
        :param test_suite_name: test suite name which this test is related to
        :type test_suite_name: str
        :param status_server_uri: the URIs for the status servers that this
                                  task should send updates to.
        :type status_server_uri: list
        :param job_id: the ID of the job, for authenticating messages that get
                       sent to the destination job's status server and will
                       make it into the job's results.
        :type job_id: str
        """
        self.graph = {}
        # create graph
        no_digits = len(str(len(tests)))
        for index, (runnable, variant) in enumerate(tests, start=1):
            runnable = deepcopy(runnable)
            runtime_test = RuntimeTask.from_runnable(
                runnable,
                no_digits,
                index,
                variant,
                test_suite_name,
                status_server_uri,
                job_id,
            )
            self.graph[runtime_test] = runtime_test
            # with --dry-run we don't want to run dependencies
            if runnable.kind != "dry-run":
                pre_tasks = PreRuntimeTask.get_pre_tasks_from_runnable(
                    runnable, status_server_uri, job_id
                )
                if pre_tasks:
                    pre_tasks.append(runtime_test)
                    self._connect_tasks(pre_tasks)

    def _connect_tasks(self, tasks):
        for dependency, task in zip(tasks, tasks[1:]):
            self.graph[task] = task
            self.graph[dependency] = dependency
            task.dependencies.append(dependency)

    def get_tasks_in_topological_order(self):
        """Computes the topological order of runtime tasks in graph

        :returns: runtime tasks in topological order
        :rtype: list
        """

        def topological_order_util(vertex, visited, topological_order):
            visited[vertex] = True
            for v in vertex.dependencies:
                if not visited[v]:
                    topological_order_util(v, visited, topological_order)
            topological_order.append(vertex)

        visited = dict.fromkeys(self.graph, False)
        topological_order = []
        for vertex in self.graph.values():
            if not visited[vertex]:
                topological_order_util(vertex, visited, topological_order)
...
dependency.py
Source: dependency.py
...20 """21 name = "dependency"22 description = "Dependency resolver for tests with dependencies"23 @staticmethod24 def pre_test_runnables(test_runnable): # pylint: disable=W022125 if not test_runnable.dependencies:26 return []27 dependency_runnables = []28 for dependency in test_runnable.dependencies:29 # make a copy to change the dictionary and do not affect the30 # original `dependencies` dictionary from the test31 dependency_copy = dependency.copy()32 kind = dependency_copy.pop("type")33 uri = dependency_copy.pop("uri", None)34 args = dependency_copy.pop("args", ())35 dependency_runnable = Runnable(36 kind, uri, *args, config=test_runnable.config, **dependency_copy37 )38 dependency_runnables.append(dependency_runnable)...
test_dependencies_resolver.py
Source: test_dependencies_resolver.py
...11 {"type": "package", "name": "foo"},12 {"type": "package", "name": "bar"},13 ],14 )15 dependency_runnables = DependencyResolver.pre_test_runnables(runnable)16 kind = "package"17 self.assertEqual(kind, dependency_runnables[0].kind)18 self.assertEqual(kind, dependency_runnables[1].kind)19 name0 = "foo"20 name1 = "bar"21 self.assertEqual(name0, dependency_runnables[0].kwargs["name"])22 self.assertEqual(name1, dependency_runnables[1].kwargs["name"])23 self.assertIsNone(dependency_runnables[0].kwargs.get("type"))24if __name__ == "__main__":...