Best Python code snippet using pytest
Source: spydist.py
...
            trace("master: interrupted")
            getattr(conn.root, "shutdown")()
            time.sleep(5)
            os._exit(0)

    def pytest_terminal_summary(self, terminalreporter):
        debug("master: pytest_terminal_summary", terminalreporter)


class BatchSlave(object):
    def __init__(self, config, logs_path):
        self.config = config
        self.items = []
        self.logs_path = logs_path

    @pytest.mark.trylast
    def pytest_sessionstart(self, session):
        debug("slave: pytest_sessionstart", session)

    def pytest_sessionfinish(self, session):
        debug("slave: pytest_sessionfinish", session)

    @pytest.hookimpl(trylast=True)
    def pytest_collection_modifyitems(self, session, config, items):
        debug("slave: pytest_collection_modifyitems", session, config, items)
        self.items = items

    def pytest_runtestloop(self):
        def search_nodeid(entries, nodeid):
            for ent in entries:
                if nodeid == ent.nodeid:
                    return ent
            return None

        def finish_test(item):
            getattr(conn.root, "finish_test")(item.nodeid)

        def get_test(entries):
            while 1:
                nodeid = getattr(conn.root, "get_test")()
                if not nodeid:
                    break
                item = search_nodeid(entries, nodeid)
                if item:
                    return item
            return None

        # connect to batch server
        conn = None
        for _ in range(0, 10):
            try:
                filename = os.path.join(self.logs_path, "..", "batch.server")
                lines = utils.read_lines(filename)
                port = int(lines[0])
                conn = rpyc.connect("127.0.0.1", port)
                if conn and conn.root:
                    break
                time.sleep(2)
            except Exception as exp:
                print("connect to batch server", exp, filename, port)
                time.sleep(2)
        try:
            item_list = []
            # wait for master ready
            is_ready = getattr(conn.root, "is_ready")
            while not is_ready(os.getpid()):
                trace("slave: waiting for master")
                time.sleep(2)
            # get first item
            item = get_test(self.items)
            if item:
                item_list.append(item)
            while 1:
                # check if there is something to do
                if not item_list:
                    break
                # get next item
                item = get_test(self.items)
                if item:
                    item_list.append(item)
                # get the item and next for the current execution
                [item, nextitem] = [item_list.pop(0), None]
                if item_list:
                    nextitem = item_list[-1]
                debug("slave: pytest_runtestloop", item, nextitem)
                self.config.hook.pytest_runtest_protocol(item=item, nextitem=nextitem)
                finish_test(item)
        except KeyboardInterrupt:
            trace("slave: interrupted")
        conn.close()
        trace("")
        os._exit(0)

    def pytest_terminal_summary(self, terminalreporter):
        debug("slave: pytest_terminal_summary", terminalreporter)


def get_impl_type():
    return 0  # not yet supported
    # new_bach_run = env.get("SPYTEST_BATCH_RUN_NEW")
    # if new_bach_run == "2":
    #     return 2
    # return 1 if bool(new_bach_run) else 0


def shutdown():
    if get_impl_type() == 0:
        return
    if wa.server: wa.server.stop()
    if wa.service: wa.service.close()


def slave_main(index, testbed_file, logs_path):
    os.environ["PYTEST_XDIST_WORKER"] = str(index)
...
Source: test_plugin.py
...
        options = parser.getgroup("terminal reporting").options
        assert options[0].names() == ["--timer-top-n"]
        assert options[1].names() == ["--timer-no-color"]
        assert options[2].names() == ["--timer-filter"]

    def test_pytest_terminal_summary(self, mocker, tr_mock):
        plugin.pytest_terminal_summary(terminalreporter=tr_mock)
        tr_mock.write_line.assert_has_calls(
            [
                mocker.call("[success] 60.08% 1: 3.0100s"),
                mocker.call("[success] 20.16% 2: 1.0100s"),
                mocker.call("[success] 19.76% 3: 0.9900s"),
            ]
        )

    def test_pytest_terminal_summary_with_timer_top_n(self, mocker, tr_mock):
        tr_mock.config.option.timer_top_n = 1
        plugin.pytest_terminal_summary(terminalreporter=tr_mock)
        tr_mock.write_line.assert_has_calls(
            [mocker.call("[success] 60.08% 1: 3.0100s")]
        )

    def test_pytest_terminal_summary_with_timer_filter_error(self, mocker, tr_mock):
        tr_mock.config.option.timer_filter = "error"
        plugin.pytest_terminal_summary(terminalreporter=tr_mock)
        tr_mock.write_line.assert_has_calls(
            [mocker.call("[success] 60.08% 1: 3.0100s")]
        )

    def test_pytest_terminal_summary_with_timer_filter_warning(self, mocker, tr_mock):
        tr_mock.config.option.timer_filter = "warning"
        plugin.pytest_terminal_summary(terminalreporter=tr_mock)
        tr_mock.write_line.assert_has_calls(
            [mocker.call("[success] 20.16% 2: 1.0100s")]
        )

    def test_pytest_terminal_summary_with_timer_filter_error_warning(
        self, mocker, tr_mock
    ):
        tr_mock.config.option.timer_filter = "error,warning"
        plugin.pytest_terminal_summary(terminalreporter=tr_mock)
        tr_mock.write_line.assert_has_calls(
            [
                mocker.call("[success] 60.08% 1: 3.0100s"),
                mocker.call("[success] 20.16% 2: 1.0100s"),
            ]
        )

    def test_pytest_terminal_summary_with_user_warning(self, mocker, tr_mock):
        warnings.warn("Test Warning to be used in tests")
        plugin.pytest_terminal_summary(terminalreporter=tr_mock)
        tr_mock.write_line.assert_has_calls(
            [
                mocker.call("[success] 60.08% 1: 3.0100s"),
                mocker.call("[success] 20.16% 2: 1.0100s"),
                mocker.call("[success] 19.76% 3: 0.9900s"),
            ]
...
Source: test_terminal_report.py
...
    def test_TerminalSummary_NoErrorsNoFailures_EmptyReport(self, mock_add_commands):
        mock_report = mock.MagicMock()
        mock_report.stats.get.return_value = []
        mock_config = mock.MagicMock()
        terminal_report.pytest_terminal_summary(mock_report, 0, mock_config)
        mock_add_commands.assert_not_called()
        mock_report.config.getoption.assert_not_called()
        mock_report.section.assert_not_called()

    @mock.patch('ly_test_tools._internal.pytest_plugin.terminal_report._add_commands')
    def test_TerminalSummary_ErrorsAndFailures_SectionsAdded(self, mock_add_commands):
        mock_report = mock.MagicMock()
        mock_node = mock.MagicMock()
        mock_node.nodeid = 'something'
        mock_report.stats.get.return_value = [mock_node, mock_node]
        mock_config = mock.MagicMock()
        terminal_report.pytest_terminal_summary(mock_report, 0, mock_config)
        assert len(mock_add_commands.mock_calls) == 2
        mock_report.config.getoption.assert_called()
        mock_report.section.assert_called_once()

    @mock.patch('ly_test_tools._internal.pytest_plugin.terminal_report._add_commands', mock.MagicMock())
    @mock.patch('os.path.basename')
    def test_TerminalSummary_Failures_CallsWithBasename(self, mock_basename):
        mock_report = mock.MagicMock()
        mock_node = mock.MagicMock()
        mock_base = 'something'
        node_id = os.path.join('C:', mock_base)
        mock_node.nodeid = node_id
        mock_report.stats.get.side_effect = [[mock_node], []]  # first item is failure list
        mock_config = mock.MagicMock()
        terminal_report.pytest_terminal_summary(mock_report, 0, mock_config)
        mock_basename.assert_called_with(node_id)

    @mock.patch('ly_test_tools._internal.pytest_plugin.terminal_report._add_commands', mock.MagicMock())
    @mock.patch('os.path.basename')
    def test_TerminalSummary_Errors_CallsWithBasename(self, mock_basename):
        mock_report = mock.MagicMock()
        mock_node = mock.MagicMock()
        mock_base = 'something'
        node_id = os.path.join('C:', mock_base)
        mock_node.nodeid = node_id
        mock_report.stats.get.side_effect = [[], [mock_node]]  # second item is error list
        mock_config = mock.MagicMock()
        terminal_report.pytest_terminal_summary(mock_report, 0, mock_config)
...
Source: travis_fold.py
...
import pytest

fold_plugins = {'_cov': 'Coverage report', 'flaky': 'Flaky report'}


def terminal_summary_wrapper(original, plugin_name):
    text = fold_plugins[plugin_name]

    def pytest_terminal_summary(terminalreporter):
        terminalreporter.write('travis_fold:start:plugin.{}\n{}\n'.format(plugin_name, text))
        original(terminalreporter)
        terminalreporter.write('travis_fold:end:plugin.{}\n'.format(plugin_name))

    return pytest_terminal_summary


@pytest.mark.trylast
def pytest_configure(config):
    for hookimpl in config.pluginmanager.hook.pytest_terminal_summary._nonwrappers:
        if hookimpl.plugin_name in fold_plugins.keys():
            hookimpl.function = terminal_summary_wrapper(hookimpl.function,
                                                         hookimpl.plugin_name)


terminal = None
previous_name = None
failed = set()
durations = defaultdict(int)
...
Output ASCII art to console on successful pytest run
There are lots of places where you can print your own stuff in pytest: select an appropriate hook from the hooks list and override it, adding your own printing. To spice the examples up a bit, I will print some system info using a screenfetch wrapper function (saved in utils.py, since the conftest examples below import it from there):
import shutil
import subprocess

def screenfetch():
    # locate the screenfetch executable on PATH and capture its output, if available
    exe = shutil.which('screenfetch')
    out = ''
    if exe:
        out = subprocess.run(exe, stdout=subprocess.PIPE, universal_newlines=True).stdout
    return out
Create a file conftest.py in your project root dir with the following contents:
from utils import screenfetch

def pytest_unconfigure(config):
    print(screenfetch())
Result: the screenfetch output is printed after the test session finishes.
If you want a conditional print on successful test runs only, use the pytest_sessionfinish hook to store the exit code:

def pytest_sessionfinish(session, exitstatus):
    session.config.exitstatus = exitstatus

def pytest_unconfigure(config):
    if config.exitstatus == 0:
        print(screenfetch())
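Since the original question asks for ASCII art on a successful run only, the same check can also live in a single hook: pytest_terminal_summary receives the exit status directly. A minimal sketch, with a placeholder art string of my own:

# conftest.py
ASCII_ART = r'''
  \o/
   |    all tests passed!
  / \
'''

def pytest_terminal_summary(terminalreporter, exitstatus, config):
    if exitstatus == 0:  # exit status 0 means the whole run succeeded
        terminalreporter.ensure_newline()
        terminalreporter.write(ASCII_ART)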
Some more examples. Print in the terminal summary section:
# conftest.py
from utils import screenfetch

def pytest_terminal_summary(terminalreporter, exitstatus, config):
    terminalreporter.ensure_newline()
    terminalreporter.write(screenfetch())
Print before pytest output starts:

# conftest.py
from utils import screenfetch

def pytest_configure(config):
    print(screenfetch())
Print in pytest's header info:

# conftest.py
from utils import screenfetch

def pytest_report_header(config, startdir):
    return screenfetch()
Print right after test collection and after each test's teardown:

# conftest.py
from utils import screenfetch

def pytest_collection_modifyitems(session, items):
    terminalreporter = session.config.pluginmanager.get_plugin('terminalreporter')
    terminalreporter.ensure_newline()
    terminalreporter.write(screenfetch())

def pytest_report_teststatus(report, config):
    if report.when == 'teardown':  # you may e.g. also check the outcome here to filter passed or failed tests only
        terminalreporter = config.pluginmanager.get_plugin('terminalreporter')
        terminalreporter.ensure_newline()
        terminalreporter.write(screenfetch())
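As the inline comment above hints, you can also look at the report outcome to restrict the output to passing tests. A small sketch under that assumption, checking the call phase, whose report carries the actual test result:

def pytest_report_teststatus(report, config):
    # emit output only after test calls that actually passed
    if report.when == 'call' and report.outcome == 'passed':
        terminalreporter = config.pluginmanager.get_plugin('terminalreporter')
        terminalreporter.ensure_newline()
        terminalreporter.write(screenfetch())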
Note that I use the terminalreporter plugin instead of plain printing where possible; this is how pytest itself emits its output.
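The terminal reporter also accepts the same markup keywords pytest uses for its own colored output, so styled messages need no extra dependency. A hedged sketch (flags such as green, red and bold are markup names accepted by pytest's terminal writer):

# conftest.py
def pytest_terminal_summary(terminalreporter, exitstatus, config):
    # write_line forwards markup keywords to the underlying terminal writer
    if exitstatus == 0:
        terminalreporter.write_line('All tests passed!', green=True, bold=True)
    else:
        terminalreporter.write_line('Some tests did not pass.', red=True)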