Best Python code snippet using avocado_python
Source: monitors_test.py
...
      self._actual_log(*args, **kwargs)

    logging.info = mockLog

  def tearDown(self):
    logging.info = self._actual_log

  def _run_monitor(self,
                   monitor,
                   num_epochs=3,
                   num_steps_per_epoch=10,
                   pass_max_steps=True):
    if pass_max_steps:
      max_steps = num_epochs * num_steps_per_epoch - 1
    else:
      max_steps = None
    monitor.begin(max_steps=max_steps)
    for epoch in xrange(num_epochs):
      monitor.epoch_begin(epoch)
      should_stop = False
      step = epoch * num_steps_per_epoch
      next_epoch_step = step + num_steps_per_epoch
      while (not should_stop) and (step < next_epoch_step):
        tensors = monitor.step_begin(step)
        output = ops.get_default_session().run(tensors) if tensors else {}
        output = dict(
            zip([t.name if isinstance(t, ops.Tensor) else t for t in tensors],
                output))
        should_stop = monitor.step_end(step=step, output=output)
        monitor.post_step(step=step, session=None)
        step += 1
      monitor.epoch_end(epoch)
    monitor.end()

  def test_base_monitor(self):
    with ops.Graph().as_default() as g, self.test_session(g):
      self._run_monitor(learn.monitors.BaseMonitor())

  def test_every_0(self):
    monitor = _MyEveryN(every_n_steps=0, first_n_steps=-1)
    with ops.Graph().as_default() as g, self.test_session(g):
      self._run_monitor(monitor, num_epochs=3, num_steps_per_epoch=10)
      expected_steps = list(range(30))
      self.assertAllEqual(expected_steps, monitor.steps_begun)
      self.assertAllEqual(expected_steps, monitor.steps_ended)
      self.assertAllEqual(expected_steps, monitor.post_steps)

  def test_every_1(self):
    monitor = _MyEveryN(every_n_steps=1, first_n_steps=-1)
    with ops.Graph().as_default() as g, self.test_session(g):
      self._run_monitor(monitor, num_epochs=3, num_steps_per_epoch=10)
      expected_steps = list(range(1, 30))
      self.assertEqual(expected_steps, monitor.steps_begun)
      self.assertEqual(expected_steps, monitor.steps_ended)
      self.assertEqual(expected_steps, monitor.post_steps)

  def test_every_2(self):
    monitor = _MyEveryN(every_n_steps=2, first_n_steps=-1)
    with ops.Graph().as_default() as g, self.test_session(g):
      self._run_monitor(monitor, num_epochs=3, num_steps_per_epoch=10)
      expected_steps = list(range(2, 29, 2)) + [29]
      self.assertEqual(expected_steps, monitor.steps_begun)
      self.assertEqual(expected_steps, monitor.steps_ended)
      self.assertEqual(expected_steps, monitor.post_steps)

  def test_every_8(self):
    monitor = _MyEveryN(every_n_steps=8, first_n_steps=2)
    with ops.Graph().as_default() as g, self.test_session(g):
      self._run_monitor(monitor, num_epochs=3, num_steps_per_epoch=10)
      expected_steps = [0, 1, 2, 10, 18, 26, 29]
      self.assertEqual(expected_steps, monitor.steps_begun)
      self.assertEqual(expected_steps, monitor.steps_ended)
      self.assertEqual(expected_steps, monitor.post_steps)

  def test_every_8_no_max_steps(self):
    monitor = _MyEveryN(every_n_steps=8, first_n_steps=2)
    with ops.Graph().as_default() as g, self.test_session(g):
      self._run_monitor(
          monitor, num_epochs=3, num_steps_per_epoch=10, pass_max_steps=False)
      begin_end_steps = [0, 1, 2, 10, 18, 26]
      post_steps = [0, 1, 2, 10, 18, 26, 29]
      self.assertEqual(begin_end_steps, monitor.steps_begun)
      self.assertEqual(begin_end_steps, monitor.steps_ended)
      self.assertEqual(post_steps, monitor.post_steps)

  def test_every_8_recovered_after_step_begin(self):
    monitor = _MyEveryN(every_n_steps=8)
    with ops.Graph().as_default() as g, self.test_session(g):
      for step in [8, 16]:
        monitor.step_begin(step)
        monitor.step_begin(step)
        monitor.step_end(step, output=None)
        monitor.post_step(step, session=None)
      # It should call begin again since end was not called.
      self.assertEqual([8, 8, 16, 16], monitor.steps_begun)
      self.assertEqual([8, 16], monitor.steps_ended)
      self.assertEqual([8, 16], monitor.post_steps)

  def test_every_8_recovered_after_step_end(self):
    monitor = _MyEveryN(every_n_steps=8)
    with ops.Graph().as_default() as g, self.test_session(g):
      for step in [8, 16]:
        monitor.step_begin(step)
        monitor.step_end(step, output=None)
        monitor.post_step(step, session=None)
        monitor.step_begin(step)
        monitor.step_end(step, output=None)
        monitor.post_step(step, session=None)
      # It should not call begin twice since end was called.
      self.assertEqual([8, 16], monitor.steps_begun)
      self.assertEqual([8, 16], monitor.steps_ended)
      self.assertEqual([8, 16], monitor.post_steps)

  def test_every_8_call_post_step_at_the_end(self):
    monitor = _MyEveryN(every_n_steps=8)
    with ops.Graph().as_default() as g, self.test_session(g):
      monitor.begin()
      for step in [8, 16]:
        monitor.step_begin(step)
        monitor.step_end(step, output=None)
        monitor.post_step(step, session=None)
      monitor.step_begin(19)
      monitor.step_end(19, output=None)
      monitor.post_step(19, session=None)
      monitor.end(session=None)
      # It should not call begin twice since end was called.
      self.assertEqual([8, 16], monitor.steps_begun)
      self.assertEqual([8, 16], monitor.steps_ended)
      self.assertEqual([8, 16, 19], monitor.post_steps)

  def test_every_8_call_post_step_should_not_be_called_twice(self):
    monitor = _MyEveryN(every_n_steps=8)
    with ops.Graph().as_default() as g, self.test_session(g):
      monitor.begin()
      for step in [8, 16]:
        monitor.step_begin(step)
        monitor.step_end(step, output=None)
        monitor.post_step(step, session=None)
      monitor.step_begin(16)
      monitor.step_end(16, output=None)
      monitor.post_step(16, session=None)
      monitor.end(session=None)
      # It should not call begin twice since end was called.
      self.assertEqual([8, 16], monitor.steps_begun)
      self.assertEqual([8, 16], monitor.steps_ended)
      self.assertEqual([8, 16], monitor.post_steps)

  def test_print(self):
    with ops.Graph().as_default() as g, self.test_session(g):
      t = constant_op.constant(42.0, name='foo')
      self._run_monitor(learn.monitors.PrintTensor(tensor_names=[t.name]))
      self.assertRegexpMatches(str(self.logged_message), t.name)

  def test_logging_trainable(self):
    with ops.Graph().as_default() as g, self.test_session(g):
      var = variables.Variable(constant_op.constant(42.0), name='foo')
      var.initializer.run()
      cof = constant_op.constant(1.0)
      loss = math_ops.subtract(
          math_ops.multiply(var, cof), constant_op.constant(1.0))
      train_step = gradient_descent.GradientDescentOptimizer(0.5).minimize(loss)
      ops.get_default_session().run(train_step)
      self._run_monitor(learn.monitors.LoggingTrainable('foo'))
      self.assertRegexpMatches(str(self.logged_message), var.name)

  def test_summary_saver(self):
    with ops.Graph().as_default() as g, self.test_session(g):
      log_dir = 'log/dir'
      summary_writer = testing.FakeSummaryWriter(log_dir, g)
      var = variables.Variable(0.0)
      var.initializer.run()
      tensor = state_ops.assign_add(var, 1.0)
      summary_op = summary.scalar('my_summary', tensor)
      self._run_monitor(
          learn.monitors.SummarySaver(
              summary_op=summary_op,
              save_steps=8,
              summary_writer=summary_writer),
          num_epochs=3,
          num_steps_per_epoch=10)
      summary_writer.assert_summaries(
          test_case=self,
          expected_logdir=log_dir,
          expected_graph=g,
          expected_summaries={
              0: {'my_summary': 1.0},
              1: {'my_summary': 2.0},
              9: {'my_summary': 3.0},
              17: {'my_summary': 4.0},
              25: {'my_summary': 5.0},
              29: {'my_summary': 6.0},
          })

  def _assert_validation_monitor(self,
                                 monitor,
                                 expected_early_stopped=False,
                                 expected_best_step=None,
                                 expected_best_value=None,
                                 expected_best_metrics=None):
    self.assertEqual(expected_early_stopped, monitor.early_stopped)
    self.assertEqual(expected_best_step, monitor.best_step)
    self.assertEqual(expected_best_value, monitor.best_value)
    self.assertEqual(expected_best_metrics, monitor.best_metrics)

  def test_validation_monitor_no_estimator(self):
    monitor = learn.monitors.ValidationMonitor(
        x=constant_op.constant(2.0), every_n_steps=0)
    self._assert_validation_monitor(monitor)
    with ops.Graph().as_default() as g, self.test_session(g):
      with self.assertRaisesRegexp(ValueError, 'set_estimator'):
        self._run_monitor(monitor)

  @test.mock.patch.object(estimators, 'Estimator', autospec=True)
  @test.mock.patch.object(saver, 'latest_checkpoint')
  def test_validation_monitor_no_ckpt(self, mock_latest_checkpoint,
                                      mock_estimator_class):
    estimator = mock_estimator_class()
    model_dir = 'model/dir'
    estimator.model_dir = model_dir
    mock_latest_checkpoint.return_value = None

    # Do nothing with no checkpoint.
    monitor = learn.monitors.ValidationMonitor(
        x=constant_op.constant(2.0), every_n_steps=0)
    self._assert_validation_monitor(monitor)
    monitor.set_estimator(estimator)
    with ops.Graph().as_default() as g, self.test_session(g):
      self._run_monitor(monitor)
      self._assert_validation_monitor(monitor)
      mock_latest_checkpoint.assert_called_with(model_dir)

  @test.mock.patch.object(estimators, 'Estimator', autospec=True)
  @test.mock.patch.object(saver, 'latest_checkpoint')
  def test_validation_monitor_no_early_stopping_rounds(self,
                                                       mock_latest_checkpoint,
                                                       mock_estimator_class):
    estimator = mock_estimator_class()
    model_dir = 'model/dir'
    estimator.model_dir = model_dir
    estimator.evaluate.return_value = {}
    mock_latest_checkpoint.return_value = '%s/ckpt' % model_dir

    # Do nothing with early_stopping_rounds=None.
    monitor = learn.monitors.ValidationMonitor(
        x=constant_op.constant(2.0), every_n_steps=0)
    self._assert_validation_monitor(monitor)
    monitor.set_estimator(estimator)
    with ops.Graph().as_default() as g, self.test_session(g):
      self._run_monitor(monitor)
      self._assert_validation_monitor(monitor)

  @test.mock.patch.object(estimators, 'Estimator', autospec=True)
  @test.mock.patch.object(saver, 'latest_checkpoint')
  def test_validation_monitor_invalid_metric(self, mock_latest_checkpoint,
                                             mock_estimator_class):
    estimator = mock_estimator_class()
    model_dir = 'model/dir'
    estimator.model_dir = model_dir
    estimator.evaluate.return_value = {}
    mock_latest_checkpoint.return_value = '%s/ckpt' % model_dir

    # Fail for missing metric.
    monitor = learn.monitors.ValidationMonitor(
        x=constant_op.constant(2.0), every_n_steps=0, early_stopping_rounds=1)
    self._assert_validation_monitor(monitor)
    monitor.set_estimator(estimator)
    with ops.Graph().as_default() as g, self.test_session(g):
      with self.assertRaisesRegexp(ValueError, 'missing from outputs'):
        self._run_monitor(monitor, num_epochs=1, num_steps_per_epoch=1)

  @test.mock.patch.object(estimators, 'Estimator', autospec=True)
  @test.mock.patch.object(saver, 'latest_checkpoint')
  def test_validation_monitor(self, mock_latest_checkpoint,
                              mock_estimator_class):
    estimator = mock_estimator_class()
    model_dir = 'model/dir'
    estimator.model_dir = model_dir
    validation_outputs = {'loss': None, 'auc': None}
    estimator.evaluate.return_value = validation_outputs

    monitor = learn.monitors.ValidationMonitor(
        x=constant_op.constant(2.0), every_n_steps=0, early_stopping_rounds=2)
    self._assert_validation_monitor(monitor)
    monitor.set_estimator(estimator)
    with ops.Graph().as_default() as g, self.test_session(g):
      monitor.begin(max_steps=100)
      monitor.epoch_begin(epoch=0)
      self.assertEqual(0, estimator.evaluate.call_count)

      # Step 0, initial loss.
      step = 0
      mock_latest_checkpoint.return_value = '%s/ckpt.%s' % (model_dir, step)
      validation_outputs['loss'] = 42.0
      validation_outputs['auc'] = 0.5
      self.assertEqual(0, len(monitor.step_begin(step=step)))
      self.assertFalse(monitor.step_end(step=step, output={}))
      self.assertEqual(1, estimator.evaluate.call_count)
      self._assert_validation_monitor(
          monitor, expected_best_step=0, expected_best_value=42.0,
          expected_best_metrics={'loss': 42.0, 'auc': 0.5})
      monitor.post_step(step=step, session=None)

      # Step 1, same checkpoint, no eval.
      step = 1
      self.assertEqual(0, len(monitor.step_begin(step=step)))
      self.assertFalse(monitor.step_end(step=step, output={}))
      self.assertEqual(1, estimator.evaluate.call_count)
      self._assert_validation_monitor(
          monitor, expected_best_step=0, expected_best_value=42.0,
          expected_best_metrics={'loss': 42.0, 'auc': 0.5})
      monitor.post_step(step=step, session=None)

      # Step 2, lower loss.
      step = 2
      mock_latest_checkpoint.return_value = '%s/ckpt.%s' % (model_dir, step)
      validation_outputs['loss'] = 40.0
      validation_outputs['auc'] = 0.6
      self.assertEqual(0, len(monitor.step_begin(step=step)))
      self.assertFalse(monitor.step_end(step=step, output={}))
      self.assertEqual(2, estimator.evaluate.call_count)
      self._assert_validation_monitor(
          monitor, expected_best_step=2, expected_best_value=40.0,
          expected_best_metrics={'loss': 40.0, 'auc': 0.6})
      monitor.post_step(step=step, session=None)

      # Step 3, higher loss.
      step = 3
      mock_latest_checkpoint.return_value = '%s/ckpt.%s' % (model_dir, step)
      validation_outputs['loss'] = 44.0
      validation_outputs['auc'] = 0.7
      self.assertEqual(0, len(monitor.step_begin(step=step)))
      self.assertFalse(monitor.step_end(step=step, output={}))
      self.assertEqual(3, estimator.evaluate.call_count)
      self._assert_validation_monitor(
          monitor, expected_best_step=2, expected_best_value=40.0,
          expected_best_metrics={'loss': 40.0, 'auc': 0.6})
      monitor.post_step(step=step, session=None)

      # Step 4, higher loss for 2 steps, early stopping.
      step = 4
      mock_latest_checkpoint.return_value = '%s/ckpt.%s' % (model_dir, step)
      validation_outputs['loss'] = 43.0
      self.assertEqual(0, len(monitor.step_begin(step=step)))
      self.assertTrue(monitor.step_end(step=step, output={}))
      self.assertEqual(4, estimator.evaluate.call_count)
      self._assert_validation_monitor(
          monitor,
          expected_early_stopped=True,
          expected_best_step=2,
          expected_best_value=40.0,
          expected_best_metrics={'loss': 40.0, 'auc': 0.6})
      monitor.post_step(step=step, session=None)

      monitor.epoch_end(epoch=0)
      monitor.end()

  @test.mock.patch.object(saver, 'latest_checkpoint')
  def test_validation_monitor_with_core_estimator(self, mock_latest_checkpoint):
    estimator = test.mock.Mock(spec=core_estimator.Estimator)
    model_dir = 'model/dir'
    estimator.model_dir = model_dir
    validation_outputs = {'loss': None, 'auc': None}
    estimator.evaluate.return_value = validation_outputs

    monitor = learn.monitors.ValidationMonitor(
        input_fn=lambda: constant_op.constant(2.0),
        every_n_steps=0, early_stopping_rounds=2)
    self._assert_validation_monitor(monitor)
    monitor.set_estimator(estimator)
    with ops.Graph().as_default() as g, self.test_session(g):
      monitor.begin(max_steps=100)
      monitor.epoch_begin(epoch=0)
      self.assertEqual(0, estimator.evaluate.call_count)

      # Step 0, initial loss.
      step = 0
      mock_latest_checkpoint.return_value = '%s/ckpt.%s' % (model_dir, step)
      validation_outputs['loss'] = 42.0
      validation_outputs['auc'] = 0.5
      self.assertEqual(0, len(monitor.step_begin(step=step)))
      self.assertFalse(monitor.step_end(step=step, output={}))
      self.assertEqual(1, estimator.evaluate.call_count)
      self._assert_validation_monitor(
          monitor, expected_best_step=0, expected_best_value=42.0,
          expected_best_metrics={'loss': 42.0, 'auc': 0.5})
      monitor.post_step(step=step, session=None)

  @test.mock.patch.object(saver, 'latest_checkpoint')
  def test_validation_monitor_fail_with_core_estimator_and_metrics(
      self, mock_latest_checkpoint):
    estimator = test.mock.Mock(spec=core_estimator.Estimator)
    model_dir = 'model/dir'
    estimator.model_dir = model_dir
    validation_outputs = {'loss': None}
    estimator.evaluate.return_value = validation_outputs

    monitor = learn.monitors.ValidationMonitor(
        input_fn=lambda: constant_op.constant(2.0),
        metrics=constant_op.constant(2.0),
        every_n_steps=0, early_stopping_rounds=2)
    monitor.set_estimator(estimator)
    with ops.Graph().as_default() as g, self.test_session(g):
      monitor.begin(max_steps=100)
      monitor.epoch_begin(epoch=0)
      with self.assertRaisesRegexp(
          ValueError,
          'tf.estimator.Estimator does not support .* metrics'):
        step = 0
        mock_latest_checkpoint.return_value = '%s/ckpt.%s' % (model_dir, step)
        validation_outputs['loss'] = 42.0
        self.assertEqual(0, len(monitor.step_begin(step=step)))
        self.assertFalse(monitor.step_end(step=step, output={}))

  def test_graph_dump(self):
    monitor0 = learn.monitors.GraphDump()
    monitor1 = learn.monitors.GraphDump()
    with ops.Graph().as_default() as g, self.test_session(g):
      const_var = variables.Variable(42.0, name='my_const')
      counter_var = variables.Variable(0.0, name='my_counter')
      assign_add = state_ops.assign_add(counter_var, 1.0, name='my_assign_add')
      variables.global_variables_initializer().run()

      self._run_monitor(monitor0, num_epochs=3, num_steps_per_epoch=10)
      self.assertEqual({
          step: {
              const_var.name: 42.0,
              counter_var.name: step + 1.0,
              assign_add.name: step + 1.0,
          }
          for step in xrange(30)
      }, monitor0.data)

      self._run_monitor(monitor1, num_epochs=3, num_steps_per_epoch=10)
      self.assertEqual({
          step: {
              const_var.name: 42.0,
              counter_var.name: step + 31.0,
              assign_add.name: step + 31.0,
          }
          for step in xrange(30)
      }, monitor1.data)

      for step in xrange(30):
        matched, non_matched = monitor1.compare(monitor0, step=step)
        self.assertEqual([const_var.name], matched)
        self.assertEqual({
            assign_add.name: (step + 31.0, step + 1.0),
            counter_var.name: (step + 31.0, step + 1.0),
        }, non_matched)
        matched, non_matched = monitor0.compare(monitor1, step=step)
        self.assertEqual([const_var.name], matched)
        self.assertEqual({
            assign_add.name: (step + 1.0, step + 31.0),
            counter_var.name: (step + 1.0, step + 31.0),
        }, non_matched)

  def test_capture_variable(self):
    monitor = learn.monitors.CaptureVariable(
        var_name='my_assign_add:0', every_n=8, first_n=2)
    with ops.Graph().as_default() as g, self.test_session(g):
      var = variables.Variable(0.0, name='my_var')
      var.initializer.run()
      state_ops.assign_add(var, 1.0, name='my_assign_add')
      self._run_monitor(monitor, num_epochs=3, num_steps_per_epoch=10)
      self.assertEqual({
          0: 1.0,
          1: 2.0,
          2: 3.0,
          10: 4.0,
          18: 5.0,
          26: 6.0,
          29: 7.0,
      }, monitor.values)


class StopAtStepTest(test.TestCase):

  def test_raise_in_both_last_step_and_num_steps(self):
    with self.assertRaises(ValueError):
      learn.monitors.StopAtStep(num_steps=10, last_step=20)

  def test_stop_based_on_last_step(self):
...
Source: test_resource_monitor.py
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.

from __future__ import absolute_import

import multiprocessing
import time
import unittest

import mozunit

try:
    import psutil
except ImportError:
    psutil = None

from mozsystemmonitor.resourcemonitor import (
    SystemResourceMonitor,
    SystemResourceUsage,
)


@unittest.skipIf(psutil is None, 'Resource monitor requires psutil.')
class TestResourceMonitor(unittest.TestCase):

    def test_basic(self):
        monitor = SystemResourceMonitor(poll_interval=0.5)
        monitor.start()
        time.sleep(3)
        monitor.stop()
        data = list(monitor.range_usage())
        self.assertGreater(len(data), 3)
        self.assertIsInstance(data[0], SystemResourceUsage)

    def test_empty(self):
        monitor = SystemResourceMonitor(poll_interval=2.0)
        monitor.start()
        monitor.stop()
        data = list(monitor.range_usage())
        self.assertEqual(len(data), 0)

    def test_phases(self):
        monitor = SystemResourceMonitor(poll_interval=0.25)
        monitor.start()
        time.sleep(1)
        with monitor.phase('phase1'):
            time.sleep(1)
            with monitor.phase('phase2'):
                time.sleep(1)
        monitor.stop()
        self.assertEqual(len(monitor.phases), 2)
        self.assertEqual(['phase2', 'phase1'], monitor.phases.keys())
        all = list(monitor.range_usage())
        data1 = list(monitor.phase_usage('phase1'))
        data2 = list(monitor.phase_usage('phase2'))
        self.assertGreater(len(all), len(data1))
        self.assertGreater(len(data1), len(data2))
        # This could fail if time.time() takes more than 0.1s. It really
        # shouldn't.
        self.assertAlmostEqual(data1[-1].end, data2[-1].end, delta=0.25)

    def test_no_data(self):
        monitor = SystemResourceMonitor()
        data = list(monitor.range_usage())
        self.assertEqual(len(data), 0)

    def test_events(self):
        monitor = SystemResourceMonitor(poll_interval=0.25)
        monitor.start()
        time.sleep(0.5)
        t0 = time.time()
        monitor.record_event('t0')
        time.sleep(0.5)
        monitor.record_event('t1')
        time.sleep(0.5)
        monitor.stop()
        events = monitor.events
        self.assertEqual(len(events), 2)
        event = events[0]
        self.assertEqual(event[1], 't0')
        self.assertAlmostEqual(event[0], t0, delta=0.25)
        data = list(monitor.between_events_usage('t0', 't1'))
        self.assertGreater(len(data), 0)

    def test_aggregate_cpu(self):
        monitor = SystemResourceMonitor(poll_interval=0.25)
        monitor.start()
        time.sleep(1)
        monitor.stop()
        values = monitor.aggregate_cpu_percent()
        self.assertIsInstance(values, list)
        self.assertEqual(len(values), multiprocessing.cpu_count())
        for v in values:
            self.assertIsInstance(v, float)
        value = monitor.aggregate_cpu_percent(per_cpu=False)
        self.assertIsInstance(value, float)
        values = monitor.aggregate_cpu_times()
        self.assertIsInstance(values, list)
        self.assertGreater(len(values), 0)
        self.assertTrue(hasattr(values[0], 'user'))
        t = type(values[0])
        value = monitor.aggregate_cpu_times(per_cpu=False)
        self.assertIsInstance(value, t)

    def test_aggregate_io(self):
        monitor = SystemResourceMonitor(poll_interval=0.25)
        # There's really no easy way to ensure I/O occurs. For all we know
        # reads and writes will all be serviced by the page cache.
        monitor.start()
        time.sleep(1.0)
        monitor.stop()
        values = monitor.aggregate_io()
        self.assertTrue(hasattr(values, 'read_count'))

    def test_memory(self):
        monitor = SystemResourceMonitor(poll_interval=0.25)
        monitor.start()
        time.sleep(1.0)
        monitor.stop()
        v = monitor.min_memory_available()
        self.assertIsInstance(v, long)
        v = monitor.max_memory_percent()
        self.assertIsInstance(v, float)

    def test_as_dict(self):
        monitor = SystemResourceMonitor(poll_interval=0.25)
        monitor.start()
        time.sleep(0.1)
        monitor.begin_phase('phase1')
        monitor.record_event('foo')
        time.sleep(0.1)
        monitor.begin_phase('phase2')
        monitor.record_event('bar')
        time.sleep(0.2)
        monitor.finish_phase('phase1')
        time.sleep(0.2)
        monitor.finish_phase('phase2')
        time.sleep(0.4)
        monitor.stop()
        d = monitor.as_dict()
        self.assertEqual(d['version'], 2)
        self.assertEqual(len(d['events']), 2)
        self.assertEqual(len(d['phases']), 2)
        self.assertIn('system', d)
        self.assertIsInstance(d['system'], dict)
        self.assertIsInstance(d['overall'], dict)
        self.assertIn('duration', d['overall'])
        self.assertIn('cpu_times', d['overall'])


if __name__ == '__main__':
...
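Read together, these tests outline the typical SystemResourceMonitor workflow: construct it with a poll_interval, start sampling, optionally group work into phases and record events, stop, then query aggregates or serialize with as_dict(). The sketch below is a minimal usage example assembled only from calls exercised in the tests above; the phase and event names, the sleep, and the poll interval are illustrative placeholders, not part of the original file.

# Minimal usage sketch based on the SystemResourceMonitor calls exercised in
# the tests above. Phase/event names and timings are illustrative only.
import time

from mozsystemmonitor.resourcemonitor import SystemResourceMonitor

monitor = SystemResourceMonitor(poll_interval=0.25)
monitor.start()

with monitor.phase('work'):            # samples in this block are attributed to the 'work' phase
    monitor.record_event('work-started')
    time.sleep(1)                       # stand-in for the real workload

monitor.stop()

overall_cpu = monitor.aggregate_cpu_percent(per_cpu=False)  # single float across all CPUs
report = monitor.as_dict()              # dict with 'version', 'events', 'phases', 'overall'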
Source: test_monitor.py
...
}

cronitor.api_key = FAKE_API_KEY


class MonitorTests(unittest.TestCase):

    @patch('cronitor.Monitor._put', return_value=[MONITOR])
    def test_create_monitor(self, mocked_create):
        monitor = cronitor.Monitor.put(**MONITOR)
        self.assertEqual(monitor.data.key, MONITOR['key'])
        self.assertEqual(monitor.data.assertions, MONITOR['assertions'])
        self.assertEqual(monitor.data.schedule, MONITOR['schedule'])

    @patch('cronitor.Monitor._put', return_value=[MONITOR, MONITOR_2])
    def test_create_monitors(self, mocked_create):
        monitors = cronitor.Monitor.put([MONITOR, MONITOR_2])
        self.assertEqual(len(monitors), 2)
        self.assertCountEqual([MONITOR['key'], MONITOR_2['key']],
                              list(map(lambda m: m.data.key, monitors)))

    @patch('cronitor.Monitor._req.put')
    def test_create_monitor_fails(self, mocked_put):
        mocked_put.return_value.status_code = 400
        with self.assertRaises(cronitor.APIValidationError):
            cronitor.Monitor.put(**MONITOR)
...