Best Python code snippet using hypothesis
commons.py
Source:commons.py
# NOTE(review): Scraped fragment of "commons.py" (original file lines 151-331).
# The original source line numbers are fused into the text and newlines were
# lost during extraction, so this chunk is NOT runnable as-is; restore the
# line breaks indicated by the embedded numbering before reuse.
# Per that numbering the chunk contains:
#   - lines 151-193: the tail of a reconstruction-error visualization routine
#     (computes per-image errors via dm_func, sorts them descending with
#     np.argsort, tiles original/reconstruction pairs with np.hstack/np.vstack,
#     returns (outputs2, errors)); its `def` header is outside this view, so
#     its name and full parameter list cannot be confirmed here.
#   - lines 194-221: prepare_dataset(args, augmentation=False) -- loads MNIST
#     or a CREDO dataset, builds an unsupervised set with injected anomalies,
#     scales pixels to [0, 1], and returns (train, validation, test) splits.
#   - lines 222-227: original_autoencoder(size=60, kl=False) -- builds and
#     compiles a ConvAutoencoder with Adam and an MSE loss.
#   - lines 228-285: train_or_cache(...) -- loads a cached .h5 model if
#     present, otherwise fits the autoencoder and saves loss/metric plots.
#     NOTE(review): the plot x-axis uses the global EPOCHS, not the `epochs`
#     parameter -- mismatched lengths if a caller overrides epochs; verify.
#   - lines 286-331: binarize_image, cutoff_reconstruction_background,
#     count_non_black_pixels, compute_errors, prepare_for_histogram
#     (the bare `except:` at line 329 swallows all errors and records 0).
# Polish docstrings are mojibaked (UTF-8 decoded as Latin-1), e.g.
# "DomyÅlnie" for "Domyslnie" -- fix the encoding before editing that text.
...151 errors = []152 for (image, recon) in zip(images, decoded):153 # compute the mean squared error between the ground-truth image154 # and the reconstructed image, then add it to our list of errors155 mse = dm_func(image, recon)156 errors.append(mse)157 errors_sorted = np.argsort(errors)[::-1]158 # loop over our number of output samples159 for y in range(0, samples):160 outputs = None161 for x in range(0, samples):162 i = y * samples + x163 if i >= gt.shape[0]:164 original = np.full(gt[0].shape, 0)165 recon = original166 i_sorted = 0167 else:168 # grab the original image and reconstructed image169 i_sorted = errors_sorted[i]170 original = (gt[i_sorted] * 255).astype("uint8")171 recon = (decoded[i_sorted] * 255).astype("uint8")172 # stack the original and reconstructed image side-by-side173 output = np.hstack([original, recon])174 v = "" if i >= gt.shape[0] else ' %0.6f' % errors[errors_sorted[i]]175 color = 255176 if marked_first_half and i_sorted < gt.shape[0]/2:177 color = 128178 text = np.expand_dims(draw_text(v, color, width=decoded.shape[1]*2), axis=-1)179 output = np.vstack([output, text])180 # if the outputs array is empty, initialize it as the current181 # side-by-side image display182 if outputs is None:183 outputs = output184 # otherwise, vertically stack the outputs185 else:186 outputs = np.vstack([outputs, output])187 if outputs2 is None:188 outputs2 = outputs189 # otherwise, horizontally stack the outputs190 else:191 outputs2 = np.hstack([outputs2, outputs])192 # return the output images193 return outputs2, errors194def prepare_dataset(args, augmentation=False):195 if args["kind"] == "mnist":196 from tensorflow.keras.datasets import mnist197 print("[INFO] loading MNIST dataset...")198 ((train_set, trainY), (unused_set, unused_set2)) = mnist.load_data()199 else:200 from dataset_loader import load_dataset201 print("[INFO] loading CREDO dataset...")202 train_set, trainY = load_dataset()203 # build our unsupervised dataset of images with a small amount 
of204 # contamination (i.e., anomalies) added into it205 print("[INFO] creating unsupervised dataset...")206 images, anomalies = build_unsupervised_dataset(train_set, trainY, kind=args["kind"])207 # add a channel dimension to every image in the dataset, then scale208 # the pixel intensities to the range [0, 1]209 images = np.expand_dims(images, axis=-1)210 images = images.astype("float32") / 255.0211 anomalies = np.expand_dims(anomalies, axis=-1)212 anomalies = anomalies.astype("float32") / 255.0213 # construct the training and testing split214 (train_set, test_set) = train_test_split(images, test_size=0.2)215 if augmentation:216 train_set = do_augmentation(train_set)217 (train_set, validation_set) = train_test_split(train_set, test_size=0.2)218 # prepare test set219 max_test = min(anomalies.shape[0], test_set.shape[0])220 test_set = np.vstack([anomalies[0:max_test], test_set[0:max_test]])221 return train_set, validation_set, test_set222def original_autoencoder(size=60, kl=False):223 from pyimagesearch.convautoencoder import ConvAutoencoder224 (encoder, decoder, autoencoder) = ConvAutoencoder.build(size, size, 1)225 opt = tf.keras.optimizers.Adam(learning_rate=INIT_LR, decay=INIT_LR / EPOCHS)226 autoencoder.compile(loss="mse", optimizer=opt, metrics=['kullback_leibler_divergence' if kl else 'accuracy'])227 return autoencoder228def train_or_cache(train_set, autoencoder, fncache=None, force_train=False, epochs=EPOCHS, batch_size=BS, shuffle=False, validation_set=None, kl=False):229 from os.path import exists230 from keras.models import load_model231 import matplotlib.pyplot as plt232 fn = fncache # 'cache/%s.h5' % str(fncache)233 if fncache is not None and exists(fn) and not force_train:234 print('Load from: %s' % fn)235 return load_model(fn)236 #(input_set, validation_set) = train_test_split(train_set, test_size=0.2)237 # train the convolutional autoencoder238 H = autoencoder.fit(239 train_set,240 train_set,241 shuffle=shuffle,242 validation_data=(validation_set, 
validation_set) if validation_set is not None else None,243 epochs=epochs,244 batch_size=batch_size245 )246 # r = autoencoder.evaluate(validation_set, validation_set)247 if fncache is not None:248 autoencoder.save(fn)249 print('Saved in: %s' % fn)250 N = np.arange(0, EPOCHS)251 plt.style.use("ggplot")252 plt.figure()253 plt.plot(N, H.history["loss"], label="train_loss")254 if validation_set is not None:255 plt.plot(N, H.history["val_loss"], label="val_loss")256 plt.title("Training Loss")257 plt.xlabel("Epoch #")258 plt.ylabel("Loss")259 plt.legend(loc="lower left")260 plt.savefig(fn.replace('.h5', '_loss.png'))261 if kl:262 N = np.arange(0, EPOCHS)263 plt.style.use("ggplot")264 plt.figure()265 plt.plot(N, H.history["kullback_leibler_divergence"], label="kullback_leibler_divergence")266 if validation_set is not None:267 plt.plot(N, H.history["val_kullback_leibler_divergence"], label="val_kullback_leibler_divergence")268 plt.title("Training Loss")269 plt.xlabel("Epoch #")270 plt.ylabel("Loss")271 plt.legend(loc="lower left")272 plt.savefig(fn.replace('.h5', '_kullback_leibler_divergence.png'))273 else:274 N = np.arange(0, EPOCHS)275 plt.style.use("ggplot")276 plt.figure()277 plt.plot(N, H.history["accuracy"], label="accuracy")278 if validation_set is not None:279 plt.plot(N, H.history["val_accuracy"], label="val_accuracy")280 plt.title("Training Loss")281 plt.xlabel("Epoch #")282 plt.ylabel("Loss")283 plt.legend(loc="lower left")284 plt.savefig(fn.replace('.h5', '_accuracy.png'))285 return autoencoder286def binarize_image(image, cutoff_qt_value=0):287 """288 Binaryzacja obrazka na podstawie podanego progu. 
DomyÅlnie, co nie jest caÅkiem czarne, jest biaÅe.289 :param image: źródlowy obrazek290 :param cutoff_qt_value: próg binaryzacji, 1 - wiÄksze od progu291 :return:292 """293 return np.where(image > cutoff_qt_value, 1, 0)294def cutoff_reconstruction_background(image, reconstruction):295 """296 Odcina tÅo od rekonstrukcji, która w oryginalnym obrazku byÅa tÅem.297 W oryginalnym obrazku mamy coÅ na tle czerni (0).298 :return: rekonstrukcja z obciÄtym tÅem299 """300 return binarize_image(image, 0) * reconstruction301def count_non_black_pixels(image):302 """303 Zwraca liczbÄ nieczarnych pikseli z obrazka.304 :param image: obrazek.305 :return: liczba nieczarnych pikseli.306 """307 return np.count_nonzero(image)308def compute_errors(image, recon, dm_func, normalize=True):309 """310 Obliczanie bÅÄdu.311 :param image: obrazek źródÅowy.312 :param recon: rekonstrukcja.313 :param dm_func: funkcja porównujÄ
ca, jako parametr przyjmuje (image, recon), zwraca skalar bÄdÄ
cy miarÄ
podobieÅstwa.314 :param normalize: jeÅli true, to dzieli wynik dm_func przez liczbÄ nieczarnych pikseli z image.315 :return: tablica316 """317 return dm_func(image, recon, normalize)318def prepare_for_histogram(images, reconstructions, dm_func, normalize=True, cutoff_background=False, binarize_for_compare=False):319 errors = []320 for (image, recon) in zip(images, reconstructions):321 try:322 if cutoff_background:323 recon = cutoff_reconstruction_background(image, recon)324 if binarize_for_compare:325 image = binarize_image(image)326 recon = binarize_image(recon)327 mse = compute_errors(image, recon, dm_func, normalize)328 errors.append(mse)329 except:330 errors.append(0)331 return errors...
test_joyent.py
Source:test_joyent.py
# NOTE(review): Scraped copy of "test_joyent.py" (original file lines 1-118)
# with the original line numbers fused into the text and newlines lost in
# extraction; NOT runnable as-is -- restore the line breaks indicated by the
# embedded numbering before reuse. Per that numbering it contains:
#   - lines 1-12: imports (datetime, json, mock.patch, unittest.TestCase,
#     and Client / ISO_8601_FORMAT / parse_args from the project's `joyent`
#     module, which is not visible here).
#   - lines 13-18: make_machine(state='running', hours=2) -- builds a machine
#     dict whose 'created' timestamp is `hours` in the past.
#     NOTE(review): uses the naive datetime.utcnow(), deprecated in modern
#     Python in favor of timezone-aware datetime.now(UTC) -- worth migrating.
#   - lines 19-25: fake_list_machines(machine) -- returns a stub that yields
#     the machine itself for a specific id, else a one-element list.
#   - lines 26-36: JoyentTestCase.test_parse_args -- CLI argument parsing.
#   - lines 37-118: ClientTestCase -- exercises Client construction,
#     _list_machine_tags, and delete_old_machines via nested
#     patch.object context managers over the client's private methods.
# The final method, test_attempt_deletion (from line 106), is truncated at
# line 118 ("...") -- its remainder is outside this view; do not assume how
# the second (partial-failure) scenario concludes.
1from datetime import (2 datetime,3 timedelta,4)5import json6from mock import patch7from unittest import TestCase8from joyent import (9 Client,10 ISO_8601_FORMAT,11 parse_args,12)13def make_machine(state='running', hours=2):14 then = datetime.utcnow() - timedelta(hours=hours)15 return {16 'id': 'id',17 'state': state,18 'created': then.strftime(ISO_8601_FORMAT)}19def fake_list_machines(machine):20 def list_machines(machine_id=None):21 if machine_id:22 return machine23 else:24 return [machine]25 return list_machines26class JoyentTestCase(TestCase):27 def test_parse_args(self):28 args = parse_args(29 ['-d', '-v', '-u', 'sdc_url', '-a', 'account', '-k', 'key_id',30 '-p', 'key/path', 'list-machines'])31 self.assertEqual('sdc_url', args.sdc_url)32 self.assertEqual('account', args.account)33 self.assertEqual('key_id', args.key_id)34 self.assertEqual('key/path', args.key_path)35 self.assertTrue(args.dry_run)36 self.assertTrue(args.verbose)37class ClientTestCase(TestCase):38 def test_init(self):39 client = Client(40 'sdc_url', 'account', 'key_id', './key', 'manta_url',41 dry_run=True, verbose=True)42 self.assertEqual('sdc_url', client.sdc_url)43 self.assertEqual('account', client.account)44 self.assertEqual('key_id', client.key_id)45 self.assertEqual('./key', client.key_path)46 self.assertEqual(3, client.pause)47 self.assertTrue(client.dry_run)48 self.assertTrue(client.verbose)49 def test_list_machine_tags(self):50 client = Client(51 'sdc_url', 'account', 'key_id', './key', 'manta_url', pause=0)52 headers = {}53 content = json.dumps({'env': 'foo'})54 with patch.object(client, '_request', autospec=True,55 return_value=(headers, content)) as mock:56 tags = client._list_machine_tags('bar')57 mock.assert_called_once_with('/machines/bar/tags')58 self.assertEqual({'env': 'foo'}, tags)59 def test_delete_old_machines(self):60 machine = make_machine('stopped')61 client = Client(62 'sdc_url', 'account', 'key_id', './key', 'manta_url', pause=0)63 with patch.object(client, 
'_list_machines',64 side_effect=fake_list_machines(machine)) as lm_mock:65 with patch.object(client, '_list_machine_tags', autospec=True,66 return_value={}) as lmt_mock:67 with patch.object(client, '_delete_running_machine',68 autospec=True) as drm_mock:69 with patch.object(client, 'attempt_deletion',70 autospec=True) as rd_mock:71 client.delete_old_machines(1)72 lm_mock.assert_called_once_with()73 lmt_mock.assert_called_once_with('id')74 drm_mock.assert_called_once_with('id')75 self.assertEqual(0, rd_mock.call_count)76 def test_delete_old_machines_stuck_provisioning(self):77 machine = make_machine('provisioning')78 client = Client(79 'sdc_url', 'account', 'key_id', './key', 'manta_url', pause=0)80 with patch.object(client, '_list_machines', autospec=True,81 side_effect=fake_list_machines(machine)):82 with patch.object(client, '_list_machine_tags', autospec=True):83 with patch.object(client, '_delete_running_machine',84 autospec=True) as drm_mock:85 with patch.object(client, 'attempt_deletion',86 autospec=True) as rd_mock:87 client.delete_old_machines(1)88 self.assertEqual(0, drm_mock.call_count)89 rd_mock.assert_called_once_with([machine])90 def test_delete_old_machines_permanent(self):91 machine = make_machine('provisioning')92 client = Client(93 'sdc_url', 'account', 'key_id', './key', 'manta_url', pause=0)94 with patch.object(client, '_list_machines', autospec=True,95 side_effect=fake_list_machines(machine)):96 with patch.object(client, '_list_machine_tags', autospec=True,97 return_value={'permanent': 'true'}) as lmt_mock:98 with patch.object(client, '_delete_running_machine',99 autospec=True) as drm_mock:100 with patch.object(client, 'attempt_deletion',101 autospec=True) as rd_mock:102 client.delete_old_machines(1)103 lmt_mock.assert_called_once_with('id')104 self.assertEqual(0, drm_mock.call_count)105 self.assertEqual(0, rd_mock.call_count)106 def test_attempt_deletion(self):107 client = Client(108 'sdc_url', 'account', 'key_id', './key', 'manta_url', 
pause=0)109 with patch.object(client, 'delete_machine', autospec=True) as dm_func:110 all_success = client.attempt_deletion(['a', 'b'])111 self.assertIs(True, all_success)112 dm_func.assert_any_call('a')113 dm_func.assert_any_call('b')114 with patch.object(client, 'delete_machine', autospec=True,115 side_effect=[Exception, None]) as dm_func:116 all_success = client.attempt_deletion(['a', 'b'])117 self.assertIs(False, all_success)118 dm_func.assert_any_call('a')...
Learn to execute automation testing from scratch with the LambdaTest Learning Hub — right from setting up the prerequisites to run your first automation test, to following best practices and diving deeper into advanced test scenarios. The LambdaTest Learning Hub compiles a list of step-by-step guides to help you become proficient with different test automation frameworks, e.g. Selenium, Cypress, TestNG, etc.
You can also refer to the video tutorials on the LambdaTest YouTube channel to get step-by-step demonstrations from industry experts.
Get 100 minutes of automation testing FREE!