Best Python code snippet using avocado_python
test_output_check.py
Source:test_output_check.py
...124 self.assertEqual(result.exit_status, expected_rc,125 "Avocado did not return rc %d:\n%s" %126 (expected_rc, result))127 self.assertIn(tampered_msg, result.stdout)128 def test_output_diff(self):129 self._check_output_record_all()130 tampered_msg_stdout = b"I PITY THE FOOL THAT STANDS ON STDOUT!"131 tampered_msg_stderr = b"I PITY THE FOOL THAT STANDS ON STDERR!"132 stdout_file = "%s.data/stdout.expected" % self.output_script.path133 with open(stdout_file, 'wb') as stdout_file_obj:134 stdout_file_obj.write(tampered_msg_stdout)135 stderr_file = "%s.data/stderr.expected" % self.output_script.path136 with open(stderr_file, 'wb') as stderr_file_obj:137 stderr_file_obj.write(tampered_msg_stderr)138 cmd_line = ('%s run --job-results-dir %s --sysinfo=off %s --json -'139 % (AVOCADO, self.tmpdir, self.output_script.path))140 result = process.run(cmd_line, ignore_status=True)141 expected_rc = exit_codes.AVOCADO_TESTS_FAIL142 self.assertEqual(result.exit_status, expected_rc,...
train.py
Source:train.py
"""Train the QRNN pedestrian-bbox model.

Reconstructed/cleaned version of train.py: loads sequence data, splits it
into train/test subsets, trains QRNNModel with L1 loss on absolute bboxes
recovered from predicted frame-to-frame diffs, and checkpoints periodically.
"""
import argparse
import os
import pickle
import time

import numpy as np
import torch
import torch.nn as nn
from torch.utils.data import DataLoader
from torch.utils.data.sampler import SubsetRandomSampler

import data.dataloader as data_loader
from layer import QRNNLayer
from model import QRNNModel

# Fall back to CPU so the script still starts on machines without this GPU
# (the original hard-coded 'cuda:3' and crashed on CPU-only hosts).
device = torch.device('cuda:3' if torch.cuda.is_available() else 'cpu')

# Directory holding the per-pedestrian ground-truth bbox pickles used by
# diff_trans() to anchor the predicted diffs to absolute coordinates.
LABEL_DIR = '/home/tfukuda/qrnn/data/labels'


def create_model(config):
    """Build a QRNNModel and restore it from a checkpoint if one exists.

    Returns:
        (model, model_state): the model in train mode on ``device`` and a
        dict holding the config plus 'epoch', 'train_steps' and
        (after the first save) 'state_dict'.
    """
    print('Creating new model parameters..')
    model = QRNNModel(config.dim_in, config.dim_hid, config.dim_out,
                      QRNNLayer, config.num_layers, config.kernel_size,
                      config.hidden_size, config.batch_size,
                      config.frames, config.dec_size, config.out_size, device)

    # Initialize a model state
    model_state = vars(config)
    model_state['epoch'], model_state['train_steps'] = 0, 0
    model_state['state_dict'] = None

    model_path = os.path.join(config.model_dir, config.model_name)
    # If training stopped half way, restart from the last checkpoint.
    if os.path.exists(model_path):
        print('Reloading model parameters..')
        # map_location lets a checkpoint saved on another device be loaded.
        checkpoint = torch.load(model_path, map_location=device)
        model_state['epoch'] = checkpoint['epoch']
        model_state['train_steps'] = checkpoint['train_steps']
        model.load_state_dict(checkpoint['state_dict'])

    print('Using device:', device)
    model.train().to(device)
    print(next(model.parameters()).is_cuda)
    return model, model_state


def diff_trans(output_diff, index, label_dir=LABEL_DIR):
    """Convert predicted frame-to-frame bbox diffs into absolute bboxes.

    For each sequence i, ``index[i][-1]`` appears to encode
    (video_id, frame_idx, pedestrian_id) — TODO confirm against the
    dataloader. The ground-truth bbox at frame_idx is read from the label
    pickle and each predicted diff is accumulated on top of it.

    Args:
        output_diff: tensor of per-step bbox differences, shape
            (batch, steps, 4) — presumably; verify against the model output.
        index: per-sequence index tensor identifying the label file/frame.
        label_dir: directory containing the '<video>_ped<id>.pkl' files.

    Returns:
        Tensor of absolute bboxes with the same shape as ``output_diff``.
    """
    output = torch.zeros_like(output_diff).to(device)
    for i in range(len(output_diff)):
        ind = index[i][-1]
        label_file = os.path.join(
            label_dir,
            str(int(ind[0])).zfill(4) + '_ped' + str(int(ind[2])) + '.pkl')
        with open(label_file, 'rb') as f:
            bbox = pickle.load(f)

        # Running absolute bbox, seeded from the ground-truth frame.
        p_bbox = torch.tensor(bbox[int(ind[1])]).to(device)
        for j in range(len(output_diff[i])):
            output[i][j] = p_bbox + output_diff[i][j]
            p_bbox = output[i][j]
    return output


def train(config):
    """Run the training loop with periodic evaluation and checkpointing."""
    print('Loading data..')
    # Creating data for training and test splits
    dataset = data_loader.Seq_Data(config.frames, config.dim_in, config.dim_out)

    # Deterministically shuffle indices, then carve off a 20% test split.
    batch_size = config.batch_size
    test_split = 0.2
    shuffle_dataset = True
    random_seed = 42
    dataset_size = len(dataset)
    indices = list(range(dataset_size))
    split = int(np.floor(test_split * dataset_size))
    if shuffle_dataset:
        np.random.seed(random_seed)
        np.random.shuffle(indices)
    train_indices, test_indices = indices[split:], indices[:split]

    train_sampler = SubsetRandomSampler(train_indices)
    test_sampler = SubsetRandomSampler(test_indices)
    train_datas = DataLoader(dataset, batch_size=batch_size,
                             sampler=train_sampler, drop_last=True,
                             num_workers=2)
    test_datas = DataLoader(dataset, batch_size=batch_size,
                            sampler=test_sampler, drop_last=True,
                            num_workers=2)
    print('train data length: ', len(train_datas))
    print('test data length: ', len(test_datas))

    model, model_state = create_model(config)
    criterion = nn.L1Loss()
    optimizer = torch.optim.Adam(model.parameters(), lr=config.lr)

    loss = 0.0
    start_time = time.time()

    # Training loop
    print('Training..')
    for epoch in range(config.epochs):
        model_state['epoch'] += 1
        for data, label, index in train_datas:
            data = data.requires_grad_().to(device)
            label = label.to(device)
            index = index.to(device)

            # Execute a single training step
            optimizer.zero_grad()
            output_diff = model(data)
            output = diff_trans(output_diff, index)
            step_loss = criterion(output, label)
            step_loss.backward()
            # clip_grad_norm (no underscore) was deprecated and later removed
            # from torch; use the in-place variant.
            nn.utils.clip_grad_norm_(model.parameters(), config.max_grad_norm)
            optimizer.step()

            loss += float(step_loss) / config.display_freq
            model_state['train_steps'] += 1

            # Display the average loss of training
            if model_state['train_steps'] % config.display_freq == 0:
                avg_loss = float(loss)
                time_elapsed = time.time() - start_time
                step_time = time_elapsed / config.display_freq
                print('Epoch ', model_state['epoch'],
                      'Step ', model_state['train_steps'],
                      'Loss {0:.2f}'.format(avg_loss),
                      'Step-time {0:.2f}'.format(step_time))
                loss = 0.0
                start_time = time.time()

            # Test step
            if model_state['train_steps'] % config.test_freq == 0:
                model.eval()
                print('Test step')
                test_steps = 0
                test_loss = 0.0
                # no_grad: evaluation needs no autograd graph (the original
                # called requires_grad_() on eval data, wasting memory).
                with torch.no_grad():
                    for test_data, test_label, test_index in test_datas:
                        test_data = test_data.to(device)
                        test_label = test_label.to(device)
                        test_index = test_index.to(device)
                        test_output_diff = model(test_data)
                        test_output = diff_trans(test_output_diff, test_index)
                        step_loss = criterion(test_output, test_label)
                        test_steps += 1
                        test_loss += float(step_loss)
                model.train()
                # Display loss of test steps
                print('Test Loss: {0:.2f}'.format(test_loss / test_steps))

            # Save the model checkpoint
            if model_state['train_steps'] % config.save_freq == 0:
                print('Saving the model..')
                model_state['state_dict'] = model.state_dict()
                model_path = os.path.join(config.model_dir, config.model_name)
                torch.save(model_state, model_path)

        print('Epoch {0:} DONE'.format(model_state['epoch']))


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Network parameters. Dimensions are counts, so parse them as ints
    # (the original used type=float for dim_in/dim_hid/dim_out by mistake,
    # making CLI-supplied values floats while the defaults were ints).
    parser.add_argument('--dim_in', type=int, default=17)
    parser.add_argument('--dim_hid', type=int, default=8)
    parser.add_argument('--dim_out', type=int, default=4)
    parser.add_argument('--kernel_size', type=int, default=2)
    parser.add_argument('--hidden_size', type=int, default=1024)
    parser.add_argument('--num_layers', type=int, default=2)
    parser.add_argument('--dec_size', type=int, default=2)
    parser.add_argument('--out_size', type=int, default=4)
    # Training parameters
    parser.add_argument('--lr', type=float, default=0.001)
    parser.add_argument('--max_grad_norm', type=float, default=1.0)
    parser.add_argument('--batch_size', type=int, default=16)
    parser.add_argument('--frames', type=int, default=30)
    parser.add_argument('--epochs', type=int, default=16)
    parser.add_argument('--display_freq', type=int, default=100)
    parser.add_argument('--save_freq', type=int, default=200)
    parser.add_argument('--test_freq', type=int, default=200)
    parser.add_argument('--model_dir', type=str, default='model/')
    parser.add_argument('--model_name', type=str,
                        default='model_diff_hid-8_dec-2_ep-16.pkl')
    config = parser.parse_args()
    print(config)
    train(config)
test_tflite_debugger.py
Source:test_tflite_debugger.py
...92 )93 self.assertEqual(len(output_diffs), len(debug_tflite_model.output_names))94 self.assertEqual(len(output_diffs[0].metrics["mae"]), 2)95class OutputDiffTest(tf.test.TestCase):96 def test_output_diff(self):97 diff = OutputDiff(98 left_name="a",99 right_name="b",100 shape=(4, 5),101 left_dtype=np.float32,102 right_dtype=np.float32,103 metrics={"mse": [0, 1, 2, 1]},104 )105 self.assertEqual(diff.as_flat_dict()["metric/mse"], 1.0)106 df = OutputDiff.to_df([diff, diff])107 df = pd.DataFrame(df)108 df = df.set_index("left_name")...
Learn to execute automation testing from scratch with the LambdaTest Learning Hub. Right from setting up the prerequisites to running your first automation test, to following best practices and diving deeper into advanced test scenarios, the LambdaTest Learning Hub compiles a list of step-by-step guides to help you become proficient with different test automation frameworks, e.g., Selenium, Cypress, and TestNG.
You can also refer to the video tutorials on the LambdaTest YouTube channel for step-by-step demonstrations from industry experts.
Get 100 minutes of automation testing FREE!