Best Python code snippet using playwright-python
maml_regression.py
Source: maml_regression.py
import torch
import torchvision
import torch.nn as nn
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
from sklearn.model_selection import train_test_split
import torchmeta
from torchmeta.modules import (MetaModule, MetaSequential, MetaLinear)
import os
import torch.nn.functional as F
from tqdm import tqdm
import logging
from torchmeta.utils.data import BatchMetaDataLoader
from torchmeta.utils.gradient_based import gradient_update_parameters


class RegressionNeuralNetwork(MetaModule):
    def __init__(self, in_channels, hidden1_size=40, hidden2_size=80):
        super(RegressionNeuralNetwork, self).__init__()
        self.in_channels = in_channels
        self.hidden1_size = hidden1_size
        self.hidden2_size = hidden2_size
        self.regressor = MetaSequential(
            MetaLinear(in_channels, hidden1_size),
            nn.ReLU(),
            MetaLinear(hidden1_size, hidden2_size),
            nn.ReLU(),
            MetaLinear(hidden2_size, hidden1_size),
            nn.ReLU(),
            MetaLinear(hidden1_size, 1)
        )

    def forward(self, inputs, params=None):
        values = self.regressor(inputs, params=self.get_subdict(params, 'regressor'))
        # values = values.view(values.size(0), -1)
        return values


class RegressionNeuralNetwork_v2(MetaModule):
    def __init__(self, in_channels, hidden1_size=40, hidden2_size=80):
        super(RegressionNeuralNetwork_v2, self).__init__()
        self.in_channels = in_channels
        self.hidden1_size = hidden1_size
        self.hidden2_size = hidden2_size
        self.regressor = MetaSequential(
            MetaLinear(in_channels, hidden1_size),
            nn.LeakyReLU(),
            MetaLinear(hidden1_size, hidden2_size),
            nn.LeakyReLU(),
            MetaLinear(hidden2_size, hidden1_size),
            nn.LeakyReLU(),
            MetaLinear(hidden1_size, 1)
        )

    def forward(self, inputs, params=None):
        values = self.regressor(inputs, params=self.get_subdict(params, 'regressor'))
        # values = values.view(values.size(0), -1)
        return values


def meta_train(args, metaDataloader):
    model = RegressionNeuralNetwork(args['in_channels'], hidden1_size=args['hidden1_size'],
                                    hidden2_size=args['hidden2_size'])
    model.train()
    meta_optimizer = torch.optim.Adam(model.parameters(), lr=args['beta'])
    loss_record = []
    # training loop
    for it_outer in range(args['num_it_outer']):
        model.zero_grad()
        train_dataloader = metaDataloader['train']
        test_dataloader = metaDataloader['test']
        outer_loss = torch.tensor(0., dtype=torch.float)
        for task in train_dataloader:
            iterator = iter(train_dataloader[task])
            train_sample = next(iterator)
            # get true h value (last column of the batch)
            # h_value = torch.tensor(train_sample[:,-1], dtype=torch.float)
            h_value = train_sample[:, -1].clone().detach().to(dtype=torch.float)
            # get input features (all but the last column)
            # input_value = torch.tensor(train_sample[:,:-1], dtype=torch.float)
            input_value = train_sample[:, :-1].clone().detach().to(dtype=torch.float)
            # inner-loop loss on the support set
            train_h_value = model(input_value)
            inner_loss = F.mse_loss(train_h_value.view(-1, 1), h_value.view(-1, 1))
            model.zero_grad()
            # print('It {}, task {}, Start updating parameters'.format(it_outer, task))
            params = gradient_update_parameters(model, inner_loss, step_size=args['alpha'],
                                                first_order=args['first_order'])
            # adaptation: evaluate the adapted parameters on the query set
            test_iterator = iter(test_dataloader[task])
            test_sample = next(test_iterator)
            # h_value2 = torch.tensor(test_sample[:,-1], dtype=torch.float)
            h_value2 = test_sample[:, -1].clone().detach().to(dtype=torch.float)
            # test_input_value = torch.tensor(test_sample[:,:-1], dtype=torch.float)
            test_input_value = test_sample[:, :-1].clone().detach().to(dtype=torch.float)
            test_h_value = model(test_input_value, params=params)
            outer_loss += F.mse_loss(test_h_value.view(-1, 1), h_value2.view(-1, 1))
        outer_loss.div_(args['num_tasks'])
        outer_loss.backward()
        meta_optimizer.step()
        loss_record.append(outer_loss.detach())
        if it_outer % 50 == 0:
            print('It {}, outer training loss: {}'.format(it_outer, outer_loss))
    # plot the loss curve
    plt.plot(loss_record)
    plt.title('Outer Training Loss (MSE Loss) in MAML')
    plt.xlabel('Iteration number')
    plt.show()
    # save model
    if args['output_model'] is not None:
        with open(args['output_model'], 'wb') as f:
            state_dict = model.state_dict()
            torch.save(state_dict, f)


def meta_train_v2(args, metaDataloader):
    model = RegressionNeuralNetwork_v2(args['in_channels'], hidden1_size=args['hidden1_size'],
                                       hidden2_size=args['hidden2_size'])
    model.train()
    meta_optimizer = torch.optim.Adam(model.parameters(), lr=args['beta'])
    loss_record = []
    # training loop
    for it_outer in range(args['num_it_outer']):
        model.zero_grad()
        train_dataloader = metaDataloader['train']
        test_dataloader = metaDataloader['test']
        outer_loss = torch.tensor(0., dtype=torch.float)
        for task in train_dataloader:
            iterator = iter(train_dataloader[task])
            train_sample = next(iterator)
            # get true h value (last column of the batch)
            # h_value = torch.tensor(train_sample[:,-1], dtype=torch.float)
            h_value = train_sample[:, -1].clone().detach().to(dtype=torch.float)
            # get input features (all but the last column)
            # input_value = torch.tensor(train_sample[:,:-1], dtype=torch.float)
            input_value = train_sample[:, :-1].clone().detach().to(dtype=torch.float)
            # inner-loop loss on the support set
            train_h_value = model(input_value)
            inner_loss = F.mse_loss(train_h_value.view(-1, 1), h_value.view(-1, 1))
            model.zero_grad()
            # print('It {}, task {}, Start updating parameters'.format(it_outer, task))
            params = gradient_update_parameters(model, inner_loss, step_size=args['alpha'],
                                                first_order=args['first_order'])
            # adaptation: evaluate the adapted parameters on the query set
            test_iterator = iter(test_dataloader[task])
            test_sample = next(test_iterator)
            # h_value2 = torch.tensor(test_sample[:,-1], dtype=torch.float)
            h_value2 = test_sample[:, -1].clone().detach().to(dtype=torch.float)
            # test_input_value = torch.tensor(test_sample[:,:-1], dtype=torch.float)
            test_input_value = test_sample[:, :-1].clone().detach().to(dtype=torch.float)
            test_h_value = model(test_input_value, params=params)
            outer_loss += F.mse_loss(test_h_value.view(-1, 1), h_value2.view(-1, 1))
        outer_loss.div_(args['num_tasks'])
        outer_loss.backward()
        meta_optimizer.step()
        loss_record.append(outer_loss.detach())
        if it_outer % 50 == 0:
            print('It {}, outer training loss: {}'.format(it_outer, outer_loss))
    # plot the loss curve
    plt.plot(loss_record)
    plt.title('Outer Training Loss (MSE Loss) in MAML')
    plt.xlabel('Iteration number')
    plt.show()
    # save model
    if args['output_model'] is not None:
        with open(args['output_model'], 'wb') as f:
            state_dict = model.state_dict()
            torch.save(state_dict, f)


def fine_tune(args, model, dataloader, validation_set):
    # set the model to train mode
    model.train()
    # set the optimizer
    opt = torch.optim.Adam(model.parameters(), lr=args['learning_rate'])
    train_it = 0
    loss_record = []
    validation_loss_record = []
    flag_stop = False
    for ep in range(args['epoch']):
        print("Run Epoch {}".format(ep))
        for data in dataloader:
            # input_value = torch.tensor(data[:,:-1], dtype=torch.float)
            input_value = data[:, :-1].clone().detach().to(dtype=torch.float)
            # h_value = torch.tensor(data[:,-1], dtype=torch.float)
            h_value = data[:, -1].clone().detach().to(dtype=torch.float)
            # zero out gradients
            opt.zero_grad()
            # forward pass
            est_h_value = model(input_value)
            # loss
            loss = F.mse_loss(est_h_value.view(-1, 1), h_value.view(-1, 1))
            # backward pass
            loss.backward()
            # update the weights
            opt.step()
            # calculate validation loss
            loss_record.append(loss.detach())
            with torch.no_grad():
                input_value_val = torch.tensor(validation_set[:, :-1], dtype=torch.float)
                h_value_val = torch.tensor(validation_set[:, -1], dtype=torch.float)
                est_h_value_val = model(input_value_val)
                val_loss = F.mse_loss(est_h_value_val.view(-1, 1), h_value_val.view(-1, 1))
                validation_loss_record.append(val_loss)
            if train_it % 5 == 0:
                print("It {}, L2 training loss: {}".format(train_it, loss.item()))
                print("It {}, L2 validation loss: {}".format(train_it, val_loss.item()))
            if train_it > args['Max_it']:
                print('Stop fine-tuning')
                flag_stop = True
                break
            train_it += 1
        if flag_stop:
            break
    # plot the loss curves
    fig = plt.figure(figsize=(10, 5))
    ax1 = plt.subplot(121)
    ax1.plot(loss_record)
    ax1.title.set_text('Training Loss (MSE Loss)')
    ax1.set_xlabel('Iteration Number')
    ax2 = plt.subplot(122)
    ax2.plot(validation_loss_record)
    ax2.title.set_text('Validation Loss (MSE Loss)')
    ax2.set_xlabel('Iteration Number')
    plt.show()
    # save the model
    if args['output_model'] is not None:
        with open(args['output_model'], 'wb') as f:
            state_dict = model.state_dict()
            torch.save(state_dict, f)
...
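For context, meta_train() expects an args dictionary and a metaDataloader dict of per-task DataLoaders whose batches carry the regression target in the last column. Below is a minimal driver sketch under those assumptions; the sine-wave toy tasks, the make_task_loader helper, and the hyper-parameter values are illustrative and not part of the original script, which is assumed to be in scope.

# Hypothetical usage sketch for meta_train(); data and hyper-parameters are assumptions.
import math
import torch
from torch.utils.data import DataLoader

def make_task_loader(amplitude, phase, n_samples=10):
    # Each row is [x, h(x)]: the last column is the target, matching how
    # meta_train() slices train_sample[:, :-1] and train_sample[:, -1].
    x = torch.linspace(-5, 5, n_samples).unsqueeze(1)
    y = amplitude * torch.sin(x + phase)
    return DataLoader(torch.cat([x, y], dim=1), batch_size=n_samples)

num_tasks = 5
tasks = {f'task_{i}': (torch.rand(1).item() * 4 + 1, torch.rand(1).item() * math.pi)
         for i in range(num_tasks)}

metaDataloader = {
    'train': {name: make_task_loader(a, p) for name, (a, p) in tasks.items()},
    'test':  {name: make_task_loader(a, p) for name, (a, p) in tasks.items()},
}

args = {
    'in_channels': 1,       # one input feature (x)
    'hidden1_size': 40,
    'hidden2_size': 80,
    'alpha': 0.01,          # inner-loop step size
    'beta': 0.001,          # outer-loop (Adam) learning rate
    'first_order': False,
    'num_it_outer': 500,
    'num_tasks': num_tasks,
    'output_model': None,   # set to a path to save the meta-trained weights
}

meta_train(args, metaDataloader)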
predict.py
Source: predict.py
import os
import torch
import torch.utils.data as Data
import numpy as np
import matplotlib.pyplot as plt
from network import DeepSeparator

model_name = 'DeepSeparator'
# choose one sample for visualization
index = 35
test_input = np.load('../data/test_input.npy')
test_input = test_input[index]
test_output = np.load('../data/test_output.npy')
test_output = test_output[index]
test_input = torch.from_numpy(test_input)
test_output = torch.from_numpy(test_output)
test_input = torch.unsqueeze(test_input, 0)
test_output = torch.unsqueeze(test_output, 0)
test_torch_dataset = Data.TensorDataset(test_input)
print("torch.cuda.is_available() = ", torch.cuda.is_available())
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
model = DeepSeparator()
model.to(device)  # move the model to the GPU (CUDA) if available
if os.path.exists('checkpoint/' + model_name + '.pkl'):
    print('load model')
    model.load_state_dict(torch.load('checkpoint/' + model_name + '.pkl'))
test_input = test_input.float().to(device)
extracted_signal = model(test_input, 0)  # 0 for denoising, 1 for extracting artifact
extracted_artifact = model(test_input, 1)  # 0 for denoising, 1 for extracting artifact
test_input_value = test_input.cpu()
test_input_value = test_input_value.detach().numpy()
test_input_value = test_input_value[0]
test_output_value = test_output.cpu()
test_output_value = test_output_value.detach().numpy()
test_output_value = test_output_value[0]
extracted_signal_value = extracted_signal.cpu()
extracted_signal_value = extracted_signal_value.detach().numpy()
extracted_signal_value = extracted_signal_value[0]
extracted_artifact_value = extracted_artifact.cpu()
extracted_artifact_value = extracted_artifact_value.detach().numpy()
extracted_artifact_value = extracted_artifact_value[0]
l0, = plt.plot(test_input_value)
l1, = plt.plot(extracted_signal_value)
# l2, = plt.plot(extracted_artifact_value)
l3, = plt.plot(test_output_value)
# plt.legend([l0, l1, l2, l3], ['Raw EEG', 'Denoised EEG', 'Extracted Artifact', 'Clean EEG'], loc='upper right')
plt.legend([l0, l1, l3], ['Raw EEG', 'Denoised EEG', 'Clean EEG'], loc='upper right')
...
test_twitter_connector.py
Source: test_twitter_connector.py
#!/usr/bin/env python
import sys
import os.path
sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), os.path.pardir)))
sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), os.path.join(os.path.pardir, 'tweetbot'))))
import tweet_config, twitter_connector
from argparse import ArgumentParser
from unittest import mock

def test_twitter_connector_init():
    # set test config file to use
    #test_config_file = 'tests/test_files/test_config.ini'
    #tc = tweet_config.tweet_config(config_file=test_config_file)
    tc = tweet_config.tweet_config()
    # create tweet_config object
    tconn = twitter_connector.twitter_connector(tweet_config=tc)
    assert isinstance(tconn, twitter_connector.twitter_connector)

@mock.patch('twitter_connector.quote')
def test_percent_encode(mocked_twitter_connector_quote):
    #tc = tweet_config.tweet_config(config_file=test_config_file)
    tc = tweet_config.tweet_config()
    # create tweet_config object
    tconn = twitter_connector.twitter_connector(tweet_config=tc)
    # set test string
    test_input_value = 'this is a test string'
    # call method we want to test
    output = tconn._percent_encode(test_input_value)
    mocked_twitter_connector_quote.assert_called_with(test_input_value, safe="")
...
test.py
Source: test.py
...
        """
        data = [1, 2, 3]
        result = find_average(data)
        self.assertEqual(result, 2.0)

    def test_input_value(self):
        """
        Provide an assertion level for arg input
        """
        self.assertRaises(TypeError, find_average, True)


class TestCountOccurence(unittest.TestCase):
    def test_count_occurence(self):
        """
        Test that it returns the count of each unique value in the given list
        """
        data = [0, 0, 9, 0, 8, 9, 0, 7]
        result = count_occurence(data)
        output = {0: 4, 9: 2, 8: 1, 7: 1}
        self.assertAlmostEqual(result, output)

    def test_input_value(self):
        """
        Provide an assertion level for arg input
        """
        self.assertRaises(TypeError, count_occurence, True)


if __name__ == '__main__':
...
Playwright error connection refused in docker
playwright-python advanced setup
How to select an input according to a parent sibling label
Error when installing Microsoft Playwright
Trouble waiting for changes to complete that are triggered by Python Playwright `select_option`
Capturing and Storing Request Data Using Playwright for Python
Can Playwright be used to launch a browser instance
Trouble in Clicking on Log in Google Button of Pop Up Menu Playwright Python
Scrapy Playwright get date by clicking button
React locator example
I solved my problem. My Docker container (the frontend) was named "app", which was also the hostname of the frontend application. The application runs locally over plain HTTP, but Chromium and Gecko force an HTTPS connection for certain domain names, "app" being one of them. So I had to rename the Docker container that holds the frontend application.
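For completeness, a minimal Playwright for Python check against the renamed service might look like the sketch below; the service name frontend, the port, and the URL are assumptions rather than details from the answer above.

# Hypothetical smoke test after renaming the compose service from "app" to "frontend".
from playwright.sync_api import sync_playwright

with sync_playwright() as p:
    browser = p.chromium.launch()
    page = browser.new_page()
    # Per the answer above, Chromium forced HTTPS for the old "app" hostname;
    # with a different hostname, a plain HTTP URL is reachable again.
    page.goto("http://frontend:3000")
    print(page.title())
    browser.close()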
Check out the latest blogs from LambdaTest on this topic:
The sky’s the limit (and even beyond that) when you want to run test automation. Technology has developed so much that you can reduce time and stay more productive than you used to 10 years ago. You needn’t put up with the limitations brought to you by Selenium if that’s your go-to automation testing tool. Instead, you can pick from various test automation frameworks and tools to write effective test cases and run them successfully.
When it comes to web automation testing, a number of frameworks like Selenium, Cypress, Playwright, Puppeteer, etc., make it to the ‘preferred list’. The choice of test automation framework depends on parameters like the type, complexity, and scale of the project, along with the framework expertise available within the team. However, it’s no surprise that Selenium is still the most preferred framework among developers and QAs.
Playwright is a framework that I’ve always heard great things about but never had a chance to pick up until earlier this year. And since then, it’s become one of my favorite test automation frameworks to use when building a new automation project. It’s easy to set up, feature-packed, and one of the fastest, most reliable frameworks I’ve worked with.
The speed at which tests are executed and the “dearth of smartness” in testing are the two major problems developers and testers encounter.
With rapidly evolving technology and its ever-increasing demand in today’s world, Digital Security has become a major concern for the Software Industry. There are various ways through which Digital Security can be achieved, Captcha being one of them. Captcha is easy for humans to solve but hard for “bots” and other malicious software to figure out. However, Captcha has always been tricky for testers to automate, as many of them don’t know how to handle captcha in Selenium or any other test automation framework.
LambdaTest’s Playwright tutorial will give you a broader idea about the Playwright automation framework, its unique features, and use cases with examples to expand your understanding of Playwright testing. The tutorial gives A-to-Z guidance, from installing the Playwright framework to best practices and advanced concepts.