Best Python code snippet using autotest_python
model.py
Source:model.py
import torch
import torch.nn as nn
import sys
import random


# Jordan-style RNN built from explicit cells, over fixed phonological features.
class Feature_RNNWithCells(nn.Module):
    def __init__(self, params, feature_table):
        # The ith row of the feature table is the set of features for symbol i in phone2ix.
        super(Feature_RNNWithCells, self).__init__()
        self.features = feature_table
        self.vocab_size = params['inv_size']
        self.d_feats = params['d_feats']
        #self.n_layers = params['num_layers']
        #self.d_hid = params['d_hid']
        self.device = params['device']
        # Input to recurrent layer; the default nonlinearity is tanh.
        self.i2R = nn.RNNCell(self.d_feats, self.vocab_size).to(self.device)
        # Recurrent to output layer.
        self.R2o = nn.Linear(self.vocab_size, self.vocab_size).to(self.device)
        # Recurrent to hidden layer.
        self.R2h = nn.Linear(self.vocab_size, self.vocab_size).to(self.device)
        self.softmax = nn.Softmax(dim=1)

    def batch_to_features(self, batch, feature_table):
        batches, seq_len = batch.size()

    def forward(self, batch):
        batches, seq_len = batch.size()
        inventory_size, num_feats = self.features.size()
        output = torch.zeros(1, self.vocab_size)
        # Final shape would be batch size x seq_len x number of features.
        full_representation = torch.zeros(batches, seq_len, num_feats, requires_grad=False)
        for i in range(batches):
            outputs = []
            for j in range(seq_len):
                full_representation = torch.unsqueeze(self.features[batch[i, j]], 0)
                # By passing output instead of hidden as the second argument, we make
                # this a Jordan rather than an Elman net. The cell's "output" is really
                # a hidden vector of the same dimension as the state we passed in.
                output = self.i2R(full_representation, output)
                output = self.R2o(output)  # output has dim vocab_size
                # This hidden vector also gets passed back to the cell, so the network
                # is a Jordan/Elman hybrid.
                hidden = self.R2h(output)
                output = self.softmax(output)
                output = torch.mul(output, torch.rand(output.size()))
                outputs.append(output)
                output = output + hidden
        # NOTE: `outputs` is reset for each batch item above, so only the last item's
        # sequence survives this stack.
        outputs = torch.stack(outputs, dim=1)
        return outputs, hidden


# Single-direction RNN language model, optionally tied embeddings.
class Emb_RNNLM(nn.Module):
    def __init__(self, params):
        super(Emb_RNNLM, self).__init__()
        self.vocab_size = params['inv_size']
        self.d_emb = params['d_emb']
        self.n_layers = params['num_layers']
        self.d_hid = params['d_hid']
        self.embeddings = nn.Embedding(self.vocab_size, self.d_emb)
        self.device = params['device']

        # Input to recurrent layer; the default nonlinearity is tanh.
        self.i2R = nn.RNN(
            self.d_emb, self.d_hid, batch_first=True, num_layers=self.n_layers
        ).to(self.device)
        # Recurrent to output layer.
        self.R2o = nn.Linear(self.d_hid, self.vocab_size).to(self.device)
        if params['tied']:
            if self.d_emb == self.d_hid:
                self.R2o.weight = self.embeddings.weight
            else:
                print("Dimensions don't support tied embeddings")
        self.softmax = nn.Softmax(dim=1)

    def forward(self, batch):
        batches, seq_len = batch.size()
        embs = self.embeddings(batch)
        output, hidden = self.i2R(embs)
        outputs = self.R2o(output)
        return outputs, hidden
        #return outputs, output, embs


# Single-direction RNN encoder-decoder, optionally tied embeddings.
class Emb_encoder_decoder(nn.Module):
    def __init__(self, params):
        super(Emb_encoder_decoder, self).__init__()
        self.vocab_size = params['inv_size']
        self.d_emb = params['d_emb']
        self.n_layers = params['num_layers']
        self.d_hid = params['d_hid']
        self.embeddings = nn.Embedding(self.vocab_size, self.d_emb)
        self.device = params['device']

        # Input to recurrent layer; the default nonlinearity is tanh.
        self.encoder = nn.RNN(
            self.d_emb, self.d_hid, batch_first=True, num_layers=self.n_layers
        ).to(self.device)
        # Recurrent to output layer.
        self.R2o = nn.Linear(self.d_hid, self.vocab_size).to(self.device)
        if params['tied']:
            if self.d_emb == self.d_hid:
                self.R2o.weight = self.embeddings.weight
            else:
                print("Dimensions don't support tied embeddings")
        # NOTE: the decoder cell is defined but not used in forward as written.
        self.decoder = nn.RNNCell(self.vocab_size, self.d_hid).to(self.device)
        self.softmax = nn.Softmax(dim=2)

    def forward(self, batch):
        batches, seq_len = batch.size()
        embs = self.embeddings(batch)
        output, hidden = self.encoder(embs)
        outputs = self.R2o(output)
        outputs = self.softmax(outputs)
        return outputs
        #return outputs, output, embs


# Single-direction RNN with fixed, prespecified features.
class Feature_RNNLM(nn.Module):
    def __init__(self, params, feature_table):
        # The ith row of the feature table is the set of features for symbol i in phone2ix.
        super(Feature_RNNLM, self).__init__()
        self.features = feature_table
        self.vocab_size = params['inv_size']
        self.d_feats = params['d_feats']
        self.n_layers = params['num_layers']
        self.d_hid = params['d_hid']
        self.device = params['device']
        # Input to recurrent layer; the default nonlinearity is tanh.
        self.i2R = nn.RNN(
            self.d_feats, self.d_hid, batch_first=True,
            num_layers=self.n_layers, bidirectional=True
        ).to(self.device)
        # Recurrent to output layer (doubled width for the two directions).
        self.R2o = nn.Linear(self.d_hid * 2, self.vocab_size).to(self.device)
        self.softmax = nn.Softmax(dim=2)
        #self.lstm = nn.LSTM(self.d_feats, self.d_hid, batch_first=True, num_layers=self.n_layers).to(self.device)

    def batch_to_features(self, batch, feature_table):
        batches, seq_len = batch.size()

    def forward(self, batch):
        batches, seq_len = batch.size()
        inventory_size, num_feats = self.features.size()
        # Final shape: batch size x seq_len x number of features.
        full_representation = torch.zeros(batches, seq_len, num_feats, requires_grad=False)
        for i in range(batches):
            for j in range(seq_len):
                full_representation[i, j, :] = self.features[batch[i, j]]
        output, hidden = self.i2R(full_representation)
        #output, (hidden, _) = self.lstm(full_representation)
        outputs = self.R2o(output)
        outputs = self.softmax(outputs)
        return outputs


class Feature_encoder_decoder(nn.Module):
    def __init__(self, params, feature_table):
        # The ith row of the feature table is the set of features for symbol i in phone2ix.
        super(Feature_encoder_decoder, self).__init__()
        self.features = feature_table
        self.vocab_size = params['inv_size']
        self.d_feats = params['d_feats']
        self.n_layers = params['num_layers']
        self.d_hid = params['d_hid']
        self.device = params['device']
        # Input to recurrent layer; the default nonlinearity is tanh.
        self.encoder = nn.RNN(
            self.d_feats, self.d_hid, batch_first=True, num_layers=self.n_layers
        ).to(self.device)
        self.decoder = nn.RNNCell(self.vocab_size, self.d_hid).to(self.device)
        # Recurrent to output layer.
        self.R2o = nn.Linear(self.d_hid, self.vocab_size).to(self.device)
        self.softmax = nn.Softmax(dim=1)

    def batch_to_features(self, batch, feature_table):
        batches, seq_len = batch.size()

    def forward(self, batch):
        batches, seq_len = batch.size()
        inventory_size, num_feats = self.features.size()
        # Final shape: batch size x seq_len x number of features.
        full_representation = torch.zeros(batches, seq_len, num_feats, requires_grad=False)
        outputs = []
        for i in range(batches):
            for j in range(seq_len):
                full_representation[i, j, :] = self.features[batch[i, j]]
        _, hidden = self.encoder(full_representation)
        output = torch.zeros((batches, self.vocab_size), requires_grad=False)
        hidden = torch.squeeze(hidden, 0)
        # NOTE: the decoder cell already processes the whole batch at once, so the
        # outer loop over `batches` repeats the decode once per batch item.
        for i in range(batches):
            for j in range(seq_len):
                hidden = self.decoder(output, hidden)
                output = self.R2o(hidden)
                output = self.softmax(output)
                outputs.append(output)
        outputs = torch.stack(outputs, dim=0).permute(1, 0, 2)
        return outputs


class Feature_RNN_for_single_seg_prediction(nn.Module):
    def __init__(self, params, feature_table):
        # The ith row of the feature table is the set of features for symbol i in phone2ix.
        super(Feature_RNN_for_single_seg_prediction, self).__init__()
        self.features = feature_table
        self.vocab_size = params['inv_size']
        self.d_feats = params['d_feats']
        self.n_layers = params['num_layers']
        self.d_hid = params['d_hid']
        self.device = params['device']
        # Input to recurrent layer; the default nonlinearity is tanh.
        self.i2R = nn.RNN(
            self.d_feats, self.d_hid, batch_first=True, num_layers=self.n_layers
        ).to(self.device)
        # Recurrent to output layer.
        self.R2o = nn.Linear(self.d_hid, self.vocab_size).to(self.device)

    def batch_to_features(self, batch, feature_table):
        batches, seq_len = batch.size()

    def forward(self, batch):
        batches, seq_len = batch.size()
        inventory_size, num_feats = self.features.size()
        # Final shape: batch size x seq_len x number of features.
        full_representation = torch.zeros(batches, seq_len, num_feats, requires_grad=False)
        for i in range(batches):
            for j in range(seq_len):
                full_representation[i, j, :] = self.features[batch[i, j]]
        output, hidden = self.i2R(full_representation)
        # Predict a single segment from the final timestep only.
        outputs = self.R2o(output[:, -1, :])
        return outputs


class LexicalEmbeddingRNN(nn.Module):
    def __init__(self, params):
        super(LexicalEmbeddingRNN, self).__init__()
        self.device = params['device']
        self.num_words = params['num_words']
        self.vocab_size = params['inv_size']
        self.d_lex_emb = params['d_lex_emb']
        self.d_emb = params['d_emb']
        self.d_hid = params['d_hid']
        self.lexical_embeddings = nn.Embedding(self.num_words, self.d_lex_emb)
        #self.sos_embedding = nn.Embedding(1, self.d_emb)
        self.rnncell = nn.RNNCell(self.d_hid, self.d_hid)
        self.projection1 = nn.Linear(self.d_lex_emb * 2, self.d_hid)
        self.projection2 = nn.Linear(self.d_hid, self.d_lex_emb)
        self.projection3 = nn.Linear(self.d_lex_emb, self.vocab_size)

    def forward(self, wd, lex_wd):
        wds, seq_len = wd.size()
        emb = self.lexical_embeddings(lex_wd)  # lex_wd is a tensor index of the word
        # We pass the model a lexical embedding of the word and an encoding of a
        # start-of-string symbol. On each timestep, the RNN cell takes as its input
        # the concatenation of two things (following Malouf): the last outputted
        # symbol and the embedding of the lexeme. The input to the hidden layer is
        # the output of the hidden layer in the previous cell.
        # How do we deal with the difference in dimensionality between the embedding
        # of the lexeme (which would seem to need to be large, since there are so many
        # different words) and the encoding of each segment, of which there are fewer?
        # If the concatenation goes through a linear layer, it should be able to find
        # weights that give the lexeme embedding and the input segment the correct
        # relative amount of influence, even if the former has a higher dimension.
        input_seg = torch.zeros((1, self.d_lex_emb))  # zeros stand in for the <sos> embedding
        lex_emb = torch.squeeze(emb, 0)  # size: d_lex_emb
        outputs = []
        for j in range(seq_len):
            m = torch.cat((lex_emb, input_seg), 1)  # size: d_lex_emb + d_lex_emb
            m = self.projection1(m)  # size: d_hid
            new_hidden = self.rnncell(m)
            output = self.projection2(new_hidden)  # size: d_lex_emb
            input_seg = torch.add(lex_emb, output)
            output_to_return = self.projection3(output)  # size: vocab_size
            outputs.append(output_to_return)
        word_output = torch.stack(outputs, 0)
...
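As a quick orientation to how these models are called, here is a minimal usage sketch for Emb_RNNLM. It is an illustration, not part of the source: the params keys are the ones its __init__ reads, but the sizes and the random batch are invented.

import torch

# Hypothetical hyperparameters; sizes are arbitrary toy values.
params = {
    'inv_size': 30,    # segment inventory (vocabulary) size
    'd_emb': 16,       # embedding dimension
    'd_hid': 16,       # hidden dimension; equal to d_emb so the weights can be tied
    'num_layers': 1,
    'device': 'cpu',
    'tied': True,
}
model = Emb_RNNLM(params)

# A fake batch of 4 forms, each a sequence of 7 segment indices.
batch = torch.randint(0, params['inv_size'], (4, 7))
logits, hidden = model(batch)
print(logits.size())  # torch.Size([4, 7, 30]): a score for every segment at every timestep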
interface.py
Source:interface.py
1"""Some general interfaces around vison components."""2import tensorflow as tf3class VisionComponent(tf.keras.Model):4 """Abstract class that models that encode observations should extend."""5 def compute_full_representation(self, x, training=None):6 """Returns a representation given a batch of raw observations.7 Args:8 x: a tf.Tensor containing a batch of raw observations9 training: a bool or None saying whether we are training10 Returns:11 Either a single tf.Tensor or a 2-tuple of a tf.Tensor and dict12 mapping strings to tf.Tensors.13 The tf.Tensor will be the representation as a single tensor. For14 deterministic encoders, this will just be the representation. For15 a VAE, this will be the mean of the posterior distribution.16 The dict, if present, allows for returning other information relevant17 to the representation. For example, this lets you return the18 standard deviation of the posterior distribution for a VAE.19 """20 raise NotImplementedError()21 @tf.function22 def compute_tensor_representation(self, x, training=None):23 # TODO: Add docs24 full_representation = self.compute_full_representation(x, training=training)25 if isinstance(full_representation, tf.Tensor):26 return full_representation27 elif isinstance(full_representation, (tuple, list)):28 return full_representation[0]29 else:30 raise ValueError(31 f"The full representation was an invalid type: {full_representation}"32 )33 def get_loss_fn(self):34 """Returns the loss function that will be used to train the encoder.35 Returns:36 An acceptable keras loss function. This can be a function taking37 in (y_true, y_pred) as arguments and returning a scalar loss tensor.38 It can also be a instance of a subclass of tf.keras.losses.Loss....
stack.py
Source:stack.py
from stack_exceptions import StackUnderflowException


class Stack:

    def __init__(self, elems=[]):
        self.index = len(elems) - 1  # Subtracting one for 0-based indexing

        self.structure = [elem for elem in elems]  # List comprehension... because it's cool :)

    def isEmpty(self):
        return self.index == -1

    def push(self, elem):
        self.structure.append(elem)
        self.index += 1

    def pop(self):
        if self.isEmpty():
            raise StackUnderflowException()
        else:
            self.structure = self.structure[:self.index]
            self.index -= 1

    def peekAndRemove(self):
        peeked_data = self.peek()
        self.pop()
        return peeked_data

    def peek(self):
        return self.structure[self.index]

    def __repr__(self):
        """Visualize the stack through blocks"""
        containers = "-"
        sides = "| "

        with open("output.txt", "w+") as final_repr:

            # First, we'll add the header
            with open("header.txt", "r") as header:
                final_repr.write(header.read())

            for elem_index in range(len(self.structure) - 1, -1, -1):
                item_length = len(str(self.structure[elem_index]))

                full_representation = ""

                container_amt = containers * item_length + "----"

                full_representation += container_amt  # first line
                full_representation += "\n" + "| " + str(self.structure[elem_index]) + " |"  # second line (containing the value)
                full_representation += "\n" + container_amt  # final line

                final_repr.write(full_representation + "\n")  # make sure the rest of the data is on the next line

            # Now, we'll add the footer
            with open("footer.txt", "r") as footer:
                final_repr.write(footer.read())

        # We're all ready to go to return
        final = open("output.txt", "r")
...
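A brief usage sketch, under the assumption that the local stack_exceptions module provides StackUnderflowException as imported above; the values are arbitrary.

s = Stack([1, 2, 3])
s.push(4)
print(s.peek())           # 4: the most recently pushed element
print(s.peekAndRemove())  # 4: returned and then removed
s.pop()                   # removes 3
print(s.isEmpty())        # False: 1 and 2 remain on the stack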