Best Python code snippets using the `slash` testing framework
ModelHelper.py
Source:ModelHelper.py
...29 self._build_model_vq()30 elif self._settings.identifier == 'vanilla':31 self._build_model_vanilla()32 elif self._settings.identifier == 'combination':33 self._build_combination()34 self._input_tensor = self._misc.set_probabilities('model_checkpoint/vq_graph/p_s_m.mat')35 # elif self.settings.identifier == 'restore':36 # self._build_combination()37 def _build_model_vq(self):38 """39 Build your model for training. All of the architecture40 is defined here.41 :param scale_softmax: Scaling for the WTA-layer (Winner-Takes-All)42 :param codebook_size: Size of the codebook43 """44 # Start here to define your network45 # ------------------------------------------------------------------46 num_neurons = 51247 activation = tf.nn.relu48 regulizer = None49 if self._settings.l2_loss_reg:50 regulizer = tf.contrib.layers.l2_regularizer(scale=self._settings.scale_l2)51 with tf.variable_scope('base_network'):52 fc = tf.layers.dense(self.features, num_neurons, activation=activation, kernel_regularizer=regulizer)53 # fc1_bn = tf.layers.batch_normalization(fc1, training=self.train, center=False, scale=False)54 fc = tf.layers.batch_normalization(fc, training=self.train)55 # fc = tf.layers.dropout(fc, rate=self._settings.do_rate, training=self.train)56 fc = tf.layers.dense(fc, num_neurons, activation=activation, kernel_regularizer=regulizer)57 # fc2_bn = tf.layers.batch_normalization(fc2, training=self.train, center=False, scale=False)58 fc = tf.layers.batch_normalization(fc, training=self.train)59 # fc = tf.layers.dropout(fc, rate=self._settings.do_rate, training=self.train)60 fc = tf.layers.dense(fc, num_neurons, activation=activation, kernel_regularizer=regulizer)61 # fc3_bn = tf.layers.batch_normalization(fc3, training=self.train, center=False, scale=False)62 fc = tf.layers.batch_normalization(fc, training=self.train)63 # fc = tf.layers.dropout(fc, rate=self._settings.do_rate, training=self.train)64 #65 fc = tf.layers.dense(fc, num_neurons, activation=activation, 
kernel_regularizer=regulizer)66 # fc4_bn = tf.layers.batch_normalization(fc4, training=self.train, center=False, scale=False)67 fc = tf.layers.batch_normalization(fc, training=self.train)68 fc = tf.layers.dropout(fc, rate=self._settings.do_rate, training=self.train)69 #70 # fc = tf.layers.dense(fc, num_neurons, activation=activation)71 # # # fc3_bn = tf.layers.batch_normalization(fc3, training=self.train, center=False, scale=False)72 # fc = tf.layers.batch_normalization(fc, training=self.train)73 # fc = tf.layers.dropout(fc, rate=self._settings.do_rate, training=self.train)74 # # #75 # fc = tf.layers.dense(fc, num_neurons, activation=activation)76 # # # fc4_bn = tf.layers.batch_normalization(fc4, training=self.train, center=False, scale=False)77 # fc = tf.layers.batch_normalization(fc, training=self.train)78 # fc = tf.layers.dropout(fc, rate=self._settings.do_rate, training=self.train)79 # # WTA-layer starts here80 out = tf.layers.dense(fc, self._settings.codebook_size, activation=tf.nn.sigmoid,81 kernel_regularizer=regulizer)82 out_scaled = tf.scalar_mul(self._settings.scale_soft, out)83 # output without softmax84 self.logits = out_scaled85 # output with soft, be aware use a name 'nn_output' for the output node!86 self.inference = tf.nn.softmax(self.logits, name='nn_output')87 # Low rank matrix factorization88 # (c.f. 
LOW-RANK MATRIX FACTORIZATION FOR DEEP NEURAL NETWORK TRAINING WITH89 # HIGH-DIMENSIONAL OUTPUT TARGETS)90 # out_1 = tf.layers.dense(fc4_bn, 512, activation=None)91 # out = tf.layers.dense(out_1, self._settings.codebook_size, activation=None)92 # out_scaled = tf.scalar_mul(self._settings.scale_soft, out)93 # # output without softmax94 # self.logits = out_scaled95 # # output with soft, be aware use a name 'nn_output' for the output node!96 # self.inference = tf.nn.softmax(self.logits, name='nn_output')97 # ------------------------------------------------------------------98 # end of definition of network99 def _build_model_vanilla(self):100 num_neurons = 512101 with tf.variable_scope('vanilla_network'):102 fc1 = tf.layers.dense(self.features, num_neurons, activation=tf.nn.relu)103 fc1_bn = tf.layers.batch_normalization(fc1, training=self.train, center=False, scale=False)104 fc1_dropout = tf.layers.dropout(fc1_bn, rate=0.25, training=self.train)105 fc2 = tf.layers.dense(fc1_dropout, num_neurons, activation=tf.nn.relu)106 fc2_bn = tf.layers.batch_normalization(fc2, training=self.train, center=False, scale=False)107 fc2_dropout = tf.layers.dropout(fc2_bn, rate=0.25, training=self.train)108 #109 fc3 = tf.layers.dense(fc2_dropout, num_neurons, activation=tf.nn.relu)110 fc3_bn = tf.layers.batch_normalization(fc3, training=self.train, center=False, scale=False)111 fc3_dropout = tf.layers.dropout(fc3_bn, rate=0.25, training=self.train)112 fc4 = tf.layers.dense(fc3_dropout, num_neurons, activation=tf.nn.relu)113 fc4_bn = tf.layers.batch_normalization(fc4, training=self.train, center=False, scale=False)114 fc4_dropout = tf.layers.dropout(fc4_bn, rate=0.25, training=self.train)115 fc5 = tf.layers.dense(fc4_dropout, num_neurons, activation=tf.nn.relu)116 fc5_bn = tf.layers.batch_normalization(fc5, training=self.train, center=False, scale=False)117 fc5_dropout = tf.layers.dropout(fc5_bn, rate=0.25, training=self.train)118 self.logits = tf.layers.dense(fc5_dropout, 
self._settings.num_labels, activation=None)119 # output with soft, be aware use a name 'nn_output' for the output node!120 self.inference = tf.nn.softmax(self.logits, name='nn_output')121 def _build_combination(self):122 # we combine the nnvq and vanilla network123 num_neurons = 512124 # with tf.variable_scope('scaling_network'):125 # fc1_scale = tf.layers.dense(self.features, 1, activation=tf.nn.sigmoid)126 # self.scale = 35 * fc1_scale127 # # self.scale = tf.Print(self.scale, [tf.reduce_min(self.scale)])128 # first we build the nnvq network129 with tf.variable_scope('base_network'):130 fc1 = tf.layers.dense(self.features, num_neurons, activation=tf.nn.relu)131 fc1_bn = tf.layers.batch_normalization(fc1, training=self.train, center=False, scale=False)132 fc1_dropout = tf.layers.dropout(fc1_bn, rate=0.25, training=self.train)133 fc2 = tf.layers.dense(fc1_dropout, num_neurons, activation=tf.nn.relu)134 fc2_bn = tf.layers.batch_normalization(fc2, training=self.train, center=False, scale=False)135 fc2_dropout = tf.layers.dropout(fc2_bn, rate=0.25, training=self.train)...
cartesian.py
Source:cartesian.py
# NOTE(review): scraped excerpt of cartesian.py (a slash test-suite helper)
# with the original source line numbers fused into the text; NOT runnable
# as-is, and truncated at both ends. Visible logic:
#   * Cartesian.check(iterator): builds the expected list of dicts by applying
#     _build_combination to every element of itertools.product over the
#     registered sets, sorts both expected and the given iterator by their
#     sorted items, and asserts they are equal.
#   * Cartesian._build_combination(names, combination): zips set names with one
#     product tuple into a dict, then copies values for each recorded
#     (source, target) pair in self._assigns.
#   * SetMaker.make_set(size=3): registers a new named set in
#     self.cartesian.sets as ["<name>0", "<name>1", ...]; asserts the name is
#     not already registered. (Class truncated here.)
...22 return returned23 def check(self, iterator):24 names = list(self.sets)25 sets = [self.sets[name] for name in names]26 expected = sorted((self._build_combination(names, combination) for combination in itertools.product(*sets)), key=lambda d: sorted(d.items()))27 got = sorted(iterator, key=lambda d: sorted(d.items()))28 assert got == expected29 def _build_combination(self, names, combination):30 returned = {}31 for name, value in zip(names, combination):32 returned[name] = value33 for assign_source, assign_target in self._assigns:34 returned[assign_target] = returned[assign_source]35 return returned36class SetMaker(object):37 def __init__(self, cartesian, name):38 super(SetMaker, self).__init__()39 self.cartesian = cartesian40 self.name = name41 def make_set(self, size=3):42 assert self.name not in self.cartesian.sets43 returned = self.cartesian.sets[self.name] = ["{}{}".format(self.name, i) for i in range(size)]...
Learn to execute automation testing from scratch with the LambdaTest Learning Hub. Right from setting up the prerequisites to run your first automation test, to following best practices and diving deeper into advanced test scenarios, the LambdaTest Learning Hub compiles step-by-step guides to help you become proficient with different test automation frameworks such as Selenium, Cypress, and TestNG.
You can also refer to the video tutorials on the LambdaTest YouTube channel for step-by-step demonstrations from industry experts.
Get 100 minutes of automation testing FREE!