mcmc_svm.py
Source: mcmc_svm.py
import numpy as np
from utils import compute_target, compute_std_and_var


def compute_mcmc_svm(X, y, nu, abs_eps, max_iterations, noisy=False, noise_period=20, seed=None):
    """
    Implementation of algorithm MCMC-SVM for a value alpha of 1.

    Parameters
    ----------
    X : ndarray
        An n by d array of n observations in a d-dimensional space.
    y : ndarray
        An n by 1 array of n responses.
    nu : float
        Constant used for L-regularization.
    abs_eps : float
        Absolute tolerance used to confirm convergence.
    max_iterations : int
        Maximum number of iterations to run.
    noisy : bool
        Flag indicating whether or not to produce logs.
    noise_period : int
        Wait noise_period iterations between log lines.
    seed : int or None
        Random seed used to seed numpy.random. No seeding is performed if None is passed.

    Returns
    -------
    betas : ndarray
        An N x d x 1 array of the N successive beta values, where N is the number of iterations.
    b_values : ndarray
        An N x d x 1 array of the N successive prior mean values of beta, where N is the number of iterations.
    """
    if seed is not None:
        np.random.seed(seed)
    if noisy:
        n_fill = int(np.log10(max_iterations)) + 1

    _, d = X.shape

    yX = y * X
    std, var = compute_std_and_var(X)
    inv_var_matrix = np.diagflat(1 / var)

    beta = np.random.randn(d, 1)
    betas = [beta]
    omega_inv = (nu * std) / np.abs(beta)
    lam_inv = np.abs(1 - yX @ beta)
    b_values = []
    iteration = 1

    while True:
        if noisy and iteration % noise_period == 0:
            loss = compute_target(X, y, 1, nu, beta)
            print(f'At iteration ({str(iteration).zfill(n_fill)}/{str(max_iterations).zfill(n_fill)}), loss = {loss:.3f}')

        # prepping step 1
        B = np.linalg.inv(inv_var_matrix @ np.diagflat(omega_inv) / (nu ** 2) + yX.T @ np.diagflat(lam_inv) @ yX)
        b = B @ yX.T @ (1 + lam_inv)
        b_values.append(b)

        # step 1
        new_beta = np.random.multivariate_normal(b.squeeze(), B).reshape(-1, 1)

        if np.abs(beta - new_beta).max() <= abs_eps or iteration == max_iterations:
            if noisy:
                loss = compute_target(X, y, 1, nu, beta)
                print(f'Finished run ({str(iteration).zfill(n_fill)}/{str(max_iterations).zfill(n_fill)}), loss = {loss:.3f}')
            return np.array(betas), np.array(b_values)
        beta = new_beta
        betas.append(beta)

        # step 2
        gap = np.abs(1 - yX @ beta)
        is_zero = np.isclose(gap, 0)
        lam_inv = np.zeros_like(gap)
        lam_inv[is_zero] = 1 / (np.random.normal(size=is_zero.sum()) ** 2)
        lam_inv[~is_zero] = np.random.wald(1 / gap[~is_zero], 1)

        # step 3
        gap = np.abs(beta) / (nu * std)
        is_zero = np.isclose(gap, 0)
        omega_inv = np.zeros_like(gap)
        omega_inv[is_zero] = 1 / (np.random.normal(size=is_zero.sum()) ** 2)
        omega_inv[~is_zero] = np.random.wald(1 / gap[~is_zero], 1)
        iteration += 1


def compute_mcmc_svm_with_nu(X, y, anu, bnu, abs_eps, max_iterations, noisy=False, noise_period=20, seed=None):
    """
    Implementation of algorithm MCMC-SVM for a value alpha of 1, with nu sampled
    from its inverse-gamma prior at each iteration.

    Parameters
    ----------
    X : ndarray
        An n by d array of n observations in a d-dimensional space.
    y : ndarray
        An n by 1 array of n responses.
    anu : float
        Constant used for the prior of nu.
    bnu : float
        Constant used for the prior of nu.
    abs_eps : float
        Absolute tolerance used to confirm convergence.
    max_iterations : int
        Maximum number of iterations to run.
    noisy : bool
        Flag indicating whether or not to produce logs.
    noise_period : int
        Wait noise_period iterations between log lines.
    seed : int or None
        Random seed used to seed numpy.random. No seeding is performed if None is passed.

    Returns
    -------
    betas : ndarray
        An N x d x 1 array of the N successive beta values, where N is the number of iterations.
    b_values : ndarray
        An N x d x 1 array of the N successive prior mean values of beta, where N is the number of iterations.
    nu_values : ndarray
        An N x 1 array of the N successive nu values.
    """
    if seed is not None:
        np.random.seed(seed)
    if noisy:
        n_fill = int(np.log10(max_iterations)) + 1

    _, d = X.shape

    yX = y * X
    std, var = compute_std_and_var(X)
    inv_var_matrix = np.diagflat(1 / var)

    beta = np.random.randn(d, 1)
    betas = [beta]
    nu = 1
    nu_values = [nu]
    omega_inv = (nu * std) / np.abs(beta)
    lam_inv = np.abs(1 - yX @ beta)
    b_values = []
    iteration = 1

    while True:
        if noisy and iteration % noise_period == 0:
            loss = compute_target(X, y, 1, nu, beta)
            print(f'At iteration ({str(iteration).zfill(n_fill)}/{str(max_iterations).zfill(n_fill)}), loss = {loss:.3f}')

        # prepping step 1
        B = np.linalg.inv(inv_var_matrix @ np.diagflat(omega_inv) / (nu ** 2) + yX.T @ np.diagflat(lam_inv) @ yX)
        b = B @ yX.T @ (1 + lam_inv)
        b_values.append(b)

        # step 1
        new_beta = np.random.multivariate_normal(b.squeeze(), B).reshape(-1, 1)

        if np.abs(beta - new_beta).max() <= abs_eps or iteration == max_iterations:
            if noisy:
                loss = compute_target(X, y, 1, nu, beta)
                print(f'Finished run ({str(iteration).zfill(n_fill)}/{str(max_iterations).zfill(n_fill)}), loss = {loss:.3f}')
            return np.array(betas), np.array(b_values), np.array(nu_values).reshape(-1, 1)
        beta = new_beta
        betas.append(beta)

        # step 2
        gap = np.abs(1 - yX @ beta)
        is_zero = np.isclose(gap, 0)
        lam_inv = np.zeros_like(gap)
        lam_inv[is_zero] = 1 / (np.random.normal(size=is_zero.sum()) ** 2)
        lam_inv[~is_zero] = np.random.wald(1 / gap[~is_zero], 1)

        # step 3
        gap = np.abs(beta) / (nu * std)
        is_zero = np.isclose(gap, 0)
        omega_inv = np.zeros_like(gap)
        omega_inv[is_zero] = 1 / (np.random.normal(size=is_zero.sum()) ** 2)
        omega_inv[~is_zero] = np.random.wald(1 / gap[~is_zero], 1)

        # step 4
        nu = 1 / np.random.gamma(shape=anu + d, scale=1 / (bnu + np.abs(beta).sum()))
        nu_values.append(nu)
        iteration += 1
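A minimal usage sketch for the sampler above. The synthetic data and the name true_beta are illustrative, and it assumes the companion utils module (providing compute_target and compute_std_and_var, as imported at the top of the file) is on the path:

import numpy as np
from mcmc_svm import compute_mcmc_svm

rng = np.random.RandomState(0)
n, d = 200, 5
X = rng.randn(n, d)
true_beta = rng.randn(d, 1)          # illustrative ground truth
y = np.sign(X @ true_beta)           # labels in {-1, +1}, matching the y * X pairing

betas, b_values = compute_mcmc_svm(
    X, y, nu=1.0, abs_eps=1e-4, max_iterations=500,
    noisy=True, noise_period=100, seed=0,
)
print(betas.shape)                   # (N, d, 1), N = iterations actually performed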
codegen.py
Source: codegen.py
def is_zero(x):
    return str(x) == "0" or str(x) == "0.0"


def is_one(x):
    return str(x) == "1" or str(x) == "1.0"


class codegen:
    def __init__(self, s):
        if type(s) is str:
            self.str = s
        else:
            self.str = str(s)

    def __str__(self):
        return self.str

    def __eq__(a, b):
        return a.str == b.str

    def __neg__(a):
        if is_zero(a):
            return a
        else:
            return codegen("(-" + a.str + ")")

    def __add__(a, b):
        if is_zero(a):
            return b
        elif is_zero(b):
            return a
        elif a == b:
            return codegen(2) * a
        else:
            return codegen("(" + a.str + "+" + b.str + ")")

    def __sub__(a, b):
        if is_zero(a):
            return -b  # go through __neg__ so the result stays parenthesized
        elif is_zero(b):
            return a
        elif a == b:
            return codegen(0)
        else:
            return codegen("(" + a.str + "-" + b.str + ")")

    def __mul__(a, b):
        if is_zero(a) or is_zero(b):
            return codegen(0)
        elif is_one(b):
            return a
        elif is_one(a):
            return b
        elif a == b:
            return a ** codegen(2)
        else:
            return codegen("(" + a.str + "*" + b.str + ")")

    # __truediv__ is the Python 3 division hook; the original defined the
    # Python 2-only __div__, which Python 3 never calls.
    def __truediv__(a, b):
        if is_zero(a):
            return codegen(0)
        elif is_zero(b):
            raise ZeroDivisionError("division by zero")
        elif is_one(b):
            return a
        elif a == b:
            return codegen(1)
        else:
            return codegen("(" + a.str + "/" + b.str + ")")

    def __pow__(a, b):
        if is_zero(b):  # checked first so x**0 (including 0**0) folds to 1
            return codegen(1)
        elif is_zero(a) or is_one(a) or is_one(b):
            return a
        else:
            return codegen("(" + a.str + "**" + b.str + ")")

    def exp(a):
        return codegen("exp(" + a.str + ")")

    def log(a):
        return codegen("log(" + a.str + ")")

    def sin(a):
        return codegen("sin(" + a.str + ")")

    def cos(a):
        return codegen("cos(" + a.str + ")")

    def tan(a):
        return codegen("tan(" + a.str + ")")

    def cot(a):
        return codegen("cot(" + a.str + ")")
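A quick demonstration of the algebraic simplifications the operator overloads perform; the expressions are arbitrary:

x = codegen("x")
y = codegen("y")

print(x + codegen(0))     # x        (additive identity removed)
print(x * codegen(1))     # x        (multiplicative identity removed)
print(x + x)              # (2*x)    (equal operands folded)
print(x - x)              # 0
print((x - y) / (x - y))  # 1
print(x ** codegen(0))    # 1
print(codegen.sin(x))     # sin(x)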
gru.py
Source: gru.py
import torch
from torch import nn


class GRU(nn.Module):
    def __init__(self, state_space, hidden_size, enable_recurrent=True):
        super(GRU, self).__init__()
        self.enable_recurrent = enable_recurrent
        self.hidden_size = hidden_size
        if enable_recurrent:
            self.gru = nn.GRU(state_space, hidden_size)
            for name, param in self.gru.named_parameters():
                if 'bias' in name:
                    nn.init.constant_(param, 0)
                elif 'weight' in name:
                    nn.init.orthogonal_(param)

    def forward(self, inputs, hxs, masks):
        if inputs.size(0) == hxs.size(0):
            # single step: one observation per environment
            x, hxs = self.gru(inputs.unsqueeze(0), (hxs * masks).unsqueeze(0))
            x, hxs = x.squeeze(0), hxs.squeeze(0)
        else:
            # rollout: a (t * n) flat batch unflattened into t steps of n envs
            n = hxs.size(0)
            t = int(inputs.size(0) / n)
            x = inputs.view(t, n, inputs.size(1))
            masks = masks.view(t, n)
            # indices of steps where at least one episode was reset (mask == 0)
            is_zero = ((masks[1:] == 0.0)
                       .any(dim=-1).nonzero().squeeze().cpu())
            if is_zero.dim() == 0:
                is_zero = [is_zero.item() + 1]
            else:
                is_zero = (is_zero + 1).numpy().tolist()
            is_zero = [0] + is_zero + [t]
            hxs = hxs.unsqueeze(0)
            outputs = []
            for i in range(len(is_zero) - 1):
                # run the GRU over each contiguous segment, zeroing the hidden
                # state at segment boundaries where an episode ended
                start = is_zero[i]
                end = is_zero[i + 1]
                score, hxs = self.gru(x[start:end],
                                      hxs * masks[start].view(1, -1, 1))
                outputs.append(score)
            x = torch.cat(outputs, dim=0)
            x = x.view(t * n, -1)
            hxs = hxs.squeeze(0)
        return x, hxs
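A small smoke test exercising the single-step path (one observation per environment); all sizes are arbitrary:

state_space, hidden_size, n_envs = 8, 16, 4
model = GRU(state_space, hidden_size)

inputs = torch.randn(n_envs, state_space)
hxs = torch.zeros(n_envs, hidden_size)
masks = torch.ones(n_envs, 1)   # 1 = episode continuing, 0 = just reset

x, hxs = model(inputs, hxs, masks)
print(x.shape, hxs.shape)       # torch.Size([4, 16]) torch.Size([4, 16])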