Best Python code snippets using molecule_python
corrmut_test.py
Source:corrmut_test.py
# NOTE(review): reconstructed from a garbled scrape; the header of the class
# that owns the test_*_conv_* methods lies outside this chunk, so a
# placeholder class name is used to keep the methods syntactically grouped.
# Assumes `corrmut` and `np` (numpy) are imported earlier in the file.
class TestHasConverged():
    """Tests for corrmut.has_converged under hard and soft EM."""

    ###########
    # Hard EM #  (banner reconstructed; only its closing row was visible)
    ###########
    def test_hard_conv_1(self):
        # Identical all-ones labels: converged.
        labels = [1] * 10
        pre_labels = [1] * 10
        converged = corrmut.has_converged(labels, pre_labels, "hard")
        assert converged

    def test_hard_conv_2(self):
        # Identical all-zeros labels: converged.
        labels = [0] * 10
        pre_labels = [0] * 10
        converged = corrmut.has_converged(labels, pre_labels, "hard")
        assert converged

    def test_hard_conv_3(self):
        # Every label flipped 0 -> 1: not converged.
        labels = [1] * 10
        pre_labels = [0] * 10
        converged = corrmut.has_converged(labels, pre_labels, "hard")
        assert not converged

    def test_hard_conv_4(self):
        # Every label flipped 1 -> 0: not converged.
        labels = [0] * 10
        pre_labels = [1] * 10
        converged = corrmut.has_converged(labels, pre_labels, "hard")
        assert not converged

    def test_hard_conv_5(self):
        # All labels swapped between the two halves: not converged.
        labels = [1, 1, 1, 0, 0, 0]
        pre_labels = [0, 0, 0, 1, 1, 1]
        converged = corrmut.has_converged(labels, pre_labels, "hard")
        assert not converged

    def test_hard_conv_6(self):
        # A single differing label is enough to block hard convergence.
        labels = [1, 1, 1, 1, 0]
        pre_labels = [1, 1, 1, 1, 1]
        converged = corrmut.has_converged(labels, pre_labels, "hard")
        assert not converged

    def test_hard_conv_7(self):
        # Mixed but identical labels: converged.
        labels = [1, 1, 1, 0, 0, 0]
        pre_labels = [1, 1, 1, 0, 0, 0]
        converged = corrmut.has_converged(labels, pre_labels, "hard")
        assert converged

    ###########
    # Soft EM #
    ###########
    def test_soft_conv_1(self):
        labels = [1] * 10
        pre_labels = [1] * 10
        converged = corrmut.has_converged(labels, pre_labels, "soft")
        assert converged

    def test_soft_conv_2(self):
        labels = [0] * 10
        pre_labels = [0] * 10
        converged = corrmut.has_converged(labels, pre_labels, "soft")
        assert converged

    def test_soft_conv_3(self):
        labels = [1] * 10
        pre_labels = [0] * 10
        converged = corrmut.has_converged(labels, pre_labels, "soft")
        assert not converged

    def test_soft_conv_4(self):
        labels = [0] * 10
        pre_labels = [1] * 10
        converged = corrmut.has_converged(labels, pre_labels, "soft")
        assert not converged

    def test_soft_conv_5(self):
        labels = [1, 1, 1, 0, 0, 0]
        pre_labels = [0, 0, 0, 1, 1, 1]
        converged = corrmut.has_converged(labels, pre_labels, "soft")
        assert not converged

    def test_soft_conv_6(self):
        labels = [1, 1, 1, 1, 0]
        pre_labels = [1, 1, 1, 1, 1]
        converged = corrmut.has_converged(labels, pre_labels, "soft")
        assert not converged

    def test_soft_conv_7(self):
        labels = [1, 1, 1, 0, 0, 0]
        pre_labels = [1, 1, 1, 0, 0, 0]
        converged = corrmut.has_converged(labels, pre_labels, "soft")
        assert converged

    def test_soft_conv_8(self):
        # Tiny absolute difference (1e-3 vs 1e-16) is within tolerance.
        labels = [0.99, 0.95, 0.90, 0.95, 0.1, 0.1, 0.001, 1e-3]
        # FIX: pre_labels was wrapped in an extra list ([[...]]) in the
        # original — almost certainly a typo, since every sibling test
        # passes a flat list; unwrapped to a single level.
        pre_labels = [0.99, 0.95, 0.90, 0.95, 0.1, 0.1, 0.001, 1e-16]
        tol = 5e-3
        converged = corrmut.has_converged(labels, pre_labels, "soft", tol=tol)
        assert converged

    def test_soft_conv_9(self):
        labels = [0.99, 0.95, 0.90, 0.95, 0.1, 0.1, 0.001, 1e-3]
        # FIX: same extra-list typo as test_soft_conv_8; unwrapped.
        pre_labels = [0.99, 0.95, 0.90, 0.95, 0.1, 0.1, 0.001, 1e-32]
        tol = 5e-3
        converged = corrmut.has_converged(labels, pre_labels, "soft", tol=tol)
        assert converged

    def test_soft_conv_10(self):
        # Difference between two arrays equals tolerance,
        # (would not be enough to reach convergence by itself)
        # but amount of observations must also be taken into account!
        labels = [0.99, 0.99, 0.1, 0.1, 0.005]
        pre_labels = [0.99, 0.99, 0.1, 0.1, 0.01]
        tol = 5e-3
        converged = corrmut.has_converged(labels, pre_labels, "soft", tol=tol)
        assert converged


#####################
# Alternative model #
#####################
class TestGetPosteriorLogProbs():
    """
    Class to test the corrmut.get_posterior_logprobs function
    """
    def test_logprobs_1(self):
        # Tested using sklearn 0.19.2
        # Covariation between two toy alignments
        Y = [3, 11, 3, 11, 3, 11]
        X = np.array([[11, 3, 11, 3, 11, 3]])
        # NOTE(review): the source chunk is truncated mid-statement here;
        # the remaining expected values and assertions of this test are
        # lost and cannot be reconstructed:
        # exp_logprobs = np.log([0.79807613, 0.79822048, 0.79807613, ...
plot_vary_multipliers_v2_data.py
Source:plot_vary_multipliers_v2_data.py
1"""2Plot v2 of Fig. 4's parameter sweep, which varies over overlap length, finger width, initial front gap, and support spring width3"""4import os5file_location = os.path.abspath(os.path.dirname( __file__))6dir_location = os.path.abspath(os.path.join(file_location, '..'))7import sys8sys.path.append(file_location)9sys.path.append(dir_location)10import numpy as np11import matplotlib.pyplot as plt12from scipy.io import loadmat, savemat13from datetime import datetime14plt.rc('font', size=11.5)15plt.rcParams['legend.fontsize'] = 11.516def setup_plot(len_x, len_y, plt_title=None, x_label="", y_label=""):17 fig, axs = plt.subplots(len_x, len_y)18 if plt_title is not None:19 fig.suptitle(plt_title)20 if x_label or y_label:21 # add a big axis, hide frame22 fig.add_subplot(111, frameon=False)23 # hide tick and tick label of the big axis24 plt.tick_params(labelcolor='none', which='both', top=False, bottom=False, left=False, right=False)25 plt.xlabel(x_label, fontsize=12)26 plt.ylabel(y_label, fontsize=12)27 return fig, axs28def display_stats(x_converged, times_converged, label, nom):29 # Get helpful stats for paper30 # print(label, x_converged, times_converged)31 try:32 time_50_up = times_converged[np.where(np.isclose(x_converged, nom*1.25e6))[0][0]]33 time_nominal = times_converged[np.where(np.isclose(x_converged, nom*1.e6))[0][0]]34 time_50_down = times_converged[np.where(np.isclose(x_converged, nom*0.8e6))[0][0]]35 print("{}: con=0.75: {} (Ratio: {}), con=1: {}, con=1.25: {} (Ratio: {})".format(36 label, time_50_down, 1 - time_50_down/time_nominal, time_nominal, time_50_up, 1 - time_50_up/time_nominal37 ))38 except Exception as e:39 print(e)40now = datetime.now()41name_clarifier = "_20211024_01_37_55_vary_multipliers_undercut=0.400_Fes=v2_Fb=v2_modified"42timestamp = now.strftime("%Y%m%d_%H_%M_%S") + name_clarifier43print(timestamp)44filename = "../data/20211024_01_37_55_vary_multipliers_undercut=0.400_Fes=v2_Fb=v2.npy"45fileData = np.load(filename, 
allow_pickle=True)46process, fingerLcon_range, fingerWcon_range, gfcon_range, supportWcon_range, fingerLnom, fingerWnom, gfnom, supportWnom, data, fig = fileData47plt.close()48fig, axs = setup_plot(2, 2, y_label=r"Time ($\mu$s)")49### fingerL50x_converged, times_converged = data["fingerLpullin"]51x_converged = np.array(x_converged)52times_converged = np.array(times_converged)53idx = np.where(x_converged < 95)54x_converged = x_converged[idx]55times_converged = times_converged[idx]56axs[0, 0].plot(x_converged, times_converged, 'b')57display_stats(x_converged, times_converged, "Pullin fingerLcon", fingerLnom)58x_converged, times_converged = data["fingerLrelease"]59x_converged = np.array(x_converged)60times_converged = np.array(times_converged)61idx = np.where(x_converged < 95)62x_converged = x_converged[idx]63times_converged = times_converged[idx]64axs[0, 0].plot(x_converged, times_converged, 'r')65axs[0, 0].set_title(r"Varying $L_{ol}$", fontsize=12)66axs[0, 0].axvline(fingerLnom*1e6, color='k', linestyle='--')67display_stats(x_converged, times_converged, "Release fingerLcon", fingerLnom)68label = r"$L_{ol}=$" + "{:0.1f}".format(fingerLnom*1e6) + r' $\mu$m'69axs[0, 0].annotate(label, xy=(0.73, 0.96), xycoords='axes fraction', color='k',70 xytext=(0, 0), textcoords='offset points', ha='right', va='top')71axs[0, 0].set_xlabel(r'$L_{ol} (\mu$m)')72### fingerW73x_converged, times_converged = data["fingerWpullin"]74x_converged = np.array(x_converged)75times_converged = np.array(times_converged)76# idx = np.where(x_converged > 3.425)77idx = np.where(np.array(x_converged) > 3.5)78# idx = np.where(x_converged >= 4.)79x_converged = x_converged[idx]80times_converged = times_converged[idx]81axs[0, 1].plot(x_converged, times_converged, 'b')82display_stats(x_converged, times_converged, "Pullin fingerWcon", fingerWnom)83x_converged, times_converged = data["fingerWrelease"]84x_converged = np.array(x_converged)85times_converged = np.array(times_converged)86# idx = 
np.where(np.array(x_converged) > 3.425)87idx = np.where(np.array(x_converged) > 3.5)88# idx = np.where(x_converged >= 4.)89x_converged = x_converged[idx]90times_converged = times_converged[idx]91axs[0, 1].plot(x_converged, times_converged, 'r')92axs[0, 1].set_title(r"Varying $w_f$", fontsize=12)93axs[0, 1].axvline(fingerWnom*1e6, color='k', linestyle='--')94display_stats(x_converged, times_converged, "Release fingerWcon", fingerWnom)95label = r"$w_f=$" + "{:0.1f}".format(fingerWnom*1e6) + r' $\mu$m'96axs[0, 1].annotate(label, xy=(0.29, 0.6), xycoords='axes fraction', color='k',97 xytext=(0, 0), textcoords='offset points', ha='left', va='top')98axs[0, 1].set_xlabel(r'$w_f (\mu$m)')99### gf0100x_converged, times_converged = data["gfpullin"]101axs[1, 0].plot(x_converged, times_converged, 'b')102display_stats(x_converged, times_converged, "Pullin gf0con", gfnom)103max_x = max(x_converged)104x_converged, times_converged = data["gfrelease"]105x_converged = np.array(x_converged)106times_converged = np.array(times_converged)107idx = np.where(np.array(x_converged) < max_x)108x_converged = x_converged[idx]109times_converged = times_converged[idx]110axs[1, 0].plot(x_converged, times_converged, 'r')111axs[1, 0].set_title(r"Varying $x_0$", fontsize=12)112axs[1, 0].axvline(gfnom*1e6, color='k', linestyle='--')113display_stats(x_converged, times_converged, "Release gf0con", gfnom)114label = r"$x_0=$" + "\n" + "{:0.2f}".format(gfnom*1e6) + r' $\mu$m'115axs[1, 0].annotate(label, xy=(0.57, 0.96), xycoords='axes fraction', color='k',116 xytext=(0, 0), textcoords='offset points', ha='left', va='top')117axs[1, 0].set_xlabel(r'$x_0 (\mu$m)')118### supportW119x_converged, times_converged = data["supportWpullin"]120axs[1, 1].plot(x_converged, times_converged, 'b')121display_stats(x_converged, times_converged, "Pullin supportWcon", supportWnom)122x_converged, times_converged = data["supportWrelease"]123axs[1, 1].plot(x_converged, times_converged, 'r')124axs[1, 1].set_title(r"Varying 
$w_{spr}$", fontsize=12)125axs[1, 1].axvline(supportWnom*1e6, color='k', linestyle='--')126display_stats(x_converged, times_converged, "Release supportWcon", supportWnom)127label = r"$w_{spr}=$" + "{:0.1f}".format(supportWnom*1e6) + r' $\mu$m'128axs[1, 1].annotate(label, xy=(0.47, 0.96), xycoords='axes fraction', color='k',129 xytext=(0, 0), textcoords='offset points', ha='left', va='top')130axs[1, 1].set_xlabel(r'$w_{spr} (\mu$m)')131axs[1, 1].legend(["Pull-in", "Release"], loc='center right')132plt.tight_layout()133timestamp = "20211024_01_37_55_vary_multipliers_undercut=0.400_Fes=v2_Fb=v2_modified_v4"134# plt.savefig("../figures/" + timestamp + ".png")135# plt.savefig("../figures/" + timestamp + ".pdf")...
library.py
Source:library.py
'''
 Copyright (C) 2020 Anne Hartebrodt
 This program is free software; you can redistribute it and/or modify
 it under the terms of the GNU General Public License as published by
 the Free Software Foundation; either version 2 of the License, or
 (at your option) any later version.
 This program is distributed in the hope that it will be useful,
 but WITHOUT ANY WARRANTY; without even the implied warranty of
 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 GNU General Public License for more details.
 You should have received a copy of the GNU General Public License along
 with this program; if not, write to the Free Software Foundation, Inc.,
 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 Authors: Anne Hartebrodt
'''
import numpy as np
import scipy as sc
import svd.shared_functions as sh
import scipy.linalg as la
import svd.comparison as co


def local1(data, G_i):
    """Local step 1: project the local data block onto the current G."""
    return np.dot(data, G_i)


def local2(data, H_pooled, G_previous):
    """Local step 2: add the back-projection of the pooled H to previous G."""
    return G_previous + np.dot(data.T, H_pooled)


def pooling(H_all):
    """Sum the per-site H matrices and normalise the result to unit norm."""
    H_pooled = H_all[0]
    for h in H_all[1:]:
        H_pooled = H_pooled + h
    # H_pooled = H_pooled / len(H_all)
    H_pooled = H_pooled / np.linalg.norm(H_pooled)
    return H_pooled


def standalone(data, k=10):
    """Centralised power-iteration baseline; returns the k-column G matrix."""
    G_i = sh.generate_random_gaussian(data.shape[1], k)  # phi1
    G_i, Q = la.qr(G_i, mode='economic')
    converged = False
    previous = G_i
    previous_h = sh.generate_random_gaussian(data.shape[0], k)
    iterations = 0
    while not converged:
        iterations = iterations + 1
        print(iterations)
        H_i = np.dot(data, G_i)  # YiPhii, gamma if standalone
        G_i = np.dot(data.T, H_i) + previous
        G_i, Q = la.qr(G_i, mode='economic')  # 'compute residuals'
        converged, delta = convergence_checker(H_i, previous_h, epsilon=0.0000001)
        previous_h = H_i
        previous = G_i
    return G_i


def convergence_checker_d(current, previous, epsilon=0.000001):
    '''
    Convergence check via a Rayleigh-style column ratio.

    NOTE(review): only the ratio of the LAST column survives the loop
    (`total` is overwritten, not accumulated), and the function returns -1
    when the change is BELOW epsilon — i.e. when convergence HAS been
    reached — which contradicts the original docstring ("-1 if convergence
    not reached"). Behaviour preserved as-is; confirm intent with callers.

    Args:
        current: current guess; columns are eigenvector estimates
        previous: scalar ratio carried over from the previous iteration
        epsilon: tolerance

    Returns: -1 once |ratio - previous| < epsilon, otherwise the new ratio
    '''
    total = 0  # renamed from `sum` to avoid shadowing the builtin
    for i in range(current.shape[1]):
        ra = np.dot(current[:, i].T, current[:, i]) / sc.linalg.norm(current[:, i])
        total = np.abs(ra)
    if np.abs(total - previous) < epsilon:
        return -1
    else:
        return ra


def convergence_checker_angle(current, previous, epsilon=0.000001, return_converged=False):
    """
    Check if angle between consecutive eigenvectors is nearing 0
    (dot product of the paired unit columns approaching 1).

    Args:
        current: Current eigenvector guess
        previous: Previous eigenvector guess
        epsilon: tolerance
        return_converged: verbosity

    Returns:
        (converged, total) or, when return_converged is set,
        (converged, total, converged_eigenvectors, deltas)
    """
    total = 0
    converged = True
    converged_eigenvectors = []
    deltas = []
    for i in range(current.shape[1]):
        ra = np.dot(current[:, i].T, previous[:, i])
        if ra >= 1 - epsilon:
            converged_eigenvectors.append(i)
        else:
            # FIX(comment only): if one eigenvector has not converged,
            # overall convergence is set to False (original comment said
            # "true", contradicting the code below).
            converged = False
    if return_converged:
        return converged, total, converged_eigenvectors, deltas
    else:
        return converged, total


def convergence_checker(current, previous, epsilon=0.000001, return_converged=False):
    """
    Convergence checked via Rayleigh coefficient: each column's coefficient
    must change by at most epsilon between iterations.

    Args:
        current: current H matrix
        previous: previous H matrix
        epsilon: per-column tolerance
        return_converged: also return per-column details

    Returns:
        (converged, total_delta) or, when return_converged is set,
        (converged, total_delta, converged_eigenvectors, deltas)
    """
    total = 0
    converged = True
    converged_eigenvectors = []
    deltas = []
    for i in range(current.shape[1]):
        ra = np.dot(current[:, i].T, current[:, i]) / sc.linalg.norm(current[:, i])
        rap = np.dot(previous[:, i].T, previous[:, i]) / sc.linalg.norm(previous[:, i])
        total = total + np.abs(ra - rap)
        deltas.append(np.abs(ra - rap))
        if np.abs(ra - rap) > epsilon:
            converged = False
        else:
            converged_eigenvectors.append(i)
    if return_converged:
        return converged, total, converged_eigenvectors, deltas
    else:
        return converged, total


def convergence_checker_a(current, previous, alpha_norm, alpha_norm_prev, epsilon=0.000001, return_converged=False):
    """
    Convergence criterion employed by Guo et al.: Rayleigh-style ratios
    normalised by externally supplied G norms.

    Args:
        current: current H
        previous: previous H
        alpha_norm: current G norm (per column)
        alpha_norm_prev: previous G norm (per column)
        epsilon: tolerance
        return_converged: which values to return

    Returns:
        (converged, total_delta) or, when return_converged is set,
        (converged, total_delta, converged_eigenvectors, deltas)
    """
    total = 0
    converged = True
    converged_eigenvectors = []
    deltas = []
    for i in range(current.shape[1]):
        ra = np.dot(current[:, i].T, current[:, i]) / alpha_norm[i]
        rap = np.dot(previous[:, i].T, previous[:, i]) / alpha_norm_prev[i]
        total = total + np.abs(ra - rap)
        deltas.append(np.abs(ra - rap))
        if np.abs(ra - rap) > epsilon:
            converged = False
        else:
            converged_eigenvectors.append(i)
    if return_converged:
        return converged, total, converged_eigenvectors, deltas
    else:
        return converged, total


def standalone2(data, first):
    """Power iteration for the second eigenvector, deflating against `first`."""
    G_i = sh.generate_random_gaussian(data.shape[1], 1)
    G_i = G_i - np.dot(np.inner(G_i, first.T), first.T)
    print(np.asarray(G_i).T)
    print(first)
    print(co.angle(np.asarray(G_i).T, np.asarray(first).T))
    Q, R = la.qr(np.asarray(np.concatenate([first.T, G_i], axis=1)))
    print(Q[:, 0])
    G_i = Q[:, 1]
    converged = False
    previous = G_i
    previous_h = sh.generate_random_gaussian(data.shape[0], 1)
    iterations = 0
    while not converged:
        iterations = iterations + 1
        print(iterations)
        H_i = np.dot(data, G_i)  # YiPhii, gamma if standalone
        G_i = np.dot(data.T, H_i) + previous
        Q, R = la.qr(np.asarray(np.stack([first.flatten(), G_i.T])).T)
        G_i = Q[:, 1]
        # FIX: convergence_checker returns a (converged, delta) tuple; the
        # original assigned the whole tuple to `converged`, and a non-empty
        # tuple is always truthy, so the loop stopped after one iteration.
        # Unpack as in standalone() above.
        converged, delta = convergence_checker(H_i, previous_h)
        previous_h = H_i
        previous = G_i
    return G_i


def get_initial_eigenvector_k(V):
    '''
    ap = a - sum over k-1 <a, ai>ai

    NOTE(review): the loop iterates over column INDICES and feeds the bare
    integer `v` to np.dot instead of the column V[:, v] — this looks like a
    bug, but the function is truncated in this view, so behaviour is kept
    as-is; confirm against the full source.
    Args:
        V: matrix whose columns are previously found eigenvectors

    Returns:
    '''
    a = sh.generate_random_gaussian(1, V.shape[0])
    total = np.zeros(V.shape[0])
    for v in range(V.shape[1]):
        total = total + np.dot(np.dot(a, v), v)
    ap = a - total
    # NOTE(review): source truncated here ("..."); presumably `return ap`
    # follows in the full file.
Learn to execute automation testing from scratch with the LambdaTest Learning Hub. Right from setting up the prerequisites to running your first automation test, to following best practices and diving deeper into advanced test scenarios, the LambdaTest Learning Hub compiles a list of step-by-step guides to help you become proficient with different test automation frameworks, e.g. Selenium, Cypress, and TestNG.
You can also refer to the video tutorials on the LambdaTest YouTube channel to get step-by-step demonstrations from industry experts.
Get 100 minutes of automation testing for FREE!