Best Python code snippet using pytest-cov
test_chinese.py
Source: test_chinese.py

import pickle

from hanziconv import HanziConv

from revscoring.datasources import revision_oriented
from revscoring.languages import chinese

from .util import compare_extraction

bad_init = [
    "王八蛋",  # son of a bitch
    "他媽的",  # "his mother's"
    "去你媽",  # "to your mother"
    "去你的",  # "to yours"
    "婊子", "妓女",  # prostitute
    "日狗", "日了狗",  # lonely dog
    "屁眼", "混蛋", "渾蛋",  # asshole
    "混帳",  # variant of above
    "王八",  # bitch
    "白癡",  # idiot
    "腦殘",  # brain dead
    "智障",  # mentally retarded
    "婊", "妓",  # prostitute
    "屎",  # shit
    "屌",  # dick
    "妈逼",  # (this is verbal but definitely bad)
    "艹", "肏",  # fuck (in any context)
    "放屁",  # fart
    # Variants (homonyms) of the use of "fuck" that use 操 ("operation") and
    # 草 ("grass"), "肏" is the actual character. "艹" is not a real character
    # but it's used this way
    "操你", "草你", "日你",  # fuck you
    "操他", "草他", "日他",  # fuck his
    "操她", "草她", "日她",  # fuck her
    # Discrimination (racial slurs)
    "小日本",  # little Japanese
    "台湾狗",  # Taiwanese dogs
    "共产中国",  # communist Chinese
    "流氓国家",  # rogue country
    "人渣",  # human slag
    "操去",  # this is verbal and bad
    "鬼子"  # devil, usually a suffix
]

BAD = [HanziConv.toSimplified(word) for word in bad_init] + \
      [HanziConv.toTraditional(word) for word in bad_init]

INFORMAL = [
    # Hello
    "你好",  # nǐ hǎo; The standard "hello" greeting.
    "您好",  # nín hǎo; The same "hello" greeting as above
    "你怎么样",  # nǐ zěnmeyàng?; "What's up?", "How are you doing?"
    # Good afternoon
    "午安",  # wǔ'an; note: seldom used in the Mainland.
    "下午好",  # xiàwǔ hǎo! Seldom used in the Republic of China
    # Good evening / Good night
    "晚安",  # wǎn'an; Literally "Peace at night", Good night.
    "晚上好",  # wǎnshang hǎo; Good evening!
    # Good-bye
    "再見",  # zàijian; Literally "See you again".
    "明天見",  # míngtian jiàn; Literally "See you tomorrow".
    "拜拜",  # bāibāi/báibái; From English "Bye-Bye".
    "回頭見",  # huítóujiàn: roughly equivalent to "see you soon"
    "回見",  # huíjiàn; usually used in Beijing or written Chinese.
    "再會",  # zàihuì: Literally "[we'll] hello again".
    "66666666", "666",
    "233", "2333333"
]

WORDS_TO_WATCH = [
    # Advertising language
    "本台",  # this channel
    "本公司",  # this company
    "代刷", "代练", "代抢",  # someone who plays games for you
    "强势回归",  # "mighty" return
    "超值",  # very cost-effective
    "一条龙",  # a proverb? "one line of dragon"
    "一夜情",  # selling one's body (advertising)
    "世界一流", "国际一流",  # world first-class
    "用户第一", "用户满意", "用户至上",  # customer-first
    "核心价值", "核心团队", "核心宗旨",  # core value
    "服务小姐",  # service lady
    "服务范围",  # service area
    "服务项目",  # service items
    "服务理念",  # service philosophy
]

OTHER = [
    # long innocuous passage (about the 2005 Atlantic hurricane season) that
    # should not trigger any of the matchers above
    """2005年大西洋颶風季是有纪录以来最活跃的大西洋颶風季，至今仍保持着多项纪录。
    全季对大范围地区造成毁灭性打击，共导致3,913人死亡，损失数额更创下新纪录，高达1592亿美元。
    本季光大型飓风就有7场之多，其中5场在登陆时仍有大型飓风强度，分别是颶風丹尼斯、艾米莉、
    卡特里娜、丽塔和威尔玛，大部分人员伤亡和财产损失都是这5场飓风引起。
    墨西哥的金塔納羅奧州和尤卡坦州，
    以及美国的佛罗里达州和路易斯安那州都曾两度受大型飓风袭击；古巴、巴哈马、海地，
    美国的密西西比州和德克萨斯州，还有墨西哥的塔毛利帕斯州都曾直接受1场大型飓风冲击，
    还有至少1场在附近掠过。美国墨西哥灣沿岸地区是本季受灾最严重的所在，
    飓风卡特里娜产生高达10米的风暴潮，引发毁灭性洪灾，密西西比州沿海地区的大部分建筑物被毁，
    风暴之后又令新奥尔良防洪堤决口，整个城市因此受到重创。此外，飓风斯坦和溫帶氣旋共同影响，
    在中美洲多地引发致命的泥石流，其中又以危地马拉灾情最为严重。"""
]

r_text = revision_oriented.revision.text


def simplified_eq(a, b):
    return len(a) == len(b) and \
           HanziConv.toSimplified(a[0]) == \
           HanziConv.toSimplified(b[0])


def test_badwords():
    compare_extraction(chinese.badwords.revision.datasources.matches,
                       BAD, OTHER, eq=simplified_eq)

    assert chinese.badwords == pickle.loads(pickle.dumps(chinese.badwords))


def test_informals():
    compare_extraction(chinese.informals.revision.datasources.matches,
                       INFORMAL, OTHER, eq=simplified_eq)

    assert chinese.informals == pickle.loads(pickle.dumps(chinese.informals))


def test_words_to_watch():
    compare_extraction(chinese.words_to_watch.revision.datasources.matches,
                       WORDS_TO_WATCH, OTHER, eq=simplified_eq)

    assert chinese.words_to_watch == \
...
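The fixtures above are written once and mirrored into both scripts: BAD concatenates the simplified and traditional conversions of every bad_init entry, and simplified_eq compares matches only after forcing both sides to simplified characters. A minimal, standalone sketch of that HanziConv round trip (the two words are sample fixtures, not the full list):

from hanziconv import HanziConv

words = ["渾蛋", "腦殘"]  # traditional-script fixtures
both_scripts = ([HanziConv.toSimplified(w) for w in words] +
                [HanziConv.toTraditional(w) for w in words])
print(both_scripts)  # ['浑蛋', '脑残', '渾蛋', '腦殘']

hanziconv also ships HanziConv.same(a, b), which compares two strings after simplifying both; simplified_eq above is essentially that check applied to the first element of each match list.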
learning.py
Source: learning.py

import os
import pickle

import numpy as np
import tensorflow as tf  # TF1-style API (tf.reset_default_graph, model.sess)

# os/np/tf made explicit here; they may also come in through the star imports
from model_class import *
from data_creation import *

######################################### LEARNING PROCESS #############################################################

all_fpr = []
all_tpr = []
all_auc = []
lorenz = []
all_gini = []

# loop to train models
for intensitytype in ('f',):  # a 1-tuple; add 'h' to train both model families

    if intensitytype == 'f':
        hidden_dim = [5, 3]
    else:
        hidden_dim = [30, 15]

    duan_replic = ""  # "" : full Duan replication || "exp" : exp activation function

    # plot directories
    directory_roc = ('Results NN for Intensity estimation\\real dataset\\roc\\' +
                     intensitytype + '\\' + str(hidden_dim) + '\\')
    if not os.path.exists(directory_roc):
        os.makedirs(directory_roc)

    directory_lorenz = ('Results NN for Intensity estimation\\real dataset\\lorenz\\' +
                        intensitytype + '\\' + str(hidden_dim) + '\\')
    if not os.path.exists(directory_lorenz):
        os.makedirs(directory_lorenz)

    for tau in range(36):
        print('Estimating model ... ' + intensitytype + str(tau))
        deltaT = 1 / 12
        learning_rate = 0.001
        feature_size = 12
        batch_size = 256
        perc = 0.9
        name = (intensitytype + str(tau) + '_hidden' + str(hidden_dim[0]) +
                '_layers' + str(len(hidden_dim)) + '_learning' + str(learning_rate) +
                '_batch' + str(batch_size) + '_perc' + str(perc) + '_')
        path = (intensitytype + str(tau) + '_hidden' + str(hidden_dim[0]) +
                '_layers' + str(len(hidden_dim)) + '_learning' + str(learning_rate) +
                '_batch' + str(batch_size) + '_perc' + str(perc) + '\\')

        if intensitytype == 'f':
            x_train, y_train, x_test, y_test, _, _ = RealData_f(tau=tau)
        else:
            x_train, y_train, x_test, y_test, _, _ = RealData_h(tau=tau)

        model = NeuralNetwork(hidden_dim=hidden_dim, deltaT=deltaT, learning_rate=learning_rate,
                              feature_size=feature_size, batch_size=batch_size, perc=perc,
                              path=path, name=name)

        # trick to avoid bad initialization (output of the first forward pass too close
        # to the fmin boundary): re-initialize until predictions leave the boundary
        model.initialise()
        _, ff = model.pred(x_train, y_train)
        gg = 0.00001 * np.ones(ff.shape)
        bad_init = np.allclose(ff, gg)  # predictions pinned to the fmin boundary?
        while bad_init:
            model.initialise()
            _, ff = model.pred(x_train, y_train)
            gg = 0.00001 * np.ones(ff.shape)
            bad_init = np.allclose(ff, gg)

        # training process
        for e in range(20):
            model.training(x_train, y_train, e)
            in_loss_value, in_f_value = model.pred(x_train, y_train)
            out_loss_value, out_f_value = model.pred_and_write_summary(x_test, y_test, e)
            print('insample-loss:', in_loss_value, '// outsample-loss:', out_loss_value)

        # _, _, auc_score, fpr_val, tpr_val = testmodel(model=model, x=x_test, y=y_test,
        #                                               path=directory_roc, save=True)  # ROC/AUC currently disabled

        # renamed from `perc` so the percentile hyperparameter above is not clobbered
        perc_pts, cumy, gini = LorenzCurve(model, x_test, y_test, tau, str(model.hidden_dim),
                                           color='green', path=directory_lorenz, save=True)

        # all_auc.append(auc_score)  # store the scores for this model (disabled with testmodel)
        # all_fpr.append(fpr_val)
        # all_tpr.append(tpr_val)
        lorenz.append((perc_pts, cumy))
        all_gini.append(gini)

        tf.reset_default_graph()
        model.sess.close()

    with open(directory_lorenz + "gini.txt", "w") as f:
        for s in all_gini:
            f.write(str(s) + "\n")

    with open(directory_lorenz + 'lorenz.pkl', 'wb') as f:
        pickle.dump(lorenz, f)

    all_auc = []
    lorenz = []
...
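The bad_init guard in this script is a general pattern: if the first forward pass comes back pinned to the fmin output floor, the weight draw is discarded and the network is re-initialized. A self-contained sketch of the same idea, using a stand-in one-layer model rather than the NeuralNetwork class from model_class:

import numpy as np

FMIN = 1e-5  # output floor; predictions stuck here signal a bad draw

def init_params(rng, feature_size):
    # stand-in initializer: small random weights
    return rng.normal(scale=0.1, size=feature_size)

def predict(params, x):
    # stand-in forward pass with the same kind of floor as the intensity model
    return np.maximum(FMIN, x.dot(params))

rng = np.random.default_rng(0)
x_train = rng.normal(size=(256, 12))  # batch_size x feature_size, as above

params = init_params(rng, 12)
while np.allclose(predict(params, x_train), FMIN):
    params = init_params(rng, 12)  # re-draw until predictions leave the boundary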
lqrflm_load_play.py
Source: lqrflm_load_play.py

import numpy as np
import math
import gym

env_name = 'Pendulum-v0'
env = gym.make(env_name)

# load the time-varying feedback gains saved during training
gains = np.loadtxt('./save_weights/kalman_gain.txt', delimiter=" ")
T = int(gains[-1, 0])  # horizon; plain int() instead of the deprecated np.int()
Kt = gains[:, 1:4]  # feedback gain K_t for each time step
kt = gains[:, -1]   # feedforward term k_t for each time step

i_ang = 180.0 * np.pi / 180.0
x0 = np.array([math.cos(i_ang), math.sin(i_ang), 0])

# keep resetting the environment until it starts close enough to x0
bad_init = True
while bad_init:
    state = env.reset()  # shape of observation from gym: (3,)
    x0err = state - x0
    if np.sqrt(x0err.T.dot(x0err)) < 0.1:  # x0 = (state_dim,)
        bad_init = False

for time in range(T + 1):
    env.render()
    Ktt = np.reshape(Kt[time, :], [1, 3])
    action = Ktt.dot(state) + kt[time]
    action = np.clip(action, -env.action_space.high[0], env.action_space.high[0])
    ang = math.atan2(state[1], state[0])
    print('Time: ', time, ', angle: ', ang * 180.0 / np.pi, 'action: ', action)
    state, reward, _, _ = env.step(action)
...
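The playback loop applies a finite-horizon, time-varying affine control law, u_t = Kt[t]·x_t + kt[t], saturated to the actuator limits. The same step written as a small helper (the function name is illustrative, not part of the script above):

import numpy as np

def lqr_action(Kt, kt, state, t, u_max):
    # time-varying affine feedback u_t = Kt[t] @ x_t + kt[t], clipped to [-u_max, u_max]
    u = Kt[t].reshape(1, -1).dot(state) + kt[t]
    return np.clip(u, -u_max, u_max)

Usage with the arrays loaded above would be action = lqr_action(Kt, kt, state, time, env.action_space.high[0]).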