Best Python code snippet using nose
ADAS13_RNN.py
Source:ADAS13_RNN.py
import pandas as pd
import conf
import src.RNN.X_Y_H as ref
from keras.models import Model
from keras.layers import Dense, Input
from keras.models import Sequential
from keras.layers import LSTM
from keras.callbacks import EarlyStopping
import src.preprocess.csv_utils as utils
import numpy as np
from src.evaluate import evaluateRegression
import sklearn

Target = 'ADAS13'
loss_train = []
loss_test = []
loss_validation = []
loss_true_train = []

def predict_state(Encoder, Model, target_df):
    target_dict = utils.build_ptid_split_dic(target_df)
    for i in range(len(target_df.index)):
        # print(i, 'in', len(target_df.index))
        id = target_df['PTID_Key'][i]
        start_in_train = target_dict[id][0]
        end_in_train = target_dict[id][1]
        if i != start_in_train:
            X_test = target_df.loc[i - 1, ref.h_index + ref.x_index].as_matrix(columns=None)
            temp = Encoder.predict(np.expand_dims(X_test, axis=0))
            for j in range(len(ref.h_index)):
                target_df.set_value(i, ref.h_index[j], temp[0, j])
    return target_df

def predict_result(Encoder, Model, target_df):
    target_dict = utils.build_ptid_split_dic(target_df)
    for i in range(len(target_df.index)):
        # print(i, 'in', len(target_df.index))
        id = target_df['PTID_Key'][i]
        start_in_train = target_dict[id][0]
        end_in_train = target_dict[id][1]
        if i != start_in_train:
            X_test = target_df.loc[i - 1, ref.h_index + ref.x_index].as_matrix(columns=None)
            temp = Encoder.predict(np.expand_dims(X_test, axis=0))
            for j in range(len(ref.h_index)):
                target_df.set_value(i, ref.h_index[j], temp[0, j])
            X_test_t = target_df.loc[i, ref.h_index + ref.x_index].as_matrix(columns=None)
            result = Model.predict(np.expand_dims(X_test_t, axis=0))
            target_df.set_value(i, Target, result)
    return target_df

def getTrain(input_df, train_df):
    train_df = pd.concat((input_df, train_df))
    # train = train.sort_values(by=['PTID_Key', 'M'])
    # train.index = pd.RangeIndex(len(train.index))
    # train = utils.dataCompen(train, Target)
    train = train_df.dropna(axis=0, subset=[Target])
    return train

epochs = 10
input_shift = pd.read_csv(conf.intermediate_dir + 'input_shift.csv')
train_add = pd.read_csv(conf.intermediate_dir + 'train_add.csv')
train = getTrain(input_shift, train_add)
train = input_shift
X_train = train[ref.h_index + ref.x_index].values
Y_train = train[Target].values

input = Input(shape=(X_train.shape[1],))
hidden1 = Dense(30, activation='relu')(input)
hidden1_1 = Dense(20, activation='relu')(hidden1)
hidden1_2 = Dense(20, activation='relu')(hidden1_1)
hidden2 = Dense(len(ref.h_index), activation='relu')(hidden1_2)
hidden2_1 = Dense(20, activation='relu')(hidden2)
output = Dense(1, activation='relu')(hidden2_1)
encoder = Model(input, hidden2)
model = Model(input, output)
model.compile(loss='mean_squared_error', optimizer='adam')

val = 1000000
preval = val * 2
callback = [
    EarlyStopping(monitor='val_loss', patience=2, min_delta=0.001, verbose=0)
]
X_train_1 = X_train[0:6870, :]
Y_train_1 = Y_train[0:6870]
model.fit(X_train, Y_train, epochs=20, verbose=1, callbacks=callback, validation_split=0.1)
train_add = predict_state(encoder, model, train_add)
train = getTrain(input_shift, train_add)
X_train = train[ref.h_index + ref.x_index].values
Y_train = train[Target]

for i in range(40):
    # if(abs(preval-val)<1):
    #     break
    his = model.fit(X_train, Y_train, epochs=1, verbose=2)
    val_loss = his.history['loss']
    preval = val
    val = val_loss[-1]
    train_add = predict_state(encoder, model, train_add)
    train = getTrain(input_shift, train_add)
    X_train = train[ref.h_index + ref.x_index].values
    ############ get every time loss ######################
    loss_train.append(val)
    test_add = pd.read_csv(conf.intermediate_dir + 'test_add.csv')
    test_result = predict_result(encoder, model, test_add)
    test_add = pd.read_csv(conf.intermediate_dir + 'test_add.csv')
    loss = evaluateRegression(test_add, test_result)[0][1]
    loss_test.append(loss)
    ## validation
    test_add = pd.read_csv(conf.intermediate_dir + 'val_add.csv')
    test_result = predict_result(encoder, model, test_add)
    test_add = pd.read_csv(conf.intermediate_dir + 'val_add.csv')
    loss = evaluateRegression(test_add, test_result)[0][1]
    loss_validation.append(loss)
    test_add = pd.read_csv(conf.intermediate_dir + 'train_add.csv')
    test_result = predict_result(encoder, model, test_add)
    test_add = pd.read_csv(conf.intermediate_dir + 'train_add.csv')
    loss = evaluateRegression(test_add, test_result)[0][1]
    loss_true_train.append(loss)

print(loss_train)
print(loss_test)
print(loss_validation)
print(loss_true_train)

test_add = pd.read_csv(conf.intermediate_dir + 'test_add.csv')
test_result = predict_result(encoder, model, test_add)
test_add = pd.read_csv(conf.intermediate_dir + 'test_add.csv')
evaluateRegression(test_add, test_result)
test_result.to_csv(conf.result_dir + 'test_predict.csv')
val_add = pd.read_csv(conf.intermediate_dir + 'val_add.csv')
val_result = predict_result(encoder, model, val_add)
try:
    val_predict = pd.read_csv(conf.intermediate_dir + 'val_predict.csv')
    val_predict[Target] = val_result[Target]
    val_predict.to_csv(conf.result_dir + 'val_predict.csv', index=False)
except:
...
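A note on the pandas calls above: predict_state and predict_result rely on DataFrame.as_matrix() and DataFrame.set_value(), both of which were deprecated and then removed in pandas 1.0, so the listing only runs on older pandas releases. A minimal sketch of the equivalent row read/write with the current API; the h_index/x_index lists here are placeholders standing in for ref.h_index/ref.x_index:

import numpy as np
import pandas as pd

# Placeholder column lists standing in for ref.h_index / ref.x_index.
h_index = ['h0', 'h1']
x_index = ['x0', 'x1', 'x2']
df = pd.DataFrame(np.zeros((3, len(h_index) + len(x_index))), columns=h_index + x_index)

i = 1
# Old: df.loc[i - 1, h_index + x_index].as_matrix(columns=None)
X_test = df.loc[i - 1, h_index + x_index].to_numpy()   # .to_numpy() replaces .as_matrix()

# Old: df.set_value(i, col, value)
for j, col in enumerate(h_index):
    df.at[i, col] = X_test[j]                           # .at[] replaces .set_value()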
RNN.py
Source:RNN.py
import pandas as pd
import src.RNN.X_Y_H as ref
from keras.models import Model
from keras.layers import Dense, Input, Dropout
from keras.models import Sequential
from keras.layers import LSTM
from keras.callbacks import EarlyStopping
import src.preprocess.csv_utils as utils
import numpy as np
from src.evaluate import evaluateRegression
import sklearn
import conf

Target = 'MMSE'
loss_train = []
loss_test = []
loss_validation = []
loss_true_train = []

def predict_state(Encoder, Model, target_df):
    target_dict = utils.build_ptid_split_dic(target_df)
    for i in range(len(target_df.index)):
        # print(i, 'in', len(target_df.index))
        id = target_df['PTID_Key'][i]
        start_in_train = target_dict[id][0]
        end_in_train = target_dict[id][1]
        if i != start_in_train:
            X_test = target_df.loc[i - 1, ref.h_index + ref.x_index].as_matrix(columns=None)
            temp = Encoder.predict(np.expand_dims(X_test, axis=0))
            for j in range(len(ref.h_index)):
                target_df.set_value(i, ref.h_index[j], temp[0, j])
    return target_df

def predict_result(Encoder, Model, target_df):
    target_dict = utils.build_ptid_split_dic(target_df)
    for i in range(len(target_df.index)):
        # print(i, 'in', len(target_df.index))
        id = target_df['PTID_Key'][i]
        start_in_train = target_dict[id][0]
        end_in_train = target_dict[id][1]
        if i != start_in_train:
            X_test = target_df.loc[i - 1, ref.h_index + ref.x_index].as_matrix(columns=None)
            temp = Encoder.predict(np.expand_dims(X_test, axis=0))
            for j in range(len(ref.h_index)):
                target_df.set_value(i, ref.h_index[j], temp[0, j])
            X_test_t = target_df.loc[i, ref.h_index + ref.x_index].as_matrix(columns=None)
            result = Model.predict(np.expand_dims(X_test_t, axis=0))
            target_df.set_value(i, Target, result)
    return target_df

def getTrain(input_df, train_df):
    # train_df = pd.concat((input_df, train_df))
    # train = train.sort_values(by=['PTID_Key', 'M'])
    # train.index = pd.RangeIndex(len(train.index))
    # train = utils.dataCompen(train, Target)
    train = train_df.dropna(axis=0, subset=[Target])
    return train

epochs = 10
input_shift = pd.read_csv(conf.intermediate_dir + 'input_shift.csv')
train_add = pd.read_csv(conf.intermediate_dir + 'train_add.csv')
train = getTrain(input_shift, train_add)
train = input_shift
X_train = train[ref.h_index + ref.x_index].values
Y_train = train[Target].values

input = Input(shape=(X_train.shape[1],))
hidden1 = Dense(100, activation='relu')(input)
hidden1_1 = Dense(200, activation='relu')(hidden1)
hidden2 = Dense(len(ref.h_index), activation='relu')(hidden1_1)
hidden2_1 = Dense(200, activation='relu')(hidden2)
hidden2_2 = Dropout(0.1)(hidden2_1)
output = Dense(1, activation='relu')(hidden2_2)
encoder = Model(input, hidden2)
model = Model(input, output)
model.compile(loss='mean_squared_error', optimizer='adam')

val = 1000000
preval = val * 2
callback = [
    EarlyStopping(monitor='val_loss', patience=2, min_delta=0.01, verbose=0)
]
model.fit(X_train, Y_train, epochs=20, verbose=1, callbacks=callback, validation_split=0.1)
train_add = predict_state(encoder, model, train_add)
train = getTrain(input_shift, train_add)
X_train = train[ref.h_index + ref.x_index].values
Y_train = train[Target]

for i in range(40):
    # if(abs(preval-val)<1):
    #     break
    his = model.fit(X_train, Y_train, epochs=1, verbose=2)
    val_loss = his.history['loss']
    preval = val
    val = val_loss[-1]
    train_add = predict_state(encoder, model, train_add)
    train = getTrain(input_shift, train_add)
    X_train = train[ref.h_index + ref.x_index].values
    ############ get every time loss ######################
    loss_train.append(val)
    test_add = pd.read_csv(conf.intermediate_dir + 'test_add.csv')
    test_result = predict_result(encoder, model, test_add)
    test_add = pd.read_csv(conf.intermediate_dir + 'test_add.csv')
    loss = evaluateRegression(test_add, test_result)[0][1]
    loss_test.append(loss)
    ## validation
    test_add = pd.read_csv(conf.intermediate_dir + 'val_add.csv')
    test_result = predict_result(encoder, model, test_add)
    test_add = pd.read_csv(conf.intermediate_dir + 'val_add.csv')
    loss = evaluateRegression(test_add, test_result)[0][1]
    loss_validation.append(loss)
    '''
    test_add = pd.read_csv(conf.intermediate_dir + 'train_add.csv')
    test_result = predict_result(encoder, model, test_add)
    test_add = pd.read_csv(conf.intermediate_dir + 'train_add.csv')
    loss = evaluateRegression(test_add, test_result)[0][1]
    loss_true_train.append(loss)
    '''

print(loss_train)
print(loss_test)
print(loss_validation)
print(loss_true_train)

test_add = pd.read_csv(conf.intermediate_dir + 'test_add.csv')
test_result = predict_result(encoder, model, test_add)
test_add = pd.read_csv(conf.intermediate_dir + 'test_add.csv')
evaluateRegression(test_add, test_result)
test_result.to_csv(conf.result_dir + 'test_predict.csv')
val_add = pd.read_csv(conf.intermediate_dir + 'val_add.csv')
val_result = predict_result(encoder, model, val_add)
try:
    val_predict = pd.read_csv(conf.intermediate_dir + 'val_predict.csv')
    val_predict[Target] = val_result[Target]
    val_predict.to_csv(conf.result_dir + 'val_predict.csv', index=False)
except:
...
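The pattern shared by these scripts is worth spelling out: encoder = Model(input, hidden2) and model = Model(input, output) are two views of the same functional-API graph, so every call to model.fit also updates the layers that encoder uses when predict_state writes the hidden state back into the dataframe for the next pass. A self-contained sketch of that shared-submodel idea; the layer sizes and data below are illustrative, not the ones used in the listings:

import numpy as np
from keras.models import Model
from keras.layers import Dense, Input

n_features, n_hidden = 8, 4
inp = Input(shape=(n_features,))
h = Dense(16, activation='relu')(inp)
state = Dense(n_hidden, activation='relu')(h)      # the "hidden state" layer
out = Dense(1, activation='relu')(Dense(16, activation='relu')(state))

encoder = Model(inp, state)    # exposes the intermediate state
model = Model(inp, out)        # full regressor; shares all layers with encoder
model.compile(loss='mean_squared_error', optimizer='adam')

X = np.random.rand(32, n_features)
y = np.random.rand(32)
model.fit(X, y, epochs=1, verbose=0)      # training the regressor...
print(encoder.predict(X[:1]).shape)       # ...also trains the layers encoder reuses; prints (1, 4)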
VN_RNN.py
Source:VN_RNN.py
import pandas as pd
import src.RNN.X_Y_H as ref
from keras.models import Model, optimizers
from keras.layers import Dense, Input, Dropout
from keras.models import Sequential
from keras.layers import LSTM
from keras.callbacks import EarlyStopping
import src.preprocess.csv_utils as utils
import numpy as np
from src.evaluate import evaluateRegression
import conf

Target = 'Ventricles_Norm'
loss_train = []
loss_test = []
loss_validation = []
loss_true_train = []

def predict_state(Encoder, Model, target_df):
    target_dict = utils.build_ptid_split_dic(target_df)
    for i in range(len(target_df.index)):
        # print(i, 'in', len(target_df.index))
        id = target_df['PTID_Key'][i]
        start_in_train = target_dict[id][0]
        end_in_train = target_dict[id][1]
        if i != start_in_train:
            X_test = target_df.loc[i - 1, ref.h_index + ref.x_index].as_matrix(columns=None)
            temp = Encoder.predict(np.expand_dims(X_test, axis=0))
            for j in range(len(ref.h_index)):
                target_df.set_value(i, ref.h_index[j], temp[0, j])
    return target_df

def predict_result(Encoder, Model, target_df):
    target_dict = utils.build_ptid_split_dic(target_df)
    for i in range(len(target_df.index)):
        # print(i, 'in', len(target_df.index))
        id = target_df['PTID_Key'][i]
        start_in_train = target_dict[id][0]
        end_in_train = target_dict[id][1]
        if i != start_in_train:
            X_test = target_df.loc[i - 1, ref.h_index + ref.x_index].as_matrix(columns=None)
            temp = Encoder.predict(np.expand_dims(X_test, axis=0))
            for j in range(len(ref.h_index)):
                target_df.set_value(i, ref.h_index[j], temp[0, j])
            X_test_t = target_df.loc[i, ref.h_index + ref.x_index].as_matrix(columns=None)
            result = Model.predict(np.expand_dims(X_test_t, axis=0))
            target_df.set_value(i, Target, result)
    return target_df

def getTrain(input_df, train_df):
    # train_df = pd.concat((input_df, train_df))
    # train = train.sort_values(by=['PTID_Key', 'M'])
    # train.index = pd.RangeIndex(len(train.index))
    # train = utils.dataCompen(train, Target)
    train = train_df.dropna(axis=0, subset=[Target])
    return train

epochs = 10
input_shift = pd.read_csv(conf.intermediate_dir + 'input_shift.csv')
train_add = pd.read_csv(conf.intermediate_dir + 'train_add.csv')
train = getTrain(input_shift, train_add)
train = input_shift
X_train = train[ref.h_index + ref.x_index].values
Y_train = train[Target].values

input = Input(shape=(X_train.shape[1],))
hidden1 = Dense(100, activation='relu')(input)
hidden1_1 = Dense(100, activation='relu')(hidden1)
hidden2 = Dense(len(ref.h_index), activation='relu')(hidden1_1)
hidden2_1 = Dense(100, activation='relu')(hidden2)
output = Dense(1, activation='relu')(hidden2_1)
encoder = Model(input, hidden2)
model = Model(input, output)
my_opti = optimizers.rmsprop(decay=0.0000001)
model.compile(loss='mean_squared_error', optimizer='rmsprop')

val = 1000000
preval = val * 2
callback = [
    EarlyStopping(monitor='val_loss', patience=2, min_delta=0.0000001, verbose=0)
]
model.fit(X_train, Y_train, epochs=20)
train_add = predict_state(encoder, model, train_add)
train = getTrain(input_shift, train_add)
X_train = train[ref.h_index + ref.x_index].values
Y_train = train[Target]

for i in range(40):
    # if(abs(preval-val)<1):
    #     break
    his = model.fit(X_train, Y_train, epochs=1, verbose=2)
    val_loss = his.history['loss']
    preval = val
    val = val_loss[-1]
    train_add = predict_state(encoder, model, train_add)
    train = getTrain(input_shift, train_add)
    X_train = train[ref.h_index + ref.x_index].values
    ############ get every time loss ######################
    loss_train.append(val)
    test_add = pd.read_csv(conf.intermediate_dir + 'test_add.csv')
    test_result = predict_result(encoder, model, test_add)
    test_add = pd.read_csv(conf.intermediate_dir + 'test_add.csv')
    loss = evaluateRegression(test_add, test_result)[0][1]
    loss_test.append(loss)
    ## validation
    test_add = pd.read_csv(conf.intermediate_dir + 'val_add.csv')
    test_result = predict_result(encoder, model, test_add)
    test_add = pd.read_csv(conf.intermediate_dir + 'val_add.csv')
    loss = evaluateRegression(test_add, test_result)[0][1]
    loss_validation.append(loss)
    '''
    test_add = pd.read_csv(conf.intermediate_dir + 'train_add.csv')
    test_result = predict_result(encoder, model, test_add)
    test_add = pd.read_csv(conf.intermediate_dir + 'train_add.csv')
    loss = evaluateRegression(test_add, test_result)[0][1]
    loss_true_train.append(loss)
    '''

print(loss_train)
print(loss_test)
print(loss_validation)
print(loss_true_train)

test_add = pd.read_csv(conf.intermediate_dir + 'test_add.csv')
test_result = predict_result(encoder, model, test_add)
test_add = pd.read_csv(conf.intermediate_dir + 'test_add.csv')
evaluateRegression(test_add, test_result)
test_result.to_csv(conf.result_dir + 'test_predict.csv')
val_add = pd.read_csv(conf.intermediate_dir + 'val_add.csv')
val_result = predict_result(encoder, model, val_add)
try:
    val_predict = pd.read_csv(conf.intermediate_dir + 'val_predict.csv')
    val_predict[Target] = val_result[Target]
    val_predict.to_csv(conf.result_dir + 'val_predict.csv', index=False)
except:
...
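One detail to flag in VN_RNN.py: my_opti = optimizers.rmsprop(decay=0.0000001) is built but never used, because compile receives the string 'rmsprop' and therefore constructs a fresh optimizer with default settings, so the decay value has no effect. Under the Keras 2 API the listing appears to use, wiring the configured optimizer in would look roughly like this (a sketch with a toy model, not the author's code):

import numpy as np
from keras import optimizers
from keras.models import Model
from keras.layers import Dense, Input

inp = Input(shape=(5,))
out = Dense(1, activation='relu')(Dense(10, activation='relu')(inp))
model = Model(inp, out)

# Pass the optimizer object rather than the string so the decay setting is actually applied.
my_opti = optimizers.RMSprop(lr=0.001, decay=1e-7)   # Keras 2 signature; newer releases use learning_rate
model.compile(loss='mean_squared_error', optimizer=my_opti)
model.fit(np.random.rand(16, 5), np.random.rand(16), epochs=1, verbose=0)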
test_signals.py
Source:test_signals.py
...
    def getStockHigh(self):
        result = signals.getCurrentPrice('AIB')
        self.assertEquals(result, 15)
        self.assertEquals(signals.add(10, 5), 15)

    def test_add(self):
        result = signals.getCurrentPrice('AIB')
        self.assertEquals(result, 15)
        self.assertEquals(signals.add(10, 5), 15)


class TestStockLow(unittest.TestCase):
    def test_add(self):
        result = signals.getCurrentPrice('AIB')
        self.assertEquals(result, 15)
        self.assertEquals(signals.add(10, 5), 15)

    def test_add(self):
        result = signals.getCurrentPrice('AIB')
        self.assertEquals(result, 15)
        self.assertEquals(signals.add(10, 5), 15)


class TestStockOpen(unittest.TestCase):
    def test_add(self):
        result = signals.getCurrentPrice('AIB')
        self.assertEquals(result, 15)
        self.assertEquals(signals.add(10, 5), 15)

    def test_add(self):
        result = signals.add(10, 5)
        self.assertEquals(result, 15)
        self.assertEquals(signals.add(10, 5), 15)


class TestStockClose(unittest.TestCase):
    def test_add(self):
        result = signals.getCurrentPrice('AIB')
        self.assertEquals(result, 15)
        self.assertEquals(signals.add(10, 5), 15)

    def test_add(self):
        result = signals.getCurrentPrice('AIB')
        self.assertEquals(result, 15)
        self.assertEquals(signals.add(10, 5), 15)


class TestStockVolume(unittest.TestCase):
    def test_add(self):
        result = signals.getCurrentPrice('AIB')
        self.assertEquals(result, 15)
        self.assertEquals(signals.add(10, 5), 15)

    def test_add(self):
        result = signals.getCurrentPrice('AIB')
        self.assertEquals(result, 15)
        self.assertEquals(signals.add(10, 5), 15)


class TestUpdateStockPrice(unittest.TestCase):
    def test_add(self):
        result = signals.getCurrentPrice('AIB')
        self.assertEquals(result, 15)
        self.assertEquals(signals.add(10, 5), 15)

    def test_add(self):
        result = signals.getCurrentPrice('AIB')
        self.assertEquals(result, 15)
        self.assertEquals(signals.add(10, 5), 15)


if __name__ == '__main__':
...
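Two things stand out in test_signals.py: every class defines test_add twice, and because the second def rebinds the same attribute name, nose and unittest will only ever collect and run the last definition in each class; assertEquals is also a deprecated alias of assertEqual. A minimal sketch with distinct method names; the signals module, its expected values, and the class name here are assumed from the listing:

import unittest
import signals  # module assumed from the listing above


class TestStockHigh(unittest.TestCase):
    def test_current_price(self):
        # A distinct name per test ensures both methods are discovered and run.
        self.assertEqual(signals.getCurrentPrice('AIB'), 15)

    def test_add(self):
        self.assertEqual(signals.add(10, 5), 15)


if __name__ == '__main__':
    unittest.main()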