Best Python code snippets using the slash testing framework
WeatherClassifier.py
Source:WeatherClassifier.py
"""Train a simple MLP to forecast monthly average temperature (TAVG).

Reads a weather CSV, trains on one span of years and tests on another,
using a sliding 12-month window as the model input, then plots actual
values against train/test predictions.
"""
import numpy as np
import pandas
import argparse
import matplotlib.pyplot as plt
import datetime
from keras.models import Sequential
# Dropout / advanced_activations are unused below; kept from the original import.
from keras.layers import Dense, Dropout, advanced_activations


def main():
    """Load data, train the model on one date range, evaluate on another, plot."""
    # Get input args
    args = parse_arguments()
    # Init random seed for reproducibility
    np.random.seed(0)
    # Load the dataset. BUGFIX: `date_parser` was deprecated and then removed
    # in pandas 2.0; `parse_dates=['DATE']` alone performs the same inference.
    dataframe = pandas.read_csv(args["data_path"], engine='python',
                                parse_dates=['DATE'])
    # Define the training set using the input begin and end dates
    # (Jan 1 of begin year through Dec 31 of end year, inclusive).
    train_df = dataframe[(dataframe['DATE'] >= datetime.datetime(args["begin_train"], 1, 1)) &
                         (dataframe['DATE'] <= datetime.datetime(args["end_train"], 12, 31))]
    # Define the testing set using the input begin and end dates
    test_df = dataframe[(dataframe['DATE'] >= datetime.datetime(args["begin_test"], 1, 1)) &
                        (dataframe['DATE'] <= datetime.datetime(args["end_test"], 12, 31))]
    # Replace NaN / invalid entries with 0 so the network sees finite values.
    train_data = np.nan_to_num(train_df['TAVG'].values.astype('float32'))
    test_data = np.nan_to_num(test_df['TAVG'].values.astype('float32'))
    # Combine the data to one array (used only to size the plotting buffers).
    combined_data = np.append(train_data, test_data)
    # Reshape dataset to window matrix.
    look_back = 12  # window size: one year of monthly values
    trainX, trainY = create_dataset(train_data, look_back)
    testX, testY = create_dataset(test_data, look_back)
    # Define and fit the model
    model = create_model(look_back=look_back)
    model.fit(trainX, trainY, epochs=500, batch_size=12, verbose=2)
    # Estimate model performance (evaluate returns the scalar loss = MAE).
    trainScore = model.evaluate(trainX, trainY, verbose=0)
    print('Train Score: %.2f MAE' % (trainScore))
    testScore = model.evaluate(testX, testY, verbose=0)
    print('Test Score: %.2f MAE' % (testScore))
    # Generate predictions for plotting.
    trainPredict = model.predict(trainX)
    testPredict = model.predict(testX)
    # Shift train predictions right by `look_back` so they align with their dates.
    trainPredictPlot = np.empty((len(combined_data), 1))
    trainPredictPlot[:] = np.nan
    trainPredictPlot[look_back:len(trainPredict) + look_back] = trainPredict
    # Shift test predictions past the training span. The +1/-1 offsets
    # compensate for create_dataset() dropping one trailing sample per series.
    testPredictPlot = np.empty((len(combined_data), 1))
    testPredictPlot[:] = np.nan
    testPredictPlot[len(trainPredict) + (look_back * 2) + 1:len(combined_data) - 1] = testPredict
    # Combine the results. BUGFIX: DataFrame.append was removed in pandas 2.0;
    # pandas.concat produces the same row-wise concatenation.
    combined_df = pandas.concat([train_df, test_df])
    combined_dates = combined_df['DATE']
    # Plot baseline and predictions.
    plt.plot(combined_dates, combined_data)
    plt.plot(combined_dates, trainPredictPlot)
    plt.plot(combined_dates, testPredictPlot)
    plt.minorticks_on()
    plt.show()


def create_model(look_back):
    """Build a single-hidden-layer MLP mapping a `look_back` window to one value.

    :param look_back: size of the input window (number of features).
    :return: compiled Keras Sequential model (MAE loss, Nadam optimizer).
    """
    model = Sequential()
    model.add(Dense(100, input_dim=look_back, activation='relu'))
    # Deeper variants kept from the original for experimentation:
    # model.add(Dense(50, activation='relu'))
    # model.add(Dense(25, activation='relu'))
    # model.add(Dense(10, activation='relu'))
    # model.add(Dense(5, activation='relu'))
    model.add(Dense(1))
    model.compile(loss='mean_absolute_error', optimizer='nadam')
    return model


def create_dataset(dataset, look_back=1):
    """Convert a 1-D series into a supervised window matrix.

    Each row of X is `look_back` consecutive values; the matching y is the
    value immediately after the window.

    NOTE(review): the `- 1` in the range drops the final usable sample; the
    plotting offsets in main() compensate for it, so it is kept as-is.

    :param dataset: 1-D array of values.
    :param look_back: window size.
    :return: (X, y) numpy arrays.
    """
    dataX, dataY = [], []
    for i in range(len(dataset) - look_back - 1):
        a = dataset[i:(i + look_back)]
        dataX.append(a)
        dataY.append(dataset[i + look_back])
    return np.array(dataX), np.array(dataY)


def parse_arguments():
    """Parse command-line arguments, applying (and announcing) defaults.

    :return: dict of argument name -> value.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument("-dp", "--data_path", help="Data File Path")
    parser.add_argument("-ad", "--adverse", help="Turns on Adversarial Learning")
    parser.add_argument("-m", "--mode", help="Choose mode: full, grid")
    parser.add_argument("-e", "--epochs", help="Number of Epochs", type=int, nargs="*")
    parser.add_argument("-tr", "--train_ratio", nargs="*", type=int,
                        help="Set Train Ratios. Enter as a percent (20,40,60,80). Can be a list space delimited")
    parser.add_argument("-bs", "--batch_size", nargs="*", type=int,
                        help="Batch size. Can be a list space delimited")
    parser.add_argument("-n", "--neurons", nargs="*", type=int,
                        help="Number of Neurons. Can be a list space delimited")
    parser.add_argument("-o", "--optimizer", nargs="*",
                        help="Optimizers. Can be a list space delimited")
    parser.add_argument("-w", "--weight_constraint", nargs="*", type=int,
                        help="Weight Constraint. Can be a list space delimited")
    parser.add_argument("-d", "--dropout", nargs="*", type=int,
                        help="Dropout. Enter as percent (10,20,30,40...). Can be a list space delimited.")
    parser.add_argument("-model", "--model", help="Select which model to run: all, one_layer, four_decr, four_same")
    parser.add_argument("-s", "--splits", help="Number of Splits for SSS", type=int)
    parser.add_argument("-btr", "--begin_train", help="Year to begin training (1940-2016)", type=int)
    parser.add_argument("-etr", "--end_train", help="Year to end training. Should be higher than begin & <=2017", type=int)
    parser.add_argument("-bts", "--begin_test", help="Year to begin testing (1940-2017)", type=int)
    parser.add_argument("-ets", "--end_test", help="Year to end testing. Should be higher than begin test.", type=int)
    args = parser.parse_args()
    arguments = {}
    if args.data_path:
        arguments["data_path"] = args.data_path
    else:
        print("Default Data Path: ../Data/BWIMonthly1939.csv")
        arguments["data_path"] = "../Data/BWIMonthly1939.csv"
    if args.adverse:
        adverse = True
    else:
        adverse = False
    arguments["adverse"] = adverse
    if args.mode == "grid":
        mode = "grid"
        print("Mode is %s" % mode)
    else:
        mode = "full"
        print("Mode is %s" % mode)
    arguments["mode"] = mode
    if args.model == "all":
        model = ["oneLayer", "fourDecr", "fourSame"]
    elif args.model in ["oneLayer", "fourDecr", "fourSame"]:
        model = [args.model]
    else:
        print("Defaulting to All models")
        model = ["oneLayer", "fourDecr", "fourSame"]
    arguments["model"] = model
    if args.epochs:
        epochs = args.epochs
    else:
        print("Defaulting to 16 epochs")
        epochs = 16
    arguments["epochs"] = epochs
    if args.train_ratio:
        train_ratio = args.train_ratio
    else:
        print("Defaulting to testing all ratios")
        train_ratio = [20, 40, 60, 80]
    arguments["train_ratio"] = train_ratio
    if args.batch_size:
        batch_size = args.batch_size
    else:
        print("Defaulting to Batch Size 10")
        batch_size = 10
    arguments["batch_size"] = batch_size
    if args.neurons:
        neurons = args.neurons
    else:
        print("Defaulting to 45 Neurons")
        neurons = 45
    arguments["neurons"] = neurons
    if args.optimizer:
        optimizer = args.optimizer
    else:
        print("Defaulting to NADAM Optimizer")
        optimizer = "Nadam"
    arguments["optimizer"] = optimizer
    if args.weight_constraint:
        weight_constraint = args.weight_constraint
    else:
        print("Defaulting to weight constraint 5")
        weight_constraint = 5
    arguments["weight_constraint"] = weight_constraint
    if args.dropout:
        dropout = args.dropout
    else:
        print("Defaulting to dropout of 10%")
        dropout = 10
    arguments["dropout"] = dropout
    if args.splits:
        splits = args.splits
    else:
        print("Defaulting to 1 SSS Split")
        splits = 1
    arguments["splits"] = splits
    if args.begin_train:
        begin_train = args.begin_train
    else:
        print("Default begin training is 1940")
        begin_train = 1940
    arguments["begin_train"] = begin_train
    if args.end_train:
        end_train = args.end_train
    else:
        # BUGFIX: fixed typo in user-facing message ("Defult" -> "Default").
        print("Default end training is 1980")
        end_train = 1980
    if end_train < begin_train:
        print("End_Train should be bigger than Begin_Train")
        exit(1)
    arguments["end_train"] = end_train
    if args.begin_test:
        begin_test = args.begin_test
    else:
        print("Default begin test is 1981")
        begin_test = 1981
    arguments["begin_test"] = begin_test
    if args.end_test:
        end_test = args.end_test
    else:
        print("Default end test is 2017")
        end_test = 2017
    if end_test < begin_test:
        print("End_Test should be bigger than Begin_Test")
        exit(1)
    arguments["end_test"] = end_test
    return arguments


if __name__ == "__main__":
    main()
test_zebraParser.py
Source:test_zebraParser.py
# NOTE(review): the head of this file (imports and full_cmd_1..full_cmd_4)
# was truncated by the snippet extraction; only full_cmd_5 onward is visible.
full_cmd_5 = ['--input', 'somefile.zbtex',
              '--texmfhome', '/home/mancilla/development/Zebrackets/src/test',
              '--checkargs']


def begin_test(name):
    """Print a banner identifying the test about to run."""
    print()
    print(70 * '=')
    print("On test: " + name)
    print()


class TestZebraParser(unittest.TestCase):
    """Exercises argument handling in zebraParser.zebraParserParser."""

    def setUp(self):
        pass

    # Checking the argparse parametrization, no errors in command
    def test_zebraparser_cmd1(self):
        begin_test("test_zebraparser_cmd1")
        self.assertEqual(
            zebraParser.zebraParserParser(full_cmd_1),
            'Error: Invalid input file, zbtex extension required.')

    # Checking the argparse parametrization, checking for file extensions
    def test_zebraparser_cmd2(self):
        begin_test("test_zebraparser_cmd2")
        try:
            # BUGFIX: the original asserted assertIn(..., None), which always
            # raises TypeError (None is not a container) and was silently
            # swallowed below, so the test could never fail or detect anything.
            # assertIsNone states the apparent intent: a valid command returns None.
            self.assertIsNone(zebraParser.zebraParserParser(full_cmd_2))
        except BaseException:
            # NOTE(review): best-effort swallow kept from the original;
            # confirm the expected return value, then remove this.
            pass

    def test_zebraparser_cmd3(self):
        begin_test("test_zebraparser_cmd3")
        self.assertEqual(
            zebraParser.zebraParserParser(full_cmd_3),
            "Error: Invalid texmf, path is not a directory.")

    def test_zebraparser_cmd4(self):
        begin_test("test_zebraparser_cmd4")
        self.assertEqual(
            zebraParser.zebraParserParser(full_cmd_4),
            "Error: TEXMFHOME environment variable is not set.")

    def test_zebraparser_cmd5(self):
        begin_test("test_zebraparser_cmd5")
        self.assertEqual(zebraParser.zebraParserParser(full_cmd_5), None)

    # Checking the argparse parametrization, with --h.
    # argparse prints help and raises SystemExit, so we catch exactly that.
    def test_zebraparser_cmd0(self):
        begin_test("test_zebraparser_cmd0")
        try:
            self.assertEqual(zebraParser.zebraParserParser(['--h']), None)
        except SystemExit:
            pass
#        print(sys.exc_info()[0])
#        print(sys.exc_info())

    # Checking the argparse parametrization, with no values.
    # The call may fail in several ways, so the catch is deliberately broad.
    def test_zebraparser_noargs(self):
        begin_test("test_zebraparser_noargs")
        try:
            zebraParser.zebraParserParser()
        except BaseException:
            pass


if __name__ == '__main__':
    # NOTE(review): tail elided in source; unittest.main() is the
    # conventional entry point for a unittest module — confirm.
    unittest.main()
main.py
Source:main.py
"""Interactive typing-speed test: fetch random words, time the user retyping
them, and report words per minute."""
import time
import requests

# Random-word API; the desired word count is appended as a query parameter.
website = 'https://random-word-api.herokuapp.com/word?number='

# Ask user to begin test, else end testing.
print('The test will begin after the first word starts.')
begin_test = input('Would you like to start test? y/n: ').lower()

while begin_test != 'n':
    if begin_test.startswith('y'):
        # Ask how many words to test.
        num_words = input('How many words would you like to test against? Enter a number: ')

        # Send GET request for that many random words; response JSON is a list.
        word_list = requests.get(website + num_words).json()
        print(f'Here is the full list of words:\n')
        print(word_list)
        # Start clock.
        test_start = time.time()

        for count, word in enumerate(word_list):
            word_to_test = ''
            # Keep prompting until the word is typed exactly.
            while word_to_test != word_list[count]:
                print(word_list[count])
                word_to_test = input(':')
            test_end = time.time()
        # words / seconds * 60 -> words per minute.
        average_speed = int(num_words) / (test_end - test_start) * 60
        print(f'Your results are:\n{average_speed} words per minute')
        begin_test = input('Would you like to go again? y/n: ')
    elif begin_test.startswith('n'):
        print('Test ended. See you next time!')
        begin_test = 'n'
    else:
        # BUGFIX(review): this branch was truncated in the source; without a
        # body, any reply that starts with neither 'y' nor 'n' would leave the
        # loop spinning with no prompt. Re-prompt until a valid answer arrives.
        begin_test = input('Please answer y/n: ').lower()
Learn to execute automation testing from scratch with the LambdaTest Learning Hub. Right from setting up the prerequisites and running your first automation test, to following best practices and diving deeper into advanced test scenarios, the LambdaTest Learning Hub compiles step-by-step guides to help you become proficient with different test automation frameworks, e.g. Selenium, Cypress, and TestNG.
You can also refer to the video tutorials on the LambdaTest YouTube channel for step-by-step demonstrations from industry experts.
Get 100 minutes of automation test minutes FREE!!