Best Python code snippet using locust
data_serializer.py
Source: data_serializer.py
import sys
import pickle
import pandas as pd

sys.path.append("../..")
from src.utility.utility_functions import *


def load_network_node_from_csv_file_and_save_it_to_pickle_file(path_to_csv: str):
    all_nodes = []
    nodes_csv = pd.read_csv(path_to_csv)
    num_of_nodes = nodes_csv.shape[0]
    print(f"[INFO] num_of_nodes {nodes_csv.shape}")
    print(nodes_csv.head(2))
    for idx in range(num_of_nodes):
        node = Pos()
        node.node_id = int(nodes_csv.iloc[idx]["id"])
        node.lon = nodes_csv.iloc[idx]["lng"]
        node.lat = nodes_csv.iloc[idx]["lat"]
        all_nodes.append(node)
    path_to_pickle = path_to_csv.replace(".csv", ".pickle")
    with open(path_to_pickle, 'wb') as f:
        pickle.dump(all_nodes, f)


def load_path_table_from_csv_file_and_save_it_to_pickle(path_to_csv: str):
    path_table_csv = pd.read_csv(path_to_csv, index_col=0).values
    path_to_pickle = path_to_csv.replace(".csv", ".pickle")
    with open(path_to_pickle, 'wb') as f:
        pickle.dump(path_table_csv, f)


def load_request_data_from_csv_file_and_save_it_to_pickle_file(path_to_csv: str):
    all_requests = []
    requests_csv = pd.read_csv(path_to_csv)
    num_of_requests = requests_csv.shape[0]
    print(f"[INFO] num_of_requests {requests_csv.shape}")
    print(requests_csv.head(2))
    for idx in tqdm(range(num_of_requests), "loading requests"):
        request = Request()
        request.origin_node_id = int(requests_csv.iloc[idx]["onid"])
        request.destination_node_id = int(requests_csv.iloc[idx]["dnid"])
        request.request_time_date = requests_csv.iloc[idx]["ptime"]
        request.request_time_ms = compute_the_accumulated_seconds_from_0_clock(request.request_time_date) * 1000
        all_requests.append(request)
    path_to_pickle = path_to_csv.replace(".csv", ".pickle")
    with open(path_to_pickle, 'wb') as f:
        pickle.dump(all_requests, f)


if __name__ == '__main__':
    vehicle_stations = f"{ROOT_PATH}/datalog-gitignore/map-data/stations-101.csv"
    network_nodes = f"{ROOT_PATH}/datalog-gitignore/map-data/nodes.csv"
    mean_table = f"{ROOT_PATH}/datalog-gitignore/map-data/mean-table.csv"
    dist_table = f"{ROOT_PATH}/datalog-gitignore/map-data/dist-table.csv"
    path_table = f"{ROOT_PATH}/datalog-gitignore/map-data/path-table.csv"
    for node_file in [vehicle_stations, network_nodes]:
        load_network_node_from_csv_file_and_save_it_to_pickle_file(node_file)
    for table_file in [mean_table, dist_table, path_table]:
        load_path_table_from_csv_file_and_save_it_to_pickle(table_file)
    for day in ["03", "04", "05", "10", "11", "12", "17", "19", "24", "25", "26"]:
        taxi_data = f"{ROOT_PATH}/datalog-gitignore/taxi-data/manhattan-taxi-201605{day}-peak.csv"
        ...
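The serializer above only writes the pickle files. As a minimal sketch of reading one of them back (assuming the nodes.pickle name produced by the naming convention above, and that the module defining Pos and ROOT_PATH is still importable so pickle can reconstruct the objects):

import sys
import pickle

sys.path.append("../..")
from src.utility.utility_functions import *  # Pos/ROOT_PATH must be importable for unpickling


def load_nodes_from_pickle(path_to_pickle: str):
    # Read back the list of Pos objects written by
    # load_network_node_from_csv_file_and_save_it_to_pickle_file above.
    with open(path_to_pickle, 'rb') as f:
        return pickle.load(f)


if __name__ == '__main__':
    # Hypothetical path, following the naming used in data_serializer.py.
    nodes = load_nodes_from_pickle(f"{ROOT_PATH}/datalog-gitignore/map-data/nodes.pickle")
    print(f"[INFO] loaded {len(nodes)} nodes, first node id {nodes[0].node_id}")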
process_requests.py
Source: process_requests.py
import os
from tess_requests import Request


def process(folder):
    requests_csv = []
    # folder = '/Users/wolfson/Desktop/homework/molbi/molbi_asv'  # testing
    directory = fr'{folder}'
    for entry in os.scandir(directory):
        if entry.path.endswith('.jpg') and entry.is_file():
            obj = Request(entry.path)
            print(entry)
            for _ in obj.csv_data:
                requests_csv.append(_)
            os.rename(entry, f'{directory}/{obj.case_number}.jpg')
    ...
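The snippet is truncated after the rename step. Purely for illustration, a small driver could dump the collected rows to a CSV file, under two assumptions not shown in the source: that process() is extended to return requests_csv, and that each item of obj.csv_data is an iterable of field values:

import csv

from process_requests import process  # assumes process() returns requests_csv


def write_requests_csv(rows, out_path='requests.csv'):
    # Write the rows accumulated by process() to a single CSV file.
    # Assumes each row is an iterable of field values.
    with open(out_path, 'w', newline='') as f:
        csv.writer(f).writerows(rows)


if __name__ == '__main__':
    rows = write_rows = process('/path/to/scanned-requests')  # hypothetical folder of .jpg scans
    write_requests_csv(rows)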