Best Python code snippet using ATX
format_datasharing_csvs.py
Source: format_datasharing_csvs.py
"""Format final glacier elevation-change CSVs for data sharing.

Each input CSV is rounded to publication precision, restricted to the
shared column set and written next to its input as ``<name>_fmt.csv``.
"""
import os

import pandas as pd


def format_csv(fn, round_spec, columns, drop=None, index_col=None, keep_index=False):
    """Round, subset and re-write one CSV for data sharing.

    Parameters
    ----------
    fn : str
        Path of the input CSV.
    round_spec : dict
        Column name -> number of decimals (passed to ``DataFrame.round``).
    columns : list of str
        Columns to keep, in output order.
    drop : list of str, optional
        Columns to remove before rounding.
    index_col : int, optional
        Column used as the index when reading (``pd.read_csv``).
    keep_index : bool
        Whether the index is written to the output file.

    Returns
    -------
    str
        Path of the formatted file (``<input stem>_fmt.csv``).
    """
    df = pd.read_csv(fn, index_col=index_col)
    if drop:
        df = df.drop(columns=drop)
    df = df.round(round_spec)
    df = df[columns]
    out_fn = os.path.join(os.path.dirname(fn),
                          os.path.splitext(os.path.basename(fn))[0] + '_fmt.csv')
    df.to_csv(out_fn, index=keep_index)
    return out_fn


# Rounding precisions shared by all cumulative-series outputs.
CUMUL_ROUND = {'dh': 3, 'err_dh': 3, 'dvol': 0, 'err_dvol': 0, 'dm': 4, 'err_dm': 4,
               'dt': 1, 'std_dt': 1, 'perc_area_meas': 3, 'perc_area_res': 3,
               'valid_obs': 2, 'valid_obs_py': 2, 'area': 0, 'area_nodata': 0}
# Rounding precisions shared by all sub-period rate outputs.
RATES_ROUND = {'dhdt': 3, 'err_dhdt': 3, 'dvoldt': 0, 'err_dvoldt': 0,
               'dmdt': 4, 'err_dmdt': 4, 'dmdtda': 3, 'err_dmdtda': 3,
               'perc_area_meas': 3, 'perc_area_res': 3,
               'valid_obs': 2, 'valid_obs_py': 2, 'area': 0, 'area_nodata': 0}
# Extra precisions for the 2-degree tile outputs.
TILE_ROUND = {'tile_lonmin': 1, 'tile_latmin': 1, 'tile_size': 1}

# Column tails shared by the outputs; each dataset prepends its id columns.
CUMUL_TAIL = ['dh', 'err_dh', 'dvol', 'err_dvol', 'dm', 'err_dm',
              'perc_area_meas', 'perc_area_res', 'valid_obs', 'valid_obs_py',
              'area_nodata']
RATES_TAIL = ['dhdt', 'err_dhdt', 'dvoldt', 'err_dvoldt', 'dmdt', 'err_dmdt',
              'perc_area_meas', 'perc_area_res', 'valid_obs', 'valid_obs_py',
              'area_nodata']


def main():
    """Format all final data-sharing CSVs (glacier, region, tile, sub-region)."""
    # PER GLACIER: per-glacier cumulative series (its read index is kept in
    # the output, unlike every other dataset below).
    for fn in ['/home/atom/ongoing/work_worldwide/vol/dh_06_rgi60_int_base.csv']:
        format_csv(
            fn,
            {'dh': 3, 'err_dh': 3, 'dt': 1, 'std_dt': 1,
             'perc_area_meas': 3, 'perc_area_res': 3,
             'err_corr_150': 3, 'err_corr_2000': 3, 'err_corr_5000': 3,
             'err_corr_20000': 3, 'err_corr_50000': 3, 'err_corr_200000': 3,
             'valid_obs': 2, 'valid_obs_py': 2,
             'area': 0, 'lon': 4, 'lat': 4, 'perc_err_cont': 3},
            ['rgiid', 'time', 'area', 'dh', 'err_dh', 'perc_area_meas',
             'perc_area_res', 'valid_obs', 'valid_obs_py', 'dt', 'std_dt',
             'err_corr_150', 'err_corr_2000', 'err_corr_5000', 'err_corr_20000',
             'err_corr_50000', 'err_corr_200000', 'lat', 'lon'],
            index_col=0, keep_index=True)

    # RGI O1 REGIONS WITH TAREA: regional cumulative series.
    for fn in ['/home/atom/ongoing/work_worldwide/vol/final/dh_01_rgi60_int_base_reg.csv']:
        format_csv(fn, CUMUL_ROUND, ['reg', 'time', 'area'] + CUMUL_TAIL,
                   drop=['area_valid_obs_py', 'perc_err_cont'])

    # RGI O1 regional rates over sub-periods (keeps the tarea column).
    for fn in ['/home/atom/ongoing/work_worldwide/vol/final/dh_01_rgi60_int_base_reg_subperiods.csv']:
        format_csv(fn, {**RATES_ROUND, 'tarea': 0},
                   ['reg', 'period', 'area', 'tarea'] + RATES_TAIL,
                   index_col=0)

    # TILES: 2-degree tile cumulative series.
    for fn in ['/home/atom/ongoing/work_worldwide/vol/final/dh_world_tiles_2deg.csv']:
        format_csv(fn, {**CUMUL_ROUND, **TILE_ROUND},
                   ['tile_lonmin', 'tile_latmin', 'tile_size', 'time', 'area'] + CUMUL_TAIL,
                   drop=['area_valid_obs_py', 'perc_err_cont'])

    # Tile rates over sub-periods (tarea dropped).
    for fn in ['/home/atom/ongoing/work_worldwide/vol/final/dh_world_tiles_2deg_subperiods.csv']:
        format_csv(fn, {**RATES_ROUND, **TILE_ROUND},
                   ['tile_lonmin', 'tile_latmin', 'tile_size', 'period', 'area'] + RATES_TAIL,
                   drop=['tarea'], index_col=0)

    # SHP with TW/NTW sorting: sub-region (HIMAP) cumulative series.
    for fn in ['/home/atom/ongoing/work_worldwide/vol/final/subreg_HIMAP_cumul.csv']:
        format_csv(fn, CUMUL_ROUND, ['subreg', 'time', 'area'] + CUMUL_TAIL)

    # Sub-region (HIMAP) rates over sub-periods (tarea dropped).
    # NOTE(review): the original listing is truncated after the column
    # selection here; the final write is reconstructed from the identical
    # pattern used by every other rates dataset above -- confirm against
    # the full source.
    for fn in ['/home/atom/ongoing/work_worldwide/vol/final/subreg_HIMAP_rates.csv']:
        format_csv(fn, RATES_ROUND, ['subreg', 'period', 'area'] + RATES_TAIL,
                   drop=['tarea'], index_col=0)


if __name__ == '__main__':
    main()
solution.py
Source: solution.py
# -*- coding: utf-8 -*-
"""Desafio 5: salary statistics report from a JSON file of employees/areas.

Reads the JSON file given on the command line and prints pipe-delimited
lines for global, per-area and per-surname salary statistics.
"""
import json
import sys

import pandas as pd


def load_tables(filename):
    """Load the input JSON and return (funcionarios, areas) DataFrames.

    Raises whatever ``open``/``json.load`` raise on a missing/invalid file.
    """
    with open(filename, encoding='utf-8') as fh:
        payload = json.load(fh)
    # pandas.io.json.json_normalize was deprecated in pandas 1.0 and removed
    # in 2.0; pd.json_normalize is the supported equivalent.
    return pd.json_normalize(payload['funcionarios']), pd.json_normalize(payload['areas'])


def report(funcionarios, areas):
    """Print all statistics lines for the loaded tables (side effect: stdout)."""
    # Questao 1 -- global max/min/average salary.
    salario = funcionarios.salario
    for _, row in funcionarios.loc[salario == salario.max()].iterrows():
        print('global_max|{0}|{1:.2f}'.format(
            ' '.join([row.nome, row.sobrenome]), row.salario))
    for _, row in funcionarios.loc[salario == salario.min()].iterrows():
        print('global_min|{0}|{1:.2f}'.format(
            ' '.join([row.nome, row.sobrenome]), row.salario))
    print('global_avg|{0:.2f}'.format(round(salario.mean(), 2)))

    # Questao 2 -- max/min/average salary per area.  (The original also
    # computed an unused ``area_avg`` row selection; that dead code is
    # removed -- only the mean itself is printed.)
    for area, grupo in funcionarios.groupby(by='area'):
        nome_area = areas.loc[areas.codigo == area].iloc[0].nome
        for _, row in grupo.loc[grupo.salario == grupo.salario.max()].iterrows():
            print('area_max|{0}|{1}|{2:.2f}'.format(
                nome_area, ' '.join([row.nome, row.sobrenome]), row.salario))
        for _, row in grupo.loc[grupo.salario == grupo.salario.min()].iterrows():
            print('area_min|{0}|{1}|{2:.2f}'.format(
                nome_area, ' '.join([row.nome, row.sobrenome]), row.salario))
        print('area_avg|{0}|{1:.2f}'.format(nome_area, grupo.salario.mean()))

    # Questao 3 -- areas with the most / fewest employees.
    contagem = funcionarios['area'].value_counts()
    for area in contagem.loc[contagem == contagem.max()].index:
        print('most_employees|{0}|{1}'.format(
            areas.loc[areas.codigo == area].iloc[0].nome, contagem[area]))
    for area in contagem.loc[contagem == contagem.min()].index:
        print('least_employees|{0}|{1}'.format(
            areas.loc[areas.codigo == area].iloc[0].nome, contagem[area]))

    # Questao 4 -- highest salary among employees sharing a surname
    # (only surnames held by more than one employee).
    for sobrenome, grupo in funcionarios.groupby(by='sobrenome'):
        if grupo.shape[0] > 1:
            for _, row in grupo.loc[grupo.salario == grupo.salario.max()].iterrows():
                print('last_name_max|{0}|{1}|{2:.2f}'.format(
                    sobrenome, ' '.join([row.nome, row.sobrenome]), row.salario))
    # NOTE(review): the source listing is truncated here; any further
    # questao-4 output (e.g. a last_name_min section) was not visible and
    # is not reproduced -- confirm against the full source.


def main(argv=None):
    """CLI entry point: validate arguments, load the file and print the report."""
    argv = sys.argv if argv is None else argv
    if len(argv) < 2:
        print('Usage: python solution.py [filename] - Executa o desafio 5 usando o nome de arquivo passado')
        sys.exit()
    funcionarios, areas = load_tables(argv[1])
    report(funcionarios, areas)


if __name__ == '__main__':
    main()
aRectangle.py
Source: aRectangle.py
...7 if l == w:8 self.is_square = True9 else:10 self.is_square = False11# def area(self):12# self.area = self.l * self.w13# return self.area14# def perimeter(self):15# self.perimeter = (2 * self.l) + (2 * self.w)16# return self.perimeter17 def __lt__(self, other):18 return self.area < other.area19 def ___le__(self, other):20 return self.area <= other.area21 def __eq__(self, other):22 return self.area == other.area23 def __ne__(self, other):24 return self.area != other.area25 def __gt__(self, other):26 return self.area > other.area27 def __ge__(self, other):28 return self.area >= other.area29# def is_square(self):30# if self.l == self.w:31# return True32# else:33# return False34 def __str__(self):35 return ("Width: " + str(self.w) + ", Length: " + str(self.l) + ", Area: " + str(self.area) + ", Perimeter: " + str(self.perimeter) + ", is Square: " + str(self.is_square) + "\n")36r = aRectangle(2,5)37r2 = aRectangle(2,2)38#r.area()39#r.perimeter()40#r.is_square()41#print(r, r2)42print(r > r2)43print(r < r2)...
Learn to execute automation testing from scratch with the LambdaTest Learning Hub. Right from setting up the prerequisites to running your first automation test, to following best practices and diving deeper into advanced test scenarios, the LambdaTest Learning Hub compiles step-by-step guides to help you become proficient with different test automation frameworks, e.g. Selenium, Cypress, and TestNG.
You could also refer to video tutorials over LambdaTest YouTube channel to get step by step demonstration from industry experts.
Get 100 automation test minutes FREE!