Best Python code snippet using localstack_python
evolution.py
Source: evolution.py
import random
import lib.mapelites.Individual as Individual
from lib.logger import log
import numpy as np
import math
from lib.mapelites.metrics import similarity_order, similarity_range

highest_e = 0

def normalize(value):
    return value/highest_e

def density_error(full_chrom, chrom_idx, neigh_idx, areas):
    errors = []
    def get_error(main, neighbors, minimum_error=0):
        # error returns how far the current density is from being within
        # 80% ~ 120% of the neighbouring maximum density value
        maximum = max(neighbors)
        if maximum == 0:
            if main == 0:
                error = minimum_error
            else:
                error = 0
        else:
            min_range, max_range = maximum*0.8, maximum*1.2
            error = abs(main-min_range) if main <= min_range else abs(main-max_range) if main >= max_range else 0.0
        return error
    for c_idx, neigh_idx_list in zip(chrom_idx, neigh_idx):
        density = full_chrom[c_idx]/areas[c_idx]
        neighbor_densities = [full_chrom[n_idx]/areas[c_idx] for n_idx in neigh_idx_list]
        error = get_error(density, neighbor_densities)
        errors.append(error)
        #print("Density: {}, neighbors: {}, error: {:.2f}".format(density,
        #      neighbor_densities, error))
    return sum(errors)

##############
# MAP ELITES
##############
def mutate_ME(individual, chrom_idx, min_range=0, max_range=10, mut_rate=0.1):
    chrom = individual.chromosome
    for idx in chrom_idx:
        #for i in range(len(chrom)):
        if random.random() < mut_rate:
            chrom[idx] += random.randint(-max_range, max_range)
            chrom[idx] = 0 if chrom[idx] < 0 else chrom[idx]
    individual.chromosome = chrom

def initialize_pop_ME(chrom, chrom_idx, neigh_idx, areas, max_buildings, pop_range=10, max_per_cell=10):
    # gives a very rough approximation of the highest error
    def highest_error(chrom, chrom_idx, neigh_idx, areas, max_buildings):
        global highest_e
        archive = []
        for i in range(100000):
            new_chrom = [g for g in chrom]
            max_parcel = int(max_buildings/len(chrom))
            genes = [int(random.random()*max_parcel) for x in range(len(chrom))]
            for c_idx in chrom_idx: new_chrom[c_idx] = genes[c_idx]
            individual = Individual.Individual(new_chrom)
            individual.error = density_error(new_chrom, chrom_idx, neigh_idx, areas)
            archive.append(individual)
        highest_e = max([x.error for x in archive])
        return max([x.error for x in archive])
    def get_index(value, min, max, partitions=10):
        import bisect
        bisect_range = np.linspace(min, max, partitions+1)
        b_index = bisect.bisect(bisect_range, value)-1
        return b_index
    population = [[[] for i in range(pop_range)] for j in range(pop_range)]
    print("Total buildings: {}".format(sum([g for g in chrom])))
    print("Total parcels: {}".format(len(chrom_idx)))
    misses = 0
    archive = []
    # we will try to generate random candidates to fit each part of the range
    # between min and max_buildings (e.g. 0-20, 21-40, 41-60, etc.).
    # we don't have a good way of generating candidates across the density error
    # range (0-1), so we generate extra candidates (max_per_cell*n) to try and hit the max
    for a in range(max_per_cell*10):
        r = np.linspace(0, max_buildings, pop_range+1)
        for it in range(len(r)-1):
            # create a new chrom clone
            new_chrom = [g for g in chrom]
            # find min and max_buildings for this range (e.g. 0-20)
            min_limit, max_limit = r[it], r[it+1]
            # cap the vector size used to distribute max_buildings
            vector_limit = len(chrom_idx) if len(chrom_idx) < int(max_limit/2) else int(max_limit/2)
            # initialize a vector with size no bigger than vector_limit
            size_vector = random.randint(1, vector_limit)
            # draw a random number of buildings between min and max_limit
            # and distribute this number for each cell of the vector
            desired_buildings = random.randint(min_limit, max_limit)
            vector = [random.random() for x in range(size_vector)]
            generated_sum = sum(vector)
            for i in range(len(vector)):
                # because this vector generation often loses or gains buildings
                # through rounding, randomly choose to round up or down
                rounding = random.choice([math.ceil, int])
                vector[i] = rounding((vector[i]/generated_sum)*desired_buildings)
            # transfer the numbers from the vector into the new chrom
            rnd_idx = [i for i in range(len(chrom_idx))]
            random.shuffle(rnd_idx)
            for i in range(len(vector)):
                idx = rnd_idx[i]
                new_chrom[chrom_idx[idx]] = vector[i]
            ind = Individual.Individual(new_chrom)
            ind.error = density_error(new_chrom, chrom_idx, neigh_idx, areas)
            nbuildings = 0
            for idx in chrom_idx:
                nbuildings += new_chrom[idx]
            if nbuildings < min_limit or nbuildings > max_limit:
                misses += 1
                continue
            else:
                archive.append(ind)
    # what is the maximum acceptable error? We get the maximum error from the
    # initially generated candidates and set it as the max error threshold
    global highest_e
    highest_e = max([x.error for x in archive])
    print("highest_error: {}".format(highest_e))
    # after an initial archive of individuals was generated, distribute
    # them in the appropriate cells of MAP-Elites
    while len(archive) > 0:
        ind = archive.pop(0)
        nbuildings = 0
        for idx in chrom_idx:
            nbuildings += ind.chromosome[idx]
        ind.error = normalize(ind.error)
        d_idx = get_index(nbuildings, 0, max_buildings, pop_range)
        e_idx = get_index(ind.error, 0, 1, pop_range)
        if d_idx < 0 or d_idx >= pop_range: continue
        if e_idx < 0: continue
        if e_idx >= pop_range: continue
        if len(population[d_idx][e_idx]) < max_per_cell:
            population[d_idx][e_idx].append(ind)
    return population

def generation_ME(population, chrom_idx, neigh_idx, areas, max_buildings,
                  metric=similarity_range, generations=1000, pop_range=10):
    file1 = open("_log_mapelites", "w")
    def get_index(value, min, max, partitions=10):
        import bisect
        bisect_range = np.linspace(min, max, partitions+1)
        b_index = bisect.bisect(bisect_range, value)-1
        return b_index
    def downsize_diversity(population, similarity_limit=0.5):
        for i in range(len(population)):
            for j in range(len(population[i])):
                pop = population[i][j]
                for k in range(len(pop)-1, 0, -1):
                    for l in range(k-1, -1, -1):
                        sim = similarity_range(pop[k].chromosome, pop[l].chromosome, chrom_idx)
                        if sim > similarity_limit:
                            if pop[k].error < pop[l].error:
                                _temp = pop[k]
                                pop[k] = pop[l]
                                pop[l] = _temp
                            pop.pop(k)
                            break
    def steadystate(population, chrom_idx, max_buildings, neigh_idx, areas, pop_range):
        all_individuals = []
        for i in range(len(population)):
            for j in range(len(population[i])):
                for ind in population[i][j]:
                    all_individuals.append((i, j, ind))
        for i, j, ind in all_individuals:
            child = Individual.Individual(ind.chromosome)
            # the intensity of mutation is proportional to the current total number of buildings
            max_mut = math.ceil(max_buildings/len(chrom_idx))*(i+2)
            mutate_ME(child, chrom_idx, 0, max_mut, 0.1)
            child.error = density_error(child.chromosome, chrom_idx, neigh_idx, areas)
            child.error = normalize(child.error)
            nbuildings = 0
            for idx in chrom_idx:
                nbuildings += child.chromosome[idx]
            # get new indexes in the grid and place in the appropriate cell
            d_idx = get_index(nbuildings, 0, max_buildings, pop_range)
            e_idx = get_index(child.error, 0, 1, pop_range)
            if d_idx < 0 or d_idx >= pop_range: continue
            if e_idx < 0 or e_idx >= pop_range: continue
            # #no need to compare child against parent as downsize function
            # #already compares everyone against everyone for each cell
            # if d_idx != i or e_idx != j:
            #     population[d_idx][e_idx].append(child)
            # elif child.error < ind.error:
            #     population[i][j].remove(ind)
            #     population[i][j].append(child)
            population[d_idx][e_idx].append(child)
    def log_pop(gen, population, logfile):
        def get_pop_data(p):
            import numpy as np
            fitnesses = [x.error for x in p]
            mini = min(fitnesses)
            chrom = p[fitnesses.index(mini)].chromosome
            n_buildings = [chrom[idx] for idx in chrom_idx]
            maxi = max(fitnesses)
            avg = np.average(fitnesses)
            std = np.std(fitnesses)
            return sum(n_buildings), mini, maxi, avg, std
        if gen % 50 == 0:
            log("Generation: {}".format(gen))
            for i in range(len(population)):
                for j in range(len(population[i])):
                    pop = population[i][j]
                    # if there are no candidates, initialize all stats as 0
                    if len(pop) == 0:
                        n_bui, mini, maxi, avg, std = 0, 0, 0, 0, 0
                    else:
                        n_bui, mini, maxi, avg, std = get_pop_data(pop)
                    logfile.write("{},{},{},{},{:.2f},{:.2f},{:.2f},{:.2f}".format(gen, i, j, len(pop), mini, maxi, avg, std))
                    log("Population [{}][{}], Cap: {}, n_bui:{}, best:{:.2f}, worst:{:.2f}, " \
                        "avg:{:.2f}, std:{:.2f}".format(i, j, len(pop), n_bui, mini, maxi, avg, std))
                    logfile.write("\n")
    for gen in range(generations):
        # log some data from pop every few generations
        log_pop(gen, population, file1)
        steadystate(population, chrom_idx, max_buildings, neigh_idx, areas, pop_range)
        downsize_diversity(population, 0.35)
    file1.close()

def top_individuals_ME(population, n_ind=1, pop_range=10):
    top_individuals = [[[] for i in range(pop_range)] for j in range(pop_range)]
    for i in range(len(population)):
        for j in range(len(population)):
            pop = population[i][j]
            if len(pop) == 0: continue
            best_ind = sorted(pop, key=lambda i: i.error)[:n_ind]
            top_individuals[i][j].extend(best_ind)
...
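The only non-obvious plumbing in evolution.py is how a candidate's two descriptors (total building count and normalized density error) are mapped to a MAP-Elites grid cell by get_index. Below is a small, self-contained sketch of that indexing trick so it can be run and inspected on its own; the concrete values (max_buildings=200, pop_range=10) are illustrative assumptions, not values taken from the original project.

# Standalone sketch of the grid-indexing helper used above: np.linspace splits
# [lo, hi] into `partitions` equal bins and bisect finds which bin a value
# falls into. Values outside [lo, hi] map to -1 or `partitions`, which is why
# the callers above discard out-of-range indexes.
import bisect
import numpy as np

def get_index(value, lo, hi, partitions=10):
    edges = np.linspace(lo, hi, partitions + 1)
    return bisect.bisect(edges, value) - 1

if __name__ == "__main__":
    # Illustrative assumption: max_buildings=200, pop_range=10, so each bin spans 20 buildings.
    print(get_index(0, 0, 200, 10))    # 0  -> left edge lands in the first bin
    print(get_index(35, 0, 200, 10))   # 1  -> bin covering [20, 40)
    print(get_index(200, 0, 200, 10))  # 10 -> equals the right edge; callers reject idx >= pop_range
    print(get_index(-5, 0, 200, 10))   # -1 -> below the range; callers reject negative indexes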
report.py
Source: report.py
import dataclasses
import enum
import math
import os
import pickle
import statistics
import textwrap
from typing import Iterator, List, Optional, Tuple

import numpy as np
from yattag import Doc, indent

from v2.build import check_unbuildable
from v2.containers import BenchmarkResult, BenchmarkResults, Commit, ResultRange
from v2.gen_report.write import write_report
from v2.runner import Runner

class RowClassification(enum.Enum):
    HighSignificance = 0
    ModerateSignificance = 1
    LowSignificance = 2
    NoSignificance = 3

    @property
    def criteria(self):
        if self == RowClassification.HighSignificance:
            return ((1, 0.10),)
        if self == RowClassification.ModerateSignificance:
            return ((1, 0.05),)
        if self == RowClassification.LowSignificance:
            return ((0, 0.01),)
        return ((0, 0.0),)

    @staticmethod
    def characterize(values):
        for row_classification in RowClassification:
            for n_criteria, rel_diff_criteria in row_classification.criteria:
                if sum(abs(i) >= rel_diff_criteria for i in values) >= n_criteria:
                    return row_classification
        # Fallback, though it should not be needed.
        return RowClassification.NoSignificance

def _iter_flat(result_ranges: Tuple[ResultRange]) -> BenchmarkResult:
    for r in result_ranges:
        results = r.lower_results
        if results is not None:
            for i in results.values:
                yield r.lower_commit.sha, i
        results = r.upper_results
        if results is not None:
            for i in results.values:
                yield r.upper_commit.sha, i

def make_report(self: Runner):
    # Determine table params.
    top_level_labels = {}
    label_order = {}
    low_water_mark = {}
    all_keys = []
    result_ranges = self._group_ranges()[::-1]
    for _, r in _iter_flat(result_ranges):
        if r.label[0] not in top_level_labels:
            top_level_labels[r.label[0]] = len(top_level_labels)
        if r.label not in label_order:
            label_order[r.label] = len(label_order)
        if r.key not in all_keys:
            all_keys.append(r.key)
        low_water_mark.setdefault(r.key, int(np.median(r.instructions)))
        low_water_mark[r.key] = min(low_water_mark[r.key], int(np.median(r.instructions)))
    cols = sorted(
        {(label, autograd, runtime, num_threads)
         for label, _, autograd, runtime, num_threads in all_keys},
        key=lambda x: (top_level_labels[x[0][0]], label_order[x[0]], x[2], x[1], x[3])
    )
    grid_pos = {}
    for i, (label, autograd, runtime, num_threads) in enumerate(cols):
        grid_pos[(label, "Python", autograd, runtime, num_threads)] = (i, 0)
        grid_pos[(label, "C++", autograd, runtime, num_threads)] = (i, 1)
    # Process Data.
    all_tests_ref = {}
    for result_range in result_ranges:
        if result_range.upper_results is None:
            continue
        at_new = {r.key: int(np.median(r.instructions)) for r in result_range.upper_results.values}
        if len(at_new) >= len(all_tests_ref):
            all_tests_ref = at_new
    assert len(all_tests_ref) == len(all_keys), f"{len(all_tests_ref)} {len(all_keys)}"
    row_classifications = {}
    row_deltas = {}
    bisect_ranges: List[ResultRange] = []
    for result_range in result_ranges:
        if result_range.lower_results is None and result_range.upper_results is None:
            continue
        if result_range.lower_results is None or result_range.upper_results is None:
            bisect_ranges.append(result_range)
            continue
        include_in_bisect = (
            {ri.key for ri in result_range.lower_results.values} !=
            {ri.key for ri in result_range.upper_results.values}
        )
        grid = [[None, None] for _ in range(len(cols))]
        lower = {r.key: int(np.median(r.instructions)) for r in result_range.lower_results.values}
        upper = {r.key: int(np.median(r.instructions)) for r in result_range.upper_results.values}
        for key, i1 in upper.items():
            if key not in lower:
                continue
            i0 = lower[key]
            abs_delta = abs(i1 - i0) / statistics.mean([i0, i1])
            rel_delta = (i1 - i0) / low_water_mark[key]
            if abs_delta > 0.03:  # and result_range.lower_commit.date_str >= "09/01/2020":
                include_in_bisect = True
            i, j = grid_pos[key]
            grid[i][j] = rel_delta
        row_deltas[id(result_range)] = grid
        grid_for_criteria = [max(abs(d_py or 0), abs(d_cpp or 0)) for d_py, d_cpp in grid]
        c = RowClassification.characterize(grid_for_criteria)
        if c == RowClassification.HighSignificance:
            include_in_bisect = True
        row_classifications[id(result_range)] = c
        if include_in_bisect:
            bisect_ranges.append(result_range)
    row_counts = {}
    for sha, r in _iter_flat(result_ranges):
        grid = row_counts.get(sha, None)
        if grid is None:
            row_counts[sha] = grid = [[None, None] for _ in range(len(cols))]
        i, j = grid_pos[r.key]
        grid[i][j] = int(np.median(r.instructions))
    bisect_count = 0
    for bisect_range in bisect_ranges:
        if not bisect_range.intermediate_commits:
            continue
        bisect_index = int(len(bisect_range.intermediate_commits) // 2)
        sha = bisect_range.intermediate_commits[bisect_index].sha
        self._state.maybe_enqueue_build(sha)
        bisect_count += 1
    print(bisect_count)
    # return
    import importlib
    import v2.gen_report.write
    importlib.reload(v2.gen_report.write)
    si_map = {
        RowClassification.HighSignificance: 0,
        RowClassification.ModerateSignificance: 1,
        RowClassification.LowSignificance: 2,
    }
    row_classification_indicies = {
        id(r): si_map[row_classifications[id(r)]]
        for r in result_ranges
        if row_classifications.get(id(r), None) in si_map
    }
    v2.gen_report.write.write_report(
        self._history, cols, top_level_labels, grid_pos, row_counts, result_ranges,
        row_deltas, row_classification_indicies
...
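In make_report, each row's per-column (Python, C++) relative deltas are collapsed into a single vector before RowClassification.characterize is applied, with missing cells (None) treated as zero by the `or 0` guards. The short sketch below isolates that aggregation step; the sample delta values are made up purely for illustration.

# Standalone sketch of the per-row aggregation that feeds RowClassification.characterize.
# Each grid entry is [python_delta, cpp_delta]; None means no measurement for that cell.
sample_grid = [
    [0.12, None],   # large Python regression, no C++ measurement
    [None, -0.02],  # small C++ improvement
    [0.00, 0.01],
]

grid_for_criteria = [max(abs(d_py or 0), abs(d_cpp or 0)) for d_py, d_cpp in sample_grid]
print(grid_for_criteria)  # [0.12, 0.02, 0.01]

# Under the criteria defined in the snippet, a single column with a relative delta
# of at least 0.10 is enough for HighSignificance, so a row like this would also
# be queued for bisection.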
solution.py
Source: solution.py
...
def read_file(filename):  # header inferred from the call in main(); preceding lines are truncated
    passes = []
    with open(filename, 'r') as fp:
        passes.extend([line.strip() for line in fp])
    return passes

def bisect_range(codes: str, start: int = 0, end: int = 127) -> int:
    """A plane has X rows and Y columns, find the row/column based on bisection.
    For each character bisect the rows or columns.
    'F' (front) or 'L' (left) gives us the lower half of the range.
    'B' (back) or 'R' (right) gives us the upper half of the range.
    This function is called with the remaining codes to process and the
    current range of rows/columns.
    """
    length = end - start + 1
    code = codes[0].upper()
    if code == 'F' or code == 'L':
        end = end - (length // 2)
    elif code == 'B' or code == 'R':
        start = start + (length // 2)
    codes = codes[1:]
    ret = start
    if codes:
        ret = bisect_range(codes, start=start, end=end)
    return ret

def main():
    passes = read_file('input.txt')
    highest_id = -1
    # pprint(passes)
    seating = [[0 for _ in range(8)] for _ in range(128)]
    for boarding_code in passes:
        row_codes = boarding_code[:-3]
        col_codes = boarding_code[-3:]
        # print(f'row_codes: {row_codes}\tcol_codes: {col_codes}')
        row = bisect_range(row_codes, start=0, end=127)
        col = bisect_range(col_codes, start=0, end=7)
        # print(f'row: {row} \t\tcol: {col}')
        if seating[row][col]:
            raise Exception(f'Seat {row}, {col} already filled')
        seating[row][col] = 1
        seat_id = row * 8 + col
        highest_id = seat_id if seat_id > highest_id else highest_id
    print(f'Highest seat ID found: {highest_id}')
    for row_num, row in enumerate(seating):
        if row.count(1) == 7:
            col_num = row.index(0)
            print(f'You are in row {row_num}, column {col_num}')
            print(f'Your seat ID is {row_num * 8 + col_num}')

if __name__ == '__main__':
    main()
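Since every 'B'/'R' character selects the upper half of a power-of-two range, the recursive bisection in solution.py is equivalent to reading the boarding pass as a 10-bit binary number (F/L = 0, B/R = 1), and row * 8 + col is then just that number. A minimal sketch of the equivalence is shown below; seat_id_binary is a hypothetical helper name and the code assumes well-formed ten-character codes.

# Minimal sketch: the recursive bisection above is equivalent to a binary decode.
def seat_id_binary(code: str) -> int:
    # F/L select the lower half of the range (bit 0); B/R select the upper half (bit 1).
    bits = code.upper().translate(str.maketrans('FLBR', '0011'))
    return int(bits, 2)

if __name__ == '__main__':
    # 'FBFBBFFRLR' decodes to row 44, column 5, i.e. seat ID 44 * 8 + 5 = 357.
    assert seat_id_binary('FBFBBFFRLR') == 357
    print(seat_id_binary('FBFBBFFRLR'))  # 357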