Best Python code snippet using localstack_python
smt_mtz.py
Source:smt_mtz.py
1"""2CVRP using Z3 and SMT3"""4import sys5import numpy as np6from scipy.spatial.distance import cityblock7import networkx as nx8import matplotlib.pyplot as plt9import random10from z3 import Int,Bool,Solver,Optimize,PbLe,PbEq,Implies,And,Not,Or,If,Sum,sat,set_option1112# Save the solution if any 13def on_model(model):14 out = open('../out/out'+sys.argv[1][1:][3:]+'.txt', 'w')15 print("Solution found")16 out.write("Solution found")17 out.write("\n")18 tot = Int('tot')19 print("Total length of the routes: "+ str(model[tot]))20 out.write("Total length of the routes: "+ str(model[tot]))21 out.write("\n")22 track = [[[Bool(f"x_{i}_{j}_{k}") for k in range(num_courier)] for j in range(num_item+1)] for i in range(num_item+1)]23 t = [[[ model.evaluate(track[i][j][k]) for k in range(num_courier)]for j in range(num_item+1)]for i in range(num_item+1)]24 edges=[[] for c in range(num_courier)]25 for c in range(num_courier):26 print("Route courier "+str(c+1)+":")27 out.write("Route courier "+str(c+1)+":")28 out.write("\n")29 active_arcs={}30 nodes=list(range(num_item))31 nodes.insert(0,num_item)32 for i in nodes:33 for j in nodes:34 if t[i][j][c]:35 active_arcs[i]=j36 finish = False37 i = num_item38 route=str(0)+'->'39 while not finish :40 j=active_arcs.get(i)41 edges[c].append((i,j))42 i=j43 if(j==num_item):44 route+=str(0)45 finish=True46 else:47 route+= str(j+1)+'->'48 print(route)49 out.write(route)50 out.write("\n")51 print()52 out.close()5354 # Plot the tour of each vehicle55 fig, ax = plt.subplots()56 G=nx.Graph(name="route_smt")57 for c in range(num_courier):58 G.add_nodes_from(edges[c][:][0])59 G.add_edges_from(edges[c])60 coordinates = np.array([ [x,y] for x,y in zip(xc, yc)])61 nodes = np.arange(0,num_item+1)62 npos = dict(zip(nodes, coordinates))63 nx.draw_networkx_nodes(G,pos=npos, node_size=9)64 colors = [ "#"+''.join([random.choice('0123456789ABCDEF') for j in range(6)]) for i in range(len(edges))]65 linewidth = 266 plt.plot(xc[-1], yc[-1], c='r', marker='s')67 for ctr, edgelist in enumerate(edges):68 nx.draw_networkx_edges(G,pos=npos,edgelist=edgelist,edge_color = colors[ctr], width=linewidth)69 plt.axis('on')70 ax.tick_params(left=True, bottom=True, labelleft=True, labelbottom=True)71 plt.savefig('../plots/route_smt_'+sys.argv[1]+'.png')72 7374def courier_scheduling_problem_MTZ(num_items, num_couriers, items_weights, courier_loads, dist):75 track = [[[Bool(f"x_{i}_{j}_{k}") for k in range(num_couriers)] for j in range(num_items+1)] for i in range(num_items+1)]76 u = [Int(f"u_{j}")for j in range(num_items)]77 tot = Int("tot")78 s = Optimize()79 s.set_on_model(on_model)80 s.set("maxsat_engine",'core_maxsat')81 s.set("timeout",300000)82 #s = Solver()8384 # Main diagonal equal to 0 85 for i in range(num_items+1):86 #s.add(And([Not(track[i][i][c]) for c in range(num_couriers)]))87 s.add(PbEq([88 (track[i][i][c], 1) for c in range(num_couriers)89 ], 0))90 9192 # Each node visited only once93 for j in range(num_items):94 #s.add(And(exactly_one([track[i][j][c] for c in range(num_courier) for i in range(num_items+1)]), exactly_one([track[j][i][c] for c in range(num_courier) for i in range(num_items+1)])))95 s.add(And(PbEq([96 (track[i][j][c],1) for c in range(num_couriers) for i in range(num_items+1)97 ],1),98 PbEq([99 (track[j][i][c],1) for c in range(num_couriers) for i in range(num_items+1)100 ],1)))101102 103 # Each courier can go back and depart from depot at maximum one time104 for c in range(num_couriers):105 #s.add(And(exactly_one([track[num_items][j][c] for j in range(num_items)]), 
exactly_one([track[j][num_items][c] for j in range(num_items)])))106 s.add(And(PbEq([(track[num_items][j][c],1) for j in range(num_items)],1),107 PbEq([(track[j][num_items][c],1) for j in range(num_items)],1)))108109110 # Constraint on the load of each courier111 for c in range(num_couriers):112 #s.add(courier_loads[c]>= Sum([If(track[i][j][c],items_weights[i],0) for i in range(num_items) for j in range(num_items+1)]))113 s.add(PbLe([114 (track[i][j][c], items_weights[i]) for i in range(num_items) for j in range(num_items+1)115 ], courier_loads[c]))116117 # N arcs in = n arcs out118 for c in range(num_couriers):119 for j in range(num_items+1):120 # orig121 s.add(Sum([If(track[i][j][c],1,0) for i in range(num_items+1)])==Sum([If(track[j][i][c],1,0) for i in range(num_items+1)]))122 #s.add(PbEq([(track[i][j][c],1) for i in range(num_items+1)],Sum([(track[j][i][c],1) for i in range(num_items+1)])))123124125 # Miller-Tucker-Zemlin formulation126 for c in range(num_courier):127 for i in range(num_items):128 for j in range(num_items):129 s.add(u[i] + If(track[i][j][c],1,0) <= u[j] + num_items*(1-If(track[i][j][c],1,0)))130 s.add(u[i] > 0)131 132133 # Constraint to obtain the total length of the routes 134 s.add(tot==Sum([If(track[i][j][c],dist[i][j],0) for i in range(num_items+1) for j in range(num_items+1) for c in range(num_couriers)]))135 s.minimize(tot)136137 print("Model loaded, starting to solve the "+sys.argv[1]+".")138139 if s.check() == sat:140 m = s.model()141 t = m.evaluate(tot)142 r = [[[m.evaluate(track[i][j][k]) for k in range(num_couriers)]for j in range(num_items+1)]for i in range(num_items+1)]143 for c in range(num_couriers):144 print('Courier', c)145 nodes=list(range(num_items))146 nodes.insert(0,num_items)147 for i in nodes:148 for j in nodes:149 if r[i][j][c] == True:150 print(i,'->',j)151 print("satttttttttttttttttttttttttt")152 print(r)153 return t154 else:155 print("unsat")156157158159def main(argv):160 # Instantiate variables from file161 file= open('./MCP_Instances/'+argv)162 lines=[]163 for line in file:164 lines.append(line)165 file.close()166 global num_courier167 num_courier=int(lines[0].rstrip('\n'))168 global num_item169 num_item=int(lines[1].rstrip('\n'))170 load=list(map(int, lines[2].rstrip('\n').split()))171 weight=list(map(int, lines[3].rstrip('\n').split()))172 global xc173 xc=list(map(int, lines[4].rstrip('\n').split()))174 global yc175 yc=list(map(int, lines[5].rstrip('\n').split()))176 load = sorted(load, reverse=True)177 for i in range(len(load)):178 if sum(load[0:i])>= sum(weight):179 load1 = load[0:i]180 load=load1181 num_courier = i182 break183 dist=np.zeros((len(xc),len(xc)))184 for i in range(len(xc)):185 for j in range(len(xc)):186 dist[i,j] = int(cityblock([xc[i],yc[i]],[xc[j],yc[j]]))187 dist=dist.astype(int)188 dist=dist.tolist()189 # Call to the Z3 SMT MAXSAT solver190 tot = courier_scheduling_problem_MTZ(num_item, num_courier, weight, load, dist)191192 """Alternative model that uses lazy constraints: to use it comment the lines above and uncomment the lines below"""193 # tot,best_track = courier_scheduling_problem_MTZ_lazy(num_item, num_courier, weight, load, dist)194 # out = open('../out/out'+sys.argv[1][1:][3:]+'.txt', 'w')195 # print("Solution found")196 # out.write("Solution found")197 # out.write("\n")198 # print("Total length of the routes: "+ str(tot))199 # out.write("Total length of the routes: "+ str(tot))200 # out.write("\n")201 # edges=[[] for c in range(num_courier)]202 # for c in range(num_courier):203 # print('Courier', c+1)204 # 
active_arcs={}205 # nodes=list(range(num_item))206 # nodes.insert(0,num_item)207 # for i in nodes:208 # for j in nodes:209 # if best_track[i][j][c]:210 # active_arcs[i]=j211 # finish = False212 # i = num_item213 # route=str(i)+'->'214 # while not finish :215 # j=active_arcs.get(i)216 # edges[c].append((i,j))217 # i=j218 # if(j==num_item):219 # route+=str(num_item)220 # finish=True221 # else:222 # route+= str(j)+'->'223 # print(route)224 # out.write(route)225 # out.write("\n")226 # print()227 # out.close()228 # # plot the tour of each vehicle229 # fig, ax = plt.subplots()230 # G=nx.Graph(name="route_smt")231 # for c in range(num_courier):232 # G.add_nodes_from(edges[c][:][0])233 # G.add_edges_from(edges[c])234 # coordinates = np.array([ [x,y] for x,y in zip(xc, yc)])235 # nodes = np.arange(0,num_item+1)236 # npos = dict(zip(nodes, coordinates))237 # nx.draw_networkx_nodes(G,pos=npos, node_size=35)238 # colors = [ "#"+''.join([random.choice('0123456789ABCDEF') for j in range(6)]) for i in range(len(edges))]239 # linewidths = 2240 # for ctr, edgelist in enumerate(edges):241 # nx.draw_networkx_edges(G,pos=npos,edgelist=edgelist,edge_color = colors[ctr], width=linewidth)242 # plt.axis('on')243 # plt.grid(visible=True)244 # ax.tick_params(left=True, bottom=True, labelleft=True, labelbottom=True)245 # plt.savefig('../plots/route_smt_lazy_'+sys.argv[1]+'.png')246247if __name__ == "__main__":248 main(sys.argv[1])249250251def courier_scheduling_problem_MTZ_lazy(num_items, num_couriers, items_weights, courier_loads, dist):252 track = [[[Bool(f"x_{i}_{j}_{k}") for k in range(num_courier)] for j in range(num_items+1)] for i in range(num_items+1)]253 u = [Int(f"u_{j}")for j in range(num_items)]254 tot = Int("tot")255 s = Solver()256 s.set("timeout",300000)257258 # Tot constraint259 s.add(tot==Sum([If(track[i][j][c],dist[i][j],0) for i in range(num_items+1) for j in range(num_items+1) for c in range(num_couriers)]))260 set_option("verbose", 2)261262 # Main diagonal equal to 0 263 for i in range(num_items+1):264 #s.add(And([Not(track[i][i][c]) for c in range(num_couriers)]))265 s.add(PbEq([266 (track[i][i][c], 1) for c in range(num_couriers)267 ], 0))268 269270 # Each node visited only once271 for j in range(num_items):272 # orig273 #s.add(And(exactly_one([track[i][j][c] for c in range(num_courier) for i in range(num_items+1)]), exactly_one([track[j][i][c] for c in range(num_courier) for i in range(num_items+1)])))274 s.add(And(PbEq([275 (track[i][j][c],1) for c in range(num_courier) for i in range(num_items+1)276 ],1),277 PbEq([278 (track[j][i][c],1) for c in range(num_courier) for i in range(num_items+1)279 ],1)))280281 282 # # Each courier can go back and depart from depot at maximum one time283 for c in range(num_courier):284 s.add(And(PbEq([(track[num_items][j][c],1) for j in range(num_items)],1), PbEq([(track[j][num_items][c],1) for j in range(num_items)],1)))285286 # Constraint on the load of each courier287 for c in range(num_courier):288 #s.add(courier_loads[c]>= Sum([If(track[i][j][c],items_weights[i],0) for i in range(num_items) for j in range(num_items+1)]))289 s.add(PbLe([290 (track[i][j][c], items_weights[i]) for i in range(num_items) for j in range(num_items+1)291 ], courier_loads[c]))292293 # N arcs in = n arcs out294 for c in range(num_courier):295 for j in range(num_items):296 # orig297 s.add(Sum([If(track[i][j][c],1,0) for i in range(num_items+1)])==Sum([If(track[j][i][c],1,0) for i in range(num_items+1)]))298 #s.add(PbEq([(track[i][j][c],1) for i in range(num_items+1)],1))299300 # 
Constraint to eliminate the i->j->i possibility301 for c in range(num_courier):302 for i in range(num_items):303 for j in range(num_items):304 s.add(Implies(track[i][j][c], Not(track[j][i][c])))305306 # Constraint to compute the sum of our tour307 best = 1000000308 best_track=[[[False]]]309 for i in range(1000):310 if s.check() == sat:311 m = s.model()312 t = m.evaluate(tot)313 routes = [[ [ m.evaluate(track[i][j][k]) for k in range(num_couriers)] for j in range(num_items+1)] for i in range(num_items+1)]314 subtour=False315 tours =[[] for i in range(num_couriers) ]316 for c in range(num_courier):317 list1 = list(range(num_items))318 list1.insert(0,num_items)319 for i in list1:320 for j in list1:321 if routes[i][j][c] == True:322 tours[c].append((i,j))323 for c in range(num_couriers):324 if(len(tours[c])>0):325 tmp=[tours[c][0]]326 i=1327 while i < len(tours[c]):328 if(tmp[-1][1]==tours[c][i][0]):329 if tours[c][i] not in tmp:330 tmp.append(tours[c][i])331 i=0332 i+=1333 if(len(tmp)!=len(tours[c])):334 subtour=True335 sub=[i[0] for i in [j for j in tmp]]336 for i in sub:337 for k in range(num_items):338 if(i!=num_items):339 s.add(u[i] + If(track[i][k][c],1,0) <= u[k] + num_items*(1-If(track[i][k][c],1,0)))340 if (not subtour) and (t.as_long() < best) :341 s.add(tot < t.as_long())342 best = t.as_long()343 best_track=routes344 else:345 print("unsat")
...
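The load-bearing idea in smt_mtz.py is the Miller-Tucker-Zemlin (MTZ) block: each item gets an integer order u[i], and using an arc i->j forces u[j] > u[i], so no cycle can close without passing through the depot (whose order is unconstrained). Here is a minimal, self-contained sketch of that constraint on a single-vehicle toy instance; the 4-node distance matrix and node count are made up for illustration and are not taken from the snippet:

from z3 import Bool, Int, If, Not, Sum, PbEq, Optimize, sat

n = 3                                   # items; node n (= 3) plays the depot role
x = [[Bool(f"x_{i}_{j}") for j in range(n + 1)] for i in range(n + 1)]
u = [Int(f"u_{i}") for i in range(n)]   # MTZ visit-order variables
dist = [[0, 2, 9, 5],                   # toy distance matrix
        [2, 0, 6, 4],
        [9, 6, 0, 3],
        [5, 4, 3, 0]]

s = Optimize()
for i in range(n + 1):
    s.add(Not(x[i][i]))                                             # no self-loops
    s.add(PbEq([(x[i][j], 1) for j in range(n + 1) if j != i], 1))  # one arc out
    s.add(PbEq([(x[j][i], 1) for j in range(n + 1) if j != i], 1))  # one arc in

# MTZ ordering: arc i->j implies u[i] + 1 <= u[j]; when the arc is unused the
# big-M term n*(1 - 0/1 indicator) disables the constraint.
for i in range(n):
    s.add(u[i] > 0)
    for j in range(n):
        if i != j:
            s.add(u[i] + 1 <= u[j] + n * (1 - If(x[i][j], 1, 0)))

s.minimize(Sum([If(x[i][j], dist[i][j], 0)
                for i in range(n + 1) for j in range(n + 1)]))
if s.check() == sat:
    m = s.model()
    print([(i, j) for i in range(n + 1) for j in range(n + 1) if m.evaluate(x[i][j])])

The lazy variant in the snippet exploits the same inequality: instead of asserting all O(n^2) MTZ constraints up front, it solves, inspects the model for subtours, and adds the MTZ constraints only for the offending nodes.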
metric_utils.py
Source:metric_utils.py
# Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.

"""Miscellaneous utilities used internally by the quality metrics."""

import os
import time
import hashlib
import pickle
import copy
import uuid
import numpy as np
import torch
import dnnlib

#----------------------------------------------------------------------------

class MetricOptions:
    def __init__(self, G=None, G_kwargs={}, dataset_kwargs={}, num_gpus=1, rank=0, device=None, progress=None, cache=True):
        assert 0 <= rank < num_gpus
        self.G = G
        self.G_kwargs = dnnlib.EasyDict(G_kwargs)
        self.dataset_kwargs = dnnlib.EasyDict(dataset_kwargs)
        self.num_gpus = num_gpus
        self.rank = rank
        self.device = device if device is not None else torch.device('cuda', rank)
        self.progress = progress.sub() if progress is not None and rank == 0 else ProgressMonitor()
        self.cache = cache

#----------------------------------------------------------------------------

_feature_detector_cache = dict()

def get_feature_detector_name(url):
    return os.path.splitext(url.split('/')[-1])[0]

def get_feature_detector(url, device=torch.device('cpu'), num_gpus=1, rank=0, verbose=False):
    assert 0 <= rank < num_gpus
    key = (url, device)
    if key not in _feature_detector_cache:
        is_leader = (rank == 0)
        if not is_leader and num_gpus > 1:
            torch.distributed.barrier() # leader goes first
        with dnnlib.util.open_url(url, verbose=(verbose and is_leader)) as f:
            _feature_detector_cache[key] = pickle.load(f).to(device)
        if is_leader and num_gpus > 1:
            torch.distributed.barrier() # others follow
    return _feature_detector_cache[key]

#----------------------------------------------------------------------------

def iterate_random_labels(opts, batch_size):
    if opts.G.c_dim == 0:
        c = torch.zeros([batch_size, opts.G.c_dim], device=opts.device)
        while True:
            yield c
    else:
        dataset = dnnlib.util.construct_class_by_name(**opts.dataset_kwargs)
        while True:
            c = [dataset.get_label(np.random.randint(len(dataset))) for _i in range(batch_size)]
            c = torch.from_numpy(np.stack(c)).pin_memory().to(opts.device)
            yield c

#----------------------------------------------------------------------------

class FeatureStats:
    def __init__(self, capture_all=False, capture_mean_cov=False, max_items=None):
        self.capture_all = capture_all
        self.capture_mean_cov = capture_mean_cov
        self.max_items = max_items
        self.num_items = 0
        self.num_features = None
        self.all_features = None
        self.raw_mean = None
        self.raw_cov = None

    def set_num_features(self, num_features):
        if self.num_features is not None:
            assert num_features == self.num_features
        else:
            self.num_features = num_features
            self.all_features = []
            self.raw_mean = np.zeros([num_features], dtype=np.float64)
            self.raw_cov = np.zeros([num_features, num_features], dtype=np.float64)

    def is_full(self):
        return (self.max_items is not None) and (self.num_items >= self.max_items)

    def append(self, x):
        x = np.asarray(x, dtype=np.float32)
        assert x.ndim == 2
        if (self.max_items is not None) and (self.num_items + x.shape[0] > self.max_items):
            if self.num_items >= self.max_items:
                return
            x = x[:self.max_items - self.num_items]
        self.set_num_features(x.shape[1])
        self.num_items += x.shape[0]
        if self.capture_all:
            self.all_features.append(x)
        if self.capture_mean_cov:
            x64 = x.astype(np.float64)
            self.raw_mean += x64.sum(axis=0)
            self.raw_cov += x64.T @ x64

    def append_torch(self, x, num_gpus=1, rank=0):
        assert isinstance(x, torch.Tensor) and x.ndim == 2
        assert 0 <= rank < num_gpus
        if num_gpus > 1:
            ys = []
            for src in range(num_gpus):
                y = x.clone()
                torch.distributed.broadcast(y, src=src)
                ys.append(y)
            x = torch.stack(ys, dim=1).flatten(0, 1) # interleave samples
        self.append(x.cpu().numpy())

    def get_all(self):
        assert self.capture_all
        return np.concatenate(self.all_features, axis=0)

    def get_all_torch(self):
        return torch.from_numpy(self.get_all())

    def get_mean_cov(self):
        assert self.capture_mean_cov
        mean = self.raw_mean / self.num_items
        cov = self.raw_cov / self.num_items
        cov = cov - np.outer(mean, mean)
        return mean, cov

    def save(self, pkl_file):
        with open(pkl_file, 'wb') as f:
            pickle.dump(self.__dict__, f)

    @staticmethod
    def load(pkl_file):
        with open(pkl_file, 'rb') as f:
            s = dnnlib.EasyDict(pickle.load(f))
        obj = FeatureStats(capture_all=s.capture_all, max_items=s.max_items)
        obj.__dict__.update(s)
        return obj

#----------------------------------------------------------------------------

class ProgressMonitor:
    def __init__(self, tag=None, num_items=None, flush_interval=1000, verbose=False, progress_fn=None, pfn_lo=0, pfn_hi=1000, pfn_total=1000):
        self.tag = tag
        self.num_items = num_items
        self.verbose = verbose
        self.flush_interval = flush_interval
        self.progress_fn = progress_fn
        self.pfn_lo = pfn_lo
        self.pfn_hi = pfn_hi
        self.pfn_total = pfn_total
        self.start_time = time.time()
        self.batch_time = self.start_time
        self.batch_items = 0
        if self.progress_fn is not None:
            self.progress_fn(self.pfn_lo, self.pfn_total)

    def update(self, cur_items):
        assert (self.num_items is None) or (cur_items <= self.num_items)
        if (cur_items < self.batch_items + self.flush_interval) and (self.num_items is None or cur_items < self.num_items):
            return
        cur_time = time.time()
        total_time = cur_time - self.start_time
        time_per_item = (cur_time - self.batch_time) / max(cur_items - self.batch_items, 1)
        if (self.verbose) and (self.tag is not None):
            print(f'{self.tag:<19s} items {cur_items:<7d} time {dnnlib.util.format_time(total_time):<12s} ms/item {time_per_item*1e3:.2f}')
        self.batch_time = cur_time
        self.batch_items = cur_items
        if (self.progress_fn is not None) and (self.num_items is not None):
            self.progress_fn(self.pfn_lo + (self.pfn_hi - self.pfn_lo) * (cur_items / self.num_items), self.pfn_total)

    def sub(self, tag=None, num_items=None, flush_interval=1000, rel_lo=0, rel_hi=1):
        return ProgressMonitor(
            tag = tag,
            num_items = num_items,
            flush_interval = flush_interval,
            verbose = self.verbose,
            progress_fn = self.progress_fn,
            pfn_lo = self.pfn_lo + (self.pfn_hi - self.pfn_lo) * rel_lo,
            pfn_hi = self.pfn_lo + (self.pfn_hi - self.pfn_lo) * rel_hi,
            pfn_total = self.pfn_total,
        )

#----------------------------------------------------------------------------

def compute_feature_stats_for_dataset(opts, detector_url, detector_kwargs, rel_lo=0, rel_hi=1, batch_size=64, data_loader_kwargs=None, max_items=None, **stats_kwargs):
    dataset = dnnlib.util.construct_class_by_name(**opts.dataset_kwargs)
    if data_loader_kwargs is None:
        data_loader_kwargs = dict(pin_memory=True, num_workers=3, prefetch_factor=2)

    # Try to lookup from cache.
    cache_file = None
    if opts.cache:
        # Choose cache file name.
        args = dict(dataset_kwargs=opts.dataset_kwargs, detector_url=detector_url, detector_kwargs=detector_kwargs, stats_kwargs=stats_kwargs)
        md5 = hashlib.md5(repr(sorted(args.items())).encode('utf-8'))
        cache_tag = f'{dataset.name}-{get_feature_detector_name(detector_url)}-{md5.hexdigest()}'
        cache_file = dnnlib.make_cache_dir_path('gan-metrics', cache_tag + '.pkl')

        # Check if the file exists (all processes must agree).
        flag = os.path.isfile(cache_file) if opts.rank == 0 else False
        if opts.num_gpus > 1:
            flag = torch.as_tensor(flag, dtype=torch.float32, device=opts.device)
            torch.distributed.broadcast(tensor=flag, src=0)
            flag = (float(flag.cpu()) != 0)

        # Load.
        if flag:
            return FeatureStats.load(cache_file)

    # Initialize.
    num_items = len(dataset)
    if max_items is not None:
        num_items = min(num_items, max_items)
    stats = FeatureStats(max_items=num_items, **stats_kwargs)
    progress = opts.progress.sub(tag='dataset features', num_items=num_items, rel_lo=rel_lo, rel_hi=rel_hi)
    detector = get_feature_detector(url=detector_url, device=opts.device, num_gpus=opts.num_gpus, rank=opts.rank, verbose=progress.verbose)

    # Main loop.
    item_subset = [(i * opts.num_gpus + opts.rank) % num_items for i in range((num_items - 1) // opts.num_gpus + 1)]
    for images, _labels in torch.utils.data.DataLoader(dataset=dataset, sampler=item_subset, batch_size=batch_size, **data_loader_kwargs):
        if images.shape[1] == 1:
            images = images.repeat([1, 3, 1, 1])
        features = detector(images.to(opts.device), **detector_kwargs)
        stats.append_torch(features, num_gpus=opts.num_gpus, rank=opts.rank)
        progress.update(stats.num_items)

    # Save to cache.
    if cache_file is not None and opts.rank == 0:
        os.makedirs(os.path.dirname(cache_file), exist_ok=True)
        temp_file = cache_file + '.' + uuid.uuid4().hex
        stats.save(temp_file)
        os.replace(temp_file, cache_file) # atomic
    return stats

#----------------------------------------------------------------------------

def compute_feature_stats_for_generator(opts, detector_url, detector_kwargs, rel_lo=0, rel_hi=1, batch_size=64, batch_gen=None, **stats_kwargs):
    if batch_gen is None:
        batch_gen = min(batch_size, 4)
    assert batch_size % batch_gen == 0

    # Setup generator and labels.
    G = copy.deepcopy(opts.G).eval().requires_grad_(False).to(opts.device)
    c_iter = iterate_random_labels(opts=opts, batch_size=batch_gen)

    # Initialize.
    stats = FeatureStats(**stats_kwargs)
    assert stats.max_items is not None
    progress = opts.progress.sub(tag='generator features', num_items=stats.max_items, rel_lo=rel_lo, rel_hi=rel_hi)
    detector = get_feature_detector(url=detector_url, device=opts.device, num_gpus=opts.num_gpus, rank=opts.rank, verbose=progress.verbose)

    # Main loop.
    while not stats.is_full():
        images = []
        for _i in range(batch_size // batch_gen):
            z = torch.randn([batch_gen, G.z_dim], device=opts.device)
            img = G(z=z, c=next(c_iter), **opts.G_kwargs)
            img = (img * 127.5 + 128).clamp(0, 255).to(torch.uint8)
            images.append(img)
        images = torch.cat(images)
        if images.shape[1] == 1:
            images = images.repeat([1, 3, 1, 1])
        features = detector(images, **detector_kwargs)
        stats.append_torch(features, num_gpus=opts.num_gpus, rank=opts.rank)
        progress.update(stats.num_items)
    return stats
...
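FeatureStats is the easiest piece of this file to exercise in isolation: it accumulates a running sum and raw second moment, then derives mean and covariance in get_mean_cov(). A short usage sketch on random stand-in features, assuming torch and dnnlib are installed so that metric_utils imports cleanly; the 2048-dim feature size mimics an Inception embedding but is otherwise arbitrary:

import numpy as np
from metric_utils import FeatureStats

# Accumulate statistics over random "features" until max_items is reached.
stats = FeatureStats(capture_mean_cov=True, max_items=1000)
while not stats.is_full():
    batch = np.random.randn(64, 2048).astype(np.float32)  # stand-in features
    stats.append(batch)  # the final batch is truncated to exactly max_items

mean, cov = stats.get_mean_cov()  # float64 mean vector and covariance matrix
print(stats.num_items, mean.shape, cov.shape)  # 1000 (2048,) (2048, 2048)

Because only the running moments are kept (capture_all=False), memory stays O(d^2) in the feature dimension rather than growing with the number of samples, which is what makes FID-style covariance computation feasible over large datasets.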
preprocessor.py
Source:preprocessor.py
1"""2Ziwei Zhu3Computer Science and Engineering Department, Texas A&M University4zhuziwei@tamu.edu5"""6import numpy as np7import pandas as pd8class ml1m:9 def __init__(self):10 return11 @staticmethod12 def train(n):13 train_df = pd.read_csv('./data/ml-1m/train_%d.csv' % n)14 vali_df = pd.read_csv('./data/ml-1m/vali_%d.csv' % n)15 num_users = np.max(train_df['userId'])16 num_items = np.max(train_df['movieId'])17 train_R = np.zeros((num_users, num_items)) # training rating matrix18 vali_R = np.zeros((num_users, num_items)) # validation rating matrix19 train_mat = train_df.values20 for i in range(len(train_df)):21 user_idx = int(train_mat[i, 0]) - 122 item_idx = int(train_mat[i, 1]) - 123 train_R[user_idx, item_idx] = 124 vali_mat = vali_df.values25 for i in range(len(vali_df)):26 user_idx = int(vali_mat[i, 0]) - 127 item_idx = int(vali_mat[i, 1]) - 128 vali_R[user_idx, item_idx] = 129 return train_R, vali_R30 @staticmethod31 def test():32 test_df = pd.read_csv('./data/ml-1m/test.csv')33 num_users = np.max(test_df['userId'])34 num_items = np.max(test_df['movieId'])35 test_R = np.zeros((num_users, num_items)) # testing rating matrix36 test_mat = test_df.values37 for i in range(len(test_df)):38 user_idx = int(test_mat[i, 0]) - 139 item_idx = int(test_mat[i, 1]) - 140 test_R[user_idx, item_idx] = 141 train_df = pd.read_csv('./data/ml-1m/train.csv')42 num_users = np.max(train_df['userId'])43 num_items = np.max(train_df['movieId'])44 train_R = np.zeros((num_users, num_items)) # testing rating matrix45 train_mat = train_df.values46 for i in range(len(train_df)):47 user_idx = int(train_mat[i, 0]) - 148 item_idx = int(train_mat[i, 1]) - 149 train_R[user_idx, item_idx] = 150 train_R[user_idx, item_idx] = 151 return train_R, test_R52class Pinterest:53 def __init__(self):54 return55 @staticmethod56 def train(n):57 train_df = pd.read_csv('./data/p/train_%d.csv' % n)58 vali_df = pd.read_csv('./data/p/vali_%d.csv' % n)59 num_users = np.max(train_df['userId'])60 num_items = np.max(train_df['movieId'])61 train_R = np.zeros((num_users, num_items)) # training rating matrix62 vali_R = np.zeros((num_users, num_items)) # validation rating matrix63 train_mat = train_df.values64 for i in range(len(train_df)):65 user_idx = int(train_mat[i, 0]) - 166 item_idx = int(train_mat[i, 1]) - 167 train_R[user_idx, item_idx] = 168 vali_mat = vali_df.values69 for i in range(len(vali_df)):70 user_idx = int(vali_mat[i, 0]) - 171 item_idx = int(vali_mat[i, 1]) - 172 vali_R[user_idx, item_idx] = 173 return train_R, vali_R74 @staticmethod75 def test():76 test_df = pd.read_csv('./data/p/test.csv')77 num_users = np.max(test_df['userId'])78 num_items = np.max(test_df['movieId'])79 test_R = np.zeros((num_users, num_items)) # testing rating matrix80 test_mat = test_df.values81 for i in range(len(test_df)):82 user_idx = int(test_mat[i, 0]) - 183 item_idx = int(test_mat[i, 1]) - 184 test_R[user_idx, item_idx] = 185 train_df = pd.read_csv('./data/p/train.csv')86 num_users = np.max(train_df['userId'])87 num_items = np.max(train_df['movieId'])88 train_R = np.zeros((num_users, num_items)) # testing rating matrix89 train_mat = train_df.values90 for i in range(len(train_df)):91 user_idx = int(train_mat[i, 0]) - 192 item_idx = int(train_mat[i, 1]) - 193 train_R[user_idx, item_idx] = 194 train_R[user_idx, item_idx] = 195 return train_R, test_R96class yelp:97 def __init__(self):98 return99 @staticmethod100 def train(n):101 train_df = pd.read_csv('./data/yelp/train_%d.csv' % n)102 vali_df = pd.read_csv('./data/yelp/vali_%d.csv' % n)103 num_users = 
np.max(train_df['userId'])104 num_items = np.max(train_df['itemId'])105 train_R = np.zeros((num_users, num_items)) # training rating matrix106 vali_R = np.zeros((num_users, num_items)) # validation rating matrix107 train_mat = train_df.values108 for i in range(len(train_df)):109 user_idx = int(train_mat[i, 0]) - 1110 item_idx = int(train_mat[i, 1]) - 1111 train_R[user_idx, item_idx] = 1112 vali_mat = vali_df.values113 for i in range(len(vali_df)):114 user_idx = int(vali_mat[i, 0]) - 1115 item_idx = int(vali_mat[i, 1]) - 1116 vali_R[user_idx, item_idx] = 1117 return train_R, vali_R118 @staticmethod119 def test():120 test_df = pd.read_csv('./data/yelp/test.csv')121 num_users = np.max(test_df['userId'])122 num_items = np.max(test_df['itemId'])123 test_R = np.zeros((num_users, num_items)) # testing rating matrix124 test_mat = test_df.values125 for i in range(len(test_df)):126 user_idx = int(test_mat[i, 0]) - 1127 item_idx = int(test_mat[i, 1]) - 1128 test_R[user_idx, item_idx] = 1129 train_df = pd.read_csv('./data/yelp/train.csv')130 num_users = np.max(train_df['userId'])131 num_items = np.max(train_df['itemId'])132 train_R = np.zeros((num_users, num_items)) # testing rating matrix133 train_mat = train_df.values134 for i in range(len(train_df)):135 user_idx = int(train_mat[i, 0]) - 1136 item_idx = int(train_mat[i, 1]) - 1137 train_R[user_idx, item_idx] = 1138 train_R[user_idx, item_idx] = 1...
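All three loaders follow the same pattern, so one usage sketch covers them, assuming the ./data/ml-1m/ split files referenced above exist with 1-based userId/movieId columns:

from preprocessor import ml1m

# Binary implicit-feedback matrices for fold 0: rows are users, columns items.
train_R, vali_R = ml1m.train(0)
print(train_R.shape, int(train_R.sum()))  # (num_users, num_items), #interactions

Note that matrix sizes are inferred from the maximum IDs in the training split, so any validation or test ID larger than those maxima would index out of bounds; the splits are assumed to share the same ID range.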