Best Python code snippet using localstack_python
Stand_alone-checkpoint.py
Source:Stand_alone-checkpoint.py
from copy import deepcopy, copy
from config_profile import args
from Utils import cv2_scale36, cv2_scale, np_reshape, np_reshape64

print("Starting up...")


def wait_for_user():
    """Block until the user presses Enter (interactive checkpoint between stages)."""
    input("Press Enter to continue...")


print("Done!")
wait_for_user()

# -------------------------------------------------------------
print("Check GPU availability, using nvidia-smi")
os.environ["CUDA_VISIBLE_DEVICES"] = "1"  # pin all CUDA work to GPU index 1
print("Done!")
wait_for_user()

# -------------------------------------------------------------
print("Define PyTorch dataset")


class TripletPhotoTour(dset.PhotoTour):
    """From the PhotoTour dataset, generate triplet samples.

    A triplet is composed of a pair of matching patches (anchor, positive)
    and one patch of a different class (negative).
    """

    def __init__(self, train=True, transform=None, batch_size=None,
                 load_random_triplets=False, *arg, **kw):
        super(TripletPhotoTour, self).__init__(*arg, **kw)
        self.transform = transform
        # When True, __getitem__ also returns the negative patch.
        self.out_triplets = load_random_triplets
        self.train = train
        self.n_triplets = args.n_triplets
        self.batch_size = batch_size
        if self.train:
            print('Generating {} triplets'.format(self.n_triplets))
            self.triplets = self.generate_triplets(self.labels, self.n_triplets)

    @staticmethod
    def generate_triplets(labels, num_triplets):
        """Sample `num_triplets` (anchor, positive, negative) index triples.

        Anchor classes are kept unique within a batch-sized window so one
        batch does not contain two triplets from the same class.
        """
        def create_indices(_labels):
            # Map each class label -> list of sample indices with that label.
            inds = dict()
            for idx, ind in enumerate(_labels):
                if ind not in inds:
                    inds[ind] = []
                inds[ind].append(idx)
            return inds

        triplets = []
        indices = create_indices(labels.numpy())
        unique_labels = np.unique(labels.numpy())
        n_classes = unique_labels.shape[0]
        # add only unique class indices in batch
        already_idxs = set()
        for x in tqdm(range(num_triplets)):
            if len(already_idxs) >= args.batch_size:
                already_idxs = set()
            c1 = np.random.randint(0, n_classes)
            while c1 in already_idxs:
                c1 = np.random.randint(0, n_classes)
            already_idxs.add(c1)
            c2 = np.random.randint(0, n_classes)
            while c1 == c2:
                c2 = np.random.randint(0, n_classes)
            if len(indices[c1]) == 2:  # hack to speed up process
                n1, n2 = 0, 1
            else:
                n1 = np.random.randint(0, len(indices[c1]))
                n2 = np.random.randint(0, len(indices[c1]))
                while n1 == n2:
                    n2 = np.random.randint(0, len(indices[c1]))
            n3 = np.random.randint(0, len(indices[c2]))
            triplets.append([indices[c1][n1], indices[c1][n2], indices[c2][n3]])
        return torch.LongTensor(np.array(triplets))

    def __getitem__(self, index):
        def transform_img(img):
            if self.transform is not None:
                img = self.transform(img.numpy())
            return img

        if not self.train:
            # Test mode: return a labelled patch pair (patch1, patch2, match-flag).
            m = self.matches[index]
            img1 = transform_img(self.data[m[0]])
            img2 = transform_img(self.data[m[1]])
            return img1, img2, m[2]
        t = self.triplets[index]
        a, p, n = self.data[t[0]], self.data[t[1]], self.data[t[2]]
        img_a = transform_img(a)
        img_p = transform_img(p)
        img_n = None
        if self.out_triplets:
            img_n = transform_img(n)
        # Optional random flip / 90-degree rotation augmentation, applied to
        # all patches of the triplet consistently.
        if args.fliprot:
            do_flip = random.random() > 0.5
            do_rot = random.random() > 0.5
            if do_rot:
                img_a = img_a.permute(0, 2, 1)
                img_p = img_p.permute(0, 2, 1)
                if self.out_triplets:
                    img_n = img_n.permute(0, 2, 1)
            if do_flip:
                # deepcopy forces a contiguous copy of the negative-stride view
                # so torch.from_numpy accepts it.
                img_a = torch.from_numpy(deepcopy(img_a.numpy()[:, :, ::-1]))
                img_p = torch.from_numpy(deepcopy(img_p.numpy()[:, :, ::-1]))
                if self.out_triplets:
                    img_n = torch.from_numpy(deepcopy(img_n.numpy()[:, :, ::-1]))
        if self.out_triplets:
            return (img_a, img_p, img_n)
        else:
            return (img_a, img_p)

    def __len__(self):
        if self.train:
            return self.triplets.size(0)
        else:
            return self.matches.size(0)


print("Done!")
wait_for_user()

# -------------------------------------------------------------
print("Define the dataloader")


def create_loaders(dataset_names, load_random_triplets=False):
    """Build the training loader plus one test loader per held-out dataset.

    The dataset named `args.training_set` is used for training; every other
    name in `dataset_names` becomes a test loader.
    """
    test_dataset_names = copy(dataset_names)
    test_dataset_names.remove(args.training_set)
    kwargs = {'num_workers': args.num_workers, 'pin_memory': args.pin_memory} if args.cuda else {}
    np_reshape64 = lambda x: np.reshape(x, (64, 64, 1))
    transform_test = transforms.Compose([
        transforms.Lambda(np_reshape64),
        transforms.ToPILImage(),
        transforms.Resize(32),
        transforms.ToTensor()])
    transform_train = transforms.Compose([
        transforms.Lambda(np_reshape64),
        transforms.ToPILImage(),
        transforms.RandomRotation(5, PIL.Image.BILINEAR),
        transforms.RandomResizedCrop(32, scale=(0.9, 1.0), ratio=(0.9, 1.1)),
        transforms.Resize(32),
        transforms.ToTensor()])
    transform = transforms.Compose([
        transforms.Lambda(cv2_scale),
        transforms.Lambda(np_reshape),
        transforms.ToTensor(),
        transforms.Normalize((args.mean_image,), (args.std_image,))])
    if not args.augmentation:
        # Without augmentation, train and test share the plain normalized transform.
        transform_train = transform
        transform_test = transform
    train_loader = torch.utils.data.DataLoader(
        TripletPhotoTour(train=True,
                         load_random_triplets=load_random_triplets,
                         batch_size=args.batch_size,
                         root=args.dataroot,
                         name=args.training_set,
                         download=True,
                         transform=transform_train),
        batch_size=args.batch_size,
        shuffle=False, **kwargs)
    test_loaders = [{'name': name,
                     'dataloader': torch.utils.data.DataLoader(
                         TripletPhotoTour(train=False,
                                          batch_size=args.test_batch_size,
                                          root=args.dataroot,
                                          name=name,
                                          download=True,
                                          transform=transform_test),
                         batch_size=args.test_batch_size,
                         shuffle=False, **kwargs)}
                    for name in test_dataset_names]
    return train_loader, test_loaders


print("Done!")
wait_for_user()

# -------------------------------------------------------------
print("Load Data")
dataset_names = ['liberty', 'notredame', 'yosemite']
train_loader, test_loaders = create_loaders(dataset_names, load_random_triplets=args.load_random_triplets)
print("Done!")
wait_for_user()

# -------------------------------------------------------------
print("Visualization of the Training and Testing Data")  # fixed typo "Visualizaiton"
nrow = 3


def plot_examples(img_tensor, nrow):
    """Show the first `nrow` single-channel patches of a batch in one row."""
    fig, axs = plt.subplots(1, nrow)
    for i, ax in enumerate(axs):
        img = img_tensor[i, 0]
        ax.imshow(img, cmap='gray')
        ax.axis('off')


# Peek at the first training batch only.
for i_batch, sample_batched in enumerate(train_loader):
    print("IN TRAINing, each data entry has {} elements, each with size of: ".format(len(sample_batched)))
    print(sample_batched[0].shape)
    print("Below two rows images are {} examples for patch_a and patch_p".format(nrow))
    if i_batch == 0:
        plot_examples(sample_batched[0], nrow)
        plot_examples(sample_batched[1], nrow)
        plt.show()
        break
print("Done!")
wait_for_user()

# -------------------------------------------------------------
print("Testing")
# Peek at the first test batch only.
for i_batch, sample_batched in enumerate(test_loaders[0]['dataloader']):
    print("IN TESTING, each data entry has {} elements, with size of: {}, {}, and {}".format(len(sample_batched),
                                                                                             sample_batched[0].shape,
                                                                                             sample_batched[1].shape,
                                                                                             sample_batched[2].shape))
    print("\nBelow two rows images are {} examples for patch_a and patch_p.".format(nrow))  # fixed "for for"
    if i_batch == 0:
        plot_examples(sample_batched[0], nrow)
        plot_examples(sample_batched[1], nrow)
        print("labels are :", sample_batched[2][:nrow])
        plt.show()
        break
print("Done!")
wait_for_user()

# -------------------------------------------------------------
print("Build Network Model")
# load network
from descriptor import DesNet
model = DesNet()
if args.cuda:
    model.cuda()
print("Done!")
wait_for_user()

# -------------------------------------------------------------
print("Define optimize")
# define optimizer


def create_optimizer(model, new_lr):
    """Return an SGD or Adam optimizer over `model`'s parameters per args.optimizer."""
    if args.optimizer == 'sgd':
        optimizer = optim.SGD(model.parameters(), lr=new_lr,
                              momentum=0.9, dampening=0.9,
                              weight_decay=args.wd)
    elif args.optimizer == 'adam':
        optimizer = optim.Adam(model.parameters(), lr=new_lr,
                               weight_decay=args.wd)
    else:
        raise Exception('Not supported optimizer: {0}'.format(args.optimizer))
    return optimizer


optimizer1 = create_optimizer(model.features, args.lr)
print("Done!")
wait_for_user()

# -------------------------------------------------------------
print("Define a training module")


def train(train_loader, model, optimizer, epoch, logger, load_triplets=False):
    """Run one training epoch over `train_loader` and save a checkpoint.

    Uses the module-level `suffix` and `loss_DesNet` defined later in this
    script (both exist by the time this function is first called).
    """
    # switch to train mode
    model.train()
    pbar = tqdm(enumerate(train_loader))
    for batch_idx, data in pbar:
        if load_triplets:
            data_a, data_p, data_n = data
        else:
            data_a, data_p = data
        if args.cuda:
            data_a, data_p = data_a.cuda(), data_p.cuda()
        data_a, data_p = Variable(data_a), Variable(data_p)
        out_a = model(data_a)
        out_p = model(data_p)
        if load_triplets:
            # BUGFIX: .cuda() was unconditional here, crashing CPU-only runs;
            # guard it like every other tensor transfer in this script.
            if args.cuda:
                data_n = data_n.cuda()
            data_n = Variable(data_n)
            out_n = model(data_n)

        loss = loss_DesNet(out_a, out_p,
                           margin=args.margin,
                           anchor_swap=args.anchorswap,
                           anchor_ave=args.anchorave,
                           batch_reduce=args.batch_reduce,
                           loss_type=args.loss)
        if args.decor:
            loss += CorrelationPenaltyLoss()(out_a)

        if args.gor:
            loss += args.alpha * global_orthogonal_regularization(out_a, out_n)

        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        adjust_learning_rate(optimizer)
        if batch_idx % args.log_interval == 0:
            pbar.set_description(
                'Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format(
                    epoch, batch_idx * len(data_a), len(train_loader.dataset),
                    100. * batch_idx / len(train_loader),
                    loss.item()))
    if (args.enable_logging):
        # logger.log_value('loss', loss.data[0]).step()
        logger.log_value('loss', loss.item()).step()
    # BUGFIX: replaced the `try: os.stat(...) / except:` existence probe
    # (bare except hides real errors) with makedirs(exist_ok=True).
    os.makedirs('{}{}'.format(args.model_dir, suffix), exist_ok=True)
    torch.save({'epoch': epoch + 1, 'state_dict': model.state_dict()},
               '{}{}/checkpoint_{}.pth'.format(args.model_dir, suffix, epoch))


def adjust_learning_rate(optimizer):
    """Updates the learning rate given the learning rate decay.

    The routine has been implemented according to the original Lua SGD
    optimizer: lr decays linearly to 0 over n_triplets * epochs steps.
    """
    for group in optimizer.param_groups:
        if 'step' not in group:
            group['step'] = 0.
        else:
            group['step'] += 1.
        group['lr'] = args.lr * (
            1.0 - float(group['step']) * float(args.batch_size) / (args.n_triplets * float(args.epochs)))
    return


print("Done!")
wait_for_user()

# -------------------------------------------------------------
print("Define a test module")


def test(test_loader, model, epoch, logger, logger_test_name):
    """Evaluate `model` on a patch-pair loader and report FPR@95%recall."""
    # switch to evaluate mode
    model.eval()
    labels, distances = [], []
    pbar = tqdm(enumerate(test_loader))
    for batch_idx, (data_a, data_p, label) in pbar:
        # data_a / data_p: patch batches, label: match flag per pair.
        if args.cuda:
            data_a, data_p = data_a.cuda(), data_p.cuda()
        # NOTE(review): Variable(volatile=True) is the legacy pre-0.4 no-grad
        # idiom; kept as-is for compatibility with the rest of this script.
        data_a, data_p, label = Variable(data_a, volatile=True), \
            Variable(data_p, volatile=True), Variable(label)
        out_a = model(data_a)
        out_p = model(data_p)
        dists = torch.sqrt(torch.sum((out_a - out_p) ** 2, 1))  # euclidean distance
        distances.append(dists.data.cpu().numpy().reshape(-1, 1))
        ll = label.data.cpu().numpy().reshape(-1, 1)
        labels.append(ll)
        if batch_idx % args.log_interval == 0:
            pbar.set_description(logger_test_name + ' Test Epoch: {} [{}/{} ({:.0f}%)]'.format(
                epoch, batch_idx * len(data_a), len(test_loader.dataset),
                100. * batch_idx / len(test_loader)))
    num_tests = test_loader.dataset.matches.size(0)
    labels = np.vstack(labels).reshape(num_tests)
    distances = np.vstack(distances).reshape(num_tests)
    fpr95 = ErrorRateAt95Recall(labels, 1.0 / (distances + 1e-8))
    print('\33[91mTest set: Accuracy(FPR95): {:.8f}\n\33[0m'.format(fpr95))
    if (args.enable_logging):
        logger.log_value(logger_test_name + ' fpr95', fpr95)
    return


def ErrorRateAt95Recall(labels, scores):
    """Return the false positive rate at the threshold reaching 95% recall.

    `scores` are similarity scores (higher = more similar); they are inverted
    back into distances and pairs are sorted by increasing distance.
    """
    distances = 1.0 / (scores + 1e-8)
    recall_point = 0.95
    labels = labels[np.argsort(distances)]
    # Sliding threshold: get first index where recall >= recall_point.
    # This is the index where the number of elements with label==1 below the
    # threshold reaches a fraction of 'recall_point' of the total number of
    # elements with label==1.
    # (np.argmax returns the first occurrence of a '1' in a bool array).
    threshold_index = np.argmax(np.cumsum(labels) >= recall_point * np.sum(labels))
    FP = np.sum(labels[:threshold_index] == 0)  # Below threshold (i.e., labelled positive), but should be negative
    TN = np.sum(labels[threshold_index:] == 0)  # Above threshold (i.e., labelled negative), and should be negative
    return float(FP) / float(FP + TN)


print("Done!")
wait_for_user()

# -------------------------------------------------------------
print("Training")
start = args.start_epoch
end = start + args.epochs
logger, file_logger = None, None
triplet_flag = args.load_random_triplets
from Losses import loss_DesNet
TEST_ON_W1BS = True
LOG_DIR = args.log_dir
if (args.enable_logging):
    from Loggers import Logger, FileLogger
    logger = Logger(LOG_DIR)

# Build a run-identifying suffix for checkpoint/log directories.
suffix = '{}_{}_{}'.format(args.experiment_name, args.training_set, args.batch_reduce)
if args.gor:
    suffix = suffix + '_gor_alpha{:1.1f}'.format(args.alpha)
if args.anchorswap:
    suffix = suffix + '_as'
if args.anchorave:
    suffix = suffix + '_av'
if args.fliprot:
    suffix = suffix + '_fliprot'
res_fpr_liberty = torch.zeros(end - start, 1)
res_fpr_notredame = torch.zeros(end - start, 1)
res_fpr_yosemite = torch.zeros(end - start, 1)
for epoch in range(start, end):
    # iterate over test loaders and test results
    train(train_loader, model, optimizer1, epoch, logger, triplet_flag)
    for test_loader in test_loaders:
        test(test_loader['dataloader'], model, epoch, logger, test_loader['name'])
    # Randomize train loader batches by re-sampling triplets. The freshly
    # built test loaders are deliberately discarded: evaluation keeps using
    # the original `test_loaders`.
    train_loader, test_loaders2 = create_loaders(dataset_names, load_random_triplets=triplet_flag)
print("Done!")
print("Finished.")
wait_for_user()
Stand_alone.py
Source:Stand_alone.py
from copy import deepcopy, copy
from config_profile import args
from Utils import cv2_scale36, cv2_scale, np_reshape, np_reshape64

print("Starting up...")


def wait_for_user():
    """Block until the user presses Enter (interactive checkpoint between stages)."""
    input("Press Enter to continue...")


print("Done!")
wait_for_user()

# -------------------------------------------------------------
print("Check GPU availability, using nvidia-smi")
os.environ["CUDA_VISIBLE_DEVICES"] = "1"  # pin all CUDA work to GPU index 1
print("Done!")
wait_for_user()

# -------------------------------------------------------------
print("Define PyTorch dataset")


class TripletPhotoTour(dset.PhotoTour):
    """From the PhotoTour dataset, generate triplet samples.

    A triplet is composed of a pair of matching patches (anchor, positive)
    and one patch of a different class (negative).
    """

    def __init__(self, train=True, transform=None, batch_size=None,
                 load_random_triplets=False, *arg, **kw):
        super(TripletPhotoTour, self).__init__(*arg, **kw)
        self.transform = transform
        # When True, __getitem__ also returns the negative patch.
        self.out_triplets = load_random_triplets
        self.train = train
        self.n_triplets = args.n_triplets
        self.batch_size = batch_size
        if self.train:
            print('Generating {} triplets'.format(self.n_triplets))
            self.triplets = self.generate_triplets(self.labels, self.n_triplets)

    @staticmethod
    def generate_triplets(labels, num_triplets):
        """Sample `num_triplets` (anchor, positive, negative) index triples.

        Anchor classes are kept unique within a batch-sized window so one
        batch does not contain two triplets from the same class.
        """
        def create_indices(_labels):
            # Map each class label -> list of sample indices with that label.
            inds = dict()
            for idx, ind in enumerate(_labels):
                if ind not in inds:
                    inds[ind] = []
                inds[ind].append(idx)
            return inds

        triplets = []
        indices = create_indices(labels.numpy())
        unique_labels = np.unique(labels.numpy())
        n_classes = unique_labels.shape[0]
        # add only unique class indices in batch
        already_idxs = set()
        for x in tqdm(range(num_triplets)):
            if len(already_idxs) >= args.batch_size:
                already_idxs = set()
            c1 = np.random.randint(0, n_classes)
            while c1 in already_idxs:
                c1 = np.random.randint(0, n_classes)
            already_idxs.add(c1)
            c2 = np.random.randint(0, n_classes)
            while c1 == c2:
                c2 = np.random.randint(0, n_classes)
            if len(indices[c1]) == 2:  # hack to speed up process
                n1, n2 = 0, 1
            else:
                n1 = np.random.randint(0, len(indices[c1]))
                n2 = np.random.randint(0, len(indices[c1]))
                while n1 == n2:
                    n2 = np.random.randint(0, len(indices[c1]))
            n3 = np.random.randint(0, len(indices[c2]))
            triplets.append([indices[c1][n1], indices[c1][n2], indices[c2][n3]])
        return torch.LongTensor(np.array(triplets))

    def __getitem__(self, index):
        def transform_img(img):
            if self.transform is not None:
                img = self.transform(img.numpy())
            return img

        if not self.train:
            # Test mode: return a labelled patch pair (patch1, patch2, match-flag).
            m = self.matches[index]
            img1 = transform_img(self.data[m[0]])
            img2 = transform_img(self.data[m[1]])
            return img1, img2, m[2]
        t = self.triplets[index]
        a, p, n = self.data[t[0]], self.data[t[1]], self.data[t[2]]
        img_a = transform_img(a)
        img_p = transform_img(p)
        img_n = None
        if self.out_triplets:
            img_n = transform_img(n)
        # Optional random flip / 90-degree rotation augmentation, applied to
        # all patches of the triplet consistently.
        if args.fliprot:
            do_flip = random.random() > 0.5
            do_rot = random.random() > 0.5
            if do_rot:
                img_a = img_a.permute(0, 2, 1)
                img_p = img_p.permute(0, 2, 1)
                if self.out_triplets:
                    img_n = img_n.permute(0, 2, 1)
            if do_flip:
                # deepcopy forces a contiguous copy of the negative-stride view
                # so torch.from_numpy accepts it.
                img_a = torch.from_numpy(deepcopy(img_a.numpy()[:, :, ::-1]))
                img_p = torch.from_numpy(deepcopy(img_p.numpy()[:, :, ::-1]))
                if self.out_triplets:
                    img_n = torch.from_numpy(deepcopy(img_n.numpy()[:, :, ::-1]))
        if self.out_triplets:
            return (img_a, img_p, img_n)
        else:
            return (img_a, img_p)

    def __len__(self):
        if self.train:
            return self.triplets.size(0)
        else:
            return self.matches.size(0)


print("Done!")
wait_for_user()

# -------------------------------------------------------------
print("Define the dataloader")


def create_loaders(dataset_names, load_random_triplets=False):
    """Build the training loader plus one test loader per held-out dataset.

    The dataset named `args.training_set` is used for training; every other
    name in `dataset_names` becomes a test loader.
    """
    test_dataset_names = copy(dataset_names)
    test_dataset_names.remove(args.training_set)
    kwargs = {'num_workers': args.num_workers, 'pin_memory': args.pin_memory} if args.cuda else {}
    np_reshape64 = lambda x: np.reshape(x, (64, 64, 1))
    transform_test = transforms.Compose([
        transforms.Lambda(np_reshape64),
        transforms.ToPILImage(),
        transforms.Resize(32),
        transforms.ToTensor()])
    transform_train = transforms.Compose([
        transforms.Lambda(np_reshape64),
        transforms.ToPILImage(),
        transforms.RandomRotation(5, PIL.Image.BILINEAR),
        transforms.RandomResizedCrop(32, scale=(0.9, 1.0), ratio=(0.9, 1.1)),
        transforms.Resize(32),
        transforms.ToTensor()])
    transform = transforms.Compose([
        transforms.Lambda(cv2_scale),
        transforms.Lambda(np_reshape),
        transforms.ToTensor(),
        transforms.Normalize((args.mean_image,), (args.std_image,))])
    if not args.augmentation:
        # Without augmentation, train and test share the plain normalized transform.
        transform_train = transform
        transform_test = transform
    train_loader = torch.utils.data.DataLoader(
        TripletPhotoTour(train=True,
                         load_random_triplets=load_random_triplets,
                         batch_size=args.batch_size,
                         root=args.dataroot,
                         name=args.training_set,
                         download=True,
                         transform=transform_train),
        batch_size=args.batch_size,
        shuffle=False, **kwargs)
    test_loaders = [{'name': name,
                     'dataloader': torch.utils.data.DataLoader(
                         TripletPhotoTour(train=False,
                                          batch_size=args.test_batch_size,
                                          root=args.dataroot,
                                          name=name,
                                          download=True,
                                          transform=transform_test),
                         batch_size=args.test_batch_size,
                         shuffle=False, **kwargs)}
                    for name in test_dataset_names]
    return train_loader, test_loaders


print("Done!")
wait_for_user()

# -------------------------------------------------------------
print("Load Data")
dataset_names = ['liberty', 'notredame', 'yosemite']
train_loader, test_loaders = create_loaders(dataset_names, load_random_triplets=args.load_random_triplets)
print("Done!")
wait_for_user()

# -------------------------------------------------------------
print("Visualization of the Training and Testing Data")  # fixed typo "Visualizaiton"
nrow = 3


def plot_examples(img_tensor, nrow):
    """Show the first `nrow` single-channel patches of a batch in one row."""
    fig, axs = plt.subplots(1, nrow)
    for i, ax in enumerate(axs):
        img = img_tensor[i, 0]
        ax.imshow(img, cmap='gray')
        ax.axis('off')


# Peek at the first training batch only.
for i_batch, sample_batched in enumerate(train_loader):
    print("IN TRAINing, each data entry has {} elements, each with size of: ".format(len(sample_batched)))
    print(sample_batched[0].shape)
    print("Below two rows images are {} examples for patch_a and patch_p".format(nrow))
    if i_batch == 0:
        plot_examples(sample_batched[0], nrow)
        plot_examples(sample_batched[1], nrow)
        plt.show()
        break
print("Done!")
wait_for_user()

# -------------------------------------------------------------
print("Testing")
# Peek at the first test batch only.
for i_batch, sample_batched in enumerate(test_loaders[0]['dataloader']):
    print("IN TESTING, each data entry has {} elements, with size of: {}, {}, and {}".format(len(sample_batched),
                                                                                             sample_batched[0].shape,
                                                                                             sample_batched[1].shape,
                                                                                             sample_batched[2].shape))
    print("\nBelow two rows images are {} examples for patch_a and patch_p.".format(nrow))  # fixed "for for"
    if i_batch == 0:
        plot_examples(sample_batched[0], nrow)
        plot_examples(sample_batched[1], nrow)
        print("labels are :", sample_batched[2][:nrow])
        plt.show()
        break
print("Done!")
wait_for_user()

# -------------------------------------------------------------
print("Build Network Model")
# load network
from descriptor import DesNet
model = DesNet()
if args.cuda:
    model.cuda()
print("Done!")
wait_for_user()

# -------------------------------------------------------------
print("Define optimize")
# define optimizer


def create_optimizer(model, new_lr):
    """Return an SGD or Adam optimizer over `model`'s parameters per args.optimizer."""
    if args.optimizer == 'sgd':
        optimizer = optim.SGD(model.parameters(), lr=new_lr,
                              momentum=0.9, dampening=0.9,
                              weight_decay=args.wd)
    elif args.optimizer == 'adam':
        optimizer = optim.Adam(model.parameters(), lr=new_lr,
                               weight_decay=args.wd)
    else:
        raise Exception('Not supported optimizer: {0}'.format(args.optimizer))
    return optimizer


optimizer1 = create_optimizer(model.features, args.lr)
print("Done!")
wait_for_user()

# -------------------------------------------------------------
print("Define a training module")


def train(train_loader, model, optimizer, epoch, logger, load_triplets=False):
    """Run one training epoch over `train_loader` and save a checkpoint.

    Uses the module-level `suffix` and `loss_DesNet` defined later in this
    script (both exist by the time this function is first called).
    """
    # switch to train mode
    model.train()
    pbar = tqdm(enumerate(train_loader))
    for batch_idx, data in pbar:
        if load_triplets:
            data_a, data_p, data_n = data
        else:
            data_a, data_p = data
        if args.cuda:
            data_a, data_p = data_a.cuda(), data_p.cuda()
        data_a, data_p = Variable(data_a), Variable(data_p)
        out_a = model(data_a)
        out_p = model(data_p)
        if load_triplets:
            # BUGFIX: .cuda() was unconditional here, crashing CPU-only runs;
            # guard it like every other tensor transfer in this script.
            if args.cuda:
                data_n = data_n.cuda()
            data_n = Variable(data_n)
            out_n = model(data_n)

        loss = loss_DesNet(out_a, out_p,
                           margin=args.margin,
                           anchor_swap=args.anchorswap,
                           anchor_ave=args.anchorave,
                           batch_reduce=args.batch_reduce,
                           loss_type=args.loss)
        if args.decor:
            loss += CorrelationPenaltyLoss()(out_a)

        if args.gor:
            loss += args.alpha * global_orthogonal_regularization(out_a, out_n)

        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        adjust_learning_rate(optimizer)
        if batch_idx % args.log_interval == 0:
            pbar.set_description(
                'Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format(
                    epoch, batch_idx * len(data_a), len(train_loader.dataset),
                    100. * batch_idx / len(train_loader),
                    loss.item()))
    if (args.enable_logging):
        # logger.log_value('loss', loss.data[0]).step()
        logger.log_value('loss', loss.item()).step()
    # BUGFIX: replaced the `try: os.stat(...) / except:` existence probe
    # (bare except hides real errors) with makedirs(exist_ok=True).
    os.makedirs('{}{}'.format(args.model_dir, suffix), exist_ok=True)
    torch.save({'epoch': epoch + 1, 'state_dict': model.state_dict()},
               '{}{}/checkpoint_{}.pth'.format(args.model_dir, suffix, epoch))


def adjust_learning_rate(optimizer):
    """Updates the learning rate given the learning rate decay.

    The routine has been implemented according to the original Lua SGD
    optimizer: lr decays linearly to 0 over n_triplets * epochs steps.
    """
    for group in optimizer.param_groups:
        if 'step' not in group:
            group['step'] = 0.
        else:
            group['step'] += 1.
        group['lr'] = args.lr * (
            1.0 - float(group['step']) * float(args.batch_size) / (args.n_triplets * float(args.epochs)))
    return


print("Done!")
wait_for_user()

# -------------------------------------------------------------
print("Define a test module")


def test(test_loader, model, epoch, logger, logger_test_name):
    """Evaluate `model` on a patch-pair loader and report FPR@95%recall."""
    # switch to evaluate mode
    model.eval()
    labels, distances = [], []
    pbar = tqdm(enumerate(test_loader))
    for batch_idx, (data_a, data_p, label) in pbar:
        # data_a / data_p: patch batches, label: match flag per pair.
        if args.cuda:
            data_a, data_p = data_a.cuda(), data_p.cuda()
        # NOTE(review): Variable(volatile=True) is the legacy pre-0.4 no-grad
        # idiom; kept as-is for compatibility with the rest of this script.
        data_a, data_p, label = Variable(data_a, volatile=True), \
            Variable(data_p, volatile=True), Variable(label)
        out_a = model(data_a)
        out_p = model(data_p)
        dists = torch.sqrt(torch.sum((out_a - out_p) ** 2, 1))  # euclidean distance
        distances.append(dists.data.cpu().numpy().reshape(-1, 1))
        ll = label.data.cpu().numpy().reshape(-1, 1)
        labels.append(ll)
        if batch_idx % args.log_interval == 0:
            pbar.set_description(logger_test_name + ' Test Epoch: {} [{}/{} ({:.0f}%)]'.format(
                epoch, batch_idx * len(data_a), len(test_loader.dataset),
                100. * batch_idx / len(test_loader)))
    num_tests = test_loader.dataset.matches.size(0)
    labels = np.vstack(labels).reshape(num_tests)
    distances = np.vstack(distances).reshape(num_tests)
    fpr95 = ErrorRateAt95Recall(labels, 1.0 / (distances + 1e-8))
    print('\33[91mTest set: Accuracy(FPR95): {:.8f}\n\33[0m'.format(fpr95))
    if (args.enable_logging):
        logger.log_value(logger_test_name + ' fpr95', fpr95)
    return


def ErrorRateAt95Recall(labels, scores):
    """Return the false positive rate at the threshold reaching 95% recall.

    `scores` are similarity scores (higher = more similar); they are inverted
    back into distances and pairs are sorted by increasing distance.
    """
    distances = 1.0 / (scores + 1e-8)
    recall_point = 0.95
    labels = labels[np.argsort(distances)]
    # Sliding threshold: get first index where recall >= recall_point.
    # This is the index where the number of elements with label==1 below the
    # threshold reaches a fraction of 'recall_point' of the total number of
    # elements with label==1.
    # (np.argmax returns the first occurrence of a '1' in a bool array).
    threshold_index = np.argmax(np.cumsum(labels) >= recall_point * np.sum(labels))
    FP = np.sum(labels[:threshold_index] == 0)  # Below threshold (i.e., labelled positive), but should be negative
    TN = np.sum(labels[threshold_index:] == 0)  # Above threshold (i.e., labelled negative), and should be negative
    return float(FP) / float(FP + TN)


print("Done!")
wait_for_user()

# -------------------------------------------------------------
print("Training")
start = args.start_epoch
end = start + args.epochs
logger, file_logger = None, None
triplet_flag = args.load_random_triplets
from Losses import loss_DesNet
TEST_ON_W1BS = True
LOG_DIR = args.log_dir
if (args.enable_logging):
    from Loggers import Logger, FileLogger
    logger = Logger(LOG_DIR)

# Build a run-identifying suffix for checkpoint/log directories.
suffix = '{}_{}_{}'.format(args.experiment_name, args.training_set, args.batch_reduce)
if args.gor:
    suffix = suffix + '_gor_alpha{:1.1f}'.format(args.alpha)
if args.anchorswap:
    suffix = suffix + '_as'
if args.anchorave:
    suffix = suffix + '_av'
if args.fliprot:
    suffix = suffix + '_fliprot'
res_fpr_liberty = torch.zeros(end - start, 1)
res_fpr_notredame = torch.zeros(end - start, 1)
res_fpr_yosemite = torch.zeros(end - start, 1)
for epoch in range(start, end):
    # iterate over test loaders and test results
    train(train_loader, model, optimizer1, epoch, logger, triplet_flag)
    for test_loader in test_loaders:
        test(test_loader['dataloader'], model, epoch, logger, test_loader['name'])
    # Randomize train loader batches by re-sampling triplets. The freshly
    # built test loaders are deliberately discarded: evaluation keeps using
    # the original `test_loaders`.
    train_loader, test_loaders2 = create_loaders(dataset_names, load_random_triplets=triplet_flag)
print("Done!")
print("Finished.")
wait_for_user()
view.py
Source:view.py
1#!/usr/bin/env python32import os3import getpass4def wait_for_user():5 print('\n\n\nPress ENTER to continue...')6 input()7def display_header():8 os.system('clear')9 # print('***********************')10 # print('** **')11 # print('* Terminal Trader *')12 # print('** **')13 # print('***********************\n')14 print(80 * '*')15 print('{0:^80}'.format('TERMINAL TRADER'))16 print(80 * '*')17 print('\n\n')18def display_error(error):19 display_header()20 print('\n\n\nAn ERROR ocurred:')21 print('\n{0}'.format('Invalid input'))22 print('\n{0}'.format('Details:'))23 print('\n{0}'.format(error))24 wait_for_user()25def display_success():26 display_header()27 print('\n\n\nOperation executed successfully.')28 wait_for_user()29def display_failure():30 display_header()31 print('\n\n\nUnable to execute operation due to invalid input or business constraints.')32 wait_for_user()33def display_invalid_menu_option():34 display_header()35 print('\n\n\nInvalid option. Try again.')36 wait_for_user()37def display_invalid_login():38 display_header()39 print('\n\n\nInvalid login.')40 wait_for_user()41def display_insufficient_funds(balance):42 display_header()43 print('\n\n\nInsufficient funds. 
def display_insufficient_holdings(ticker_symbol, holding_volume):
    """Tell the user a sell order exceeds their current holdings.

    Args:
        ticker_symbol (str): Stock symbol the user tried to sell.
        holding_volume (float): Volume the user actually holds.
    """
    display_header()
    msg = '\n\n\nInsufficient holdings.\nYour holding volume for "{0}" is "{1:.2f}".'
    print(msg.format(ticker_symbol, holding_volume))
    wait_for_user()


def display_user_balance(username, balance):
    """Show a single user's name and current cash balance."""
    display_header()
    print('\nUser: {0}'.format(username))
    print('Balance: {0:.2f}'.format(balance))
    wait_for_user()


def display_last_price(price, wait):
    """Print the latest quote for a stock.

    Args:
        price (float): Last traded price.
        wait (bool): If True, pause until the user presses Enter.
    """
    # display_header()  # intentionally disabled: quote may be shown inline
    print('Quote: {0:.2f}'.format(price))
    if wait:
        wait_for_user()


def display_lookup(ticker_symbol, wait):
    """Print the ticker symbol found by a company-name lookup.

    Args:
        ticker_symbol (str): Symbol resolved from the lookup.
        wait (bool): If True, pause until the user presses Enter.
    """
    # display_header()  # intentionally disabled: result may be shown inline
    print('\nSymbol: {0}'.format(ticker_symbol))
    if wait:
        wait_for_user()


def display_users(users):
    """Render a table of user accounts (id, username, profile, balance).

    Args:
        users: Iterable of dicts with keys 'pk', 'username', 'profile'
            ('A' means admin) and 'cur_balance'; or None when no records
            are available.
    """
    display_header()
    if users is not None:
        # Column headers (text columns) and row pattern (zero-padded id).
        header_pattern = '{0:<6} | {1:<15} | {2:^9} | {3:>15}'
        print(header_pattern.format('Id', 'Username', 'Profile', 'Balance'))
        row_pattern = '{0:06d} | {1:<15} | {2:^9} | {3:>15}'
        for user in users:
            print(row_pattern.format(user['pk'],
                                     user['username'],
                                     'Admin' if user['profile'] == 'A' else 'User',
                                     '{0:.2f}'.format(user['cur_balance'])))
    else:
        print('No user records available.')
    wait_for_user()


def display_order_history(username, orders):
    """Render a user's buy/sell order history as a table.

    Args:
        username (str): Owner of the orders (used in the empty message).
        orders: Iterable of dicts with keys 'date_time' (datetime),
            'ticker_symbol', 'order_type' ('B' = buy), 'fee',
            'unit_price' and 'volume'; or None when no records exist.
    """
    display_header()
    if orders is not None:
        pattern = '{0:<20} | {1:^6} | {2:^4} | {3:>5} | {4:>10} | {5:>10} | {6:>12}'
        print(pattern.format('Date', 'Symbol', 'Type', 'Fee',
                             'Unit Price', 'Volume', 'Trade Value*'))
        for order in orders:
            # Gross trade value, before fees (see footnote printed below).
            trade_value = order['volume'] * order['unit_price']
            print(pattern.format(order['date_time'].strftime('%Y/%m/%d %H:%M:%S'),
                                 order['ticker_symbol'],
                                 'Buy' if order['order_type'] == 'B' else 'Sell',
                                 '{0:.2f}'.format(order['fee']),
                                 '{0:.2f}'.format(order['unit_price']),
                                 '{0:.2f}'.format(order['volume']),
                                 '{0:.2f}'.format(trade_value)
                                 ))
        print('\n\nTrade Value*: =(Unit Price x Volume). No fees included.')
    else:
        print('No order records available for "{0}".'.format(username))
    wait_for_user()


def display_account_summary(pl, wait, header):
    """Print the realized/unrealized profit-and-loss summary for an account.

    Args:
        pl (dict): P/L data with keys 'initial_balance', 'cur_balance',
            'buy_hold_value', 'account_real_value', 'mkt_hold_value',
            'account_mkt_value', 'real_pl_value', 'real_pl_percent',
            'unreal_pl_value' and 'unreal_pl_percent'.
        wait (bool): If True, pause until the user presses Enter.
        header (bool): If True, clear the screen and print a greeting
            (skipped when embedded inside the dashboard view).
    """
    if header:
        display_header()
        # NOTE(review): greeting reconstructed as part of the header branch
        # (source formatting was ambiguous) — confirm against original layout.
        print('Check out your account summary!')
    cur_balance = pl['cur_balance']
    hold_buy_value = pl['buy_hold_value']
    account_real_value = pl['account_real_value']
    hold_mkt_value = pl['mkt_hold_value']
    account_mkt_value = pl['account_mkt_value']
    print('\n\n{0:^80}'.format('Summary'))
    print(80 * '-')
    print('Initial Balance: {0:.2f}'.format(pl['initial_balance']))
    print('Current Balance: {0:.2f}\n'.format(cur_balance))
    print('Holdings (Buy Price): {0:.2f}'.format(
        hold_buy_value))
    print('Realized Account Value: {0:.2f}'.format(
        account_real_value))
    label = 'Realized P/L | (%): {0:.2f} ({1:.2f}%)\n'
    print(label.format(pl['real_pl_value'], pl['real_pl_percent']))
    print('Holdings (Market Price): {0:.2f}'.format(
        hold_mkt_value))
    print('Unrealized Account Value: {0:.2f}'.format(
        account_mkt_value))
    label = 'Unrealized P/L | (%): {0:.2f} ({1:.2f}%)'
    print(label.format(pl['unreal_pl_value'], pl['unreal_pl_percent']))
    if wait:
        wait_for_user()


def display_user_dashboard(username, pl):
    """Show the full dashboard: account summary plus per-symbol holdings.

    Args:
        username (str): Account owner (used in titles and empty messages).
        pl (dict): P/L data as for display_account_summary, plus a
            'holdings' entry: iterable of dicts with 'ticker_symbol',
            'volume', 'avg_buy_price', 'mkt_price', 'total_buy_price',
            'total_mkt_price' and 'difference'; or None.
    """
    display_header()
    print(80 * '-')
    title = 'Dashboard - User "{0}"'.format(username)
    print('{0:^80}'.format(title))
    print(80 * '-')
    if pl is not None:
        # Embed the summary without its own header or pause.
        display_account_summary(pl, False, False)
        holdings = pl['holdings']
        if holdings is not None:
            print('\n\n{0:^80}'.format('Holdings'))
            print(80 * '-')
            pattern = '{0:^6} | {1:>6} | {2:>10} | {3:>10} | {4:>10} | {5:>10} | {6:>10}'
            print(pattern.format('Symbol', 'Volume', 'Buy Price*',
                                 'Mkt Price*', 'Buy Total', 'Mkt Total', 'Difference'))
            for item in holdings:
                print(pattern.format(item['ticker_symbol'],
                                     '{0:.2f}'.format(item['volume']),
                                     '{0:.2f}'.format(item['avg_buy_price']),
                                     '{0:.2f}'.format(item['mkt_price']),
                                     '{0:.2f}'.format(item['total_buy_price']),
                                     '{0:.2f}'.format(item['total_mkt_price']),
                                     '{0:.2f}'.format(item['difference'])
                                     ))
            print('\n\n')
            print(80 * '-')
            print('Buy Price*: Average buy prices. Fees included.')
            print('Mkt. Price*: Fees included.')
        else:
            print('No holding records available for "{0}".'.format(username))
    else:
        print('No P/L data available for "{0}".'.format(username))
    wait_for_user()


def display_leaderboard(user_accounts):
    """Show top-10 leaderboards by realized and unrealized P/L.

    Args:
        user_accounts: List of dicts with keys 'username', 'cur_balance',
            'account_real_value', 'account_mkt_value', 'real_pl_value',
            'real_pl_percent', 'unreal_pl_value' and 'unreal_pl_percent'.
    """
    # TODO Refactor - this is UGLY
    display_header()
    print(80 * '-')
    print('{0:^80}'.format('Leaderboards'))
    print(80 * '-')
    if user_accounts:
        pattern = '{0:>2} | {1:<10} | {2:>12} | {3:>13} | {4:>10} | {5:>10}'
        # --- Realized P/L section ---
        realized_pl = sorted(
            user_accounts, key=lambda k: k['real_pl_value'], reverse=True)
        count = min(10, len(realized_pl))  # show at most the top 10
        print('\n\n{0:^80}'.format('Realized Profit / Loss'))
        print(80 * '-')
        print(pattern.format('#', 'Username', 'Cur. Balance', 'Account Value',
                             'P/L Value', 'P/L %'))
        for i in range(count):
            item = realized_pl[i]
            print(pattern.format(str(i + 1),
                                 item['username'],
                                 '{0:.2f}'.format(item['cur_balance']),
                                 '{0:.2f}'.format(item['account_real_value']),
                                 '{0:.2f}'.format(item['real_pl_value']),
                                 '{0:.2f}'.format(item['real_pl_percent'])
                                 ))
        # --- Unrealized P/L section ---
        unreal_pl = sorted(
            user_accounts, key=lambda k: k['unreal_pl_value'], reverse=True)
        count = min(10, len(unreal_pl))
        print('\n\n{0:^80}'.format('Unrealized Profit / Loss'))
        print(80 * '-')
        print(pattern.format('#', 'Username', 'Cur. Balance', 'Account Value',
                             'P/L Value', 'P/L %'))
        for i in range(count):
            item = unreal_pl[i]
            print(pattern.format(str(i + 1),
                                 item['username'],
                                 '{0:.2f}'.format(item['cur_balance']),
                                 '{0:.2f}'.format(item['account_mkt_value']),
                                 '{0:.2f}'.format(item['unreal_pl_value']),
                                 '{0:.2f}'.format(item['unreal_pl_percent'])
                                 ))
    else:
        print('No user accounts available.')
    wait_for_user()


def main_global_menu():
    """Show the pre-login menu and return the user's raw choice string."""
    display_header()
    print('\n\n\n1 - Login')
    # print('2 - Create user')
    print('0 - Exit')
    return input('\n\n\nType your choice: ')


def main_admin_menu(username):
    """Show the admin menu and return the raw option string."""
    display_header()
    print('Hello, {0}!'.format(username))
    print('\n\nChoose an option:')
    print('1 - Create user account')
    print('2 - List user accounts')
    print('3 - Delete user account')
    print('4 - Leaderboard')
    print('0 - Exit')
    return input('\n\nType your option: ')


def main_user_menu(username, balance):
    """Show the regular-user menu and return the raw option string."""
    display_header()
    print('Hello, {0}!'.format(username))
    print('Your balance is, {0:.2f}!'.format(balance))
    print('\n\nChoose an option:')
    print('b|buy - Buy Stock')
    print('s|sell - Sell Stock')
    print('l|lookup - Lookup Stock Symbol')
    print('q|quote - Stock Quote')
    print('a|balance - Balance')
    print('o|orders - Order History')
    print('d|dashboard - Dashboard (Summary + Holding Details)')
    print('e|exit - Exit')
    return input('\n\nType your option: ')


def login_menu():
    """Prompt for credentials; return (username, password).

    The password is read with getpass so it is not echoed.
    """
    display_header()
    username = input('Login: ')
    pwd = getpass.getpass('Password: ')
    return username, pwd


def delete_user_menu():
    """Prompt for a username to delete, with explicit confirmation.

    Returns:
        str: The username to delete, or '' if the operation was cancelled
        (anything other than 'yes', case-insensitively, cancels).
    """
    display_header()
    username = input('Username to be deleted: ')
    print('\nDeleting this user will also delete its orders and holdings history.')
    print('\nAre you sure you want to continue?')
    print('Type "yes" to delete the user or anything else to cancel.')
    if input('Confirm? ').lower() != 'yes':
        username = ''
    return username


def buy_menu_ticker_symbol():
    """Prompt for and return the ticker symbol to buy."""
    display_header()
    ticker_symbol = input('Ticker Symbol: ')
    return ticker_symbol


def buy_menu_volume_confirmation():
    """Warn that quotes may change, then prompt for the trade volume."""
    print('\nAlert: Stock quotes are subject to change.')
    print('By the time you confirm this operation, the updated market quote will be used to buy your stock.')
    trade_volume = input('\nTrade Volume: ')
    return trade_volume


def sell_menu():
    """Prompt for a sell order; return (ticker_symbol, trade_volume)."""
    display_header()
    ticker_symbol = input('Ticker Symbol: ')
    trade_volume = input('Trade Volume: ')
    return ticker_symbol, trade_volume


def lookup_menu():
    """Prompt for and return a company name for symbol lookup."""
    display_header()
    company_name = input('Company Name: ')
    return company_name


def quote_menu():
    """Prompt for and return a ticker symbol to quote."""
    display_header()
    ticker_symbol = input('Ticker Symbol: ')
    return ticker_symbol


def exit_message():
    """Show the farewell screen and wait for acknowledgement."""
    display_header()
    print('\n\n\nThanks for using Terminal Trader!')
    wait_for_user()


# NOTE(review): a large commented-out legacy version of
# display_user_dashboard (balance + holdings table with avg. price) was
# removed here as dead code; recover it from version control if needed.

# if __name__ == '__main__':
Learn to execute automation testing from scratch with the LambdaTest Learning Hub. Right from setting up the prerequisites to running your first automation test, through following best practices and diving deeper into advanced test scenarios, the LambdaTest Learning Hub compiles step-by-step guides to help you become proficient with different test automation frameworks such as Selenium, Cypress, and TestNG.
You can also refer to the video tutorials on the LambdaTest YouTube channel to get step-by-step demonstrations from industry experts.
Get 100 minutes of automation testing FREE!