Best Python code snippet using rester_python
train.py
Source: train.py
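The file defines an Epoch runner plus train/valid variants, including "Custom" versions for a model with both a segmentation-mask head and a classification head. The snippet is truncated at the top (the leading `...`), so its imports are not shown. Judging from the names it uses (`sys`, `torch`, `nn`, `tqdm`, `AverageValueMeter`), it most likely relies on something like the following; the source of `AverageValueMeter` is an assumption (segmentation_models_pytorch ships one, but the project may define its own):

# Assumed imports for the truncated file below, inferred from usage;
# not copied from the original source.
import sys

import torch
import torch.nn as nn
from tqdm import tqdm

# AverageValueMeter keeps a running mean of the per-batch values.
# segmentation_models_pytorch provides one; the original project may define it elsewhere.
from segmentation_models_pytorch.utils.meter import AverageValueMeter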
...
        self.model.to(self.device)
        self.loss.to(self.device)
        for metric in self.metrics:
            metric.to(self.device)

    def _format_logs(self, logs):
        str_logs = ['{} - {:.4}'.format(k, v) for k, v in logs.items()]
        s = ', '.join(str_logs)
        return s

    def batch_update(self, x, y):
        raise NotImplementedError

    def on_epoch_start(self):
        pass

    def run(self, dataloader):
        self.on_epoch_start()
        logs = {}
        loss_meter = AverageValueMeter()
        metrics_meters = {metric.__name__: AverageValueMeter() for metric in self.metrics}
        with tqdm(dataloader, desc=self.stage_name, file=sys.stdout, disable=not (self.verbose)) as iterator:
            for x, y in iterator:
                x, y = x.to(self.device), y.to(self.device)
                loss, y_pred = self.batch_update(x, y)
                # update loss logs
                loss_value = loss.cpu().detach().numpy()
                loss_meter.add(loss_value)
                loss_logs = {self.loss.__name__: loss_meter.mean}
                logs.update(loss_logs)
                # update metrics logs
                for metric_fn in self.metrics:
                    metric_value = metric_fn(y_pred, y).cpu().detach().numpy()
                    metrics_meters[metric_fn.__name__].add(metric_value)
                metrics_logs = {k: v.mean for k, v in metrics_meters.items()}
                logs.update(metrics_logs)
                if self.verbose:
                    s = self._format_logs(logs)
                    iterator.set_postfix_str(s)
        return logs


class TrainEpoch(Epoch):
    def __init__(self, model, loss, metrics, optimizer, device='cpu', verbose=True):
        super().__init__(
            model=model,
            loss=loss,
            metrics=metrics,
            stage_name='train',
            device=device,
            verbose=verbose,
        )
        self.optimizer = optimizer

    def on_epoch_start(self):
        self.model.train()

    def batch_update(self, x, y):
        self.optimizer.zero_grad()
        prediction = self.model.forward(x)
        loss = self.loss(prediction, y)
        loss.backward()
        self.optimizer.step()
        return loss, prediction


class TrainEpochCustom(Epoch):
    def __init__(self, model, loss, metrics, optimizer, device='cpu', verbose=True):
        super().__init__(
            model=model,
            loss=loss,
            metrics=metrics,
            stage_name='train',
            device=device,
            verbose=verbose,
        )
        self.optimizer = optimizer
        self.loss_clf = nn.BCEWithLogitsLoss()

    def run(self, dataloader):
        self.on_epoch_start()
        logs = {}
        loss_meter = AverageValueMeter()
        metrics_meters = {metric.__name__: AverageValueMeter() for metric in self.metrics}
        with tqdm(dataloader, desc=self.stage_name, file=sys.stdout, disable=not (self.verbose)) as iterator:
            for x, y in iterator:
                x, y = x.to(self.device), (y[0].to(self.device), y[1].to(self.device))
                loss, y_pred = self.batch_update(x, y)
                # update loss logs
                loss_value = loss.cpu().detach().numpy()
                loss_meter.add(loss_value)
                loss_logs = {self.loss.__name__: loss_meter.mean}
                logs.update(loss_logs)
                # update metrics logs
                for metric_fn in self.metrics:
                    metric_value = metric_fn(y_pred, y[0]).cpu().detach().numpy()
                    metrics_meters[metric_fn.__name__].add(metric_value)
                metrics_logs = {k: v.mean for k, v in metrics_meters.items()}
                logs.update(metrics_logs)
                if self.verbose:
                    s = self._format_logs(logs)
                    iterator.set_postfix_str(s)
        return logs

    def on_epoch_start(self):
        self.model.train()

    def batch_update(self, x, y):
        self.optimizer.zero_grad()
        prediction_mask, prediction_clf = self.model.forward(x)
        y_mask, y_clf = y
        loss = self.loss(prediction_mask, y_mask) + self.loss_clf(prediction_clf, y_clf) * 0.5
        loss.backward()
        self.optimizer.step()
        return loss, prediction_mask


class ValidEpochCustom(Epoch):
    def __init__(self, model, loss, metrics, device='cpu', verbose=True):
        super().__init__(
            model=model,
            loss=loss,
            metrics=metrics,
            stage_name='valid',
            device=device,
            verbose=verbose,
        )
        self.loss_clf = nn.BCEWithLogitsLoss()

    def on_epoch_start(self):
        self.model.eval()

    def batch_update(self, x, y):
        y_mask, y_clf = y
        with torch.no_grad():
            prediction_mask, prediction_clf = self.model.forward(x)
            loss = self.loss(prediction_mask, y_mask) + self.loss_clf(prediction_clf, y_clf) * 0.5
        return loss, prediction_mask

    def run(self, dataloader):
        self.on_epoch_start()
        logs = {}
        loss_meter = AverageValueMeter()
        metrics_meters = {metric.__name__: AverageValueMeter() for metric in self.metrics}
        with tqdm(dataloader, desc=self.stage_name, file=sys.stdout, disable=not (self.verbose)) as iterator:
            for x, y in iterator:
                x, y = x.to(self.device), (y[0].to(self.device), y[1].to(self.device))
                loss, y_pred = self.batch_update(x, y)
                # update loss logs
                loss_value = loss.cpu().detach().numpy()
                loss_meter.add(loss_value)
                loss_logs = {self.loss.__name__: loss_meter.mean}
                logs.update(loss_logs)
                # update metrics logs
                for metric_fn in self.metrics:
                    metric_value = metric_fn(y_pred, y[0]).cpu().detach().numpy()
                    metrics_meters[metric_fn.__name__].add(metric_value)
                metrics_logs = {k: v.mean for k, v in metrics_meters.items()}
                logs.update(metrics_logs)
                if self.verbose:
                    s = self._format_logs(logs)
                    iterator.set_postfix_str(s)
        return logs


class ValidEpoch(Epoch):
    def __init__(self, model, loss, metrics, device='cpu', verbose=True):
        super().__init__(
            model=model,
            loss=loss,
            metrics=metrics,
            stage_name='valid',
            device=device,
            verbose=verbose,
        )

    def on_epoch_start(self):
        self.model.eval()
...
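For context, here is a minimal, hypothetical usage sketch; it is not part of train.py. It assumes the classes above can be imported from train.py, that the assumed imports listed earlier are available, and that the loss and each metric are nn.Module-like callables carrying a __name__ attribute (which is what Epoch.run uses for logging). The toy model, DiceLossStub, and IoUStub names below are illustrative stand-ins, not the original project's components. ValidEpoch's batch_update is cut off by the truncation above, but it presumably mirrors ValidEpochCustom's torch.no_grad() pattern.

# Hypothetical usage sketch for the Epoch runners above.
import torch
import torch.nn as nn
from torch.utils.data import DataLoader, TensorDataset

from train import TrainEpoch, ValidEpoch  # assumed import path


class DiceLossStub(nn.Module):
    """Tiny binary soft-Dice loss; __name__ is what Epoch.run logs under."""
    __name__ = 'dice_loss'

    def forward(self, y_pred, y_true):
        y_prob = torch.sigmoid(y_pred)
        intersection = (y_prob * y_true).sum()
        return 1 - (2 * intersection + 1.0) / (y_prob.sum() + y_true.sum() + 1.0)


class IoUStub(nn.Module):
    """Tiny IoU metric with a __name__, matching what Epoch.run expects."""
    __name__ = 'iou_score'

    def forward(self, y_pred, y_true):
        y_bin = (torch.sigmoid(y_pred) > 0.5).float()
        intersection = (y_bin * y_true).sum()
        union = y_bin.sum() + y_true.sum() - intersection
        return (intersection + 1.0) / (union + 1.0)


# Toy single-channel segmentation model and synthetic data.
model = nn.Sequential(nn.Conv2d(3, 8, 3, padding=1), nn.ReLU(), nn.Conv2d(8, 1, 1))
images = torch.randn(16, 3, 64, 64)
masks = (torch.rand(16, 1, 64, 64) > 0.5).float()
train_loader = DataLoader(TensorDataset(images, masks), batch_size=4)
valid_loader = DataLoader(TensorDataset(images, masks), batch_size=4)

device = 'cuda' if torch.cuda.is_available() else 'cpu'
loss = DiceLossStub()
metrics = [IoUStub()]
optimizer = torch.optim.Adam(model.parameters(), lr=1e-3)

train_epoch = TrainEpoch(model, loss=loss, metrics=metrics, optimizer=optimizer,
                         device=device, verbose=True)
valid_epoch = ValidEpoch(model, loss=loss, metrics=metrics, device=device, verbose=True)

for epoch in range(2):
    train_logs = train_epoch.run(train_loader)   # e.g. {'dice_loss': ..., 'iou_score': ...}
    valid_logs = valid_epoch.run(valid_loader)
    print(epoch, train_logs, valid_logs)

The Custom variants expect a dual-head model whose forward returns (mask_logits, clf_logits) and a dataloader yielding (x, (y_mask, y_clf)); they weight the auxiliary BCEWithLogitsLoss classification term by 0.5 and log metrics against the mask target only.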