Best Python code snippet using lisa_python
Source: fab_dav_q.py
...
#
# @hosts(mq_host + cafecommander_host)
# def get_cpu(interval=10, count=30):
#     ret = run('sar -u %d %d' % (interval, count))
#     _save_logs('%s_cpu.log' % env.host, ret)
#
#
# @hosts(mq_host + cafecommander_host)
# def get_mem(interval=10, count=30):
#     ret = run('sar -r %d %d' % (interval, count))
#     _save_logs('%s_mem.log' % env.host, ret)
#
#
# @hosts(mq_host + cafecommander_host)
# def get_load(interval=10, count=30):
#     ret = run('sar -q %d %d' % (interval, count))
#     _save_logs('%s_load.log' % env.host, ret)
#
#
# def _save_logs(logname, content):
#     dirname = 'logs'
#     local('mkdir -p %s' % dirname)
#     logname = os.path.join(dirname, logname)
#     with open(logname, 'w') as f:
...
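The commented-out tasks above collect `sar` output on each remote host and save it into a local logs directory; the `_save_logs` body is truncated at its `with open(...)` line, and the host lists (`mq_host`, `cafecommander_host`) are defined elsewhere in the file. Below is a minimal, self-contained sketch of the same pattern, assuming the Fabric 1.x API; the `MQ_HOSTS` list and the final `f.write(content)` are illustrative assumptions, not the original code.

import os
from fabric.api import env, hosts, local, run

MQ_HOSTS = ['user@mq01.example.com']  # hypothetical host list

@hosts(*MQ_HOSTS)
def get_cpu(interval=10, count=30):
    # Collect CPU utilisation with sar on the remote host.
    ret = run('sar -u %d %d' % (interval, count))
    _save_logs('%s_cpu.log' % env.host, ret)

def _save_logs(logname, content):
    # Archive the captured command output in a local logs/ directory.
    dirname = 'logs'
    local('mkdir -p %s' % dirname)
    with open(os.path.join(dirname, logname), 'w') as f:
        f.write(content)  # assumed body of the truncated helper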
Source: experiment.py
...
        def evaluate_metrics(samples, sample_time, suffix):
            samples = samples[self.args['burn_in']:]
            np.save(os.path.join(self.logs['results'], 'samples_%s.npy' % suffix),
                    samples)
            self._save_logs('results.txt', 'sample time %s' % suffix, sample_time)
            print('evaluating samples')
            bmess = batch_means_ess(samples)
            min_bmess = np.mean(np.min(bmess, axis=1), axis=0)
            std_bmess = np.std(np.min(bmess, axis=1), axis=0)
            gr = gelman_rubin_diagnostic(samples)
            acc_rate = acceptance_rate_2(samples)
            self._save_logs('results.txt', 'num_samples_%s:' % suffix, samples.shape[0])
            self._save_logs('results.txt', 'ess_%s' % suffix, bmess)
            self._save_logs('results.txt', 'min_ess_%s' % suffix, min_bmess)
            self._save_logs('results.txt', 'std_ess_%s' % suffix, std_bmess)
            self._save_logs('results.txt', 'gelman_rubin_%s:' % suffix, gr)
            self._save_logs('results.txt', 'acceptance_rate_%s:' % suffix, acc_rate)
            self._trace_plot(samples)
            eval, eval_std = self._evaluate(samples)
            self._save_logs('results.txt', 'eval_metric_%s' % suffix, eval)
            self._save_logs('results.txt', 'eval_metric_std_%s' % suffix, eval_std)

        print('starting training')
        losses, train_time = self.sampler.train(**self.args)
        np.save(os.path.join(self.logs['results'], 'losses.npy'), np.array(losses))
        self._plot(losses[1:], 'losses.png')
        self._save_logs('results.txt', 'train time', train_time)
        print('drawing samples')
        samples, sample_time = self.sampler.sample(**self.args)
        samples_irr, sample_time_irr = self.sampler.sample_irr(**self.args)
        evaluate_metrics(samples, sample_time, '')
        evaluate_metrics(samples_irr, sample_time_irr, 'irr')
        if samples.shape[2] == 2:
            self._plot2d(samples)

    def _trace_plot(self, samples):
        i = np.random.randint(0, samples.shape[1])
        fig, ax = plt.subplots()
        ax.plot(samples[:, i, :], alpha=0.4)
        ax.plot(samples[:, i, :], '+')
        ax.set_xlim([0, samples.shape[0]])
        ax.set_aspect('auto', 'datalim')
        plt.savefig(os.path.join(self.logs['figs'], 'trace.png'),
                    bbox_inches='tight')
        plt.close()

    def _plot2d(self, samples):
        fig, ax = plt.subplots()
        ax.hist2d(samples[:, 0, 0], samples[:, 0, 1], bins=200)
        ax.set_aspect('equal', 'box')
        plt.savefig(os.path.join(self.logs['figs'], 'samples.png'))
        plt.close()

    def _plot(self, data, title):
        fig, ax = plt.subplots()
        ax.set_aspect('auto', 'datalim')
        ax.plot(data)
        plt.savefig(os.path.join(self.logs['figs'], title), bbox_inches='tight')
        plt.close()

    def _evaluate(self, samples):
        T, M, D = samples.shape
        eval = []
        for i in range(M):
            eval.append(self.dist.evaluate(samples[:, i, :], self.sess))
        return np.mean(eval), np.std(eval)

    def _save_params(self, my_dict):
        my_dict = {k: str(v) for k, v in my_dict.items()}
        with open(os.path.join(self.logs['info'], 'params.txt'), 'w') as f:
            json.dump(my_dict, f, indent=4)

    def _save_logs(self, filename, savestring, arg):
        with open(os.path.join(self.logs['results'], filename), 'a') as f:
...
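In this file `_save_logs` is called once per metric and opens `results.txt` in append mode, so the results accumulate one labelled value per line; its body is truncated above. A small self-contained sketch of that append-style logger follows, with an assumed "label value" write format; the class name and the metric values in the usage lines are placeholders for illustration only.

import os

class ResultsLogger:
    def __init__(self, results_dir):
        self.results_dir = results_dir
        os.makedirs(results_dir, exist_ok=True)

    def _save_logs(self, filename, savestring, arg):
        # Append one "label value" line per call so metrics accumulate
        # across the whole experiment run (assumed format).
        with open(os.path.join(self.results_dir, filename), 'a') as f:
            f.write('%s %s\n' % (savestring, arg))

# Usage mirroring the calls in evaluate_metrics above (placeholder values):
logger = ResultsLogger('results')
logger._save_logs('results.txt', 'min_ess_irr', 0.42)
logger._save_logs('results.txt', 'acceptance_rate_irr:', 0.71)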
Source: train.py
...
        t_start = time.time()
        # Save
        if (itr+1) % args.save_interval == 0:
            saver.save(sess, args.path['model'] + '/itr-' + str(itr))
            _save_logs(os.path.join(args.path['log'], 'train.pickle'), logs['train'])
            _save_logs(os.path.join(args.path['log'], 'val.pickle'), logs['val'])

def _update_logs(logs, log):
    for key in logs.keys():
        logs[key].extend([log[key]])
    return logs

def _save_logs(filename, results):
    with open(filename, 'wb') as f:
...
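Here the training and validation logs are dictionaries of per-iteration lists that get written out every `save_interval` iterations; `_save_logs` is truncated at its `open(..., 'wb')` line. Below is a minimal sketch of that pattern, assuming a `pickle.dump` in the truncated body; the metric keys and values are hypothetical.

import pickle

def _update_logs(logs, log):
    # Append the latest iteration's value for every tracked key.
    for key in logs.keys():
        logs[key].extend([log[key]])
    return logs

def _save_logs(filename, results):
    # Serialise the accumulated history so it can be reloaded later
    # (the dump call is an assumption about the truncated body).
    with open(filename, 'wb') as f:
        pickle.dump(results, f)

logs = {'loss': [], 'accuracy': []}                    # hypothetical tracked metrics
logs = _update_logs(logs, {'loss': 0.31, 'accuracy': 0.88})
_save_logs('train.pickle', logs)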