Best Python code snippet using lisa_python
parser.py
Source:parser.py
...
        self.progress = 0
        self.network = []
        self.process_pool = Pool(processes=4)
        self._count_atoms()
        self._track_progress()

    def _count_atoms(self):
        # The first line of a well-formed .xyz file holds the atom count.
        with open(self.filename, 'r') as infile:
            try:
                self._atoms_number = int(infile.readline())
            except ValueError:
                print("Could not read the number of atoms, please make sure the .xyz file has the correct format.")
                exit()

    def _make_last_movie_frame(self):
        # Extract the final frame of fibro.xyz and write it twice into last.xyz,
        # so that go_through_frames() sees a following header line and dispatches it.
        os.system("rm last_frame.xyz")
        os.system("rm last.xyz")
        cmd = "tail -" + str(self._atoms_number) + " fibro.xyz >> last_frame.xyz"
        with open("last_frame.xyz", 'w') as outfile:
            outfile.write("{}\nAtoms. Timestep: 0\n".format(self._atoms_number))
        os.system(cmd)
        cmd = "cat last_frame.xyz last_frame.xyz >> last.xyz"
        os.system(cmd)

    def _track_progress(self):
        # Read the last line of the LAMMPS log to find the latest timestep.
        if os.path.exists("log.lammps"):
            os.system("tail -1 log.lammps >> progress.txt")
            with open("progress.txt") as infile:
                try:
                    self.progress = int(re.findall(r'\d+', infile.readline())[0])
                except (IndexError, ValueError):
                    print("Could not find the last frame, run the simulation for longer!")
            os.system("rm progress.txt")
        else:
            print("Warning: Could not find the LAMMPS log file.")

    def go_through_frames(self, filename):
        frame_index = 0
        types = []
        xs = []; ys = []; zs = []
        fibril_analyser = FibrilAnalyser()
        with open(filename, 'r') as infile:
            for item in infile:
                # A line with a single token is the atom-count header of the next
                # frame, so the frame collected so far is dispatched to the pool.
                if len(item.split()) == 1 and types:
                    frame_index += 1
                    types = np.asarray(types); xs = np.asarray(xs); ys = np.asarray(ys); zs = np.asarray(zs)
                    if args.verbose and not args.last_frame:
                        print("Processing frame......{}".format(frame_index))
                    self.process_pool.apply_async(
                        fibril_analyser.analyse_fibril_distribution,
                        args=(args.nematic_cutoff, args.radial_cutoff, types, xs, ys, zs))
                    types = []; xs = []; ys = []; zs = []
                # An atom line: atom type followed by x, y, z coordinates.
                if len(item.split()) > 3:
                    atom_type, x, y, z = item.split()
                    x, y, z = float(x), float(y), float(z)
                    types.append(int(atom_type))
                    xs.append(x); ys.append(y); zs.append(z)
        self.process_pool.close()
        self.process_pool.join()

if __name__ == "__main__":
    parser = Parser(args.movie_name)
    parser._count_atoms()
    parser._track_progress()
    if args.last_frame:
        print("analysing last frame")
        parser._make_last_movie_frame()
        parser.go_through_frames("last.xyz")
    else:
        parser.go_through_frames(args.movie_name)
    print("Successfully analysed all frames!")
...
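The snippet above is truncated at the top, so the module-level args object it relies on is not shown. Below is a minimal sketch of the argparse setup it appears to assume; the flag spellings and default cutoff values are hypothetical and only mirror the attribute names used in the code (movie_name, last_frame, verbose, nematic_cutoff, radial_cutoff).

# Hypothetical argument parser for parser.py; names mirror the attributes
# referenced in the snippet above, defaults are illustrative only.
import argparse

arg_parser = argparse.ArgumentParser(description="Analyse fibril distribution in an .xyz movie")
arg_parser.add_argument("movie_name", help="path to the .xyz trajectory, e.g. fibro.xyz")
arg_parser.add_argument("--last-frame", dest="last_frame", action="store_true",
                        help="analyse only the last frame of the movie")
arg_parser.add_argument("--verbose", action="store_true",
                        help="print a message for every processed frame")
arg_parser.add_argument("--nematic-cutoff", dest="nematic_cutoff", type=float, default=0.9,
                        help="nematic order cutoff (illustrative default)")
arg_parser.add_argument("--radial-cutoff", dest="radial_cutoff", type=float, default=1.5,
                        help="radial distance cutoff (illustrative default)")
args = arg_parser.parse_args()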
rollout_saver.py
Source:rollout_saver.py
import shelve
import argparse
import json
import numpy as np
import os
import os.path as osp
import torch
from pathlib import Path
from typing import List, Optional, Tuple, Union, Dict
from collections import defaultdict

from grf_imitation.infrastructure.datas import Batch, convert_batch_to_dict
from grf_imitation.infrastructure.utils import utils


class RolloutSaver:
    """
    Utility class for storing rollouts.
    Each rollout is stored with a key based on the episode number (0-indexed),
    and the number of episodes is stored with the key "episodes",
    so to load the shelf file, use something like:
    with shelve.open('rollouts.pkl') as rollouts:
        for episode_id in range(rollouts["episodes"]):
            rollout = rollouts[str(episode_id)]
    If outfile is None, this class does nothing.
    """
    def __init__(
        self,
        exp_dir=None,
        num_steps=None,
        num_episodes=None,
        track_progress=False,
        save_info=False,
    ):
        self._exp_dir = exp_dir
        self._track_progress = track_progress
        self._outfile = None        # exp_dir/rollouts/rollouts
        self._progressfile = None   # exp_dir/rollouts/__progress_rollouts
        self._shelf = None
        self._episodes = 0
        self._steps = 0
        self._num_episodes = num_episodes
        self._num_steps = num_steps
        self._save_info = save_info
        if self._exp_dir:
            self._rollout_dir = osp.join(self._exp_dir, 'evaluation')
            os.makedirs(self._rollout_dir, exist_ok=True)
            self._outfile = osp.join(self._rollout_dir, 'rollouts')

    def _get_tmp_progress_filename(self):
        outpath = Path(self._outfile)
        return outpath.parent / ("progress_" + outpath.name)

    @property
    def outfile(self):
        return self._outfile

    @property
    def is_invalid(self):
        return self._exp_dir is None

    def __enter__(self):
        if self._outfile:
            self._shelf = shelve.open(self._outfile)
        if self._track_progress:
            self._progressfile = self._get_tmp_progress_filename().open(mode='w')
        return self

    def __exit__(self, type, value, traceback):
        if self._shelf:
            # Close the shelf file, and store the number of episodes for ease
            self._shelf['episodes'] = self._episodes
            self._shelf.close()
        if self._track_progress:
            # Remove the temp progress file:
            # self._get_tmp_progress_filename().unlink()
            self._progressfile.close()

    def _get_progress(self):
        if self._num_episodes:
            return f"{self._episodes} / {self._num_episodes} episodes completed"
        elif self._num_steps:
            return f"{self._steps} / {self._num_steps} steps completed"
        else:
            return f"{self._episodes} episodes completed"

    def store(self, batch: Batch):
        if self._outfile:
            # don't save img_obs to save space
            self._shelf[str(self._episodes)] = convert_batch_to_dict(batch, ignore_keys=['img_obs'])
            self._episodes += 1
            if self._progressfile:
                log_str = f"episode reward: {batch.rew[:, :4].sum() // 4}\t" + self._get_progress() + "\n"
                print(log_str)
                self._progressfile.write(log_str)
                self._progressfile.flush()

    @property
    def rollout_dir(self):
...
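A short usage sketch for RolloutSaver, following the loading pattern given in its own docstring. The experiment directory and the collect_rollouts() generator are hypothetical stand-ins; only the shelve keys ("episodes" and the stringified episode index) come from the class above.

import shelve

# Writing (sketch): RolloutSaver is a context manager, so __exit__ records
# the final episode count under the 'episodes' key when the shelf is closed.
# "runs/exp0" and collect_rollouts() are hypothetical.
with RolloutSaver(exp_dir="runs/exp0", num_episodes=10, track_progress=True) as saver:
    for batch in collect_rollouts():  # hypothetical generator yielding Batch objects
        saver.store(batch)

# Reading back, following the pattern from the class docstring:
with shelve.open("runs/exp0/evaluation/rollouts") as rollouts:
    for episode_id in range(rollouts["episodes"]):
        rollout = rollouts[str(episode_id)]  # a dict produced by convert_batch_to_dict
        print(episode_id, list(rollout.keys()))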