Best Python code snippets using Slash
test_storage.py
Source:test_storage.py
...116 self.assertEqual(117 storage.content_dir,118 os.path.join(storage.shared_dir, 'content'))119 @patch('pulp.server.content.storage.SharedStorage.shared_dir', 'abcd/')120 def test_links_dir(self):121 storage = SharedStorage('git', '1234')122 self.assertEqual(123 storage.links_dir,124 os.path.join(storage.shared_dir, 'links'))125 def test_put(self):126 unit = Mock()127 storage = SharedStorage('git', '1234')128 storage.link = Mock()129 storage.put(unit)130 storage.link.assert_called_once_with(unit)131 def test_get(self):132 storage = SharedStorage('git', '1234')133 storage.get(None) # just for coverage134 @patch('os.symlink')...
linking_crawler.py
Source:linking_crawler.py
import os  # NOTE(review): os is used throughout but its import was not visible in this snippet (likely on the unseen line 1) — confirm against the full file

from crawler import Crawler


class LinkingCrawler(Crawler):
    """Crawler that mirrors collected data files into a directory of symlinks.

    Each ``run_*`` mode selects a set of target filenames, collects the
    matching files, and populates a links directory with (optionally
    renamed) symlinks pointing back at the original files.
    """

    def __init__(self, log_filepath="./log-crawler.txt"):
        Crawler.__init__(self, log_filepath)
        self.set_links_dir("./links/")
        # Dispatch table: mode name -> handler method.
        self.mode_dict['2m-photos'] = self.run_2m_photos
        self.mode_dict['8m-photos'] = self.run_8m_photos
        self.mode_dict['8m-lasers'] = self.run_8m_lasers
        self.mode_dict['manual'] = self.run_manual_data

    def set_links_dir(self, links_dir):
        """Record where symlinks will be created and log the change."""
        self.links_dir = links_dir
        self.write_log(["Links dir set to " + links_dir])

    def populate_links_dir(self, make_subdirs=True, rename_fu=None):
        """Create a symlink in ``self.links_dir`` for every collected file.

        Redefines the Crawler version to allow for subdirectories based on
        experiment names (1A, 1B, 2A, etc.).

        make_subdirs -- when True, sort links into per-experiment
                        subdirectories keyed on the first two characters
                        of the filename.
        rename_fu    -- optional callable ``(filename, source_dir) -> name``
                        applied to each filename before linking.
        """
        if not os.path.isdir(self.links_dir):
            msg = "Making links directory at {}.".format(self.links_dir)
            self.write_log([msg], True)
            os.makedirs(self.links_dir, exist_ok=True)
        if make_subdirs:
            sub_dirs = ['1A', '1B', '2A', '2B', '3A', '3B', '4A', '5A',]
            for sub_dir in sub_dirs:
                sub_path = os.path.join(self.links_dir, sub_dir)
                if not os.path.isdir(sub_path):
                    msg = "Making links directory at {}.".format(sub_path)
                    self.write_log([msg], True)
                    os.makedirs(sub_path, exist_ok=True)
        for filepath in self.file_list:
            path, filename = os.path.split(filepath)
            # Experiment code (e.g. '1A') is the first two filename chars.
            sub_dir = filename[0:2] if make_subdirs else ''
            if rename_fu:
                filename = rename_fu(filename, path)
            linkpath = os.path.join(self.links_dir, sub_dir, filename)
            if os.path.exists(linkpath):
                msg = "Link for {} exists already. Skipping.".format(linkpath)
                self.write_log([msg], True, indent=4*' ')
            else:
                msg = "Making symlink for {}.".format(linkpath)
                self.write_log([msg], True, indent=4*' ')
                os.symlink(filepath, linkpath)

    def rename_cart_file(self, filename, source_path):
        """Return a sortable name for a cart file.

        Expects names of the form ``exp-step-time-length-name`` and builds
        ``exp-ORDER-...`` where ORDER encodes limb/flow/time so lexical
        sort matches experiment order. Raises ValueError on names that do
        not split into exactly five '-'-separated fields.
        """
        try:
            exp, step, time, length, name = filename.split('-')
        except ValueError:
            # Log the offending name, then re-raise the ORIGINAL error.
            # (The original code raised a fresh, message-less ValueError,
            # discarding the informative message and traceback.)
            print(filename)
            raise
        limb = step[0]
        flow = int(step[1:-1])
        time = int(time[1:])
        length = length[0]
        # Rising limb ('r') sorts ascending; anything else mirrors around 100.
        limb_bin = 0 if 'r' == limb else 1
        order = flow + 2 * limb_bin * (100 - flow) + int(time/10)
        new_name = "{}-{:03d}-{}{}L-t{}-{}m-{}".format(
            exp, order, limb, flow, time, length, name)
        msg = "Renaming {} to {}".format(filename, new_name)
        self.write_log([msg], True, 4*' ')
        return new_name

    def rename_manual_file(self, filename, source_path):
        """Swap 'EXP-name.xlsx' to 'name-EXP.xlsx' (extension re-added)."""
        exp, name = filename.split('-', 1)
        return "{}-{}.xlsx".format(name[:-5], exp)

    def run_2m_photos(self):
        """Link the 2m composite photos, renamed into sortable order."""
        self.write_log_section_break()
        self.write_log(["Running 2m photos"], verbose=True)
        self.set_target_names(['*2m-Composite.JPG',])
        self.set_links_dir('/home/alex/ubc/research/feed-timing/data/data-links/2m-photos')
        self.collect_names()
        self.populate_links_dir(rename_fu=self.rename_cart_file)
        self.end()

    def run_8m_photos(self):
        """Link the 8m composite photos, renamed into sortable order."""
        self.write_log_section_break()
        self.write_log(["Running 8m photos"], verbose=True)
        self.set_target_names(['*8m-Composite.JPG',])
        self.set_links_dir('/home/alex/ubc/research/feed-timing/data/data-links/8m-photos')
        self.collect_names()
        self.populate_links_dir(rename_fu=self.rename_cart_file)
        self.end()

    def run_8m_lasers(self):
        """Link the 8m laser-scan outputs, renamed into sortable order."""
        self.write_log_section_break()
        self.write_log(["Running 8m laser scans"], verbose=True)
        self.set_target_names(['*beddatafinal.txt',
                               '*laser.*',
                               ])
        self.set_links_dir('/home/alex/ubc/research/feed-timing/data/data-links/8m-lasers')
        self.collect_names()
        self.populate_links_dir(rename_fu=self.rename_cart_file)
        self.end()

    def run_manual_data(self):
        """Link the manually collected spreadsheets (no experiment subdirs)."""
        self.write_log_section_break()
        self.write_log(["Running manual data"], verbose=True)
        self.set_root('/home/alex/ubc/research/feed-timing/data/manual-data')
        self.set_target_names(['??-flow-depths.xlsx',
                               '??-masses.xlsx',
                               ])
        self.set_links_dir('/home/alex/ubc/research/feed-timing/data/data-links/manual-data')
        self.collect_names()
        self.populate_links_dir(make_subdirs=False, rename_fu=self.rename_manual_file)
        self.end()


if __name__ == "__main__":
    crawler = LinkingCrawler()
    exp_root = '/home/alex/ubc/research/feed-timing/data/{}'
    crawler.set_root(exp_root.format('cart'))
    # NOTE(review): the original snippet was truncated here; the remaining
    # driver statements were not visible — confirm against the full file.
split_segments.py
Source:split_segments.py
#!/usr/bin/env python3
"""Program for splitting frames into action segments

See :ref:`cli_tools_action_segmentation` for usage details
"""
import argparse
import logging
import os
import pathlib
import sys

import pandas as pd

from epic_kitchens.labels import VIDEO_ID_COL
from epic_kitchens.video import (
    ModalityIterator,
    FlowModalityIterator,
    RGBModalityIterator,
    split_video_frames,
)

HELP = """\
Process frame dumps, and a set of annotations in a pickled dataframe
to produce a set of segmented action videos using symbolic links.
Taking a set of videos in the directory format (for RGB):
 P01_01
 |--- frame_0000000001.jpg
 |--- frame_0000000002.jpg
 |--- ...
Produce a set of action segments in the directory format:
 P01_01_0_chop-wood
 |--- frame_0000000001.jpg
 |--- ...
 |--- frame_0000000735.jpg
The final number `Z` in `PXX_YY_Z-narration` denotes the index of the segment, this can then
be used to look up the corresponding information on the segment such as the raw narration,
verb class, noun classes etc
If segmenting optical flow then frames are contained in a `u` or `v` subdirectory.
"""

LOG = logging.getLogger(__name__)

parser = argparse.ArgumentParser(
    description=HELP, formatter_class=argparse.RawTextHelpFormatter
)
parser.add_argument("video", type=str, help="Video ID to segment")
parser.add_argument(
    "frame_dir",
    type=lambda p: pathlib.Path(p).absolute(),
    help="Path to frames, if RGB should contain images, if flow, should contain u, "
    "v subdirectories with images",
)
parser.add_argument(
    "links_dir",
    type=lambda p: pathlib.Path(p).absolute(),
    help="Path to save segments into",
)
parser.add_argument(
    "labels",
    type=pathlib.Path,
    help="Path to the pickle or CSV file which contains the meta information about the dataset.",
)
parser.add_argument(
    "modality",
    type=str.lower,
    # FIX: a positional argument is required by argparse, so `default` was
    # dead code; nargs="?" makes the argument optional and the default live.
    # Backward compatible: an explicitly supplied modality works as before.
    nargs="?",
    default="rgb",
    choices=["rgb", "flow"],
    help="Modality of frames that are being segmented",
)
parser.add_argument(
    "--frame-format",
    type=str,
    default="frame_%010d.jpg",
    help="Pattern of frame filenames (default: %(default)s)",
)
parser.add_argument(
    "--fps",
    type=float,
    default=60,
    help="FPS of extracted frames (default: %(default)s)",
)
parser.add_argument(
    "--of-stride",
    type=int,
    default=2,
    help="Optical flow stride parameter used for frame extraction (default: %(default)s)",
)
parser.add_argument(
    "--of-dilation",
    type=int,
    default=3,
    help="Optical flow dilation parameter used for frame extraction "
    "(default: %(default)s)",
)


def main(args):
    """Load the annotations, then symlink each video's frames into segments.

    args -- parsed argparse namespace (see the parser above).
    Exits with status 1 when the labels file does not exist; raises
    ValueError for an unsupported labels suffix or modality.
    """
    logging.basicConfig(level=logging.INFO)
    if not args.labels.exists():
        LOG.error("Annotations pickle: '{}' does not exist".format(args.labels))
        sys.exit(1)
    if args.labels.suffix.lower() == ".pkl":
        annotations = pd.read_pickle(args.labels)
    elif args.labels.suffix.lower() == ".csv":
        annotations = pd.read_csv(args.labels, index_col="uid")
    else:
        raise ValueError("Expected .csv or .pkl suffix for annotation file")
    fps = float(args.fps)
    if args.modality.lower() == "rgb":
        frame_dirs = [args.frame_dir]
        links_dirs = [args.links_dir]
        modality = RGBModalityIterator(fps=fps)  # type: ModalityIterator
    elif args.modality.lower() == "flow":
        # Flow frames live in parallel u/v subdirectories; segment both.
        axes = ["u", "v"]
        frame_dirs = [args.frame_dir.joinpath(axis) for axis in axes]
        links_dirs = [args.links_dir.joinpath(axis) for axis in axes]
        modality = FlowModalityIterator(
            rgb_fps=fps, stride=int(args.of_stride), dilation=int(args.of_dilation)
        )
    else:
        raise ValueError("Modality '{}' is not recognised".format(args.modality))
    video_annotations = annotations[annotations[VIDEO_ID_COL] == args.video]
    for frame_dir, links_dir in zip(frame_dirs, links_dirs):
        split_video_frames(
            modality, args.frame_format, video_annotations, links_dir, frame_dir
        )


if __name__ == "__main__":
    # NOTE(review): the tail of the original snippet was truncated; the
    # conventional entry point is reconstructed here — confirm.
    main(parser.parse_args())
07_get_docs_for_ner.py
Source:07_get_docs_for_ner.py
1#!/usr/bin/env python2# -*- coding: utf-8 -*-3"""4Script to select documents with references to the selected yago entities.5Usage:6 07_get_docs_for_ner.py <pickles_dir> <resources_dir> <links_dir> <output_dir>7pickles_dir is the directory with the pickled yago downloads8"""9from __future__ import unicode_literals, print_function10import cPickle as pickle11import os12import sys13from bs4 import BeautifulSoup14from docopt import docopt15from tqdm import tqdm16args = docopt(__doc__, version=1.0)17pickles_dir = args['<pickles_dir>']18resources_dir = args['<resources_dir>']19links_dir = args['<links_dir>']20output_dir = args['<output_dir>']21wikipages = set()22ids_urls = {}23uris_urls = {}24for pkl in tqdm(os.listdir(pickles_dir)):25 with open(os.path.join(pickles_dir, pkl)) as f:26 wikipages = wikipages.union({w[1] for w in pickle.load(f)[1:]})27with open(os.path.join(resources_dir, "ids_urls.txt"), "r") as f:28 for line in tqdm(f.readlines()):29 line = line.strip().split(",", 1)30 ids_urls[line[0]] = line[1]31with open(os.path.join(resources_dir, "parsed_uris.txt"), "r") as f:32 for line in tqdm(f.readlines()):33 line = line.strip().split(",http://", 1)34 uris_urls[line[0]] = "http://{}".format(line[1])35for wiki_doc in sorted(os.listdir(links_dir)):36 print("Extracting NE from {}".format(wiki_doc), file=sys.stderr)37 last_doc_id = None38 docs_for_ner = {}39 last_doc_in_wikipages = False40 with open(os.path.join(links_dir, wiki_doc), "r") as f:41 for line in tqdm(f):42 soup = BeautifulSoup(line.strip().decode("utf-8"), "lxml")43 if soup.find('doc') is not None:44 last_doc_id = soup.doc["id"]45 last_doc_in_wikipages = (last_doc_id in ids_urls and46 ids_urls[last_doc_id] in wikipages)47 if last_doc_in_wikipages:48 docs_for_ner[last_doc_id] = ids_urls[last_doc_id]49 elif soup.find('a') is not None and not last_doc_in_wikipages:50 try:51 a = soup.a['href']52 if a in uris_urls and uris_urls[a] in wikipages:53 docs_for_ner[last_doc_id] = ids_urls[last_doc_id]54 
last_doc_in_wikipages = True55 except KeyError:56 pass57 print("Finished extracting NE from {}. Saving files.".format(wiki_doc),58 file=sys.stderr)59 with open(os.path.join(output_dir, "docs_in_ner.txt"), "a") as f:60 for doc in tqdm(sorted(docs_for_ner)):...
Learn to execute automation testing from scratch with the LambdaTest Learning Hub — right from setting up the prerequisites to running your first automation test, through following best practices and diving deeper into advanced test scenarios. The LambdaTest Learning Hub compiles step-by-step guides to help you become proficient with different test automation frameworks, e.g. Selenium, Cypress, and TestNG.
You can also refer to the video tutorials on the LambdaTest YouTube channel for step-by-step demonstrations from industry experts.
Get 100 automation testing minutes FREE!