Best Python code snippet using prospector_python
__init__.py
Source:__init__.py
...
        self._zoom = zoom

    @property
    def zoom(self):
        return self._zoom

def _example_path(extension):
    return os.path.join(os.path.dirname(__file__), 'files', extension)

def _icon_path(suffix):
    return os.path.join(os.path.dirname(__file__), 'icons', suffix)

_grid_obstacle_dict = dict({'ego-xvar-module': 'robot',
                            'ego-xvar-name': 'ax',
                            'ego-yvar-module': 'robot',
                            'ego-yvar-name': 'ay',
                            'xmax-constant': 'axMAX',
                            'ymax-constant': 'ayMAX',
                            'target-label': 'goal',
                            'traps-label': 'traps'
                            })

def obstacle(N, full_observable=False):
    if full_observable:
        return Model(_example_path("obstacle_full_observable.nm"), ProgramAnnotation(_grid_obstacle_dict),
                     ["Pmax=? [ \"notbad\" U \"goal\"]"], constants=f"N={N}")
    return Model(_example_path("obstacle.nm"), ProgramAnnotation(_grid_obstacle_dict),
                 ["Pmax=? [ \"notbad\" U \"goal\"]"], constants=f"N={N}")

_drone_dict = dict({'ego-xvar-module': 'drone',
                    'ego-xvar-name': 'dx',
                    'ego-yvar-module': 'drone',
                    'ego-yvar-name': 'dy',
                    'adv-xvar-module': 'agent',
                    'adv-xvar-name': 'ax',
                    'adv-yvar-module': 'agent',
                    'adv-yvar-name': 'ay',
                    'xmax-constant': 'xMAX',
                    'ymax-constant': 'yMAX',
                    'target-label': 'goal',
                    'ego-radius-constant': "RADIUS",
                    'scan-action': 'scan',
                    'adv-area': ['a'],
                    'traps-label': None
                    })

def evade(N, RADIUS):
    return Model(_example_path("evade.nm"), ProgramAnnotation(_drone_dict),
                 ["Pmax=? [\"notbad\" U \"goal\"]"], constants=f"N={N},RADIUS={RADIUS}")

_intercept_dict = dict({'ego-xvar-module': 'drone',
                        'ego-xvar-name': 'dx',
                        'ego-yvar-module': 'drone',
                        'ego-yvar-name': 'dy',
                        'adv-xvar-module': 'agent',
                        'adv-xvar-name': 'ax',
                        'adv-yvar-module': 'agent',
                        'adv-yvar-name': 'ay',
                        'xmax-constant': 'dxMAX',
                        'ymax-constant': 'dyMAX',
                        'traps-label': None,
                        'ego-radius-constant': "RADIUS",
                        'camera': ['CAMERA'],
                        'adv-goals-label': 'exits'
                        })

def intercept(N, RADIUS):
    return Model(_example_path("intercept.nm"), ProgramAnnotation(_intercept_dict),
                 ["Pmax=? [\"notbad\" U \"goal\"]"], constants=f"N={N},RADIUS={RADIUS}")

_surveillance_dict = dict({'ego-xvar-module': 'drone',
                           'ego-xvar-name': 'dx',
                           'ego-yvar-module': 'drone',
                           'ego-yvar-name': 'dy',
                           'adv-xvar-module': ['agent', 'agent2'],
                           'adv-xvar-name': ['ax', 'ax2'],
                           'adv-yvar-module': ['agent', 'agent2'],
                           'adv-yvar-name': ['ay', 'ay2'],
                           'adv-dirvar-module': ['agent', 'agent2'],
                           'adv-dirvar-name': ['dir', 'dir2'],
                           'xmax-constant': 'xMAX',
                           'ymax-constant': 'yMAX',
                           'target-label': 'goal',
                           'traps-label': None,
                           'adv-dirvalue-mapping': {1: Direction.WEST, 0: Direction.EAST},
                           'adv-radius-constant': "ARADIUS",
                           'ego-radius-constant': "RADIUS"
                           })

def surveillance(N, RADIUS=2):
    return Model(_example_path("avoid.nm"), ProgramAnnotation(_surveillance_dict),
                 ["Pmax=? [\"notbad\" U \"goal\"]"], constants=f"N={N},RADIUS={RADIUS}")

_grid_refuel = dict({'ego-xvar-module': 'rover',
                     'ego-xvar-name': 'ax',
                     'ego-yvar-module': 'rover',
                     'ego-yvar-name': 'ay',
                     'xmax-constant': 'axMAX',
                     'ymax-constant': 'ayMAX',
                     'target-label': 'goal',
                     'traps-label': 'traps',
                     'landmarks': 'stationvisit',
                     'resource-module': 'tank',
                     'resource-variable': 'fuel',
                     'resource-name': 'fuel',
                     'resource-maximum-constant': 'fuelCAP'
                     })

def refuel(N, ENERGY):
    return Model(_example_path("refuel.nm"), ProgramAnnotation(_grid_refuel),
                 ["Pmax=? [\"notbad\" U \"goal\"]"], constants=f"N={N},ENERGY={ENERGY}")

_grid_rocks = dict({'ego-xvar-module': 'robot',
                    'ego-xvar-name': 'x',
                    'ego-yvar-module': 'robot',
                    'ego-yvar-name': 'y',
                    'xmax-constant': 'xMAX',
                    'ymax-constant': 'yMAX',
                    'target-label': 'goal',
                    'traps-label': None,
                    'interactive-landmarks-x': ['r1x', 'r2x'],
                    'interactive-landmarks-y': ['r1y', 'r2y'],
                    'il-statusvar-module': ['rock1', 'rock2'],
                    'il-statusvar-name': ['r1qual', 'r2qual'],
                    'il-clearancevar-module': ['rock1', 'rock2'],
                    'il-clearancevar-name': ['r1taken', 'r2taken'],
                    'goal-action': True
                    })

def rocks(N, K=2):
    K = int(K)
    if K == 2:
        return Model(_example_path("rocks2.nm"), ProgramAnnotation(_grid_rocks),
                     ["Pmax=? [\"notbad\" U \"goal\"]"], constants=f"N={N}")
    else:
...
tf_io_pipline_tools.py
Source:tf_io_pipline_tools.py
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# @Time    : 19-2-15 2:13 PM
# @Author  : MaybeShewill-CV
# @Site    : https://github.com/MaybeShewill-CV/CRNN_Tensorflow
# @File    : tf_io_pipline_tools.py
# @IDE: PyCharm
"""
Some tensorflow records io tools
"""
import os
import os.path as ops

import cv2
import tensorflow as tf
import glog as log

from config import global_config

CFG = global_config.cfg

_R_MEAN = 123.68
_G_MEAN = 116.78
_B_MEAN = 103.94
_CHANNEL_MEANS = [_B_MEAN, _G_MEAN, _R_MEAN]

def int64_feature(value):
    """
    :return:
    """
    return tf.train.Feature(int64_list=tf.train.Int64List(value=[value]))

def bytes_feature(value):
    """
    :param value:
    :return:
    """
    return tf.train.Feature(bytes_list=tf.train.BytesList(value=[value]))

def write_example_tfrecords(example_paths, example_labels, tfrecords_path):
    """
    write tfrecords
    :param example_paths:
    :param example_labels:
    :param tfrecords_path:
    :return:
    """
    _tfrecords_dir = ops.split(tfrecords_path)[0]
    os.makedirs(_tfrecords_dir, exist_ok=True)
    log.info('Writing {:s}....'.format(tfrecords_path))
    with tf.python_io.TFRecordWriter(tfrecords_path) as _writer:
        for _index, _example_path in enumerate(example_paths):
            with open(_example_path, 'rb') as f:
                check_chars = f.read()[-2:]
            if check_chars != b'\xff\xd9':
                log.error('Image file {:s} is not complete'.format(_example_path))
                continue
            else:
                _example_image = cv2.imread(_example_path, cv2.IMREAD_COLOR)
                _example_image = cv2.resize(_example_image,
                                            dsize=(CFG.TRAIN.IMG_WIDTH, CFG.TRAIN.IMG_HEIGHT),
                                            interpolation=cv2.INTER_CUBIC)
                _example_image_raw = _example_image.tostring()
                _example = tf.train.Example(
                    features=tf.train.Features(
                        feature={
                            'height': int64_feature(CFG.TRAIN.IMG_HEIGHT),
                            'width': int64_feature(CFG.TRAIN.IMG_WIDTH),
                            'depth': int64_feature(3),
                            'label': int64_feature(example_labels[_index]),
                            'image_raw': bytes_feature(_example_image_raw)
                        }))
                _writer.write(_example.SerializeToString())
    log.info('Writing {:s} complete'.format(tfrecords_path))
    return

def decode(serialized_example):
    """
    Parses an image and label from the given `serialized_example`
    :param serialized_example:
    :return:
    """
    features = tf.parse_single_example(
        serialized_example,
        # Defaults are not specified since both keys are required.
        features={
            'image_raw': tf.FixedLenFeature([], tf.string),
            'label': tf.FixedLenFeature([], tf.int64),
            'height': tf.FixedLenFeature([], tf.int64),
            'width': tf.FixedLenFeature([], tf.int64),
            'depth': tf.FixedLenFeature([], tf.int64)
        })
    # decode image
    image = tf.decode_raw(features['image_raw'], tf.uint8)
    image_shape = tf.stack([CFG.TRAIN.IMG_HEIGHT, CFG.TRAIN.IMG_WIDTH, 3])
    image = tf.reshape(image, image_shape)
    # Convert label from a scalar int64 tensor to an int32 scalar.
    label = tf.cast(features['label'], tf.int32)
    return image, label

def augment_for_train(image, label):
    """
    :param image:
    :param label:
    :return:
    """
    # first apply random crop
    image = tf.image.random_crop(value=image,
                                 size=[CFG.TRAIN.CROP_IMG_HEIGHT, CFG.TRAIN.CROP_IMG_WIDTH, 3],
                                 seed=tf.set_random_seed(1234),
                                 name='crop_image')
    # apply random flip
    image = tf.image.random_flip_left_right(image=image, seed=tf.set_random_seed(1234))
    return image, label

def augment_for_validation(image, label):
    """
    :param image:
    :param label:
    :return:
    """
    assert CFG.TRAIN.IMG_HEIGHT == CFG.TRAIN.IMG_WIDTH
    assert CFG.TRAIN.CROP_IMG_HEIGHT == CFG.TRAIN.CROP_IMG_WIDTH
    # apply central crop
    central_fraction = CFG.TRAIN.CROP_IMG_HEIGHT / CFG.TRAIN.IMG_HEIGHT
    image = tf.image.central_crop(image=image, central_fraction=central_fraction)
    return image, label

def normalize(image, label):
    """
    Normalize the image data by subtracting the imagenet mean value
    :param image:
    :param label:
    :return:
    """
    if image.get_shape().ndims != 3:
        raise ValueError('Input must be of size [height, width, C>0]')
    image_fp = tf.cast(image, dtype=tf.float32)
    means = tf.expand_dims(tf.expand_dims(_CHANNEL_MEANS, 0), 0)
...
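decode, augment_for_train/augment_for_validation, and normalize are all written as per-element mapping functions, so the natural way to consume the records produced by write_example_tfrecords is a tf.data pipeline. The sketch below shows one plausible wiring under the TF 1.x API this file targets; the record path, batch size, and shuffle buffer are placeholder values, not taken from the repository's training script.

# Illustrative pipeline only; the file name and numeric parameters are placeholders.
dataset = tf.data.TFRecordDataset('train.tfrecords')
dataset = dataset.map(decode)               # serialized example -> (uint8 image, int32 label)
dataset = dataset.map(augment_for_train)    # random crop + random horizontal flip
dataset = dataset.map(normalize)            # cast to float32 and subtract channel means
dataset = dataset.shuffle(buffer_size=1000).batch(32).repeat()

iterator = dataset.make_one_shot_iterator()
images, labels = iterator.get_next()        # tensors fed into the training graph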
utils.py
Source:utils.py
import re

class DataFiles:
    _code_path = ""
    _input_path = ""
    _example_path = ""
    input = ""
    inputEX = ""
    raw_input = ""
    raw_inputEX = ""

    def __init__(self, path):
        self._code_path = path
        self._input_path = path.replace("code", "input").replace("Day", "").replace(".py", ".txt")
        self._example_path = re.sub(r"(\w+)(.txt$)", r"Example\g<2>", self._input_path)
        self._inputFile = open(self._input_path)
        self._inputEXFile = open(self._example_path)
        self.input = self.get_input()
        self.inputEX = self.get_inputEX()

    def __del__(self):
        self._inputFile.close()
        self._inputEXFile.close()

    def print_paths(self):
        print(self._code_path)
        print(self._input_path)
        print(self._example_path)

    def get_input(self):
        return self._inputFile.read().splitlines()

    def get_inputEX(self):
        return self._inputEXFile.read().splitlines()

    def get_input_raw(self):
        return self._inputFile.read()

    def get_inputEX_raw(self):
...
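DataFiles derives its paths from the location of the calling script: the replace() chain turns a .../code/DayNN.py path into .../input/NN.txt, and the re.sub swaps the file stem for "Example", giving .../input/Example.txt. A usage sketch under that assumed layout is below; note that get_input_raw() and get_inputEX_raw() return empty strings here, because __init__ has already read both files to the end.

# Hypothetical paths; the code/input directory layout is inferred from the
# replace() calls in __init__, not stated in the snippet itself.
files = DataFiles("/repo/code/Day01.py")
files.print_paths()            # prints /repo/code/Day01.py, /repo/input/01.txt, /repo/input/Example.txt
for line in files.input:       # real puzzle input, already split into lines
    pass
for line in files.inputEX:     # example input, already split into lines
    pass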