How to use test_transformations method in Slash

Best Python code snippets using slash, taken from open-source projects on GitHub.

library.py

Source: library.py (GitHub)

import pickle
import os
import math
import collections
import random

class TopicSeries(list):
    """
    Derived "list" class, with the ability to return
    all sub-lists of a particular length.
    """
    def get_subseries(self, length):
        """Generator that yields all sub-lists of self of length "length"."""
        index = 0
        while index + length <= len(self):
            yield self[index:index + length]
            index += 1

class Library(object):
    def __init__(self, **kwargs):
        """
        Set up internal lists, get parameter values,
        and set up transformation functions.
        """
        self.trends = []
        self.non_trends = []

        self.config = {}
        # default values
        self.config["reference_length"] = 210
        self.config["n_smooth"] = 80
        self.config["alpha"] = 1.2
        self.config["baseline_offset"] = 1  # assumed default; required by unit_normalization
        # add values passed into the ctor (the "config" kwarg is optional)
        self.config.update(kwargs.get("config", {}))

        # Transformation functions are defined globally and added here to lists.

        # transformations to be run on reference series
        self.transformations = []
        self.transformations.append(add_one)
        self.transformations.append(unit_normalization)
        self.transformations.append(logarithmic_scaling)
        self.transformations.append(smoothing)
        self.transformations.append(sizing)

        # transformations to be run on test series (no final sizing step)
        self.test_transformations = []
        self.test_transformations.append(add_one)
        self.test_transformations.append(unit_normalization)
        self.test_transformations.append(logarithmic_scaling)
        self.test_transformations.append(smoothing)

    def add_reference_series(self, series, is_trend=True):
        """
        Add a reference time series to the internal lists,
        after transforming it.
        """
        self.config["is_trend"] = is_trend
        series = self.transform_input(series, is_test_series=False)
        if is_trend:
            self.trends.append(TopicSeries(series))
        else:
            self.non_trends.append(TopicSeries(series))

    def transform_input(self, series, is_test_series, config=None):
        """
        Run the series sequentially through the functions in the
        transformations list (or test_transformations, for test series).
        """
        transformations = self.transformations
        if is_test_series:
            transformations = self.test_transformations

        for transformation in transformations:
            if config is not None:
                series = transformation(series, config)
            else:
                series = transformation(series, self.config)
        return series

    def combine(self, lib):
        """
        Merge the attributes of another Library into this one.
        Take care not to allow duplicates.
        """
        if lib.trends != []:
            assert self.trends == []
            self.trends = lib.trends
        if lib.non_trends != []:
            assert self.non_trends == []
            self.non_trends = lib.non_trends

def add_one(series, config):
    """Add a count of 1 to every count in the series."""
    return [ct + 1 for ct in series]

def unit_normalization(series, config):
    """Do unit normalization based on "reference_length" number of bins
    at the end of the series."""
    reference_length = int(config["reference_length"])
    SMALL_NUMBER = 0.00001
    offset = int(config["baseline_offset"])
    lower_idx = -(reference_length + offset)
    upper_idx = -offset if offset else None  # offset=0 means "through the end"
    total = sum(series[lower_idx:upper_idx]) / float(reference_length)
    if total == 0:
        total = SMALL_NUMBER
    return [float(pt) / total for pt in series]

def spike_normalization(series, config):
    """Replace each point with |pt - prev_pt| ** alpha, emphasizing spikes."""
    alpha = float(config["alpha"])
    new_series = []
    prev_pt = 0
    for pt in series:
        if pt == 0:
            new_pt = 0
        else:
            new_pt = math.pow(abs(pt - prev_pt), alpha)
        new_series.append(new_pt)
        prev_pt = pt
    return new_series

def smoothing(series, config):
    """Trailing moving average over a window of up to "n_smooth" points."""
    n_smooth = int(config["n_smooth"])
    queue = collections.deque()
    new_series = []
    for pt in series:
        queue.append(pt)
        new_series.append(float(sum(queue)) / len(queue))
        if len(queue) >= n_smooth:
            queue.popleft()
    return new_series

def slow_smoothing(series, config):
    """Same moving average as smoothing(), using a plain list as the queue."""
    n_smooth = int(config["n_smooth"])
    queue = []
    new_series = []
    for pt in series:
        queue.append(pt)
        new_series.append(float(sum(queue)) / len(queue))
        if len(queue) >= n_smooth:
            del queue[0]
    return new_series

def index_smoothing(series, config):
    """Same moving average, computed with index arithmetic on slices."""
    n_smooth = int(config["n_smooth"])
    new_series = []
    idx = 1
    while idx <= len(series):  # <= so the final point is included
        lower_idx = max(0, idx - n_smooth)
        sub_series = series[lower_idx:idx]
        new_series.append(float(sum(sub_series)) / len(sub_series))
        idx += 1
    return new_series

def logarithmic_scaling(series, config):
    """Take log10 of each point, clamping non-positive points to a small value."""
    new_series = []
    for pt in series:
        if pt <= 0:
            pt = 0.00001
        new_series.append(math.log10(pt))
    return new_series

def sizing(series, config):
    """Truncate the series to its last "reference_length" points."""
    return series[-int(config["reference_length"]):]

def save_library(library, file_name):
    with open(file_name, "wb") as f:  # pickle requires binary mode
        pickle.dump(library, f)

def load_library(file_name):
    try:
        with open(file_name, "rb") as f:  # pickle requires binary mode
            return pickle.load(f)
    except (EOFError, FileNotFoundError):
        return Library()

def merge_library(library, file_name):
    """
    If the file exists, get a Library object from it,
    and combine it with the library passed to this function.
    """
    if os.path.exists(os.path.join(os.getcwd(), file_name)):
        lib_from_file = load_library(file_name)
        library.combine(lib_from_file)
    return library

if __name__ == "__main__":
    import sys
    import argparse
    parser = argparse.ArgumentParser()
    parser.add_argument("-t", dest="is_trend", default=False, action="store_true")
    parser.add_argument("-f", dest="lib_file_name", default="library.pkl")
    args = parser.parse_args()
    series = []
    for ct in sys.stdin:  # one numeric count per line
        series.append(float(ct))
    lib = Library()
    lib.add_reference_series(series, is_trend=args.is_trend)
    merge_library(lib, args.lib_file_name)
    ...
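For context, here is a minimal sketch of how the test_transformations list above is exercised through transform_input. The Library class is the one defined in this snippet, but the counts and config values are invented for illustration:

# Hypothetical usage of the Library defined above; all numbers are invented.
lib = Library(config={"reference_length": 5, "n_smooth": 3, "baseline_offset": 1})

# Reference series run through self.transformations, which ends with sizing.
lib.add_reference_series([3, 1, 4, 1, 5, 9, 2, 6, 5, 3], is_trend=True)

# Test series run through self.test_transformations, which skips sizing.
scored = lib.transform_input([2, 7, 1, 8, 2, 8, 1, 8, 2, 8], is_test_series=True)
print(scored)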

torchvision_dataset.py

Source: torchvision_dataset.py (GitHub)

import torch
from torchvision import datasets, transforms, models
from collections.abc import Iterable

# Overriding the default user agent seems to be required for the download to succeed.
from six.moves import urllib
opener = urllib.request.build_opener()
opener.addheaders = [('User-agent', 'Mozilla/5.0')]
urllib.request.install_opener(opener)

def get_datasets(name, batch_size_train=256, batch_size_test=1024,
                 num_workers=2, pin_memory=True, transformation_kwargs=None):
    # TODO: validation
    dataset = getattr(datasets, name)
    if transformation_kwargs is None:
        transformation_kwargs = {}
    train_transform, test_transform = get_transformations(**transformation_kwargs)
    train_dataset = dataset(f'{name.lower()}_data', train=True, download=True,
                            transform=train_transform)
    test_dataset = dataset(f'{name.lower()}_data', train=False, download=False,
                           transform=test_transform)
    return train_dataset, test_dataset

def get_transformations(flip=False, crop=False, crop_size=32, crop_padding=4, normalize=None):
    train_transformations = []
    test_transformations = []
    # random augmentations apply to the training set only
    if flip:
        train_transformations.append(transforms.RandomHorizontalFlip())
    if crop:
        train_transformations.append(transforms.RandomCrop(crop_size, padding=crop_padding))
    # to tensor
    train_transformations.append(transforms.ToTensor())
    test_transformations.append(transforms.ToTensor())
    if normalize == 'cifar':
        # note: these are the standard ImageNet statistics, reused here for CIFAR
        train_transformations.append(transforms.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225)))
        test_transformations.append(transforms.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225)))
    elif isinstance(normalize, Iterable):
        train_transformations.append(transforms.Normalize(normalize[0], normalize[1]))
        test_transformations.append(transforms.Normalize(normalize[0], normalize[1]))
    train_transform = transforms.Compose(train_transformations)
    test_transform = transforms.Compose(test_transformations)
    return train_transform, test_transform  # the caller unpacks this pair
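As a rough usage sketch, assuming the helpers above live in torchvision_dataset.py: the dataset name and transformation_kwargs values below are illustrative (the normalization statistics are the commonly used MNIST mean and standard deviation), and the first call downloads MNIST into an mnist_data directory:

# Hypothetical call: MNIST train/test datasets with horizontal flipping and
# per-channel normalization; the keyword values here are made-up examples.
from torchvision_dataset import get_datasets

train_ds, test_ds = get_datasets(
    "MNIST",
    transformation_kwargs={"flip": True, "normalize": ((0.1307,), (0.3081,))},
)
print(len(train_ds), len(test_ds))  # 60000 10000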

cases_transformations_called.py

Source: cases_transformations_called.py (GitHub)

from pytest_cases import case

@case(id="squash_columns called")
def case_squash():
    test_transformations = {
        "squash_columns": [
            {
                "original_columns": ["Dep Adrs1", "Dep Adrs2", "Dep Adrs3"],
                "aggregate_col": "address_lines",
            }
        ],
    }
    log_message = "mock squash_columns"
    return (test_transformations, log_message)

@case(id="convert_to_bool called")
def case_bool():
    test_transformations = {
        "convert_to_bool": [
            {
                "original_columns": ["Dep Adrs1", "Dep Adrs2", "Dep Adrs3"],
                "aggregate_col": "address_lines",
            }
        ],
    }
    log_message = "mock convert_to_bool"
    return (test_transformations, log_message)

@case(id="unique_number called")
def case_unique():
    test_transformations = {
        "unique_number": [
            {
                "original_columns": ["Dep Adrs1", "Dep Adrs2", "Dep Adrs3"],
                "aggregate_col": "address_lines",
            }
        ],
    }
    log_message = "mock unique_number"
    return (test_transformations, log_message)
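These case functions would typically be consumed with parametrize_with_cases from pytest_cases. The test below is a hypothetical consumer (the pipeline being mocked is not shown in the snippet); it illustrates how the (test_transformations, log_message) tuples unpack into test arguments:

# Hypothetical consumer of the cases above.
from pytest_cases import parametrize_with_cases

import cases_transformations_called

@parametrize_with_cases("test_transformations,log_message",
                        cases=cases_transformations_called)
def test_transformation_called(test_transformations, log_message):
    # A real test would feed test_transformations into the code under test
    # and assert that log_message appears in the captured logs.
    assert log_message.startswith("mock ")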


