Setup.py
Source:Setup.py
import warnings
import numpy as np
import json
import os
import datetime
import glob
import re
import ntpath
import shutil
from keras.models import load_model


class Setup(object):
    def __init__(self, name):
        self._name = name
        self._model = None
        self._emptyModel = None
        self._training_ids = None
        self._validation_ids = None
        self._testing_ids = None
        self._training_data = None
        self._validation_data = None
        self._testing_data = None
        self._training_targets = None
        self._validation_targets = None
        self._testing_targets = None
        self._training_ids_directory = None
        self._validation_ids_directory = None
        self._testing_ids_directory = None
        self._training_data_directory = None
        self._validation_data_directory = None
        self._testing_data_directory = None
        self._training_targets_directory = None
        self._validation_targets_directory = None
        self._testing_targets_directory = None
        self._training_accuracy = []
        self._training_auc = []
        self._training_loss = []
        self._validation_accuracy = []
        self._validation_auc = []
        self._validation_loss = []
        self._testing_accuracy = []
        self._testing_auc = []
        self._testing_loss = []
        self._batch_size = None
        self._epochs = 0
        self._others = {}
        self._setup = {
            'name': self._name,
            'file': {
                'setup': '',
                'model': '',
                'model_arch_json': '',
                'model_arch_yaml': '',
                'model_weights': '',
                'training_ids': '',
                'validation_ids': '',
                'testing_ids': '',
                'training_data': '',
                'validation_data': '',
                'testing_data': '',
                'training_targets': '',
                'validation_targets': '',
                'testing_targets': '',
            },
            'directory': {
                'training_ids': '',
                'validation_ids': '',
                'testing_ids': '',
                'training_data': '',
                'validation_data': '',
                'testing_data': '',
                'training_targets': '',
                'validation_targets': '',
                'testing_targets': '',
            },
            'training_accuracy': self._training_accuracy,
            'training_auc': self._training_auc,
            'training_loss': self._training_loss,
            'validation_accuracy': self._validation_accuracy,
            'validation_auc': self._validation_auc,
            'validation_loss': self._validation_loss,
            'testing_accuracy': self._testing_accuracy,
            'testing_auc': self._testing_auc,
            'testing_loss': self._testing_loss,
            'epochs': self._epochs,
            'others': self._others,
        }

    def getName(self):
        return self._name

    def setName(self, name):
        self._name = name

    def getModel(self):
        return self._model

    def setModel(self, model):
        self._model = model
        # TODO: get the empty model and assign it to self._emptyModel

    def getData(self):
        return self._training_ids, self._training_data, self._training_targets, \
               self._validation_ids, self._validation_data, self._validation_targets, \
               self._testing_ids, self._testing_data, self._testing_targets

    def setData(self,
                training_ids=None, training_data=None, training_targets=None,
                validation_ids=None, validation_data=None, validation_targets=None,
                testing_ids=None, testing_data=None, testing_targets=None):
        self._training_ids = training_ids if training_ids is not None else self._training_ids
        self._validation_ids = validation_ids if validation_ids is not None else self._validation_ids
        self._testing_ids = testing_ids if testing_ids is not None else self._testing_ids
        self._training_data = training_data if training_data is not None else self._training_data
        self._validation_data = validation_data if validation_data is not None else self._validation_data
        self._testing_data = testing_data if testing_data is not None else self._testing_data
        self._training_targets = training_targets if training_targets is not None else self._training_targets
        self._validation_targets = validation_targets if validation_targets is not None else self._validation_targets
        self._testing_targets = testing_targets if testing_targets is not None else self._testing_targets

    def getDataDirectory(self):
        return self._training_ids_directory, self._training_data_directory, self._training_targets_directory, \
               self._validation_ids_directory, self._validation_data_directory, self._validation_targets_directory, \
               self._testing_ids_directory, self._testing_data_directory, self._testing_targets_directory

    def setDataDirectory(self,
                         training_ids_directory=None, training_data_directory=None, training_targets_directory=None,
                         validation_ids_directory=None, validation_data_directory=None, validation_targets_directory=None,
                         testing_ids_directory=None, testing_data_directory=None, testing_targets_directory=None):
        self._training_ids_directory = training_ids_directory if training_ids_directory is not None else self._training_ids_directory
        self._validation_ids_directory = validation_ids_directory if validation_ids_directory is not None else self._validation_ids_directory
        self._testing_ids_directory = testing_ids_directory if testing_ids_directory is not None else self._testing_ids_directory
        self._training_data_directory = training_data_directory if training_data_directory is not None else self._training_data_directory
        self._validation_data_directory = validation_data_directory if validation_data_directory is not None else self._validation_data_directory
        self._testing_data_directory = testing_data_directory if testing_data_directory is not None else self._testing_data_directory
        self._training_targets_directory = training_targets_directory if training_targets_directory is not None else self._training_targets_directory
        self._validation_targets_directory = validation_targets_directory if validation_targets_directory is not None else self._validation_targets_directory
        self._testing_targets_directory = testing_targets_directory if testing_targets_directory is not None else self._testing_targets_directory

    def getEpoch(self):
        return self._epochs

    def updateEpochs(self, add_epochs,
                     training_acc, training_auc, training_loss,
                     validation_acc, validation_auc, validation_loss,
                     testing_acc, testing_auc, testing_loss,
                     allow_modify=True):
        # TODO: check
        def checkListLength(length, mList, listName, allowModify):
            modifiedList = mList
            if allowModify:
                if len(modifiedList) < length:
                    modifiedList.extend([mList[-1] for i in range(length - len(modifiedList))])
                elif len(modifiedList) > length:
                    warnings.warn('%s list is longer than add_epochs. Trimmed list will be used.' % listName)
                    modifiedList = modifiedList[:length]
            else:
                if len(modifiedList) != length:
                    raise ValueError('%s list length is not equal to add_epochs' % listName)
            return modifiedList

        # Checking parameters
        # add_epochs
        if add_epochs is None or type(add_epochs) != int:
            raise TypeError('add_epochs should have type \'int\'')
        elif add_epochs < 0:
            raise ValueError('add_epochs should be >= 0')
        if training_acc is None or type(training_acc) != list or \
                training_auc is None or type(training_auc) != list or \
                training_loss is None or type(training_loss) != list or \
                validation_acc is None or type(validation_acc) != list or \
                validation_auc is None or type(validation_auc) != list or \
                validation_loss is None or type(validation_loss) != list or \
                testing_acc is None or type(testing_acc) != list or \
                testing_auc is None or type(testing_auc) != list or \
                testing_loss is None or type(testing_loss) != list:
            raise TypeError('training_acc, training_auc, training_loss, '
                            'validation_acc, validation_auc, validation_loss, '
                            'testing_acc, testing_auc, testing_loss should have type \'list\'')
        new_train_acc = checkListLength(add_epochs, training_acc, 'training_acc', allow_modify)
        new_train_auc = checkListLength(add_epochs, training_auc, 'training_auc', allow_modify)
        new_train_loss = checkListLength(add_epochs, training_loss, 'training_loss', allow_modify)
        new_val_acc = checkListLength(add_epochs, validation_acc, 'validation_acc', allow_modify)
        new_val_auc = checkListLength(add_epochs, validation_auc, 'validation_auc', allow_modify)
        new_val_loss = checkListLength(add_epochs, validation_loss, 'validation_loss', allow_modify)
        new_test_acc = checkListLength(add_epochs, testing_acc, 'testing_acc', allow_modify)
        new_test_auc = checkListLength(add_epochs, testing_auc, 'testing_auc', allow_modify)
        new_test_loss = checkListLength(add_epochs, testing_loss, 'testing_loss', allow_modify)
        self._epochs += add_epochs
        self._training_accuracy.extend(new_train_acc)
        self._training_auc.extend(new_train_auc)
        self._training_loss.extend(new_train_loss)
        self._validation_accuracy.extend(new_val_acc)
        self._validation_auc.extend(new_val_auc)
        self._validation_loss.extend(new_val_loss)
        self._testing_accuracy.extend(new_test_acc)
        self._testing_auc.extend(new_test_auc)
        self._testing_loss.extend(new_test_loss)

    def getOthers(self):
        return self._others

    def setOthers(self, others):
        for key in others:
            self._others[key] = others[key]

    def save(self, rel_path):
        # Save every information or object
        if rel_path is None:
            raise ValueError('rel_path should not be None')
        self._setup['name'] = self._name
        self._setup['time'] = str(datetime.datetime.now())
        self._setup['file']['setup'] = os.path.join('setup.json')
        self._setup['file']['model'] = os.path.join('model.h5')
        self._setup['file']['model_arch_json'] = os.path.join('model_architecture.json')
        self._setup['file']['model_arch_yaml'] = os.path.join('model_architecture.yaml')
        self._setup['file']['model_weights'] = os.path.join('model_weights.h5')
        self._setup['file']['training_ids'] = os.path.join('training_ids.npy')
        self._setup['file']['validation_ids'] = os.path.join('validation_ids.npy')
        self._setup['file']['testing_ids'] = os.path.join('testing_ids.npy')
        self._setup['file']['training_data'] = os.path.join('training_data.npy')
        self._setup['file']['validation_data'] = os.path.join('validation_data.npy')
        self._setup['file']['testing_data'] = os.path.join('testing_data.npy')
        self._setup['file']['training_targets'] = os.path.join('training_targets.npy')
        self._setup['file']['validation_targets'] = os.path.join('validation_targets.npy')
        self._setup['file']['testing_targets'] = os.path.join('testing_targets.npy')
        self._setup['directory']['training_ids'] = self._training_ids_directory
        self._setup['directory']['validation_ids'] = self._validation_ids_directory
        self._setup['directory']['testing_ids'] = self._testing_ids_directory
        self._setup['directory']['training_data'] = self._training_data_directory
        self._setup['directory']['validation_data'] = self._validation_data_directory
        self._setup['directory']['testing_data'] = self._testing_data_directory
        self._setup['directory']['training_targets'] = self._training_targets_directory
        self._setup['directory']['validation_targets'] = self._validation_targets_directory
        self._setup['directory']['testing_targets'] = self._testing_targets_directory
        self._setup['training_accuracy'] = self._training_accuracy
        self._setup['training_auc'] = self._training_auc
        self._setup['training_loss'] = self._training_loss
        self._setup['validation_accuracy'] = self._validation_accuracy
        self._setup['validation_auc'] = self._validation_auc
        self._setup['validation_loss'] = self._validation_loss
        self._setup['testing_accuracy'] = self._testing_accuracy
        self._setup['testing_auc'] = self._testing_auc
        self._setup['testing_loss'] = self._testing_loss
        self._setup['epochs'] = self._epochs
        self._setup['others'] = self._others
        if not os.path.exists(os.path.join(os.getcwd(), rel_path)):
            os.mkdir(os.path.join(os.getcwd(), rel_path))
        if not os.path.exists(os.path.join(os.getcwd(), rel_path, self._name)):
            os.mkdir(os.path.join(os.getcwd(), rel_path, self._name))
        if len(glob.glob(os.path.join(os.getcwd(), rel_path, self._name, '*.*'))) > 0:
            versions = []
            pattern = r'^.*version(?P<versionnumber>\d*)$'
            for dir in glob.glob(os.path.join(os.getcwd(), rel_path, self._name, 'version*')):
                regex = re.search(pattern, dir)
                versions.append(int(regex.group('versionnumber')))
            if len(versions) == 0:
                maxVer = 0
            else:
                maxVer = np.max(versions)
            newVerDirName = 'version%s' % (maxVer + 1)
            os.mkdir(os.path.join(os.getcwd(), rel_path, self._name, newVerDirName))
            self._backup_version(os.path.join(os.getcwd(), rel_path, self._name),
                                 os.path.join(os.getcwd(), rel_path, self._name, newVerDirName))
            if (maxVer + 1 - 10) > 0 and (maxVer + 1 - 10) % 20 != 0:
                oldVerDirName = 'version%s' % (maxVer + 1 - 10)
                shutil.rmtree(os.path.join(os.getcwd(), rel_path, self._name, oldVerDirName))
        # ==========================================
        # Save whole model
        self._model.save(os.path.join(os.getcwd(), rel_path, self._name, self._setup['file']['model']))
        # ==========================================
        # Save model architecture
        json_model_arch = self._model.to_json()
        with open(os.path.join(os.getcwd(), rel_path, self._name, self._setup['file']['model_arch_json']), 'w') as jsonfile:
            jsonfile.write(json_model_arch)
        yaml_model_arch = self._model.to_yaml()
        with open(os.path.join(os.getcwd(), rel_path, self._name, self._setup['file']['model_arch_yaml']), 'w') as yamlfile:
            yamlfile.write(yaml_model_arch)
        # ==========================================
        # Save model weights
        self._model.save_weights(os.path.join(os.getcwd(), rel_path, self._name,
                                              self._setup['file']['model_weights']))
        # ==========================================
        # Save data
        if self._training_ids is not None and type(self._training_ids) == np.ndarray:
            try:
                np.save(os.path.join(os.getcwd(), rel_path, self._name, self._setup['file']['training_ids']), self._training_ids)
            except Exception as e:
                self._setup['file']['training_ids'] = None
        if self._validation_ids is not None and type(self._validation_ids) == np.ndarray:
            try:
                np.save(os.path.join(os.getcwd(), rel_path, self._name, self._setup['file']['validation_ids']), self._validation_ids)
            except Exception as e:
                self._setup['file']['validation_ids'] = None
        if self._testing_ids is not None and type(self._testing_ids) == np.ndarray:
            try:
                np.save(os.path.join(os.getcwd(), rel_path, self._name, self._setup['file']['testing_ids']), self._testing_ids)
            except Exception as e:
                self._setup['file']['testing_ids'] = None
        if self._training_data is not None and type(self._training_data) == np.ndarray:
            try:
                np.save(os.path.join(os.getcwd(), rel_path, self._name, self._setup['file']['training_data']), self._training_data)
            except Exception as e:
                self._setup['file']['training_data'] = None
        if self._validation_data is not None and type(self._validation_data) == np.ndarray:
            try:
                np.save(os.path.join(os.getcwd(), rel_path, self._name, self._setup['file']['validation_data']), self._validation_data)
            except Exception as e:
                self._setup['file']['validation_data'] = None
        if self._testing_data is not None and type(self._testing_data) == np.ndarray:
            try:
                np.save(os.path.join(os.getcwd(), rel_path, self._name, self._setup['file']['testing_data']), self._testing_data)
            except Exception as e:
                self._setup['file']['testing_data'] = None
        if self._training_targets is not None and type(self._training_targets) == np.ndarray:
            try:
                np.save(os.path.join(os.getcwd(), rel_path, self._name, self._setup['file']['training_targets']), self._training_targets)
            except Exception as e:
                self._setup['file']['training_targets'] = None
        if self._validation_targets is not None and type(self._validation_targets) == np.ndarray:
            try:
                np.save(os.path.join(os.getcwd(), rel_path, self._name, self._setup['file']['validation_targets']), self._validation_targets)
            except Exception as e:
                self._setup['file']['validation_targets'] = None
        if self._testing_targets is not None and type(self._testing_targets) == np.ndarray:
            try:
                np.save(os.path.join(os.getcwd(), rel_path, self._name, self._setup['file']['testing_targets']), self._testing_targets)
            except Exception as e:
                self._setup['file']['testing_targets'] = None
        # ==========================================
        # Save setup
        with open(os.path.join(os.getcwd(), rel_path, self._name, self._setup['file']['setup']), 'w') as setupfile:
            json.dump(self._setup, setupfile)

    def load(self, rel_filepath):
        cwd = os.getcwd()
        if rel_filepath is None:
            raise ValueError('rel_filepath should not be None')
        # ==========================================
        # Load info
        with open(os.path.join(cwd, rel_filepath), 'r') as setupfile:
            self._setup = json.load(setupfile)
        rel_filepath = rel_filepath.replace('setup.json', '')
        # ==========================================
        # Load name
        self._name = self._setup['name']
        # ==========================================
        # Load whole model
        self._model = load_model(os.path.join(cwd, rel_filepath, self._setup['file']['model']))
        # TODO: if loading from model h5 file fails, then load from model arch file and load weights
        # # ==========================================
        # # Load model architecture
        # with open(os.path.join(directory, self.setup['model_arch_json']), 'r') as jsonfile:
        #     self.emptyModel = model_from_json(jsonfile.read())
        #
        # with open(os.path.join(directory, self.setup['model_arch_yaml']), 'r') as yamlfile:
        #     self.emptyModel = model_from_yaml(yamlfile.read())
        # # ==========================================
        # # Load model weights
        # self.model.load_weights(os.path.join(directory, self.setup['model_weights']))
        # ==========================================
        # Load data
        self._training_ids = np.load(os.path.join(cwd, rel_filepath, self._setup['file']['training_ids'])) \
            if os.path.exists(os.path.join(cwd, rel_filepath, self._setup['file']['training_ids'])) else self._training_ids
        self._validation_ids = np.load(os.path.join(cwd, rel_filepath, self._setup['file']['validation_ids'])) \
            if os.path.exists(os.path.join(cwd, rel_filepath, self._setup['file']['validation_ids'])) else self._validation_ids
        self._testing_ids = np.load(os.path.join(cwd, rel_filepath, self._setup['file']['testing_ids'])) \
            if os.path.exists(os.path.join(cwd, rel_filepath, self._setup['file']['testing_ids'])) else self._testing_ids
        self._training_data = np.load(os.path.join(cwd, rel_filepath, self._setup['file']['training_data'])) \
            if os.path.exists(os.path.join(cwd, rel_filepath, self._setup['file']['training_data'])) else self._training_data
        self._validation_data = np.load(os.path.join(cwd, rel_filepath, self._setup['file']['validation_data'])) \
            if os.path.exists(os.path.join(cwd, rel_filepath, self._setup['file']['validation_data'])) else self._validation_data
        self._testing_data = np.load(os.path.join(cwd, rel_filepath, self._setup['file']['testing_data'])) \
            if os.path.exists(os.path.join(cwd, rel_filepath, self._setup['file']['testing_data'])) else self._testing_data
        self._training_targets = np.load(os.path.join(cwd, rel_filepath, self._setup['file']['training_targets'])) \
            if os.path.exists(os.path.join(cwd, rel_filepath, self._setup['file']['training_targets'])) else self._training_targets
        self._validation_targets = np.load(os.path.join(cwd, rel_filepath, self._setup['file']['validation_targets'])) \
            if os.path.exists(os.path.join(cwd, rel_filepath, self._setup['file']['validation_targets'])) else self._validation_targets
        self._testing_targets = np.load(os.path.join(cwd, rel_filepath, self._setup['file']['testing_targets'])) \
            if os.path.exists(os.path.join(cwd, rel_filepath, self._setup['file']['testing_targets'])) else self._testing_targets
        # ==========================================
        # Load data directory
        self._training_data_directory = self._setup['directory']['training_data']
        self._validation_data_directory = self._setup['directory']['validation_data']
        self._testing_data_directory = self._setup['directory']['testing_data']
        self._training_targets_directory = self._setup['directory']['training_targets']
        self._validation_targets_directory = self._setup['directory']['validation_targets']
        self._testing_targets_directory = self._setup['directory']['testing_targets']
        # ==========================================
        # Load info
        self._training_accuracy = self._setup['training_accuracy']
        self._training_auc = self._setup['training_auc']
        self._training_loss = self._setup['training_loss']
        self._validation_accuracy = self._setup['validation_accuracy']
        self._validation_auc = self._setup['validation_auc']
        self._validation_loss = self._setup['validation_loss']
        self._testing_accuracy = self._setup['testing_accuracy']
        self._testing_auc = self._setup['testing_auc']
        self._testing_loss = self._setup['testing_loss']
        self._epochs = self._setup['epochs']
        self._others = self._setup['others']

    def save_setupfile(self, rel_path):
        with open(os.path.join(os.getcwd(), rel_path, self._name, self._setup['file']['setup']), 'w') as setupfile:
            json.dump(self._setup, setupfile)

    def _backup_version(self, source, destination):
        ignore_list = ['training_data',
                       'validation_data',
                       'testing_data',
                       'training_targets',
                       'validation_targets',
                       'testing_targets']
        for file in glob.glob(os.path.join(source, '*.*')):
            ignore = False
            for ignored_filename in ignore_list:
                if ignored_filename in file:
                    ignore = True
            if ignore:
                continue
            else:
...
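The class above is a bookkeeping wrapper around a Keras model, its datasets, and its per-epoch metrics. A minimal usage sketch follows; the "experiments" output directory, the toy Sequential model, and the placeholder metric values are illustrative assumptions rather than part of the snippet, and save() requires a Keras version that still supports model.to_yaml().

# Minimal sketch of driving Setup; names and numbers are placeholders.
import numpy as np
from keras.models import Sequential
from keras.layers import Dense

setup = Setup('demo_experiment')

model = Sequential([Dense(1, activation='sigmoid', input_shape=(4,))])
model.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'])
setup.setModel(model)

x_train = np.random.rand(32, 4)
y_train = (np.random.rand(32) > 0.5).astype(float)
setup.setData(training_ids=np.arange(32), training_data=x_train, training_targets=y_train)

# Record two epochs of (placeholder) metrics; each list holds one value per epoch.
setup.updateEpochs(add_epochs=2,
                   training_acc=[0.61, 0.68], training_auc=[0.60, 0.66], training_loss=[0.69, 0.64],
                   validation_acc=[0.58, 0.63], validation_auc=[0.57, 0.62], validation_loss=[0.70, 0.66],
                   testing_acc=[0.55, 0.60], testing_auc=[0.54, 0.59], testing_loss=[0.71, 0.68])

setup.save('experiments')                                # writes experiments/demo_experiment/
setup.load('experiments/demo_experiment/setup.json')     # restores model, data, and metrics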
test_scales.py
Source:test_scales.py
...
    @pytest.fixture
    def x(self):
        return pd.Series([1, 3, 9], name="x", dtype=float)

    def setup_ticks(self, x, *args, **kwargs):
        s = Continuous().tick(*args, **kwargs)._setup(x, Coordinate())
        a = PseudoAxis(s._matplotlib_scale)
        a.set_view_interval(0, 1)
        return a

    def setup_labels(self, x, *args, **kwargs):
        s = Continuous().label(*args, **kwargs)._setup(x, Coordinate())
        a = PseudoAxis(s._matplotlib_scale)
        a.set_view_interval(0, 1)
        locs = a.major.locator()
        return a, locs

    def test_coordinate_defaults(self, x):
        s = Continuous()._setup(x, Coordinate())
        assert_series_equal(s(x), x)

    def test_coordinate_transform(self, x):
        s = Continuous(trans="log")._setup(x, Coordinate())
        assert_series_equal(s(x), np.log10(x))

    def test_coordinate_transform_with_parameter(self, x):
        s = Continuous(trans="pow3")._setup(x, Coordinate())
        assert_series_equal(s(x), np.power(x, 3))

    def test_coordinate_transform_error(self, x):
        s = Continuous(trans="bad")
        with pytest.raises(ValueError, match="Unknown value provided"):
            s._setup(x, Coordinate())

    def test_interval_defaults(self, x):
        s = Continuous()._setup(x, IntervalProperty())
        assert_array_equal(s(x), [0, .25, 1])

    def test_interval_with_range(self, x):
        s = Continuous((1, 3))._setup(x, IntervalProperty())
        assert_array_equal(s(x), [1, 1.5, 3])

    def test_interval_with_norm(self, x):
        s = Continuous(norm=(3, 7))._setup(x, IntervalProperty())
        assert_array_equal(s(x), [-.5, 0, 1.5])

    def test_interval_with_range_norm_and_transform(self, x):
        x = pd.Series([1, 10, 100])
        # TODO param order?
        s = Continuous((2, 3), (10, 100), "log")._setup(x, IntervalProperty())
        assert_array_equal(s(x), [1, 2, 3])

    def test_color_defaults(self, x):
        cmap = color_palette("ch:", as_cmap=True)
        s = Continuous()._setup(x, Color())
        assert_array_equal(s(x), cmap([0, .25, 1])[:, :3])  # FIXME RGBA

    def test_color_named_values(self, x):
        cmap = color_palette("viridis", as_cmap=True)
        s = Continuous("viridis")._setup(x, Color())
        assert_array_equal(s(x), cmap([0, .25, 1])[:, :3])  # FIXME RGBA

    def test_color_tuple_values(self, x):
        cmap = color_palette("blend:b,g", as_cmap=True)
        s = Continuous(("b", "g"))._setup(x, Color())
        assert_array_equal(s(x), cmap([0, .25, 1])[:, :3])  # FIXME RGBA

    def test_color_callable_values(self, x):
        cmap = color_palette("light:r", as_cmap=True)
        s = Continuous(cmap)._setup(x, Color())
        assert_array_equal(s(x), cmap([0, .25, 1])[:, :3])  # FIXME RGBA

    def test_color_with_norm(self, x):
        cmap = color_palette("ch:", as_cmap=True)
        s = Continuous(norm=(3, 7))._setup(x, Color())
        assert_array_equal(s(x), cmap([-.5, 0, 1.5])[:, :3])  # FIXME RGBA

    def test_color_with_transform(self, x):
        x = pd.Series([1, 10, 100], name="x", dtype=float)
        cmap = color_palette("ch:", as_cmap=True)
        s = Continuous(trans="log")._setup(x, Color())
        assert_array_equal(s(x), cmap([0, .5, 1])[:, :3])  # FIXME RGBA

    def test_tick_locator(self, x):
        locs = [.2, .5, .8]
        locator = mpl.ticker.FixedLocator(locs)
        a = self.setup_ticks(x, locator)
        assert_array_equal(a.major.locator(), locs)

    def test_tick_locator_input_check(self, x):
        err = "Tick locator must be an instance of .*?, not <class 'tuple'>."
        with pytest.raises(TypeError, match=err):
            Continuous().tick((1, 2))

    def test_tick_upto(self, x):
        for n in [2, 5, 10]:
            a = self.setup_ticks(x, upto=n)
            assert len(a.major.locator()) <= (n + 1)

    def test_tick_every(self, x):
        for d in [.05, .2, .5]:
            a = self.setup_ticks(x, every=d)
            assert np.allclose(np.diff(a.major.locator()), d)

    def test_tick_every_between(self, x):
        lo, hi = .2, .8
        for d in [.05, .2, .5]:
            a = self.setup_ticks(x, every=d, between=(lo, hi))
            expected = np.arange(lo, hi + d, d)
            assert_array_equal(a.major.locator(), expected)

    def test_tick_at(self, x):
        locs = [.2, .5, .9]
        a = self.setup_ticks(x, at=locs)
        assert_array_equal(a.major.locator(), locs)

    def test_tick_count(self, x):
        n = 8
        a = self.setup_ticks(x, count=n)
        assert_array_equal(a.major.locator(), np.linspace(0, 1, n))

    def test_tick_count_between(self, x):
        n = 5
        lo, hi = .2, .7
        a = self.setup_ticks(x, count=n, between=(lo, hi))
        assert_array_equal(a.major.locator(), np.linspace(lo, hi, n))

    def test_tick_minor(self, x):
        n = 3
        a = self.setup_ticks(x, count=2, minor=n)
        # I am not sure why matplotlib's minor ticks include the
        # largest major location but exclude the smallest one ...
        expected = np.linspace(0, 1, n + 2)[1:]
        assert_array_equal(a.minor.locator(), expected)

    def test_log_tick_default(self, x):
        s = Continuous(trans="log")._setup(x, Coordinate())
        a = PseudoAxis(s._matplotlib_scale)
        a.set_view_interval(.5, 1050)
        ticks = a.major.locator()
        assert np.allclose(np.diff(np.log10(ticks)), 1)

    def test_log_tick_upto(self, x):
        n = 3
        s = Continuous(trans="log").tick(upto=n)._setup(x, Coordinate())
        a = PseudoAxis(s._matplotlib_scale)
        assert a.major.locator.numticks == n

    def test_log_tick_count(self, x):
        with pytest.raises(RuntimeError, match="`count` requires"):
            Continuous(trans="log").tick(count=4)
        s = Continuous(trans="log").tick(count=4, between=(1, 1000))
        a = PseudoAxis(s._setup(x, Coordinate())._matplotlib_scale)
        a.set_view_interval(.5, 1050)
        assert_array_equal(a.major.locator(), [1, 10, 100, 1000])

    def test_log_tick_every(self, x):
        with pytest.raises(RuntimeError, match="`every` not supported"):
            Continuous(trans="log").tick(every=2)

    def test_symlog_tick_default(self, x):
        s = Continuous(trans="symlog")._setup(x, Coordinate())
        a = PseudoAxis(s._matplotlib_scale)
        a.set_view_interval(-1050, 1050)
        ticks = a.major.locator()
        assert ticks[0] == -ticks[-1]
        pos_ticks = np.sort(np.unique(np.abs(ticks)))
        assert np.allclose(np.diff(np.log10(pos_ticks[1:])), 1)
        assert pos_ticks[0] == 0

    def test_label_formatter(self, x):
        fmt = mpl.ticker.FormatStrFormatter("%.3f")
        a, locs = self.setup_labels(x, fmt)
        labels = a.major.formatter.format_ticks(locs)
        for text in labels:
            assert re.match(r"^\d\.\d{3}$", text)

    def test_label_like_pattern(self, x):
        a, locs = self.setup_labels(x, like=".4f")
        labels = a.major.formatter.format_ticks(locs)
        for text in labels:
            assert re.match(r"^\d\.\d{4}$", text)

    def test_label_like_string(self, x):
        a, locs = self.setup_labels(x, like="x = {x:.1f}")
        labels = a.major.formatter.format_ticks(locs)
        for text in labels:
            assert re.match(r"^x = \d\.\d$", text)

    def test_label_like_function(self, x):
        a, locs = self.setup_labels(x, like="{:^5.1f}".format)
        labels = a.major.formatter.format_ticks(locs)
        for text in labels:
            assert re.match(r"^ \d\.\d $", text)

    def test_label_base(self, x):
        a, locs = self.setup_labels(100 * x, base=2)
        labels = a.major.formatter.format_ticks(locs)
        for text in labels[1:]:
            assert not text or "2^" in text

    def test_label_unit(self, x):
        a, locs = self.setup_labels(1000 * x, unit="g")
        labels = a.major.formatter.format_ticks(locs)
        for text in labels[1:-1]:
            assert re.match(r"^\d+ mg$", text)

    def test_label_unit_with_sep(self, x):
        a, locs = self.setup_labels(1000 * x, unit=("", "g"))
        labels = a.major.formatter.format_ticks(locs)
        for text in labels[1:-1]:
            assert re.match(r"^\d+mg$", text)

    def test_label_empty_unit(self, x):
        a, locs = self.setup_labels(1000 * x, unit="")
        labels = a.major.formatter.format_ticks(locs)
        for text in labels[1:-1]:
            assert re.match(r"^\d+m$", text)

    def test_label_base_from_transform(self, x):
        s = Continuous(trans="log")
        a = PseudoAxis(s._setup(x, Coordinate())._matplotlib_scale)
        a.set_view_interval(10, 1000)
        label, = a.major.formatter.format_ticks([100])
        assert r"10^{2}" in label

    def test_label_type_checks(self):
        s = Continuous()
        with pytest.raises(TypeError, match="Label formatter must be"):
            s.label("{x}")
        with pytest.raises(TypeError, match="`like` must be"):
            s.label(like=2)


class TestNominal:

    @pytest.fixture
    def x(self):
        return pd.Series(["a", "c", "b", "c"], name="x")

    @pytest.fixture
    def y(self):
        return pd.Series([1, -1.5, 3, -1.5], name="y")

    def test_coordinate_defaults(self, x):
        s = Nominal()._setup(x, Coordinate())
        assert_array_equal(s(x), np.array([0, 1, 2, 1], float))

    def test_coordinate_with_order(self, x):
        s = Nominal(order=["a", "b", "c"])._setup(x, Coordinate())
        assert_array_equal(s(x), np.array([0, 2, 1, 2], float))

    def test_coordinate_with_subset_order(self, x):
        s = Nominal(order=["c", "a"])._setup(x, Coordinate())
        assert_array_equal(s(x), np.array([1, 0, np.nan, 0], float))

    def test_coordinate_axis(self, x):
        ax = mpl.figure.Figure().subplots()
        s = Nominal()._setup(x, Coordinate(), ax.xaxis)
        assert_array_equal(s(x), np.array([0, 1, 2, 1], float))
        f = ax.xaxis.get_major_formatter()
        assert f.format_ticks([0, 1, 2]) == ["a", "c", "b"]

    def test_coordinate_axis_with_order(self, x):
        order = ["a", "b", "c"]
        ax = mpl.figure.Figure().subplots()
        s = Nominal(order=order)._setup(x, Coordinate(), ax.xaxis)
        assert_array_equal(s(x), np.array([0, 2, 1, 2], float))
        f = ax.xaxis.get_major_formatter()
        assert f.format_ticks([0, 1, 2]) == order

    def test_coordinate_axis_with_subset_order(self, x):
        order = ["c", "a"]
        ax = mpl.figure.Figure().subplots()
        s = Nominal(order=order)._setup(x, Coordinate(), ax.xaxis)
        assert_array_equal(s(x), np.array([1, 0, np.nan, 0], float))
        f = ax.xaxis.get_major_formatter()
        assert f.format_ticks([0, 1, 2]) == [*order, ""]

    def test_coordinate_axis_with_category_dtype(self, x):
        order = ["b", "a", "d", "c"]
        x = x.astype(pd.CategoricalDtype(order))
        ax = mpl.figure.Figure().subplots()
        s = Nominal()._setup(x, Coordinate(), ax.xaxis)
        assert_array_equal(s(x), np.array([1, 3, 0, 3], float))
        f = ax.xaxis.get_major_formatter()
        assert f.format_ticks([0, 1, 2, 3]) == order

    def test_coordinate_numeric_data(self, y):
        ax = mpl.figure.Figure().subplots()
        s = Nominal()._setup(y, Coordinate(), ax.yaxis)
        assert_array_equal(s(y), np.array([1, 0, 2, 0], float))
        f = ax.yaxis.get_major_formatter()
        assert f.format_ticks([0, 1, 2]) == ["-1.5", "1.0", "3.0"]

    def test_coordinate_numeric_data_with_order(self, y):
        order = [1, 4, -1.5]
        ax = mpl.figure.Figure().subplots()
        s = Nominal(order=order)._setup(y, Coordinate(), ax.yaxis)
        assert_array_equal(s(y), np.array([0, 2, np.nan, 2], float))
        f = ax.yaxis.get_major_formatter()
        assert f.format_ticks([0, 1, 2]) == ["1.0", "4.0", "-1.5"]

    def test_color_defaults(self, x):
        s = Nominal()._setup(x, Color())
        cs = color_palette()
        assert_array_equal(s(x), [cs[0], cs[1], cs[2], cs[1]])

    def test_color_named_palette(self, x):
        pal = "flare"
        s = Nominal(pal)._setup(x, Color())
        cs = color_palette(pal, 3)
        assert_array_equal(s(x), [cs[0], cs[1], cs[2], cs[1]])

    def test_color_list_palette(self, x):
        cs = color_palette("crest", 3)
        s = Nominal(cs)._setup(x, Color())
        assert_array_equal(s(x), [cs[0], cs[1], cs[2], cs[1]])

    def test_color_dict_palette(self, x):
        cs = color_palette("crest", 3)
        pal = dict(zip("bac", cs))
        s = Nominal(pal)._setup(x, Color())
        assert_array_equal(s(x), [cs[1], cs[2], cs[0], cs[2]])

    def test_color_numeric_data(self, y):
        s = Nominal()._setup(y, Color())
        cs = color_palette()
        assert_array_equal(s(y), [cs[1], cs[0], cs[2], cs[0]])

    def test_color_numeric_with_order_subset(self, y):
        s = Nominal(order=[-1.5, 1])._setup(y, Color())
        c1, c2 = color_palette(n_colors=2)
        null = (np.nan, np.nan, np.nan)
        assert_array_equal(s(y), [c2, c1, null, c1])

    @pytest.mark.xfail(reason="Need to sort out float/int order")
    def test_color_numeric_int_float_mix(self):
        z = pd.Series([1, 2], name="z")
        s = Nominal(order=[1.0, 2])._setup(z, Color())
        c1, c2 = color_palette(n_colors=2)
        null = (np.nan, np.nan, np.nan)
        assert_array_equal(s(z), [c1, null, c2])

    def test_color_alpha_in_palette(self, x):
        cs = [(.2, .2, .3, .5), (.1, .2, .3, 1), (.5, .6, .2, 0)]
        s = Nominal(cs)._setup(x, Color())
        assert_array_equal(s(x), [cs[0], cs[1], cs[2], cs[1]])

    def test_color_unknown_palette(self, x):
        pal = "not_a_palette"
        err = f"{pal} is not a valid palette name"
        with pytest.raises(ValueError, match=err):
            Nominal(pal)._setup(x, Color())

    def test_object_defaults(self, x):
        class MockProperty(ObjectProperty):
            def _default_values(self, n):
                return list("xyz"[:n])
        s = Nominal()._setup(x, MockProperty())
        assert s(x) == ["x", "y", "z", "y"]

    def test_object_list(self, x):
        vs = ["x", "y", "z"]
        s = Nominal(vs)._setup(x, ObjectProperty())
        assert s(x) == ["x", "y", "z", "y"]

    def test_object_dict(self, x):
        vs = {"a": "x", "b": "y", "c": "z"}
        s = Nominal(vs)._setup(x, ObjectProperty())
        assert s(x) == ["x", "z", "y", "z"]

    def test_object_order(self, x):
        vs = ["x", "y", "z"]
        s = Nominal(vs, order=["c", "a", "b"])._setup(x, ObjectProperty())
        assert s(x) == ["y", "x", "z", "x"]

    def test_object_order_subset(self, x):
        vs = ["x", "y"]
        s = Nominal(vs, order=["a", "c"])._setup(x, ObjectProperty())
        assert s(x) == ["x", "y", None, "y"]

    def test_objects_that_are_weird(self, x):
        vs = [("x", 1), (None, None, 0), {}]
        s = Nominal(vs)._setup(x, ObjectProperty())
        assert s(x) == [vs[0], vs[1], vs[2], vs[1]]

    def test_alpha_default(self, x):
        s = Nominal()._setup(x, Alpha())
        assert_array_equal(s(x), [.95, .625, .3, .625])

    def test_fill(self):
        x = pd.Series(["a", "a", "b", "a"], name="x")
        s = Nominal()._setup(x, Fill())
        assert_array_equal(s(x), [True, True, False, True])

    def test_fill_dict(self):
        x = pd.Series(["a", "a", "b", "a"], name="x")
        vs = {"a": False, "b": True}
        s = Nominal(vs)._setup(x, Fill())
        assert_array_equal(s(x), [False, False, True, False])

    def test_fill_nunique_warning(self):
        x = pd.Series(["a", "b", "c", "a", "b"], name="x")
        with pytest.warns(UserWarning, match="The variable assigned to fill"):
            s = Nominal()._setup(x, Fill())
        assert_array_equal(s(x), [True, False, True, True, False])

    def test_interval_defaults(self, x):
        class MockProperty(IntervalProperty):
            _default_range = (1, 2)
        s = Nominal()._setup(x, MockProperty())
        assert_array_equal(s(x), [2, 1.5, 1, 1.5])

    def test_interval_tuple(self, x):
        s = Nominal((1, 2))._setup(x, IntervalProperty())
        assert_array_equal(s(x), [2, 1.5, 1, 1.5])

    def test_interval_tuple_numeric(self, y):
        s = Nominal((1, 2))._setup(y, IntervalProperty())
        assert_array_equal(s(y), [1.5, 2, 1, 2])

    def test_interval_list(self, x):
        vs = [2, 5, 4]
        s = Nominal(vs)._setup(x, IntervalProperty())
        assert_array_equal(s(x), [2, 5, 4, 5])

    def test_interval_dict(self, x):
        vs = {"a": 3, "b": 4, "c": 6}
        s = Nominal(vs)._setup(x, IntervalProperty())
        assert_array_equal(s(x), [3, 6, 4, 6])

    def test_interval_with_transform(self, x):
        class MockProperty(IntervalProperty):
            _forward = np.square
            _inverse = np.sqrt
        s = Nominal((2, 4))._setup(x, MockProperty())
        assert_array_equal(s(x), [4, np.sqrt(10), 2, np.sqrt(10)])


class TestTemporal:

    @pytest.fixture
    def t(self):
        dates = pd.to_datetime(["1972-09-27", "1975-06-24", "1980-12-14"])
        return pd.Series(dates, name="x")

    @pytest.fixture
    def x(self, t):
        return pd.Series(mpl.dates.date2num(t), name=t.name)

    def test_coordinate_defaults(self, t, x):
        s = Temporal()._setup(t, Coordinate())
        assert_array_equal(s(t), x)

    def test_interval_defaults(self, t, x):
        s = Temporal()._setup(t, IntervalProperty())
        normed = (x - x.min()) / (x.max() - x.min())
        assert_array_equal(s(t), normed)

    def test_interval_with_range(self, t, x):
        values = (1, 3)
        s = Temporal((1, 3))._setup(t, IntervalProperty())
        normed = (x - x.min()) / (x.max() - x.min())
        expected = normed * (values[1] - values[0]) + values[0]
        assert_array_equal(s(t), expected)

    def test_interval_with_norm(self, t, x):
        norm = t[1], t[2]
        s = Temporal(norm=norm)._setup(t, IntervalProperty())
        n = mpl.dates.date2num(norm)
        normed = (x - n[0]) / (n[1] - n[0])
        assert_array_equal(s(t), normed)

    def test_color_defaults(self, t, x):
        cmap = color_palette("ch:", as_cmap=True)
        s = Temporal()._setup(t, Color())
        normed = (x - x.min()) / (x.max() - x.min())
        assert_array_equal(s(t), cmap(normed)[:, :3])  # FIXME RGBA

    def test_color_named_values(self, t, x):
        name = "viridis"
        cmap = color_palette(name, as_cmap=True)
        s = Temporal(name)._setup(t, Color())
        normed = (x - x.min()) / (x.max() - x.min())
        assert_array_equal(s(t), cmap(normed)[:, :3])  # FIXME RGBA

    def test_coordinate_axis(self, t, x):
        ax = mpl.figure.Figure().subplots()
        s = Temporal()._setup(t, Coordinate(), ax.xaxis)
        assert_array_equal(s(t), x)
        locator = ax.xaxis.get_major_locator()
        formatter = ax.xaxis.get_major_formatter()
        assert isinstance(locator, mpl.dates.AutoDateLocator)
        assert isinstance(formatter, mpl.dates.AutoDateFormatter)

    @pytest.mark.skipif(
        Version(mpl.__version__) < Version("3.3.0"),
        reason="Test requires new matplotlib date epoch."
    )
    def test_tick_locator(self, t):
        locator = mpl.dates.YearLocator(month=3, day=15)
        s = Temporal().tick(locator)
        a = PseudoAxis(s._setup(t, Coordinate())._matplotlib_scale)
        a.set_view_interval(0, 365)
        assert 73 in a.major.locator()

    def test_tick_upto(self, t, x):
        n = 8
        ax = mpl.figure.Figure().subplots()
        Temporal().tick(upto=n)._setup(t, Coordinate(), ax.xaxis)
        locator = ax.xaxis.get_major_locator()
        assert set(locator.maxticks.values()) == {n}

    @pytest.mark.skipif(
        Version(mpl.__version__) < Version("3.3.0"),
        reason="Test requires new matplotlib date epoch."
    )
    def test_label_formatter(self, t):
        formatter = mpl.dates.DateFormatter("%Y")
        s = Temporal().label(formatter)
        a = PseudoAxis(s._setup(t, Coordinate())._matplotlib_scale)
        a.set_view_interval(10, 1000)
        label, = a.major.formatter.format_ticks([100])
        assert label == "1970"

    def test_label_concise(self, t, x):
        ax = mpl.figure.Figure().subplots()
        Temporal().label(concise=True)._setup(t, Coordinate(), ax.xaxis)
        formatter = ax.xaxis.get_major_formatter()
...
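These tests exercise seaborn's internal scale objects. The short standalone sketch below mirrors test_coordinate_transform and test_interval_with_range from above; it assumes the private seaborn._core modules introduced in seaborn 0.12, whose API may change without notice.

# Sketch of the Continuous scale -> property mapping the tests verify;
# uses seaborn's private _core modules, so treat as illustrative only.
import pandas as pd
from seaborn._core.scales import Continuous
from seaborn._core.properties import Coordinate, IntervalProperty

x = pd.Series([1, 3, 9], name="x", dtype=float)

coord = Continuous(trans="log")._setup(x, Coordinate())
print(coord(x))   # log10-transformed coordinates

size = Continuous((1, 3))._setup(x, IntervalProperty())
print(size(x))    # values rescaled into the (1, 3) interval: [1, 1.5, 3]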
trace_setup.py
Source:trace_setup.py
from enum import Enum
from pathlib import Path
from subprocess import CalledProcessError, check_output
import yaml
import re
import cxxfilt


class BinaryAlreadyAddedError(Exception):
    def __init__(self, message=''):
        super().__init__(message)


class BinaryNotExistsError(Exception):
    def __init__(self, message=''):
        super().__init__(message)


class ConfigFileError(Exception):
    def __init__(self, message=''):
        super().__init__(message)


class FunctionNotInBinaryError(Exception):
    def __init__(self, message=''):
        super().__init__(message)


class BuiltInNotExistsError(Exception):
    def __init__(self, message=''):
        super().__init__(message)


class Setup:
    def __init__(self):
        self._setup = {}

    # initialize app and its functions for tracing
    def initialize_binary(self, path):
        if path in self._setup:
            raise BinaryAlreadyAddedError('Binary at {} already added'.format(path))
        try:
            symbols = check_output(['nm', path]).decode().rstrip().split('\n')
        except CalledProcessError:
            raise BinaryNotExistsError
        functions = [symbol.split()[-1] for symbol in symbols]
        init_state = {}
        for function in functions:
            try:
                name = cxxfilt.demangle(function)
            except cxxfilt.InvalidName:
                name = function
            init_state[name] = {
                'mangled': function,
                'traced': False,
                'parameters': {},
                'offset': 0
            }
        self._setup[path] = init_state

    # search offset after setting stack pointer for built-in function
    def get_offset_for_built_in(self, func_name):
        offset = 0
        try:
            # search address of function in symbol map
            sym_address = check_output(
                ['sudo',
                 'grep',
                 '-A1',
                 '-w',
                 func_name,
                 '/proc/kallsyms']
            ).decode().rstrip().split('\n')
            if len(sym_address) == 2:
                # address of given function
                start_address = -1
                start_address_match = re.search(r'^([a-f0-9]+)', sym_address[0])
                if start_address_match:
                    start_address = int(start_address_match.group(1), base=16)
                # address of following symbol
                stop_address = -1
                stop_address_match = re.search(r'^([a-f0-9]+)', sym_address[1])
                if stop_address_match:
                    stop_address = int(stop_address_match.group(1), base=16)
                if start_address >= 0 and stop_address >= 0:
                    # get instructions of function
                    objdump_out = check_output(
                        ['sudo',
                         'objdump',
                         '--prefix-addresses',
                         '-d',
                         '--start-address=0x{:X}'.format(start_address),
                         '--stop-address=0x{:X}'.format(stop_address),
                         '/proc/kcore']
                    ).decode().rstrip().split('\n')
                    # search first instruction after function prologue
                    first = False
                    second = False
                    for line in objdump_out:
                        if first and second:
                            func_offset = re.search(r'(0x[a-f0-9]+)', line)
                            if func_offset:
                                # needed offset is difference between function's base address and
                                # address of first instruction after function prologue
                                offset = int(func_offset.group(1), base=16) - start_address
                                break
                        # second instruction of function prologue
                        if first and re.search(r'mov\s+%rsp,%rbp', line):
                            second = True
                        # first instruction of function prologue
                        if not(first) and re.search(r'push\s+%rbp', line):
                            first = True
        except CalledProcessError:
            raise BuiltInNotExistsError
        return offset

    # initialize built-in function to be traced
    def initialize_built_in(self, func_name):
        if 'built-ins' not in self._setup:
            self._setup['built-ins'] = {}
        try:
            offset = self.get_offset_for_built_in(func_name)
            self._setup['built-ins'][func_name] = {
                'traced': True,
                'parameters': {},
                'offset': offset
            }
        except BuiltInNotExistsError:
            raise

    # Remove application from getting traced
    def remove_app(self, app):
        del self._setup[app]

    # Returns apps currently saved
    def get_apps(self):
        return list(self._setup.keys())

    # Return functions and their state of a given application
    def get_setup_of_app(self, app):
        return self._setup[app]

    # search offset after setting stack pointer for user function
    def get_offset_for_function(self, app_name, func_name):
        offset = 0
        # get instructions of function
        try:
            objdump_out = check_output(['objdump', "--disassemble=" + func_name, "--prefix-addresses", app_name]).decode().rstrip().split('\n')
        except CalledProcessError:
            raise FunctionNotInBinaryError
        first = False
        second = False
        for line in objdump_out:
            if first and second:
                # first instruction after function prologue with offset
                func_offset = re.search(r'<' + func_name + r'\+(0x[a-f0-9]+)>', line)
                if func_offset:
                    offset = int(func_offset.group(1), base=16)
                    break
            # second instruction of function prologue
            if first and re.search(r'mov\s+%rsp,%rbp', line):
                second = True
            # first instruction of function prologue
            if not(first) and re.search(r'push\s+%rbp', line):
                first = True
        return offset

    # Sets up a function to be traced
    def setup_function_to_trace(self, app, function):
        try:
            self._setup[app][function]['traced'] = True
            offset = self.get_offset_for_function(app, function)
            self._setup[app][function]['offset'] = offset
        except KeyError:
            for func_name in self._setup[app]:
                if self._setup[app][func_name]['mangled'] == function:
                    self._setup[app][func_name]['traced'] = True
                    offset = self.get_offset_for_function(app, function)
                    self._setup[app][func_name]['offset'] = offset
                    return
            raise FunctionNotInBinaryError(
                'No function named {} was found in {}'.format(function, app)
            )

    # Removes a function from traced ones
    def remove_function_from_trace(self, app, function):
        try:
            self._setup[app][function]['traced'] = False
        except KeyError:
            for func_name in self._setup[app]:
                if self._setup[app][func_name]['mangled'] == function:
                    self._setup[app][func_name]['traced'] = False
                    return
            raise FunctionNotInBinaryError(
                'No function named {} was found in {}'.format(function, app)
            )

    # Returns the indexes where a parameter is set for tracing
    def get_parameters(self, app, function):
        return self._setup[app][function]['parameters']

    # Sets up a parameter to be traced
    def add_parameter(self, app, function, index, format):
        self._setup[app][function]['parameters'][index] = format

    # Removes a parameter from traced ones
    def remove_parameter(self, app, function, index):
        del self._setup[app][function]['parameters'][index]

    # Convert dictionary of functions to trace into properly
    # structured list of args to be used by the trace tool
    def generate_bcc_args(self):
        arguments = []
        for app in self._setup:
            for function in self._setup[app]:
                if self._setup[app][function]['traced']:
                    if app == 'built-ins':
                        argument = '{}+0x{:X}'.format(function, self._setup[app][function]['offset'])
                    else:
                        argument = '{}:{}+0x{:X}'.format(app, self._setup[app][function]['mangled'], self._setup[app][function]['offset'])
                    params = self._setup[app][function]['parameters']
                    if params:
                        argument = '{} "{}", {}'.format(
                            argument,
                            ' '.join([params[index] for index in params]),
                            ', '.join(['arg{}'.format(index) for index in params]))
                    arguments.append(argument)
        return arguments

    def load_from_file(self, path):
        try:
            content = Path(path).read_text()
        except FileNotFoundError:
            raise ConfigFileError('Could not find config file at {}'.format(path))
        except IsADirectoryError:
            raise ConfigFileError('{} is a directory, not a file'.format(path))
        try:
            config = yaml.safe_load(content)
        except (yaml.parser.ParserError, yaml.scanner.ScannerError):
            raise ConfigFileError('File needs to be yaml format')
        err_message = ''
        for app in config:
            try:
                self.initialize_binary(app)
                for function in config[app]:
                    self.setup_function_to_trace(app, function)
                    for index in config[app][function]:
                        self.add_parameter(app, function, index, config[app][function][index])
            except BinaryNotExistsError:
                try:
                    self.initialize_built_in(app)
                    for index in config[app]:
                        self.add_parameter('built-ins', app, index, config[app][index])
                    err_message = 'Some binaries were not found so they were assumed to be built-in functions'
                except BuiltInNotExistsError:
                    err_message = 'Some binaries were not found, and neither as built-in functions'
            except TypeError:
                raise ConfigFileError('File format is incorrect')
...
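A short sketch of how this Setup class might be driven is below. The binary path, function name, offset in the sample output, and the YAML layout are illustrative assumptions consistent with setup_function_to_trace, generate_bcc_args, and load_from_file above; nm and objdump must be available on the system.

# Illustrative driver; '/usr/local/bin/myapp' and 'process_request' are
# hypothetical names used only to show the call sequence.
setup = Setup()
setup.initialize_binary('/usr/local/bin/myapp')
setup.setup_function_to_trace('/usr/local/bin/myapp', 'process_request')
setup.add_parameter('/usr/local/bin/myapp', 'process_request', 1, '%d')
print(setup.generate_bcc_args())
# e.g. ['/usr/local/bin/myapp:process_request+0x8 "%d", arg1']

# The same setup expressed as a YAML config for load_from_file
# (binary -> function -> {parameter index: format string}):
#
#   /usr/local/bin/myapp:
#     process_request:
#       1: '%d'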
Demux16Way.py
Source:Demux16Way.py
...
        self.gate5 = Demux.Demux()
        self.gate6 = Demux.Demux()
        self.gate7 = Demux.Demux()
        self.gate8 = Demux.Demux()

    def _setup(self):
        self.gate0.a = self.a
        self.gate0.select = self.select[0:3]
        self.b = self.gate0.outaf()
        self.c = self.gate0.outbf()
        self.d = self.gate0.outcf()
        self.e = self.gate0.outdf()
        self.f = self.gate0.outef()
        self.g = self.gate0.outff()
        self.h = self.gate0.outgf()
        self.i = self.gate0.outhf()
        self.gate1.a = self.b
        self.gate1.select = self.select[3:4]
        self.outa = self.gate1.outaf()
        self.outb = self.gate1.outbf()
        self.gate2.a = self.c
        self.gate2.select = self.select[3:4]
        self.outc = self.gate2.outaf()
        self.outd = self.gate2.outbf()
        self.gate3.a = self.d
        self.gate3.select = self.select[3:4]
        self.oute = self.gate3.outaf()
        self.outf = self.gate3.outbf()
        self.gate4.a = self.e
        self.gate4.select = self.select[3:4]
        self.outg = self.gate4.outaf()
        self.outh = self.gate4.outbf()
        self.gate5.a = self.f
        self.gate5.select = self.select[3:4]
        self.outi = self.gate5.outaf()
        self.outj = self.gate5.outbf()
        self.gate6.a = self.g
        self.gate6.select = self.select[3:4]
        self.outk = self.gate6.outaf()
        self.outl = self.gate6.outbf()
        self.gate7.a = self.h
        self.gate7.select = self.select[3:4]
        self.outm = self.gate7.outaf()
        self.outn = self.gate7.outbf()
        self.gate8.a = self.i
        self.gate8.select = self.select[3:4]
        self.outo = self.gate8.outaf()
        self.outp = self.gate8.outbf()

    def outaf(self):
        self._setup()
        return self.outa

    def outbf(self):
        self._setup()
        return self.outb

    def outcf(self):
        self._setup()
        return self.outc

    def outdf(self):
        self._setup()
        return self.outd

    def outef(self):
        self._setup()
        return self.oute

    def outff(self):
        self._setup()
        return self.outf

    def outgf(self):
        self._setup()
        return self.outg

    def outhf(self):
        self._setup()
        return self.outh

    def outif(self):
        self._setup()
        return self.outi

    def outjf(self):
        self._setup()
        return self.outj

    def outkf(self):
        self._setup()
        return self.outk

    def outlf(self):
        self._setup()
        return self.outl

    def outmf(self):
        self._setup()
        return self.outm

    def outnf(self):
        self._setup()
        return self.outn

    def outof(self):
        self._setup()
        return self.outo

    def outpf(self):
        self._setup()
...
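A hypothetical wiring sketch follows. Because the top of the class is truncated, the module name, constructor, and the exact width of select are assumptions; _setup above implies that a is the routed input and select is a 4-bit sequence whose first three bits drive the 8-way stage and whose last bit drives the final 2-way stage.

# Hypothetical usage; assumes a no-argument constructor and plain `a` and
# `select` attributes, as _setup above implies.
import Demux16Way

demux = Demux16Way.Demux16Way()
demux.a = 1                    # signal to route
demux.select = [0, 1, 1, 0]    # select[0:3] picks the 8-way branch, select[3:4] the final split
first_two = [demux.outaf(), demux.outbf()]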