Best Python code snippet using pypom_form_python
check_gradient.py
Source:check_gradient.py
...
        for i in range(self.nout):
            cell = get_output_cell(fn, self.nin, i)
            self.gfns.append(self.prepare_func(cell, grad_wraper))
        self.input_selector = input_selector
        self.adjust_input_selector()
        if output_selector:
            self.output_selector = output_selector
        else:
            self.output_selector = [i for i in range(self.nout)]

    def adjust_input_selector(self):
        raise Exception('Not implemented')

    def sampling(self, superset):
        # -1 stands for all
        if self.sampling_times == -1 or self.sampling_times >= len(superset):
            return superset
        np.random.seed(0)
        ret = np.random.choice(superset, self.sampling_times, replace=False)
        return list(ret)

    def prepare_func(self, f, grad_wraper=None):
        """Return a function that executes 'f'.

        Args:
            f: the function.
            grad_wraper: grad op

        Returns:
            a function that will be evaluated in both Graph and PyNative mode
        """
        set_block_param_with_rand(f, get_uniform_with_shape)
        if context.get_context("mode") == context.PYNATIVE_MODE:
            if grad_wraper:
                def func_backward_pynative(*inputs):
                    net = gen_grad_net(f, grad_wraper, len(inputs) - 1, inputs[-1])

                    def _func_pynative(*inputs):
                        return net(*inputs)

                    return _func_pynative(*(inputs[:-1]))
                return func_backward_pynative

            def func_forward_pynative(*inputs):
                net = gen_net(f, len(inputs))

                def _func_pynative(*inputs):
                    return net(*inputs)

                return _func_pynative(*inputs)
            return func_forward_pynative

        if grad_wraper:
            def func_backward_graph(*inputs):
                set_block_phase(f, 'train')
                net = gen_grad_net(f, grad_wraper, len(inputs) - 1, inputs[-1])
                return net(*(inputs[:-1]))
            return func_backward_graph

        def func_forward_graph(*inputs):
            set_block_phase(f, 'predict')
            net = gen_net(f, len(inputs))
            return net(*inputs)
        return func_forward_graph

    def to_numpy(self, x):
        if isinstance(x, (Tensor, _c_expression.Tensor)):
            return x.asnumpy()
        return x

    def to_numpy_and_scale(self, x):
        if isinstance(x, (Tensor, _c_expression.Tensor)):
            return x.asnumpy() * self.delta
        return x * self.delta

    def wrap(self, x):
        if isinstance(x, tuple):
            return x
        return (x,)

    def get_sens(self, i):
        raise Exception('Not implemented')

    def get_ith_elem(self, c, i):
        if isinstance(c, (list, tuple)):
            return c[i]
        return c

    def compute_theoretical(self, i):
        args = list(self.args)
        args.append(self.get_sens(i))
        print('GradChecker.compute_theoretical.args', args)
        gout = self.gfns[i](*args)
        gout = self.wrap(gout)
        self.gout = [self.to_numpy_and_scale(g) if isinstance(g, _c_expression.Tensor)
                     else self.to_numpy_and_scale(np.array(g)) for g in gout]
        print('GradChecker.compute_theoretical.gout', self.gout)

    def check_against_numeric(self, out_index):
        raise Exception('Not implemented')

    def check_against_numeric_one_step(self, args, index, out_index):
        if isinstance(args, ParameterTuple):
            x = args[index].data.asnumpy()
        else:
            x = args[index]
        x_shape = x.shape
        x_size = np.product(x_shape)
        for row in self.sampling(list(range(x_size))):
            original = x.ravel().view()[row]
            x.ravel().view()[row] += self.delta
            y_pos = self.to_numpy_and_scale(self.get_ith_elem(self.fn(*self.args), out_index))
            x.ravel().view()[row] = original
            x.ravel().view()[row] -= self.delta
            y_neg = self.to_numpy_and_scale(self.get_ith_elem(self.fn(*self.args), out_index))
            x.ravel().view()[row] = original
            diff = (y_pos - y_neg) / self.scale
            numeric_grad = diff.sum()
            insert_virtual_grad = False
            if numeric_grad == 0 and not insert_virtual_grad:
                self.gout.insert(0, 0)
                insert_virtual_grad = True
                continue
            theoretical_grad = self.gout[index].ravel().view()[row]
            if np.fabs(numeric_grad - theoretical_grad).max() > self.max_error:
                raise Exception(f'Gradients of df{out_index}/darg{index},{row} do not match, '
                                f'expect {numeric_grad}, actual {theoretical_grad}')
            print(f'GradChecker.check_against_numeric.numeric df{out_index}/darg{index}: '
                  f'{numeric_grad}, theoretical: {theoretical_grad}')

    # approximate accuracy, but efficient
    def assert_match(self):
        print(f'==========================={self.fn.__name__}==================================')
        print('GradChecker.delta', self.delta)
        print('GradChecker.max_error', self.max_error)
        print('GradChecker.args', self.args)
        print('GradChecker.out', self.out)
        print('GradChecker.nin', self.nin)
        print('GradChecker.nout', self.nout)
        for i in self.output_selector:
            self.compute_theoretical(i)
            self.check_against_numeric(i)

    def check_against_numeric_jacobian(self, out_index):
        raise Exception('Not implemented')

    def check_against_numeric_jacobian_one_step(self, args, index, out_index):
        if isinstance(args, ParameterTuple):
            x = args[index].data.asnumpy()
        else:
            x = args[index]
        x_shape = x.shape
        x_size = np.product(x_shape)
        dy = self.to_numpy(self.get_sens(out_index))
        dy_size = np.product(dy.shape)
        numeric_jacobian = np.zeros((x_size, dy_size), dtype=self.to_numpy(x).dtype)
        for row in range(x_size):
            original = x.ravel().view()[row]
            x.ravel().view()[row] += self.delta
            y_pos = self.to_numpy_and_scale(self.get_ith_elem(self.fn(*self.args), out_index))
            x.ravel().view()[row] = original
            x.ravel().view()[row] -= self.delta
            y_neg = self.to_numpy_and_scale(self.get_ith_elem(self.fn(*self.args), out_index))
            x.ravel().view()[row] = original
            diff = (y_pos - y_neg) / self.scale
            numeric_jacobian[row, :] = diff.ravel().view(numeric_jacobian.dtype)
        dy_mask = np.zeros(dy.shape, dtype=dy.dtype)
        theoretical_jacobian = np.zeros((x_size, dy_size), dtype=self.to_numpy(x).dtype)
        for col in range(dy_size):
            col_jacobian = self.compute_theoretical_jacobian(index, out_index, dy_mask, col)
            theoretical_jacobian[:, col] = col_jacobian.ravel().view(theoretical_jacobian.dtype)
        if np.fabs(numeric_jacobian - theoretical_jacobian).max() > self.max_error:
            raise Exception(f'GradChecker.check_against_numeric_jacobian_one_step expect {out_index}/darg{index}: '
                            f'{numeric_jacobian}, actual: {theoretical_jacobian}')
        print(f'GradChecker.check_against_numeric_jacobian_one_step.numeric jacobian of output{out_index}/darg{index}: '
              f'{numeric_jacobian}, theoretical: {theoretical_jacobian}')

    def compute_theoretical_jacobian(self, index, out_index, dy_mask, jacobian_col):
        if (out_index, jacobian_col, index) in self.theoretical_jacobian_cache:
            return self.theoretical_jacobian_cache[(out_index, jacobian_col, index)]
        dy_mask.ravel().view()[jacobian_col] = 1.0
        args = list(self.args)
        args.append(Tensor(dy_mask))
        print('GradChecker.compute_theoretical.args', args)
        gout = self.wrap(self.gfns[out_index](*args))
        gout = [self.to_numpy_and_scale(g) if isinstance(g, _c_expression.Tensor)
                else self.to_numpy_and_scale(np.array(g)) for g in gout]
        print('GradChecker.compute_theoretical.gout', gout)
        dy_mask.ravel().view()[jacobian_col] = 0.0
        for i, g in enumerate(gout):
            self.theoretical_jacobian_cache[(out_index, jacobian_col, i)] = g
        return gout[index]

    # more accurate, but inefficient
    def assert_match_jacobian(self):
        print(f'==========================={self.fn.__name__}==================================')
        print('GradChecker.delta', self.delta)
        print('GradChecker.max_error', self.max_error)
        print('GradChecker.args', self.args)
        print('GradChecker.out', self.out)
        print('GradChecker.nin', self.nin)
        print('GradChecker.nout', self.nout)
        self.theoretical_jacobian_cache = {}
        for i in self.output_selector:
            self.check_against_numeric_jacobian(i)


class ScalarGradChecker(_GradChecker):
    def __init__(self,
                 fn: Callable,
                 args: List[Any],
                 delta: float = 1e-3,
                 max_error: float = 1e-3,
                 input_selector=None,
                 output_selector=None,
                 sampling_times=-1,
                 reduce_output=False) -> None:
        grad_op = GradOperation(get_all=True, sens_param=True)
        super(ScalarGradChecker, self).__init__(fn, grad_op, args, delta, max_error, input_selector,
                                                output_selector, sampling_times, reduce_output)

    def adjust_input_selector(self):
        if not self.input_selector:
            self.input_selector = [i for i in range(self.nin)]

    def get_sens(self, i):
        return 1.0

    def check_against_numeric(self, out_index):
        args = list(self.args)
        for i in self.sampling(self.input_selector):
            print(f'GradChecker.check_against_numeric.args[{i}]', args[i])
            args_pos = args[:i] + [args[i] + self.delta] + args[i + 1:]
            args_neg = args[:i] + [args[i] - self.delta] + args[i + 1:]
            y_pos = self.to_numpy_and_scale(self.get_ith_elem(self.fn(*args_pos), out_index))
            y_neg = self.to_numpy_and_scale(self.get_ith_elem(self.fn(*args_neg), out_index))
            diff = (y_pos - y_neg) / self.scale
            if np.fabs(diff - self.gout[i]).max() > self.max_error:
                raise Exception(f'Gradients of df{out_index}/darg{i} do not match, '
                                f'expect {diff}, actual {self.gout[i]}')
            print(f'GradChecker.check_against_numeric.numeric df{out_index}/darg{i}: {diff}, '
                  f'theoretical: {self.gout[i]}')

    # for scalar, jacobian is same with gradient
    def assert_match_jacobian(self):
        self.assert_match()


class OperationGradChecker(_GradChecker):
    def __init__(self,
                 fn: Callable,
                 args: List[Any],
                 delta: float = 1e-3,
                 max_error: float = 1e-3,
                 input_selector=None,
                 output_selector=None,
                 sampling_times=-1,
                 reduce_output=False) -> None:
        grad_op = GradOperation(get_all=True, sens_param=True)
        super(OperationGradChecker, self).__init__(fn, grad_op, args, delta, max_error, input_selector,
                                                   output_selector, sampling_times, reduce_output)

    def get_sens(self, i):
        return Tensor(np.ones_like(self.out[i].asnumpy()))

    def adjust_input_selector(self):
        if not self.input_selector:
            self.input_selector = [i for i in range(self.nin)]

    def check_against_numeric(self, out_index):
        args = [self.to_numpy(arg) for arg in self.args]
        for i in self.input_selector:
            self.check_against_numeric_one_step(args, i, out_index)

    def check_against_numeric_jacobian(self, out_index):
        args = [self.to_numpy(arg) for arg in self.args]
        for i in self.input_selector:
            self.check_against_numeric_jacobian_one_step(args, i, out_index)


class NNGradChecker(_GradChecker):
    def __init__(self,
                 fn: Callable,
                 args: List[Any],
                 delta: float = 1e-3,
                 max_error: float = 1e-3,
                 input_selector=None,
                 output_selector=None,
                 sampling_times=-1,
                 reduce_output=False) -> None:
        grad_op = GradOperation(get_by_list=True, sens_param=True)
        self.params = ParameterTuple(fn.trainable_params())
        super(NNGradChecker, self).__init__(fn, grad_op, args, delta, max_error, input_selector,
                                            output_selector, sampling_times, reduce_output)

    def get_sens(self, i):
        return Tensor(np.ones_like(self.out[i].asnumpy()))

    def adjust_input_selector(self):
        if not self.input_selector:
            self.input_selector = [i for i in range(len(self.params))]

    def check_against_numeric(self, out_index):
        for i in self.input_selector:
            self.check_against_numeric_one_step(self.params, i, out_index)

    def check_against_numeric_jacobian(self, out_index):
        for i in self.input_selector:
            self.check_against_numeric_jacobian_one_step(self.params, i, out_index)


def check_gradient(fn, *args, delta=1e-3, max_error=1e-3,
                   grad_checker_class=OperationGradChecker,
                   input_selector=None,
                   output_selector=None,
                   sampling_times=-1,
                   reduce_output=False):
    ...
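The numeric side of the comparison is a plain central difference: each selected input element is perturbed by +delta and -delta, the scaled output difference (y_pos - y_neg) / scale is accumulated, and the result is matched against the gradient that GradOperation returns, up to max_error. Below is a minimal sketch of how the check_gradient entry point above might be driven; the MulCell block, its shapes, and the random inputs are made up for illustration, and check_gradient and OperationGradChecker are assumed to be importable from this module.

import numpy as np
from mindspore import Tensor, nn
from mindspore.ops import operations as P

# Hypothetical block used only to illustrate the call; any differentiable Cell would do.
class MulCell(nn.Cell):
    def __init__(self):
        super(MulCell, self).__init__()
        self.mul = P.Mul()

    def construct(self, x, y):
        return self.mul(x, y)

x = Tensor(np.random.rand(2, 2).astype(np.float32))
y = Tensor(np.random.rand(2, 2).astype(np.float32))

# Compares the autodiff gradient against the +/- delta central difference for
# 10 sampled elements per input; raises if any entry differs by more than max_error.
check_gradient(MulCell(), x, y,
               delta=1e-3,
               max_error=1e-3,
               grad_checker_class=OperationGradChecker,
               sampling_times=10)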
config_util.py
Source:config_util.py
# Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Utils for verification config."""
import numpy as np

from . import keyword
from .other_util import select_from_config_tuple


def get_input_config(d):
    """
    Get input config.

    Args:
        d (tuple): Config item in form of ([2, 2], {'dtype': np.float32, 'scale': 1}).

    Returns:
        Tuple, (shape, dtype, scale).
    """
    s = select_from_config_tuple(d, 0, d)
    dtype = np.float32
    scale = 1
    if isinstance(d, tuple) and isinstance(d[-1], dict):
        ext_config = d[-1]
        dtype = ext_config.get(keyword.dtype, np.float32)
        scale = ext_config.get(keyword.scale, 1)
    return s, dtype, scale


def get_expect_config(d):
    """
    Get expect config.

    Args:
        d (tuple): Config item in form of (file_path, {'dtype': np.float32,
            'scale': 1, 'max_error': 1e-3, 'check_tolerance': False, 'relative_tolerance': 0.0,
            'absolute_tolerance': 0.0}).

    Returns:
        Tuple, (file_path, dtype, scale, max_error, check_tolerance, relative_tolerance, absolute_tolerance).
    """
    s = select_from_config_tuple(d, 0, d)
    dtype = np.float32
    scale = 1
    max_error = 1e-3
    check_tolerance = False
    relative_tolerance = 0.0
    absolute_tolerance = 0.0
    if isinstance(d, tuple) and isinstance(d[-1], dict):
        ext_config = d[-1]
        dtype = ext_config.get(keyword.dtype, np.float32)
        scale = ext_config.get(keyword.scale, 1)
        max_error = ext_config.get(keyword.max_error, 1e-3)
        check_tolerance = ext_config.get(keyword.check_tolerance, False)
        relative_tolerance = ext_config.get(keyword.relative_tolerance, 0.0)
        absolute_tolerance = ext_config.get(keyword.absolute_tolerance, 0.0)
    return s, dtype, scale, max_error, check_tolerance, relative_tolerance, absolute_tolerance


def get_function_config(function):
    """
    Get function config.

    Args:
        function (dict): Config item in form of {'delta': 1e-3, 'max_error': 1e-3, 'input_selector': [0, 1],
            'output_selector': 0, 'sampling_times': 10, 'reduce_output': True, 'init_param_with': None,
            'split_outputs': True, 'exception': Exception}.

    Returns:
        Tuple, (delta, max_error, input_selector, output_selector, sampling_times,
        reduce_output, init_param_with, split_outputs, exception, error_keywords).
    """
    delta = function.get(keyword.delta, 1e-3)
    max_error = function.get(keyword.max_error, 1e-3)
    input_selector = function.get(keyword.input_selector, [])
    output_selector = function.get(keyword.output_selector, [])
    sampling_times = function.get(keyword.sampling_times, -1)
    reduce_output = function.get(keyword.reduce_output, True)
    init_param_with = function.get(keyword.init_param_with, None)
    split_outputs = function.get(keyword.split_outputs, True)
    exception = function.get(keyword.exception, Exception)
    error_keywords = function.get(keyword.error_keywords, None)
    return delta, max_error, input_selector, output_selector, sampling_times, \
        reduce_output, init_param_with, split_outputs, exception, error_keywords


def get_grad_checking_options(function, inputs):
    """
    Get gradient checking options.

    Args:
        function (dict): Config item in form of {'block': XCell, 'delta': 1e-3, 'max_error': 1e-3, 'input_selector':
            [0, 1], 'output_selector': 0, 'sampling_times': 10, 'reduce_output': True,
            'init_param_with': None, 'split_outputs': True, 'exception': Exception}.
        inputs (dict): Config item in form of {'desc_inputs': [[2, 2]]}.

    Returns:
        Tuple, (f, args, delta, max_error, input_selector, output_selector, sampling_times, reduce_output).
    """
    f = function[keyword.block]
    args = inputs[keyword.desc_inputs]
    delta, max_error, input_selector, output_selector, sampling_times, reduce_output, _, _, _, _ = \
        get_function_config(function)
    ...
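get_function_config reads a flat dict and falls back to defaults for anything missing, so a verification entry only needs the keys it wants to override. A small sketch, assuming the keyword module maps each attribute to the matching string key (keyword.delta == 'delta', and so on); the entry itself is made up for illustration.

# Hypothetical verification entry: only delta, max_error and sampling_times are
# overridden; the remaining seven returned values come from the defaults above.
function = {
    'delta': 1e-2,
    'max_error': 1e-2,
    'sampling_times': 10,
}

(delta, max_error, input_selector, output_selector, sampling_times,
 reduce_output, init_param_with, split_outputs, exception, error_keywords) = get_function_config(function)

print(delta, sampling_times, reduce_output, exception)  # 0.01 10 True <class 'Exception'>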
conftest.py
Source:conftest.py
1"""Pytest fixture for the whatweb agent."""2import pytest3import json4import pathlib5from ostorlab.agent import definitions as agent_definitions6from ostorlab.runtimes import definitions as runtime_definitions7from ostorlab.utils import defintions8from ostorlab.agent import message as m9from agent import whatweb_agent10@pytest.fixture11def domain_msg():12 """Creates a dummy message of type v3.asset.domain_name for testing purposes."""13 input_selector = 'v3.asset.domain_name'14 input_data = {'name': 'ostorlab.co'}15 message = m.Message.from_data(selector=input_selector, data=input_data)16 return message17@pytest.fixture18def domain_msg_with_port_and_schema():19 """Creates a dummy message of type v3.asset.domain_name.service for testing purposes."""20 input_selector = 'v3.asset.domain_name.service'21 input_data = {'name': 'ostorlab.co', 'port': 80, 'schema': 'http'}22 message = m.Message.from_data(selector=input_selector, data=input_data)23 return message24@pytest.fixture25def link_msg():26 """Creates a dummy message of type v3.asset.link for testing purposes."""27 input_selector = 'v3.asset.link'28 input_data = {'url': 'http://ostorlab.co', 'method': 'GET'}29 message = m.Message.from_data(selector=input_selector, data=input_data)30 return message31@pytest.fixture32def ip_msg():33 """Creates a dummy message of type v3.asset.ip for testing purposes."""34 input_selector = 'v3.asset.ip'35 input_data = {'host': '192.168.0.76'}36 message = m.Message.from_data(selector=input_selector, data=input_data)37 return message38@pytest.fixture39def ip_msg_with_port_and_schema():40 """Creates a dummy message of type v3.asset.ip.v4.port.service for testing purposes."""41 input_selector = 'v3.asset.ip.v4.port.service'42 input_data = {'host': '192.168.0.0', 'port': 80, 'protocol': 'http'}43 message = m.Message.from_data(selector=input_selector, data=input_data)44 return message45@pytest.fixture46def ip_msg_with_port_schema_mask():47 """Creates a dummy message of type v3.asset.ip.v4.port.service for testing purposes."""48 input_selector = 'v3.asset.ip.v4.port.service'49 input_data = {'host': '192.168.0.0', 'port': 80, 'mask': '31', 'protocol': 'http'}50 message = m.Message.from_data(selector=input_selector, data=input_data)51 return message52@pytest.fixture53def ip_msg_with_port_schema_mask_2():54 """Creates a dummy message of type v3.asset.ip.v4.port.service for testing purposes."""55 input_selector = 'v3.asset.ip.v4.port.service'56 input_data = {'host': '192.168.0.0', 'port': 80, 'mask': '32', 'protocol': 'http'}57 message = m.Message.from_data(selector=input_selector, data=input_data)58 return message59@pytest.fixture(scope='function')60def whatweb_test_agent(agent_persist_mock):61 """WhatWeb Agent fixture for testing purposes."""62 del agent_persist_mock63 with (pathlib.Path(__file__).parent.parent / 'ostorlab.yaml').open() as yaml_o:64 agent_definition = agent_definitions.AgentDefinition.from_yaml(yaml_o)65 agent_settings = runtime_definitions.AgentSettings(66 key='whatweb',67 redis_url='redis://redis',68 args=[69 defintions.Arg(name='schema',70 type='string',71 value=json.dumps('https').encode()),72 defintions.Arg(name='port',73 type='number',74 value=json.dumps(443).encode())75 ])...