Best Python code snippet using assertpy_python
_optimize.py
Source: _optimize.py
from concurrent.futures import FIRST_COMPLETED
from concurrent.futures import Future
from concurrent.futures import ThreadPoolExecutor
from concurrent.futures import wait
import copy
import datetime
import gc
import itertools
import math
import os
import sys
from threading import Event
from threading import Thread
from typing import Any
from typing import Callable
from typing import cast
from typing import List
from typing import Optional
from typing import Sequence
from typing import Set
from typing import Tuple
from typing import Type
from typing import Union
import warnings

import optuna
from optuna import exceptions
from optuna import logging
from optuna import progress_bar as pbar_module
from optuna import storages
from optuna import trial as trial_module
from optuna.exceptions import ExperimentalWarning
from optuna.trial import FrozenTrial
from optuna.trial import TrialState

_logger = logging.get_logger(__name__)


def _optimize(
    study: "optuna.Study",
    func: "optuna.study.study.ObjectiveFuncType",
    n_trials: Optional[int] = None,
    timeout: Optional[float] = None,
    n_jobs: int = 1,
    catch: Tuple[Type[Exception], ...] = (),
    callbacks: Optional[List[Callable[["optuna.Study", FrozenTrial], None]]] = None,
    gc_after_trial: bool = False,
    show_progress_bar: bool = False,
) -> None:
    if not isinstance(catch, tuple):
        raise TypeError(
            "The catch argument is of type '{}' but must be a tuple.".format(type(catch).__name__)
        )

    if not study._optimize_lock.acquire(False):
        raise RuntimeError("Nested invocation of `Study.optimize` method isn't allowed.")

    # TODO(crcrpar): Make progress bar work when n_jobs != 1.
    progress_bar = pbar_module._ProgressBar(show_progress_bar and n_jobs == 1, n_trials, timeout)

    study._stop_flag = False

    try:
        if n_jobs == 1:
            _optimize_sequential(
                study,
                func,
                n_trials,
                timeout,
                catch,
                callbacks,
                gc_after_trial,
                reseed_sampler_rng=False,
                time_start=None,
                progress_bar=progress_bar,
            )
        else:
            if show_progress_bar:
                warnings.warn("Progress bar only supports serial execution (`n_jobs=1`).")

            if n_jobs == -1:
                n_jobs = os.cpu_count() or 1

            time_start = datetime.datetime.now()
            futures: Set[Future] = set()

            with ThreadPoolExecutor(max_workers=n_jobs) as executor:
                for n_submitted_trials in itertools.count():
                    if study._stop_flag:
                        break

                    if (
                        timeout is not None
                        and (datetime.datetime.now() - time_start).total_seconds() > timeout
                    ):
                        break

                    if n_trials is not None and n_submitted_trials >= n_trials:
                        break

                    if len(futures) >= n_jobs:
                        completed, futures = wait(futures, return_when=FIRST_COMPLETED)
                        # Raise if an exception occurred while executing the completed futures.
                        for f in completed:
                            f.result()

                    futures.add(
                        executor.submit(
                            _optimize_sequential,
                            study,
                            func,
                            1,  # n_trials: each future runs exactly one trial.
                            timeout,
                            catch,
                            callbacks,
                            gc_after_trial,
                            True,  # reseed_sampler_rng
                            time_start,
                            None,  # progress_bar
                        )
                    )
    finally:
        study._optimize_lock.release()
        progress_bar.close()


def _optimize_sequential(
    study: "optuna.Study",
    func: "optuna.study.study.ObjectiveFuncType",
    n_trials: Optional[int],
    timeout: Optional[float],
    catch: Tuple[Type[Exception], ...],
    callbacks: Optional[List[Callable[["optuna.Study", FrozenTrial], None]]],
    gc_after_trial: bool,
    reseed_sampler_rng: bool,
    time_start: Optional[datetime.datetime],
    progress_bar: Optional[pbar_module._ProgressBar],
) -> None:
    if reseed_sampler_rng:
        study.sampler.reseed_rng()

    i_trial = 0

    if time_start is None:
        time_start = datetime.datetime.now()

    while True:
        if study._stop_flag:
            break

        if n_trials is not None:
            if i_trial >= n_trials:
                break
            i_trial += 1

        if timeout is not None:
            elapsed_seconds = (datetime.datetime.now() - time_start).total_seconds()
            if elapsed_seconds >= timeout:
                break

        try:
            trial = _run_trial(study, func, catch)
        except Exception:
            raise
        finally:
            # The following lines mitigate memory problems that can occur in some
            # environments (e.g., services that use computing containers such as CircleCI).
            # Please refer to the following PR for further details:
            # https://github.com/optuna/optuna/pull/325.
            if gc_after_trial:
                gc.collect()

        if callbacks is not None:
            frozen_trial = copy.deepcopy(study._storage.get_trial(trial._trial_id))
            for callback in callbacks:
                callback(study, frozen_trial)

        if progress_bar is not None:
            progress_bar.update((datetime.datetime.now() - time_start).total_seconds())

    study._storage.remove_session()


def _run_trial(
    study: "optuna.Study",
    func: "optuna.study.study.ObjectiveFuncType",
    catch: Tuple[Type[Exception], ...],
) -> trial_module.Trial:
    with warnings.catch_warnings():
        warnings.simplefilter("ignore", ExperimentalWarning)
        optuna.storages.fail_stale_trials(study)

    trial = study.ask()

    state: Optional[TrialState] = None
    values: Optional[List[float]] = None
    func_err: Optional[Exception] = None
    func_err_fail_exc_info: Optional[Any] = None
    # Set to a string if `func` returns correctly but the return value violates assumptions.
    values_conversion_failure_message: Optional[str] = None
    stop_event: Optional[Event] = None
    thread: Optional[Thread] = None

    if study._storage.is_heartbeat_enabled():
        stop_event = Event()
        thread = Thread(
            target=_record_heartbeat, args=(trial._trial_id, study._storage, stop_event)
        )
        thread.start()

    try:
        value_or_values = func(trial)
    except exceptions.TrialPruned as e:
        # TODO(mamu): Handle multi-objective cases.
        state = TrialState.PRUNED
        func_err = e
    except Exception as e:
        state = TrialState.FAIL
        func_err = e
        func_err_fail_exc_info = sys.exc_info()
    else:
        # TODO(hvy): Avoid checking the values both here and inside `Study.tell`.
        values, values_conversion_failure_message = _check_and_convert_to_values(
            len(study.directions), value_or_values, trial.number
        )
        if values_conversion_failure_message is not None:
            state = TrialState.FAIL
        else:
            state = TrialState.COMPLETE

    if study._storage.is_heartbeat_enabled():
        assert stop_event is not None
        assert thread is not None
        stop_event.set()
        thread.join()

    # `Study.tell` may raise during trial post-processing.
    try:
        study.tell(trial, values=values, state=state)
    except Exception:
        raise
    finally:
        if state == TrialState.COMPLETE:
            study._log_completed_trial(trial, cast(List[float], values))
        elif state == TrialState.PRUNED:
            _logger.info("Trial {} pruned. {}".format(trial.number, str(func_err)))
        elif state == TrialState.FAIL:
            if func_err is not None:
                _logger.warning(
                    "Trial {} failed because of the following error: {}".format(
                        trial.number, repr(func_err)
                    ),
                    exc_info=func_err_fail_exc_info,
                )
            elif values_conversion_failure_message is not None:
                _logger.warning(values_conversion_failure_message)
            else:
                assert False, "Should not reach."
        else:
            assert False, "Should not reach."

    if state == TrialState.FAIL and func_err is not None and not isinstance(func_err, catch):
        raise func_err

    return trial


def _check_and_convert_to_values(
    n_objectives: int, original_value: Union[float, Sequence[float]], trial_number: int
) -> Tuple[Optional[List[float]], Optional[str]]:
    if isinstance(original_value, Sequence):
        if n_objectives != len(original_value):
            return (
                None,
                (
                    f"Trial {trial_number} failed, because the number of the values "
                    f"{len(original_value)} did not match the number of the objectives "
                    f"{n_objectives}."
                ),
            )
        else:
            _original_values = list(original_value)
    else:
        _original_values = [original_value]

    _checked_values = []
    for v in _original_values:
        checked_v, failure_message = _check_single_value(v, trial_number)
        if failure_message is not None:
            # TODO(Imamura): Construct the error message taking into account all values
            # and do not return early.
            # `value` is assumed to be ignored on failure so we can set it to any value.
            return None, failure_message
        elif isinstance(checked_v, float):
            _checked_values.append(checked_v)
        else:
            assert False

    return _checked_values, None


def _check_single_value(
    original_value: float, trial_number: int
) -> Tuple[Optional[float], Optional[str]]:
    value = None
    failure_message = None

    try:
        value = float(original_value)
    except (
        ValueError,
        TypeError,
    ):
        failure_message = (
            f"Trial {trial_number} failed, because the value {repr(original_value)} could not be "
            "cast to float."
        )

    if value is not None and math.isnan(value):
        value = None
        failure_message = (
            f"Trial {trial_number} failed, because the objective function returned "
            f"{original_value}."
        )

    return value, failure_message


def _record_heartbeat(trial_id: int, storage: storages.BaseStorage, stop_event: Event) -> None:
    heartbeat_interval = storage.get_heartbeat_interval()
    assert heartbeat_interval is not None
    while True:
        storage.record_heartbeat(trial_id)
        if stop_event.wait(timeout=heartbeat_interval):
            ...
endpoint.py
Source: endpoint.py
...37 "must be an object of any supported "38 "return type, the second a valid "39 "HTTP return status code as an integer"40 }41 def func_err(message, http_status=500):42 if 500 <= http_status < 600:43 logger.exception(message)44 else:45 logger.error(message)46 return jsonify(47 data=message,48 docstring=utils.docstring(view_func, *parameters)49 ), http_status50 @wraps(view_func)51 def validate_and_execute(*args, **kwargs):52 # grabs incoming data (multiple methods)53 request_data = utils.get_request_data()54 # fall-back type annotations from function signatures55 # when no parameter type is specified (python >3.5 only)56 type_annotations = None57 if sys.version_info >= (3, 5):58 signature = inspect.signature(view_func)59 type_annotations = {k: v.annotation for k, v in60 signature.parameters.items()61 if v.annotation is not inspect._empty}62 for param in parameters:63 # normalize param key for the view_func(*args, **kwargs) call64 param_key_safe = param.key.replace('-', '_')65 66 # checks if param is required67 if param.key not in request_data[param.location]:68 if param.required:69 return func_err(messages["required"] % param.key)70 else:71 # set default value, if provided72 if param.default is not None:73 kwargs[param_key_safe] = param.default74 else:75 kwargs[param_key_safe] = None76 continue77 # set the param type from function annotation (runs only once)78 if type_annotations and param.type is None:79 if param.key in type_annotations:80 param.type = type_annotations[param.key]81 else:82 return func_err(messages["type_required_py3.5"] % param.key)83 # validate the param value84 value = request_data[param.location].get(param.key)85 if type(value) != param.type:86 if param.type in NUMERIC_TYPES:87 try:88 value = param.type(value) # opportunistic coercing to int/float/long89 except ValueError:90 return func_err(messages["type_error"] % (param.key, param.type))91 elif param.type in STRING_LIKE:92 pass93 elif param.type is ANY:94 pass95 elif param.type is datetime:96 try:97 value = dateutil.parser.parse(value)98 except:99 return func_err(messages["datetime_parse_error"] % (param.key, str(value)))100 elif param.type is bool and type(value) in STRING_LIKE:101 if value.lower() in ('true', 'y'):102 value = True103 elif value.lower() in ('false', 'n'):104 value = False105 else:106 return func_err(messages["type_error"] % (param.key, param.type))107 else:108 return func_err(messages["type_error"] % (param.key, param.type))109 # validate via custom validator, if provided110 if param.kwargs.get('validator', None):111 try:112 result = param.kwargs["validator"](value)113 if isinstance(result, Response):114 return result115 elif result:116 raise Exception("validator returned an unknown format. 
" 117 "either return nothing, raise an Exception or "118 "return a `flask.Response` object.")119 except Exception as ex:120 return func_err("parameter '%s' error: %s" % (param.key, str(ex)))121 kwargs[param_key_safe] = value122 try:123 result = view_func(*args, **kwargs)124 except HTTPException:125 raise126 except Exception as ex:127 return func_err(str(ex))128 if isinstance(result, (Response, WResponse)):129 return result130 elif result is None:131 return jsonify(data=None), 204132 elif isinstance(result, tuple):133 if not len(result) == 2 or not isinstance(result[1], int):134 return func_err(messages["bad_return_tuple"])135 return jsonify(data=result[0]), result[1]136 elif not isinstance(result, SUPPORTED_TYPES):137 raise TypeError("Bad return type for api_result")138 return jsonify(data=result)139 return validate_and_execute140class parameter:141 def __init__(self, key, type=None, default=None, required=False, validator=None, location='all'):142 """143 Endpoint parameter144 :param key: The parameter name as a string145 :param type: The parameter type146 :param default: The default value this parameter should hold147 :param required: Marks this parameter as 'required'148 :param validator: A custom function that further validates the parameter...
_solve.py
Source: _solve.py
# -*- coding: utf-8 -*-
from typing import Tuple, Optional

import torch
from torch import Tensor
import numpy as np
from tqdm import trange


@torch.no_grad()
def optimize_W(
    X: Tensor,
    Z: Tensor,
    Y_w: Tensor,
    z_eye: Tensor,
    ones_w: Tensor,
    lamba_w: Tensor,
    beta: float,
    rho: float,
    ceta: float,
) -> Tuple[Tensor, Tensor]:

    optimize_zz = torch.matmul(Z, Z.T) + z_eye

    L_w_matrix = torch.matmul(torch.matmul(X, 2*optimize_zz - 2*Z), X.T)

    L_w = 2*torch.norm(L_w_matrix, 2) + beta * lamba_w.shape[1] / ceta

    M_w = (
        torch.matmul(2*L_w_matrix + beta/ceta, Y_w)
        - 2*torch.matmul(torch.matmul(X, optimize_zz), X.T)
        - beta/ceta + lamba_w
    )

    # Gradient step, then projection onto nonnegative, symmetric, zero-diagonal matrices.
    W = (Y_w - M_w/L_w) * ones_w
    W = (torch.abs(W) + W) / 2
    W = (W + W.T) / 2

    # Residual of the column-sum-to-one constraint, used for the dual update.
    leq3 = torch.sum(W, dim=0) - 1

    lamba_w = lamba_w + beta*rho*leq3

    return W, lamba_w


@torch.no_grad()
def optimize_Z(
    X: Tensor,
    W: Tensor,
    Y_z: Tensor,
    w_eye: Tensor,
    ones_z: Tensor,
    lamba_z: Tensor,
    beta: float,
    rho: float,
    ceta: float,
) -> Tuple[Tensor, Tensor]:

    optimize_ww = torch.matmul(W, W.T) + w_eye

    L_z_matrix = torch.matmul(torch.matmul(X.T, 2*optimize_ww - 2*W), X)

    L_z = 2*torch.norm(L_z_matrix, 2) + beta * lamba_z.shape[1] / ceta

    M_z = (
        torch.matmul(2*L_z_matrix + beta/ceta, Y_z)
        - 2*torch.matmul(torch.matmul(X.T, optimize_ww), X)
        - beta/ceta + lamba_z
    )

    # Mirrors optimize_W: gradient step, then projection onto the feasible set.
    Z = (Y_z - M_z/L_z) * ones_z
    Z = (torch.abs(Z) + Z) / 2
    Z = (Z + Z.T) / 2

    leq4 = torch.sum(Z, dim=0) - 1

    lamba_z = lamba_z + beta*rho*leq4

    return Z, lamba_z


@torch.no_grad()
def solve_Z(
    X: Tensor,
    W: Tensor,
    Z: Tensor,
    beta: float,
    tol_err: float,
    n_iters: int,
    SS_matrix: Optional[np.ndarray],
    device: torch.device,
    tqdm_params: dict,
) -> Tuple[Tensor, Tensor]:

    m, n = X.shape

    rho = 0.8
    ceta_prev = 1 / rho
    ceta = 1

    func_err = float('inf')

    W_prev = W
    Z_prev = Z

    lamba_w = torch.zeros(1, m).to(device)
    lamba_z = torch.zeros(1, n).to(device)

    z_eye = torch.eye(n).to(device)
    w_eye = torch.eye(m).to(device)

    if SS_matrix is None:
        ones_z = 1 - z_eye
    else:
        ones_z = torch.tensor(SS_matrix, dtype=torch.float32).to(device)
    ones_w = 1 - w_eye

    pbar = trange(n_iters, position=0, **tqdm_params)

    for Iter in pbar:

        func_err_prev = func_err

        # Momentum extrapolation of both iterates.
        Y_iter_value = (ceta * (1 - ceta_prev)) / ceta_prev

        Y_w = W + Y_iter_value * (W - W_prev)
        Y_z = Z + Y_iter_value * (Z - Z_prev)

        W_prev = W
        Z_prev = Z

        W, lamba_w = optimize_W(
            X=X,
            Z=Z,
            Y_w=Y_w,
            z_eye=z_eye,
            ones_w=ones_w,
            lamba_w=lamba_w,
            beta=beta,
            rho=rho,
            ceta=ceta,
        )

        Z, lamba_z = optimize_Z(
            X=X,
            W=W,
            Y_z=Y_z,
            w_eye=w_eye,
            ones_z=ones_z,
            lamba_z=lamba_z,
            beta=beta,
            rho=rho,
            ceta=ceta,
        )

        ceta_prev = ceta
        ceta = 1 / (1 - rho + 1 / ceta)

        # Composite residual from the four Frobenius-norm terms of the objective.
        func_1_err = torch.norm(torch.matmul(W.T, torch.matmul(X, z_eye - Z)), 'fro')
        func_2_err = torch.norm(torch.matmul(X, z_eye - Z), 'fro')
        func_3_err = torch.norm(torch.matmul(Z.T, torch.matmul(X.T, w_eye - W)), 'fro')
        func_4_err = torch.norm(torch.matmul(X.T, w_eye - W), 'fro')

        func_err = func_1_err + func_2_err + func_3_err + func_4_err

        func_err_rel = torch.abs(func_err_prev - func_err) / func_err_prev

        pbar.set_postfix_str(f'err={func_err_rel.item():.5e}')

        if func_err_rel < tol_err:
            pbar.set_postfix_str(f'err={func_err_rel.item():.5e}, converged!')
            break
    ...
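`solve_Z` alternates the accelerated updates of `W` and `Z` until the relative change of the combined residual drops below `tol_err`. A minimal smoke test with random data; the shapes are inferred from the signature (`X` is m-by-n, `W` m-by-m, `Z` n-by-n), the hyperparameters are illustrative, and the `(W, Z)` return is assumed from the annotated return type since the excerpt is truncated:

import torch

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

m, n = 50, 200
X = torch.rand(m, n, device=device)
W0 = torch.rand(m, m, device=device)
Z0 = torch.rand(n, n, device=device)

W, Z = solve_Z(          # return value assumed to be (W, Z); the source above is cut off
    X=X,
    W=W0,
    Z=Z0,
    beta=1.0,            # illustrative penalty weight, not a recommended setting
    tol_err=1e-5,        # relative-change threshold checked each iteration
    n_iters=500,
    SS_matrix=None,      # None -> ones_z falls back to the off-diagonal mask 1 - I_n
    device=device,
    tqdm_params={"desc": "solve_Z"},
)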