Best JavaScript code snippet using storybook-root
meta.py
Source:meta.py
# global
import ivy
from ivy.functional.ivy.core.gradients import gradient_descent_update


# Private #
# --------#

def _compute_cost_and_update_grads(cost_fn, order, batch, variables, outer_v, keep_outer_v,
                                   average_across_steps_or_final, all_grads, unique_outer, batched, num_tasks):
    if order == 1:
        cost, inner_grads = ivy.execute_with_gradients(
            lambda v: cost_fn(batch, v=variables.set_at_key_chains(v) if unique_outer else v),
            variables.at_key_chains(outer_v, ignore_none=True) if keep_outer_v else
            variables.prune_key_chains(outer_v, ignore_none=True), retain_grads=False)
        if batched:
            inner_grads = inner_grads * num_tasks
        if average_across_steps_or_final:
            all_grads.append(inner_grads)
    else:
        cost = cost_fn(batch, v=variables)
    return cost


def _train_task(inner_batch, outer_batch, inner_cost_fn, outer_cost_fn, variables, inner_grad_steps,
                inner_learning_rate, inner_optimization_step, order, average_across_steps, inner_v, keep_inner_v,
                outer_v, keep_outer_v, batched, num_tasks, stop_gradients):

    # init
    total_cost = 0
    all_grads = list()

    # inner and outer
    unique_inner = inner_v is not None
    unique_outer = outer_v is not None

    # iterate through inner loop training steps
    for i in range(inner_grad_steps):

        # compute inner gradient to update the inner variables
        cost, inner_update_grads = ivy.execute_with_gradients(
            lambda v: inner_cost_fn(inner_batch, v=variables.set_at_key_chains(v) if unique_inner else v),
            variables.at_key_chains(inner_v, ignore_none=True) if keep_inner_v else
            variables.prune_key_chains(inner_v, ignore_none=True), retain_grads=order > 1)
        if batched:
            inner_update_grads = inner_update_grads * num_tasks

        # compute the cost to be optimized, and update all_grads if first order method
        if outer_cost_fn is None and not unique_inner and not unique_outer:
            all_grads.append(inner_update_grads)
        else:
            cost = _compute_cost_and_update_grads(
                inner_cost_fn if outer_cost_fn is None else outer_cost_fn, order, outer_batch, variables, outer_v,
                keep_outer_v, average_across_steps, all_grads, unique_outer, batched, num_tasks)

        # update cost and update parameters
        total_cost = total_cost + cost
        if unique_inner:
            variables = variables.set_at_key_chains(
                inner_optimization_step(variables.at_key_chains(inner_v) if keep_inner_v else
                                        variables.prune_key_chains(inner_v), inner_update_grads,
                                        inner_learning_rate, inplace=False, stop_gradients=stop_gradients))
        else:
            variables = inner_optimization_step(variables, inner_update_grads, inner_learning_rate, inplace=False,
                                                stop_gradients=stop_gradients)

    # once training is finished, compute the final cost, and update all_grads if first order method
    final_cost = _compute_cost_and_update_grads(
        inner_cost_fn if outer_cost_fn is None else outer_cost_fn, order, outer_batch, variables, outer_v,
        keep_outer_v, True, all_grads, unique_outer, batched, num_tasks)

    # update variables
    if stop_gradients:
        variables = variables.stop_gradients()
    if not batched:
        variables = variables.expand_dims(0)

    # average the cost or gradients across all timesteps if this option is chosen
    if average_across_steps:
        total_cost = total_cost + final_cost
        if order == 1:
            all_grads = sum(all_grads) / max(len(all_grads), 1)
        return total_cost / (inner_grad_steps + 1), variables, all_grads

    # else return only the final values
    if order == 1:
        all_grads = all_grads[-1]
    return final_cost, variables, all_grads


def _train_tasks_batched(batch, inner_batch_fn, outer_batch_fn, inner_cost_fn, outer_cost_fn, variables,
                         inner_grad_steps, inner_learning_rate, inner_optimization_step, order, average_across_steps,
                         inner_v, keep_inner_v, outer_v, keep_outer_v, return_inner_v, num_tasks, stop_gradients):
    inner_batch = batch
    outer_batch = batch
    if inner_batch_fn is not None:
        inner_batch = inner_batch_fn(inner_batch)
    if outer_batch_fn is not None:
        outer_batch = outer_batch_fn(outer_batch)
    cost, updated_ivs, grads = _train_task(inner_batch, outer_batch, inner_cost_fn, outer_cost_fn, variables,
                                           inner_grad_steps, inner_learning_rate, inner_optimization_step, order,
                                           average_across_steps, inner_v, keep_inner_v, outer_v, keep_outer_v, True,
                                           num_tasks, stop_gradients)
    grads = grads.reduce_mean(0) if isinstance(grads, ivy.Container) else grads
    if order == 1:
        if return_inner_v in ['all', True]:
            return cost, grads, updated_ivs
        elif return_inner_v == 'first':
            return cost, grads, updated_ivs[0:1]
        return cost, grads
    if return_inner_v in ['all', True]:
        return cost, updated_ivs
    elif return_inner_v == 'first':
        return cost, updated_ivs[0:1]
    return cost


def _train_tasks_with_for_loop(batch, inner_sub_batch_fn, outer_sub_batch_fn, inner_cost_fn, outer_cost_fn, variables,
                               inner_grad_steps, inner_learning_rate, inner_optimization_step, order,
                               average_across_steps, inner_v, keep_inner_v, outer_v, keep_outer_v, return_inner_v,
                               num_tasks, stop_gradients):
    total_cost = 0
    updated_ivs_to_return = list()
    all_grads = list()
    if isinstance(inner_v, (list, tuple)) and isinstance(inner_v[0], (list, tuple, dict, type(None))):
        inner_v_seq = True
    else:
        inner_v_seq = False
    if isinstance(outer_v, (list, tuple)) and isinstance(outer_v[0], (list, tuple, dict, type(None))):
        outer_v_seq = True
    else:
        outer_v_seq = False
    for i, sub_batch in enumerate(batch.unstack(0, True, num_tasks)):
        if inner_sub_batch_fn is not None:
            inner_sub_batch = inner_sub_batch_fn(sub_batch)
        else:
            inner_sub_batch = sub_batch
        if outer_sub_batch_fn is not None:
            outer_sub_batch = outer_sub_batch_fn(sub_batch)
        else:
            outer_sub_batch = sub_batch
        iv = inner_v[i] if inner_v_seq else inner_v
        ov = outer_v[i] if outer_v_seq else outer_v
        cost, updated_iv, grads = _train_task(inner_sub_batch, outer_sub_batch, inner_cost_fn, outer_cost_fn,
                                              variables, inner_grad_steps, inner_learning_rate,
                                              inner_optimization_step, order, average_across_steps, iv, keep_inner_v,
                                              ov, keep_outer_v, False, num_tasks, stop_gradients)
        if (return_inner_v == 'first' and i == 0) or return_inner_v in ['all', True]:
            updated_ivs_to_return.append(updated_iv)
        total_cost = total_cost + cost
        all_grads.append(grads)
    if order == 1:
        if return_inner_v:
            return total_cost / num_tasks, sum(all_grads) / num_tasks, ivy.Container.concat(updated_ivs_to_return, 0)
        return total_cost / num_tasks, sum(all_grads) / num_tasks
    if return_inner_v:
        return total_cost / num_tasks, ivy.Container.concat(updated_ivs_to_return, 0)
    return total_cost / num_tasks


def _train_tasks(batch, inner_batch_fn, outer_batch_fn, inner_cost_fn, outer_cost_fn, variables, inner_grad_steps,
                 inner_learning_rate, inner_optimization_step, order, average_across_steps, batched, inner_v,
                 keep_inner_v, outer_v, keep_outer_v, return_inner_v, num_tasks, stop_gradients):
    if batched:
        return _train_tasks_batched(
            batch, inner_batch_fn, outer_batch_fn, inner_cost_fn, outer_cost_fn, variables, inner_grad_steps,
            inner_learning_rate, inner_optimization_step, order, average_across_steps, inner_v, keep_inner_v, outer_v,
            keep_outer_v, return_inner_v, num_tasks, stop_gradients)
    return _train_tasks_with_for_loop(
        batch, inner_batch_fn, outer_batch_fn, inner_cost_fn, outer_cost_fn, variables, inner_grad_steps,
        inner_learning_rate, inner_optimization_step, order, average_across_steps, inner_v, keep_inner_v, outer_v,
        keep_outer_v, return_inner_v, num_tasks, stop_gradients)


# Public #
# -------#

# First Order

def fomaml_step(batch, inner_cost_fn, outer_cost_fn, variables, inner_grad_steps, inner_learning_rate,
                inner_optimization_step=gradient_descent_update, inner_batch_fn=None, outer_batch_fn=None,
                average_across_steps=False, batched=True, inner_v=None, keep_inner_v=True, outer_v=None,
                keep_outer_v=True, return_inner_v=False, num_tasks=None, stop_gradients=True):
    """
    Perform step of first order MAML.
    :param batch: The input batch
    :type batch: ivy.Container
    :param inner_cost_fn: callable for the inner loop cost function, receiving task-specific sub-batch,
                          inner vars and outer vars
    :type inner_cost_fn: callable
    :param outer_cost_fn: callable for the outer loop cost function, receiving task-specific sub-batch,
                          inner vars and outer vars. If None, the cost from the inner loop will also be
                          optimized in the outer loop.
    :type outer_cost_fn: callable, optional
    :param variables: Variables to be optimized during the meta step
    :type variables: ivy.Container
    :param inner_grad_steps: Number of gradient steps to perform during the inner loop.
    :type inner_grad_steps: int
    :param inner_learning_rate: The learning rate of the inner loop.
    :type inner_learning_rate: float
    :param inner_optimization_step: The function used for the inner loop optimization.
                                    Default is ivy.gradient_descent_update.
    :type inner_optimization_step: callable, optional
    :param inner_batch_fn: Function to apply to the task sub-batch, before passing to the inner_cost_fn.
                           Default is None.
    :type inner_batch_fn: callable, optional
    :param outer_batch_fn: Function to apply to the task sub-batch, before passing to the outer_cost_fn.
                           Default is None.
    :type outer_batch_fn: callable, optional
    :param average_across_steps: Whether to average the inner loop steps for the outer loop update. Default is False.
    :type average_across_steps: bool, optional
    :param batched: Whether to batch along the time dimension, and run the meta steps in batch. Default is True.
    :type batched: bool, optional
    :param inner_v: Nested variable keys to be optimized during the inner loop, with same keys and boolean values.
    :type inner_v: dict str or list, optional
    :param keep_inner_v: If True, the key chains in inner_v will be kept, otherwise they will be removed.
                         Default is True.
    :type keep_inner_v: bool, optional
    :param outer_v: Nested variable keys to be optimized during the outer loop, with same keys and boolean values.
    :type outer_v: dict str or list, optional
    :param keep_outer_v: If True, the key chains in outer_v will be kept, otherwise they will be removed.
                         Default is True.
    :type keep_outer_v: bool, optional
    :param return_inner_v: Either 'first', 'all', or False. 'first' means the variables for the first task inner loop
                           will also be returned. Variables for all tasks will be returned with 'all'.
                           Default is False.
    :type return_inner_v: str, optional
    :param num_tasks: Number of unique tasks to inner-loop optimize for the meta step.
                      Determined from batch by default.
    :type num_tasks: int, optional
    :param stop_gradients: Whether to stop the gradients of the cost. Default is True.
    :type stop_gradients: bool, optional
    :return: The cost and the gradients with respect to the outer loop variables.
    """
    if num_tasks is None:
        num_tasks = batch.shape[0]
    rets = _train_tasks(
        batch, inner_batch_fn, outer_batch_fn, inner_cost_fn, outer_cost_fn, variables, inner_grad_steps,
        inner_learning_rate, inner_optimization_step, 1, average_across_steps, batched, inner_v, keep_inner_v,
        outer_v, keep_outer_v, return_inner_v, num_tasks, stop_gradients)
    cost = rets[0]
    if stop_gradients:
        cost = ivy.stop_gradient(cost, preserve_type=False)
    grads = rets[1]
    if return_inner_v:
        return cost, grads, rets[2]
    return cost, grads


def reptile_step(batch, cost_fn, variables, inner_grad_steps, inner_learning_rate,
                 inner_optimization_step=gradient_descent_update, batched=True, return_inner_v=False, num_tasks=None,
                 stop_gradients=True):
    """
    Perform step of Reptile.
    :param batch: The input batch
    :type batch: ivy.Container
    :param cost_fn: callable for the cost function, receiving the task-specific sub-batch and variables
    :type cost_fn: callable
    :param variables: Variables to be optimized
    :type variables: ivy.Container
    :param inner_grad_steps: Number of gradient steps to perform during the inner loop.
    :type inner_grad_steps: int
    :param inner_learning_rate: The learning rate of the inner loop.
    :type inner_learning_rate: float
    :param inner_optimization_step: The function used for the inner loop optimization.
                                    Default is ivy.gradient_descent_update.
    :type inner_optimization_step: callable, optional
    :param batched: Whether to batch along the time dimension, and run the meta steps in batch. Default is True.
    :type batched: bool, optional
    :param return_inner_v: Either 'first', 'all', or False. 'first' means the variables for the first task inner loop
                           will also be returned. Variables for all tasks will be returned with 'all'.
                           Default is False.
    :type return_inner_v: str, optional
    :param num_tasks: Number of unique tasks to inner-loop optimize for the meta step.
                      Determined from batch by default.
    :type num_tasks: int, optional
    :param stop_gradients: Whether to stop the gradients of the cost. Default is True.
    :type stop_gradients: bool, optional
    :return: The cost and the gradients with respect to the outer loop variables.
    """
    if num_tasks is None:
        num_tasks = batch.shape[0]
    # noinspection PyTypeChecker
    rets = _train_tasks(
        batch, None, None, cost_fn, None, variables, inner_grad_steps, inner_learning_rate, inner_optimization_step,
        1, True, batched, None, True, None, True, return_inner_v, num_tasks, stop_gradients)
    cost = rets[0]
    if stop_gradients:
        cost = ivy.stop_gradient(cost, preserve_type=False)
    grads = rets[1] / inner_learning_rate
    if return_inner_v:
        return cost, grads, rets[2]
    return cost, grads


# Second Order

def maml_step(batch, inner_cost_fn, outer_cost_fn, variables, inner_grad_steps, inner_learning_rate,
              inner_optimization_step=gradient_descent_update, inner_batch_fn=None, outer_batch_fn=None,
              average_across_steps=False, batched=True, inner_v=None, keep_inner_v=True, outer_v=None,
              keep_outer_v=True, return_inner_v=False, num_tasks=None, stop_gradients=True):
    """
    Perform step of vanilla second order MAML.
    :param batch: The input batch
    :type batch: ivy.Container
    :param inner_cost_fn: callable for the inner loop cost function, receiving task-specific sub-batch,
                          inner vars and outer vars
    :type inner_cost_fn: callable
    :param outer_cost_fn: callable for the outer loop cost function, receiving task-specific sub-batch,
                          inner vars and outer vars. If None, the cost from the inner loop will also be
                          optimized in the outer loop.
    :type outer_cost_fn: callable, optional
    :param variables: Variables to be optimized during the meta step
    :type variables: ivy.Container
    :param inner_grad_steps: Number of gradient steps to perform during the inner loop.
    :type inner_grad_steps: int
    :param inner_learning_rate: The learning rate of the inner loop.
    :type inner_learning_rate: float
    :param inner_optimization_step: The function used for the inner loop optimization.
                                    Default is ivy.gradient_descent_update.
    :type inner_optimization_step: callable, optional
    :param inner_batch_fn: Function to apply to the task sub-batch, before passing to the inner_cost_fn.
                           Default is None.
    :type inner_batch_fn: callable, optional
    :param outer_batch_fn: Function to apply to the task sub-batch, before passing to the outer_cost_fn.
                           Default is None.
    :type outer_batch_fn: callable, optional
    :param average_across_steps: Whether to average the inner loop steps for the outer loop update. Default is False.
    :type average_across_steps: bool, optional
    :param batched: Whether to batch along the time dimension, and run the meta steps in batch. Default is True.
    :type batched: bool, optional
    :param inner_v: Nested variable keys to be optimized during the inner loop, with same keys and boolean values.
    :type inner_v: dict str or list, optional
    :param keep_inner_v: If True, the key chains in inner_v will be kept, otherwise they will be removed.
                         Default is True.
    :type keep_inner_v: bool, optional
    :param outer_v: Nested variable keys to be optimized during the outer loop, with same keys and boolean values.
    :type outer_v: dict str or list, optional
    :param keep_outer_v: If True, the key chains in outer_v will be kept, otherwise they will be removed.
                         Default is True.
    :type keep_outer_v: bool, optional
    :param return_inner_v: Either 'first', 'all', or False. 'first' means the variables for the first task inner loop
                           will also be returned. Variables for all tasks will be returned with 'all'.
                           Default is False.
    :type return_inner_v: str, optional
    :param num_tasks: Number of unique tasks to inner-loop optimize for the meta step.
                      Determined from batch by default.
    :type num_tasks: int, optional
    :param stop_gradients: Whether to stop the gradients of the cost. Default is True.
    :type stop_gradients: bool, optional
    :return: The cost and the gradients with respect to the outer loop variables.
    """
    if num_tasks is None:
        num_tasks = batch.shape[0]
    unique_outer = outer_v is not None
    cost, grads, *rets = ivy.execute_with_gradients(lambda v: _train_tasks(
        batch, inner_batch_fn, outer_batch_fn, inner_cost_fn, outer_cost_fn,
        variables.set_at_key_chains(v) if unique_outer else v, inner_grad_steps, inner_learning_rate,
        inner_optimization_step, 2, average_across_steps, batched, inner_v, keep_inner_v, outer_v, keep_outer_v,
        return_inner_v, num_tasks, False),
        variables.at_key_chains(outer_v, ignore_none=True)
        if keep_outer_v else variables.prune_key_chains(outer_v, ignore_none=True))
    if stop_gradients:
        cost = ivy.stop_gradient(cost, preserve_type=False)
    # noinspection PyRedundantParentheses
...
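For orientation, here is a minimal driver sketch for fomaml_step above. The container layout, the quadratic cost, and the use of ivy.variable / ivy.reduce_mean are illustrative assumptions about the ivy version this file targets, not part of the source:

# Hypothetical usage sketch (assumes an ivy backend has been set, e.g. via
# ivy.set_framework, and that ivy.array / ivy.variable / ivy.reduce_mean
# exist with these signatures in this ivy version).
import ivy

num_tasks = 4
# one scalar observation per task, stacked along the leading task dimension
batch = ivy.Container({'x': ivy.array([[1.], [2.], [3.], [4.]])})
variables = ivy.Container({'w': ivy.variable(ivy.array([0.]))})

def inner_cost_fn(batch, v):
    # illustrative per-task quadratic cost
    return ivy.reduce_mean((v.w - batch.x) ** 2)

cost, outer_grads = fomaml_step(
    batch, inner_cost_fn, None, variables,
    inner_grad_steps=1, inner_learning_rate=0.1, num_tasks=num_tasks)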
ragged_factory_ops.py
Source:ragged_factory_ops.py
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Operations for constructing RaggedTensors."""

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import numpy as np

from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import array_ops
from tensorflow.python.ops.ragged import ragged_tensor
from tensorflow.python.ops.ragged import ragged_tensor_value
from tensorflow.python.util.tf_export import tf_export


#===============================================================================
# Op to construct a constant RaggedTensor from a nested Python list.
#===============================================================================
@tf_export("ragged.constant")
def constant(pylist, dtype=None, ragged_rank=None, inner_shape=None,
             name=None, row_splits_dtype=dtypes.int64):
  """Constructs a constant RaggedTensor from a nested Python list.

  Example:

  >>> tf.ragged.constant([[1, 2], [3], [4, 5, 6]])
  <tf.RaggedTensor [[1, 2], [3], [4, 5, 6]]>

  All scalar values in `pylist` must have the same nesting depth `K`, and the
  returned `RaggedTensor` will have rank `K`. If `pylist` contains no scalar
  values, then `K` is one greater than the maximum depth of empty lists in
  `pylist`. All scalar values in `pylist` must be compatible with `dtype`.

  Args:
    pylist: A nested `list`, `tuple` or `np.ndarray`. Any nested element that
      is not a `list`, `tuple` or `np.ndarray` must be a scalar value
      compatible with `dtype`.
    dtype: The type of elements for the returned `RaggedTensor`. If not
      specified, then a default is chosen based on the scalar values in
      `pylist`.
    ragged_rank: An integer specifying the ragged rank of the returned
      `RaggedTensor`. Must be nonnegative and less than `K`. Defaults to
      `max(0, K - 1)` if `inner_shape` is not specified. Defaults to
      `max(0, K - 1 - len(inner_shape))` if `inner_shape` is specified.
    inner_shape: A tuple of integers specifying the shape for individual inner
      values in the returned `RaggedTensor`. Defaults to `()` if `ragged_rank`
      is not specified. If `ragged_rank` is specified, then a default is
      chosen based on the contents of `pylist`.
    name: A name prefix for the returned tensor (optional).
    row_splits_dtype: data type for the constructed `RaggedTensor`'s
      row_splits. One of `tf.int32` or `tf.int64`.

  Returns:
    A potentially ragged tensor with rank `K` and the specified `ragged_rank`,
    containing the values from `pylist`.

  Raises:
    ValueError: If the scalar values in `pylist` have inconsistent nesting
      depth; or if ragged_rank or inner_shape are incompatible with `pylist`.
  """
  def ragged_factory(values, row_splits):
    row_splits = constant_op.constant(row_splits, dtype=row_splits_dtype)
    return ragged_tensor.RaggedTensor.from_row_splits(values, row_splits,
                                                      validate=False)

  with ops.name_scope(name, "RaggedConstant"):
    return _constant_value(ragged_factory, constant_op.constant, pylist, dtype,
                           ragged_rank, inner_shape)


@tf_export(v1=["ragged.constant_value"])
def constant_value(pylist, dtype=None, ragged_rank=None, inner_shape=None,
                   row_splits_dtype="int64"):
  """Constructs a RaggedTensorValue from a nested Python list.

  Warning: This function returns a `RaggedTensorValue`, not a `RaggedTensor`.
  If you wish to construct a constant `RaggedTensor`, use
  [`ragged.constant(...)`](constant.md) instead.

  Example:

  >>> tf.compat.v1.ragged.constant_value([[1, 2], [3], [4, 5, 6]])
  tf.RaggedTensorValue(values=array([1, 2, 3, 4, 5, 6]),
                       row_splits=array([0, 2, 3, 6]))

  All scalar values in `pylist` must have the same nesting depth `K`, and the
  returned `RaggedTensorValue` will have rank `K`. If `pylist` contains no
  scalar values, then `K` is one greater than the maximum depth of empty lists
  in `pylist`. All scalar values in `pylist` must be compatible with `dtype`.

  Args:
    pylist: A nested `list`, `tuple` or `np.ndarray`. Any nested element that
      is not a `list` or `tuple` must be a scalar value compatible with
      `dtype`.
    dtype: `numpy.dtype`. The type of elements for the returned `RaggedTensor`.
      If not specified, then a default is chosen based on the scalar values in
      `pylist`.
    ragged_rank: An integer specifying the ragged rank of the returned
      `RaggedTensorValue`. Must be nonnegative and less than `K`. Defaults to
      `max(0, K - 1)` if `inner_shape` is not specified. Defaults to
      `max(0, K - 1 - len(inner_shape))` if `inner_shape` is specified.
    inner_shape: A tuple of integers specifying the shape for individual inner
      values in the returned `RaggedTensorValue`. Defaults to `()` if
      `ragged_rank` is not specified. If `ragged_rank` is specified, then a
      default is chosen based on the contents of `pylist`.
    row_splits_dtype: data type for the constructed `RaggedTensorValue`'s
      row_splits. One of `numpy.int32` or `numpy.int64`.

  Returns:
    A `tf.RaggedTensorValue` or `numpy.array` with rank `K` and the specified
    `ragged_rank`, containing the values from `pylist`.

  Raises:
    ValueError: If the scalar values in `pylist` have inconsistent nesting
      depth; or if ragged_rank or inner_shape are incompatible with `pylist`.
  """
  if dtype is not None and isinstance(dtype, dtypes.DType):
    dtype = dtype.as_numpy_dtype
  row_splits_dtype = dtypes.as_dtype(row_splits_dtype).as_numpy_dtype

  def _ragged_factory(values, row_splits):
    row_splits = np.array(row_splits, dtype=row_splits_dtype)
    return ragged_tensor_value.RaggedTensorValue(values, row_splits)

  def _inner_factory(pylist, dtype, shape, name=None):  # pylint: disable=unused-argument
    return np.reshape(np.array(pylist, dtype=dtype), shape)

  return _constant_value(_ragged_factory, _inner_factory, pylist, dtype,
                         ragged_rank, inner_shape)


def _constant_value(ragged_factory, inner_factory, pylist, dtype, ragged_rank,
                    inner_shape):
  """Constructs a constant RaggedTensor or RaggedTensorValue.

  Args:
    ragged_factory: A factory function with the signature:
      `ragged_factory(values, row_splits)`
    inner_factory: A factory function with the signature:
      `inner_factory(pylist, dtype, shape, name)`
    pylist: A nested `list`, `tuple` or `np.ndarray`.
    dtype: Data type for returned value.
    ragged_rank: Ragged rank for returned value.
    inner_shape: Inner value shape for returned value.

  Returns:
    A value returned by `ragged_factory` or `inner_factory`.

  Raises:
    ValueError: If the scalar values in `pylist` have inconsistent nesting
      depth; or if ragged_rank or inner_shape are incompatible with `pylist`.
  """
  if ragged_tensor.is_ragged(pylist):
    raise TypeError("pylist may not be a RaggedTensor or RaggedTensorValue.")
  # np.ndim builds an array, so we short-circuit lists and tuples.
  if not isinstance(pylist, (list, tuple)) and np.ndim(pylist) == 0:
    # Scalar value
    if ragged_rank is not None and ragged_rank != 0:
      raise ValueError("Invalid pylist=%r: incompatible with ragged_rank=%d" %
                       (pylist, ragged_rank))
    if inner_shape is not None and inner_shape:
      raise ValueError(
          "Invalid pylist=%r: incompatible with dim(inner_shape)=%d" %
          (pylist, len(inner_shape)))
    return inner_factory(pylist, dtype, ())

  if ragged_rank is not None and ragged_rank < 0:
    raise ValueError(
        "Invalid ragged_rank=%r: must be nonnegative" % ragged_rank)

  # Find the depth of scalar values in `pylist`.
  scalar_depth, max_depth = _find_scalar_and_max_depth(pylist)
  if scalar_depth is not None:
    if max_depth > scalar_depth:
      raise ValueError("Invalid pylist=%r: empty list nesting is greater "
                       "than scalar value nesting" % pylist)

  # If both inner_shape and ragged_rank were specified, then check that
  # they are compatible with pylist.
  if inner_shape is not None and ragged_rank is not None:
    expected_depth = ragged_rank + len(inner_shape) + 1
    if ((scalar_depth is not None and expected_depth != scalar_depth) or
        (scalar_depth is None and expected_depth < max_depth)):
      raise ValueError(
          "Invalid pylist=%r: incompatible with ragged_rank=%d "
          "and dim(inner_shape)=%d" % (pylist, ragged_rank, len(inner_shape)))

  # Check if the result is a `Tensor`.
  if (ragged_rank == 0 or
      (ragged_rank is None and
       ((max_depth < 2) or
        (inner_shape is not None and max_depth - len(inner_shape) < 2)))):
    return inner_factory(pylist, dtype, inner_shape)

  # Compute default value for inner_shape.
  if inner_shape is None:
    if ragged_rank is None:
      inner_shape = ()
    else:
      inner_shape = _default_inner_shape_for_pylist(pylist, ragged_rank)

  # Compute default value for ragged_rank.
  if ragged_rank is None:
    if scalar_depth is None:
      ragged_rank = max(1, max_depth - 1)
    else:
      ragged_rank = max(1, scalar_depth - 1 - len(inner_shape))

  # Build the splits for each ragged rank, and concatenate the inner values
  # into a single list.
  nested_splits = []
  values = pylist
  for dim in range(ragged_rank):
    nested_splits.append([0])
    concatenated_values = []
    for row in values:
      nested_splits[dim].append(nested_splits[dim][-1] + len(row))
      concatenated_values.extend(row)
    values = concatenated_values

  values = inner_factory(
      values, dtype=dtype, shape=(len(values),) + inner_shape, name="values")
  for row_splits in reversed(nested_splits):
    values = ragged_factory(values, row_splits)
  return values


def _find_scalar_and_max_depth(pylist):
  """Finds nesting depth of scalar values in pylist.

  Args:
    pylist: A nested python `list` or `tuple`.

  Returns:
    A tuple `(scalar_depth, max_depth)`. `scalar_depth` is the nesting
    depth of scalar values in `pylist`, or `None` if `pylist` contains no
    scalars. `max_depth` is the maximum depth of `pylist` (including
    empty lists).

  Raises:
    ValueError: If pylist has inconsistent nesting depths for scalars.
  """
  # Check if pylist is not scalar. np.ndim builds an array, so we
  # short-circuit lists and tuples.
  if isinstance(pylist, (list, tuple)) or np.ndim(pylist) != 0:
    scalar_depth = None
    max_depth = 1
    for child in pylist:
      child_scalar_depth, child_max_depth = _find_scalar_and_max_depth(child)
      if child_scalar_depth is not None:
        if scalar_depth is not None and scalar_depth != child_scalar_depth + 1:
          raise ValueError("all scalar values must have the same nesting depth")
        scalar_depth = child_scalar_depth + 1
      max_depth = max(max_depth, child_max_depth + 1)
    return (scalar_depth, max_depth)
  return (0, 0)


def _default_inner_shape_for_pylist(pylist, ragged_rank):
  """Computes a default inner shape for the given python list."""

  def get_inner_shape(item):
    """Returns the inner shape for a python list `item`."""
    if not isinstance(item, (list, tuple)) and np.ndim(item) == 0:
      return ()
    elif item:
      return (len(item),) + get_inner_shape(item[0])
    return (0,)

  def check_inner_shape(item, shape):
    """Checks that `item` has a consistent shape matching `shape`."""
    is_nested = isinstance(item, (list, tuple)) or np.ndim(item) != 0
    if is_nested != bool(shape):
      raise ValueError("inner values have inconsistent shape")
    if is_nested:
      if shape[0] != len(item):
        raise ValueError("inner values have inconsistent shape")
      for child in item:
        check_inner_shape(child, shape[1:])

  # Collapse the ragged layers to get the list of inner values.
  flat_values = pylist
  for dim in range(ragged_rank):
    if not all(
        isinstance(v, (list, tuple)) or np.ndim(v) != 0 for v in flat_values):
      raise ValueError("pylist has scalar values depth %d, but ragged_rank=%d "
                       "requires scalar value depth greater than %d" %
                       (dim + 1, ragged_rank, ragged_rank))
    flat_values = sum((list(v) for v in flat_values), [])

  # Compute the inner shape looking only at the leftmost elements; and then
  # use check_inner_shape to verify that other elements have the same shape.
  inner_shape = get_inner_shape(flat_values)
  check_inner_shape(flat_values, inner_shape)
  return inner_shape[1:]


@tf_export(v1=["ragged.placeholder"])
def placeholder(dtype, ragged_rank, value_shape=None, name=None):
  """Creates a placeholder for a `tf.RaggedTensor` that will always be fed.

  **Important**: This ragged tensor will produce an error if evaluated.
  Its value must be fed using the `feed_dict` optional argument to
  `Session.run()`, `Tensor.eval()`, or `Operation.run()`.

  @compatibility{eager} Placeholders are not compatible with eager execution.

  Args:
    dtype: The data type for the `RaggedTensor`.
    ragged_rank: The ragged rank for the `RaggedTensor`
    value_shape: The shape for individual flat values in the `RaggedTensor`.
    name: A name for the operation (optional).

  Returns:
    A `RaggedTensor` that may be used as a handle for feeding a value, but
    not evaluated directly.

  Raises:
    RuntimeError: if eager execution is enabled
  """
  if ragged_rank == 0:
    return array_ops.placeholder(dtype, value_shape, name)

  with ops.name_scope(name, "RaggedPlaceholder", []):
    flat_shape = tensor_shape.TensorShape([None]).concatenate(value_shape)
    result = array_ops.placeholder(dtype, flat_shape, "flat_values")
    for i in reversed(range(ragged_rank)):
      row_splits = array_ops.placeholder(dtypes.int64, [None],
                                         "row_splits_%d" % i)
      result = ragged_tensor.RaggedTensor.from_row_splits(result, row_splits)
...
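A quick usage sketch of the factory ops above through the public tf.ragged API (assumes TensorFlow 2.x):

import tensorflow as tf

rt = tf.ragged.constant([[1, 2], [3], [4, 5, 6]])
print(rt)             # <tf.RaggedTensor [[1, 2], [3], [4, 5, 6]]>
print(rt.row_splits)  # tf.Tensor([0 2 3 6], shape=(4,), dtype=int64)

# ragged_rank controls where raggedness stops: here only the outer dimension
# is ragged, so the inner values form a dense (4, 2) tensor.
rt2 = tf.ragged.constant([[[1, 2], [3, 4]], [[5, 6], [7, 8]]], ragged_rank=1)
print(rt2.values.shape)  # (4, 2)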
test_closure.py
Source:test_closure.py
...
        self.run_jit_inner_function(nopython=True)

    @testing.allow_interpreter_mode
    def test_return_closure(self):
        def outer(x):
            def inner():
                return x + 1
            return inner

        cfunc = jit(outer)
        self.assertEqual(cfunc(10)(), outer(10)())


class TestInlinedClosure(TestCase):
    """
    Tests for (partial) closure support in njit. The support is partial
    because it only works for closures that can be successfully inlined
    at compile time.
    """

    @tag('important')
    def test_inner_function(self):
        def outer(x):
            def inner(x):
                return x * x
            return inner(x) + inner(x)

        cfunc = njit(outer)
        self.assertEqual(cfunc(10), outer(10))

    @tag('important')
    def test_inner_function_with_closure(self):
        def outer(x):
            y = x + 1
            def inner(x):
                return x * x + y
            return inner(x) + inner(x)

        cfunc = njit(outer)
        self.assertEqual(cfunc(10), outer(10))

    @tag('important')
    def test_inner_function_with_closure_2(self):
        def outer(x):
            y = x + 1
            def inner(x):
                return x * y
            y = inner(x)
            return y + inner(x)

        cfunc = njit(outer)
        self.assertEqual(cfunc(10), outer(10))

    @unittest.skipIf(utils.PYVERSION < (3, 0), "needs Python 3")
    def test_inner_function_with_closure_3(self):
        code = """
            def outer(x):
                y = x + 1
                z = 0
                def inner(x):
                    nonlocal z
                    z += x * x
                    return z + y
                return inner(x) + inner(x) + z
            """
        ns = {}
        exec(code.strip(), ns)
        cfunc = njit(ns['outer'])
        self.assertEqual(cfunc(10), ns['outer'](10))

    @tag('important')
    def test_inner_function_nested(self):
        def outer(x):
            def inner(y):
                def innermost(z):
                    return x + y + z
                s = 0
                for i in range(y):
                    s += innermost(i)
                return s
            return inner(x * x)

        cfunc = njit(outer)
        self.assertEqual(cfunc(10), outer(10))

    @tag('important')
    def test_bulk_use_cases(self):
        """ Tests the large number of use cases defined below """
        # jitted function used in some tests
        @njit
        def fib3(n):
            if n < 2:
                return n
            return fib3(n - 1) + fib3(n - 2)

        def outer1(x):
            """ Test calling recursive function from inner """
            def inner(x):
                return fib3(x)
            return inner(x)

        def outer2(x):
            """ Test calling recursive function from closure """
            z = x + 1
            def inner(x):
                return x + fib3(z)
            return inner(x)

        def outer3(x):
            """ Test recursive inner """
            def inner(x):
                if x + y < 2:
                    return 10
                else:
                    inner(x - 1)
            return inner(x)

        def outer4(x):
            """ Test recursive closure """
            y = x + 1
            def inner(x):
                if x + y < 2:
                    return 10
                else:
                    inner(x - 1)
            return inner(x)

        def outer5(x):
            """ Test nested closure """
            y = x + 1
            def inner1(x):
                z = y + x + 2
                def inner2(x):
                    return x + z
                return inner2(x) + y
            return inner1(x)

        def outer6(x):
            """ Test closure with list comprehension in body """
            y = x + 1
            def inner1(x):
                z = y + x + 2
                return [t for t in range(z)]
            return inner1(x)

        _OUTER_SCOPE_VAR = 9

        def outer7(x):
            """ Test use of outer scope var, no closure """
            z = x + 1
            return x + z + _OUTER_SCOPE_VAR

        _OUTER_SCOPE_VAR = 9

        def outer8(x):
            """ Test use of outer scope var, with closure """
            z = x + 1
            def inner(x):
                return x + z + _OUTER_SCOPE_VAR
            return inner(x)

        def outer9(x):
            """ Test closure assignment """
            z = x + 1
            def inner(x):
                return x + z
            f = inner
            return f(x)

        def outer10(x):
            """ Test two inner, one calls other """
            z = x + 1
            def inner(x):
                return x + z
            def inner2(x):
                return inner(x)
            return inner2(x)

        def outer11(x):
            """ return the closure """
            z = x + 1
            def inner(x):
                return x + z
            return inner

        def outer12(x):
            """ closure with kwarg """
            z = x + 1
            def inner(x, kw=7):
                return x + z + kw
            return inner(x)

        def outer13(x, kw=7):
            """ outer with kwarg no closure """
            z = x + 1 + kw
            return z

        def outer14(x, kw=7):
            """ outer with kwarg used in closure """
            z = x + 1
            def inner(x):
                return x + z + kw
            return inner(x)

        def outer15(x, kw=7):
            """ outer with kwarg as arg to closure """
            z = x + 1
            def inner(x, kw):
                return x + z + kw
            return inner(x, kw)

        def outer16(x):
            """ closure is generator, consumed locally """
            z = x + 1
            def inner(x):
                yield x + z
            return list(inner(x))

        def outer17(x):
            """ closure is generator, returned """
            z = x + 1
            def inner(x):
                yield x + z
            return inner(x)

        def outer18(x):
            """ closure is generator, consumed in loop """
            z = x + 1
            def inner(x):
                yield x + z
            for i in inner(x):
                t = i
            return t

        def outer19(x):
            """ closure as arg to another closure """
            z1 = x + 1
            z2 = x + 2
            def inner(x):
                return x + z1
            def inner2(f, x):
                return f(x) + z2
            return inner2(inner, x)

        def outer20(x):
            #""" Test calling numpy in closure """
            z = x + 1
            def inner(x):
                return x + numpy.cos(z)
            return inner(x)

        def outer21(x):
            #""" Test calling numpy import as in closure """
            z = x + 1
            def inner(x):
                return x + np.cos(z)
            return inner(x)

        # functions to test that are expected to pass
        f = [outer1, outer2, outer5, outer6, outer7, outer8,
             outer9, outer10, outer12, outer13, outer14,
             outer15, outer19, outer20, outer21]
        for ref in f:
            cfunc = njit(ref)
            var = 10
            self.assertEqual(cfunc(var), ref(var))

        # test functions that are expected to fail
        with self.assertRaises(NotImplementedError) as raises:
            cfunc = jit(nopython=True)(outer3)
            cfunc(var)
        msg = "Unsupported use of op_LOAD_CLOSURE encountered"
        self.assertIn(msg, str(raises.exception))
...
Project2_Classification.py
Source:Project2_Classification.py
# -*- coding: utf-8 -*-
"""
Created on Fri Nov 12 19:11:45 2021

@author: Usuario
"""

from matplotlib.pyplot import figure, plot, xlabel, ylabel, legend, show
import sklearn.linear_model as lm
import numpy as np
import pandas as pd
from matplotlib.pylab import (figure, semilogx, loglog, xlabel, ylabel, legend,
                              title, subplot, show, grid)
import numpy as np
from scipy.io import loadmat
from scipy import stats
import sklearn.linear_model as lm
from sklearn import model_selection
from toolbox_02450 import rlr_validate
import torch
from toolbox_02450 import train_neural_net, draw_neural_net, visualize_decision_boundary
from sklearn.preprocessing import StandardScaler
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import train_test_split
import statistics as st
import array

# Dataset declaration and data cleansing

df = pd.read_csv('heart.csv')
df.shape
df.info()
df.isnull().sum()
df.info()

# Creation of matrix X

X = df.values

# Outlier removal

z_scores = stats.zscore(df)
abs_z_scores = np.abs(z_scores)
filtered_entries = (abs_z_scores < 3).all(axis=1)
new_df = df[filtered_entries]

X_new = new_df
y = X_new['target']
X_new = X_new.drop('target', axis=1)

# --------- Getting dummies for the categorical features --------
X = pd.get_dummies(X_new, columns=['cp', 'restecg', 'slope', 'ca', 'thal'], drop_first=False)

X = (X - X.mean()) / X.std()

attributeNames = np.asarray(X.columns)

N, M = np.shape(X)

X = np.asarray(X)
y = np.asarray(y)

# CLASSIFICATION -------------------------

# Cross validation parameters for inner and outer fold.
K_Outer = 5
CV_Outer = model_selection.KFold(K_Outer, shuffle=True)

K_Inner = 5
CV_Inner = model_selection.KFold(K_Inner, shuffle=True)

# Neural network parameters
h = [1, 3, 5, 7, 9]
max_iter = 10000

# -- MODEL ERRORS
Error_test_LR = np.empty((K_Inner, 1))
opt_lambda_idx = np.empty((K_Inner, 1))
opt_lambda = np.empty((K_Inner, 1))

Error_train_bl_in = np.empty((K_Inner, 1))
Error_test_bl_in = np.empty((K_Inner, 1))
Error_train_bl_out = np.empty((K_Outer, 1))
Error_test_bl_out = np.empty((K_Outer, 1))

Error_ANN_h = np.empty((K_Inner, 1))
error_in = []
error_out = []
Best_h = np.empty((K_Outer, 1))
Min_Error_h = np.empty((K_Inner, 1))
Error_ANN_out = []


## ---- OUTER CROSS VALIDATION FOLD

k_out = 0
for train_index, test_index in CV_Outer.split(X, y):
    print('Outer cross validation fold {0}/{1}:'.format(k_out + 1, K_Outer))

    # Extract training and test set for the outer cross validation fold
    X_train_outer = X[train_index]
    y_train_outer = y[train_index]
    X_test_outer = X[test_index]
    y_test_outer = y[test_index]

    # Fit regularized logistic regression model to training data to predict

    lambda_interval = np.logspace(-8, 2, 50)
    optim_lambdas = np.empty(K_Outer)
    train_error_rate = np.zeros(len(lambda_interval))
    test_error_rate = np.zeros(len(lambda_interval))
    coefficient_norm = np.zeros(len(lambda_interval))

    ## ----- INNER CROSS VALIDATION FOLD

    k_in = 0
    for train_index2, test_index2 in CV_Inner.split(X_train_outer, y_train_outer):
        h = [1, 3, 5, 7, 9]
        print('Inner cross validation fold {0}/{1}:'.format(k_in + 1, K_Inner))

        # Extract inner training and test set for current CV fold
        X_train_inner, X_test_inner, y_train_inner, y_test_inner = train_test_split(
            X_train_outer, y_train_outer, test_size=.80)

        # ---- BASELINE MODEL
        Error_train_bl_in[k_in] = np.sum(y_train_inner != np.argmax(np.bincount(y_train_inner))) / len(y_train_inner)
        Error_test_bl_in[k_in] = np.sum(y_test_inner != np.argmax(np.bincount(y_test_inner))) / len(y_test_inner)

        # vector = np.vectorize(np.int)
        # vector(y_test_inner.numpy())

        # ---- LOGISTIC REGRESSION CLASSIFICATION

        # Selection of the best lambda for the inner cross validation fold
        for k in range(0, len(lambda_interval)):

            # Creation of the logistic regression model
            mdl = LogisticRegression(penalty='l2', C=1 / lambda_interval[k])

            # Training of the model with the inner partition of the CV
            mdl.fit(X_train_inner, y_train_inner)

            # Prediction of the model on the inner test partitions
            y_train_est = mdl.predict(X_train_inner).T
            y_test_est = mdl.predict(X_test_inner).T  # y_predict

            # Compute the model error for each lambda
            train_error_rate[k] = np.sum(y_train_est != y_train_inner) / len(y_train_inner)
            test_error_rate[k] = np.sum(y_test_est != y_test_inner) / len(y_test_inner)

            w_est = mdl.coef_[0]
            coefficient_norm[k] = np.sqrt(np.sum(w_est ** 2))

        # ---- ARTIFICIAL NEURAL NETWORK FOR CLASSIFICATION
        X_train_inner = torch.Tensor(X_train_outer[train_index2, :])
        y_train_inner = torch.Tensor(y_train_outer[train_index2])
        X_test_inner = torch.Tensor(X_train_outer[test_index2, :])
        y_test_inner = torch.Tensor(y_train_outer[test_index2])

        y_train_inner = y_train_inner.unsqueeze(1)
        error_in = []

        for i, j in enumerate(h):

            # Create a model for each h
            inner_ann = lambda: torch.nn.Sequential(
                torch.nn.Linear(M, h[i]),  # M features to H hidden units
                # 1st transfer function, either Tanh or ReLU:
                torch.nn.Tanh(),
                torch.nn.Linear(h[i], 1),  # H hidden units to 1 output neuron
                torch.nn.Sigmoid()  # Final transfer function
            )
            loss_fn = torch.nn.BCELoss()
            print('\nTesting h: {0}'.format(j))

            # Train the new model
            net, final_loss_in, learning_curve = train_neural_net(inner_ann,
                                                                  loss_fn,
                                                                  X=X_train_inner,
                                                                  y=y_train_inner,
                                                                  n_replicates=1,
                                                                  max_iter=max_iter)

            print('\n\tBest loss: {}\n'.format(final_loss_in))

            # Determine estimated class labels for test set
            y_sigmoid_in = net(X_test_inner)  # activation of final node, i.e. prediction of network
            y_test_est_in = (y_sigmoid_in > .5).type(dtype=torch.uint8)  # threshold output of sigmoidal function
            y_test_in = y_test_inner.type(dtype=torch.uint8)
            # Determine errors and error rate
            e_in = (y_test_est_in != y_test_in)
            error_rate_in = (sum(e_in).type(torch.float) / len(y_test_inner)).data.numpy()
            error_in.append(error_rate_in)  # store error rate for current CV fold
            Error_ANN_h[i] = round(np.mean(error_in), 4)
            # Determine errors and error rate
            # InnerErrors_h[i] = final_loss_in/y_test_inner.shape[0]
            if (Error_ANN_h[i] < Error_ANN_h[i - 1]):
                Besth = j
            else:
                Besth = h[0]

        # Choose the minimum error for given h
        Min_Error_h[k_in] = min(Error_ANN_h)

        # Best h for each inner fold
        Best_h[k_out] = Besth

        k_in += 1

    # COMPUTE THE ERRORS OF THE BEST MODEL FOR THE OUTER FOLD

    # Baseline model
    Error_train_bl_out[k_out] = min(Error_train_bl_in)
    Error_test_bl_out[k_out] = min(Error_test_bl_in)

    p = range(len(y_test_outer))
    y_predict_bl = array.array('i', [])
    for i in p:
        y_predict_bl.append(np.argmax(np.bincount(y_test_outer)))
    len(y_predict_bl)

    # Logistic regression
    Error_test_LR[k_out] = np.min(test_error_rate)
    opt_lambda_idx[k_out] = np.argmin(test_error_rate)
    opt_lambda[k_out] = lambda_interval[int(opt_lambda_idx[k_out])]

    LR = LogisticRegression(penalty='l2', C=1 / opt_lambda[k_out].item())

    LR.fit(X_train_outer, y_train_outer)

    y_predict_LR = LR.predict(X_test_outer).T

    # Neural network for outer fold
    # - Create outer ANN model
    outer_ann = lambda: torch.nn.Sequential(
        torch.nn.Linear(M, int(np.asarray(Best_h[k_out]))),  # M features to H hidden units
        # 1st transfer function, either Tanh or ReLU:
        torch.nn.Tanh(),
        torch.nn.Linear(int(np.asarray(Best_h[k_out])), 1),  # H hidden units to 1 output neuron
        torch.nn.Sigmoid()  # Final transfer function
    )
    loss_fn = torch.nn.BCELoss()

    # - Training data to pytorch
    X_train_out = torch.Tensor(X[train_index, :])
    y_train_out = torch.Tensor(y[train_index])
    X_test_out = torch.Tensor(X[test_index, :])
    y_test_out = torch.Tensor(y[test_index])

    # - Train the net with outer data folds
    y_train_out = y_train_out.unsqueeze(1)
    net, final_loss_out, learning_curve = train_neural_net(outer_ann,
                                                           loss_fn,
                                                           X=X_train_out,
                                                           y=y_train_out,
                                                           n_replicates=1,
                                                           max_iter=max_iter)

    # - Compute the errors of the ANN
    # -- Determine estimated class labels for test set
    y_sigmoid_out = net(X_test_out)  # activation of final node, i.e. prediction of network

    y_test_est_out = (y_sigmoid_out > .5).type(dtype=torch.uint8)  # threshold output of sigmoidal function

    y_predict_ANN = np.concatenate(y_test_est_out.numpy())

    y_test_out = y_test_out.type(dtype=torch.uint8)

    # -- Determine errors and error rate
    e_out = (y_test_est_out != y_test_out)
    error_rate_out = (sum(e_out).type(torch.float) / len(y_test_out)).data.numpy()
    Error_ANN_out.append(error_rate_out)  # store error rate for current CV fold
    Error_ANN_out[k_out] = round(np.mean(error_in), 4)

    k_out += 1
...
ragged_merge_dims_op_test.py
Source:ragged_merge_dims_op_test.py
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for RaggedTensor.merge_dims."""

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

from absl.testing import parameterized

from tensorflow.python.eager import context
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops.ragged import ragged_factory_ops
from tensorflow.python.platform import googletest
from tensorflow.python.util import nest


@test_util.run_all_in_graph_and_eager_modes
class RaggedMergeDimsOpTest(test_util.TensorFlowTestCase,
                            parameterized.TestCase):

  @parameterized.named_parameters([
      {
          'testcase_name': '2DAxis0To1',
          'rt': [[1, 2], [], [3, 4, 5]],
          'outer_axis': 0,
          'inner_axis': 1,
          'expected': [1, 2, 3, 4, 5],
      },
      {
          'testcase_name': '3DAxis0To1',
          'rt': [[[1, 2], [], [3, 4, 5]], [[6], [7, 8], []]],
          'outer_axis': 0,
          'inner_axis': 1,
          'expected': [[1, 2], [], [3, 4, 5], [6], [7, 8], []],
      },
      {
          'testcase_name': '3DAxis1To2',
          'rt': [[[1, 2], [], [3, 4, 5]], [[6], [7, 8], []]],
          'outer_axis': 1,
          'inner_axis': 2,
          'expected': [[1, 2, 3, 4, 5], [6, 7, 8]],
      },
      {
          'testcase_name': '3DAxis0To2',
          'rt': [[[1, 2], [], [3, 4, 5]], [[6], [7, 8], []]],
          'outer_axis': 0,
          'inner_axis': 2,
          'expected': [1, 2, 3, 4, 5, 6, 7, 8],
      },
      {
          'testcase_name': '3DAxis0To1WithDenseValues',
          'rt': [[[1, 2], [3, 4], [5, 6]], [[7, 8]]],
          'ragged_ranks': (1, 2),
          'outer_axis': 0,
          'inner_axis': 1,
          'expected': [[1, 2], [3, 4], [5, 6], [7, 8]],
      },
      {
          'testcase_name': '3DAxis1To2WithDenseValues',
          'rt': [[[1, 2], [3, 4], [5, 6]], [[7, 8]]],
          'ragged_ranks': (1, 2),
          'outer_axis': 1,
          'inner_axis': 2,
          'expected': [[1, 2, 3, 4, 5, 6], [7, 8]],
      },
      {
          'testcase_name': '4DAxis0To1',
          'rt': [[[[1, 2], [], [3, 4, 5]], [[6], [7, 8], []]], [[[9], [0]]]],
          'outer_axis': 0,
          'inner_axis': 1,
          'expected': [[[1, 2], [], [3, 4, 5]], [[6], [7, 8], []], [[9], [0]]],
      },
      {
          'testcase_name': '4DAxis1To2',
          'rt': [[[[1, 2], [], [3, 4, 5]], [[6], [7, 8], []]], [[[9], [0]]]],
          'outer_axis': 1,
          'inner_axis': 2,
          'expected': [[[1, 2], [], [3, 4, 5], [6], [7, 8], []], [[9], [0]]],
      },
      {
          'testcase_name': '4DAxis2To3',
          'rt': [[[[1, 2], [], [3, 4, 5]], [[6], [7, 8], []]], [[[9], [0]]]],
          'outer_axis': 2,
          'inner_axis': 3,
          'expected': [[[1, 2, 3, 4, 5], [6, 7, 8]], [[9, 0]]],
      },
      {
          'testcase_name': '4DAxis1To3',
          'rt': [[[[1, 2], [], [3, 4, 5]], [[6], [7, 8], []]], [[[9], [0]]]],
          'outer_axis': 1,
          'inner_axis': 3,
          'expected': [[1, 2, 3, 4, 5, 6, 7, 8], [9, 0]],
      },
      {
          'testcase_name': '4DAxis1ToNeg1',
          'rt': [[[[1, 2], [], [3, 4, 5]], [[6], [7, 8], []]], [[[9], [0]]]],
          'outer_axis': 1,
          'inner_axis': -1,
          'expected': [[1, 2, 3, 4, 5, 6, 7, 8], [9, 0]],
      },
      {
          'testcase_name': '4DAxis1To2WithDenseValues',
          'rt': [[[[1, 2], [3, 4]], [[5, 6], [7, 8]]], [[[9, 10], [11, 12]]]],
          'ragged_ranks': (1, 2, 3),
          'outer_axis': 1,
          'inner_axis': 2,
          'expected': [[[1, 2], [3, 4], [5, 6], [7, 8]], [[9, 10], [11, 12]]],
      },
      {
          'testcase_name': '4DAxis2To3WithDenseValues',
          'rt': [[[[1, 2], [3, 4]], [[5, 6], [7, 8]]], [[[9, 10], [11, 12]]]],
          'ragged_ranks': (1, 2, 3),
          'outer_axis': 2,
          'inner_axis': 3,
          'expected': [[[1, 2, 3, 4], [5, 6, 7, 8]], [[9, 10, 11, 12]]],
      },
      {
          'testcase_name': '4DAxis1To3WithDenseValues',
          'rt': [[[[1, 2], [3, 4]], [[5, 6], [7, 8]]], [[[9, 10], [11, 12]]]],
          'ragged_ranks': (1, 2, 3),
          'outer_axis': 1,
          'inner_axis': 3,
          'expected': [[1, 2, 3, 4, 5, 6, 7, 8], [9, 10, 11, 12]],
      },
      {
          'testcase_name': '5DAxis2To3WithDenseValues',
          'rt': [[[[[1, 2], [3, 4]]], [[[5, 6], [7, 8]]]],
                 [[[[9, 10], [11, 12]]]]],
          'ragged_ranks': (1, 2, 3, 4),
          'outer_axis': 2,
          'inner_axis': 3,
          'expected': [[[[1, 2], [3, 4]], [[5, 6], [7, 8]]],
                       [[[9, 10], [11, 12]]]],
      },
      {
          'testcase_name': '5DAxis3To4WithDenseValues',
          'rt': [[[[[1, 2], [3, 4]]], [[[5, 6], [7, 8]]]],
                 [[[[9, 10], [11, 12]]]]],
          'ragged_ranks': (1, 2, 3, 4),
          'outer_axis': 3,
          'inner_axis': 4,
          'expected': [[[[1, 2, 3, 4]], [[5, 6, 7, 8]]], [[[9, 10, 11, 12]]]],
      },
      {
          'testcase_name': '5DAxis1To3WithDenseValues',
          'rt': [[[[[1, 2], [3, 4]]], [[[5, 6], [7, 8]]]],
                 [[[[9, 10], [11, 12]]]]],
          'ragged_ranks': (1, 2, 3, 4),
          'outer_axis': 1,
          'inner_axis': 3,
          'expected': [[[1, 2], [3, 4], [5, 6], [7, 8]], [[9, 10], [11, 12]]],
      },
  ])  # pyformat: disable
  def testRaggedMergeDims(self,
                          rt,
                          outer_axis,
                          inner_axis,
                          expected,
                          ragged_ranks=(None,)):
    for ragged_rank in ragged_ranks:
      x = ragged_factory_ops.constant(rt, ragged_rank=ragged_rank)
      # Check basic behavior.
      actual = x.merge_dims(outer_axis, inner_axis)
      self.assertAllEqual(expected, actual)
      if outer_axis >= 0 and inner_axis >= 0:
        self.assertEqual(actual.shape.rank,
                         x.shape.rank - (inner_axis - outer_axis))
      # Check behavior with negative axis.
      if outer_axis >= 0 and inner_axis >= 0:
        actual_with_neg_axis = x.merge_dims(outer_axis - x.shape.rank,
                                            inner_axis - x.shape.rank)
        self.assertAllEqual(expected, actual_with_neg_axis)
      # Check behavior with placeholder input (no shape info).
      if (not context.executing_eagerly() and outer_axis >= 0 and
          inner_axis >= 0):
        x_with_placeholders = nest.map_structure(
            lambda t: array_ops.placeholder_with_default(t, None),
            x,
            expand_composites=True)
        actual_with_placeholders = x_with_placeholders.merge_dims(
            outer_axis, inner_axis)
        self.assertAllEqual(expected, actual_with_placeholders)

  @parameterized.parameters([
      {
          'rt': [[1]],
          'outer_axis': {},
          'inner_axis': 1,
          'exception': TypeError,
          'message': 'outer_axis must be an int',
      },
      {
          'rt': [[1]],
          'outer_axis': 1,
          'inner_axis': {},
          'exception': TypeError,
          'message': 'inner_axis must be an int',
      },
      {
          'rt': [[1]],
          'outer_axis': 1,
          'inner_axis': 3,
          'exception': ValueError,
          'message': 'inner_axis=3 out of bounds: expected -2<=inner_axis<2',
      },
      {
          'rt': [[1]],
          'outer_axis': 1,
          'inner_axis': -3,
          'exception': ValueError,
          'message': 'inner_axis=-3 out of bounds: expected -2<=inner_axis<2',
      },
      {
          'rt': [[1]],
          'outer_axis': 0,
          'inner_axis': 0,
          'exception': ValueError,
          'message': 'Expected outer_axis .* to be less than inner_axis .*',
      },
      {
          'rt': [[1]],
          'outer_axis': 1,
          'inner_axis': 0,
          'exception': ValueError,
          'message': 'Expected outer_axis .* to be less than inner_axis .*',
      },
      {
          'rt': [[1]],
          'outer_axis': -1,
          'inner_axis': -2,
          'exception': ValueError,
          'message': 'Expected outer_axis .* to be less than inner_axis .*',
      },
      {
          'rt': [[1]],
          'outer_axis': 1,
          'inner_axis': -1,
          'exception': ValueError,
          'message': 'Expected outer_axis .* to be less than inner_axis .*',
      },
  ])  # pyformat: disable
  def testRaggedMergeDimsError(self,
                               rt,
                               outer_axis,
                               inner_axis,
                               exception,
                               message=None,
                               ragged_rank=None):
    x = ragged_factory_ops.constant(rt, ragged_rank=ragged_rank)
    with self.assertRaisesRegexp(exception, message):
      self.evaluate(x.merge_dims(outer_axis, inner_axis))


if __name__ == '__main__':
...
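The same behavior the parameterized cases above encode, reproduced through the public API (assumes TensorFlow 2.x, where RaggedTensor.merge_dims is available):

import tensorflow as tf

rt = tf.ragged.constant([[[1, 2], [], [3, 4, 5]], [[6], [7, 8], []]])
# merging the two outer dims concatenates the rows
print(rt.merge_dims(0, 1))  # <tf.RaggedTensor [[1, 2], [], [3, 4, 5], [6], [7, 8], []]>
# merging the two inner dims flattens each row
print(rt.merge_dims(1, 2))  # <tf.RaggedTensor [[1, 2, 3, 4, 5], [6, 7, 8]]>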
binding_test.py
Source:binding_test.py
...
        instance = object()
        binding = Binding(object)
        instance_binding = binding.__get__(instance, None)
        self.assertEqual(instance_binding.instance_args, [instance])

    def test_call_inner(self):
        self.inner_args = []
        self.inner_kwargs = {}

        def inner(*args, **kwargs):
            self.inner_args = args
            self.inner_kwargs = kwargs

        binding = Binding(inner, instance_args=['test'])
        try:
            binding('arg', v=2)
        except signals.TestSignal:
            pass
        self.assertEqual(self.inner_args, ('test', 'arg'))
        self.assertEqual(self.inner_kwargs, {'v': 2})

    def test_call_inner_pass_on_none(self):
        def inner(*args, **kwargs):
            pass

        binding = Binding(inner)
        try:
            binding()
        except signals.TestPass:
            pass

    def test_call_inner_pass_on_true(self):
        def inner(*args, **kwargs):
            return True

        binding = Binding(inner, instance_args=['test'])
        try:
            binding()
        except signals.TestPass:
            pass

    def test_call_inner_fail_on_false(self):
        def inner(*_, **__):
            return False

        binding = Binding(inner, instance_args=['test'])
        try:
            binding()
        except signals.TestFailure:
            pass

    def test_call_inner_pass_through_signal(self):
        def inner(*_, **__):
            raise signals.TestPass('DETAILS')

        binding = Binding(inner, instance_args=['test'])
        try:
            binding()
        except signals.TestPass as signal:
            self.assertEqual(signal.details, 'DETAILS')

    def test_arg_modifier(self):
        self.inner_args = []
        self.inner_kwargs = {}

        def arg_modifier(_, *args, **kwargs):
            new_args = list(args) + ['new arg']
            new_kwargs = dict(kwargs, kw='value')
            return new_args, new_kwargs

        def inner(*args, **kwargs):
            self.inner_args = args
            self.inner_kwargs = kwargs

        binding = Binding(inner, arg_modifier=arg_modifier)
        try:
            binding('arg', v=2)
        except signals.TestSignal:
            pass
        self.assertEqual(self.inner_args, ('arg', 'new arg'))
        self.assertEqual(self.inner_kwargs, {'v': 2, 'kw': 'value'})

    def test_call_before(self):
        self.has_called_before = False

        def before(*_, **__):
            self.has_called_before = True

        def inner(*_, **__):
            self.assertTrue(self.has_called_before)

        binding = Binding(inner, before=before)
        try:
            binding()
        except signals.TestSignal:
            pass
        self.assertTrue(self.has_called_before)

    def test_call_after(self):
        self.has_called_after = False

        def after(*_, **__):
            self.has_called_after = True

        def inner(*_, **__):
            self.assertFalse(self.has_called_after)

        binding = Binding(inner, after=after)
        try:
            binding()
        except signals.TestSignal:
            pass
        self.assertTrue(self.has_called_after)

    def test_signal_modify(self):
        def inner(*_, **__):
            raise signals.TestPass('DETAILS')

        def signal_modifier(_, signal, *__, **___):
            raise signals.TestFailure(signal.details)

        binding = Binding(inner, signal_modifier=signal_modifier)
        try:
            binding()
        except signals.TestFailure as signal:
            self.assertEqual(signal.details, 'DETAILS')

    def test_inner_attr_proxy_test(self):
        def some_func():
            pass

        inner = some_func
        inner.x = 10
        binding = Binding(inner)
...
5. 파리 퇴치.py
Source:5. 파리 퇴치.py
1"""2ì²ììë 4ì¤ ë°ë³µë¬¸ì¼ë¡ 구íëë° 200ms ì ë ëìë¤.3ë¤ë¥¸ ì¬ëë¤ë³´ë¤ ë§ì´ ëì¨ ê² ê°ì DP ë¹ì·íê² êµ¬íëë°, ê·¸ëë 170ms ì ëìë¤.4í¹ìë ì¶ì´ í¨ì ëì 130ms ìì¤ë¥¼ ë´¤ëë°, ì¤íë ¤ ì²ì 구ííë ê±°ë ëê°ìë¤.5ë¨ì§ 4ì¤ ë°ë³µë¬¸ ëì ì í¨ì를 ì¼ë¤ë ì ì´ ë¬ëë¤.6: ì무ëë 4ì¤ for ë³´ë¤ë7í¨ì를 ì´ì©í´ for ì를 ì¤ì´ë ê² ì¢ì ë¯íë¤.8"""9from pathlib import Path10import sys11parent_dir = Path(__file__).parent12file_name = Path(__file__).stem13sys.stdin = open(f"{parent_dir}\{file_name} input.txt")14input = sys.stdin.readline15T = int(input())16for test_case in range(1, T + 1):17 length, swatter_length = map(int, input().split())18 flies = [[int(n) for n in input().split()] for _ in range(length)]19 inner_sum_list = []20 inner_sum = 021 for row in range(swatter_length):22 for col in range(swatter_length):23 inner_sum += flies[row][col]24 inner_sum_list.append(inner_sum)25 for row in range(length - swatter_length + 1):26 for col in range(length - swatter_length + 1):27 # ì´ì´ ë°ë ê²½ì°28 if col > 0:29 inner_sum = inner_sum_list[-1]30 # ë°ë ¤ë ì´ì ìë ê°ë¤ì ì§ìì£¼ê³ 31 # ìë¡ ë¤ì´ì¨ ì´ì ìë ê°ë¤ì ëí´ì¤32 for inner_row in range(row, row + swatter_length):33 inner_sum -= flies[inner_row][col - 1]34 inner_sum += flies[inner_row][col + swatter_length - 1]35 inner_sum_list.append(inner_sum)36 # íì´ ë°ë ê²½ì°37 elif row > 0:38 # ì´ì íì 기ì¤ì¼ë¡ íë ìì í© ê°ì ¸ì´39 inner_sum = inner_sum_list[-(length - swatter_length) - 1]40 # ì´ì íì ìë ê°ë¤ì ì§ìì£¼ê³ 41 # ìë¡ ë¤ì´ì¨ íì ìë ê°ë¤ì ëí´ì¤42 for inner_col in range(col, col + swatter_length):43 inner_sum -= flies[row - 1][inner_col]44 inner_sum += flies[row + swatter_length - 1][inner_col]45 inner_sum_list.append(inner_sum)46 elif row == 0 and col == 0:47 continue48 else:49 pass50 51 max_inner_sum = 052 for inner_sum in inner_sum_list:53 max_inner_sum = inner_sum if inner_sum > max_inner_sum else max_inner_sum54 print("#{} {}".format(test_case, max_inner_sum))55"""56T = int(input())57for test_case in range(1, T + 1):58 length, swatter_length = map(int, input().split())59 flies = [list(map(int, input().split())) for _ in range(length)]60 max_inner_sum = 061 for row in range(length - swatter_length + 1):62 for col in range(length - swatter_length + 1):63 inner_sum = 064 for inner_row in range(row, row + swatter_length):65 for inner_col in range(col, col + swatter_length):66 inner_sum += flies[inner_row][inner_col]67 max_inner_sum = inner_sum if inner_sum > max_inner_sum else max_inner_sum68 69 print("#{} {}".format(test_case, max_inner_sum))...
nested_workaround_runme.py
Source:nested_workaround_runme.py
from nested_workaround import *

inner = Inner(5)
outer = Outer()
newInner = outer.doubleInnerValue(inner)
if newInner.getValue() != 10:
    raise RuntimeError

outer = Outer()
inner = outer.createInner(3)
newInner = outer.doubleInnerValue(inner)
if outer.getInnerValue(newInner) != 6:...
Using AI Code Generation
import { withKnobs, text, boolean, color } from '@storybook/addon-knobs';
import { storiesOf } from '@storybook/react';
import { action } from '@storybook/addon-actions';
import { linkTo } from '@storybook/addon-links';
import React from 'react';
import { Button, Welcome } from '@storybook/react/demo';

storiesOf('Welcome', module).add('to Storybook', () => (
  <Welcome showApp={linkTo('Button')} />
));

storiesOf('Button', module)
  .addDecorator(withKnobs)
  .add('with text', () => (
    <Button onClick={action('clicked')}>{text('Label', 'Hello Button')}</Button>
  ))
  .add('with some emoji', () => (
    <Button onClick={action('clicked')}>
      {/* emoji children assumed; the snippet was truncated at this point */}
      <span role="img" aria-label="so cool">😀 😎 👍 💯</span>
    </Button>
  ))
  .add('with knobs', () => (
    <Button
      disabled={boolean('Disabled', false)}
      onClick={action('clicked')}
      style={{ backgroundColor: color('Background', 'red') }}
    >
      {text('Label', 'Hello Button')}
    </Button>
  ));
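For reference, `@storybook/addon-knobs` has since been deprecated in favor of args and controls. A minimal sketch of the same Button story in Component Story Format, assuming Storybook 6+ and assuming the demo `Button` forwards `style` and `disabled` props:

// Button.stories.js - CSF equivalent of the knobs story; args replace the knobs.
import React from 'react';
import { Button } from '@storybook/react/demo';

export default {
  title: 'Button',
  component: Button,
  argTypes: { backgroundColor: { control: 'color' } }, // replaces color('Background', ...)
};

// Each named export is a story; the args object replaces text()/boolean() knobs.
export const WithText = ({ label, backgroundColor, ...rest }) => (
  <Button style={{ backgroundColor }} {...rest}>
    {label}
  </Button>
);
WithText.args = { label: 'Hello Button', disabled: false, backgroundColor: 'red' };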
Using AI Code Generation
// _storyStore is an internal, underscore-prefixed property of @storybook/react,
// not a public API, and it may be absent in newer Storybook versions.
const storybookRoot = require('@storybook/react')._storyStore;
console.log(storybookRoot);

// Node caches modules, so a second require() returns the very same store instance.
const storybookRoot2 = require('@storybook/react')._storyStore;
console.log(storybookRoot2);
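A quick sketch of why every one of those variables points at the same object: Node evaluates a module once and then serves it from its module cache, so repeated require() calls hand back identical references. (`_storyStore` remains an internal field and may be undefined depending on the Storybook version.)

// Node's module cache: the module body runs once; later require() calls
// return the cached exports object.
const a = require('@storybook/react');
const b = require('@storybook/react');

console.log(a === b); // true - same cached module object
console.log(a._storyStore === b._storyStore); // true (or both undefined on newer versions)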
Using AI Code Generation
// consumer (file boundaries below are inferred from the imports)
import { root } from 'storybook-root';
import { inner } from 'storybook-root/inner';
root();
inner();

// root.js
import { inner } from './inner';
export const root = () => {
  inner();
};

// inner.js
export const inner = () => {
  console.log('inner');
};

// inner.test.js
import { inner } from './inner';
jest.mock('./inner');
describe('inner', () => {
  it('should log inner', () => {
    inner();
    expect(inner).toHaveBeenCalled();
  });
});

// root.test.js
import { root } from 'storybook-root';
import { inner } from 'storybook-root/inner';
jest.mock('storybook-root/inner');
describe('root', () => {
  it('should log root', () => {
    root();
    expect(inner).toHaveBeenCalled();
  });
});
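The jest.mock calls above are hoisted to the top of the file at runtime and swap the real module for an auto-mock, so every imported function arrives as a jest.fn(). A self-contained sketch of what that enables, reusing the hypothetical ./inner module from the snippet:

import { inner } from './inner';

// Hoisted above the import at runtime: ./inner's exports become jest.fn() mocks.
jest.mock('./inner');

test('auto-mocked inner records calls and can be stubbed', () => {
  inner();
  expect(inner).toHaveBeenCalledTimes(1);

  // Auto-mocks are plain jest.fn()s, so behaviour can be stubbed per test.
  inner.mockReturnValue('stubbed');
  expect(inner()).toBe('stubbed');
});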
Using AI Code Generation
// Test.js
import React from 'react';
import { StorybookRoot, Inner } from 'storybook-root';

export default class Test extends React.Component {
  render() {
    return (
      // JSX stripped in the source; nesting Inner inside StorybookRoot is assumed
      <StorybookRoot>
        <Inner />
      </StorybookRoot>
    );
  }
}

// StorybookRoot.js
import React from 'react';

export class StorybookRoot extends React.Component {
  render() {
    return <div>{this.props.children}</div>;
  }
}

// Inner.js
import React from 'react';

export class Inner extends React.Component {
  render() {
    return <div>inner</div>;
  }
}

// StorybookRoot.stories.js
import React from 'react';
import { storiesOf } from '@storybook/react';
import { StorybookRoot, Inner } from 'storybook-root';

storiesOf('StorybookRoot', module).add('Test', () => (
  <StorybookRoot>
    <Inner />
  </StorybookRoot>
));

// StorybookRoot.test.js (truncated in the source)
import React from 'react';
import { shallow } from 'enzyme';
import { Storybook...
Using AI Code Generation
// consumer
import { root } from 'storybook-root';
root.inner();

// root.js (object body stripped in the source; re-exporting inner is assumed)
import inner from './inner';
export const root = {
  inner,
};

// inner.js
export default function inner() {
  console.log('inner');
}
Using AI Code Generation
// Inner.stories.js
import React from 'react';
import { root } from 'storybook-root';
import { storiesOf } from '@storybook/react';

const Inner = root.inner;

storiesOf('Inner', module)
  .add('inner', () => (
    <Inner />
  ));

// root.js (object body stripped in the source; exposing Inner as root.inner is assumed)
import Inner from './Inner';
export const root = {
  inner: Inner,
};

// Inner.js (markup stripped in the source; a plain div is assumed)
import React from 'react';

const Inner = () => (
  <div>inner</div>
);

export default Inner;
Using AI Code Generation
// MyComponent.test.js
import React from 'react';
import { render } from '@testing-library/react';
import { createRoot } from 'storybook-root';
import MyComponent from './MyComponent'; // import assumed; MyComponent is not defined in the snippet

const root = createRoot();

describe('MyComponent', () => {
  it('renders without crashing', () => {
    render(<MyComponent />, { wrapper: root.wrapper });
  });
});

// .storybook/preview.js (file name inferred)
import { createRoot } from 'storybook-root';
import { addDecorator } from '@storybook/react';

const root = createRoot();
addDecorator(root.decorator);

// .storybook/manager.js (file name inferred)
import { createRoot } from 'storybook-root';
import { addons } from '@storybook/addons';

const root = createRoot();
addons.setConfig({
  // options stripped in the source
});

MIT © davidbentley
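The `wrapper` option passed to render above is standard React Testing Library: it wraps the UI under test in a component, which is how a shared root or provider tree gets applied in tests. A minimal sketch with a stand-in wrapper (the `Providers` component here is hypothetical, playing the role of root.wrapper):

import React from 'react';
import { render, screen } from '@testing-library/react';

// Hypothetical stand-in for root.wrapper: any component that renders children.
const Providers = ({ children }) => <div data-testid="providers">{children}</div>;

test('render() mounts the UI inside the wrapper component', () => {
  render(<span>content</span>, { wrapper: Providers });
  // The content ends up nested inside the wrapper's markup.
  expect(screen.getByTestId('providers').textContent).toBe('content');
});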