How to use ps method in fMBT

Best Python code snippets using fMBT_python

device_setter.py

Source: device_setter.py (GitHub)


# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Device function for replicated training."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import six

from tensorflow.core.framework import node_def_pb2
from tensorflow.python.framework import device as pydev
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.training import server_lib
from tensorflow.python.util.tf_export import tf_export

# This is a tuple of PS ops used by tf.estimator.Estimator which should work in
# almost all of cases.
STANDARD_PS_OPS = ("Variable", "VariableV2", "AutoReloadVariable",
                   "MutableHashTable", "MutableHashTableV2",
                   "MutableHashTableOfTensors", "MutableHashTableOfTensorsV2",
                   "MutableDenseHashTable", "MutableDenseHashTableV2",
                   "VarHandleOp", "BoostedTreesEnsembleResourceHandleOp")


class _RoundRobinStrategy(object):
  """Returns the next ps task index for placement in round-robin order.

  This class is not to be used directly by users. See instead
  `replica_device_setter()` below.
  """

  def __init__(self, num_tasks):
    """Create a new `_RoundRobinStrategy`.

    Args:
      num_tasks: Number of ps tasks to cycle among.
    """
    self._num_tasks = num_tasks
    self._next_task = 0

  def __call__(self, unused_op):
    """Choose a ps task index for the given `Operation`.

    Args:
      unused_op: An `Operation` to be placed on ps.

    Returns:
      The next ps task index to use for the `Operation`. Returns the next
      index, in the range `[offset, offset + num_tasks)`.
    """
    task = self._next_task
    self._next_task = (self._next_task + 1) % self._num_tasks
    return task


class _ReplicaDeviceChooser(object):
  """Class to choose devices for Ops in a replicated training setup.

  This class is not to be used directly by users. See instead
  `replica_device_setter()` below.
  """

  def __init__(self, ps_tasks, ps_device, worker_device, merge_devices, ps_ops,
               ps_strategy):
    """Create a new `_ReplicaDeviceChooser`.

    Args:
      ps_tasks: Number of tasks in the `ps` job.
      ps_device: String. Name of the `ps` job.
      worker_device: String. Name of the `worker` job.
      merge_devices: Boolean. Set to True to allow merging of device specs.
      ps_ops: List of strings representing `Operation` types that need to be
        placed on `ps` devices.
      ps_strategy: A callable invoked for every ps `Operation` (i.e. matched by
        `ps_ops`), that takes the `Operation` and returns the ps task index to
        use.
    """
    self._ps_tasks = ps_tasks
    self._ps_device = ps_device
    self._worker_device = worker_device
    self._merge_devices = merge_devices
    self._ps_ops = ps_ops
    self._ps_strategy = ps_strategy

  def device_function(self, op):
    """Choose a device for `op`.

    Args:
      op: an `Operation`.

    Returns:
      The device to use for the `Operation`.
    """
    # If we don't return early here, either merge_devices is True, or op.device
    # is empty (in which case merging is a no-op). So we can always merge below.
    if not self._merge_devices and op.device:
      return op.device

    current_device = pydev.DeviceSpec.from_string(op.device or "")

    # The ps_device will be used for specified ops (ps_ops) whenever it is
    # present and ps_tasks is non-zero. However, its task number will only be
    # set (using ps_strategy) if there is a job field in ps_device that won't be
    # changed by the job field (if present) in current_device.
    node_def = op if isinstance(op, node_def_pb2.NodeDef) else op.node_def
    if self._ps_tasks and self._ps_device and node_def.op in self._ps_ops:
      ps_device = pydev.DeviceSpec.from_string(self._ps_device)

      current_job, ps_job = current_device.job, ps_device.job
      if ps_job and (not current_job or current_job == ps_job):
        ps_device.task = self._ps_strategy(op)

      ps_device.merge_from(current_device)
      return ps_device.to_string()

    worker_device = pydev.DeviceSpec.from_string(self._worker_device or "")
    worker_device.merge_from(current_device)
    return worker_device.to_string()


@tf_export("train.replica_device_setter")
def replica_device_setter(ps_tasks=0, ps_device="/job:ps",
                          worker_device="/job:worker", merge_devices=True,
                          cluster=None, ps_ops=None, ps_strategy=None):
  """Return a `device function` to use when building a Graph for replicas.

  Device Functions are used in `with tf.device(device_function):` statement to
  automatically assign devices to `Operation` objects as they are constructed,
  Device constraints are added from the inner-most context first, working
  outwards. The merging behavior adds constraints to fields that are yet unset
  by a more inner context. Currently the fields are (job, task, cpu/gpu).

  If `cluster` is `None`, and `ps_tasks` is 0, the returned function is a no-op.
  Otherwise, the value of `ps_tasks` is derived from `cluster`.

  By default, only Variable ops are placed on ps tasks, and the placement
  strategy is round-robin over all ps tasks. A custom `ps_strategy` may be used
  to do more intelligent placement, such as
  `tf.contrib.training.GreedyLoadBalancingStrategy`.

  For example,

  ```python
  # To build a cluster with two ps jobs on hosts ps0 and ps1, and 3 worker
  # jobs on hosts worker0, worker1 and worker2.
  cluster_spec = {
      "ps": ["ps0:2222", "ps1:2222"],
      "worker": ["worker0:2222", "worker1:2222", "worker2:2222"]}
  with tf.device(tf.train.replica_device_setter(cluster=cluster_spec)):
    # Build your graph
    v1 = tf.Variable(...)  # assigned to /job:ps/task:0
    v2 = tf.Variable(...)  # assigned to /job:ps/task:1
    v3 = tf.Variable(...)  # assigned to /job:ps/task:0
  # Run compute
  ```

  Args:
    ps_tasks: Number of tasks in the `ps` job. Ignored if `cluster` is
      provided.
    ps_device: String. Device of the `ps` job. If empty no `ps` job is used.
      Defaults to `ps`.
    worker_device: String. Device of the `worker` job. If empty no `worker`
      job is used.
    merge_devices: `Boolean`. If `True`, merges or only sets a device if the
      device constraint is completely unset. merges device specification rather
      than overriding them.
    cluster: `ClusterDef` proto or `ClusterSpec`.
    ps_ops: List of strings representing `Operation` types that need to be
      placed on `ps` devices. If `None`, defaults to `STANDARD_PS_OPS`.
    ps_strategy: A callable invoked for every ps `Operation` (i.e. matched by
      `ps_ops`), that takes the `Operation` and returns the ps task index to
      use. If `None`, defaults to a round-robin strategy across all `ps`
      devices.

  Returns:
    A function to pass to `tf.device()`.

  Raises:
    TypeError if `cluster` is not a dictionary or `ClusterDef` protocol buffer,
    or if `ps_strategy` is provided but not a callable.
  """
  if cluster is not None:
    if isinstance(cluster, server_lib.ClusterSpec):
      cluster_spec = cluster.as_dict()
    else:
      cluster_spec = server_lib.ClusterSpec(cluster).as_dict()
    # Get ps_job_name from ps_device by stripping "/job:".
    ps_job_name = pydev.DeviceSpec.from_string(ps_device).job
    if ps_job_name not in cluster_spec or cluster_spec[ps_job_name] is None:
      return None
    ps_tasks = len(cluster_spec[ps_job_name])

  if ps_tasks == 0:
    return None

  if ps_ops is None:
    # TODO(sherrym): Variables in the LOCAL_VARIABLES collection should not be
    # placed in the parameter server.
    ps_ops = list(STANDARD_PS_OPS)

  if not merge_devices:
    logging.warning(
        "DEPRECATION: It is recommended to set merge_devices=true in "
        "replica_device_setter")
  if ps_strategy is None:
    ps_strategy = _RoundRobinStrategy(ps_tasks)
  if not six.callable(ps_strategy):
    raise TypeError("ps_strategy must be callable")
  chooser = _ReplicaDeviceChooser(
      ps_tasks, ps_device, worker_device, merge_devices, ps_ops, ps_strategy)
  ...
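The snippet ends by constructing a `_ReplicaDeviceChooser` whose `device_function` is what `tf.device()` receives. Below is a minimal usage sketch in TF 1.x graph mode, adapted from the docstring example above; the host names and ports are placeholders, and the placements shown in the comments follow the round-robin strategy.

import tensorflow as tf

# Placeholder cluster: two ps tasks and three worker tasks.
cluster_spec = {
    "ps": ["ps0:2222", "ps1:2222"],
    "worker": ["worker0:2222", "worker1:2222", "worker2:2222"],
}

# replica_device_setter derives ps_tasks=2 from the cluster and returns
# _ReplicaDeviceChooser.device_function, which tf.device() calls per op.
setter = tf.train.replica_device_setter(cluster=cluster_spec)

with tf.device(setter):
  # Variable ops match STANDARD_PS_OPS, so _RoundRobinStrategy alternates
  # them between /job:ps/task:0 and /job:ps/task:1.
  v1 = tf.Variable(tf.zeros([10]), name="v1")  # -> /job:ps/task:0
  v2 = tf.Variable(tf.zeros([10]), name="v2")  # -> /job:ps/task:1
  # Non-ps ops fall through to the worker device.
  total = tf.add(v1, v2, name="total")         # -> /job:worker

print(v1.device, v2.device, total.device)

Because `merge_devices=True` by default, any device fields already set by an inner `tf.device` context are preserved and only the unset fields (job, task) are filled in.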


config.py

Source: config.py (GitHub)


#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
@Author  : Ryan Fan
@E-Mail  : ryanfan0528@gmail.com
@Version : v1.0
"""
import os
import time
import sys
import re
import datetime

import pandas as pd
import numpy as np

TRAIN_FILE = "data/train.csv"
TEST_FILE = "data/test.csv"

SUB_DIR = "output"

NUM_SPLITS = 3
RANDOM_SEED = 2017

# types of columns of the dataset dataframe
CATEGORICAL_COLS = [
    'ps_ind_02_cat', 'ps_ind_04_cat', 'ps_ind_05_cat',
    'ps_car_01_cat', 'ps_car_02_cat', 'ps_car_03_cat',
    'ps_car_04_cat', 'ps_car_05_cat', 'ps_car_06_cat',
    'ps_car_07_cat', 'ps_car_08_cat', 'ps_car_09_cat',
    'ps_car_10_cat', 'ps_car_11_cat',
]

NUMERIC_COLS = [
    # # binary
    # "ps_ind_06_bin", "ps_ind_07_bin", "ps_ind_08_bin",
    # "ps_ind_09_bin", "ps_ind_10_bin", "ps_ind_11_bin",
    # "ps_ind_12_bin", "ps_ind_13_bin", "ps_ind_16_bin",
    # "ps_ind_17_bin", "ps_ind_18_bin",
    # "ps_calc_15_bin", "ps_calc_16_bin", "ps_calc_17_bin",
    # "ps_calc_18_bin", "ps_calc_19_bin", "ps_calc_20_bin",
    # numeric
    "ps_reg_01", "ps_reg_02", "ps_reg_03",
    "ps_car_12", "ps_car_13", "ps_car_14", "ps_car_15",
    # feature engineering
    "missing_feat", "ps_car_13_x_ps_reg_03",
]

IGNORE_COLS = [
    "id", "target",
    "ps_calc_01", "ps_calc_02", "ps_calc_03", "ps_calc_04",
    "ps_calc_05", "ps_calc_06", "ps_calc_07", "ps_calc_08",
    "ps_calc_09", "ps_calc_10", "ps_calc_11", "ps_calc_12",
    "ps_calc_13", "ps_calc_14",
    "ps_calc_15_bin", "ps_calc_16_bin", "ps_calc_17_bin",
    "ps_calc_18_bin", "ps_calc_19_bin", "ps_calc_20_bin"
]
...
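This module is pure configuration: file paths plus column groupings for the Porto Seguro `ps_*` features. As a hedged sketch of how downstream code might consume these lists (the loading and encoding steps below are illustrative, not part of the snippet; it assumes the module is importable as `config`):

import pandas as pd

import config

# Illustrative only: load the raw training data declared in config.py.
df = pd.read_csv(config.TRAIN_FILE)

# Keep every column that is not explicitly ignored.
feature_cols = [c for c in df.columns if c not in config.IGNORE_COLS]

# Split the surviving features by their declared type. Engineered names in
# NUMERIC_COLS (missing_feat, ps_car_13_x_ps_reg_03) only exist after feature
# engineering, so intersecting with df.columns skips them automatically.
cat_cols = [c for c in feature_cols if c in config.CATEGORICAL_COLS]
num_cols = [c for c in feature_cols if c in config.NUMERIC_COLS]

# One-hot encode the categoricals and keep the numerics as-is.
X = pd.concat([pd.get_dummies(df[cat_cols].astype(str)), df[num_cols]],
              axis=1)
y = df["target"]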


Automation Testing Tutorials

Learn to execute automation testing from scratch with the LambdaTest Learning Hub, right from setting up the prerequisites and running your first automation test to following best practices and diving deeper into advanced test scenarios. The LambdaTest Learning Hubs compile step-by-step guides to help you become proficient with different test automation frameworks such as Selenium, Cypress, and TestNG.


YouTube

You can also refer to the video tutorials on the LambdaTest YouTube channel for step-by-step demonstrations from industry experts.

Run fMBT automation tests on the LambdaTest cloud grid

Perform automation testing on 3000+ real desktop and mobile devices online.

Try LambdaTest now!

Get 100 minutes of automation testing FREE!

Next-Gen App & Browser Testing Cloud
