Best Python code snippet using tox_python
file_io_test.py
Source: file_io_test.py

# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for binary data file utilities."""

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import contextlib
import multiprocessing

# pylint: disable=wrong-import-order
import numpy as np
import pandas as pd
import tensorflow as tf
# pylint: enable=wrong-import-order

from official.utils.data import file_io

_RAW_ROW = "raw_row"
_DUMMY_COL = "column_0"
_DUMMY_VEC_COL = "column_1"
_DUMMY_VEC_LEN = 4

_ROWS_PER_CORE = 4

_TEST_CASES = [
    # One batch of one
    dict(row_count=1, cpu_count=1, expected=[
        [[0]]
    ]),
    dict(row_count=10, cpu_count=1, expected=[
        [[0, 1, 2, 3]], [[4, 5, 6, 7]], [[8, 9]]
    ]),
    dict(row_count=21, cpu_count=1, expected=[
        [[0, 1, 2, 3]], [[4, 5, 6, 7]], [[8, 9, 10, 11]],
        [[12, 13, 14, 15]], [[16, 17, 18, 19]], [[20]]
    ]),
    dict(row_count=1, cpu_count=4, expected=[
        [[0]]
    ]),
    dict(row_count=10, cpu_count=4, expected=[
        [[0, 1], [2, 3, 4], [5, 6], [7, 8, 9]]
    ]),
    dict(row_count=21, cpu_count=4, expected=[
        [[0, 1, 2, 3], [4, 5, 6, 7], [8, 9, 10, 11], [12, 13, 14, 15]],
        [[16], [17], [18], [19, 20]]
    ]),
    dict(row_count=10, cpu_count=8, expected=[
        [[0], [1], [2], [3, 4], [5], [6], [7], [8, 9]]
    ]),
    dict(row_count=40, cpu_count=8, expected=[
        [[0, 1, 2, 3], [4, 5, 6, 7], [8, 9, 10, 11], [12, 13, 14, 15],
         [16, 17, 18, 19], [20, 21, 22, 23], [24, 25, 26, 27],
         [28, 29, 30, 31]],
        [[32], [33], [34], [35], [36], [37], [38], [39]]
    ]),
]

_FEATURE_MAP = {
    _RAW_ROW: tf.FixedLenFeature([1], dtype=tf.int64),
    _DUMMY_COL: tf.FixedLenFeature([1], dtype=tf.int64),
    _DUMMY_VEC_COL: tf.FixedLenFeature([_DUMMY_VEC_LEN], dtype=tf.float32)
}


@contextlib.contextmanager
def fixed_core_count(cpu_count):
  """Override CPU count.

  file_io.py uses the cpu_count function to scale to the size of the instance.
  However, this is not desirable for testing because it can make the test
  flaky. Instead, this context manager fixes the count for more robust testing.

  Args:
    cpu_count: How many cores multiprocessing claims to have.

  Yields:
    Nothing. (for context manager only)
  """
  old_count_fn = multiprocessing.cpu_count
  multiprocessing.cpu_count = lambda: cpu_count
  yield
  multiprocessing.cpu_count = old_count_fn


class BaseTest(tf.test.TestCase):

  def _test_sharding(self, row_count, cpu_count, expected):
    df = pd.DataFrame({_DUMMY_COL: list(range(row_count))})
    with fixed_core_count(cpu_count):
      shards = list(file_io.iter_shard_dataframe(df, _ROWS_PER_CORE))
    result = [[j[_DUMMY_COL].tolist() for j in i] for i in shards]
    self.assertAllEqual(expected, result)

  def test_tiny_rows_low_core(self):
    self._test_sharding(**_TEST_CASES[0])

  def test_small_rows_low_core(self):
    self._test_sharding(**_TEST_CASES[1])

  def test_large_rows_low_core(self):
    self._test_sharding(**_TEST_CASES[2])

  def test_tiny_rows_medium_core(self):
    self._test_sharding(**_TEST_CASES[3])

  def test_small_rows_medium_core(self):
    self._test_sharding(**_TEST_CASES[4])

  def test_large_rows_medium_core(self):
    self._test_sharding(**_TEST_CASES[5])

  def test_small_rows_large_core(self):
    self._test_sharding(**_TEST_CASES[6])

  def test_large_rows_large_core(self):
    self._test_sharding(**_TEST_CASES[7])

  def _serialize_deserialize(self, num_cores=1, num_rows=20):
    np.random.seed(1)
    df = pd.DataFrame({
        # Serialization order is only deterministic for num_cores=1. raw_row is
        # used in validation after the deserialization.
        _RAW_ROW: np.array(range(num_rows), dtype=np.int64),
        _DUMMY_COL: np.random.randint(0, 35, size=(num_rows,)),
        _DUMMY_VEC_COL: [
            np.array([np.random.random() for _ in range(_DUMMY_VEC_LEN)])
            for i in range(num_rows)  # pylint: disable=unused-variable
        ]
    })

    with fixed_core_count(num_cores):
      buffer_path = file_io.write_to_temp_buffer(
          df, self.get_temp_dir(), [_RAW_ROW, _DUMMY_COL, _DUMMY_VEC_COL])

    with self.test_session(graph=tf.Graph()) as sess:
      dataset = tf.data.TFRecordDataset(buffer_path)
      dataset = dataset.batch(1).map(
          lambda x: tf.parse_example(x, _FEATURE_MAP))
      data_iter = dataset.make_one_shot_iterator()
      seen_rows = set()
      for i in range(num_rows + 5):
        row = data_iter.get_next()
        try:
          row_id, val_0, val_1 = sess.run(
              [row[_RAW_ROW], row[_DUMMY_COL], row[_DUMMY_VEC_COL]])
          row_id, val_0, val_1 = row_id[0][0], val_0[0][0], val_1[0]
          assert row_id not in seen_rows
          seen_rows.add(row_id)
          self.assertEqual(val_0, df[_DUMMY_COL][row_id])
          self.assertAllClose(val_1, df[_DUMMY_VEC_COL][row_id])
          self.assertLess(i, num_rows, msg="Too many rows.")
        except tf.errors.OutOfRangeError:
          self.assertGreaterEqual(i, num_rows, msg="Too few rows.")

    file_io._GARBAGE_COLLECTOR.purge()
    assert not tf.gfile.Exists(buffer_path)

  def test_serialize_deserialize_0(self):
    self._serialize_deserialize(num_cores=1)

  def test_serialize_deserialize_1(self):
    self._serialize_deserialize(num_cores=2)

  def test_serialize_deserialize_2(self):
    self._serialize_deserialize(num_cores=8)


if __name__ == "__main__":
...
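The fixed_core_count helper in the test above works by temporarily replacing multiprocessing.cpu_count with a lambda and restoring the original function when the context exits, so code that scales with the core count behaves deterministically under test. Below is a minimal standalone sketch of that monkey-patching pattern; the name fake_cpu_count and the try/finally hardening are additions for illustration, not part of the TensorFlow test file.

import contextlib
import multiprocessing


@contextlib.contextmanager
def fake_cpu_count(n):
    # Temporarily pretend the machine has n cores (illustrative name, not from the snippet).
    original = multiprocessing.cpu_count
    multiprocessing.cpu_count = lambda: n
    try:
        yield
    finally:
        # Restore the real function even if the body raises.
        multiprocessing.cpu_count = original


if __name__ == "__main__":
    with fake_cpu_count(2):
        print(multiprocessing.cpu_count())  # prints 2
    print(multiprocessing.cpu_count())      # prints the real core count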
multi_process_prime_number.py
Source: multi_process_prime_number.py

...
    return list

if __name__ == '__main__':
    N = int(input())
    # multiprocessing
    CPU_COUNT = cpu_count()  # CPU core count; 8 on this machine
    pool = Pool(CPU_COUNT)
    sepList = seprateNum(N, CPU_COUNT)
    result = []
    for i in range(CPU_COUNT):
        result.append(pool.apply_async(howMany, (sepList[i],)))
    pool.close()
    pool.join()
    ans = 0
    list = [res.get() for res in result]  # collect per-process results (note: shadows the builtin list)
    print(sum(list), end='')  # end='' suppresses the trailing \n
# the code being timed goes in the section above
end = time.time()
...
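The snippet above fans work out with Pool.apply_async, one task per core, and gathers the partial counts with res.get() after close()/join(). Since seprateNum and howMany are not shown in the truncated snippet, the sketch below uses a made-up stand-in worker (count_even) purely to illustrate the same fan-out/collect pattern.

from multiprocessing import Pool, cpu_count


def count_even(chunk):
    # Stand-in worker (hypothetical): count even numbers in a slice of the range.
    return sum(1 for x in chunk if x % 2 == 0)


if __name__ == '__main__':
    n = 1000
    workers = cpu_count()
    # Split range(n) into one contiguous chunk per worker (rough analogue of seprateNum).
    chunks = [range(i * n // workers, (i + 1) * n // workers) for i in range(workers)]
    pool = Pool(workers)
    async_results = [pool.apply_async(count_even, (chunk,)) for chunk in chunks]
    pool.close()
    pool.join()
    print(sum(res.get() for res in async_results))  # 500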
gcp_machines.py
Source: gcp_machines.py

machine_list = [
    {"machine_type": "n1-standard-4",  "cpu_count": "4",  "memory": "15"},
    {"machine_type": "n1-standard-8",  "cpu_count": "8",  "memory": "30"},
    {"machine_type": "n1-standard-16", "cpu_count": "16", "memory": "60"},
    {"machine_type": "n1-standard-32", "cpu_count": "32", "memory": "120"},
    {"machine_type": "n1-standard-64", "cpu_count": "64", "memory": "240"},
    {"machine_type": "n1-standard-96", "cpu_count": "96", "memory": "360"},
    {"machine_type": "n1-highmem-2",   "cpu_count": "2",  "memory": "13"},
    {"machine_type": "n1-highmem-4",   "cpu_count": "4",  "memory": "26"},
    {"machine_type": "n1-highmem-8",   "cpu_count": "8",  "memory": "52"},
    {"machine_type": "n1-highmem-16",  "cpu_count": "16", "memory": "104"},
    {"machine_type": "n1-highmem-32",  "cpu_count": "32", "memory": "208"},
    {"machine_type": "n1-highmem-64",  "cpu_count": "64", "memory": "416"},
    {"machine_type": "n1-highmem-96",  "cpu_count": "96", "memory": "624"},
    {"machine_type": "n1-highcpu-16",  "cpu_count": "16", "memory": "14.4"},
    {"machine_type": "n1-highcpu-32",  "cpu_count": "32", "memory": "28.8"},
    {"machine_type": "n1-highcpu-64",  "cpu_count": "64", "memory": "57.6"},
    {"machine_type": "n1-highcpu-96",  "cpu_count": "96", "memory": "86.4"},
...
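A list of dicts like this is usually queried at runtime, for example to find the smallest machine that satisfies a CPU and memory request. The helper below (pick_machine, a hypothetical name, not part of the snippet) is one way that could look; note the snippet stores cpu_count and memory as strings, so they are cast before comparison.

def pick_machine(machines, min_cpus, min_memory_gb):
    # Hypothetical helper: return the smallest machine meeting the request, or None.
    candidates = [
        m for m in machines
        if int(m["cpu_count"]) >= min_cpus and float(m["memory"]) >= min_memory_gb
    ]
    return min(candidates,
               key=lambda m: (int(m["cpu_count"]), float(m["memory"])),
               default=None)


example_machines = [
    {"machine_type": "n1-standard-4", "cpu_count": "4", "memory": "15"},
    {"machine_type": "n1-highmem-4", "cpu_count": "4", "memory": "26"},
    {"machine_type": "n1-standard-8", "cpu_count": "8", "memory": "30"},
]
print(pick_machine(example_machines, min_cpus=4, min_memory_gb=20))
# {'machine_type': 'n1-highmem-4', 'cpu_count': '4', 'memory': '26'}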
game.py
Source: game.py

from main import cpu, check

user_count = 0
cpu_count = 0
count = 0
while count < 3:
    cpu_choice = cpu()
    choice = input("your choice: ").upper()
    if choice.upper() == cpu_choice.upper():
        print(cpu_choice)
        print(f"it's a Tie, scores: player-{user_count}, cpu-{cpu_count}")
    elif choice == "P" and cpu_choice == "R":
        user_count = user_count + 1
        print(cpu_choice)
        print(f"scores: player-{user_count}, cpu-{cpu_count}")
    elif choice == "S" and cpu_choice == "P":
        user_count = user_count + 1
        print(cpu_choice)
        print(f"scores: player-{user_count}, cpu-{cpu_count}")
    elif choice == "R" and cpu_choice == "S":
        user_count = user_count + 1
        print(cpu_choice)
        print(f"scores: player-{user_count}, cpu-{cpu_count}")
    elif choice == "P" and cpu_choice == "S":
        cpu_count = cpu_count + 1
        print(cpu_choice)
        print(f"scores: player-{user_count}, cpu-{cpu_count}")
    elif choice == "R" and cpu_choice == "P":
        cpu_count = cpu_count + 1
        print(cpu_choice)
        print(f"scores: player-{user_count}, cpu-{cpu_count}")
    elif choice == "S" and cpu_choice == "R":
        cpu_count = cpu_count + 1
        print(cpu_choice)
        print(f"scores: player-{user_count}, cpu-{cpu_count}")
    count = count + 1
...
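The cpu() and check() helpers are imported from a main module that is not part of this snippet. Purely as an assumption for illustration, a minimal cpu() stand-in could return a random move so the loop above has something to play against:

import random


def cpu():
    # Hypothetical stand-in for main.cpu(): pick Rock, Paper, or Scissors at random.
    return random.choice(["R", "P", "S"])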