Best Python code snippet using autotest_python
clusterperf.py
Source: clusterperf.py
...
    in a command line, such as "--name1 value1 --name2 value2"
    """
    return " ".join(["%s %s" % (name, value)
                     for name, value in args.items()])

def get_client_log(
        index = 1             # Client index (1 for first client,
                              # which is usually the one that's wanted)
        ):
    """
    Given the index of a client, read the client's log file
    from the current log directory and return its contents,
    ignoring RAMCloud log messages (what's left should be a
    summary of the results from a test).
    """
    globResult = glob.glob('%s/latest/client%d.*.log' %
            (options.log_dir, index))
    if len(globResult) == 0:
        raise Exception("couldn't find log file for client %d" % (index))
    result = ""
    for line in open(globResult[0], 'r'):
        if not re.match(r'([0-9]+\.[0-9]+) ', line):
            result += line
    return result

def print_cdf_from_log(
        index = 1             # Client index (1 for first client,
                              # which is usually the one that's wanted)
        ):
    """
    Given the index of a client, print in gnuplot format a cumulative
    distribution of the data in the client's log file (where "data" consists
    of comma-separated numbers stored in all of the lines of the log file
    that are not RAMCloud log messages). Each line in the printed output
    will contain a fraction and a number, such that the given fraction of all
    numbers in the log file have values less than or equal to the given number.
    """
    # Read the log file into an array of numbers.
    numbers = []
    globResult = glob.glob('%s/latest/client%d*.log' %
            (options.log_dir, index))
    if len(globResult) == 0:
        raise Exception("couldn't find log file for client %d" % (index))
    for line in open(globResult[0], 'r'):
        if not re.match(r'([0-9]+\.[0-9]+) ', line):
            for value in line.split(","):
                numbers.append(float(value))
    # Generate a CDF from the array.
    numbers.sort()
    print("%8.2f %8.3f" % (0.0, 0.0))
    print("%8.2f %8.3f" % (numbers[0], 1.0/len(numbers)))
    for i in range(1, 100):
        print("%8.2f %8.3f" % (numbers[int(len(numbers)*i/100)], i/100.0))
    print("%8.2f %8.3f" % (numbers[int(len(numbers)*999/1000)], .999))
    print("%8.2f %9.4f" % (numbers[int(len(numbers)*9999/10000)], .9999))
    print("%8.2f %8.3f" % (numbers[-1], 1.0))

def run_test(
        test,                 # Test object describing the test to run.
        options               # Command-line options.
        ):
    """
    Run a given test. The main value provided by this function is to
    prepare a candidate set of options for cluster.run and another set
    for the ClusterPerf clients, based on the command-line options.
    """
    cluster_args = {
        'debug':              options.debug,
        'log_dir':            options.log_dir,
        'log_level':          options.log_level,
        'backups_per_server': options.backups_per_server,
        'num_servers':        options.num_servers,
        'replicas':           options.replicas,
        'timeout':            options.timeout,
        'share_hosts':        True,
        'transport':          options.transport,
        'disjunct':           options.disjunct,
        'verbose':            options.verbose
    }
    client_args = {}
    # Provide a default value for num_servers here. This is better
    # than defaulting it in the OptionParser below, because tests can
    # see whether or not an actual value was specified and provide a
    # test-specific default.
    if cluster_args['num_servers'] is None:
        # Make sure there are enough servers to meet replica requirements.
        cluster_args['num_servers'] = options.replicas + 1
    if options.num_clients is not None:
        cluster_args['num_clients'] = options.num_clients
    if options.master_args is not None:
        cluster_args['master_args'] = options.master_args
    if options.count is not None:
        client_args['--count'] = options.count
    if options.size is not None:
        client_args['--size'] = options.size
    if options.warmup is not None:
        client_args['--warmup'] = options.warmup
    if options.numIndexlet is not None:
        client_args['--numIndexlet'] = options.numIndexlet
    if options.numIndexes is not None:
        client_args['--numIndexes'] = options.numIndexes
    test.function(test.name, options, cluster_args, client_args)

#-------------------------------------------------------------------
# Driver functions follow below. These functions are responsible for
# invoking ClusterPerf via cluster.py, and they collect and print
# result data. Simple tests can just use the "default" driver function.
#-------------------------------------------------------------------

def default(
        name,                 # Name of this test; passed through
                              # to ClusterPerf verbatim.
        options,              # The full set of command-line options.
        cluster_args,         # Proposed set of arguments to pass to
                              # cluster.run (extracted from options).
                              # Individual tests can override as
                              # appropriate for the test.
        client_args,          # Proposed set of arguments to pass to
                              # ClusterPerf (via cluster.run).
                              # Individual tests can override as
                              # needed for the test.
        ):
    """
    This function is used as the invocation function for most tests;
    it simply invokes ClusterPerf via cluster.run and prints the result.
    """
    cluster.run(client='%s/ClusterPerf %s %s' %
                (obj_path, flatten_args(client_args), name), **cluster_args)
    print(get_client_log(), end='')

def basic(name, options, cluster_args, client_args):
    if 'master_args' not in cluster_args:
        cluster_args['master_args'] = '-t 4000'
    if cluster_args['timeout'] < 250:
        cluster_args['timeout'] = 250
    cluster.run(client='%s/ClusterPerf %s %s' %
                (obj_path, flatten_args(client_args), name), **cluster_args)
    print(get_client_log(), end='')

def broadcast(name, options, cluster_args, client_args):
    if 'num_clients' not in cluster_args:
        cluster_args['num_clients'] = 10
    cluster.run(client='%s/ClusterPerf %s %s' %
                (obj_path, flatten_args(client_args), name), **cluster_args)
    print(get_client_log(), end='')

def indexBasic(name, options, cluster_args, client_args):
    if 'master_args' not in cluster_args:
        cluster_args['master_args'] = '--masterServiceThreads 1'
    if cluster_args['timeout'] < 200:
        cluster_args['timeout'] = 200
    # Ensure at least 5 hosts for optimal performance
    if options.num_servers is None:
        cluster_args['num_servers'] = len(hosts)
    # using 20GB for servers so that we don't run out of memory when inserting
    # 10 million objects/index entries
    cluster.run(client='%s/ClusterPerf %s %s' %
                (obj_path, flatten_args(client_args), name), **cluster_args)
    print(get_client_log(), end='')

def indexRange(name, options, cluster_args, client_args):
    if 'master_args' not in cluster_args:
        cluster_args['master_args'] = '--masterServiceThreads 1'
    if cluster_args['timeout'] < 200:
        cluster_args['timeout'] = 200
    # Ensure at least 5 hosts for optimal performance
    if options.num_servers is None:
        cluster_args['num_servers'] = len(hosts)
    # using 20GB for servers so that we don't run out of memory when inserting
    # 10 million objects/index entries
    cluster.run(client='%s/ClusterPerf %s %s' %
                (obj_path, flatten_args(client_args), name), **cluster_args)
    print(get_client_log(), end='')

def indexMultiple(name, options, cluster_args, client_args):
    if 'master_args' not in cluster_args:
        cluster_args['master_args'] = '--masterServiceThreads 1'
    if cluster_args['timeout'] < 200:
        cluster_args['timeout'] = 200
    # Ensure at least 15 hosts for optimal performance
    if options.num_servers is None:
        cluster_args['num_servers'] = len(hosts)
    # use a maximum of 10 secondary keys
    if len(hosts) <= 10:
        # Hack until synchronization bug in write RPC handler
        # in MasterService is resolved. This bug prevents us from using more
        # than 1 MasterService thread. However, we need to use more than 1
        # service thread, otherwise if a tablet and its corresponding
        # indexlet end up on the same server, we will have a deadlock.
        # For now, make sure that we never wrap around the server list.
        # Once the bug is resolved, we should be able to use len(hosts)
        # for numIndexes
        client_args['--numIndexes'] = len(hosts) - 1
    else:
        client_args['--numIndexes'] = 10
    cluster.run(client='%s/ClusterPerf %s %s' %
                (obj_path, flatten_args(client_args), name), **cluster_args)
    print(get_client_log(), end='')

def indexScalability(name, options, cluster_args, client_args):
    if 'master_args' not in cluster_args:
        cluster_args['master_args'] = '--masterServiceThreads 2'
    if cluster_args['timeout'] < 100:
        cluster_args['timeout'] = 100
    cluster_args['backups_per_server'] = 0
    cluster_args['replicas'] = 0
    # Ensure at least 15 hosts for optimal performance
    if options.num_servers is None:
        cluster_args['num_servers'] = len(hosts)
    if 'num_clients' not in cluster_args:
        cluster_args['num_clients'] = 10
    cluster.run(client='%s/ClusterPerf %s %s' %
                (obj_path, flatten_args(client_args), name), **cluster_args)
    print(get_client_log(), end='')

def multiOp(name, options, cluster_args, client_args):
    if cluster_args['timeout'] < 100:
        cluster_args['timeout'] = 100
    if options.num_servers is None:
        cluster_args['num_servers'] = len(hosts)
    client_args['--numTables'] = cluster_args['num_servers']
    cluster.run(client='%s/ClusterPerf %s %s' %
                (obj_path, flatten_args(client_args), name),
                **cluster_args)
    print(get_client_log(), end='')

def netBandwidth(name, options, cluster_args, client_args):
    if 'num_clients' not in cluster_args:
        cluster_args['num_clients'] = 2*len(config.hosts)
    if options.num_servers is None:
        cluster_args['num_servers'] = cluster_args['num_clients']
        if cluster_args['num_servers'] > len(config.hosts):
            cluster_args['num_servers'] = len(config.hosts)
    if options.size is not None:
        client_args['--size'] = options.size
    else:
        client_args['--size'] = 1024*1024
    cluster.run(client='%s/ClusterPerf %s %s' %
                (obj_path, flatten_args(client_args), name), **cluster_args)
    print(get_client_log(), end='')

def readAllToAll(name, options, cluster_args, client_args):
    cluster_args['backups_per_server'] = 0
    cluster_args['replicas'] = 0
    if 'num_clients' not in cluster_args:
        cluster_args['num_clients'] = len(hosts)
    if options.num_servers is None:
        cluster_args['num_servers'] = len(hosts)
    client_args['--numTables'] = cluster_args['num_servers']
    cluster.run(client='%s/ClusterPerf %s %s' %
                (obj_path, flatten_args(client_args), name), **cluster_args)
    print(get_client_log(), end='')

def readDist(name, options, cluster_args, client_args):
    cluster.run(client='%s/ClusterPerf %s %s' %
                (obj_path, flatten_args(client_args), name),
                **cluster_args)
    print("# Cumulative distribution of time for a single client to read a\n"
          "# single %d-byte object from a single server. Each line indicates\n"
          "# that a given fraction of all reads took at most a given time\n"
          "# to complete.\n"
          "# Generated by 'clusterperf.py readDist'\n#\n"
          "# Time (usec) Cum. Fraction\n"
          "#---------------------------"
          % options.size)
    print_cdf_from_log()

def readDistRandom(name, options, cluster_args, client_args):
    cluster.run(client='%s/ClusterPerf %s %s' %
                (obj_path, flatten_args(client_args), name),
                **cluster_args)
    print("# Cumulative distribution of time for a single client to read a\n"
          "# random %d-byte object from a single server. Each line indicates\n"
          "# that a given fraction of all reads took at most a given time\n"
          "# to complete.\n"
          "# Generated by 'clusterperf.py readDistRandom'\n#\n"
          "# Time (usec) Cum. Fraction\n"
          "#---------------------------"
          % options.size)
    print_cdf_from_log()

def readLoaded(name, options, cluster_args, client_args):
    if 'num_clients' not in cluster_args:
        cluster_args['num_clients'] = 20
    cluster.run(client='%s/ClusterPerf %s %s' %
                (obj_path, flatten_args(client_args), name), **cluster_args)
    print(get_client_log(), end='')

def readRandom(name, options, cluster_args, client_args):
    cluster_args['backups_per_server'] = 0
    cluster_args['replicas'] = 0
    if 'num_clients' not in cluster_args:
        cluster_args['num_clients'] = 16
    if options.num_servers is None:
        cluster_args['num_servers'] = 1
    client_args['--numTables'] = cluster_args['num_servers']
    cluster.run(client='%s/ClusterPerf %s %s' %
                (obj_path, flatten_args(client_args), name), **cluster_args)
    print(get_client_log(), end='')

# This method is also used for multiReadThroughput
def readThroughput(name, options, cluster_args, client_args):
    if 'master_args' not in cluster_args:
        cluster_args['master_args'] = '-t 2000'
    if cluster_args['timeout'] < 250:
        cluster_args['timeout'] = 250
    if 'num_clients' not in cluster_args:
        cluster_args['num_clients'] = len(hosts) - cluster_args['num_servers']
        if cluster_args['num_clients'] < 2:
            print("Not enough machines in the cluster to run the '%s' benchmark"
                  % name)
            print("Need at least %d machines in this configuration" %
                  (cluster_args['num_servers'] + 2))
            return
    cluster.run(client='%s/ClusterPerf %s %s' %
                (obj_path, flatten_args(client_args), name), **cluster_args)
    print(get_client_log(), end='')

def writeDist(name, options, cluster_args, client_args):
    if 'master_args' not in cluster_args:
        cluster_args['master_args'] = '-t 2000'
    cluster_args['disjunct'] = True
    cluster.run(client='%s/ClusterPerf %s %s' %
                (obj_path, flatten_args(client_args), name),
                **cluster_args)
    print("# Cumulative distribution of time for a single client to write a\n"
          "# single %d-byte object to a single server. Each line indicates\n"
          "# that a given fraction of all writes took at most a given time\n"
          "# to complete.\n"
          "# Generated by 'clusterperf.py %s'\n#\n"
          "# Time (usec) Cum. Fraction\n"
          "#---------------------------"...