Best Python code snippet using autotest_python
benchmark.py
Source:benchmark.py
...168 def reset(self):169 self.simulator.remove_all_servers()170 self._api_flush_servers()171 self._api_flush_controllers()172 def _wait_for_commands(self, command_uris):173 TIMEOUT = 600174 i = 0175 for uri in command_uris:176 while True:177 response = self.GET(uri)178 assert response.ok179 command = response.json()180 if command['complete']:181 if command['errored'] or command['cancelled']:182 raise RuntimeError("Command failed: %s" % command)183 else:184 break185 else:186 time.sleep(1)187 i += 1188 if i > TIMEOUT:189 raise RuntimeError("Timeout on %s" % command['message'])190 def _api_flush_controllers(self):191 for resource in self.GET("/api/storage_resource?plugin_name=simulator_controller&class_name=Couplet").json()['objects']:192 response = self.DELETE(resource['resource_uri'])193 assert response.ok194 def _add_controller(self, controller_id):195 response = self.POST("/api/storage_resource/", data=json.dumps({196 'plugin_name': self.PLUGIN_NAME,197 'class_name': 'Couplet',198 'attrs': {199 'controller_id': controller_id200 }201 }))202 assert response.ok203 def _api_flush_servers(self):204 # Avoid trying to remove all the servers at once to work around HYD-1741205 GROUP_SIZE = 4206 host_count = -1207 while host_count:208 command_uris = []209 for host in self.GET("/api/host/", params = {'limit': GROUP_SIZE}).json()['objects']:210 remove_job = [j for j in host['available_jobs'] if j['class_name'] == "ForceRemoveHostJob"][0]211 response = self.POST("/api/command/", data = json.dumps({212 'message': "Benchmark clearing %s" % host['fqdn'],213 'jobs': [214 {215 'class_name': "ForceRemoveHostJob",216 'args': remove_job['args']217 }218 ]}))219 assert response.ok220 command_uris.append(response.json()['resource_uri'])221 self._wait_for_commands(command_uris)222 response = self.GET("/api/host/", params = {'limit': GROUP_SIZE})223 host_count = response.json()['meta']['total_count']224 def _authenticated_session(self, username, password):225 session = 
requests.session()226 session.headers = {"Accept": "application/json",227 "Content-type": "application/json"}228 session.verify = False229 response = session.get("%s/api/session/" % self.url)230 if not response.ok:231 raise RuntimeError("Failed to open session")232 session.headers['X-CSRFToken'] = response.cookies['csrftoken']233 session.cookies['csrftoken'] = response.cookies['csrftoken']234 session.cookies['sessionid'] = response.cookies['sessionid']235 response = session.post("%s/api/session/" % self.url, data = json.dumps({'username': username, 'password': password}))236 if not response.ok:237 raise RuntimeError("Failed to authenticate")238 return session239 def get_queues(self):240 response = self.GET("/api/system_status")241 assert response.ok242 return response.json()['rabbitmq']['queues']243 def _get_queue(self, queue_name):244 queue = [q for q in self.get_queues() if q['name'] == queue_name][0]245 return queue246 def _connection_count(self):247 response = self.GET("/api/system_status")248 assert response.ok249 return len(response.json()['postgres']['pg_stat_activity']['rows'])250class timed(object):251 def __init__(self, tag):252 self.tag = tag253 def __enter__(self):254 self.ts = time.time()255 def __exit__(self, *args):256 te = time.time()257 log.info("%s: %2.2fs" % (self.tag, te - self.ts))258class FilesystemSizeLimit(Benchmark):259 def run(self):260 """Create increasingly large filesystems on large numbers of server and controllers until an error occurs."""261 SU_SIZE = 4262 log.debug("Connection count initially: %s" % self._connection_count())263 n = self.args.servers264 VOLUMES_PER_SERVER = 4265 while True:266 ost_count = (n * VOLUMES_PER_SERVER - 2)267 log.info("n = %s (ost count = %s)" % (n, ost_count))268 secret = self.get_registration_secret(n, datetime.timedelta(seconds=3600))269 command_uris = []270 fqdns = []271 serials = []272 log.debug("Creating servers...")273 for i in range(0, n, SU_SIZE):274 su_result = self.simulator.add_su(SU_SIZE, 
SU_SIZE * VOLUMES_PER_SERVER, 1)275 fqdns.extend(su_result['fqdns'])276 serials.extend(su_result['serials'])277 self._add_controller(su_result['controller_id'])278 log.debug("Registering servers...")279 for fqdn in fqdns:280 registration_result = self.simulator.register(fqdn, secret)281 command_uris.append("/api/command/%s/" % (registration_result['command_id']))282 log.debug("Waiting for setup...")283 with timed("Setup commands for %s servers" % n):284 try:285 self._wait_for_commands(command_uris)286 except RuntimeError, e:287 log.error("Failed registering %s servers: %s" % (n, e))288 log.error("Connection count: %s" % self._connection_count())289 break290 # Resolve serials to volume IDs291 response = self.GET("/api/volume/", params = {'limit': 0})292 assert response.ok293 serial_to_id = {}294 for volume in response.json()['objects']:295 serial_to_id[volume['label']] = volume['id']296 log.debug("Requesting filesystem creation...")297 with timed("Filesystem creation POST (%d OSTs)" % ost_count):298 response = self.POST("/api/filesystem/",299 data=json.dumps({300 'name': 'testfs',301 'mgt': {'volume_id': serial_to_id[serials[0]]},302 'mdts': [{303 'volume_id': serial_to_id[serials[1]],304 'conf_params': {}305 }],306 'osts': [307 {308 'volume_id': v_id,309 'conf_params': {}310 } for v_id in [serial_to_id[serial] for serial in serials[2:]]],311 'conf_params': {}312 })313 )314 if not response.ok:315 log.error(response.text)316 assert response.ok317 command_uri = response.json()['command']['resource_uri']318 log.debug("Awaiting filesystem creation...")319 assert response.status_code == 202, response.status_code320 self._wait_for_commands([command_uri])321 log.info("Success for n = %s" % n)322 n += self.args.servers323class ConcurrentRegistrationLimit(Benchmark):324 def run(self):325 """326 Increase the number of concurrent server registrations until server327 setup commands start failing.328 What we're testing here is that not only can N servers exist, but that329 they 
can all register at the same instant without causing anything to fall over.330 """331 SU_SIZE = 4332 log.debug("Connection count initially: %s" % self._connection_count())333 n = self.args.servers334 while True:335 log.info("n = %s" % n)336 command_uris = []337 secret = self.get_registration_secret(n, duration = datetime.timedelta(seconds = 3600))338 fqdns = []339 for i in range(0, n, SU_SIZE):340 fqdns.extend(self.simulator.add_su(SU_SIZE, SU_SIZE * 2, 1)['fqdns'])341 registration_results = self.simulator.register_many(fqdns, secret)342 for result in registration_results:343 command_uris.append("/api/command/%s/" % (result['command_id']))344 try:345 self._wait_for_commands(command_uris)346 except RuntimeError, e:347 log.error("Failed registering %s servers: %s" % (n, e))348 log.debug("Connection count: %s" % self._connection_count())349 break350 else:351 log.info("Success registering %s servers" % n)352 log.debug("Connection count: %s" % self._connection_count())353 self.reset()354 log.debug("Connection count after flush: %s" % self._connection_count())355 n += self.args.servers356class ServerCountLimit(Benchmark):357 def run(self):358 """359 Increase the number of servers being monitored until queues start backing up360 """361 # Some arbitrary nonzero amount of log data from each server362 LOG_RATE = 10363 add_group_size = 4364 volumes_per_server = 4365 i = 0366 baseline = dict((queue['name'], queue['messages']) for queue in self.get_queues())367 while True:368 log.info("i = %s, adding %s servers" % (i, add_group_size))369 time.sleep(1)370 registration_command_uris = []371 i += add_group_size372 result = self.simulator.add_su(add_group_size, add_group_size * volumes_per_server, 1)373 for fqdn in result['fqdns']:374 self.simulator.set_log_rate(fqdn, LOG_RATE)375 secret = self.get_registration_secret(1)376 result = self.simulator.register(fqdn, secret)377 registration_command_uris.append('/api/command/%s/' % (result['command_id']))378 
self._wait_for_commands(registration_command_uris)379 backed_up_queues = []380 for queue in self.get_queues():381 if queue['messages'] - baseline.get(queue['name'], 0) > max(queue['message_stats_ack_details_rate'], i):382 backed_up_queues.append(queue['name'])383 log.info("Queue %s is backed up (%s-%s>%s in=%.2f out=%.2f)" % (384 queue['name'],385 queue['messages'],386 baseline.get(queue['name'], 0),387 max(queue['message_stats_ack_details_rate'], i),388 queue['message_stats_publish_details_rate'],389 queue['message_stats_ack_details_rate']))390 if backed_up_queues:391 break392 # TODO: additional check: that there are no contact alerts393 # TODO: additional check: periodically, that we can stop and start lnet394 # on each server with a sensible latency (responsiveness to actions)395class LogIngestRate(Benchmark):396 def run(self):397 """Increase the rate of log messages from a fixed number of servers until the398 log RX queue starts to back up"""399 # Actual log messages per second is (log_rate / Session.POLL_INTERVAL) * server_count400 server_count = self.args.servers401 server_fqdns = []402 registration_command_uris = []403 for n in range(0, server_count):404 fqdn = self.simulator.add_server(1)405 secret = self.get_registration_secret(1)406 result = self.simulator.register(fqdn, secret)407 server_fqdns.append(fqdn)408 registration_command_uris.append("/api/command/%s/" % (result['command_id']))409 self._wait_for_commands(registration_command_uris)410 response = self.GET("/api/log/")411 assert response.ok412 log_message_count = response.json()['meta']['total_count']413 log.debug("Initially DB contains %s log messages" % log_message_count)414 tap_out = 0415 saturated_samples = []416 log_rate = 8417 while True:418 log.info("log_rate = %s" % log_rate)419 for fqdn in server_fqdns:420 self.simulator.set_log_rate(fqdn, log_rate)421 time.sleep(10)422 systemd_journal_queue = self._get_queue('agent_systemd_journal_rx')423 if systemd_journal_queue['messages'] > 
max(systemd_journal_queue['message_stats_ack_details_rate'] * 4, server_count):...
launchpad.py
Source:launchpad.py
...74 self._logger.debug("lp worker thread run")75 # login76 self._lp_login()77 # loop78 self._wait_for_commands()79 def shutdown(self):80 """Request shutdown"""81 self._shutdown = True82 def queue_request(self, func, args, result_callback):83 # FIXME: add support to pass strings instead of callable84 self._pending_requests.put((func, args, result_callback))85 def _wait_for_commands(self):86 """internal helper that waits for commands"""87 while True:88 while not self._pending_requests.empty():89 self._logger.debug("found pending request")90 (func, args, result_callback) = self._pending_requests.get()91 # run func async92 res = func(*args)93 # provide result to the callback94 result_callback(res)95 self._pending_requests.task_done()96 # wait a bit97 time.sleep(0.1)98 if (self._shutdown and99 self._pending_requests.empty()):...
restfulclient.py
Source:restfulclient.py
...89 self.error = "ERROR_SERVICE_ROOT"90 self._shutdown = True91 return92 # loop93 self._wait_for_commands()94 def shutdown(self):95 """Request shutdown"""96 self._shutdown = True97 def queue_request(self, func, args, kwargs, result_callback, error_callback):98 """99 queue a (remote) command for execution, the result_callback will100 call with the result_list when done (that function will be101 called async)102 """103 self._pending_requests.put((func, args, kwargs, result_callback, error_callback))104 def _wait_for_commands(self):105 """internal helper that waits for commands"""106 while True:107 while not self._pending_requests.empty():108 LOG.debug("found pending request")109 (func_str, args, kwargs, result_callback, error_callback) = self._pending_requests.get()110 # run func async111 try:112 func = self.service113 for part in func_str.split("."):114 func = getattr(func, part)115 res = func(*args, **kwargs)116 except Exception ,e:117 error_callback(e)118 else:...
Learn to execute automation testing from scratch with the LambdaTest Learning Hub. From setting up the prerequisites and running your first automation test, to following best practices and diving deeper into advanced test scenarios, the LambdaTest Learning Hub compiles step-by-step guides to help you become proficient with different test automation frameworks, i.e., Selenium, Cypress, TestNG, etc.
You can also refer to the video tutorials on the LambdaTest YouTube channel to get step-by-step demonstrations from industry experts.
Get 100 minutes of automation testing for FREE!