Best Python code snippet using localstack_python
xSpark_bench.py
Source:xSpark_bench.py
# NOTE(review): scraped snippet of xSpark_bench.py with the original file's line
# numbers (254-282) fused into the text. The fragment before reboot_cluster()
# (tail of another handler) and the body of launch_exp() were truncated in the
# capture and are omitted here rather than guessed at.
# Relies on module-level names defined elsewhere in the file:
# c (config module with CLUSTER_MAP), bold, run_xspark, kill_cluster.

def reboot_cluster(cluster):
    """Reboot the named cluster ('hdfs' or 'spark') via run_xspark.

    cluster: logical cluster name, used as a key into c.CLUSTER_MAP.
    """
    cluster_id = c.CLUSTER_MAP[cluster]
    print(bold('Reboot {}...'.format(cluster_id)))
    # run=0/terminate=0/reboot=1 selects the reboot action in run_xspark.
    run_xspark(current_cluster=cluster, num_instance=0, cluster_id=cluster_id,
               run=0, terminate=0, reboot=1)


def reboot(args):
    """CLI handler: reboot one cluster, or both when args.cluster == 'all'."""
    cluster = args.cluster
    if cluster == 'all':
        reboot_cluster('hdfs')
        reboot_cluster('spark')
    else:
        reboot_cluster(cluster)


def terminate(args):
    """CLI handler: terminate one cluster, or both when args.cluster == 'all'.

    NOTE(review): teardown order is spark-then-hdfs here, the reverse of the
    hdfs-then-spark order used by reboot() — presumably deliberate (stop the
    compute layer before its storage layer), but worth confirming.
    """
    cluster = args.cluster
    if cluster == 'all':
        kill_cluster('spark')
        kill_cluster('hdfs')
    else:
        kill_cluster(cluster)
cluster_nodes.py
Source:cluster_nodes.py
# NOTE(review): scraped fragment of cluster_nodes.py with the original file's
# line numbers (51-72) fused into the text; not runnable as captured. It shows
# the tail of one method (delete neutron registration, sleep, self-reboot) and
# most of reboot_cluster(self), which iterates the cluster's query() results
# and, for each NSXAPI appliance, stops it, sleeps 30s as a crude poll
# (flagged by the inline TODO), then recreates and re-reads it. The method is
# truncated mid-snippet ("appliance.read()..."), so the remainder of its body
# is not visible here — do not infer its return value.
...51 neutron_client.delete_neutron(neutron_cluster_node.host_address,52 conn.username,53 conn.password)54 time.sleep(30)55 self.reboot_cluster()56 time.sleep(60)57 return result_obj58 def reboot_cluster(self):59 ''' Method for reboot all NSXAPI servers that are in a cluster with this node60 '''61 neutron_result_list_schema = self.query()62 for neutron_result_schema in neutron_result_list_schema.results:63 conn = self.get_connection()64 vsm = VSM(neutron_result_schema.host_address,65 conn.username, conn.password, "", "1.0")66 appliance = NSXAPIApplianceManagement(vsm)67 appliance_schema = NSXAPIApplianceManagementSchema({})68 appliance.stop()69 #TODO: Add polling mechanism70 time.sleep(30)71 appliance.create(appliance_schema)72 appliance.read()...
setup_cluster.py
Source:setup_cluster.py
# NOTE(review): scraped fragment of setup_cluster.py with the original file's
# line numbers (77-95) fused into the text; not runnable as captured. It shows
# the tail of a teardown helper (spark-ec2 destroy) and the start of
# reboot_cluster(workers=None), which stops the cluster via stop-all.sh, reads
# worker URLs from spark/conf/slaves, and optionally trims the worker list.
# The trimming slice workerUrls[0:workers - 1] keeps workers-1 entries, which
# looks like an off-by-one against the docstring's "start that many workers" —
# but the function is truncated mid-elif, so the fix cannot be confirmed here.
...77 abort("Couldn't find SPARK_HOME.")78 # tear down cluster79 local('%s/ec2/spark-ec2 destroy %s' % (spark_home,80 name))81def reboot_cluster(workers=None):82 """83 Stops the cluster and restarts it. Has an optional workers parameter; if84 this parameter is defined, we only start that many workers.85 """86 87 # stop the cluster88 run('./spark/sbin/stop-all.sh')89 90 # read the worker node URLs91 workerUrls = run('cat ./spark/conf/slaves').split()92 # if we have a defined number of workers, trim the worker list down93 if isinstance(workers, int) and workers <= len(workerUrls):94 workerUrls = workerUrls[0:workers - 1]95 elif (isinstance(workers, int) and...
Learn to execute automation testing from scratch with the LambdaTest Learning Hub — right from setting up the prerequisites to run your first automation test, through following best practices, to diving deeper into advanced test scenarios. The LambdaTest Learning Hub compiles step-by-step guides to help you become proficient with different test automation frameworks, e.g., Selenium, Cypress, and TestNG.
You can also refer to the video tutorials on the LambdaTest YouTube channel for step-by-step demonstrations from industry experts.
Get 100 automation test minutes FREE!