Best Python code snippet using lisa_python
cloudman.py
Source: cloudman.py
...
    _configure_logrotate(env)
    _configure_ec2_autorun(env, use_repo_autorun)
    _configure_sge(env)
    _configure_hadoop(env)
    _configure_nfs(env)
    _configure_novnc(env)
    _configure_desktop(env)
    install_s3fs(env)

def _configure_desktop(env):
    """
    Configure a desktop manager to work with VNC. Note that the `xfce4` (or
    `jwm`) and `vnc4server` packages need to be installed for this to have
    an effect.
    """
    if not _read_boolean(env, "configure_desktop", False):
        return
    # Set nginx PAM module to allow logins for any system user
    if env.safe_exists("/etc/pam.d"):
        env.safe_sudo('echo "@include common-auth" > /etc/pam.d/nginx')
        env.safe_sudo('usermod -a -G shadow galaxy')
    # Create a start script for X
    _setup_conf_file(env, "/home/ubuntu/.vnc/xstartup", "xstartup",
                     default_source="xstartup")
    # Create jwmrc config file (uncomment this if using the jwm window manager)
    # _setup_conf_file(env, "/home/ubuntu/.jwmrc", "jwmrc.xml",
    #                  default_source="jwmrc.xml", mode="0644")
    env.logger.info("----- Done configuring desktop -----")

def _configure_novnc(env):
    if not _read_boolean(env, "configure_novnc", False):
        # Longer term would like this enabled by default. -John
        return
    if "novnc_install_dir" not in env:
        env.novnc_install_dir = "/opt/novnc"
    if "vnc_password" not in env:
        env.vnc_password = "cl0udbi0l1nux"
    if "vnc_user" not in env:
        env.vnc_user = env.user
    if "vnc_display" not in env:
        env.vnc_display = "1"
    if "vnc_depth" not in env:
        env.vnc_depth = "16"
    if "vnc_geometry" not in env:
        env.vnc_geometry = "1024x768"
    _configure_vncpasswd(env)
    novnc_dir = env.novnc_install_dir
    env.safe_sudo("mkdir -p '%s'" % novnc_dir)
    env.safe_sudo("chown %s '%s'" % (env.user, novnc_dir))
    clone_cmd = "NOVNC_DIR='%s'; rm -rf $NOVNC_DIR; git clone https://github.com/kanaka/noVNC.git $NOVNC_DIR" % novnc_dir
    run(clone_cmd)
    # Rewrite vnc_auto.html, which takes vnc_password as a query argument,
    # into index.html so that the password is preset and need not be
    # specified via a query parameter.
    run("sed s/password\\ =\\ /password\\ =\\ \\\'%s\\\'\\;\\\\\\\\/\\\\\\\\// '%s/vnc_auto.html' > '%s/index.html'" % (env.vnc_password, novnc_dir, novnc_dir))
    _setup_conf_file(env, "/etc/init.d/novnc", "novnc_init",
                     default_source="novnc_init")
    _setup_conf_file(env, "/etc/default/novnc", "novnc_default",
                     default_source="novnc_default.template")
    _setup_conf_file(env, "/etc/init.d/vncserver", "vncserver_init",
                     default_source="vncserver_init")
    _setup_conf_file(env, "/etc/default/vncserver", "vncserver_default",
                     default_source="vncserver_default.template")
    _setup_simple_service("novnc")
    _setup_simple_service("vncserver")

def _configure_vncpasswd(env):
    with cd("~"):
        run("mkdir -p ~/.vnc")
        run("rm -rf vncpasswd")
        run("git clone https://github.com/trinitronx/vncpasswd.py vncpasswd")
        run("python vncpasswd/vncpasswd.py '%s' -f ~/.vnc/passwd" % env.vnc_password)
        run("chmod 600 ~/.vnc/passwd")
        run("rm -rf vncpasswd")
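# Illustrative sketch, not part of cloudman.py: the escaped sed one-liner
# above is hard to read, so the hypothetical helper below shows the same
# transformation in plain Python. It assumes noVNC's vnc_auto.html contains
# a literal `password = ` assignment; the helper name and regex are ours.
import re

def _rewrite_vnc_page_sketch(src_path, dst_path, password):
    with open(src_path) as src:
        html = src.read()
    # Inject the password and comment out the rest of the original line,
    # mirroring the intent of the sed substitution above.
    html = re.sub(r"password = ", "password = '%s';//" % password, html)
    with open(dst_path, "w") as dst:
        dst.write(html)

# Usage sketch (paths assumed):
# _rewrite_vnc_page_sketch("/opt/novnc/vnc_auto.html",
#                          "/opt/novnc/index.html", env.vnc_password)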
def _setup_env(env):
    """
    Set up the system environment required to run CloudMan. This means
    installing required system-level packages (as defined in CBL's
    ``packages.yaml``, or a flavor thereof) and Python dependencies
    (i.e., libraries) as defined in CloudMan's ``requirements.txt`` file.
    """
    # Get and install required system packages
    if env.distribution in ["debian", "ubuntu"]:
        config_file = get_config_file(env, "packages.yaml")
        (packages, _) = _yaml_to_packages(config_file.base, 'cloudman')
        # Allow flavors to modify the package list
        packages = env.flavor.rewrite_config_items("packages", packages)
        _setup_apt_automation()
        _apt_packages(pkg_list=packages)
    elif env.distribution in ["centos", "scientificlinux"]:
        env.logger.warn("No CloudMan system package dependencies for CentOS")
    # Get and install required Python libraries
    with _make_tmp_dir() as work_dir:
        with cd(work_dir):
            url = os.path.join(CM_REPO_ROOT_URL, 'requirements.txt')
            _create_python_virtualenv(env, 'CM', reqs_url=url)
    # Add a custom vimrc
    vimrc_url = os.path.join(MI_REPO_ROOT_URL, 'conf_files', 'vimrc')
    remote_file = '/etc/vim/vimrc'
    if env.safe_exists("/etc/vim"):
        env.safe_sudo("wget --output-document=%s %s" % (remote_file, vimrc_url))
        env.logger.debug("Added a custom vimrc to {0}".format(remote_file))
    # Set up profile aliases
    aliases = ['alias lt="ls -ltr"', 'alias ll="ls -l"']
    for alias in aliases:
        _add_to_profiles(alias, ['/etc/bash.bashrc'])
    env.logger.info("Done setting up CloudMan's environment")

def _configure_logrotate(env):
    """
    Add a logrotate config file, which will automatically rotate CloudMan's log.
    """
    conf_file = "cloudman.logrotate"
    remote = '/etc/logrotate.d/cloudman'
    url = os.path.join(MI_REPO_ROOT_URL, 'conf_files', conf_file)
    env.safe_sudo("wget --output-document=%s %s" % (remote, url))
    env.logger.info("----- Added logrotate file to {0} -----".format(remote))
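# Illustrative sketch, not part of cloudman.py: _yaml_to_packages (defined in
# cloudbiolinux) roughly performs a lookup like the hypothetical helper below.
# The flat group-to-list schema shown is an assumption; the real helper also
# handles more structure than this.
import yaml

def _packages_for_group_sketch(yaml_path, group):
    # Assumed schema:
    #   cloudman:
    #     - nginx
    #     - postgresql
    with open(yaml_path) as handle:
        data = yaml.safe_load(handle)
    return list(data.get(group, []))

# Usage sketch:
# packages = _packages_for_group_sketch("config/packages.yaml", "cloudman")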
def _configure_ec2_autorun(env, use_repo_autorun=False):
    """
    ec2autorun.py is a script that launches CloudMan on instance boot
    and is thus required on an instance. See the script itself for the
    details of what it does.
    This method also adds a cloudman service to ``/etc/init``, which
    actually runs ec2autorun.py as a system-level service at system boot.
    """
    script = "ec2autorun.py"
    remote = os.path.join(env.install_dir, "bin", script)
    if not env.safe_exists(os.path.dirname(remote)):
        env.safe_sudo('mkdir -p {0}'.format(os.path.dirname(remote)))
    if use_repo_autorun:
        # Is this used? Can we eliminate use_repo_autorun?
        url = os.path.join(MI_REPO_ROOT_URL, script)
        env.safe_sudo("wget --output-document=%s %s" % (remote, url))
    else:
        install_file_dir = os.path.join(env.config_dir, os.pardir, "installed_files")
        tmp_remote = os.path.join("/tmp", os.path.basename(remote))
        env.safe_put(os.path.join(install_file_dir, script), tmp_remote)
        env.safe_sudo("mv %s %s" % (tmp_remote, remote))
    env.safe_sudo("chmod 0777 %s" % remote)
    # Create an upstart configuration file for the boot-time script
    cloudman_boot_file = 'cloudman.conf'
    remote_file = '/etc/init/%s' % cloudman_boot_file
    _write_to_file(cm_upstart % (remote, os.path.splitext(remote)[0]),
                   remote_file, mode="0644")
    # Set up default image user data (if configured via image_user_data_path
    # or image_user_data_template_path). This specifies defaults for CloudMan
    # when used with the resulting image; normal user data supplied by the
    # user will override these defaults.
    image_user_data_path = os.path.join(env.install_dir, "bin", "IMAGE_USER_DATA")
    if "image_user_data_dict" in env:
        # Explicit YAML contents defined in env; just dump them as is.
        import yaml
        _write_to_file(yaml.dump(env.get("image_user_data_dict")),
                       image_user_data_path, mode="0644")
    else:
        # Otherwise use a file or template file.
        _setup_conf_file(env, image_user_data_path, "image_user_data",
                         default_source="image_user_data")
    env.logger.info("Done configuring CloudMan's ec2_autorun")

def _configure_sge(env):
    """
    This method sets up the environment for SGE without actually setting up
    SGE; it basically makes sure the system paths expected by CloudMan exist
    on the system.
    TODO: Merge this with the ``install_sge`` method in ``custom/cloudman.py``.
    """
    sge_root = '/opt/sge'
    if not env.safe_exists(sge_root):
        env.safe_sudo("mkdir -p %s" % sge_root)
        env.safe_sudo("chown sgeadmin:sgeadmin %s" % sge_root)
    # Link our installed SGE to CloudMan's expected directory
    sge_package_dir = "/opt/galaxy/pkg"
    sge_dir = "ge6.2u5"
    if not env.safe_exists(os.path.join(sge_package_dir, sge_dir)):
        env.safe_sudo("mkdir -p %s" % sge_package_dir)
    if not env.safe_exists(os.path.join(sge_package_dir, sge_dir)):
        env.safe_sudo("ln --force -s %s/%s %s/%s"
                      % (env.install_dir, sge_dir, sge_package_dir, sge_dir))
    env.logger.info("Done configuring SGE for CloudMan")

def _configure_hadoop(env):
    """
    Grab the files required by CloudMan to set up a Hadoop cluster atop SGE.
    """
    hadoop_root = '/opt/hadoop'
    url_root = 'https://s3.amazonaws.com/cloudman'
    hcm_file = 'hadoop.1.0.4__1.0.tar.gz'
    si_file = 'sge_integration.1.0.tar.gz'
    # Make sure we're working with a clean hadoop_root dir to avoid any
    # version conflicts
    env.safe_sudo("rm -rf {0}".format(hadoop_root))
    env.safe_sudo("mkdir -p %s" % hadoop_root)
    with cd(hadoop_root):
        env.safe_sudo("wget --output-document={0} {1}/{0}".format(hcm_file, url_root))
        env.safe_sudo("wget --output-document={0} {1}/{0}".format(si_file, url_root))
        env.safe_sudo("chown -R {0} {1}".format(env.user, hadoop_root))
    env.logger.info("Done configuring Hadoop for CloudMan")

def _configure_nfs(env):
    """
    Edit ``/etc/exports`` to append the paths that CloudMan shares over NFS.
    In addition to the hard-coded paths listed here, additional paths
    can be included by setting ``extra_nfs_exports`` in ``fabricrc.txt`` as
    a comma-separated list of directories.
    """
    nfs_dir = "/export/data"
    cloudman_dir = "/mnt/galaxy/export"
    if not env.safe_exists(nfs_dir):
        # For the case of rerunning this script, ensure the nfs_dir does
        # not exist (the exists() method does not recognize it as a file
        # because by default it points to a nonexistent dir/file).
        with settings(warn_only=True):
            env.safe_sudo('rm -rf {0}'.format(nfs_dir))
...
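The `_configure_nfs` docstring above notes that `extra_nfs_exports` in `fabricrc.txt` is a comma-separated list of extra directories to share. The excerpt ends before the code that consumes that setting, but a minimal sketch of the expansion might look as follows; the helper name and the NFS export options are assumptions for illustration, not cloudbiolinux's actual values.

def build_exports_lines(dirs_csv, options="rw,sync,no_root_squash"):
    # Turn "a,b,c" into one /etc/exports line per directory.
    lines = []
    for path in [d.strip() for d in dirs_csv.split(",") if d.strip()]:
        lines.append("%s *(%s)" % (path, options))
    return lines

# build_exports_lines("/export/data,/mnt/galaxy/export")
# -> ['/export/data *(rw,sync,no_root_squash)',
#     '/mnt/galaxy/export *(rw,sync,no_root_squash)']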
storageperf.py
Source: storageperf.py
...
        ),
    )
    def perf_storage_over_nfs_synthetic_udp_4k(self, result: TestResult) -> None:
        self._perf_nfs(result, protocol="udp")

    def _configure_nfs(
        self,
        server: RemoteNode,
        client: RemoteNode,
        server_raid_disk_name: str = "/dev/md0",
        server_raid_disk_mount_dir: str = "/mnt",
        client_nfs_mount_dir: str = "/mnt/nfs_client_share",
        protocol: str = "tcp",
    ) -> None:
        # mount the raid disk on the server
        server.shell.mkdir(PurePosixPath(server_raid_disk_mount_dir), exist_ok=True)
        server.tools[Mkfs].format_disk(server_raid_disk_name, FileSystem.ext4)
        server.tools[Mount].mount(
            server_raid_disk_name, server_raid_disk_mount_dir, options="nobarrier"
        )
        # set up NFS on the server
        server.tools[NFSServer].create_shared_dir(
            [client.internal_address], server_raid_disk_mount_dir
        )
        # set up the NFS mount on the client
        client.tools[NFSClient].setup(
            server.internal_address,
            server_raid_disk_mount_dir,
            client_nfs_mount_dir,
            f"proto={protocol},vers=3",
        )

    def _run_fio_on_nfs(
        self,
        test_result: TestResult,
        server: RemoteNode,
        client: RemoteNode,
        server_data_disk_count: int,
        client_nfs_mount_dir: str,
        core_count: int,
        num_jobs: List[int],
        start_iodepth: int = 1,
        max_iodepth: int = 1024,
        filename: str = "fiodata",
        block_size: int = 4,
    ) -> None:
        # raise fs.aio-max-nr for the test, remembering the original values
        origin_value: Dict[str, str] = {}
        for node in [server, client]:
            origin_value[node.name] = node.tools[Sysctl].get("fs.aio-max-nr")
            node.tools[Sysctl].write("fs.aio-max-nr", "1048576")
        perf_disk(
            client,
            start_iodepth,
            max_iodepth,
            filename,
            test_name=inspect.stack()[1][3],
            core_count=core_count,
            disk_count=server_data_disk_count,
            disk_setup_type=DiskSetupType.raid0,
            disk_type=DiskType.premiumssd,
            num_jobs=num_jobs,
            block_size=block_size,
            size_mb=256,
            overwrite=True,
            cwd=PurePosixPath(client_nfs_mount_dir),
            test_result=test_result,
        )
        # restore the original fs.aio-max-nr values
        for node in [server, client]:
            node.tools[Sysctl].write("fs.aio-max-nr", origin_value[node.name])
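    # Illustrative sketch, not part of storageperf.py: the save/raise/restore
    # handling of fs.aio-max-nr above could also be wrapped in a context
    # manager, so the original values are restored even if fio raises. The
    # helper below is hypothetical; LISA does not necessarily provide one
    # (contextlib.contextmanager is assumed to be imported).
    @contextmanager
    def _temporary_sysctl_sketch(self, nodes, key, value):
        saved = {node.name: node.tools[Sysctl].get(key) for node in nodes}
        for node in nodes:
            node.tools[Sysctl].write(key, value)
        try:
            yield
        finally:
            for node in nodes:
                node.tools[Sysctl].write(key, saved[node.name])

    # Usage sketch:
    # with self._temporary_sysctl_sketch([server, client], "fs.aio-max-nr", "1048576"):
    #     perf_disk(...)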
    def _perf_nfs(
        self,
        test_result: TestResult,
        server_raid_disk_name: str = "/dev/md0",
        server_raid_disk_mount_dir: str = "/mnt/nfs_share",
        client_nfs_mount_dir: str = "/mnt/nfs_client_share",
        protocol: str = "tcp",
        filename: str = "fiodata",
        block_size: int = 4,
        start_iodepth: int = 1,
        max_iodepth: int = 1024,
    ) -> None:
        environment = test_result.environment
        assert environment, "fail to get environment from testresult"
        server_node = cast(RemoteNode, environment.nodes[0])
        client_node = cast(RemoteNode, environment.nodes[1])
        # Run the test only on Debian, SLES and Redhat distributions
        if (
            not isinstance(server_node.os, Redhat)
            and not isinstance(server_node.os, Debian)
            and not isinstance(server_node.os, SLES)
        ):
            raise SkippedException(f"{server_node.os.name} not supported")
        # Per the link below, NFS over UDP is no longer supported in RHEL 8.
        # https://access.redhat.com/documentation/en-us/red_hat_enterprise_linux/8/html/deploying_different_types_of_servers/exporting-nfs-shares_deploying-different-types-of-servers#the-tcp-and-udp-protocols-in-nfsv3-and-nfsv4_exporting-nfs-shares # noqa: E501
        if (
            "udp" == protocol
            and isinstance(server_node.os, Redhat)
            and server_node.os.information.version >= "8.0.0"
        ):
            raise SkippedException(
                f"udp mode not supported on {server_node.os.information.vendor} "
                f"{server_node.os.information.release}"
            )
        # Each fio process starts jobs equal to the iodepth to read/write from
        # the disks. The max number of jobs can be equal to the core count of
        # the node.
        # Examples:
        #   iodepth = 4, core count = 8 => max_jobs = 4
        #   iodepth = 16, core count = 8 => max_jobs = 8
        num_jobs = []
        iodepth_iter = start_iodepth
        core_count = client_node.tools[Lscpu].get_core_count()
        while iodepth_iter <= max_iodepth:
            num_jobs.append(min(iodepth_iter, core_count))
            iodepth_iter = iodepth_iter * 2
        # set up raid on the server
        server_data_disks = server_node.features[Disk].get_raw_data_disks()
        server_data_disk_count = len(server_data_disks)
        server_partition_disks = reset_partitions(server_node, server_data_disks)
        reset_raid(server_node, server_partition_disks)
        try:
            self._configure_nfs(
                server_node,
                client_node,
                server_raid_disk_name=server_raid_disk_name,
                server_raid_disk_mount_dir=server_raid_disk_mount_dir,
                client_nfs_mount_dir=client_nfs_mount_dir,
                protocol=protocol,
            )
            # run the fio test
            self._run_fio_on_nfs(
                test_result,
                server_node,
                client_node,
                server_data_disk_count,
                client_nfs_mount_dir,
...
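The iodepth-doubling loop in `_perf_nfs` is easy to miss on a first read. Restated as a standalone function, with the comment's own examples as a check, it behaves like this (the function name is ours, not LISA's):

def iodepth_num_jobs(start_iodepth, max_iodepth, core_count):
    # Double the iodepth from start to max, capping each step's job
    # count at the node's core count.
    num_jobs = []
    iodepth = start_iodepth
    while iodepth <= max_iodepth:
        num_jobs.append(min(iodepth, core_count))
        iodepth *= 2
    return num_jobs

# iodepth_num_jobs(1, 16, 8) -> [1, 2, 4, 8, 8]
# (iodepth=4, core count=8 -> 4 jobs; iodepth=16, core count=8 -> 8 jobs)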