Best Python code snippet using avocado_python
load_statistics.py
Source: load_statistics.py
...153 "Unsupported period '%s'" % kwds['period']154 except (AssertionError, ValueError):155 LOG.warning(helper.exc_info())156 raise Plotter.BadRequestError('Bad request')157 def _get_relative_dir(self, kwds):158 if 'index' in kwds:159 tmp = 'INSTANCE_%s_%s' % (kwds['farmRoleId'], kwds['index'])160 return os.path.join('%s' % kwds['farmId'], tmp)161 elif 'farmRoleId' in kwds:162 return os.path.join('%s' % kwds['farmId'], 'FR_%s' % kwds['farmRoleId'])163 else:164 return os.path.join('%s' % kwds['farmId'], 'FARM')165 def _get_rrd_dir(self, kwds):166 base_rrd_dir = os.path.join(self.config['rrd']['dir'], helper.x1x2(kwds['farmId']))167 relative_dir = self._get_relative_dir(kwds)168 rrd_dir = os.path.join(base_rrd_dir, relative_dir)169 return rrd_dir170 def _get_rrd_files(self, kwds, metric):171 rrd_dir = self._get_rrd_dir(kwds)172 if metric == 'io':173 m = 'IO'174 elif metric == 'snum':175 m = 'SERVERS'176 else:177 m = '%sSNMP' % metric.upper()178 try:179 rrd_files = [os.path.join(rrd_dir, m, f) for f in180 os.walk(os.path.join(rrd_dir, m)).next()[2]]181 except StopIteration:182 rrd_files = []183 for rrd_file in rrd_files:184 if not os.path.exists(rrd_file):185 raise IOError('No such file or directory: %s' % rrd_file)186 return rrd_files187 def _get_image_dir(self, kwds):188 relative_dir = self._get_relative_dir(kwds)189 img_dir = os.path.join(self.config['img']['dir'], relative_dir)190 if not os.path.exists(img_dir):191 try:192 os.makedirs(img_dir, 0755)193 except OSError as e:194 if e.errno != 17: # File exists195 raise196 return img_dir197 def _get_url_dir(self, kwds):198 relative_dir = self._get_relative_dir(kwds)199 if self.config['img']['port']:200 url_dir = '{scheme}://{host}:{port}/{path}'.format(201 scheme=self.config['img']['scheme'],202 host=self.config['img']['host'],203 port=self.config['img']['port'],204 path=os.path.join(self.config['img']['path'], relative_dir)205 )206 else:207 url_dir = '{scheme}://{host}/{path}'.format(208 scheme=self.config['img']['scheme'],209 host=self.config['img']['host'],210 path=os.path.join(self.config['img']['path'], relative_dir)211 )212 return url_dir...
typingvid.py
Source: typingvid.py
...
    for c in word:
        c = _remap_special(c)
        res += mapping[c] if c in mapping else c
    return res

def _get_relative_dir(relative_dir):
    """
    Return the full path of a relative directory.
    Parameters
    ----------
    relative_dir: string
        The relative directory name.
    Returns
    -------
    string
        The full path of the directory.
    """
    main_dir_name = os.path.dirname(os.path.abspath(__file__))
    return os.path.join(main_dir_name, relative_dir)

def _set_property(soup, object_id, prop, value):
    """
    Update a property of an object within an svg file.
    Parameters
    ----------
    soup: bs4.BeautifulSoup
        A BeautifulSoup object containing the contents of the svg (modified in place).
    object_id: string
        The id of the svg object to update. Either a single character (e.g. 'A') or
        a special string (e.g. "Space") based on convention.
    prop: string
        The property to update (e.g. "fill" or "fill-opacity").
    value: string
        The target value of the given property (e.g. "black", "0.1").
    Notes
    -----
    Parameter `soup` is modified in place.
    """
    obj = soup.find(id=object_id)
    obj["style"] = re.sub(f"{prop}:.+?;", f"{prop}:{value};", obj["style"])

def _remap_special(c):
    """
    Remap special characters to valid svg object ids based on convention.
    Parameters
    ----------
    c: string
        The character to be remapped.
    Returns
    -------
    string
        A valid svg object id. Either the original character `c` or an expressive string
        for special characters.
    """
    m = {
        " ": "space",
        "\n": "enter",
    }
    if c in m:
        c = m[c]
    return c

def _generate_frame(char, soup, properties, temp_dir_name, frame_num):
    """
    Generate a single frame of the (keyboard-only) animation.
    Parameters
    ----------
    char: string
        The character currently being animated.
    soup: bs4.BeautifulSoup
        The BeautifulSoup containing the keyboard svg.
    properties: dict
        A dictionary of property/value pairs to be updated for this frame.
    temp_dir_name: string
        The directory in which the frames are temporarily stored.
    frame_num: int
        The number of the current frame.
    Notes
    -----
    Writes directly to file '{temp_dir_name}/frame{frame_num}.png'.
    """
    for prop in properties:
        _set_property(soup, char, prop, properties[prop])
    svg2png(
        bytestring=str(soup),
        write_to=f"{temp_dir_name}/frame{frame_num}.png",
    )

def _create_frames(keyboard_svg, text):
    """
    Generate all frames of the keyboard animation and store them in a temporary folder.
    Parameters
    ----------
    keyboard_svg: string
        Path to a keyboard svg file.
    text: string
        The text to be animated onto the keyboard.
    Returns
    -------
    temp_dir: tempfile.TemporaryDirectory
        A reference to the (automatically generated) temporary directory containing all frames.
    """
    with open(keyboard_svg) as f:
        data = f.read()
    keyboard_soup = BeautifulSoup(data, "xml")
    temp_dir = tempfile.TemporaryDirectory()
    temp_dir_name = temp_dir.name
    frame_num = 0
    _generate_frame(None, keyboard_soup, {}, temp_dir_name, frame_num)
    frame_num += 1
    for char in text:
        char = _remap_special(char)
        _generate_frame(
            char,
            keyboard_soup,
            {"fill": "black", "fill-opacity": "0.2"},
            temp_dir_name,
            frame_num,
        )
        frame_num += 1
        _generate_frame(
            char,
            keyboard_soup,
            {"fill": "none", "fill-opacity": "1"},
            temp_dir_name,
            frame_num,
        )
        frame_num += 1
    return temp_dir

def _generate_keyboard_clip(temp_dir, T):
    """
    Combine previously generated keyboard frames into a single MoviePy clip.
    Parameters
    ----------
    temp_dir: tempfile.TemporaryDirectory
        A reference to the (automatically generated) temporary directory containing all frames.
    T: float
        The duration of each frame in seconds.
    Returns
    -------
    moviepy.video.VideoClip.VideoClip
        The resulting MoviePy clip.
    """
    clips = [
        mp.ImageClip(f"{temp_dir.name}/frame{n}.png").set_duration(T)
        for n in range(len(os.listdir(temp_dir.name)))
    ]
    keyboard_clip = mp.concatenate_videoclips(clips)
    return keyboard_clip

def _generate_text_clip(text, T, font):
    """
    Create a simple MoviePy clip of text slowly appearing, to be used on the display(s).
    Parameters
    ----------
    text: string
        The string to be animated.
    T: float
        The duration of a single keypress.
    font: string
        The font to be used for the text.
    Returns
    -------
    moviepy.video.VideoClip.VideoClip
        The resulting MoviePy clip.
    """
    return mp.concatenate_videoclips(
        [
            mp.TextClip(
                f"> {text[:i]}|",
                color="black",
                kerning=5,
                fontsize=31,
                font=font,
            ).set_duration(2 * T)
            for i in range(0, len(text) + 1)
        ]
    )

def _generate_composite_clip(background_clip, keyboard_clip, txt_clips):
    """
    Create the final clip containing the keyboard and either one or two displays.
    Parameters
    ----------
    background_clip: moviepy.video.VideoClip.ImageClip
        A simple image clip of the background to be used for the video.
    keyboard_clip: moviepy.video.VideoClip.VideoClip
        The animated keyboard clip generated by `_generate_keyboard_clip`.
    txt_clips: list
        A list containing either one or two text clips generated by `_generate_text_clip`.
    Returns
    -------
    moviepy.video.VideoClip.VideoClip
        The resulting (composite) video clip.
    """
    if len(txt_clips) == 2:
        cvc = mp.CompositeVideoClip(
            [
                background_clip,
                keyboard_clip.resize(0.69).set_pos(("center", 0.4), relative=True),
                txt_clips[0].set_pos((351, 230)),
                txt_clips[1].set_pos((351, 335)),
            ]
        ).set_duration(keyboard_clip.duration)
        cvc = vfx.crop(cvc, x1=247.72, y1=132.38, width=1424.56, height=815.24)
    elif len(txt_clips) == 1:
        cvc = mp.CompositeVideoClip(
            [
                background_clip,
                keyboard_clip.resize(0.69).set_pos(("center", 380)),
                txt_clips[0].set_pos((351, 282)),
            ]
        ).set_duration(keyboard_clip.duration)
        cvc = vfx.crop(cvc, x1=269.5, y1=199.5, width=1381, height=681)
    return cvc

def _export_clip(clip, filename):
    """
    Export a clip to a media file based on its extension.
    Parameters
    ----------
    clip: moviepy.video.VideoClip.VideoClip
        The clip to be written.
    filename: string
        The path to the output file. Extension can be either `.mp4` or `.gif`.
    """
    ext = filename.split(".")[1]
    if ext == "gif":
        clip.write_gif(filename, fps=10, logger=None)
    elif ext == "mp4":
        clip.write_videofile(filename, fps=24, logger=None)

def _create_video(temp_dir, layout, args):
    """
    Create a fully-fledged keyboard animation video using the previously generated keyboard frames.

    Parameters
    ----------
    temp_dir: tempfile.TemporaryDirectory
        A reference to the (automatically generated) temporary directory containing all frames.
    layout: dict
        A dictionary resulting from reading an appropriate yaml layout file.
        Should at least include the `file` and `fonts` keys.
    args: argparse.Namespace
        The arguments generated by argparse.
        Required: args.text
    """
    T = 1 / args.speed
    keyboard_clip = _generate_keyboard_clip(temp_dir, T)
    if args.no_display:
        final_clip = keyboard_clip
    elif len(layout["fonts"]) == 2:
        background_clip = mp.ImageClip(f"{_get_relative_dir('assets')}/dual_display_background.png")
        upper_text_clip = _generate_text_clip(
            args.text if not args.force_lowercase else args.text.lower(),
            T,
            layout["fonts"][0],
        )
        remapped_text = _layout_remap(args.text, layout["mapping"])
        lower_text_clip = _generate_text_clip(
            remapped_text if not args.force_lowercase else remapped_text.lower(),
            T,
            layout["fonts"][1],
        )
        final_clip = _generate_composite_clip(
            background_clip, keyboard_clip, [upper_text_clip, lower_text_clip]
        )
    elif len(layout["fonts"]) == 1:
        background_clip = mp.ImageClip(f"{_get_relative_dir('assets')}/mono_display_background.png")
        txt_clip = _generate_text_clip(
            args.text if not args.force_lowercase else args.text.lower(),
            T,
            layout["fonts"][0],
        )
        final_clip = _generate_composite_clip(
            background_clip, keyboard_clip, [txt_clip]
        )
    if args.invert_colors:
        final_clip = final_clip.fx(vfx.invert_colors)
    if args.hold_last_frame > 0:
        final_clip = final_clip.fx(
            vfx.freeze,
            t="end",
            padding_end=1 / 100,
            freeze_duration=args.hold_last_frame,
        )
    _export_clip(final_clip, args.output)
    temp_dir.cleanup()

def _show_all_layouts():
    layouts = []
    for f in os.listdir(_get_relative_dir("layouts/")):
        if f.endswith(".yml") or f.endswith(".yaml"):
            layouts.append(f)
    print("Available layouts: ", end="")
    print(", ".join([l.split(".")[0] for l in layouts]))

def _parse_arguments():
    """
    Parse the arguments from the command line using argparse.
    Returns
    -------
    args: argparse.Namespace
        The arguments generated by argparse.
        Required: args.text
    """
    parser = argparse.ArgumentParser(
        description="A customizable typing animation generator with multi-layout support."
    )
    group = parser.add_mutually_exclusive_group(required=True)
    group.add_argument(
        "-t",
        "--text",
        help="the text (only in the first language) to be typed",
    )
    parser.add_argument(
        "-l",
        "--layout",
        default="en",
        help="the layout to use for the keyboard (default: en)",
    )
    parser.add_argument(
        "-o",
        "--output",
        default="output.mp4",
        help="location of output file (default: output.mp4)",
    )
    parser.add_argument(
        "-s",
        "--speed",
        default=5.0,
        type=float,
        help="speed of output media file (default: 5.0)",
    )
    group.add_argument(
        "--all-layouts",
        action="store_true",
        help="print all available layouts and exit",
    )
    parser.add_argument(
        "--no-display",
        action="store_true",
        help="keep only the keyboard for final animation",
    )
    parser.add_argument(
        "--invert-colors", action="store_true", help="invert colors of final media file"
    )
    parser.add_argument(
        "--force-lowercase",
        action="store_true",
        help="show display text in lowercase (instead of uppercase)",
    )
    parser.add_argument(
        "--hold-last-frame",
        default=-1,
        type=float,
        metavar="N",
        help="keep last frame on screen for N seconds, 0 for normal screen time (default: 0)",
    )
    args = parser.parse_args()
    if args.text:
        args.text = args.text.upper()  # TODO add support for case-sensitivity (animating the Shift key)
    return args

def animate(layout, args):
    """
    Create a keyboard animation video based on the provided arguments.
    Parameters
    ----------
    layout: dict
        A dictionary resulting from reading an appropriate yaml layout file.
        Should at least include the `file` and `fonts` keys.
    args: argparse.Namespace
        The arguments generated by argparse.
        Required: args.text
    """
    print("Generating frames... ", end="", flush=True)
    asset = os.path.join(_get_relative_dir("assets/"), layout['file'])
    frames_dir = _create_frames(asset, args.text)
    print("frames successfully generated.")
    print("Generating output file... ", end="", flush=True)
    _create_video(frames_dir, layout, args)
    print(f"output file {args.output} successfully generated.")

def main():
    args = _parse_arguments()
    if args.all_layouts:
        _show_all_layouts()
        quit()
    layouts_dir = _get_relative_dir("layouts/")
    layout_file = os.path.join(layouts_dir, args.layout) + ".yml"
    with open(layout_file) as f:
        layout = yaml.safe_load(f)
    animate(layout, args)

if __name__ == "__main__":
...
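In typingvid.py, `_get_relative_dir` anchors every lookup (layouts/, assets/, the display backgrounds) at the directory containing the script itself rather than the current working directory, so the tool finds its bundled files no matter where it is invoked from. A minimal sketch of that idea, independent of the rest of the script:

import os

def get_relative_dir(relative_dir):
    # Resolve against the directory of this file, not os.getcwd(),
    # so bundled assets and layouts are found from any working directory.
    main_dir_name = os.path.dirname(os.path.abspath(__file__))
    return os.path.join(main_dir_name, relative_dir)

# e.g. "/path/to/script_dir/layouts", regardless of the caller's CWD
print(get_relative_dir("layouts"))

Based on the argparse flags defined above, an invocation along the lines of `python typingvid.py -t "HELLO WORLD" -l en -o output.gif` should produce an animated GIF, since `_export_clip` switches on the output file's extension.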
tasks.py
Source: tasks.py
...
    elif context.get_current_instance().settings["file_delivery_mode"] == "fetch":
        return _get_file_fetch(src, **kwargs)
    else:
        raise ValueError("Unknown file delivery mode")

def _get_relative_dir():
    path = os.path.relpath(context.get_current_runnable().runnable_path, context.get_current_instance().base_path)
    return path.replace("\\", "/")

def _get_file_bundle(src, **kwargs):
    base_path = _get_relative_dir()
    return copy(
        src=f"bundle-out/{base_path}/{src}",
        **kwargs
    )

def _get_file_fetch(src, **kwargs):
    base_path = _get_relative_dir()
    base_url = context.get_current_instance().settings["fetch_base_url"]
    return get_url(
        url=f"{base_url}/{base_path}/{src}",
        **kwargs
    )

def _get_file_repo(src, **kwargs):
    base_path = _get_relative_dir()
    return copy(
        src=f"{base_path}/{src}",
        **kwargs
...
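Here `_get_relative_dir` computes the runnable's path relative to the instance base path and swaps backslashes for forward slashes so the result can be spliced into bundle paths and fetch URLs. The sketch below reproduces just that computation with plain string arguments in place of the `context` object; the sample paths are made up for illustration:

import os

def get_relative_dir(runnable_path, base_path):
    # Relative location of the runnable under the instance base path,
    # normalized to forward slashes so it can be embedded in a URL
    # (see _get_file_fetch above).
    path = os.path.relpath(runnable_path, base_path)
    return path.replace("\\", "/")

# Made-up sample paths:
print(get_relative_dir("/srv/instance/tasks/build", "/srv/instance"))
# -> "tasks/build" (the backslash replacement matters on Windows,
#    where os.path.relpath returns "tasks\\build")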