Best JavaScript code snippet using wpt
TestVSCode_stackTrace.py
Source:TestVSCode_stackTrace.py
1"""2Test lldb-vscode setBreakpoints request3"""4from __future__ import print_function5import unittest26import vscode7from lldbsuite.test.decorators import *8from lldbsuite.test.lldbtest import *9from lldbsuite.test import lldbutil10import lldbvscode_testcase11import os12class TestVSCode_stackTrace(lldbvscode_testcase.VSCodeTestCaseBase):13 mydir = TestBase.compute_mydir(__file__)14 name_key_path = ['name']15 source_key_path = ['source', 'path']16 line_key_path = ['line']17 def verify_stackFrames(self, start_idx, stackFrames):18 frame_idx = start_idx19 for stackFrame in stackFrames:20 # Don't care about frame above main21 if frame_idx > 20:22 return23 self.verify_stackFrame(frame_idx, stackFrame)24 frame_idx += 125 def verify_stackFrame(self, frame_idx, stackFrame):26 frame_name = self.get_dict_value(stackFrame, self.name_key_path)27 frame_source = self.get_dict_value(stackFrame, self.source_key_path)28 frame_line = self.get_dict_value(stackFrame, self.line_key_path)29 if frame_idx == 0:30 expected_line = self.recurse_end31 expected_name = 'recurse'32 elif frame_idx < 20:33 expected_line = self.recurse_call34 expected_name = 'recurse'35 else:36 expected_line = self.recurse_invocation37 expected_name = 'main'38 self.assertTrue(frame_name == expected_name,39 'frame #%i name "%s" == "%s"' % (40 frame_idx, frame_name, expected_name))41 self.assertTrue(frame_source == self.source_path,42 'frame #%i source "%s" == "%s"' % (43 frame_idx, frame_source, self.source_path))44 self.assertTrue(frame_line == expected_line,45 'frame #%i line %i == %i' % (frame_idx, frame_line,46 expected_line))47 @skipIfWindows48 @skipIfDarwin # Skip this test for now until we can figure out why tings aren't working on build bots49 @no_debug_info_test50 def test_stackTrace(self):51 '''52 Tests the 'stackTrace' packet and all its variants.53 '''54 program = self.getBuildArtifact("a.out")55 self.build_and_launch(program)56 source = 'main.c'57 self.source_path = os.path.join(os.getcwd(), source)58 self.recurse_end = line_number(source, 'recurse end')59 self.recurse_call = line_number(source, 'recurse call')60 self.recurse_invocation = line_number(source, 'recurse invocation')61 lines = [self.recurse_end]62 # Set breakoint at a point of deepest recuusion63 breakpoint_ids = self.set_source_breakpoints(source, lines)64 self.assertTrue(len(breakpoint_ids) == len(lines),65 "expect correct number of breakpoints")66 self.continue_to_breakpoints(breakpoint_ids)67 startFrame = 068 # Verify we get all stack frames with no arguments69 stackFrames = self.get_stackFrames()70 frameCount = len(stackFrames)71 self.assertTrue(frameCount >= 20,72 'verify we get at least 20 frames for all frames')73 self.verify_stackFrames(startFrame, stackFrames)74 # Verify all stack frames by specifying startFrame = 0 and levels not75 # specified76 stackFrames = self.get_stackFrames(startFrame=startFrame)77 self.assertTrue(frameCount == len(stackFrames),78 ('verify same number of frames with startFrame=%i') % (79 startFrame))80 self.verify_stackFrames(startFrame, stackFrames)81 # Verify all stack frames by specifying startFrame = 0 and levels = 082 levels = 083 stackFrames = self.get_stackFrames(startFrame=startFrame,84 levels=levels)85 self.assertTrue(frameCount == len(stackFrames),86 ('verify same number of frames with startFrame=%i and'87 ' levels=%i') % (startFrame, levels))88 self.verify_stackFrames(startFrame, stackFrames)89 # Get only the first stack frame by sepcifying startFrame = 0 and90 # levels = 191 levels = 192 stackFrames = 
self.get_stackFrames(startFrame=startFrame,93 levels=levels)94 self.assertTrue(levels == len(stackFrames),95 ('verify one frame with startFrame=%i and'96 ' levels=%i') % (startFrame, levels))97 self.verify_stackFrames(startFrame, stackFrames)98 # Get only the first 3 stack frames by sepcifying startFrame = 0 and99 # levels = 3100 levels = 3101 stackFrames = self.get_stackFrames(startFrame=startFrame,102 levels=levels)103 self.assertTrue(levels == len(stackFrames),104 ('verify %i frames with startFrame=%i and'105 ' levels=%i') % (levels, startFrame, levels))106 self.verify_stackFrames(startFrame, stackFrames)107 # Get only the first 15 stack frames by sepcifying startFrame = 5 and108 # levels = 16109 startFrame = 5110 levels = 16111 stackFrames = self.get_stackFrames(startFrame=startFrame,112 levels=levels)113 self.assertTrue(levels == len(stackFrames),114 ('verify %i frames with startFrame=%i and'115 ' levels=%i') % (levels, startFrame, levels))116 self.verify_stackFrames(startFrame, stackFrames)117 # Verify we cap things correctly when we ask for too many frames118 startFrame = 5119 levels = 1000120 stackFrames = self.get_stackFrames(startFrame=startFrame,121 levels=levels)122 self.assertTrue(len(stackFrames) == frameCount - startFrame,123 ('verify less than 1000 frames with startFrame=%i and'124 ' levels=%i') % (startFrame, levels))125 self.verify_stackFrames(startFrame, stackFrames)126 # Verify level=0 works with non-zerp start frame127 startFrame = 5128 levels = 0129 stackFrames = self.get_stackFrames(startFrame=startFrame,130 levels=levels)131 self.assertTrue(len(stackFrames) == frameCount - startFrame,132 ('verify less than 1000 frames with startFrame=%i and'133 ' levels=%i') % (startFrame, levels))134 self.verify_stackFrames(startFrame, stackFrames)135 # Verify we get not frames when startFrame is too high136 startFrame = 1000137 levels = 1138 stackFrames = self.get_stackFrames(startFrame=startFrame,139 levels=levels)140 self.assertTrue(0 == len(stackFrames),...
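For context, `stackTrace` is a standard Debug Adapter Protocol request, and the `startFrame`/`levels` arguments the test sweeps through map directly onto the request body. The sketch below shows roughly what a helper such as `get_stackFrames()` would put on the wire; the `threadId` value and the dict itself are illustrative assumptions, not code from the test suite.

# Rough shape of a DAP 'stackTrace' request (illustrative, not from the test suite).
stack_trace_request = {
    'command': 'stackTrace',
    'type': 'request',
    'arguments': {
        'threadId': 1,     # thread to unwind (assumed value)
        'startFrame': 5,   # index of the first frame to return (0-based)
        'levels': 16,      # maximum number of frames; 0 or omitted means all frames
    },
}
# The adapter's response carries body['stackFrames'], the list the test verifies.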
get_raw_skes_data.py
Source:get_raw_skes_data.py
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
import os.path as osp
import os
import numpy as np
import pickle
import logging
import gc


def get_raw_bodies_data(skes_path, ske_name, frames_drop_skes, frames_drop_logger):
    """
    Get raw bodies data from a skeleton sequence.

    Each body's data is a dict that contains the following keys:
      - joints: raw 3D joints positions. Shape: (num_frames x 25, 3)
      - colors: raw 2D color locations. Shape: (num_frames, 25, 2)
      - interval: a list which stores the frame indices of this body.
      - motion: motion amount (only for the sequence with 2 or more bodyIDs).

    Return:
      a dict for a skeleton sequence with 3 key-value pairs:
        - name: the skeleton filename.
        - data: a dict which stores raw data of each body.
        - num_frames: the number of valid frames.
    """
    ske_file = osp.join(skes_path, ske_name + '.skeleton')
    assert osp.exists(ske_file), 'Error: Skeleton file %s not found' % ske_file
    # Read all data from .skeleton file into a list (in string format)
    # print('Reading data from %s' % ske_file[-29:])
    with open(ske_file, 'r') as fr:
        str_data = fr.readlines()

    num_frames = int(str_data[0].strip('\r\n'))
    frames_drop = []
    bodies_data = dict()
    valid_frames = -1  # 0-based index
    current_line = 1

    for f in range(num_frames):
        num_bodies = int(str_data[current_line].strip('\r\n'))
        current_line += 1

        if num_bodies == 0:  # no data in this frame, drop it
            frames_drop.append(f)  # 0-based index
            continue

        valid_frames += 1
        joints = np.zeros((num_bodies, 25, 3), dtype=np.float32)
        colors = np.zeros((num_bodies, 25, 2), dtype=np.float32)

        for b in range(num_bodies):
            bodyID = str_data[current_line].strip('\r\n').split()[0]
            current_line += 1
            num_joints = int(str_data[current_line].strip('\r\n'))  # 25 joints
            current_line += 1

            for j in range(num_joints):
                temp_str = str_data[current_line].strip('\r\n').split()
                joints[b, j, :] = np.array(temp_str[:3], dtype=np.float32)
                colors[b, j, :] = np.array(temp_str[5:7], dtype=np.float32)
                current_line += 1

            if bodyID not in bodies_data:  # Add a new body's data
                body_data = dict()
                body_data['joints'] = joints[b]  # ndarray: (25, 3)
                body_data['colors'] = colors[b, np.newaxis]  # ndarray: (1, 25, 2)
                body_data['interval'] = [valid_frames]  # the index of the first frame
            else:  # Update an already existing body's data
                body_data = bodies_data[bodyID]
                # Stack each body's data of each frame along the frame order
                body_data['joints'] = np.vstack((body_data['joints'], joints[b]))
                body_data['colors'] = np.vstack((body_data['colors'], colors[b, np.newaxis]))
                pre_frame_idx = body_data['interval'][-1]
                body_data['interval'].append(pre_frame_idx + 1)  # add a new frame index

            bodies_data[bodyID] = body_data  # Update bodies_data

    num_frames_drop = len(frames_drop)
    assert num_frames_drop < num_frames, \
        'Error: All frames data (%d) of %s is missing or lost' % (num_frames, ske_name)
    if num_frames_drop > 0:
        frames_drop_skes[ske_name] = np.array(frames_drop, dtype=np.int)
        frames_drop_logger.info('{}: {} frames missed: {}\n'.format(ske_name, num_frames_drop,
                                                                    frames_drop))

    # Calculate motion (only for the sequence with 2 or more bodyIDs)
    if len(bodies_data) > 1:
        for body_data in bodies_data.values():
            body_data['motion'] = np.sum(np.var(body_data['joints'], axis=0))

    return {'name': ske_name, 'data': bodies_data, 'num_frames': num_frames - num_frames_drop}


def get_raw_skes_data():
    skes_name = np.loadtxt(skes_name_file, dtype=str)

    num_files = skes_name.size
    print('Found %d available skeleton files.' % num_files)  # 56578 for rgbd-60

    raw_skes_data = []
    frames_cnt = np.zeros(num_files, dtype=np.int)

    for (idx, ske_name) in enumerate(skes_name):
        ##############################################
        gc.collect()
        if idx == 0:
            skes_path = '/content/skeleton1/nturgb+d_skeletons/'
        if idx == 56578:
            skes_path = '/content/skeleton2/'
        ##############################################
        bodies_data = get_raw_bodies_data(skes_path, ske_name, frames_drop_skes, frames_drop_logger)
        raw_skes_data.append(bodies_data)
        frames_cnt[idx] = bodies_data['num_frames']
        if (idx + 1) % 1000 == 0:
            print('Processed: %.2f%% (%d / %d)' % \
                  (100.0 * (idx + 1) / num_files, idx + 1, num_files))

    with open(save_data_pkl, 'wb') as fw:
        pickle.dump(raw_skes_data, fw, pickle.HIGHEST_PROTOCOL)
    np.savetxt(osp.join(save_path, 'raw_data', 'frames_cnt.txt'), frames_cnt, fmt='%d')

    print('Saved raw bodies data into %s' % save_data_pkl)
    print('Total frames: %d' % np.sum(frames_cnt))

    with open(frames_drop_pkl, 'wb') as fw:
        pickle.dump(frames_drop_skes, fw, pickle.HIGHEST_PROTOCOL)


if __name__ == '__main__':
    save_path = './'

    stat_path = osp.join(save_path, 'statistics')
    if not osp.exists('./raw_data'):
        os.makedirs('./raw_data')

    skes_name_file = osp.join(stat_path, 'skes_available_name.txt')
    save_data_pkl = osp.join(save_path, 'raw_data', 'raw_skes_data.pkl')
    frames_drop_pkl = osp.join(save_path, 'raw_data', 'frames_drop_skes.pkl')

    frames_drop_logger = logging.getLogger('frames_drop')
    frames_drop_logger.setLevel(logging.INFO)
    frames_drop_logger.addHandler(logging.FileHandler(osp.join(save_path, 'raw_data', 'frames_drop.log')))
    frames_drop_skes = dict()

    get_raw_skes_data()

    with open(frames_drop_pkl, 'wb') as fw:
...
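Once the script has finished, the per-body structure documented in `get_raw_bodies_data` can be inspected straight from the saved pickle. A minimal sketch, assuming the default output path `./raw_data/raw_skes_data.pkl` used above:

import pickle

# Load the raw skeleton data written by get_raw_skes_data() (assumed output path).
with open('./raw_data/raw_skes_data.pkl', 'rb') as f:
    raw_skes_data = pickle.load(f)

first = raw_skes_data[0]                      # one skeleton sequence
print(first['name'], first['num_frames'])     # filename and number of valid frames
for body_id, body in first['data'].items():   # per-body raw data
    print(body_id, body['joints'].shape, body['interval'][:5])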
pydevd_custom_frames.py
Source:pydevd_custom_frames.py
from _pydevd_bundle.pydevd_constants import get_thread_id, Null
from pydevd_file_utils import get_abs_path_real_path_and_base_from_frame
from _pydev_imps._pydev_saved_modules import thread, threading
import sys

DEBUG = False


#=======================================================================================================================
# CustomFramesContainer
#=======================================================================================================================
class CustomFramesContainer:

    # Actual values initialized later on.
    custom_frames_lock = None  #: :type custom_frames_lock: threading.Lock
    custom_frames = None
    _next_frame_id = None
    _py_db_command_thread_event = None


def custom_frames_container_init():  # Note: no staticmethod on jython 2.1 (so, use free-function)
    CustomFramesContainer.custom_frames_lock = thread.allocate_lock()

    # custom_frames can only be accessed if properly locked with custom_frames_lock!
    # Key is a string identifying the frame (as well as the thread it belongs to).
    # Value is a CustomFrame.
    CustomFramesContainer.custom_frames = {}

    # Only to be used in this module
    CustomFramesContainer._next_frame_id = 0

    # This is the event we must set to release an internal process events. It's later set by the actual debugger
    # when we do create the debugger.
    CustomFramesContainer._py_db_command_thread_event = Null()


# Initialize it the first time (it may be reinitialized later on when dealing with a fork).
custom_frames_container_init()


#=======================================================================================================================
# CustomFrame
#=======================================================================================================================
class CustomFrame:

    def __init__(self, name, frame, thread_id):
        # 0 = string with the representation of that frame
        self.name = name

        # 1 = the frame to show
        self.frame = frame

        # 2 = an integer identifying the last time the frame was changed.
        self.mod_time = 0

        # 3 = the thread id of the given frame
        self.thread_id = thread_id


def add_custom_frame(frame, name, thread_id):
    CustomFramesContainer.custom_frames_lock.acquire()
    try:
        curr_thread_id = get_thread_id(threading.currentThread())
        next_id = CustomFramesContainer._next_frame_id = CustomFramesContainer._next_frame_id + 1

        # Note: the frame id kept contains an id and thread information on the thread where the frame was added
        # so that later on we can check if the frame is from the current thread by doing frame_id.endswith('|'+thread_id).
        frame_id = '__frame__:%s|%s' % (next_id, curr_thread_id)
        if DEBUG:
            sys.stderr.write('add_custom_frame: %s (%s) %s %s\n' % (
                frame_id, get_abs_path_real_path_and_base_from_frame(frame)[-1], frame.f_lineno, frame.f_code.co_name))

        CustomFramesContainer.custom_frames[frame_id] = CustomFrame(name, frame, thread_id)
        CustomFramesContainer._py_db_command_thread_event.set()
        return frame_id
    finally:
        CustomFramesContainer.custom_frames_lock.release()


addCustomFrame = add_custom_frame  # Backward compatibility


def update_custom_frame(frame_id, frame, thread_id, name=None):
    CustomFramesContainer.custom_frames_lock.acquire()
    try:
        if DEBUG:
            sys.stderr.write('update_custom_frame: %s\n' % frame_id)
        try:
            old = CustomFramesContainer.custom_frames[frame_id]
            if name is not None:
                old.name = name
            old.mod_time += 1
            old.thread_id = thread_id
        except:
            sys.stderr.write('Unable to get frame to replace: %s\n' % (frame_id,))
            import traceback; traceback.print_exc()

        CustomFramesContainer._py_db_command_thread_event.set()
    finally:
        CustomFramesContainer.custom_frames_lock.release()


def get_custom_frame(thread_id, frame_id):
    '''
    :param thread_id: This should actually be the frame_id which is returned by add_custom_frame.
    :param frame_id: This is the actual id() of the frame
    '''
    CustomFramesContainer.custom_frames_lock.acquire()
    try:
        frame_id = int(frame_id)
        f = CustomFramesContainer.custom_frames[thread_id].frame
        while f is not None:
            if id(f) == frame_id:
                return f
            f = f.f_back
    finally:
        f = None
        CustomFramesContainer.custom_frames_lock.release()


def remove_custom_frame(frame_id):
    CustomFramesContainer.custom_frames_lock.acquire()
    try:
        if DEBUG:
            sys.stderr.write('remove_custom_frame: %s\n' % frame_id)
        CustomFramesContainer.custom_frames.pop(frame_id, None)
        CustomFramesContainer._py_db_command_thread_event.set()
    finally:
        CustomFramesContainer.custom_frames_lock.release()
...
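As a rough usage sketch (not taken from pydevd's own code), registering and later removing a custom frame with the helpers above could look like this; the frame, the display name, and the assumption that the module lives at `_pydevd_bundle.pydevd_custom_frames` in pydevd's layout are all illustrative:

import sys
import threading

# Hypothetical usage of the helpers defined above (module path assumed).
from _pydevd_bundle.pydevd_custom_frames import add_custom_frame, remove_custom_frame
from _pydevd_bundle.pydevd_constants import get_thread_id

thread_id = get_thread_id(threading.current_thread())
# Show the current frame under its own label in the debugger's thread list.
frame_id = add_custom_frame(sys._getframe(), 'my coroutine frame', thread_id)
# ... debugger can now display this custom frame ...
remove_custom_frame(frame_id)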
calculateExtractiveSummaryAccuracy.py
Source:calculateExtractiveSummaryAccuracy.py
import sys
import os
import csv

startFrames = []
endFrames = []
writer = 0
FL = ["FL"]
DM = ["DM"]
FB1 = ["FB1"]
FB2 = ["FB2"]
FB3 = ["FB3"]

def runAlgo(folder, experiment):
    print folder
    print experiment
    path = os.path.join(folder, experiment)
    path = os.path.join(path, "keyFrames")
    imageFrames = []
    imageExtension = ".jpg"
    for image in os.listdir(path):
        if image.endswith(imageExtension):
            image = image[:-len(imageExtension)]
            imageFrames.append(int(image))
    imageFrames.sort()
    print "Keyframes:"
    print "\t" + str(imageFrames)
    shotIndex = 0
    truePositive = 0
    trueNegative = 0
    falsePositive = 0
    falseNegative = 0
    shotScore = 0
    for keyframe in imageFrames:
        for i in range(0, len(startFrames)-1):
            if keyframe >= startFrames[i] and keyframe <= startFrames[i+1]:
                shotIndex = i
                break
        # print "Keyframe " + str(keyframe)
        # print "Shot index " + str(shotIndex)
        # print "Start frame " + str(startFrames[shotIndex])
        # print "End frame " + str(endFrames[shotIndex])
        if keyframe >= startFrames[shotIndex] and keyframe <= endFrames[shotIndex]:
            truePositive = truePositive + 1
            print "Adding " + str(keyframe)
        else:
            falsePositive = falsePositive + 1
    for i in range(0, len(startFrames)):
        shotList = list(range(startFrames[i], endFrames[i]))
        # print "Start: " + str(startFrames[i])
        # print "End: " + str(endFrames[i])
        # print "ShotList " + str(shotList)
        exclusionList = set(shotList) - set(imageFrames)
        print "Exclusion " + str(exclusionList)
        falseNegative = falseNegative + len(exclusionList)
        commonSet = list(set(shotList).intersection(imageFrames))
        print "Common " + str(commonSet)
        if len(commonSet) > 0:
            shotScore = shotScore + 1
        # break
    print "True Positive " + str(truePositive)
    print "False Positive " + str(falsePositive)
    print "False Negative " + str(falseNegative)
    shotScoreScaled = float(shotScore)/len(startFrames)
    print "Shot Score " + str(shotScoreScaled)
    if "FL" in experiment:
        FL.append("="+str(shotScore)+"/"+str(len(startFrames)))
    elif "DM" in experiment:
        DM.append("="+str(shotScore)+"/"+str(len(startFrames)))
    elif "FB1" in experiment:
        FB1.append("="+str(shotScore)+"/"+str(len(startFrames)))
    elif "FB2" in experiment:
        FB2.append("="+str(shotScore)+"/"+str(len(startFrames)))
    elif "FB3" in experiment:
        FB3.append("="+str(shotScore)+"/"+str(len(startFrames)))

if __name__ == "__main__":
    if len(sys.argv) < 4:
        print "\n\nUSAGE: calculateExtractiveSummaryAccuracy.py <shot/video/location> <keyframes/folder> <output/csv/file/path>\n\n"
        sys.exit()
    shotFolder = sys.argv[1]
    resultsFolder = sys.argv[2]
    outputFilePath = sys.argv[3]
    csvFile = open(outputFilePath, "wb")
    writer = csv.writer(csvFile, delimiter=",", quotechar='"', quoting=csv.QUOTE_ALL)
    rowData = ["Shot Path", shotFolder]
    writer.writerow(rowData)
    rowData = ["Keyframe folder", resultsFolder]
    writer.writerow(rowData)
    for file in os.listdir(shotFolder):
        if file.endswith(".avi"):
            file = file[:-4]
            splitData = file.split("-")
            startFrames.append(int(splitData[0]))
            endFrames.append(int(splitData[1]))
    startFrames.sort()
    print "Start times:"
    print "\t" + str(startFrames)
    endFrames.sort()
    print "End times:"
    print "\t" + str(endFrames)
    print "Start Frame count " + str(len(startFrames))
    print "End Frame count " + str(len(endFrames))
    print "Shot data ready"
    subFolders = [int(d[:-1]) for d in os.listdir(resultsFolder) if os.path.isdir(os.path.join(resultsFolder, d))]
    # print subFolders
    subFolders.sort()
    # print subFolders
    writer.writerow([""]+subFolders)
    for folder in subFolders:
        folderName = str(folder) + "%"
        folderPath = os.path.join(resultsFolder, folderName)
        experimentFolders = [d for d in os.listdir(folderPath) if os.path.isdir(os.path.join(folderPath, d))]
        print experimentFolders
        for experiment in experimentFolders:
            path = os.path.join(folderPath, experiment)
            path = os.path.join(path, "keyFrames")
            runAlgo(folderPath, experiment)
    writer.writerow(FL)
    writer.writerow(DM)
    writer.writerow(FB1)
    writer.writerow(FB2)
...
views.py
Source:views.py
...
    return render(request, 'about.html')  # the about page

def character(request):
    # return HttpResponse('character')
    return render(request, 'character.html')

def frames(request):
    # return HttpResponse('frames')
    return render(request, 'frames.html')

def KanoHome(request):
    # return HttpResponse('Kano')
    KanoBasicFrames = KanoBasicAttack.objects.all()
    KanoStringFrames = KanoStringAttack.objects.all()
    KanoSpecialFrames = KanoSpecialAttack.objects.all()
    KanoRipperFrames = KanoRipperAttack.objects.all()
    KanoDirtbagFrames = KanoDirtbagAttack.objects.all()
    return render(request, 'KanoHome.html',
                  {'KanoBasicFrames': KanoBasicFrames, 'KanoStringFrames': KanoStringFrames,
                   'KanoSpecialFrames': KanoSpecialFrames, 'KanoRipperFrames': KanoRipperFrames,
                   'KanoDirtbagFrames': KanoDirtbagFrames})

def NightwolfHome(request):
    # return HttpResponse('Nightwolf')
...
vpxtest.py
Source:vpxtest.py
...
def main():
    camera = Camera(0, dict(width=640, height=480))
    clock = Clock(RATE)
    # capture frames as scv images
    frames = list(capture_frames(camera, clock, NUM_FRAMES))
    print '=== REALTIME ==='
    decoded_frames = run_test(frames, vpx.VPX_DL_REALTIME)
    playback(clock, decoded_frames)
    print '=== GOOD QUALITY ==='
    decoded_frames = run_test(frames, vpx.VPX_DL_GOOD_QUALITY)
    playback(clock, decoded_frames)
    print '=== BEST QUALITY ==='
    decoded_frames = run_test(frames, vpx.VPX_DL_BEST_QUALITY)
    playback(clock, decoded_frames)

def capture_frames(camera, clock, num_frames):
    for fno in xrange(num_frames):
        clock.tick()
        yield camera.getImage()

def playback(clock, frames):
    for img in frames:
        clock.tick()
        img.show()

def run_test(frames, deadline):
    # Encode frames
    start = time.time()
    w, h = frames[0].width, frames[0].height
    clip = M.Clip.encode(w, h, frames, deadline=deadline)
    elapsed = time.time() - start
    print '%d frames encoded in %fs, %.2f fps (avg) (%d kB)' % (
...
VideoFrame.py
Source:VideoFrame.py
1"""Dummy Dataset and data loader for frames in single video."""2import os3import skvideo.io4import torch.utils.data as data5class VideoFrame(data.Dataset):6 """Dummy dataset for frames in single video."""7 def __init__(self, filepath, num_frames, transform=None):8 """Init VideoFrame dataset."""9 super(VideoFrame, self).__init__()10 self.filepath = filepath11 self.num_frames = num_frames12 self.transform = transform13 self.frames = None14 self.decode()15 def __getitem__(self, index):16 """Get frames from video."""17 frame = self.frames[index, ...]18 if self.transform is not None:19 frame = self.transform(frame)20 return frame21 def __len__(self):22 """Get number of the frames."""23 return self.num_frames24 def decode(self):25 """Decode frames from video."""26 if os.path.exists(self.filepath):27 try:28 self.frames = skvideo.io.vread(29 self.filepath, num_frames=self.num_frames)30 except AssertionError:31 self.frames = skvideo.io.vread(self.filepath)32 # return numpy.ndarray (N x H x W x C)33 self.frames = skvideo.utils.vshape(self.frames)34 self.num_frames = self.frames.shape[0]35 else:...
preprocessor.py
Source:preprocessor.py
import json
import numpy as np
import seaborn as sns
import matplotlib.pylab as plt

# Raw frames are joined into sequences of 16 frames (about 2 seconds each)
# The sequences are then saved as samples for training
raw_data_folder = r"C:\chalmers_thesis\data\TurnAround_201956203615"
destination_data_folder = r"C:\training_data\turn_around"
frames = []
prefix = "\\frame"
frame_rate = 16
num_doppler_bins = 16
num_range_bins = 64
start_index = 1
end_index = 1250 + start_index + frame_rate
for i in range(start_index, end_index):
    frames.append(np.nan_to_num(np.array(json.load(open(raw_data_folder + prefix + str(i) + ".txt")), dtype="float")[0:frame_rate, 18:82]))
    frames[-1] = ((frames[-1]) / (np.max(frames[-1]))).tolist()
for i in range(len(frames) - frame_rate):
    sequence = []
    sequence.append(frames[i])
    sequence.append(frames[i+1])
    sequence.append(frames[i+2])
    sequence.append(frames[i+3])
    sequence.append(frames[i+4])
    sequence.append(frames[i+5])
    sequence.append(frames[i+6])
    sequence.append(frames[i+7])
    sequence.append(frames[i+8])
    sequence.append(frames[i+9])
    sequence.append(frames[i+10])
    sequence.append(frames[i+11])
    sequence.append(frames[i+12])
    sequence.append(frames[i+13])
    sequence.append(frames[i+14])
    sequence.append(frames[i+15])
...
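The sixteen explicit `append` calls above simply build a sliding window of length `frame_rate`; an equivalent, more compact sketch of that windowing step (same behavior, assuming the `frames` list and `frame_rate` from the script) is:

# Equivalent sliding-window construction: each sample is 16 consecutive frames.
sequences = []
for i in range(len(frames) - frame_rate):
    sequences.append(frames[i:i + frame_rate])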
Using AI Code Generation
  console.log(data);
});
wpt.getLocations(function(err, data) {
  console.log(data);
});
wpt.getTesters(function(err, data) {
  console.log(data);
});
Using AI Code Generation
var WebPageTest = require('webpagetest');
var wpt = new WebPageTest('www.webpagetest.org');
wpt.runTest('www.google.com', {
  videoParams: {
  }
}, function (err, data) {
  if (err) return console.error(err);
  console.log('Test status:', data.statusText);
  console.log('Test ID:', data.data.testId);
  wpt.getTestResults(data.data.testId, function (err, data) {
    if (err) return console.error(err);
    console.log('Test completed:', data.data.completed);
    console.log('Test ID:', data.data.testId);
    console.log('Test URL:', data.data.summary);
    console.log('Test Location:', data.data.location);
    console.log('Test First View:', data.data.median.firstView);
    console.log('Test Repeat View:', data.data.median.repeatView);
  });
});
Using AI Code Generation
var WebPageTest = require('webpagetest');
var wpt = new WebPageTest('www.webpagetest.org');
var options = {
};
wpt.runTest(url, options, function(err, data) {
  if (err) return console.error(err);
  console.log('Test ID: %s', data.data.testId);
  wpt.getTestResults(data.data.testId, function(err, data) {
    if (err) return console.error(err);
    console.log('Test completed');
    console.log('First View: %s', data.data.average.firstView.loadTime);
    console.log('Repeat View: %s', data.data.average.repeatView.loadTime);
  });
});
Using AI Code Generation
var wpt = new WebPageTest('www.webpagetest.org', 'A.3a6a9f6c0b6d3a6c4a7a4e4c4e7c4a6');
  if (err) {
    console.log(err);
  } else {
    console.log('Test completed');
    console.log(data);
    console.log(data.data.runs[1].firstView.SpeedIndex);
    console.log(data.data.runs[1].firstView.TTFB);
    console.log(data.data.runs[1].firstView.fullyLoaded);
    console.log(data.data.runs[1].firstView.fullyLoaded);
    console.log(data.data.runs[1].firstView.TTFB);
    console.log(data.data.runs[1].firstView.fullyLoaded);
    console.log(data.data.runs[1].firstView.fullyLoaded);
    console.log(data.data.runs[1].firstView.TTFB);
    console.log(data.data.runs[1].firstView.fullyLoaded);
  }
});
  if (err) {
    console.log(err);
  } else {
    console.log('Test completed');
    console.log(data);
    console.log(data.data.runs[1].firstView.SpeedIndex);
    console.log(data.data.runs[1].firstView.TTFB);
    console.log(data.data.runs[1].firstView.fullyLoaded);
  }
});
wpt.getTestResults('140
Using AI Code Generation
var WebPageTest = require('webpagetest');
var wpt = new WebPageTest('www.webpagetest.org');
var options = {
  lighthouseConfig: {
    settings: {
    }
  }
};
wpt.runTest(url, options, function(err, data) {
  if (err) return console.error(err);
  var testId = data.data.testId;
  console.log('Test ID: ' + testId);
  wpt.getTestResults(testId, function(err, data) {
    if (err) return console.error(err);
    console.log('Speed Index: ' + data.data.average.firstView.SpeedIndex);
    console.log('First Meaningful Paint: ' + data.data.average.firstView.fMPFMP);
    console.log('First CPU Idle: ' + data.data.average.firstView.fCPUIlde);
    console.log('First Interactive: ' + data.data.average.firstView.fI);
    console.log('Estimated Input Latency: ' + data.data.average.firstView.eIL);
    console.log('Time to Interactive: ' + data.data.average.firstView.tTI);
  });
});
Using AI Code Generation
var location = 'Dulles:Chrome';
var runs = 1;
var timeout = 10000;
var pollInterval = 1000;
var firstViewOnly = false;
var pollResults = 1;
var video = 1;
var videoParams = {
};
wpt.runTest(url, {
}, function(err, data) {
  if (err) return console.error(err);
  console.log('Test Results for: ' + data.data.summary);
  console.log('View the test at: ' + data.data.userUrl);
  console.log('View the video at: ' + data.data.userUrl + '&video=1');
  console.log('View the waterfall at: ' + data.data.userUrl + '&waterfall=1');
});
#### new WebPageTest(url)
#### new WebPageTest(url, options)
#### new WebPageTest(url, options, auth)
#### new WebPageTest(url, options, auth, headers)
#### new WebPageTest(url, options, auth, headers, version)
#### new WebPageTest(url, options, auth, headers, version, debug)
#### new WebPageTest(url,
Using AI Code Generation
var wpt = require('webpagetest');
var api = new wpt('www.webpagetest.org');
  if (err) {
    console.log(err);
  } else {
    console.log(data);
  }
});
Using AI Code Generation
var wptdriver = require('wptdriver');
wptdriver.frames(function(err, frames) {
  if (err) {
    console.log(err);
  } else {
    console.log(frames);
  }
});
var wptdriver = require('wptdriver');
wptdriver.frames(function(err, frames) {
  if (err) {
    console.log(err);
  } else {
    console.log(frames[0].id);
  }
});
var wptdriver = require('wptdriver');
wptdriver.frames(function(err, frames) {
  if (err) {
    console.log(err);
  } else {
    console.log(frames[0].name);
  }
});
var wptdriver = require('wptdriver');
wptdriver.frames(function(err, frames) {
  if (err) {
    console.log(err);
  } else {
    console.log(frames[1].id);
  }
});
var wptdriver = require('wptdriver');
wptdriver.frames(function(err, frames) {
  if (err) {
    console.log(err);
  } else {
    console.log(frames[1].name);
  }
});
var wptdriver = require('wptdriver');
wptdriver.frames(function(err, frames) {
  if (err) {
    console.log(err);