Best Python code snippet using tox_python
protocol_analysis.py
Source:protocol_analysis.py
1class Protocol:2 """3 ä¸ç§éç¨çç½ç»å议解ææ¹æ³,æ¯å¦tls,ä¸è¬æ ¼å¼ä¸ºï¼å
容é¿åº¦(åºå®åèæ°)+å
容ï¼4 ä¿®æ¹æ¥æ 202202115 """6 def __init__(self, struct: dict, content: bytes = None, strict=False):7 if content: # è¾å
¥å
容ï¼å¯¹å
容è¿è¡è§£æ8 # åå«è¿å解æçå
容以åå
容对åºçå±æ§9 self.parsed_content, self._parsed_content = self._parse(struct, content, strict)10 else: # 没æå
容ï¼æ ¹æ®structçæåè串11 self.generated_content = self.generate(struct)12 self.parsed_content = None # ä¸è½å°ï¼ç¨äºå¤ææ¯è§£æè¿æ¯çæåè串13 self.__prefix = "" # ç¨äºrepræ ¼å¼åæ¾ç¤º14 def __bool__(self):15 return bool(self.parsed_content)16 @classmethod17 def generate(cls, struct):18 """19 æ ¹æ®struct,çæåè串20 :param struct: è¾å
¥çåå
¸21 :return: è¾åºåè串22 """23 generated_content = b"" # æåçæçåè串24 length_num = None # æ ¹æ®å
容ç大å°ï¼çæå
容çé¿åº¦ï¼ç¨lengthNum个åè表示ãNone表示ä¸éè¦è®¡ç®é¿åº¦25 for k, v in struct.items():26 if isinstance(v, tuple):27 num, content = v # 对åºçåèæ°åå®é
å
容28 if content is None:29 if not isinstance(num, int):30 raise TypeError(f"ç±»åé误ï¼{k}对åºçå
ç»ç第ä¸ä½ï¼å¿
é¡»æ¯ä¸ä¸ªæ´æ°å¼ï¼æ¥éå®åèæ°")31 length_num = num # ä¸é¢å
容çé¿åº¦æªç¥ï¼ä¹ååæ·»å ,å
è®°ä¸ä½ç½®ååèæ°32 elif isinstance(content, bytes): # contentæ¯bytesç±»åï¼æ²¡æé®é¢ï¼å¯¹numè¿è¡éªè¯33 if num is not None:34 if isinstance(num, int):35 if len(content) != num:36 # numä¸æ¯None,è¯æå
容å
·æåºå®é¿åº¦,å æ¤å¯¹å
容é¿åº¦è¿è¡æ£éª37 raise ValueError(38 f"å¼é误ï¼{k} 对åºçå¼(å
ç»ç¬¬äºä½)çåèæ°åºè¯¥æ¯{num},å®é
åèæ°æ¯{len(content)}")39 else: # numä¸æ¯None,ä¹ä¸æ¯intç±»åï¼æé®é¢40 raise TypeError(f"ç±»åé误ï¼{k} éè¦éå®åèæ°,å¿
é¡»æ¯ä¸ä¸ªæ´æ°å¼")41 if length_num: # éè¦è¡¥ä¸æ¬å
容çé¿åº¦ï¼ç¨lengthNum个åèæ°è¡¨ç¤º42 if len(content) > 256 ** length_num - 1: # å
容é¿åº¦è¶
åºlengthNum个åèæ°è¡¨ç¤ºèå´43 raise ValueError(44 f"å
容è¶
åºéå¶ï¼{k} å
容(å
ç»ç¬¬äºä½)æ大å
许é¿åº¦æ¯{256 ** length_num - 1},å®é
å
容é¿åº¦æ¯{len(content)}")45 else:46 generated_content += bytes.fromhex(47 hex(len(content))[2:].zfill(length_num * 2)) # å
å ä¸æ¬æ¬¡å
容çé¿åº¦48 length_num = None # è¿ä¸ªåèçå
容已ç»ç¡®å®äºï¼lengthNumè¯å®æ¯None49 generated_content += content # å ä¸æ¬æ¬¡å
容50 else: # å¿
é¡»æ¯åèç±»åæè
None51 raise TypeError(f"ç±»åé误ï¼{k} 对åºçå¼(å
ç»ç¬¬äºä½)åºè¯¥æ¯å
容åºè¯¥æ¯bytesç±»åæè
æ¯None,å®é
ç±»åæ¯{type(content)}")52 elif isinstance(v, dict): # 对åºçå¼æ¯åå
¸53 content = cls.generate(v)54 if length_num: # éè¦å¡«å
ä¹åçé¿åº¦æ®µ55 if len(content) > 256 ** length_num - 1: # å
容é¿åº¦è¶
åºlengthNum个åèæ°è¡¨ç¤ºèå´56 raise ValueError(f"å
容è¶
åºéå¶ï¼{k} å
容æ大é¿åº¦æ¯{256 ** length_num - 1},å®é
å
容é¿åº¦æ¯{len(content)}")57 else:58 generated_content += bytes.fromhex(hex(len(content))[2:].zfill(num * 2)) # å
å ä¸æ¬æ¬¡å
容çé¿åº¦59 length_num = None # è¿ä¸ªåèçå
容已ç»ç¡®å®äºï¼lengthNumè¯å®æ¯None60 generated_content += content # å ä¸æ¬æ¬¡å
容61 else:62 raise TypeError(f"ç±»åé误ï¼{k} 对åºçåºè¯¥æ¯æ°ç»æè
åå
¸,è¾å
¥çç±»åæ¯{type(v)}")63 return generated_content64 @classmethod65 def _parse(cls, struct, content, strict=False, lastone=True):66 """67 å°åè串(content)解æ为å
·æä¸å®ç»æ(struct)çåå
¸,建议使ç¨match68 :param struct:åå
¸ï¼è¡¨ç¤ºåè®®çç»æ69 :param content: åè串å
容70 :param strict: ä¸¥æ ¼æ¨¡å¼ä¸ï¼éè¦æ ¹æ®åèå¼å¯¹å
容çå¼è¿è¡æ ¡éª71 :param lastone: æä¸å±ï¼åªè¿åéè¦çå
容,ä¸æ¯æä¸å±ï¼è¦è¿åæ´å¤åæ°ä¾ä¸å±ä½¿ç¨72 :return: è¿åcontent解æåçåå
¸ç»æ,以å带æ详ç»å
容çåå
¸ç»æ73 """74 try:75 if isinstance(content, bytes):76 parsed_content = {} # æåè¿åçcontent解æåçåå
¸ç»æ77 _parsed_content = {} # å
·æ详ç»çå±æ§ï¼ä½ä½¿ç¨ä¸æ¹ä¾¿ï¼ä¸»è¦ç¨äºæ¾ç¤º78 _length = hex(len(content))[2:] # ç¨åå
è¿å¶è¡¨ç¤ºå
容çé¿åº¦79 if len(_length) % 2 != 0: # åè串é¿åº¦ä¸å®æ¯8ä½çæ´é¤ï¼ä¸å®æ¯æå¶æ°ä¸ªåå
è¿å¶å¼80 _length = "0" + _length # è¡¥0ï¼å½¢ææ£ç¡®ç16è¿å¶81 _length = bytes.fromhex(_length) # 16è¿å¶åè串82 for k, v in struct.items():83 if isinstance(v, tuple):84 _num, _content_reference = v # æå çä½æ°ååèå¼ï¼å¦æstrict为True,ä¼æ ¹æ®åèå¼å¯¹å
容è¿è¡å¤æ85 if _num is None: # è¯æè¿ä¸ªé¿åº¦ä¸æ¯åºå®çï¼èæ¯ç±ä¹åçå¼æå®ç86 _length = int.from_bytes(_length, "big") # lengthæ¯ä¸ä¸ªåè串çå
容ï¼è¡¨ç¤ºè¿æ¬¡çå
容é¿åº¦87 parsed_content[k] = content[:_length]88 _parsed_content[k] = (content[:_length], _length)89 content = content[_length:]90 elif isinstance(_num, int):91 _length = _temp_content = content[:_num] # contentå¯è½æ¯ä¸ä¸ä¸ªç段çé¿åº¦,ä¿åå°_length92 if strict and _content_reference is not None: # ä¸¥æ ¼æ¨¡å¼ä¸ä¸æåèå¼çæ¶åï¼éè¦å¤æå
容æ¯å¦æ£ç¡®93 if isinstance(_content_reference, list): # åèå
容æ¯å表ï¼å¯¹åºçå¼åºè¯¥æ¯å
¶ä¸ä¸ä¸ª94 if _temp_content not in _content_reference:95 raise ValueError(96 f"å
容é误,{k} å
容åºè¯¥æ¯ {_content_reference} ä¸çä¸ä¸ª,å®é
å
å®¹æ¯ {_temp_content}")97 elif isinstance(_content_reference, bytes): # åèå
容ä¸æ¯å表èæ¯åä¸çåè串98 if _temp_content != _content_reference:99 raise ValueError(f"å
容é误,{k} å
容åºè¯¥æ¯ {_content_reference},å®é
å
å®¹æ¯ {_temp_content}")100 else:101 raise TypeError(f"ç»æé误,{k} çåèå¼ç±»ååºè¯¥æ¯å表ãåè串ï¼å®é
ç±»åæ¯ {type(_content_reference)}")102 parsed_content[k] = _temp_content # è®°å½å
容é¿åº¦åå
容103 _parsed_content[k] = (_temp_content, _num) # è®°å½å
容é¿åº¦åå
容104 content = content[_num:]105 else:106 raise TypeError(f"ç»æé误,{k} çé¿åº¦éå®å¼,å¿
é¡»æ¯ä¸ä¸ªæ´æ°å¼æè
æ¯None,å®é
æ¯{type(_num)}")107 elif isinstance(v, dict): # ä¸ä¸çº§åè®®æè
ä¹åé¿åº¦å¼æå
å«çå
容108 _length = int.from_bytes(_length, "big") # lengthæ¯ä¹åä¿åçæ¬ç段é¿åº¦ï¼è½¬åææ´æ°109 _pc, __pc, content = cls._parse(v, content[:_length], lastone=False)110 parsed_content[k] = _pc111 _parsed_content[k] = (__pc, _length)112 elif isinstance(v, list): # ä¸å®é¿å
ç´ ï¼ä¸å®å¨ææ«å°¾113 list_parsed_content = []114 _list_parsed_content = {}115 _ele_num = 0 # 表示解æåºçå个å
ç´ ï¼ä»0å¼å§è®¡æ°116 _clength = _length = len(content)117 if len(v) != 1 or not isinstance(v[0], dict):118 raise TypeError(f"ç»æé误,{k} åèå¼liståºè¯¥åªæä¸ä¸ªå
ç´ ï¼ä¸å
ç´ ç±»åæ¯åå
¸")119 while True:120 _pc, __pc, content = cls._parse(v[0], content, lastone=False)121 ll = len(content)122 list_parsed_content.append((_pc))123 _list_parsed_content[str(_ele_num)] = (__pc, _length - ll)124 _length = ll125 _ele_num += 1126 if not content:127 break128 parsed_content[k] = list_parsed_content129 _parsed_content[k] = (_list_parsed_content, _clength)130 else:131 raise TypeError(f"ç»æé误,{k} åèå¼çç±»ååºè¯¥æ¯dictãtupleãlistä¸çä¸ä¸ª,å®é
å
å®¹æ¯ {type(v)}")132 if lastone:133 return (parsed_content, _parsed_content)134 else:135 return (parsed_content, _parsed_content, content)136 else:137 raise TypeError(f"content 对åºçå¼åºè¯¥æ¯bytesç±»å,è¾å
¥çç±»åæ¯{type(content)}")138 except Exception:139 return (None, None)140 @classmethod141 def match(cls, struct, content, strict=False):142 """143 æ¥çåè串(content)æ¯å¦ç¬¦åstructçç»æ144 :param struct: åå
¸ï¼è¡¨ç¤ºåè®®çç»æ145 :param content: åè串å
容146 :param strict: ä¸¥æ ¼æ¨¡å¼ä¸ï¼éè¦æ ¹æ®åèå¼å¯¹å
容çå¼è¿è¡æ ¡éª147 :return: å¦æå¹é
è¿ååå
¸ï¼å¦æå¹é
失败ï¼è¿åFalse148 """149 try:150 parsed_content = cls._parse(struct, content, strict)[0]151 return parsed_content152 except Exception:153 return False154 def _to_string(self, _parsed_content):155 repr_str = ""156 self.__prefix += " "157 for k, v in _parsed_content.items():158 repr_str += f"\n{self.__prefix}{k}:"159 if isinstance(v[0], dict):160 repr_str += f"({v[1]}) {self._to_string(v[0])}"161 else:162 repr_str += f"({v[1]}) {v[0]}"163 self.__prefix = self.__prefix[2:]164 return repr_str165 def __repr__(self):166 return self._to_string(self._parsed_content)167 def __bytes__(self):...
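The struct dict drives both directions: a value of (num_bytes, bytes) is a fixed field, (num_bytes, None) declares a length field that generate() fills in from the size of the following variable-length field, and (None, ...) marks that variable-length field itself. Below is a minimal usage sketch, not part of the original source; the field names "type", "length" and "payload" are made up for illustration.

record = {
    "type": (1, b"\x16"),         # fixed 1-byte field
    "length": (2, None),          # 2-byte length field, filled in automatically by generate()
    "payload": (None, b"hello"),  # variable-length content; its size goes into the length field
}

raw = Protocol.generate(record)
print(raw)  # b'\x16\x00\x05hello'

# For parsing, the second tuple item acts as an optional reference value checked in strict mode.
layout = {
    "type": (1, b"\x16"),
    "length": (2, None),
    "payload": (None, None),
}
parsed = Protocol.match(layout, raw, strict=True)
print(parsed)  # {'type': b'\x16', 'length': b'\x00\x05', 'payload': b'hello'}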
metagenerate.py
Source:metagenerate.py
from presenter import *
import wiki
import sys
from outbuffer import *
from visitor import *
from entities import SpecificEnabler, DeprecatedSpecificEnabler, Application, PrettyPrinter
import logging
from fidoc import FIdoc


def generate_page(dw, outpage, meta):
    # out = FileBuffer(outfile)
    out = PageBuffer(dw, outpage)

    out << dw.heading(1, "Generated output from FIcontent's Meta-Structure")

    generated_content = []

    pp = PrettyPrinter()

    # Overall timeline of experiments
    #######################################

    generated_content += [
        ("Timeline of Experiments", ExperimentTimelinePresenter()),
    ]

    # Experiments per site
    #######################################

    sites = ["Zurich", "Brittany", "Lancaster", "Cologne", "Berlin", "Barcelona"]
    generated_content += [
        ("Experiments in %s" % s, ExperimentTimelinePresenter(s)) for s in sites
    ]

    # All tested scenarios
    #######################################

    generated_content += [
        ("All Tested Scenarios", ListPresenter(TestedScenariosVisitor(), pp.print_Scenario)),
    ]

    # All SEs and their relations
    #######################################

    generated_content += [(
        "Relations of %s SE" % se.get_name(),
        SEGraphPresenter(se, pp.dispatch)
    ) for se in meta.get_specific_enablers()
    ]

    # All SEs and their descriptions
    #######################################

    generated_content += [(
        "Description of %s SE" % se.get_name(),
        PropertyPresenter(se, '/spec/documentation/what-it-does')
    ) for se in meta.get_specific_enablers()
    ]

    # All SEs and their resources
    #######################################

    generated_content += [(
        "Resources of %s SE" % se.get_name(),
        ResourcesPresenter(dw, se, pp.dispatch)
    ) for se in meta.get_specific_enablers()
    ]

    # All SEs and their release cycle
    #######################################

    generated_content += [(
        "Release cycle of %s SE" % se.get_name(),
        ReleaseCyclePresenter(dw, se, pp.dispatch)
    ) for se in meta.get_specific_enablers()
    ]

    # Dependencies per scenario
    #######################################

    # v = ExperimentsVisitor()
    # v.visit(meta_structure)

    # experiments = list(set([(e.scenario, e.site) for e in v.result]))

    # Dependencies per scenario (only actual usage)
    # generated_content += [
    #     ('Scenario "%s" on Site %s - USES' % e, DependencyPresenter(e[0], e[1], ['USES'])) for e in experiments
    # ]

    # Dependencies per scenario (actual and planned usage)
    # relations = ['USES', 'WILL USE', 'MAY USE']
    # generated_content += [
    #     ('Scenario "%s" on Site %s - ALL' % e, DependencyPresenter(e[0], e[1], relations)) for e in experiments
    # ]

    # Enablers used in experiments
    # niceenabler = lambda e: e.identifier + ' ' + e.entity

    # experiments = v.result # [e for e in v.result if (e.site == "Barcelona") and (e.application.identifier == "Smart City Guide (Android App)")]
    # generated_content += [(
    #     'Enablers tested in Scenario "%s" on Site %s at %s' % (e.scenario, e.site, e.date),
    #     ListPresenter(
    #         EnablersTestedVisitor(e.application, ts = e.date),
    #         niceenabler
    #     )
    # ) for e in experiments
    # ]

    # GE Utilization
    #######################################

    generated_content += [(
        "Utilization of %s GE" % ge.get_name(),
        ListPresenter(UsedByVisitor(
            ge,
            follow_relations = ['USES'],
            collect_entities = [SpecificEnabler, DeprecatedSpecificEnabler, Application]
        ), pp.dispatch)
    ) for ge in meta.get_generic_enablers()
    ]

    # Overall Uptake of Generic Enablers
    #######################################

    generated_content += [
        ("Overall Uptake of Generic Enablers", UptakePresenter(pp.dispatch, hideunused=True))
    ]

    # FI-PPP SEis Usage and General Information
    #######################################

    col_fippp = ['name', 'owner', 'product', 'open-source', 'mode', 'last-update', 'next-update', 'assets', 'catalog']
    col_overview = ['name', 'owner', 'final-release']
    generated_content += [
        ("FI-PPP SEis Usage and General Information", CockpitPresenter(col_fippp, pp.dispatch)),
        ("Overview of FIcontent SEs", CockpitPresenter(col_overview, pp.dispatch, sort = ['name']))
    ]

    # SE Discovery Summary
    #######################################

    generated_content += [
        ("SE Discovery Summary", SummaryPresenter())
    ]

    # Incomplete/invalid SEis
    #######################################

    generated_content += [
        ("Incomplete and/or invalid SEs", ListPresenter(InvalidEntitiesVisitor('SE'), pp.dispatch))
    ]

    # GE Validation Survey
    #######################################
    # generated_content += [
    #     ("GE Validation Survey", GESurveyPresenter())
    # ]

    # Roadmap Releases
    #######################################
    # releases = set([rel.get_name() for rel in meta.get_releases()])
    roadmaps = ['socialtv', 'smartcity', 'gaming', 'common']

    for rel in meta.get_releases():
        generated_content += [(
            "Roadmap %s - %s" % (road, rel.get_name()),
            RoadmapPresenter(dw, road, rel)
        ) for road in roadmaps
        ]

    #######################################
    # main generation loop
    #######################################

    for h, p in generated_content:
        logging.info('Generating -> %s ...' % h)
        p.present(meta)
        out << dw.heading(2, h)
        p.dump(out)
        out << ''

    logging.info("Flushing generated content ...")
    out.flush()


def generate_meta_information(fidoc, generatedpage):
    dw = fidoc.get_wiki()
    meta = fidoc.get_meta_structure()
    # pub = fidoc.get_publisher()

    if meta is None:
        logging.fatal("Invalid meta structure.")

    generate_page(dw, generatedpage, meta)


if __name__ == "__main__":

    import wikiconfig

    metapage = ":FIcontent:private:meta:"
    if len(sys.argv) > 1:
        metapage = sys.argv[1]

    generatedpage = ":FIcontent:private:meta:generated"
    if len(sys.argv) > 2:
        generatedpage = sys.argv[2]

    try:
        logging.info("Connecting to remote DokuWiki at %s" % wikiconfig.url)
        # dw = wiki.DokuWikiLocal(url, 'pages', 'media')
        dw = wiki.DokuWikiRemote(wikiconfig.url, wikiconfig.user, wikiconfig.passwd)

        skipchecks = [
            # tv
            # 'Content Similarity', 'Audio Fingerprinting',
            # city
            # 'Local Information', 'Recommendation Services',
            # gaming
            # 'Visual Agent Design', 'Augmented Reality - Marker Tracking', 'Networked Virtual Character',
            # common
            # 'POI Storage', 'Content Sharing'
        ]

        logging.info("Loading FIdoc object ...")
        fidoc = FIdoc(dw, skipchecks)

        generate_meta_information(fidoc, generatedpage)

        logging.info("Finished")

    except logging.FatalError:
        pass
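The main generation loop only assumes that every entry in generated_content pairs a heading string with an object exposing present(meta) and dump(out). A hypothetical sketch of a custom presenter plugging into that loop; the class name and the counting logic are invented for illustration, only the interface is taken from the loop above.

class SECountPresenter:
    """Counts specific enablers and writes a one-line summary to the output buffer."""

    def __init__(self):
        self.count = 0

    def present(self, meta):
        # gather whatever data the presenter needs from the meta structure
        self.count = len(meta.get_specific_enablers())

    def dump(self, out):
        # write the prepared content to the page buffer
        out << "Number of Specific Enablers: %d" % self.count

# It would then be registered like the built-in presenters:
# generated_content += [("SE Count", SECountPresenter())]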
batch_functions.py
Source:batch_functions.py
import numpy as np
from PIL import Image
import itertools
from tensorflow.keras.utils import Sequence


class OCR_generator(Sequence):
    """Generator for the input data to the OCR model. We're also preparing
    arrays for the CTC loss which are related to the output dimensions"""

    def __init__(self, base_generator, batch_size, char_to_lbl_dict,
                 img_h, keras_augmentor, epoch_size=500, validation=False):
        """Inputs
        base_generator: the base trdg generator
        batch_size: number of examples fed to the NN simultaneously
        char_to_lbl_dict: mapping from character to its label (int number)
        img_h: we assume that the input here is already scaled to the correct height
        keras_augmentor: Keras augmentor to add more augmenting that the current base generator doesn't,
            for example zoom, translate etc"""
        self.base_generator = base_generator
        self.batch_size = batch_size
        self.char_to_lbl_dict = char_to_lbl_dict
        self.img_h = img_h
        self.epoch_size = epoch_size
        self.validation = validation
        self.keras_augmentor = keras_augmentor
        # total number of unique characters
        self.num_chars = len(char_to_lbl_dict)

    def __len__(self):
        """Denotes the number of batches per epoch
        :return: number of batches per epoch"""
        return self.epoch_size

    def __getitem__(self, index):
        """Generate one batch of data"""
        # stores the length (number of characters) of each word in a batch
        label_lens = np.zeros((self.batch_size), dtype=np.float32)
        # generate content for the batch as a list of lists
        generated_content = list(list(tup) for tup in itertools.islice(self.base_generator, self.batch_size))
        # preprocess the batch content
        generated_content, img_w, max_word_len_batch = \
            self.preprocess_batch_imgs(generated_content)
        # allocate the vectors for batch labels (integers for each character in a word)
        # and the padded + preprocessed images
        batch_labels = np.zeros((self.batch_size, max_word_len_batch), dtype=np.float32)
        batch_imgs = np.zeros((self.batch_size, img_w, self.img_h, 3), dtype=np.float32)
        # the number of time distributed values, or in other words the length of the time axis in the output,
        # or equivalently the width of the image after convolutions. Needed as input for the CTC loss.
        # Each maxpooling halves the width dimension, so in our model the scaling is 1/4 with 2 maxpoolings
        t_dist_dim = int(img_w / 4)
        # we need to give it for every entry
        input_length = np.full((self.batch_size), t_dist_dim, dtype=np.float32)
        # fill the batch
        for batch_ind in range(self.batch_size):
            # get a new image and the content word for it
            img_arr, word = generated_content[batch_ind]
            batch_imgs[batch_ind, :, :] = img_arr
            # the labels for each word: even if the max number of characters is, say, 10
            # and the word is just 5 characters, the first 5 positions are filled by the character labels
            # and the rest are whatever (zeros in our implementation); in the real loss they're ignored
            # because of the label_length input
            labels_arr = np.array([self.char_to_lbl_dict[char] for char in word])
            batch_labels[batch_ind, 0:len(labels_arr)] = labels_arr
            label_lens[batch_ind] = len(word)
        # now the hacky part:
        # keras requires y_pred and y_true in the loss function to be the same shape,
        # but the ctc losses use y_pred of shape (batchsize, tdistdim, num_chars) from the NN
        # and batch_labels, input_length, label_lens which are the "y_true"; these have
        # different dimensions, so pack them into (batchsize, tdistdim, num_chars) and later
        # unpack in the loss to stop the whining.
        y_true = np.zeros((self.batch_size, t_dist_dim, self.num_chars), dtype=np.float32)
        y_true[:, 0:max_word_len_batch, 0] = batch_labels
        y_true[:, 0, 1] = label_lens
        y_true[:, 0, 2] = input_length
        if self.validation:
            # for validation we return slightly different things so we can do fancy
            # stuff at callback
            return batch_imgs, batch_labels, input_length, label_lens
        else:  # return x, y for the model
            return batch_imgs, y_true

    def preprocess_batch_imgs(self, generated_content):
        """Function to do augmentations, pad images, return longest word len etc"""
        # check the largest image width and word len in the batch
        pil_images = [img for img, word in generated_content]
        max_width = max([img.size[0] for img in pil_images])
        max_word_len_batch = max([len(word) for img, word in generated_content])
        # expand img width to a multiple of 4 so that the maxpoolings will result in a
        # well defined integer length for the mapped tdist dimension ("new width")
        if max_width % 4 == 0:
            img_w = max_width
        else:
            img_w = max_width + 4 - (max_width % 4)
        # augment batch images
        for batch_ind in range(self.batch_size):
            # pad the image width to the largest (fixed) image width
            pil_img = pil_images[batch_ind]
            width, height = pil_img.size
            new_img = Image.new(pil_img.mode, (img_w, self.img_h), (255, 255, 255))
            new_img.paste(pil_img, ((img_w - width) // 2, 0))
            # convert to numpy array
            img_arr = np.array(new_img)
            # some additional augmentation
            img_arr = self.keras_augmentor.random_transform(img_arr)
            # scale with 255 so that the values are between 0 and 1
            # and save to batch, also transpose because the "time axis" is width
            generated_content[batch_ind][0] = img_arr.transpose((1, 0, 2)) / 255
        # the original snippet is truncated here; the return values are inferred from how
        # __getitem__ unpacks the result of this method
        return generated_content, img_w, max_word_len_batch
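A minimal usage sketch, not from the original source: the base generator is assumed to yield (PIL image, word) pairs already scaled to img_h. A dummy in-memory generator stands in for the trdg generator here, and Keras' ImageDataGenerator supplies the extra augmentation through its random_transform() method.

import itertools
import numpy as np
from PIL import Image
from tensorflow.keras.preprocessing.image import ImageDataGenerator

img_h = 32
words = ["cat", "dog"]  # toy vocabulary for illustration
char_to_lbl_dict = {c: i for i, c in enumerate(sorted(set("".join(words))))}

def dummy_samples():
    # stands in for the trdg generator: yields (PIL image, word) pairs forever
    for word in itertools.cycle(words):
        img = Image.new("RGB", (32 * len(word), img_h), (255, 255, 255))
        yield img, word

augmentor = ImageDataGenerator(rotation_range=2, zoom_range=0.05)

train_gen = OCR_generator(
    base_generator=dummy_samples(),
    batch_size=8,
    char_to_lbl_dict=char_to_lbl_dict,
    img_h=img_h,
    keras_augmentor=augmentor,
    epoch_size=100,
)

batch_imgs, y_true = train_gen[0]
print(batch_imgs.shape, y_true.shape)  # (8, W, 32, 3) and (8, W // 4, num_chars)
# ctc_model.fit(train_gen, epochs=10)  # with a model whose CTC loss unpacks y_true as described above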