Best Python code snippet using gherkin-python
jack_parser.py
Source:jack_parser.py
# Recursive-descent parser for the Jack language: reads a tokenizer output
# file (*_T.xml, one XML token per line) and writes the nested parse tree
# back out as XML.
import os
import xml.etree.ElementTree as ET

number_of_tab_spaces = 0

def match_token(input_file, output_file, token_type, token_text_list, peek_only=False):
    # Consume the next token if its tag matches token_type and its text is in
    # token_text_list (an empty list accepts any text). With peek_only=True,
    # only test the token without consuming or printing it.
    line = peek_line(input_file)
    line_root = ET.ElementTree(ET.fromstring(line)).getroot()
    file_token_type = line_root.tag
    file_token_text = line_root.text.strip()
    if not file_token_type == token_type or (len(token_text_list) != 0 and file_token_text not in token_text_list):
        return False
    token_str = ET.tostring(line_root).decode()
    if not peek_only:
        print_token(output_file, token_str)
        input_file.readline()
    return True

def match_next_token(input_file, token_type, token_text_list):
    # Like match_token with peek_only, but looks one token further ahead.
    line = peek_next_line(input_file)
    line_root = ET.ElementTree(ET.fromstring(line)).getroot()
    file_token_type = line_root.tag
    file_token_text = line_root.text.strip()
    if not file_token_type == token_type or (len(token_text_list) != 0 and file_token_text not in token_text_list):
        return False
    return True

def peek_line(f):
    # Read the next line without advancing the file position.
    pos = f.tell()
    line = f.readline()
    f.seek(pos)
    return line

def peek_next_line(f):
    # Read the line after the next one without advancing the file position.
    pos = f.tell()
    f.readline()
    line = f.readline()
    f.seek(pos)
    return line

def print_token(output_file, token):
    formatted_token = "{spaces}{token}\n".format(spaces=" " * number_of_tab_spaces, token=token)
    output_file.write(formatted_token)

def parse_directory(directory_to_tokenize):
    if os.path.isdir(directory_to_tokenize):
        for file in os.listdir(directory_to_tokenize):
            if file.endswith("_T.xml"):
                print("converting: " + directory_to_tokenize + '\\' + file)
                parse_file(directory_to_tokenize + '\\' + file)
    else:
        print("no such directory {}".format(directory_to_tokenize))

def parse_file(file_to_tokenize_path_name):
    # "Xxx_T.xml" -> "Xxx_test.xml"
    output_file_path_name = file_to_tokenize_path_name[:-6] + "_test.xml"
    with open(file_to_tokenize_path_name, 'r') as input_file:
        with open(output_file_path_name, 'w') as output_file:
            input_file.readline()  # skip the opening <tokens> line
            global number_of_tab_spaces
            number_of_tab_spaces = 0
            parse_class(input_file, output_file)

def parse_class(input_file, output_file):
    is_class = match_token(input_file, output_file, "keyword", ["class"], True)
    if is_class:
        print_token(output_file, "<class>")
        global number_of_tab_spaces
        number_of_tab_spaces += 1
        match_token(input_file, output_file, "keyword", ["class"])
        parse_class_name(input_file, output_file)
        match_token(input_file, output_file, "symbol", ["{"])
        parse_class_var_dec(input_file, output_file)
        parse_sub_routine_dec(input_file, output_file)
        match_token(input_file, output_file, "symbol", ["}"])
        number_of_tab_spaces -= 1
        print_token(output_file, "</class>")

def parse_class_var_dec(input_file, output_file):
    is_class_var_dec = match_token(input_file, output_file, "keyword", ["static", "field"], True)
    if is_class_var_dec:
        print_token(output_file, "<classVarDec>")
        global number_of_tab_spaces
        number_of_tab_spaces += 1
        match_token(input_file, output_file, "keyword", ["static", "field"])
        parse_var(input_file, output_file)
        number_of_tab_spaces -= 1
        print_token(output_file, "</classVarDec>")
        parse_class_var_dec(input_file, output_file)

def parse_type(input_file, output_file):
    if match_token(input_file, output_file, "keyword", ["int", "char", "boolean"]):
        return True
    elif match_token(input_file, output_file, "identifier", []):
        return True
    return False

def parse_sub_routine_dec(input_file, output_file):
    is_class_sub_routine_dec = match_token(input_file, output_file, "keyword",
                                           ["constructor", "function", "method"], True)
    if is_class_sub_routine_dec:
        print_token(output_file, "<subroutineDec>")
        global number_of_tab_spaces
        number_of_tab_spaces += 1
        match_token(input_file, output_file, "keyword", ["constructor", "function", "method"])
        if not parse_type(input_file, output_file):
            match_token(input_file, output_file, "keyword", ["void"])
        parse_sub_routine_name(input_file, output_file)
        match_token(input_file, output_file, "symbol", ["("])
        parse_parameter_list(input_file, output_file)
        match_token(input_file, output_file, "symbol", [")"])
        parse_sub_routine_body(input_file, output_file)
        number_of_tab_spaces -= 1
        print_token(output_file, "</subroutineDec>")
        parse_sub_routine_dec(input_file, output_file)

def parse_parameter_list(input_file, output_file):
    print_token(output_file, "<parameterList>")
    global number_of_tab_spaces
    number_of_tab_spaces += 1
    parse_type(input_file, output_file)
    parse_var_name(input_file, output_file)
    while match_token(input_file, output_file, "symbol", [","]):
        parse_type(input_file, output_file)
        parse_var_name(input_file, output_file)
    number_of_tab_spaces -= 1
    print_token(output_file, "</parameterList>")

def parse_sub_routine_body(input_file, output_file):
    is_sub_routine_body = match_token(input_file, output_file, "symbol", ["{"], True)
    if is_sub_routine_body:
        print_token(output_file, "<subroutineBody>")
        global number_of_tab_spaces
        number_of_tab_spaces += 1
        match_token(input_file, output_file, "symbol", ["{"])
        parse_var_dec(input_file, output_file)
        parse_statements(input_file, output_file)
        match_token(input_file, output_file, "symbol", ["}"])
        number_of_tab_spaces -= 1
        print_token(output_file, "</subroutineBody>")

def parse_var_dec(input_file, output_file):
    is_var_dec = match_token(input_file, output_file, "keyword", ["var"], True)
    if is_var_dec:
        print_token(output_file, "<varDec>")
        global number_of_tab_spaces
        number_of_tab_spaces += 1
        match_token(input_file, output_file, "keyword", ["var"])
        parse_var(input_file, output_file)
        number_of_tab_spaces -= 1
        print_token(output_file, "</varDec>")
        parse_var_dec(input_file, output_file)

def parse_class_name(input_file, output_file):
    if match_token(input_file, output_file, "identifier", []):
        return True
    return False

def parse_sub_routine_name(input_file, output_file):
    if match_token(input_file, output_file, "identifier", []):
        return True
    return False

def parse_var_name(input_file, output_file):
    if match_token(input_file, output_file, "identifier", []):
        return True
    return False

def parse_statements(input_file, output_file):
    print_token(output_file, "<statements>")
    global number_of_tab_spaces
    number_of_tab_spaces += 1
    parse_statement(input_file, output_file)
    number_of_tab_spaces -= 1
    print_token(output_file, "</statements>")

def parse_statement(input_file, output_file):
    is_statement = parse_let_statement(input_file, output_file) \
        or parse_if_statement(input_file, output_file) \
        or parse_while_statement(input_file, output_file) \
        or parse_do_statement(input_file, output_file) \
        or parse_return_statement(input_file, output_file)
    if not is_statement:
        return
    parse_statement(input_file, output_file)

def parse_let_statement(input_file, output_file):
    is_let_statement = match_token(input_file, output_file, "keyword", ["let"], True)
    if is_let_statement:
        print_token(output_file, "<letStatement>")
        global number_of_tab_spaces
        number_of_tab_spaces += 1
        match_token(input_file, output_file, "keyword", ["let"])
        parse_var_name(input_file, output_file)
        if match_token(input_file, output_file, "symbol", ["["]):
            parse_expression(input_file, output_file)
            match_token(input_file, output_file, "symbol", ["]"])
        match_token(input_file, output_file, "symbol", ["="])
        parse_expression(input_file, output_file)
        match_token(input_file, output_file, "symbol", [";"])
        number_of_tab_spaces -= 1
        print_token(output_file, "</letStatement>")
        return True
    return False

def parse_if_statement(input_file, output_file):
    is_if_statement = match_token(input_file, output_file, "keyword", ["if"], True)
    if is_if_statement:
        print_token(output_file, "<ifStatement>")
        global number_of_tab_spaces
        number_of_tab_spaces += 1
        match_token(input_file, output_file, "keyword", ["if"])
        match_token(input_file, output_file, "symbol", ["("])
        parse_expression(input_file, output_file)
        match_token(input_file, output_file, "symbol", [")"])
        match_token(input_file, output_file, "symbol", ["{"])
        parse_statements(input_file, output_file)
        match_token(input_file, output_file, "symbol", ["}"])
        if match_token(input_file, output_file, "keyword", ["else"]):
            match_token(input_file, output_file, "symbol", ["{"])
            parse_statements(input_file, output_file)
            match_token(input_file, output_file, "symbol", ["}"])
        number_of_tab_spaces -= 1
        print_token(output_file, "</ifStatement>")
        return True
    return False

def parse_while_statement(input_file, output_file):
    is_while_statement = match_token(input_file, output_file, "keyword", ["while"], True)
    if is_while_statement:
        print_token(output_file, "<whileStatement>")
        global number_of_tab_spaces
        number_of_tab_spaces += 1
        match_token(input_file, output_file, "keyword", ["while"])
        match_token(input_file, output_file, "symbol", ["("])
        parse_expression(input_file, output_file)
        match_token(input_file, output_file, "symbol", [")"])
        match_token(input_file, output_file, "symbol", ["{"])
        parse_statements(input_file, output_file)
        match_token(input_file, output_file, "symbol", ["}"])
        number_of_tab_spaces -= 1
        print_token(output_file, "</whileStatement>")
        return True
    return False

def parse_do_statement(input_file, output_file):
    is_do_statement = match_token(input_file, output_file, "keyword", ["do"], True)
    if is_do_statement:
        print_token(output_file, "<doStatement>")
        global number_of_tab_spaces
        number_of_tab_spaces += 1
        match_token(input_file, output_file, "keyword", ["do"])
        parse_sub_routine_call(input_file, output_file)
        match_token(input_file, output_file, "symbol", [";"])
        number_of_tab_spaces -= 1
        print_token(output_file, "</doStatement>")
        return True
    return False

def parse_return_statement(input_file, output_file):
    is_return_statement = match_token(input_file, output_file, "keyword", ["return"], True)
    if is_return_statement:
        print_token(output_file, "<returnStatement>")
        global number_of_tab_spaces
        number_of_tab_spaces += 1
        match_token(input_file, output_file, "keyword", ["return"])
        if not match_token(input_file, output_file, "symbol", [";"]):
            parse_expression(input_file, output_file)
            match_token(input_file, output_file, "symbol", [";"])
        number_of_tab_spaces -= 1
        print_token(output_file, "</returnStatement>")
        return True
    return False

def parse_expression(input_file, output_file):
    print_token(output_file, "<expression>")
    global number_of_tab_spaces
    number_of_tab_spaces += 1
    parse_term(input_file, output_file)
    while parse_op(input_file, output_file):
        parse_term(input_file, output_file)
    number_of_tab_spaces -= 1
    print_token(output_file, "</expression>")
    return True

def parse_term(input_file, output_file):
    print_token(output_file, "<term>")
    global number_of_tab_spaces
    number_of_tab_spaces += 1
    is_parsed = match_token(input_file, output_file, "integerConstant", [])
    if not is_parsed:
        is_parsed = match_token(input_file, output_file, "stringConstant", [])
    if not is_parsed:
        is_parsed = parse_keyword_constant(input_file, output_file)
    if not is_parsed:
        if match_token(input_file, output_file, "identifier", [], True):
            # one token of lookahead decides between a subroutine call and a variable
            if match_next_token(input_file, "symbol", ["("]) or match_next_token(input_file, "symbol", ["."]):
                is_parsed = parse_sub_routine_call(input_file, output_file)
            else:
                is_parsed = parse_term_var_name(input_file, output_file)
        else:
            is_parsed = False
    if not is_parsed and match_token(input_file, output_file, "symbol", ["("]):
        parse_expression(input_file, output_file)
        match_token(input_file, output_file, "symbol", [")"])
        is_parsed = True
    if not is_parsed and parse_unary_op(input_file, output_file):
        parse_term(input_file, output_file)
    number_of_tab_spaces -= 1
    print_token(output_file, "</term>")
    return True

def parse_sub_routine_call(input_file, output_file):
    if match_next_token(input_file, "symbol", ["("]):
        parse_sub_routine_name(input_file, output_file)
        match_token(input_file, output_file, "symbol", ["("])
        parse_expression_list(input_file, output_file)
        match_token(input_file, output_file, "symbol", [")"])
    elif match_next_token(input_file, "symbol", ["."]):
        match_token(input_file, output_file, "identifier", [])
        match_token(input_file, output_file, "symbol", ["."])
        parse_sub_routine_name(input_file, output_file)
        match_token(input_file, output_file, "symbol", ["("])
        parse_expression_list(input_file, output_file)
        match_token(input_file, output_file, "symbol", [")"])
    return True

def parse_expression_list(input_file, output_file):
    print_token(output_file, "<expressionList>")
    global number_of_tab_spaces
    number_of_tab_spaces += 1
    if not match_token(input_file, output_file, "symbol", [")"], True):
        parse_expression(input_file, output_file)
        while match_token(input_file, output_file, "symbol", [","]):
            parse_expression(input_file, output_file)
    number_of_tab_spaces -= 1
    print_token(output_file, "</expressionList>")
    return True

def parse_op(input_file, output_file):
    return match_token(input_file, output_file, "symbol", ["+"]) \
        or match_token(input_file, output_file, "symbol", ["-"]) \
        or match_token(input_file, output_file, "symbol", ["*"]) \
        or match_token(input_file, output_file, "symbol", ["/"]) \
        or match_token(input_file, output_file, "symbol", ["&"]) \
        or match_token(input_file, output_file, "symbol", ["|"]) \
        or match_token(input_file, output_file, "symbol", ["<"]) \
        or match_token(input_file, output_file, "symbol", [">"]) \
        or match_token(input_file, output_file, "symbol", ["="])

def parse_unary_op(input_file, output_file):
    return match_token(input_file, output_file, "symbol", ["-"]) \
        or match_token(input_file, output_file, "symbol", ["~"])

def parse_keyword_constant(input_file, output_file):
    return match_token(input_file, output_file, "keyword", ["true"]) \
        or match_token(input_file, output_file, "keyword", ["false"]) \
        or match_token(input_file, output_file, "keyword", ["null"]) \
        or match_token(input_file, output_file, "keyword", ["this"])

def parse_term_var_name(input_file, output_file):
    if not parse_var_name(input_file, output_file):
        return False
    if match_token(input_file, output_file, "symbol", ["["]):
        parse_expression(input_file, output_file)
        match_token(input_file, output_file, "symbol", ["]"])
    return True

def parse_var(input_file, output_file):
    parse_type(input_file, output_file)
    parse_var_name(input_file, output_file)
    while match_token(input_file, output_file, "symbol", [","], True):
        match_token(input_file, output_file, "symbol", [","])
        parse_var_name(input_file, output_file)
    match_token(input_file, output_file, "symbol", [";"])
...
compiler.py
Source:compiler.py
...
    def next_token(self):
        # return the current token and advance the lexer
        t = self.token
        self.token = self.lexer.next_token()
        return t

    def match_token(self, type):
        if type != self.token.type:
            error(self.token, "unexpected token %r (should probably be %r)" % (self.token.type, type))
        return self.next_token()

    def parse(self):
        self.enum = {}
        prog = Node("program")
        self.next_token()
        while self.token.type:
            if self.token.type == "enum":
                self.next_token()
                self.match_token("{")
                i = 0
                while self.token.type != "}":
                    n = self.token.name
                    self.match_token("id")
                    if self.token.type == "=":
                        self.next_token()
                        i = self.match_token("number").number
                    self.enum[n] = i
                    i += 1
                    if self.token.type == ",":
                        self.next_token()
                        continue
                    break
                self.match_token("}")
                self.match_token(";")
                continue  # an enum is a complete top-level declaration
            # read type and name
            t = self.match_token("id")
            n = self.match_token("id")
            # function
            if self.token.type == "(":
                for k in prog.kids:
                    if k.type == "func" and k.name == n.name:
                        error(n, "redefinition of function %r" % n.name)
                self.next_token()
                node = Node("func")
                if t.name not in ("void", "int"): error(t, "invalid type")
                node.t = t.name
                node.name = n.name
                # TODO: arguments
                self.match_token(")")
                if self.token.type != "{": error(self.token, "no function block")
                node.add(self.stmt())
                prog.add(node)
                continue
            # variable
            if t.name not in ("int",): error(t, "invalid type")
            for k in prog.kids:
                if k.type == "var" and k.name == n.name:
                    error(n, "redefinition of variable %r" % n.name)
            node = Node("var")
            node.t = t.name
            node.name = n.name
            self.match_token(";")
            prog.add(node)
            continue
        return prog

    def stmt(self):
        if self.token.type == "return":
            node = Node("return")
            node.pos = self.token.pos
            self.next_token()
            if self.token.type != ";": node.add(self.expr())
            self.match_token(";")
            return node
        elif self.token.type == "for":
            self.next_token()
            # fake for loop by using a while loop
            node = Node("block")
            self.match_token("(")
            if self.token.type != ";":
                node.add(self.expr())
            self.match_token(";")
            loop = Node("while")
            node.add(loop)
            if self.token.type != ";":
                cond = self.expr()
            else:
                cond = Node("number")
                cond.number = 1
            self.match_token(";")
            loop.add(cond)
            if self.token.type != ")":
                inc = self.expr()
            else:
                inc = None
            self.match_token(")")
            body = Node("block")
            loop.add(body)
            body.add(self.stmt())
            if inc: body.add(inc)
            return node
        elif self.token.type == "while":
            self.next_token()
            node = Node("while")
            self.match_token("(")
            node.add(self.expr())
            self.match_token(")")
            node.add(self.stmt())
            return node
        elif self.token.type == "if":
            self.next_token()
            node = Node("if")
            self.match_token("(")
            node.add(self.expr())
            self.match_token(")")
            node.add(self.stmt())
            if self.token.type == "else":
                self.next_token()
                node.add(self.stmt())
            return node
        elif self.token.type == "{":
            self.next_token()
            node = Node("block")
            while self.token.type != "}": node.add(self.stmt())
            self.next_token()
            return node
        else:
            node = self.expr()
            self.match_token(";")
            return node

    def expr(self, level=0):
        # unary stuff
        if self.token.type == "number":
            node = Node("number")
            node.number = self.next_token().number
        elif self.token.type == "+":
            self.next_token()
            node = self.expr(99)
        elif self.token.type == "-":
            self.next_token()
            node = Node("neg").add(self.expr(99))
        elif self.token.type == "(":
            self.next_token()
            node = self.expr()
            self.match_token(")")
        elif self.token.type == "!":
            self.next_token()
            node = Node("not").add(self.expr(99))
        elif self.token.type == "id":
            t = self.next_token()
            if self.token.type == "(":
                self.next_token()
                node = Node("call")
                node.name = t.name
                node.pos = t.pos
                # TODO: args
                self.match_token(")")
            else:
                if t.name in self.enum:
                    node = Node("number")
                    node.number = self.enum[t.name]
                else:
                    node = Node("use")
                    node.pos = t.pos
                    node.name = t.name
        else:
            error(self.token, "bad expression %r" % self.token.type)
        # infix stuff
        while self.token.type in INFIX_LEVEL and INFIX_LEVEL[self.token.type] >= level:
            tok = self.token.type
            self.next_token()
...
Parser.py
Source:Parser.py
...
        # skip comment
        if self.token.type == Token_Type.COMMENT.name:
            self.fetch_token()
        print("-----Exit FetchToken-----")

    def match_token(self, ob):
        print("-----Enter MatchToken-----")
        if self.token.type != ob:
            syntax_error(2, sb=self.token.type, ob=ob)
            print("-----Exit MatchToken-----")
            return False
        print("*****MatchToken " + ob + "*****")
        print("-----Exit MatchToken-----")
        return True

    def program(self):
        print("-----Enter Program-----")
        while self.token.type != Token_Type.NONTOKEN.name:
            self.statement()
            # end with ';'
            self.match_token(Token_Type.SEMICO.name)
            self.fetch_token()
        print("-----Exit Program-----")

    def statement(self):
        print("-----Enter Statement-----")
        if self.token.type == Token_Type.ORIGIN.name:
            self.origin_statement()
        elif self.token.type == Token_Type.SCALE.name:
            self.scale_statement()
        elif self.token.type == Token_Type.ROT.name:
            self.rot_statement()
        elif self.token.type == Token_Type.FOR.name:
            self.for_statement()
        elif self.token.type == Token_Type.COLOR.name:
            self.color_statement()
        else:
            syntax_error(3)
        print("-----Exit Statement-----")

    def origin_statement(self):
        print("-----Enter OriginStatement-----")
        self.match_token(Token_Type.ORIGIN.name)
        self.fetch_token()
        self.match_token(Token_Type.IS.name)
        self.fetch_token()
        self.match_token(Token_Type.L_BRACKET.name)
        self.fetch_token()
        tmp_ptr = self.expression()
        print("--------------------------------------------------")
        print_tree(tmp_ptr)
        print("--------------------------------------------------")
        x = get_expr_value(tmp_ptr)
        self.match_token(Token_Type.COMMA.name)
        self.fetch_token()
        tmp_ptr = self.expression()
        print("--------------------------------------------------")
        print_tree(tmp_ptr)
        print("--------------------------------------------------")
        y = get_expr_value(tmp_ptr)
        self.match_token(Token_Type.R_BRACKET.name)
        self.fetch_token()
        set_origin(x, y)
        print("-----Exit OriginStatement-----")

    def scale_statement(self):
        print("-----Enter ScaleStatement-----")
        self.match_token(Token_Type.SCALE.name)
        self.fetch_token()
        self.match_token(Token_Type.IS.name)
        self.fetch_token()
        self.match_token(Token_Type.L_BRACKET.name)
        self.fetch_token()
        tmp_ptr = self.expression()
        print("--------------------------------------------------")
        print_tree(tmp_ptr)
        print("--------------------------------------------------")
        x = get_expr_value(tmp_ptr)
        self.match_token(Token_Type.COMMA.name)
        self.fetch_token()
        tmp_ptr = self.expression()
        print("--------------------------------------------------")
        print_tree(tmp_ptr)
        print("--------------------------------------------------")
        y = get_expr_value(tmp_ptr)
        self.match_token(Token_Type.R_BRACKET.name)
        self.fetch_token()
        set_scale(x, y)
        print("-----Exit ScaleStatement-----")

    def rot_statement(self):
        print("-----Enter RotStatement-----")
        self.match_token(Token_Type.ROT.name)
        self.fetch_token()
        self.match_token(Token_Type.IS.name)
        self.fetch_token()
        tmp_ptr = self.expression()
        print("--------------------------------------------------")
        print_tree(tmp_ptr)
        print("--------------------------------------------------")
        x = get_expr_value(tmp_ptr)
        # self.fetch_token()
        set_rot(x)
        print("-----Exit RotStatement-----")

    def for_statement(self):
        print("-----Enter ForStatement-----")
        self.match_token(Token_Type.FOR.name)
        self.fetch_token()
        self.match_token(Token_Type.T.name)
        self.fetch_token()
        self.match_token(Token_Type.FROM.name)
        self.fetch_token()
        start_ptr = self.expression()
        print("--------------------------------------------------")
        print_tree(start_ptr)
        print("--------------------------------------------------")
        start = get_expr_value(start_ptr)
        self.match_token(Token_Type.TO.name)
        self.fetch_token()
        end_ptr = self.expression()
        print("--------------------------------------------------")
        print_tree(end_ptr)
        print("--------------------------------------------------")
        end = get_expr_value(end_ptr)
        self.match_token(Token_Type.STEP.name)
        self.fetch_token()
        step_ptr = self.expression()
        print("--------------------------------------------------")
        print_tree(step_ptr)
        print("--------------------------------------------------")
        step = get_expr_value(step_ptr)
        self.match_token(Token_Type.DRAW.name)
        self.fetch_token()
        self.match_token(Token_Type.L_BRACKET.name)
        self.fetch_token()
        x_ptr = self.expression()
        print("--------------------------------------------------")
        print_tree(x_ptr)
        print("--------------------------------------------------")
        self.match_token(Token_Type.COMMA.name)
        self.fetch_token()
        y_ptr = self.expression()
        print("--------------------------------------------------")
        print_tree(y_ptr)
        print("--------------------------------------------------")
        self.match_token(Token_Type.R_BRACKET.name)
        self.fetch_token()
        draw_loop(start, end, step, x_ptr, y_ptr)
        print("-----Exit ForStatement-----")

    def color_statement(self):
        print("-----Enter ColorStatement-----")
        self.match_token(Token_Type.COLOR.name)
        self.fetch_token()
        self.match_token(Token_Type.IS.name)
        self.fetch_token()
        self.match_token(Token_Type.SP_COLOR.name)
        set_color(self.token.lexeme)
        self.fetch_token()
        print("-----Exit ColorStatement-----")

    def expression(self):
        print("-----Enter Expression-----")
        left = self.term()
        while self.token.type == Token_Type.PLUS.name or self.token.type == Token_Type.MINUS.name:
            token_tmp = self.token.type
            self.match_token(token_tmp)
            self.fetch_token()  # consume the operator before parsing the right term, as term() does
            right = self.term()
            left = ExprNode(token_tmp, lnode=left, rnode=right)
        print("-----Exit Expression-----")
        return left

    def term(self):
        print("-----Enter Term-----")
        left = self.factor()
        while self.token.type == Token_Type.MUL.name or self.token.type == Token_Type.DIV.name:
            token_tmp = self.token.type
            self.match_token(token_tmp)
            self.fetch_token()
            right = self.factor()
            left = ExprNode(token_tmp, lnode=left, rnode=right)
        print("-----Exit Term-----")
        return left

    def factor(self):
        print("-----Enter Factor-----")
        if self.token.type == Token_Type.PLUS.name or self.token.type == Token_Type.MINUS.name:
            # unary +/- is rewritten as (0 op operand)
            token_tmp = self.token.type
            self.match_token(token_tmp)
            left = ExprNode(Token_Type.CONST_ID.name, 0)
            self.fetch_token()
            right = self.factor()
            res = ExprNode(token_tmp, lnode=left, rnode=right)
            print("-----Exit Factor-----")
            return res
        else:
            res = self.component()
            print("-----Exit Factor-----")
            return res

    def component(self):
        print("-----Enter Component-----")
        left = self.atom()
        self.fetch_token()
        while self.token.type == Token_Type.POWER.name:
            token_tmp = self.token.type
            self.match_token(token_tmp)
            self.fetch_token()
            right = self.component()  # recursion makes power right-associative
            left = ExprNode(token_tmp, lnode=left, rnode=right)
        print("-----Exit Component-----")
        return left

    def atom(self):
        print("-----Enter Atom-----")
        if self.token.type == Token_Type.CONST_ID.name:
            print("leaf: " + str(self.token.value))
            print("-----Exit Atom-----")
            return ExprNode(self.token.type, self.token.value)  # leaf
        elif self.token.type == Token_Type.T.name:
            print("leaf: " + self.token.type)
            print("-----Exit Atom-----")
            return ExprNode(self.token.type, self.token.value)  # leaf
        elif self.token.type == Token_Type.FUNC.name:
            token_tmp = self.token.type
            func_tmp = self.token.func
            self.fetch_token()
            self.match_token(Token_Type.L_BRACKET.name)
            self.fetch_token()
            left = self.expression()
            self.match_token(Token_Type.R_BRACKET.name)
            print("-----Exit Atom-----")
            return ExprNode(token_tmp, lnode=left, func=func_tmp)
        elif self.token.type == Token_Type.L_BRACKET.name:  # compare against .name, like the other branches
            self.match_token(Token_Type.L_BRACKET.name)
            self.fetch_token()
            left = self.expression()
            self.match_token(Token_Type.R_BRACKET.name)
            print("-----Exit Atom-----")
            return left

if __name__ == '__main__':
    # init the parser
    p = Parser("test.txt")
    # run the parser
    p.start()
    plt.xlim(0)
    plt.ylim(0)
...
RuleOutput.py
Source:RuleOutput.py
# defines a RuleOutput class
# and its subclasses
# these perform singular operators on Ingredient objects

from .MyNumber import MyNumber
from .Ingredient import Ingredient
from .FormatCheck import listOfStrings, listOfTupleStringBools

class RuleOutput(object):
    def __init__(self):
        raise Exception("creating an instance of RuleOutput base class is forbidden")

    @staticmethod
    def check_conformity(ig, match_token, match_token_length):
        # check that an input to apply makes sense
        if not isinstance(ig, Ingredient):
            raise TypeError("Expected ig to be an Ingredient")
        if type(match_token) != tuple:
            raise TypeError("Expected tuple")
        if type(match_token_length) != int:
            raise TypeError("Expected int")
        if len(match_token) != match_token_length:
            raise ValueError("Expected tuple of length " + str(match_token_length))
        if not all(map(lambda x: type(x) == int, match_token)):
            raise TypeError("Expected tuple filled with ints")

class NoneRuleOutput(RuleOutput):
    def __init__(self):
        pass
    @staticmethod
    def apply(ig, match_token):
        return ig
    @staticmethod
    def priority():
        return 1

NoneRuleOutputInstance = NoneRuleOutput()

class RenamingRuleOutput(RuleOutput):
    def __init__(self, output_name):
        if output_name is not None:
            listOfStrings(output_name)
        self.output_name = output_name
    def apply(self, ig, match_token):
        if self.output_name is None:
            return ig
        # match token must come from SinglePattern
        self.check_conformity(ig, match_token, 2)
        output = ig.duplicate()
        if match_token[0] == 1:
            # editing unit
            output.unit = output.unit[:max(0, match_token[1])] + self.output_name
        else:
            # editing name
            output.name = output.name[:max(0, match_token[1])] + self.output_name
        return output
    @staticmethod
    def priority():
        return 2

class PrefixingRuleOutput(RuleOutput):
    def __init__(self, prefix):
        listOfStrings(prefix)
        self.prefix = prefix
    def apply(self, ig, match_token):
        # match token must come from SinglePattern
        self.check_conformity(ig, match_token, 2)
        if match_token[1] > 0:
            # if the input already has a prefix,
            # we shouldn't change it
            return ig
        output = ig.duplicate()
        if match_token[0] == 1:
            # editing unit
            output.unit = self.prefix + output.unit
        else:
            # editing name
            output.name = self.prefix + output.name
        return output
    @staticmethod
    def priority():
        return 3

class InsertingRuleOutput(RuleOutput):
    def __init__(self, pattern_size, insertion):
        if type(pattern_size) != int:
            raise TypeError("Expected int")
        if insertion != []:
            listOfStrings(insertion)
        self.pattern_size = pattern_size
        self.insertion = insertion
    def apply(self, ig, match_token):
        # match token must come from SinglePattern
        self.check_conformity(ig, match_token, 2)
        output = ig.duplicate()
        if match_token[0] == 1:
            # editing unit
            output.unit = output.unit[:max(0, match_token[1])] + self.insertion + output.unit[max(0, match_token[1]) + self.pattern_size:]
        else:
            # editing name
            output.name = output.name[:max(0, match_token[1])] + self.insertion + output.name[max(0, match_token[1]) + self.pattern_size:]
        return output
    @staticmethod
    def priority():
        return 2

class SingleConvertingRuleOutput(RenamingRuleOutput):
    def __init__(self, ratio, output_name):
        super().__init__(output_name)
        if not isinstance(ratio, MyNumber):
            raise TypeError("Expected MyNumber")
        self.ratio = ratio
    def apply(self, ig, match_token):
        self.check_conformity(ig, match_token, 2)
        output = super().apply(ig, match_token).duplicate()
        output.count = output.count * self.ratio
        return output
    @staticmethod
    def priority():
        return 5

class DoubleConvertingRuleOutput(RuleOutput):
    def __init__(self, ratio, output_unit, output_name):
        # ratio, output_unit, and output_name
        # may each be None to indicate a wildcard
        if ratio is not None:
            if not isinstance(ratio, MyNumber):
                raise TypeError("Expected MyNumber")
        if output_unit is not None:
            output_unit = RenamingRuleOutput(output_unit)
        if output_name is not None:
            output_name = RenamingRuleOutput(output_name)
        self.ratio = ratio
        self.output_unit = output_unit
        self.output_name = output_name
    def apply(self, ig, match_token):
        # match_token must be from DoublePattern
        self.check_conformity(ig, match_token, 3)
        if self.output_unit is not None:
            ig = self.output_unit.apply(ig, (1, max(0, match_token[1])))
        if self.output_name is not None:
            ig = self.output_name.apply(ig, (2, max(0, match_token[2])))
        if self.ratio is not None:
            ig = ig.duplicate()
            ig.count = ig.count * self.ratio
        return ig
    @staticmethod
    def priority():
        return 5

class PropertiesRuleOutput(RuleOutput):
    def __init__(self, base, edits):
        if not isinstance(base, RuleOutput):
            raise TypeError("Expected child class of RuleOutput")
        listOfTupleStringBools(edits)
        self.base = base
        self.edits = edits
    def apply(self, ig, match_token):
        output = self.base.apply(ig, match_token).duplicate()
        for edit in self.edits:
            # edit is (str, bool)
            if edit[1]:
                output.props.add(edit[0])
            else:
                if edit[0] == "$":
                    # remove all
                    output.props = set()
                elif edit[0] in output.props:
                    output.props.remove(edit[0])
        return output
    def priority(self):
        return self.base.priority()

class DecRuleOutput(RuleOutput):
    def __init__(self):
        pass
    @staticmethod
    def apply(ig, match_token):
        output = ig.duplicate()
        output.count = output.count.as_float()
        return output
    @staticmethod
    def priority():
        return 1

DecRuleOutputInstance = DecRuleOutput()

class FracRuleOutput(RuleOutput):
    def __init__(self):
        pass
    @staticmethod
    def apply(ig, match_token):
        output = ig.duplicate()
        output.count = output.count.as_fraction(10)
        return output
    @staticmethod
    def priority():
        return 1
...
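A note on the match_token parameter above: unlike the lexer tokens in the other snippets, here it is a tuple of ints produced by pattern-matching code elsewhere in the package. Its first element selects the field being edited (1 for unit, 2 for name, as DoubleConvertingRuleOutput.apply shows) and the remaining elements are match offsets. A hedged usage sketch, with a hypothetical Ingredient instance since its constructor is not part of this excerpt:

# Hypothetical usage; Ingredient's real constructor lives in .Ingredient
# and may take different arguments than assumed here.
rule = PropertiesRuleOutput(
    base=PrefixingRuleOutput(["chopped"]),  # prepend a word to the matched field
    edits=[("prepped", True)],              # then set a property flag
)
# (2, 0): a SinglePattern matched the *name* field at offset 0, so the
# prefix is applied (an offset > 0 would leave the name unchanged).
result = rule.apply(some_ingredient, (2, 0))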
Learn to execute automation testing from scratch with the LambdaTest Learning Hub. Right from setting up the prerequisites to running your first automation test, through best practices and advanced test scenarios, the Learning Hub compiles step-by-step guides to help you become proficient with different test automation frameworks such as Selenium, Cypress, and TestNG.
You can also refer to the video tutorials on the LambdaTest YouTube channel for step-by-step demonstrations from industry experts.