Best Python code snippet using green
test_models.py
Source: test_models.py
# -*- coding: utf-8 -*-
import unittest

import numpy as np

from statLM import statistical_models as sm


class BasicTestSuite(unittest.TestCase):
    """Basic test cases for the statistical language models."""

    def __init__(self, *args, **kwargs):
        super(BasicTestSuite, self).__init__(*args, **kwargs)
        self.test_corpus = [
            "let us see were this project leads us",
            "we are having great fun so far",
            "we are actively developing",
            "it is getting tougher but it is still fun",
            "this project teaches us how to construct test cases",
        ]
        self.test_queries = [
            "let us see were that project",
            "we are",
            "it is",
            "it should be",
            "we",
        ]
        self.test_completions = ["leads", "actively", "getting", "us", "are"]

    def test_naive_ngram(self):
        # Naive n-gram with threshold=1: queries without a sufficiently
        # frequent match fall through to NaN.
        nn = sm.NaiveNGram(n_max=3, threshold=1)
        nn.fit(self.test_corpus)
        self.assertEqual(
            nn.predict(self.test_queries),
            ['leads', 'actively', 'getting', np.NaN, "are"]
        )
        self.assertEqual(
            nn.predict_proba(self.test_queries),
            [('project leads', 0.5), ('we are actively', 0.5),
             ('it is getting', 0.5), np.NaN, ('we are', 1.0)]
        )
        self.assertEqual(
            nn.score(
                queries=self.test_queries,
                completions=self.test_completions
            ),
            [0.5, 0.5, 0.5, 0, 1.0]
        )

    def test_naive_ngram_no_threshold(self):
        # Naive n-gram with threshold=0: an otherwise unmatched query backs
        # off to unigram frequency ("us" occurs 3 times among 37 tokens).
        nn = sm.NaiveNGram(n_max=3, threshold=0)
        nn.fit(self.test_corpus)
        self.assertEqual(
            nn.predict(self.test_queries),
            ['leads', 'actively', 'getting', "us", "are"]
        )
        self.assertEqual(
            nn.predict_proba(self.test_queries),
            [('project leads', 0.5), ('we are actively', 0.5),
             ('it is getting', 0.5), ("us", 3/37), ('we are', 1.0)]
        )

    def test_stupid_backoff(self):
        sb = sm.StupidBackoff(n_max=3, alpha=0.4, threshold=1)
        sb.fit(self.test_corpus)
        self.assertEqual(
            sb.predict(self.test_queries),
            ['leads', 'actively', 'getting', np.NaN, "are"]
        )
        self.assertEqual(
            sb.predict_proba(self.test_queries),
            [('project leads', 0.2), ('we are actively', 0.5),
             ('it is getting', 0.5), np.NaN, ('we are', 0.4)]
        )
        self.assertEqual(
            sb.score(
                queries=self.test_queries,
                completions=self.test_completions
            ),
            [0.2, 0.5, 0.5, 0, 0.4]
        )

    def test_stupid_backoff_no_threshold(self):
        sb = sm.StupidBackoff(n_max=3, alpha=0.4, threshold=0)
        sb.fit(self.test_corpus)
        self.assertEqual(
            sb.predict(self.test_queries),
            ['leads', 'actively', 'getting', "us", "are"]
        )
        self.assertEqual(
            sb.predict_proba(self.test_queries),
            [('project leads', 0.2), ('we are actively', 0.5),
             ('it is getting', 0.5), ("us", 3/37), ('we are', 0.4)]
        )
        self.assertEqual(
            sb.score(
                queries=self.test_queries,
                completions=self.test_completions
            ),
            [0.2, 0.5, 0.5, 3/37, 0.4]
        )


# TODO:
# - construct more test cases
# - automate tests via GitHub Actions

if __name__ == '__main__':
    unittest.main()
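Outside the unittest harness, the statLM models exercised above follow a plain fit/predict pattern. A minimal usage sketch, assuming the statLM package is installed and behaves as the assertions above describe:

from statLM import statistical_models as sm

corpus = [
    "we are having great fun so far",
    "we are actively developing",
]
model = sm.StupidBackoff(n_max=3, alpha=0.4, threshold=0)
model.fit(corpus)

# predict() returns the most likely next word for each query;
# predict_proba() returns the matched n-gram together with its score.
print(model.predict(["we are"]))
print(model.predict_proba(["we are"]))

The suite itself can be run with python -m unittest test_models, or with the green runner this page is indexed under (green test_models).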
d10.py
Source: d10.py
from functools import reduce
from math import floor

from utils import read_input

openers = '([{<'
closers = ')]}>'
bracket_map = {l: r for l, r in zip(openers, closers)}

wrong_character_scores = {
    ')': 3,
    ']': 57,
    '}': 1197,
    '>': 25137,
}

completion_character_scores = {
    ')': 1,
    ']': 2,
    '}': 3,
    '>': 4,
}


class Parser:
    def __init__(self) -> None:
        self.stack = []

    def parse_character(self, c):
        if c in openers:
            self.stack.append(c)
            return True
        elif self.stack and c == bracket_map[self.stack.pop()]:
            return True
        else:
            return False

    def reset(self):
        self.stack = []

    def finish_line(self):
        yield from map(lambda c: bracket_map[c], self.stack[::-1])


def part_1(lines):
    score = 0
    parser = Parser()
    for line in lines:
        for c in line.strip():
            if not parser.parse_character(c):
                score += wrong_character_scores[c]
                break
        parser.reset()
    return score


test_lines = """[({(<(())[]>[[{[]{<()<>>
[(()[<>])]({[<{<<[]>>(
{([(<{}[<>[]}>{[]{[(<()>
(((({<>}<{<{<>}{[]{[]{}
[[<[([]))<([[{}[[()]]]
[{[{({}]{}}([{[{{{}}([]
{<[[]]>}<{[{[{[]{()[[[]
[<(<(<(<{}))><([]([]()
<{([([[(<>()){}]>(<<{{
<{([{{}}[<[[[<>{}]]]>[]]""".splitlines()
real_lines = read_input(10)

assert part_1(test_lines) == 26397
print(part_1(real_lines))


def score_completion(brackets):
    return reduce(lambda acc, c: acc * 5 + completion_character_scores[c], brackets, 0)


test_completions = {
    '}}]])})]': 288957,
    ')}>]})': 5566,
    '}}>}>))))': 1480781,
    ']]}}]}]}>': 995444,
    '])}>': 294,
}
for string, score in test_completions.items():
    assert score_completion(string) == score


def part_2(lines):
    parser = Parser()
    completion_scores = []
    for line in lines:
        if all(parser.parse_character(c) for c in line.strip()):
            completion_scores.append(score_completion(parser.finish_line()))
        parser.reset()
    return sorted(completion_scores)[floor(len(completion_scores) / 2)]


assert part_2(test_lines) == 288957
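score_completion folds over the completion left to right: multiply the running total by 5, then add the closing character's score. Tracing the shortest entry in test_completions, '])}>', by hand reproduces its expected score of 294:

# Worked trace of score_completion('])}>') from the snippet above.
completion_character_scores = {')': 1, ']': 2, '}': 3, '>': 4}

acc = 0
for c in '])}>':
    acc = acc * 5 + completion_character_scores[c]
    # running total: ']' -> 2, ')' -> 11, '}' -> 58, '>' -> 294
print(acc)  # 294, matching the expected value in test_completions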
run_gptj_mnist.py
Source: run_gptj_mnist.py
#!/usr/bin/env python
# coding: utf-8
import sys
sys.path.append('./')
sys.path.append('./../')
import argparse
import json
import os

import numpy as np
import torch

from utils.helper import query
from models import lora_gptj as GPTJ
import utils.configs as cfgs
from run_exps_helper import *

parser = argparse.ArgumentParser(description='GPT')
parser.add_argument("-d", "--data_name", default='mnist', type=str, choices=['mnist', 'fmnist'])
parser.add_argument("-g", "--gpu_id", default=0, type=int)
parser.add_argument("--local_rank", default=-1, type=int)
parser.add_argument("--seed", default=12345, type=int)
parser.add_argument("-p", "--is_permuted", action="store_true")
parser.add_argument("-v", "--eval", default=0, type=int)
args = parser.parse_args()

data_name = args.data_name
is_adv = False
is_permuted = False  # args.is_permuted  # True
permuted = 'permuted_' if is_permuted else ''
adv = '_adv' if is_adv else ''
fname = f'{permuted}{data_name}'

train_js = f'data/{fname}_train.jsonl'
val_js = f'data/{fname}_val.jsonl'
test_js = f'data/{fname}{adv}_test.jsonl'

val_prompts = extract_prompts(val_js, '')
test_prompts = extract_prompts(test_js, '')
val_completions = extract_completion(val_js)
y_val = [prompt2value(x) for x in val_completions]
test_completions = extract_completion(test_js)
y_test = [prompt2value(x) for x in test_completions]

device = torch.device(f'cuda:{args.gpu_id}') if torch.cuda.is_available() else 'cpu'
if torch.cuda.is_available():
    # Guarded: torch.cuda.set_device raises when no GPU is present.
    torch.cuda.set_device(args.gpu_id)
gpt = GPTJ.LoRaQGPTJ(adapter=True, device=device)

model_name = f'results/gpt-j/{fname}_best_model.pth'
pretrained_path = f'results/gpt-j/{fname}/pytorch_model.bin'
if os.path.isfile(pretrained_path):
    gpt.model.load_state_dict(torch.load(pretrained_path))

# #### Training
if args.eval == 0:
    train_configs = {'learning_rate': 1e-5, 'batch_size': 2, 'epochs': 1,
                     'weight_decay': 0.01, 'warmup_steps': 6}
    gpt.finetune(train_js, val_js, train_configs, saving_checkpoint=True)  # , save_path=model_name, local_rank=args.local_rank
else:
    gpt.load_networks(model_name)

ans, outputs = query(gpt, test_prompts, bs=16)
y_pred = [prompt2value(x) for x in ans]
acc = get_accuracy(y_pred, y_test)
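The helpers extract_prompts, extract_completion, prompt2value, and get_accuracy come in through the star import from run_exps_helper and are not shown on this page. As a rough sketch of what the final accuracy line plausibly computes (an assumption about the helper, not its actual implementation):

# Hypothetical stand-in for run_exps_helper.get_accuracy; the real helper is not shown above.
def get_accuracy(y_pred, y_true):
    # Fraction of predicted labels that exactly match the ground truth.
    assert len(y_pred) == len(y_true)
    return sum(int(p == t) for p, t in zip(y_pred, y_true)) / len(y_true)

Going by the argparse flags, the script would be launched as, for example, python run_gptj_mnist.py -d mnist -g 0 to fine-tune, or with -v 1 added to skip training and evaluate a saved checkpoint instead.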