Best Python code snippet using autotest_python
testviews.py
Source:testviews.py
from django.test import TestCase
from rest_framework.test import APIClient
import os
from ..boards.models import Board, Hashtag
from datetime import datetime
import pandas as pd
import json

# Directory of this test module; CSV fixtures are expected in ./dummy/.
curDir = os.path.dirname(os.path.normpath(__file__))


def set_dummy(table_list):
    """Bulk-insert fixture rows for each named table from ./dummy/<table>.csv.

    Supported table names: "board" and "hashtag"; unknown names are read but
    silently skipped (only the progress line is printed).
    """
    for table in table_list:
        df = pd.read_csv(os.path.join(curDir, "dummy", table + ".csv"))
        records = df.to_dict("records")
        if table == "board":
            Board.objects.bulk_create(
                [
                    Board(
                        index=row["index"],
                        title=row["title"],
                        content=row["content"],
                        created_at=row["created_at"],
                        heart_count=row["heart_count"],
                        views_count=row["views_count"],
                        is_active=row["is_active"],
                        writer_id=row["writer_id"],
                    )
                    for row in records
                ]
            )
        elif table == "hashtag":
            Hashtag.objects.bulk_create(
                [Hashtag(id=row["id"], tag_content=row["tag_content"]) for row in records]
            )
        print("finish insert table " + table)


def _today():
    """Today's date as an ISO string, matching the API's created_at format."""
    return str(datetime.now().date())


def _expected_row(title, content, hashtag, heart_count, views_count, writer="test_nick1"):
    """Build one expected board entry as serialized by the boards API."""
    return {
        "title": title,
        "content": content,
        "hashtag": hashtag,
        "heart_count": heart_count,
        "views_count": views_count,
        "writer": writer,
        "created_at": _today(),
    }


class TestViews(TestCase):
    """End-to-end tests for the users and boards API endpoints.

    NOTE(review): Korean literals below were reconstructed from a
    mojibake-corrupted extraction; confirm the exact messages against the
    view implementations.
    """

    def setUp(self):
        # Was decorated @classmethod in the original; Django calls setUp on
        # the instance, so a plain method is the correct form.
        pass

    @staticmethod
    def _board_snapshot(board):
        """Collapse a Board row into the comparable dict used by assertions."""
        return {
            "title": board.title,
            "content": board.content,
            "hashtag": ",".join("#" + t.tag_content for t in board.tagging.all()),
            "heart_count": board.heart_count,
            "views_count": board.views_count,
        }

    @staticmethod
    def _board_list(result):
        """Round-trip the payload through JSON so nested types compare as plain dicts."""
        return json.loads(json.dumps(result.data["board_list"], ensure_ascii=False))

    def test_users(self):
        """Register a user, then exercise login failure and success paths."""
        # 회원가입 테스트 (sign-up)
        client = APIClient()
        result = client.post(
            "/api/users/register",
            data={
                "email": "test@google.com",
                "nickname": "test_nick",
                "password": "password1234!",
                "password2": "password1234!",
            },
        )
        exp = {"email": "test@google.com", "nickname": "test_nick"}
        self.assertEqual(result.status_code, 201)
        self.assertEqual(result.data, exp)
        # 로그인 테스트 (login)
        # case_1: wrong password -> 400
        result = client.post(
            "/api/users/login",
            data={"email": "test@google.com", "password": "password1234@"},
        )
        exp = {"message": "email 또는 password가 틀렸습니다."}
        self.assertEqual(result.status_code, 400)
        self.assertEqual(result.data, exp)
        # case_2: correct credentials -> 200 with JWT pair
        result = client.post(
            "/api/users/login",
            data={"email": "test@google.com", "password": "password1234!"},
        )
        self.assertEqual(result.status_code, 200)
        self.assertEqual(result.data["msg"], "로그인에 성공했습니다.")
        self.assertTrue("access_token" in result.data["jwt_token"].keys())
        self.assertTrue("refresh_token" in result.data["jwt_token"].keys())

    def test_boards(self):
        """Exercise the boards API: create, detail view, heart toggle, update,
        soft-delete, restore, and the paginated/filtered list endpoint."""
        client = APIClient()
        # Register two users.
        client.post(
            "/api/users/register",
            data={
                "email": "test1@test.com",
                "nickname": "test_nick1",
                "password": "test1234!",
                "password2": "test1234!",
            },
        )
        client.post(
            "/api/users/register",
            data={
                "email": "test2@test.com",
                "nickname": "test_nick2",
                "password": "test1234!",
                "password2": "test1234!",
            },
        )
        # Obtain an access token for each user.
        result = client.post(
            "/api/users/login",
            data={"email": "test1@test.com", "password": "test1234!"},
        )
        access_token_1 = result.data["jwt_token"]["access_token"]
        result = client.post(
            "/api/users/login",
            data={"email": "test2@test.com", "password": "test1234!"},
        )
        access_token_2 = result.data["jwt_token"]["access_token"]

        # Board creation.
        # case_1: no token -> 401
        result = client.post(
            "/api/boards",
            data={"title": "test", "content": "test", "hashtag": "#first"},
            format="json",
        )
        self.assertEqual(result.status_code, 401)
        # case_2: authenticated -> 201
        client.credentials(HTTP_AUTHORIZATION="Bearer " + access_token_1)
        result = client.post(
            "/api/boards",
            data={"title": "test", "content": "test", "hashtag": "#first"},
            format="json",
        )
        exp = {"msg": "게시글이 등록되었습니다."}
        self.assertEqual(result.status_code, 201)
        self.assertEqual(result.data, exp)
        exp = {
            "title": "test",
            "content": "test",
            "hashtag": "#first",
            "heart_count": 0,
            "views_count": 0,
        }
        self.assertEqual(self._board_snapshot(Board.objects.get(index=1)), exp)

        # Detail view (increments views_count).
        result = client.get("/api/boards/1")
        exp = _expected_row("test", "test", "#first", 0, 1)
        self.assertEqual(result.status_code, 200)
        self.assertEqual(result.data, exp)

        # Heart (like) toggling.
        # case_1: writer likes
        client.patch("/api/boards/1/heart")
        self.assertEqual(Board.objects.get(index=1).heart_count, 1)
        # case_2: a second user likes as well
        client.credentials(HTTP_AUTHORIZATION="Bearer " + access_token_2)
        client.patch("/api/boards/1/heart")
        self.assertEqual(Board.objects.get(index=1).heart_count, 2)
        # case_3: same user patches again -> like removed
        client.patch("/api/boards/1/heart")
        self.assertEqual(Board.objects.get(index=1).heart_count, 1)

        # Board update.
        # case_1: non-writer (still user 2) -> 401
        result = client.put(
            "/api/boards/1",
            data={"title": "modify_test", "content": "modify_test", "hashtag": "#first,#modify"},
            format="json",
        )
        exp = {"msg": "게시글 수정 권한이 없습니다."}
        self.assertEqual(result.status_code, 401)
        self.assertEqual(result.data, exp)
        # case_2: writer updates successfully
        client.credentials(HTTP_AUTHORIZATION="Bearer " + access_token_1)
        result = client.put(
            "/api/boards/1",
            data={"title": "modify_test", "content": "modify_test", "hashtag": "#first,#modify"},
            format="json",
        )
        exp = {"msg": "게시글이 수정되었습니다."}
        self.assertEqual(result.status_code, 200)
        self.assertEqual(result.data, exp)
        exp = {
            "title": "modify_test",
            "content": "modify_test",
            "hashtag": "#first,#modify",
            "heart_count": 1,
            "views_count": 1,
        }
        self.assertEqual(self._board_snapshot(Board.objects.get(index=1)), exp)

        # Soft delete.
        # case_1: non-writer -> 401
        client.credentials(HTTP_AUTHORIZATION="Bearer " + access_token_2)
        result = client.delete("/api/boards/1")
        exp = {"msg": "게시글 삭제 권한이 없습니다."}
        self.assertEqual(result.status_code, 401)
        self.assertEqual(result.data, exp)
        # case_2: writer deletes -> row flagged inactive, not removed
        client.credentials(HTTP_AUTHORIZATION="Bearer " + access_token_1)
        result = client.delete("/api/boards/1")
        exp = {"msg": "게시글이 삭제되었습니다."}
        self.assertEqual(result.status_code, 200)
        self.assertEqual(result.data, exp)
        self.assertIsNone(Board.objects.filter(index=1, is_active=True).first())

        # Restore.
        # case_1: non-writer -> 401
        client.credentials(HTTP_AUTHORIZATION="Bearer " + access_token_2)
        result = client.patch("/api/boards/1")
        exp = {"msg": "게시글 복구 권한이 없습니다."}
        self.assertEqual(result.status_code, 401)
        self.assertEqual(result.data, exp)
        # case_2: writer restores
        client.credentials(HTTP_AUTHORIZATION="Bearer " + access_token_1)
        result = client.patch("/api/boards/1")
        exp = {"msg": "게시글이 복구되었습니다."}
        self.assertEqual(result.status_code, 200)
        self.assertEqual(result.data, exp)
        self.assertIsNotNone(Board.objects.filter(index=1, is_active=True).first())

        # Load fixture boards 2..11 and attach hashtags by index range.
        set_dummy(["board", "hashtag"])
        hash_tag_dict = {idx + 1: tag for idx, tag in enumerate(Hashtag.objects.all())}
        for board in Board.objects.all().order_by("index"):
            if 2 <= board.index < 5:   # boards 2-4: #서울
                board.tagging.add(hash_tag_dict[3])
            if 5 <= board.index < 8:   # boards 5-7: #맛집
                board.tagging.add(hash_tag_dict[4])
            if 8 <= board.index < 10:  # boards 8-9: #서울맛집
                board.tagging.add(hash_tag_dict[5])
            if board.index >= 10:      # boards 10-11: #서울 and #맛집
                board.tagging.add(hash_tag_dict[3])
                board.tagging.add(hash_tag_dict[4])
            board.save()

        # List endpoint: 11 boards in total.
        self.assertEqual(len(Board.objects.all()), 11)
        # case_1: missing page parameter -> 400
        result = client.get("/api/boards")
        self.assertEqual(result.status_code, 400)
        self.assertEqual(result.data, {"msg": "page를 지정해주세요."})
        # case_2: page 1 -> 10 boards (default ordering)
        result = client.get("/api/boards?page=1")
        self.assertEqual(result.status_code, 200)
        exp = [
            _expected_row("안녕", "test입니다", "#서울,#맛집", 1, 0),
            _expected_row("안녕", "test입니다", "#서울,#맛집", 1, 0),
            _expected_row("안녕", "test입니다", "#서울맛집", 1, 0),
            _expected_row("안녕", "test입니다", "#서울맛집", 1, 0),
            _expected_row("안녕", "test입니다", "#맛집", 1, 0),
            _expected_row("안녕", "test입니다", "#맛집", 1, 0),
            _expected_row("테스트", "test입니다", "#맛집", 0, 2),
            _expected_row("안녕", "test입니다", "#서울", 0, 1),
            _expected_row("테스트", "test입니다", "#서울", 0, 2),
            _expected_row("안녕", "test입니다", "#서울", 0, 0),
        ]
        self.assertEqual(self._board_list(result), exp)
        # case_3: page 1 ordered by views_count descending
        result = client.get("/api/boards?page=1&orderBy=-views_count")
        self.assertEqual(result.status_code, 200)
        exp = [
            _expected_row("테스트", "test입니다", "#서울", 0, 2),
            _expected_row("테스트", "test입니다", "#맛집", 0, 2),
            _expected_row("modify_test", "modify_test", "#first,#modify", 1, 1),
            _expected_row("안녕", "test입니다", "#서울", 0, 1),
            _expected_row("안녕", "test입니다", "#서울", 0, 0),
            _expected_row("안녕", "test입니다", "#맛집", 1, 0),
            _expected_row("안녕", "test입니다", "#맛집", 1, 0),
            _expected_row("안녕", "test입니다", "#서울맛집", 1, 0),
            _expected_row("안녕", "test입니다", "#서울맛집", 1, 0),
            _expected_row("안녕", "test입니다", "#서울,#맛집", 1, 0),
        ]
        self.assertEqual(self._board_list(result), exp)
        # case_4: page 2 -> the single remaining board
        result = client.get("/api/boards?page=2")
        self.assertEqual(result.status_code, 200)
        exp = [_expected_row("modify_test", "modify_test", "#first,#modify", 1, 1)]
        self.assertEqual(self._board_list(result), exp)
        # case_5: title search -> 2 boards
        result = client.get("/api/boards?page=1&search=테스트")
        self.assertEqual(result.status_code, 200)
        exp = [
            _expected_row("테스트", "test입니다", "#맛집", 0, 2),
            _expected_row("테스트", "test입니다", "#서울", 0, 2),
        ]
        self.assertEqual(self._board_list(result), exp)
        # case_6: hashtag filter 서울 -> 5 boards (index 2,3,4,10,11)
        result = client.get("/api/boards?page=1&hashtags=서울")
        self.assertEqual(result.status_code, 200)
        exp = [
            _expected_row("안녕", "test입니다", "#서울,#맛집", 1, 0),
            _expected_row("안녕", "test입니다", "#서울,#맛집", 1, 0),
            _expected_row("안녕", "test입니다", "#서울", 0, 1),
            _expected_row("테스트", "test입니다", "#서울", 0, 2),
            _expected_row("안녕", "test입니다", "#서울", 0, 0),
        ]
        self.assertEqual(self._board_list(result), exp)
        # case_7: hashtag filter 서울맛집 -> 2 boards (index 8,9)
        result = client.get("/api/boards?page=1&hashtags=서울맛집")
        self.assertEqual(result.status_code, 200)
        exp = [
            _expected_row("안녕", "test입니다", "#서울맛집", 1, 0),
            _expected_row("안녕", "test입니다", "#서울맛집", 1, 0),
        ]
        self.assertEqual(self._board_list(result), exp)
        # case_8: combined hashtags 서울,맛집 -> 2 boards (index 10,11)
        result = client.get("/api/boards?page=1&hashtags=서울,맛집")
        self.assertEqual(result.status_code, 200)
        exp = [
            _expected_row("안녕", "test입니다", "#서울,#맛집", 1, 0),
            _expected_row("안녕", "test입니다", "#서울,#맛집", 1, 0),
        ]
        self.assertEqual(self._board_list(result), exp)
        # case_9: search + hashtag filter -> 1 board
        result = client.get("/api/boards?page=1&search=테스트&hashtags=서울")
        self.assertEqual(result.status_code, 200)
        exp = [_expected_row("테스트", "test입니다", "#서울", 0, 2)]
        self.assertEqual(self._board_list(result), exp)
run_extraction.py
Source:run_extraction.py
# Initial code cloned from https://github.com/tonyzhaozh/few-shot-learning
import random
import argparse
import time
from collections import defaultdict
from data_utils import load_dataset
from utils import *


def main(models, datasets, all_shots, num_seeds, subsample_test_set, api_num_log_prob, use_saved_results, modify_test):
    """
    Run experiment or load past results, print accuracy.

    Builds the cross product of models x datasets x shot counts x seeds and
    either replays saved pickles (use_saved_results) or queries the model.
    """
    start_time = time.time()
    default_params = {
        'subsample_test_set': subsample_test_set,
        'api_num_log_prob': api_num_log_prob,
        'modify_test': modify_test,
    }
    all_params = []
    for model in models:
        for dataset in datasets:
            for num_shots in all_shots:
                for seed in range(num_seeds):
                    p = deepcopy(default_params)
                    p['model'] = model
                    p['dataset'] = dataset
                    p['seed'] = seed
                    p['num_shots'] = num_shots
                    p['expr_name'] = f"{p['dataset']}_{p['model']}_{p['num_shots']}shot_{repr(p['subsample_test_set'])}_subsample_seed{p['seed']}_{p['modify_test']}"
                    all_params.append(p)
    # Query the model and save the responses, or reload past results.
    if use_saved_results:
        load_results(all_params)
    else:
        save_results(all_params)
    print("--- %s seconds ---" % (time.time() - start_time))


def save_results(params_list, freeze_test_set=True):
    """
    Save all model's responses and the rest of configs into a pickle file.

    For each experiment config: load the dataset, optionally swap test-set
    entities with seen/unseen replacements, run contextual calibration on the
    first predicted token, greedily decode the rest, and record accuracy.
    """
    result_tree = dict()
    # Entity pools used when 'modify_test' rewrites test labels/sentences.
    seen = {}
    seen['person'] = ['Mary', 'Steve', 'Davis', 'Danny', 'Rose', 'Edward', 'Rob', 'Harry', 'Tom', 'Paul', 'Sam', 'Robert', 'Alex', 'Michelle', 'James']
    seen['location'] = ['Florida', 'Toronto', 'Germany', 'India', 'Scotland', 'Washington', 'Syria', 'Ukraine', 'Houston', 'America', 'France', 'Australia', 'Turkey', 'NEW YORK', 'Chicago']
    seen['corporation'] = ['Reuters', 'CNN', 'NBA', 'Uber', 'YouTube', 'CBC', 'Netflix', 'Microsoft', 'Twitter', 'Facebook', 'Apple', 'MAC', 'Tesla', 'Disney', 'Reddit']
    seen['group'] = ['Army', 'Chicago Blackhawks', 'Real Madrid', 'CIA', 'Senate', 'ART', 'NBA', 'The Black Keys', 'Crystal Palace', 'European Union', 'green day', 'Labor', 'Chelsea', 'the warriors', 'Democrats']
    seen['product'] = ['Air Music Jump', 'Android', 'Linux OS', 'iOS', 'Windows 7', 'Tesla', 'Google Music', 'SQL', 'Amazon Prime', 'Nintendo plus', 'google pixel', 'iPhone', 'Xbox 360', 'Legendary Skin', 'Bio Spot']
    seen['creative-work'] = ['Black Swan', 'Iron Man 2', 'Finding Bigfoot', 'Good Morning Britain', 'Teen Titans', 'Pac- Man', 'Game of Thrones', 'La La Land', 'Last Christmas', 'Star Wars', 'Doctor Who', 'the Twilight Zone', 'Pokémon', 'Star Trek', 'Minecraft']
    unseen = ['xgwqicng', 'kiooaiql', 'wpvqymid', 'rrmihdcg', 'owblmgbx', 'tiybjelq', 'ytlbllnh', 'ybwifxxv', 'svlsskxx', 'jdtqyoov', 'tzrtffbu', 'jvwywjhy', 'hzhwhahw', 'gjrmquke', 'gmenqwpb']
    for param_index, params in enumerate(params_list):
        print("\nExperiment name:", params['expr_name'])
        ### load data
        all_train_sentences, all_train_labels, all_train_neg_sents, all_train_neg_labels, all_test_sentences, all_test_labels = load_dataset(params)
        ### sample test set
        if params['subsample_test_set'] is None:
            test_sentences, test_labels = all_test_sentences, all_test_labels
            if params['modify_test'] is not None:
                if params['modify_test'] == 'unseen':
                    selected_ents = unseen
                else:
                    # e.g. dataset "wnut_person" -> entity type "person"
                    selected_ents = seen[params['dataset'].split('_')[1]]
                for i in range(len(test_labels)):
                    if test_labels[i].lower() == 'none':
                        continue
                    # Replace each gold entity with a random pool entity, both
                    # in the label string and inside the sentence text.
                    ents = test_labels[i].split('\t')
                    test_labels[i] = ''
                    for e in ents:
                        rlabel = random.choice(selected_ents)
                        test_labels[i] += rlabel + '\t'
                        test_sentences[i] = test_sentences[i].replace(e, rlabel)
                    test_labels[i] = test_labels[i].strip()
            print(f"selecting full test set ({len(all_test_labels)} examples)")
        else:
            if freeze_test_set:
                np.random.seed(0)  # always use seed 0 result if freeze
            else:
                np.random.seed(params['seed'])
            test_sentences, test_labels = random_sampling(all_test_sentences, all_test_labels, params['subsample_test_set'])
            print(f"selecting {len(test_labels)} subsample of test set")
        ### sample few-shot training examples: 9 positives + the rest negatives
        np.random.seed(params['seed'])
        train_sentences, train_labels = random_sampling(all_train_sentences, all_train_labels, 9)
        neg = params['num_shots'] - 9
        neg_sents, neg_labels = random_sampling(all_train_neg_sents, all_train_neg_labels, neg)
        train_sentences += neg_sents
        train_labels += neg_labels
        ### Get contextual-calibrated answer (first token)
        # ask model for candidate first token, for each of the test sentences
        all_responses, all_prompts = get_model_response(params, train_sentences, train_labels, test_sentences, return_all_prompts=True, num_tokens_to_predict_override=1)
        print('first token finished')
        # calculate calibration constant for each of the candidate tokens
        all_options = set()
        for resp in all_responses:
            logprobs = resp['logprobs']['top_logprobs'][0]  # first token
            all_options.update(list(logprobs.keys()))
        content_free_token_list = [""]
        cf_prompts = []
        for option in all_options:
            for token in content_free_token_list:
                cf_prompts.append(params['prompt_func'](params, train_sentences, train_labels, token, test_label_option=option))
        cf_probs_dict = defaultdict(list)
        cf_prompts_chunked = list(chunks(cf_prompts, chunk_size_helper(params)))
        print(len(cf_prompts_chunked))
        for chunk_id, prompt_chunk in enumerate(cf_prompts_chunked):
            all_resp = complete(prompt_chunk, 0, model=params['model'], echo=True, num_log_probs=1)
            for resp in all_resp['choices']:
                log_prob = resp['logprobs']['token_logprobs'][-1]
                token = resp['logprobs']['tokens'][-1]
                cf_probs_dict[token].append(np.exp(log_prob))
        # Min across the ensemble of content-free placeholders.
        cf_probs_dict = {k: np.min(v) for k, v in cf_probs_dict.items()}
        # obtain model's calibrated decision
        all_reweighted_ans = []
        error_count = 0
        total_count = 0
        for resp in all_responses:
            orig_probs_list = []
            cf_probs_list = []
            all_tokens = []
            logprobs = resp['logprobs']['top_logprobs'][0]  # first token
            for token in list(logprobs.keys()):
                total_count += 1
                orig_prob = np.exp(logprobs[token])
                if token in cf_probs_dict:
                    orig_probs_list.append(orig_prob)
                    cf_probs_list.append(cf_probs_dict[token])
                    all_tokens.append(token)
                else:  # token never appeared in the content-free pass
                    error_count += 1
            orig_probs_list = np.array(orig_probs_list)
            cf_probs_list = np.array(cf_probs_list)
            orig_probs_list = orig_probs_list / np.sum(orig_probs_list)
            cf_probs_list = cf_probs_list / np.sum(cf_probs_list)
            # contextual calibration: p_calibrated = W p - cf  (W = identity)
            W = np.identity(len(orig_probs_list))
            b = -1 * np.expand_dims(cf_probs_list, axis=-1)
            calibrate_label_probs = np.matmul(W, np.expand_dims(orig_probs_list, axis=-1)) + b
            best_idx = np.argmax(calibrate_label_probs)
            all_reweighted_ans.append(all_tokens[best_idx])
        error_frac = error_count / total_count
        if error_frac > 0.01:
            print(f"WARNING: re-encode error fraction: {error_frac:.2f}")
        print('context free finished')
        ### Get contextual-calibrated answer (rest of tokens, greedy decode)
        for i in range(len(all_prompts)):
            all_prompts[i] += all_reweighted_ans[i]
        all_responses_greedy, all_prompts = get_model_response(params, train_sentences, train_labels, test_sentences, return_all_prompts=True, num_tokens_to_predict_override=15 - 1, override_prompt=all_prompts)
        print('rest of tokens finished')
        for i in range(len(all_reweighted_ans)):
            all_reweighted_ans[i] += all_responses_greedy[i]['text']
        ### Get accuracy
        all_reweighted_ans = [ans.strip() for ans in all_reweighted_ans]
        reweighted_accuracy = em_accuracy_helper(all_reweighted_ans, test_labels)
        accuracies = [reweighted_accuracy]
        # add to result_tree: dataset -> model -> num_shots -> seed -> accuracies
        keys = [params['dataset'], params['model'], params['num_shots']]
        node = result_tree  # root
        for k in keys:
            if k not in node:
                node[k] = dict()
            node = node[k]
        node[params['seed']] = accuracies
        ### savings
        result_to_save = dict()
        params_to_save = deepcopy(params)
        result_to_save['params'] = params_to_save
        result_to_save['train_sentences'] = train_sentences
        result_to_save['train_labels'] = train_labels
        result_to_save['test_sentences'] = test_sentences
        result_to_save['test_labels'] = test_labels
        result_to_save['all_responses_first'] = all_responses
        result_to_save['all_responses_greedy'] = all_responses_greedy
        result_to_save['all_reweighted_ans'] = all_reweighted_ans
        result_to_save['accuracies'] = accuracies
        # Functions don't pickle; drop the prompt builder before saving.
        if 'prompt_func' in result_to_save['params'].keys():
            params_to_save['prompt_func'] = None
        save_pickle(params, result_to_save)


def em_accuracy_helper(prediction, label):
    """Exact-match accuracy over the first line of each prediction."""
    correctness_list = []
    for pred, gold in zip(prediction, label):
        pred = pred.split('\n')[0]
        correctness_list.append(1 if pred == gold else 0)
    return np.mean(correctness_list)


if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    # required arguments
    parser.add_argument('--models', dest='models', action='store', required=True, help='name of model(s), e.g., GPT2-XL')
    parser.add_argument('--datasets', dest='datasets', action='store', required=True, help='name of dataset(s), e.g., agnews')
    parser.add_argument('--num_seeds', dest='num_seeds', action='store', required=True, help='num seeds for the training set', type=int)
    parser.add_argument('--all_shots', dest='all_shots', action='store', required=True, help='num training examples to use')
    # other arguments
    parser.add_argument('--subsample_test_set', dest='subsample_test_set', action='store', required=False, type=int,
                        default=None, help='size of test set to use to speed up eval. None means using all test set')
    parser.add_argument('--api_num_log_prob', dest='api_num_log_prob', action='store', required=False, type=int,
                        default=15, help='number of top tokens to ask for when querying the model. Capped at 100 for OpenAI GPT-3 API')
    parser.add_argument('--use_saved_results', dest='use_saved_results', action='store_const', const=True, default=False,
                        help='whether to load the results from pickle files and not run the model')
    parser.add_argument('--modify_test', dest='modify_test', default=None, action='store', required=False, help='whether the seen or unseen test scenario is performed')
    args = parser.parse_args()
    args = vars(args)

    # simple processing: comma-separated CLI strings -> lists
    def convert_to_list(items, is_int=False):
        if is_int:
            return [int(s.strip()) for s in items.split(",")]
        else:
            return [s.strip() for s in items.split(",")]

    args['models'] = convert_to_list(args['models'])
    args['datasets'] = convert_to_list(args['datasets'])
    args['all_shots'] = convert_to_list(args['all_shots'], is_int=True)
    # NOTE(review): source was truncated here; upstream repo dispatches into
    # main with the processed arguments.
    main(**args)
mainwindow.py
Source:mainwindow.py
#!/usr/bin/python
# -*- coding: utf-8 -*-
from PyQt5.QtWidgets import QMainWindow, QApplication, QWidget, QGridLayout, QPushButton, QLabel
from PyQt5.QtCore import QCoreApplication
from Sub.newtestwindow import NewTestWindow
from Sub.modifytestwindow import ModifyTestWindow
from Sub.consultdatabasewindow import ConsultDatabaseWindow
from os import getlogin


class MainWindow(QMainWindow):
    """Main chooser window: new test / modify test / consult / admin / quit."""

    def __init__(self, *args, **kwargs):
        super(MainWindow, self).__init__(*args, **kwargs)
        self.version = "Version: 1.0"
        self.user_login = getlogin()
        self.setObjectName("ChoiceWindow")
        self.resize(350, 115)
        self.central_widget = QWidget(self)
        self.central_widget.setObjectName("central_widget")
        self.inner_layout = QGridLayout(self)
        self.inner_layout.setObjectName("innerLayout")
        self.button_new_test = QPushButton(self)
        self.button_new_test.setFlat(False)
        self.button_new_test.setObjectName("Nouvel Essai")
        self.button_new_test.clicked.connect(self.button_new_test_clicked)
        self.button_modify_test = QPushButton(self)
        self.button_modify_test.setFlat(False)
        self.button_modify_test.setObjectName("Modifier Essai")
        self.button_modify_test.clicked.connect(self.button_modify_test_clicked)
        self.button_consult = QPushButton(self)
        self.button_consult.setFlat(False)
        self.button_consult.setObjectName("Consultation")
        self.button_consult.clicked.connect(self.button_consult_clicked)
        self.button_quit = QPushButton(self)
        self.button_quit.setFlat(False)
        self.button_quit.setObjectName("Quitter")
        self.button_quit.clicked.connect(self.close)
        self.button_admin = QPushButton(self)
        self.button_admin.setFlat(False)
        self.button_admin.setObjectName("Administration")
        self.button_admin.clicked.connect(self.button_admin_clicked)
        # Admin button is only shown for this specific user login.
        self.button_admin.setVisible(self.user_login == '11024090')
        self.label_version = QLabel(self)
        self.label_version.setText(self.version)
        self.inner_layout.addWidget(self.button_new_test, 0, 0)
        self.inner_layout.addWidget(self.button_modify_test, 1, 0)
        self.inner_layout.addWidget(self.button_consult, 2, 0)
        self.inner_layout.addWidget(self.button_admin, 3, 0)
        self.inner_layout.addWidget(self.button_quit, 4, 0)
        self.inner_layout.addWidget(self.label_version, 5, 0)
        self.central_widget.setLayout(self.inner_layout)
        self.setCentralWidget(self.central_widget)
        self.retranslate_ui(self)

    def button_new_test_clicked(self):
        # Child window is parented to self, so Qt keeps it alive after this
        # method returns (the original's `del` of the local name was a no-op).
        new_test = NewTestWindow(self)
        new_test.show()

    def button_modify_test_clicked(self):
        modify_test = ModifyTestWindow(self)
        modify_test.show()

    def button_consult_clicked(self):
        consult_database = ConsultDatabaseWindow(self)
        consult_database.show()

    def button_admin_clicked(self):
        # Admin features not implemented yet.
        pass

    def retranslate_ui(self, main_window):
        """Apply translatable captions to the window and its buttons."""
        _translate = QCoreApplication.translate
        main_window.setWindowTitle(_translate("MainWindow", "Base d'essai BE"))
        self.button_new_test.setText(_translate("MainWindow", "Nouvel Essai"))
        self.button_modify_test.setText(_translate("MainWindow", "Modifier Essai"))
        self.button_consult.setText(_translate("MainWindow", "Consultation"))
        self.button_admin.setText(_translate("MainWindow", "Administration"))
        self.button_quit.setText(_translate("MainWindow", "Quitter"))


if __name__ == "__main__":
    import sys
    app = QApplication(sys.argv)
    ui = MainWindow()
    ui.show()
    # Bug fix: enter the Qt event loop; without this the process exits
    # immediately. (Also dropped the original's unused extra QMainWindow.)
    sys.exit(app.exec_())
Learn to execute automation testing from scratch with the LambdaTest Learning Hub. Right from setting up the prerequisites to running your first automation test, to following best practices and diving deeper into advanced test scenarios, the LambdaTest Learning Hub compiles step-by-step guides to help you become proficient with different test automation frameworks, e.g., Selenium, Cypress, TestNG, etc.
You could also refer to the video tutorials on the LambdaTest YouTube channel to get step-by-step demonstrations from industry experts.
Get 100 automation test minutes FREE!