Best Python code snippet using localstack_python
test.py
Source:test.py
# -*- coding: utf-8 -*-
# @Time   : 2021-11-30 14:52
# @Email  : zhilishen@smail.nju.edu.cn
# @Author : Zhili Shen
# @File   : test.py
# @Notice :
import collections

# Structure sketch (the alignment of the original ASCII diagram was lost):
#
#                   next            next
# bucket 1: node1(head) -> node2 -> node3(tail)
#    |  ^           <- last         <- last
#  down |up
#    v  |           next            next
# bucket 2: node4(head) -> node5 -> node6(tail)
#    |  ^           <- last         <- last
#  down |up
#    v  |           next            next
# bucket 3: node7(head) -> node8 -> node9(tail)
#
# head_bucket: the bucket with the fewest accesses
# node(head): the most recently accessed node in its bucket
# node(tail): the least recently accessed node in its bucket


class Node:
    def __init__(self, key, value):
        self.key = key
        self.value = value
        self.last = None
        self.next = None
        self.times = 0


class Bucket:
    def __init__(self, node: Node):
        self.head = node  # most recently accessed node
        self.tail = node  # least recently accessed node
        self.up = None
        self.down = None

    def add_node_to_head(self, node: Node):
        # A node is only added to a bucket right after a set or get, so it becomes
        # the head to mark it as the most recently accessed node.
        node.next = self.head
        self.head.last = node
        self.head = node

    def is_empty(self):
        return self.head is None

    def delete_node(self, node: Node):
        if self.head is self.tail:  # only one node left, so it must be the one to delete
            self.head = None
            self.tail = None
        else:
            if node is self.head:  # the node to delete is the head
                self.head = node.next
                self.head.last = None
            elif node is self.tail:  # the node to delete is the tail
                self.tail = node.last
                self.tail.next = None
            else:
                node.last.next = node.next
                node.next.last = node.last
        node.last = None  # detach the node completely
        node.next = None


class LFU:
    def __init__(self, capacity: int):
        self.capacity = capacity
        self.size = 0
        self.key2node = {}
        self.node2bucket = {}
        self.head_bucket = None  # the bucket with the fewest accesses

    def set(self, key: int, value: int):
        if self.capacity == 0:
            return
        if key in self.key2node:
            node = self.key2node[key]
            node.value = value
            node.times += 1
            bucket = self.node2bucket[node]
            self.move(node, bucket)
        else:
            if self.size == self.capacity:  # cache is full, evict first
                node = self.head_bucket.tail
                self.head_bucket.delete_node(node)
                self.modify_bucket(self.head_bucket)
                self.key2node.pop(node.key)  # this is why Node stores its key: so it can be removed from key2node
                self.node2bucket.pop(node)
                self.size -= 1
            node = Node(key, value)  # a new node has the lowest access count, so it belongs in head_bucket
            if self.head_bucket is None:
                self.head_bucket = Bucket(node)
            else:
                if self.head_bucket.head.times == node.times:  # head_bucket matches the count, add the node directly
                    self.head_bucket.add_node_to_head(node)
                else:
                    new_bucket = Bucket(node)  # otherwise the node's bucket becomes the new head_bucket
                    new_bucket.down = self.head_bucket
                    self.head_bucket.up = new_bucket
                    self.head_bucket = new_bucket
            self.key2node[key] = node
            self.node2bucket[node] = self.head_bucket
            self.size += 1

    def get(self, key):
        if key not in self.key2node:
            return None
        node = self.key2node[key]
        node.times += 1
        cur_bucket = self.node2bucket[node]
        self.move(node, cur_bucket)
        return node.value

    # remove_node_bucket is the bucket that just lost a node; report whether it is
    # now empty while keeping the buckets linked as a doubly linked list.
    def modify_bucket(self, remove_node_bucket: Bucket):
        if remove_node_bucket.is_empty():  # the bucket that lost a node is now empty
            if self.head_bucket is remove_node_bucket:  # it was the bucket with the fewest accesses
                self.head_bucket = remove_node_bucket.down  # the next bucket becomes the new head_bucket
                if self.head_bucket is not None:  # if any bucket remains
                    self.head_bucket.up = None  # the new head_bucket has nothing above it
            else:  # the empty bucket sits somewhere in the middle
                remove_node_bucket.up.down = remove_node_bucket.down  # unlink this bucket
                if remove_node_bucket.down is not None:
                    remove_node_bucket.down.up = remove_node_bucket.up  # relink the bucket below to the bucket above
            return True  # the bucket became empty
        else:
            return False  # the bucket is not empty

    # The node's count has just been incremented, so it must leave `bucket` and move to
    # the next one, keeping both the bucket list and the node lists doubly linked.
    def move(self, node: Node, bucket: Bucket):
        bucket.delete_node(node)  # first remove the node from its bucket
        if self.modify_bucket(bucket):  # the bucket that lost the node became empty
            pre_bucket = bucket.up  # a new bucket, if created, hangs below the bucket above
        else:
            pre_bucket = bucket  # otherwise it hangs below the old bucket itself
        next_bucket = bucket.down  # the bucket that might hold the node now
        if next_bucket is None:  # no such bucket
            new_bucket = Bucket(node)  # create a new bucket
            if pre_bucket is not None:  # link pre_bucket with new_bucket
                pre_bucket.down = new_bucket
                new_bucket.up = pre_bucket
            if self.head_bucket is None:  # if head_bucket is empty at this point, record the new bucket
                self.head_bucket = new_bucket
            self.node2bucket[node] = new_bucket  # don't forget to update the mapping
        else:
            if next_bucket.head.times == node.times:  # next_bucket matches the count, add the node directly
                next_bucket.add_node_to_head(node)
                self.node2bucket[node] = next_bucket
            else:  # next_bucket does not match the count, so the node cannot go there directly
                new_bucket = Bucket(node)
                if pre_bucket is not None:
                    pre_bucket.down = new_bucket
                    new_bucket.up = pre_bucket  # link pre_bucket with new_bucket, and new_bucket with next_bucket
                new_bucket.down = next_bucket
                next_bucket.up = new_bucket
                if self.head_bucket is next_bucket:  # if next_bucket was the head_bucket, the new bucket takes its place
                    self.head_bucket = new_bucket
                self.node2bucket[node] = new_bucket


def create_bucket():
    fake_head = Node(0, 0)
    fake_tail = Node(0, 0)
    fake_head.next = fake_tail
    fake_tail.last = fake_head
    return fake_head, fake_tail


class LFU_1:
    def __init__(self, capacity: int):
        self.capacity = capacity
        self.size = 0
        self.min_freq = 0
        self.freq2bucket = collections.defaultdict(create_bucket)  # a plain dict raises on a missing key; defaultdict supplies a fresh bucket instead
        self.key2node = {}

    def delete(self, node: Node):
        if node.last is not None:  # set() may pass in a brand-new node or an existing one, so only unlink a node that is actually linked
            node.last.next = node.next
            node.next.last = node.last
            if node.last is self.freq2bucket[node.times][0] and node.next is self.freq2bucket[node.times][-1]:  # the node's bucket is now empty, drop it from the dict
                self.freq2bucket.pop(node.times)
        return node

    def increase(self, node: Node):
        node.times += 1
        self.delete(node)
        pre_node = self.freq2bucket[node.times][-1].last
        node.last = pre_node
        node.next = pre_node.next
        pre_node.next.last = node
        pre_node.next = node
        if node.times == 1:
            self.min_freq = 1
        elif self.min_freq == node.times - 1:  # if the bucket for the minimum frequency is now empty, the minimum frequency must move up
            head, tail = self.freq2bucket[self.min_freq]
            if head.next is tail:
                self.min_freq = node.times

    def get(self, key: int):
        if key in self.key2node:
            self.increase(self.key2node[key])
            return self.key2node[key].value
        return None

    def set(self, key, value):
        if self.capacity == 0:
            return
        if key in self.key2node:
            node = self.key2node[key]
            node.value = value
        else:
            node = Node(key, value)
            self.key2node[key] = node
            self.size += 1
        if self.size > self.capacity:
            self.size -= 1
            delete_node = self.delete(self.freq2bucket[self.min_freq][0].next)
            self.key2node.pop(delete_node.key)
        self.increase(node)


if __name__ == "__main__":
    a, b = map(int, input().split())
    lfu = LFU_1(b)
    for _ in range(a):
        a, *b = map(int, input().split())
        if a == 1:
            lfu.set(b[0], b[-1])
        else:
...
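Both LFU and LFU_1 above expose the same set/get interface. A minimal usage sketch (not part of the original file), assuming LFU_1 is importable from this module; the capacity and key/value pairs are illustrative only.
lfu = LFU_1(2)
lfu.set(1, 10)
lfu.set(2, 20)
print(lfu.get(1))  # 10 -> key 1 now has the higher access count
lfu.set(3, 30)     # capacity exceeded: key 2, the least frequently used, is evicted
print(lfu.get(2))  # None
print(lfu.get(3))  # 30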
test_lambda.py
Source:test_lambda.py
import unittest
from io import StringIO
from zipfile import ZipFile

import botocore
from botocore.stub import Stubber
import boto3
from testfixtures import TempDirectory, compare

from .. import aws_lambda

REGION = "us-east-1"

ALL_FILES = (
    'f1/f1.py',
    'f1/f1.pyc',
    'f1/__init__.py',
    'f1/test/__init__.py',
    'f1/test/f1.py',
    'f1/test/f1.pyc',
    'f1/test2/test.txt',
    'f2/f2.js'
)
F1_FILES = [p[3:] for p in ALL_FILES if p.startswith('f1')]
F2_FILES = [p[3:] for p in ALL_FILES if p.startswith('f2')]

BUCKET_NAME = "myBucket"


class TestLambdaHooks(unittest.TestCase):
    def setUp(self):
        self.s3 = boto3.client("s3")
        self.stubber = Stubber(self.s3)

    @classmethod
    def temp_directory_with_files(cls, files=ALL_FILES):
        d = TempDirectory()
        for f in files:
            d.write(f, b'')
        return d

    def assert_zip_file_list(self, zip_file, files):
        found_files = set()
        for zip_info in zip_file.infolist():
            perms = (
                zip_info.external_attr & aws_lambda.ZIP_PERMS_MASK
            ) >> 16
            self.assertIn(perms, (0o755, 0o644),
                          'ZIP member permission must be 755 or 644')
            found_files.add(zip_info.filename)
        compare(found_files, set(files))

    def assert_s3_zip_file_list(self, bucket, key, files):
        object_info = self.s3.get_object(Bucket=bucket, Key=key)
        zip_data = StringIO(object_info['Body'].read())
        with ZipFile(zip_data, 'r') as zip_file:
            self.assert_zip_file_list(zip_file, files)

    def test_ensure_bucket_bucket_exists(self):
        self.stubber.add_response("head_bucket", {})
        with self.stubber:
            aws_lambda._ensure_bucket(self.s3, BUCKET_NAME)

    def test_ensure_bucket_bucket_doesnt_exist_create_ok(self):
        self.stubber.add_client_error(
            "head_bucket",
            service_error_code=404,
            http_status_code=404
        )
        self.stubber.add_response(
            "create_bucket",
            {"Location": "/%s" % BUCKET_NAME}
        )
        with self.stubber:
            aws_lambda._ensure_bucket(self.s3, BUCKET_NAME)

    def test_ensure_bucket_bucket_doesnt_exist_access_denied(self):
        self.stubber.add_client_error(
            "head_bucket",
            service_error_code=401,
            http_status_code=401
        )
        with self.stubber:
            with self.assertRaises(botocore.exceptions.ClientError):
                aws_lambda._ensure_bucket(self.s3, BUCKET_NAME)

    def test_ensure_bucket_unhandled_error(self):
        self.stubber.add_client_error(
            "head_bucket",
            service_error_code=500,
            http_status_code=500
        )
        with self.stubber:
            with self.assertRaises(botocore.exceptions.ClientError) as cm:
                aws_lambda._ensure_bucket(self.s3, BUCKET_NAME)
            exc = cm.exception
            self.assertEqual(exc.response["Error"]["Code"], 500)

    # This should fail, your task is to figure out why and
    # make it pass.
    def test_upload_lambda_functions(self):
        # 1st call, file doesn't exist, so no return from head_object
        self.stubber.add_response("head_bucket", {})
        self.stubber.add_response("head_object", {})
        self.stubber.add_response("put_object", {})
        # 2nd call, file exists, so the hash is in the response to head_object
        # and since that should match the hash of the new file, it won't try
        # to call put_object
        self.stubber.add_response("head_bucket", {})
        self.stubber.add_response("head_object", {
            "ETag": '"d41d8cd98f00b204e9800998ecf8427e"'  # correct hash for the files, you can trust this, including extra quotes
        })
        # should not call put_object again, so no stubbing
        try:
            with self.temp_directory_with_files() as tmp_dir:
                with self.stubber:
                    aws_lambda.upload_lambda_functions(self.s3, BUCKET_NAME, "things", tmp_dir.path)
                    aws_lambda.upload_lambda_functions(self.s3, BUCKET_NAME, "things", tmp_dir.path)
        finally:
            tmp_dir.cleanup()


if __name__ == "__main__":
...
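The tests above all follow the same botocore Stubber pattern: queue the responses the code under test is expected to trigger, in call order, and let the stubber complain about any unexpected or missing call. A minimal, self-contained sketch of that pattern (not part of the test suite; the bucket name and queued response are illustrative):
import boto3
from botocore.stub import Stubber

s3 = boto3.client("s3", region_name="us-east-1")
stubber = Stubber(s3)
# Queue the response for the one call we expect, with its expected parameters.
stubber.add_response("head_bucket", {}, expected_params={"Bucket": "myBucket"})
with stubber:
    s3.head_bucket(Bucket="myBucket")   # served from the stub, no network call
stubber.assert_no_pending_responses()   # every queued response was consumed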
object_storage.py
Source:object_storage.py
...
    pass


class ObjectStorageClient(metaclass=abc.ABCMeta):
    """Just because the full S3 API is available doesn't mean we should use it all"""

    @abc.abstractmethod
    def head_bucket(self, bucket: str) -> bool:
        pass

    @abc.abstractmethod
    def read(self, bucket: str, key: str) -> Optional[str]:
        pass

    @abc.abstractmethod
    def read_bytes(self, bucket: str, key: str) -> Optional[bytes]:
        pass

    @abc.abstractmethod
    def write(self, bucket: str, key: str, content: Union[str, bytes]) -> None:
        pass


class UnavailableStorage(ObjectStorageClient):
    def head_bucket(self, bucket: str):
        return False

    def read(self, bucket: str, key: str) -> Optional[str]:
        pass

    def read_bytes(self, bucket: str, key: str) -> Optional[bytes]:
        pass

    def write(self, bucket: str, key: str, content: Union[str, bytes]) -> None:
        pass


class ObjectStorage(ObjectStorageClient):
    def __init__(self, aws_client) -> None:
        self.aws_client = aws_client

    def head_bucket(self, bucket: str) -> bool:
        try:
            return bool(self.aws_client.head_bucket(Bucket=bucket))
        except Exception as e:
            logger.warn("object_storage.health_check_failed", bucket=bucket, error=e)
            return False

    def read(self, bucket: str, key: str) -> Optional[str]:
        object_bytes = self.read_bytes(bucket, key)
        if object_bytes:
            return object_bytes.decode("utf-8")
        else:
            return None

    def read_bytes(self, bucket: str, key: str) -> Optional[bytes]:
        s3_response = {}
        try:
            s3_response = self.aws_client.get_object(Bucket=bucket, Key=key)
            return s3_response["Body"].read()
        except Exception as e:
            logger.error("object_storage.read_failed", bucket=bucket, file_name=key, error=e, s3_response=s3_response)
            capture_exception(e)
            raise ObjectStorageError("read failed") from e

    def write(self, bucket: str, key: str, content: Union[str, bytes]) -> None:
        s3_response = {}
        try:
            s3_response = self.aws_client.put_object(Bucket=bucket, Body=content, Key=key)
        except Exception as e:
            logger.error("object_storage.write_failed", bucket=bucket, file_name=key, error=e, s3_response=s3_response)
            capture_exception(e)
            raise ObjectStorageError("write failed") from e


_client: ObjectStorageClient = UnavailableStorage()


def object_storage_client() -> ObjectStorageClient:
    global _client
    if not settings.OBJECT_STORAGE_ENABLED:
        _client = UnavailableStorage()
    elif isinstance(_client, UnavailableStorage):
        _client = ObjectStorage(
            client(
                "s3",
                endpoint_url=settings.OBJECT_STORAGE_ENDPOINT,
                aws_access_key_id=settings.OBJECT_STORAGE_ACCESS_KEY_ID,
                aws_secret_access_key=settings.OBJECT_STORAGE_SECRET_ACCESS_KEY,
                config=Config(signature_version="s3v4", connect_timeout=1, retries={"max_attempts": 1}),
                region_name="us-east-1",
            ),
        )
    return _client


def write(file_name: str, content: Union[str, bytes]) -> None:
    return object_storage_client().write(bucket=settings.OBJECT_STORAGE_BUCKET, key=file_name, content=content)


def read(file_name: str) -> Optional[str]:
    return object_storage_client().read(bucket=settings.OBJECT_STORAGE_BUCKET, key=file_name)


def read_bytes(file_name: str) -> Optional[bytes]:
    return object_storage_client().read_bytes(bucket=settings.OBJECT_STORAGE_BUCKET, key=file_name)


def health_check() -> bool:
...
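A minimal usage sketch for the ObjectStorage wrapper above (not part of the original module), assuming it is importable and that an S3-compatible endpoint such as LocalStack is listening on http://localhost:4566; the bucket name, key, and credentials are illustrative placeholders.
import boto3
from botocore.client import Config

s3 = boto3.client(
    "s3",
    endpoint_url="http://localhost:4566",   # assumed LocalStack edge port
    aws_access_key_id="test",               # LocalStack accepts dummy credentials
    aws_secret_access_key="test",
    config=Config(signature_version="s3v4"),
    region_name="us-east-1",
)
s3.create_bucket(Bucket="example-bucket")

storage = ObjectStorage(s3)
if storage.head_bucket("example-bucket"):
    storage.write("example-bucket", "hello.txt", "hello world")
    print(storage.read("example-bucket", "hello.txt"))  # -> "hello world"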