Best Python code snippet using pandera_python
Image_Processing_Project.py
Source: Image_Processing_Project.py
import cv2
import numpy as np
import dlib

cap = cv2.VideoCapture('video.mp4')
# cap = cv2.VideoCapture('walking_man.mp4')
detector = dlib.get_frontal_face_detector()
predictor = dlib.shape_predictor("shape_predictor_68_face_landmarks.dat")

# Draw blush circles on the cheeks
def cheek(imgorg):
    img_cheek = imgorg.copy()
    for face in faces:
        landmarks = predictor(imgorg, face)
        # left cheek
        x1, x2 = landmarks.part(36).x, landmarks.part(48).x
        y1, y2 = landmarks.part(36).y, landmarks.part(48).y
        x3 = landmarks.part(31).x
        cen_x, cen_y = int((x1 + x2) / 2), int((y1 + y2) / 2)
        size = int((x3 - cen_x) / 2)
        cv2.circle(img_cheek, (cen_x, cen_y), size, (153, 70, 252), cv2.FILLED)
        # right cheek
        x1, x2 = landmarks.part(45).x, landmarks.part(54).x
        y1, y2 = landmarks.part(45).y, landmarks.part(54).y
        right_x, right_y = int((x1 + x2) / 2), int((y1 + y2) / 2)
        cv2.circle(img_cheek, (right_x, right_y), size, (153, 70, 252), cv2.FILLED)
    return img_cheek

def sunglasses(imgorg, sunglass_img):
    img_sun = imgorg.copy()
    point0 = (int(sunglass_img.shape[1] * 0.2), int(sunglass_img.shape[0] * 0.5))
    point2 = (int(sunglass_img.shape[1] * 0.8), int(sunglass_img.shape[0] * 0.5))
    Src = [(point0[0], point0[1]), (point2[0], point2[1])]  # reference points on the sunglasses image
    sunglass_gray = cv2.cvtColor(sunglass_img, cv2.COLOR_BGR2GRAY)
    ret, mask = cv2.threshold(sunglass_gray, 170, 255, cv2.THRESH_BINARY_INV)
    mask = cv2.merge((mask, mask, mask))
    mask_inv = cv2.bitwise_not(mask)
    sunglass_mask = cv2.bitwise_and(sunglass_img, mask)  # sunglasses only
    eye_mask = cv2.bitwise_and(sunglass_img, mask_inv)   # background (non-sunglasses) region
    eye_mask = cv2.bitwise_not(eye_mask)
    for face in faces:
        landmarks = predictor(imgorg, face)
        left_eye_x, left_eye_y = landmarks.part(36).x, landmarks.part(36).y
        right_eye_x, right_eye_y = landmarks.part(45).x, landmarks.part(45).y
        # store the eye coordinates as destination points for the sunglasses image
        Dst = [(left_eye_x, left_eye_y), (right_eye_x, right_eye_y)]
        # create the 2D transform matrix
        ret = cv2.estimateAffinePartial2D(np.array([Src]), np.array([Dst]))
        transform_matrix = ret[0]
        # warp the sunglasses into position
        transform_sunglass = cv2.warpAffine(sunglass_mask, transform_matrix, (img_sun.shape[1], img_sun.shape[0]))
        # warp the eye mask to the same position and size
        transform_eye = cv2.warpAffine(eye_mask, transform_matrix, (img_sun.shape[1], img_sun.shape[0]))
        sun_face = cv2.bitwise_and(img_sun, transform_eye)
        sun_face = cv2.addWeighted(sun_face, 0.5, transform_sunglass, 0.4, 0)
        face_without_eye = cv2.bitwise_and(img_sun, cv2.bitwise_not(transform_eye))
        img_sun = cv2.add(sun_face, face_without_eye)
    return img_sun

def head(imgorg, hat_img):
    img_head = imgorg.copy()
    point0 = (int(hat_img.shape[1] * 0.1), int(hat_img.shape[0] * 0.5))  # reference points on the headband image
    point2 = (int(hat_img.shape[1] * 0.9), int(hat_img.shape[0] * 0.5))
    Src = [(point0[0] - 40, point0[1]), (point2[0], point2[1])]
    head_gray = cv2.cvtColor(hat_img, cv2.COLOR_BGR2GRAY)
    ret, mask = cv2.threshold(head_gray, 180, 255, cv2.THRESH_BINARY_INV)
    mask = cv2.merge((mask, mask, mask))
    mask_inv = cv2.bitwise_not(mask)
    hat_mask = cv2.bitwise_and(hat_img, mask)       # headband only
    head_mask = cv2.bitwise_and(hat_img, mask_inv)  # headband mask
    head_mask = cv2.bitwise_not(head_mask)
    for face in faces:
        # face-region coordinates
        landmarks = predictor(imgorg, face)
        x1, y1 = face.left(), face.top()
        x2, y2 = face.right(), face.bottom()
        y3 = int(y1 + ((y1 - y2) * 0.3))
        # store the forehead coordinates as destination points for the headband image
        Dst = [(x1 - 40, y3), (x2 + 10, y3)]
        # create the 2D transform matrix
        ret = cv2.estimateAffinePartial2D(np.array([Src]), np.array([Dst]))
        transform_matrix = ret[0]
        # warp the headband into position
        transform_hat = cv2.warpAffine(hat_mask, transform_matrix, (img_head.shape[1], img_head.shape[0]))
        # warp the headband mask to the same position and size
        transform_head = cv2.warpAffine(head_mask, transform_matrix, (img_head.shape[1], img_head.shape[0]))
        head_face = cv2.bitwise_and(img_head, transform_head)
        head_face = cv2.addWeighted(head_face, 0.0, transform_hat, 1.0, 0)
        face_without_head = cv2.bitwise_and(img_head, cv2.bitwise_not(transform_head))
        img_head = cv2.add(head_face, face_without_head)
    return img_head

def faceblur(imgorg):
    # Detect the face region, draw its bounding box, and blur only that area
    for face in faces:
        x1, y1 = face.left(), face.top()
        x2, y2 = face.right(), face.bottom()
        cv2.rectangle(imgorg, (x1, y1), (x2, y2), (0, 0, 0), 2)
        b_face = imgorg[y1:y2, x1:x2]
        b_face = cv2.blur(b_face, (10, 10))
        imgorg[y1:y2, x1:x2] = b_face
    return imgorg

while True:
    ret, img = cap.read()
    if not ret:
        break
    hat_img = cv2.imread("./images/headpin.png")        # headband image
    sunglass_img = cv2.imread("./images/sunglass.png")  # sunglasses image
    scaler = 0.3  # frame resize factor
    img = cv2.resize(img, (int(img.shape[1] * scaler), int(img.shape[0] * scaler)))
    imgorg = img.copy()
    faces = detector(imgorg)  # shared with the functions above via this global
    # apply each effect
    cheek_face = cheek(imgorg)
    sunglass_face = sunglasses(imgorg, sunglass_img)
    hat_face = head(imgorg, hat_img)
    blur_face = faceblur(imgorg)
    # cv2.imshow("img", img)
    cv2.imshow("cheek_face", cheek_face)
    cv2.imshow("sunglass_face", sunglass_face)
    cv2.imshow("hat_face", hat_face)
    cv2.imshow("blur_img", blur_face)
...
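The accessory overlay in this snippet hinges on one trick: cv2.estimateAffinePartial2D fits a similarity transform (rotation, uniform scale, translation) from two reference points on the accessory image to two matching face landmarks, and cv2.warpAffine then places the accessory on a frame-sized canvas ready for masking. A minimal sketch of just that step, with made-up point values and a blank stand-in accessory:

import cv2
import numpy as np

# two reference points on the accessory image (e.g. the lens centers) -- made-up values
src = np.array([[40.0, 60.0], [160.0, 60.0]], dtype=np.float32)
# the matching landmarks detected on the face -- made-up values
dst = np.array([[210.0, 180.0], [290.0, 175.0]], dtype=np.float32)

# fit rotation + uniform scale + translation from the two correspondences
matrix, _inliers = cv2.estimateAffinePartial2D(src, dst)

accessory = np.zeros((120, 200, 3), dtype=np.uint8)  # stand-in accessory image
frame_w, frame_h = 480, 360                          # stand-in frame size

# warp the accessory onto a canvas the size of the video frame
warped = cv2.warpAffine(accessory, matrix, (frame_w, frame_h))
print(matrix.shape, warped.shape)  # (2, 3) (360, 480, 3)

Because the transform is estimated from only two correspondences, the accessory scales and rotates with the face but cannot shear, which is exactly the behavior you want for sunglasses or a headband.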
learn_pyltp.py
Source: learn_pyltp.py
...
    arcs = get_arcs(words)  # dependency parsing
    # print("\t".join("%d:%s" % (arc.head, arc.relation) for arc in arcs))
    parse = [arc.relation for arc in arcs]
    head = [arc.head for arc in arcs]
    head = transform_head(head)
    return parse, head


def get_role(words):
    postags = get_postag(words)  # POS tagging
    arcs = get_arcs(words)       # dependency parsing
    roles = labeller.label(words, postags, arcs)  # semantic role labeling
    for role in roles:
        print(role.index, "".join(["%s:(%d,%d)" % \
            (arg.name, arg.range.start, arg.range.end) \
            for arg in role.arguments]))

    labeller.release()  # release the model

def get_recognize(words):
    postags = get_postag(words)
    netags = recognizer.recognize(words, postags)  # named entity recognition
    return netags
    # recognizer.release()  # was unreachable after return; release the model elsewhere

def transform_head(head):
    '''
    LTP numbers dependency heads from the root node, so a word list indexed
    0:A, 1:B is parsed against 0:root, 1:A, 2:B. For convenience later on,
    shift each head index by one so the heads correspond one-to-one with
    the words in the word list.
    '''
    head_new = []
    for i in head:
        if i == 0:
            head_new.append(0)
        else:
            head_new.append(i - 1)
    return head_new

if __name__ == '__main__':
...
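The transform_head conversion is easiest to see on a concrete parse: LTP reports each word's head as a 1-based index into the sentence (0 meaning the word hangs off the root), so shifting every non-zero head down by one turns head[i] into a direct index into the word list. A quick self-contained check, with an invented sentence and heads:

def transform_head(head):
    # shift LTP's 1-based head indices to 0-based word-list indices;
    # 0 (attached to root) stays 0
    return [i - 1 if i > 0 else 0 for i in head]

words = ["I", "love", "Beijing"]  # invented example sentence
heads = [2, 0, 2]                 # LTP-style heads: "love" is attached to root
print(transform_head(heads))      # [1, 0, 1] -> both other words point at words[1]

Note that the root's 0 survives the shift and now coincides with the index of the first word; downstream code presumably tells roots apart by their relation label.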
marker_roi_heads.py
Source: marker_roi_heads.py
...
        self.decoder_head = decoder_head

    @classmethod
    def from_config(cls, cfg, input_shape):
        ret = super().from_config(cfg)
        transform_head = build_transform_head(cfg, input_shape)
        ret["transform_head"] = transform_head
        corner_input_shape, decoder_input_shape = transform_head.output_shape
        ret["corner_head"] = build_corner_head(cfg, corner_input_shape)
        ret["decoder_head"] = build_decoder_head(cfg, decoder_input_shape)
        return ret

    @property
    def device(self):
        return self.corner_head.device

    def forward(
        self,
        images: ImageList,
        features: Dict[str, torch.Tensor],
        proposals: List[Instances],
        targets: Optional[List[Instances]] = None,
    ) -> Tuple[List[Dict], Dict]:
        # del images
        if self.training:
            assert targets
            proposals = self.label_and_sample_proposals(proposals, targets)
            # del targets

        if self.training:
            corner_features, decoding_features, sample_locations_batch, losses = self.transform_head(images, features, proposals, targets)
            losses.update(self.corner_head(corner_features, proposals))
            losses.update(self.decoder_head(decoding_features, proposals))
            del images, targets
            return [], losses

        corner_features, decoding_features, sample_locations_batch, _ = self.transform_head(images, features, proposals, targets)
        corners_batch = self.corner_head(corner_features, proposals)
        obj_scores_batch, decoded_messages_batch = self.decoder_head(decoding_features, proposals)
        results = []
        for i in range(len(proposals)):
            output = {
                "corners": corners_batch[i], "obj_scores": obj_scores_batch[i],
                "decoded_messages": decoded_messages_batch[i],
                "image_shape": proposals[i].image_size}
            if sample_locations_batch:
                output["sample_locations"] = sample_locations_batch[i]
            results.append(output)
...
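The from_config factory is the part worth imitating: it builds the transform head first, reads its output_shape property, and uses those shapes to size the corner and decoder heads, so the three heads stay dimensionally consistent without duplicating channel counts in the config. A stripped-down sketch of that wiring; every class and builder name below is a hypothetical stand-in, not this repo's API:

class TransformHead:
    """Hypothetical stand-in that produces two feature streams of known shapes."""
    def __init__(self, in_channels):
        self.in_channels = in_channels

    @property
    def output_shape(self):
        # (corner-branch channels, decoder-branch channels), derived from the input
        return self.in_channels * 2, self.in_channels * 4

def build_heads(cfg, input_shape):
    # build the transform head first, then size the downstream heads
    # from the shapes it reports -- mirroring the from_config flow above
    transform_head = TransformHead(input_shape)
    corner_shape, decoder_shape = transform_head.output_shape
    return {
        "transform_head": transform_head,
        "corner_head": {"in_channels": corner_shape},    # stand-in for build_corner_head
        "decoder_head": {"in_channels": decoder_shape},  # stand-in for build_decoder_head
    }

heads = build_heads(cfg={}, input_shape=64)
print(heads["corner_head"], heads["decoder_head"])  # {'in_channels': 128} {'in_channels': 256}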
Check out the latest blogs from LambdaTest on this topic:
I routinely come across test strategy documents when working with customers. They are lengthy, often 100 pages or more, and packed with monotonous text that is reused from one project to another: test halt and resume conditions, the defect management procedure, entry and exit criteria, unnecessary generic risks, and one often-used template that simply replicates textbook testing requirements, from stress to systems integration.
When I started writing tests with Cypress, my instinct was always to use the user interface to interact with the application and change its state while running tests.
Pair testing can help you complete your testing tasks faster and with higher quality. But who can do pair testing, and when should it be done? And what form of pair testing is best for your circumstance? Check out this blog for more information on how to conduct pair testing to optimize its benefits.
People love to watch, read and interact with quality content — especially video content. Whether it is sports, news, TV shows, or videos captured on smartphones, people crave digital content. The emergence of OTT platforms has already shaped the way people consume content. Viewers can now enjoy their favorite shows whenever they want rather than at pre-set times. Thus, the OTT platform’s concept of viewing anything, anytime, anywhere has hit the right chord.
Have you ever visited a website that only has plain text and images? Most probably, no. It’s because such websites do not exist now. But there was a time when websites only had plain text and images with almost no styling. For the longest time, websites did not focus on user experience. For instance, this is how eBay’s homepage looked in 1999.