Best Python code snippet using Airtest
main.py
Source:main.py
...29 PointInImg.update({idx: f})30 Descriptor.update({idx: d})31 # Show match result32 if len(Images) > 1:33 match.show_match_image(Images[0], Images[1])34 # Compute homography35 Transform = {0: np.identity(3)}36 PointInPano = [PointInImg[0][i].pt for i in range(len(PointInImg[0]))]37 DescriptorInPano = Descriptor[0]38 for idx in range(1, len(imgList)):39 print('fitting transformation from ' + str(idx) + ' to pano\t')40 M = SIFTMatcher(DescriptorInPano, Descriptor[idx], THRESHOLD)41 print('matching points:', len(M, ), '\n')42 dst_pts = np.float32([PointInPano[m[0]] for m in M]).reshape(-1, 1, 2)43 src_pts = np.float32([PointInImg[idx][m[1]].pt for m in M]).reshape(-1, 1, 2)44 # H, mask = cv2.findHomography(src_pts, dst_pts, cv2.RANSAC,5.0)45 H = func.findHomographyRANSAC(src_pts.reshape(-1,2), dst_pts.reshape(-1,2))46 # Remove matched keypoints before appending47 indices = [m[1] for m in M]...
match.py
Source:match.py
import cv2
import numpy as np

# Minimum number of ratio-test survivors required before we trust a homography.
MIN_MATCH_COUNT = 10


def show_match_image(img1, img2):
    """Match SIFT features between two images, display the matches, and
    stitch ``img2`` onto ``img1`` using a RANSAC homography.

    Parameters
    ----------
    img1 : numpy.ndarray
        Query image (BGR). It is pasted unchanged at the panorama origin.
    img2 : numpy.ndarray
        Train image (BGR). It is warped into ``img1``'s frame.

    Returns
    -------
    tuple
        ``(matching, result)`` — the match visualization, and the stitched
        panorama, or ``None`` when fewer than ``MIN_MATCH_COUNT`` good
        matches were found.

    Side effects
    ------------
    Opens a GUI window showing the matches and blocks until a key is pressed.
    """
    # SIFT lives in xfeatures2d in older OpenCV builds (needs opencv-contrib).
    sift = cv2.xfeatures2d.SIFT_create()
    # Keypoints + 128-D descriptors for both images.
    kp1, des1 = sift.detectAndCompute(img1, None)
    kp2, des2 = sift.detectAndCompute(img2, None)
    # Brute-force matcher; k=2 so Lowe's ratio test can compare best vs. 2nd best.
    bf = cv2.BFMatcher()
    matches = bf.knnMatch(des1, des2, k=2)
    # Lowe's ratio test: keep a match only if it is clearly better than
    # the runner-up (0.75 is the conventional threshold).
    good = [m for m, n in matches if m.distance < 0.75 * n.distance]
    # cv2.drawMatchesKnn expects a list of lists of matches.
    good_2 = np.expand_dims(good, 1)
    matching = cv2.drawMatchesKnn(img1, kp1, img2, kp2, good_2[:20], None, flags=2)
    # BUG FIX: 'result' was previously unbound when there were too few good
    # matches, making the final return raise NameError. Default it to None.
    result = None
    if len(good) > MIN_MATCH_COUNT:
        # Coordinates of the matched keypoints in each image.
        src_pts = np.float32([kp1[m.queryIdx].pt for m in good]).reshape(-1, 1, 2)
        dst_pts = np.float32([kp2[m.trainIdx].pt for m in good]).reshape(-1, 1, 2)
        # Homography mapping img2's frame into img1's frame (RANSAC, 5 px
        # reprojection threshold). Note the (dst, src) argument order: we
        # warp img2 towards img1.
        H, mask = cv2.findHomography(dst_pts, src_pts, cv2.RANSAC, 5.0)
        # BUG FIX: canvas size previously used img2's dimensions twice;
        # include img1's so the warp is not clipped when sizes differ.
        wrap = cv2.warpPerspective(
            img2, H,
            (img1.shape[1] + img2.shape[1], img1.shape[0] + img2.shape[0]))
        # BUG FIX: the pasted region must match img1's shape (was img2's),
        # which raised ValueError whenever the two images differed in size.
        wrap[0:img1.shape[0], 0:img1.shape[1]] = img1
        result = wrap
    cv2.imshow('Match Result', matching)
    cv2.waitKey(0)
    cv2.destroyAllWindows()
    return matching, result


if __name__ == '__main__':
    img1 = cv2.imread('data/Rainier1.png')  # queryImage
    img2 = cv2.imread('data/Rainier3.png')  # trainImage
Learn to execute automation testing from scratch with the LambdaTest Learning Hub. Right from setting up the prerequisites to running your first automation test, to following best practices and diving deeper into advanced test scenarios, the LambdaTest Learning Hub compiles a list of step-by-step guides to help you become proficient with different test automation frameworks, e.g., Selenium, Cypress, and TestNG.
You can also refer to the video tutorials on the LambdaTest YouTube channel for step-by-step demonstrations from industry experts.
Get 100 minutes of automation testing for FREE!