Best Python code snippet using localstack_python
detect.py
Source: detect.py
#!/usr/bin/env python3
import os
os.environ['KMP_DUPLICATE_LIB_OK'] = 'True'
import triangulation as tri
import calibration
# Python imports
import numpy as np
import cv2
import math
import message_filters
from geometry_msgs.msg import TransformStamped
import tf2_ros
import tf
# ROS imports
import rospy
import scipy.io as sio
import std_msgs.msg
# Deep Learning imports
import torch
import yaml
from cv_bridge import CvBridge, CvBridgeError
from geometry_msgs.msg import Point32, Polygon
from numpy import random
from rospkg import RosPack
from sensor_msgs.msg import Image, LaserScan
from skimage.transform import resize
from std_msgs.msg import UInt8
from torch.autograd import Variable
from yolov5_pytorch_ros.msg import BoundingBox, BoundingBoxes
from models.experimental import attempt_load
from utils.datasets import LoadImages, LoadStreams
from utils.general import (apply_classifier, check_img_size,
                           check_requirements, increment_path,
                           non_max_suppression, scale_coords, set_logging,
                           strip_optimizer)
from utils.plots import plot_one_box
# util + model imports
from utils.torch_utils import load_classifier, select_device, time_synchronized

package = RosPack()
package_path = package.get_path('yolov5_pytorch_ros')
topic_tf_child = "/object"
topic_tf_parent = "/base_link"

# Kalman filter state shared across callbacks
P = np.zeros((4, 4))
x_hat = None
prev = [0, 0]
initialized = 0
t = TransformStamped()
tf2_br = tf2_ros.TransformBroadcaster()


class Detector:
    def __init__(self):
        # Load weights parameter
        self.weights_path = rospy.get_param('~weights_path')
        rospy.loginfo("Found weights, loading %s", self.weights_path)
        # Raise error if it cannot find the model
        if not os.path.isfile(self.weights_path):
            raise IOError('{:s} not found.'.format(self.weights_path))
        # Load image parameter and confidence threshold
        self.image_topic = rospy.get_param(
            '~image_topic', '/camera/rgb/image_raw')
        rospy.loginfo("Subscribing to image topic: %s", self.image_topic)
        self.conf_thres = rospy.get_param('~confidence', 0.25)
        # Kalman filter matrices: measurement (C), input (B), state
        # transition (A), process noise (Q), measurement noise (R), identity
        self.C = np.array([[1, 0, 1, 0],
                           [0, 1, 0, 1],
                           [0, 0, 0, 0],
                           [0, 0, 0, 0]])
        self.B = np.array([0, 0, 0, 0])
        dt = 0.2
        self.A = np.array([[1.0, 0, dt, 0],
                           [0, 1.0, 0, dt],
                           [0, 0, 1, 0],
                           [0, 0, 0, 1]])
        self.Q = np.array([[1.0, 0, 0, 0],
                           [0, 1.0, 0, 0],
                           [0, 0, 0.1, 0],
                           [0, 0, 0, 0.1]])
        self.R = np.array([[0.1, 0, 1, 0],
                           [0, 0.1, 0, 1],
                           [0, 0, 0.1, 0],
                           [0, 0, 0, 0.1]])
        self.I = np.eye(4)
        # Load other parameters
        self.device_name = ''
        self.device = select_device(self.device_name)
        self.gpu_id = rospy.get_param('~gpu_id', 0)
        self.network_img_size = rospy.get_param('~img_size', 416)
        self.publish_image = rospy.get_param('~publish_image')
        self.iou_thres = 0.45
        self.augment = True
        self.classes = None
        self.agnostic_nms = False
        self.w = 0
        self.h = 0
        # Second-stage classifier
        self.classify = False
        # Initialize
        self.half = self.device.type != 'cpu'  # half precision only supported on CUDA
        # Load model
        self.model = attempt_load(
            self.weights_path, map_location=self.device)  # load FP32 model
        self.stride = int(self.model.stride.max())  # model stride
        self.network_img_size = check_img_size(
            self.network_img_size, s=self.stride)  # check img_size
        if self.half:
            self.model.half()  # to FP16
        if self.classify:
            self.modelc = load_classifier(name='resnet101', n=2)  # initialize
            self.modelc.load_state_dict(torch.load(
                'weights/resnet101.pt', map_location=self.device)['model'])
            self.modelc.to(self.device).eval()
        # Get names and colors
        self.names = self.model.module.names if hasattr(
            self.model, 'module') else self.model.names
        self.colors = [[random.randint(0, 255)
                        for _ in range(3)] for _ in self.names]
        # Run inference
        if self.device.type != 'cpu':
            self.model(torch.zeros(1, 3, self.network_img_size, self.network_img_size).to(
                self.device).type_as(next(self.model.parameters())))  # run once
        # Camera intrinsics (row-major 3x3) and distortion coefficients
        self.K = [203.42144086256206, 0.0, 206.12517266886093,
                  0.0, 203.55319738398958, 146.4392791209304,
                  0.0, 0.0, 1.0]
        self.camera_parameters = np.array(self.K).reshape(3, 3)
        self.D = [-0.18296735250090237, 0.49168852367941696, -0.6266727991904993, 0.2636407064533411, 0]
        self.camera_distortion_param = np.array([-0.426801, 0.249082, -0.002705, -0.001600, 0.000000])
        # Load CvBridge
        self.bridge = CvBridge()
        # Load publisher topics
        self.detected_objects_topic = rospy.get_param(
            '~detected_objects_topic')
        self.published_image_topic = rospy.get_param('~detections_image_topic')
        # Define subscribers
        self.image_sub = message_filters.Subscriber(
            self.image_topic, Image)
        self.scan_sub = message_filters.Subscriber(
            "/scan", LaserScan)
        self.image_sub2 = message_filters.Subscriber(
            "/camera2/image_raw", Image)
        ts = message_filters.ApproximateTimeSynchronizer(
            [self.image_sub, self.scan_sub, self.image_sub2], 1, 1)
        ts.registerCallback(self.image_cb)
        # Define publishers
        self.pub_ = rospy.Publisher(
            self.detected_objects_topic, BoundingBoxes, queue_size=10)
        self.pub_viz_ = rospy.Publisher(
            self.published_image_topic, Image, queue_size=10)
        rospy.loginfo("Launched node for object detection")
        # Spin
        rospy.spin()

    def lidar_cb(self, data):
        self.ranges = data

    def pubTf(self, position, orientation):
        """
        Publish a found object to tf2.
        :param position:
        :param orientation:
        :return:
        """
        global t
        t.header.stamp = rospy.Time.now()
        t.header.frame_id = "/base_link"
        t.child_frame_id = "/object"
        t.transform.translation.x = position[0]
        t.transform.translation.y = position[1]
        t.transform.translation.z = position[2]
        quaternion = tf.transformations.quaternion_from_euler(
            orientation[0], orientation[1], orientation[2])
        t.transform.rotation.x = quaternion[0]
        t.transform.rotation.y = quaternion[1]
        t.transform.rotation.z = quaternion[2]
        t.transform.rotation.w = quaternion[3]
        tf2_br.sendTransform(t)

    def kalmanFilter(self, v):
        global initialized, P, x_hat, prev
        v = np.array(v)
        if initialized == 0:
            x_hat = v
            prev = v[0:1]
            initialized = 1
        else:
            # Measurement update (matrix products, not elementwise '*')
            K = P @ self.C.T @ np.linalg.inv(self.C @ P @ self.C.T + self.R)
            x_hat = x_hat + K @ (v - self.C @ x_hat)
            # Time update
            x_hat = self.A @ x_hat
            P = self.A @ P @ self.A.T + self.Q
            P = (self.I - K @ self.C) @ P
        return x_hat

    def image_cb(self, data, data2, data3):
        global prev
        # Convert the images to OpenCV format
        self.ranges = data2.ranges
        try:
            self.cv_img = self.bridge.imgmsg_to_cv2(data, "rgb8")
        except CvBridgeError as e:
            print(e)
        try:
            self.cv_img2 = self.bridge.imgmsg_to_cv2(data3, "rgb8")
        except CvBridgeError as e:
            print(e)
        # Initialize detection results
        B = 5   # stereo baseline
        f = 10  # focal length
        detection_results = BoundingBoxes()
        detection_results.header = data.header
        detection_results.image_header = data.header
        input_img = self.preprocess(self.cv_img)
        input_img2 = self.preprocess(self.cv_img2)
        # input_img = Variable(input_img.type(torch.FloatTensor))
        # input_img2 = Variable(input_img2.type(torch.FloatTensor))
        # Get detections from network
        with torch.no_grad():
            dtype = torch.half if self.half else torch.float
            input_img = torch.from_numpy(input_img).to(self.device, dtype=dtype)
            input_img2 = torch.from_numpy(input_img2).to(self.device, dtype=dtype)
            detections = self.model(input_img)[0]
            detections2 = self.model(input_img2)[0]
            detections = non_max_suppression(detections, self.conf_thres, self.iou_thres,
                                             classes=self.classes, agnostic=self.agnostic_nms)
            detections2 = non_max_suppression(detections2, self.conf_thres, self.iou_thres,
                                              classes=self.classes, agnostic=self.agnostic_nms)
        alpha = 127.22339616  # camera horizontal field of view [deg]
        # Parse detections
        i = 0
        strq = 'a'
        if detections[0] is not None:
            for detection in detections[0]:
                kp = 0.2  # stereo weight in the lidar/stereo depth fusion
                try:
                    xmin2, ymin2, xmax2, ymax2, conf2, det_class2 = detections2[0][i]
                except Exception:
                    pass
                # Get xmin, ymin, xmax, ymax, confidence and class
                xmin, ymin, xmax, ymax, conf, det_class = detection
                if self.names[int(det_class)] not in ['bin_rect', 'bin_side', 'bin_cir']:
                    continue
                # Undo the letterbox padding applied in preprocess()
                pad_x = max(self.h - self.w, 0) * \
                    (self.network_img_size/max(self.h, self.w))
                pad_y = max(self.w - self.h, 0) * \
                    (self.network_img_size/max(self.h, self.w))
                unpad_h = self.network_img_size-pad_y
                unpad_w = self.network_img_size-pad_x
                xmin_unpad = ((xmin-pad_x//2)/unpad_w)*self.w
                xmax_unpad = ((xmax-xmin)/unpad_w)*self.w + xmin_unpad
                ymin_unpad = ((ymin-pad_y//2)/unpad_h)*self.h
                ymax_unpad = ((ymax-ymin)/unpad_h)*self.h + ymin_unpad
                xmin_unpad = xmin_unpad.cpu().detach().numpy()
                xmax_unpad = xmax_unpad.cpu().detach().numpy()
                ymin_unpad = ymin_unpad.cpu().detach().numpy()
                ymax_unpad = ymax_unpad.cpu().detach().numpy()
                pad_x2 = max(self.h - self.w, 0) * \
                    (self.network_img_size/max(self.h, self.w))
                pad_y2 = max(self.w - self.h, 0) * \
                    (self.network_img_size/max(self.h, self.w))
                unpad_h2 = self.network_img_size-pad_y2
                unpad_w2 = self.network_img_size-pad_x2
                try:
                    xmin_unpad2 = ((xmin2-pad_x2//2)/unpad_w2)*self.w
                    xmax_unpad2 = ((xmax2-xmin2)/unpad_w2)*self.w + xmin_unpad2
                    ymin_unpad2 = ((ymin2-pad_y2//2)/unpad_h2)*self.h
                    ymax_unpad2 = ((ymax2-ymin2)/unpad_h2)*self.h + ymin_unpad2
                    xmin_unpad2 = xmin_unpad2.cpu().detach().numpy()
                    xmax_unpad2 = xmax_unpad2.cpu().detach().numpy()
                    ymin_unpad2 = ymin_unpad2.cpu().detach().numpy()
                    ymax_unpad2 = ymax_unpad2.cpu().detach().numpy()
                    center_point_right = ((xmin_unpad2-xmax_unpad2//2), (ymin_unpad2-ymax_unpad2//2))
                    center_point = ((xmin_unpad-xmax_unpad//2), (ymin_unpad-ymax_unpad//2))
                    frame_right, frame_left = calibration.undistortRectify(self.cv_img, self.cv_img2)
                    depth2 = tri.find_depth(center_point_right, center_point, frame_right, frame_left, B, f, alpha)
                    depth2 = depth2/10
                except Exception:
                    # No matching detection in the second camera: fall back to lidar only
                    kp = 0
                    depth2 = 0
                # Populate darknet message
                detection_msg = BoundingBox()
                detection_msg.xmin = int(xmin_unpad)
                detection_msg.xmax = int(xmax_unpad)
                detection_msg.ymin = int(ymin_unpad)
                detection_msg.ymax = int(ymax_unpad)
                detection_msg.probability = float(conf)
                detection_msg.Class = self.names[int(det_class)]
                # Look up the lidar range at the detection's bearing
                angle = (2.22/408)*((xmin_unpad-xmax_unpad)//2)
                index = int((angle+0.462)//0.014032435603439808)
                depth1 = self.ranges[index]
                depth2 = abs(depth2)
                if math.isnan(depth1):
                    depth1 = 0
                fx = 203.42144086256206
                fy = 203.55319738398958
                rotation_rad = [0, 0, 0]
                cx = 206.12517266886093
                cy = 146.4392791209304
                # Complementary fusion of the lidar and stereo depth estimates
                kn = 1-kp
                depth = kn*depth1 + kp*depth2
                v = [depth,
                     depth * (((410-(xmax_unpad-xmin_unpad//2))-cx)/fx),
                     prev[0]-depth,
                     prev[1]-(((410-(xmax_unpad-xmin_unpad//2))-cx)/fx)]
                prev = [depth, depth * (((410-(xmax_unpad-xmin_unpad//2))-cx)/fx)]
                v_out = v  # note: kalmanFilter(v) is defined above but not applied here
                print("lidar:", depth1, " stereo:", depth2, " fused:", depth, " kalman out:", v_out[1])
                t = TransformStamped()
                tf2_br = tf2_ros.TransformBroadcaster()
                t.header.stamp = rospy.Time.now()
                t.header.frame_id = "/base_link"
                t.child_frame_id = "/object"+strq
                strq += 'a'
                t.transform.translation.x = v_out[0]
                t.transform.translation.y = v_out[1]
                t.transform.translation.z = 0
                quaternion = tf.transformations.quaternion_from_euler(0, 0, 0)
                t.transform.rotation.x = quaternion[0]
                t.transform.rotation.y = quaternion[1]
                t.transform.rotation.z = quaternion[2]
                t.transform.rotation.w = quaternion[3]
                tf2_br.sendTransform(t)
                # Append to the overall detection message
                detection_results.bounding_boxes.append(detection_msg)
        # Publish detection results
        self.pub_.publish(detection_results)
        # Visualize detection results
        if self.publish_image:
            self.visualize_and_publish(detection_results, self.cv_img)
        return True

    def preprocess(self, img):
        # Extract image and shape
        img = np.copy(img)
        img = img.astype(float)
        height, width, channels = img.shape
        if (height != self.h) or (width != self.w):
            self.h = height
            self.w = width
            # Determine padded (square) image to be used
            self.padded_image = np.zeros(
                (max(self.h, self.w), max(self.h, self.w), channels)).astype(float)
        # Add padding
        if self.w > self.h:
            self.padded_image[(self.w-self.h)//2: self.h +
                              (self.w-self.h)//2, :, :] = img
        else:
            self.padded_image[:, (self.h-self.w)//2: self.w +
                              (self.h-self.w)//2, :] = img
        # Resize and normalize
        input_img = resize(self.padded_image, (self.network_img_size, self.network_img_size, 3))/255.
        # Channels-first
        input_img = np.transpose(input_img, (2, 0, 1))
        # As pytorch tensor
        # input_img = torch.from_numpy(input_img).float()
        input_img = input_img[None]
        return input_img

    def visualize_and_publish(self, output, imgIn):
        # Copy image and visualize
        imgOut = imgIn.copy()
        font = cv2.FONT_HERSHEY_SIMPLEX
        fontScale = 0.8
        thickness = 2
        for index in range(len(output.bounding_boxes)):
            label = output.bounding_boxes[index].Class
            x_p1 = output.bounding_boxes[index].xmin
            y_p1 = output.bounding_boxes[index].ymin
            x_p3 = output.bounding_boxes[index].xmax
            y_p3 = output.bounding_boxes[index].ymax
            confidence = output.bounding_boxes[index].probability
            # Set class color
            color = self.colors[self.names.index(label)]
            # Create rectangle
            cv2.rectangle(imgOut, (int(x_p1), int(y_p1)), (int(x_p3), int(y_p3)),
                          (color[0], color[1], color[2]), thickness)
            text = '{:s}: {:.3f}'.format(label, confidence)
            cv2.putText(imgOut, text, (int(x_p1), int(y_p1+20)), font,
                        fontScale, (255, 255, 255), thickness, cv2.LINE_AA)
        # Publish visualization image
        image_msg = self.bridge.cv2_to_imgmsg(imgOut, "rgb8")
        image_msg.header.frame_id = 'camera'
        image_msg.header.stamp = rospy.Time.now()
        self.pub_viz_.publish(image_msg)


if __name__ == '__main__':
    rospy.init_node('detector')
    # Define detector object...
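The `triangulation` and `calibration` modules imported above are project-local and not shown on this page, so only the call sites are visible. As a rough guide, here is a hedged sketch of what a `find_depth` helper like the one called above typically computes for a rectified stereo pair, deriving the focal length in pixels from the horizontal field of view `alpha`; the project's actual implementation may differ in units and sign conventions:

    import numpy as np

    def find_depth(right_point, left_point, frame_right, frame_left, baseline, f, alpha):
        # Focal length in pixels from image width and horizontal FOV (degrees)
        height, width, _ = frame_right.shape
        f_pixel = (width * 0.5) / np.tan(alpha * 0.5 * np.pi / 180)
        # Horizontal disparity between the matched points
        disparity = left_point[0] - right_point[0]
        if disparity == 0:
            return float('inf')  # zero disparity: depth is undefined
        # Depth comes out in the same unit as the baseline
        return (baseline * f_pixel) / disparity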
test_Padding.py
Source: test_Padding.py
...
        padded = pad(b(""), 4)
        self.assertTrue(padded == uh(b("04040404")))
        padded = pad(b(""), 4, 'pkcs7')
        self.assertTrue(padded == uh(b("04040404")))
        back = unpad(padded, 4)
        self.assertTrue(back == b(""))
    def test2(self):
        padded = pad(uh(b("12345678")), 4)
        self.assertTrue(padded == uh(b("1234567804040404")))
        back = unpad(padded, 4)
        self.assertTrue(back == uh(b("12345678")))
    def test3(self):
        padded = pad(uh(b("123456")), 4)
        self.assertTrue(padded == uh(b("12345601")))
        back = unpad(padded, 4)
        self.assertTrue(back == uh(b("123456")))
    def test4(self):
        padded = pad(uh(b("1234567890")), 4)
        self.assertTrue(padded == uh(b("1234567890030303")))
        back = unpad(padded, 4)
        self.assertTrue(back == uh(b("1234567890")))
    def testn1(self):
        self.assertRaises(ValueError, pad, uh(b("12")), 4, 'pkcs8')
    def testn2(self):
        self.assertRaises(ValueError, unpad, b("\0\0\0"), 4)
        self.assertRaises(ValueError, unpad, b(""), 4)
    def testn3(self):
        self.assertRaises(ValueError, unpad, b("123456\x02"), 4)
        self.assertRaises(ValueError, unpad, b("123456\x00"), 4)
        self.assertRaises(ValueError, unpad, b("123456\x05\x05\x05\x05\x05"), 4)
class X923_Tests(unittest.TestCase):
    def test1(self):
        padded = pad(b(""), 4, 'x923')
        self.assertTrue(padded == uh(b("00000004")))
        back = unpad(padded, 4, 'x923')
        self.assertTrue(back == b(""))
    def test2(self):
        padded = pad(uh(b("12345678")), 4, 'x923')
        self.assertTrue(padded == uh(b("1234567800000004")))
        back = unpad(padded, 4, 'x923')
        self.assertTrue(back == uh(b("12345678")))
    def test3(self):
        padded = pad(uh(b("123456")), 4, 'x923')
        self.assertTrue(padded == uh(b("12345601")))
        back = unpad(padded, 4, 'x923')
        self.assertTrue(back == uh(b("123456")))
    def test4(self):
        padded = pad(uh(b("1234567890")), 4, 'x923')
        self.assertTrue(padded == uh(b("1234567890000003")))
        back = unpad(padded, 4, 'x923')
        self.assertTrue(back == uh(b("1234567890")))
    def testn1(self):
        self.assertRaises(ValueError, unpad, b("123456\x02"), 4, 'x923')
        self.assertRaises(ValueError, unpad, b("123456\x00"), 4, 'x923')
        self.assertRaises(ValueError, unpad, b("123456\x00\x00\x00\x00\x05"), 4, 'x923')
        self.assertRaises(ValueError, unpad, b(""), 4, 'x923')
class ISO7816_Tests(unittest.TestCase):
    def test1(self):
        padded = pad(b(""), 4, 'iso7816')
        self.assertTrue(padded == uh(b("80000000")))
        back = unpad(padded, 4, 'iso7816')
        self.assertTrue(back == b(""))
    def test2(self):
        padded = pad(uh(b("12345678")), 4, 'iso7816')
        self.assertTrue(padded == uh(b("1234567880000000")))
        back = unpad(padded, 4, 'iso7816')
        self.assertTrue(back == uh(b("12345678")))
    def test3(self):
        padded = pad(uh(b("123456")), 4, 'iso7816')
        self.assertTrue(padded == uh(b("12345680")))
        #import pdb; pdb.set_trace()
        back = unpad(padded, 4, 'iso7816')
        self.assertTrue(back == uh(b("123456")))
    def test4(self):
        padded = pad(uh(b("1234567890")), 4, 'iso7816')
        self.assertTrue(padded == uh(b("1234567890800000")))
        back = unpad(padded, 4, 'iso7816')
        self.assertTrue(back == uh(b("1234567890")))
    def testn1(self):
        self.assertRaises(ValueError, unpad, b("123456\x81"), 4, 'iso7816')
        self.assertRaises(ValueError, unpad, b(""), 4, 'iso7816')
def get_tests(config={}):
    tests = []
    tests += list_test_cases(PKCS7_Tests)
    tests += list_test_cases(X923_Tests)
    tests += list_test_cases(ISO7816_Tests)
    return tests
if __name__ == '__main__':
    suite = lambda: unittest.TestSuite(get_tests())
...
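These tests match the `pad`/`unpad` helpers in PyCryptodome's `Crypto.Util.Padding` (the `b()` and `uh()` wrappers appear to come from the package's own test utilities and just build byte strings from hex). A minimal round trip with plain `bytes`, reusing the expected values from `test4` above:

    from Crypto.Util.Padding import pad, unpad

    data = bytes.fromhex("1234567890")
    padded = pad(data, 4)  # PKCS#7 is the default style
    assert padded == bytes.fromhex("1234567890030303")
    assert unpad(padded, 4) == data
    # The other two supported styles, with the expected bytes from the tests above
    assert pad(data, 4, style='x923') == bytes.fromhex("1234567890000003")
    assert pad(data, 4, style='iso7816') == bytes.fromhex("1234567890800000")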
test_utils_functional.py
Source: test_utils_functional.py
...
@pytest.mark.parametrize(
    ["shape", "padding"],
    [((1, 3, 221, 234), 32), ((1, 3, 256, 256), 32), ((1, 3, 512, 512), 16), ((1, 3, 512, 512), 7)],
)
def test_pad_unpad(shape, padding):
    x = torch.randn(shape)
    x_padded, pad_params = pad_image_tensor(x, pad_size=padding)
    assert x_padded.size(2) % padding == 0
    assert x_padded.size(3) % padding == 0
    y = unpad_image_tensor(x_padded, pad_params)
    assert (x == y).all()

@pytest.mark.parametrize(["shape", "padding"], [((1, 3, 512, 512), (7, 13))])
def test_pad_unpad_nonsymmetric(shape, padding):
    x = torch.randn(shape)
    x_padded, pad_params = pad_image_tensor(x, pad_size=padding)
    assert x_padded.size(2) % padding[0] == 0
    assert x_padded.size(3) % padding[1] == 0
    y = unpad_image_tensor(x_padded, pad_params)
    assert (x == y).all()
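`pad_image_tensor` and `unpad_image_tensor` are imported from the library under test (the import is truncated above), so only their contract is visible here: pad an NCHW tensor until height and width are multiples of `pad_size`, return the parameters needed to undo the padding, and guarantee an exact round trip. A self-contained sketch of that contract, assuming symmetric zero-padding (the library's actual padding placement may differ):

    import torch
    import torch.nn.functional as F

    def pad_image_tensor(x, pad_size=32):
        # Accept an int or a (pad_h, pad_w) tuple, as in the tests above
        ph, pw = (pad_size, pad_size) if isinstance(pad_size, int) else pad_size
        h, w = x.size(2), x.size(3)
        pad_h = (ph - h % ph) % ph
        pad_w = (pw - w % pw) % pw
        top, left = pad_h // 2, pad_w // 2
        bottom, right = pad_h - top, pad_w - left
        return F.pad(x, (left, right, top, bottom)), (top, bottom, left, right)

    def unpad_image_tensor(x_padded, pad_params):
        # Crop away exactly what pad_image_tensor added
        top, bottom, left, right = pad_params
        h, w = x_padded.size(2), x_padded.size(3)
        return x_padded[:, :, top:h - bottom, left:w - right]

    x = torch.randn(1, 3, 221, 234)
    x_padded, params = pad_image_tensor(x, 32)
    assert x_padded.size(2) % 32 == 0 and x_padded.size(3) % 32 == 0
    assert (unpad_image_tensor(x_padded, params) == x).all()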