# Best Python code snippet using pandera_python
# imdb.py
# Source: imdb.py
...29 for attr in self.cache_attr:30 if attr in cache:31 setattr(self, attr, cache[attr])32 except:33 self._parse_annotation()34 self._set_bbox_range()35 self._bbox_sanity()36 self._set_minmax()37 self._save_cache()38 self._filter()39 self._order()40 else:41 self.imgpath = np.empty(0)42 self.imgsize = np.empty((0, 2), dtype=np.int32)43 self.bbox_original = np.empty((0, 4), dtype=np.int32)44 self.bbox_min = np.empty((0, 4), dtype=np.int32)45 self.bbox_max = np.empty((0, 4), dtype=np.int32)46 self.minsize = np.empty(0, dtype=np.int32)47 self.maxsize = np.empty(0, dtype=np.int32)48 # For UMDFaces49 self.angle = np.empty((0, 3), dtype=np.float32) # yaw, pitch, roll50 self.points = np.empty((0, 21, 2), dtype=np.int32) # 21 points51 self.lab = np.empty((0, 3), dtype=np.float32) # mean LAB of face region52 # For Photoshop53 self.partname = np.empty(0)54 self.level = np.empty(0, dtype=np.int32)55 self.bbox_mask = np.empty((0, 4), dtype=np.int32)56 def _set(self, kwargs):57 self.ignore_cache = False 58 def _parse_annotation(self):59 raise NotImplementedError()60 def _set_bbox_range(self):61 raise NotImplementedError()62 def _bbox_sanity(self):63 self.bbox_min[:,:2] = np.maximum(0, self.bbox_min[:,:2]) # top, left (minbox)64 self.bbox_max[:,:2] = np.maximum(0, self.bbox_max[:,:2]) # top, left (maxbox)65 if hasattr(self, 'imgsize'):66 self.bbox_min[:,2] = np.minimum(self.imgsize[:,0], self.bbox_min[:,2]) # bottom67 self.bbox_max[:,2] = np.minimum(self.imgsize[:,0], self.bbox_max[:,2]) # bottom68 self.bbox_min[:,3] = np.minimum(self.imgsize[:,1], self.bbox_min[:,3]) # right69 self.bbox_max[:,3] = np.minimum(self.imgsize[:,1], self.bbox_max[:,3]) # right70 def _set_minmax(self):71 min_width = self.bbox_min[:,3] - self.bbox_min[:,1]72 min_height = self.bbox_min[:,2] - self.bbox_min[:,0]73 max_width = self.bbox_max[:,3] - self.bbox_max[:,1]74 max_height = self.bbox_max[:,2] - self.bbox_max[:,0]75 self.minsize = np.maximum(min_width, (min_height / 
self.cfg.dataset.ar).astype(np.int32))76 self.maxsize = np.minimum(max_width, (max_height / self.cfg.dataset.ar).astype(np.int32))77 def _filter(self):78 reso_th = self.cfg.dataset.resolution_thres_factor * self.cfg.dataset.resolution79 reso_th = max(self.cfg.dataset.min_original_resolution, reso_th)80 mask_w = (self.bbox_max[:,3] - self.bbox_max[:,1]) > reso_th81 mask_h = (self.bbox_max[:,2] - self.bbox_max[:,0]) > reso_th * self.cfg.dataset.ar82 mask_minmax = self.minsize <= self.maxsize83 mask = mask_w & mask_h & mask_minmax 84 if hasattr(self, 'angle'):85 angle_th = self.cfg.dataset.angle_th86 mask_angle = (-angle_th < self.angle) & (self.angle < angle_th)87 mask_angle = mask_angle.all(1)88 mask = mask & mask_angle89 90 self.imgpath = self.imgpath[mask]91 self.bbox_original = self.bbox_original[mask]92 self.bbox_min = self.bbox_min[mask]93 self.bbox_max = self.bbox_max[mask]94 self.minsize = self.minsize[mask]95 self.maxsize = self.maxsize[mask]96 if hasattr(self, 'imgsize'):97 self.imgsize = self.imgsize[mask]98 if hasattr(self, 'angle'):99 self.angle = self.angle[mask]100 if hasattr(self, 'points'):101 self.points = self.points[mask]102 if hasattr(self, 'lab'):103 self.lab = self.lab[mask]104 if hasattr(self, 'partname'):105 self.partname = self.partname[mask]106 if hasattr(self, 'level'):107 self.level = self.level[mask]108 if hasattr(self, 'bbox_mask'):109 self.bbox_mask = self.bbox_mask[mask]110 def _order(self):111 idx = np.argsort(self.imgpath, axis=0)112 self.imgpath = self.imgpath[idx]113 self.bbox_original = self.bbox_original[idx]114 self.bbox_min = self.bbox_min[idx]115 self.bbox_max = self.bbox_max[idx]116 self.minsize = self.minsize[idx]117 self.maxsize = self.maxsize[idx]118 if hasattr(self, 'imgsize'):119 self.imgsize = self.imgsize[idx]120 if hasattr(self, 'angle'):121 self.angle = self.angle[idx]122 if hasattr(self, 'points'):123 self.points = self.points[idx]124 if hasattr(self, 'lab'):125 self.lab = self.lab[idx]126 if hasattr(self, 
'partname'):127 self.partname = self.partname[idx]128 if hasattr(self, 'level'):129 self.level = self.level[idx]130 if hasattr(self, 'bbox_mask'):131 self.bbox_mask = self.bbox_mask[idx]132 133 def split(self, num_2):134 imdb_1 = Imdb(self.cfg, self.name, virtual=True, logger=self.logger)135 imdb_2 = Imdb(self.cfg, self.name, virtual=True, logger=self.logger)136 # num_val = self.cfg.dataset.num_val[self.name]137 num_1 = len(self.imgpath) - num_2138 imdb_1.imgpath = self.imgpath[:num_1]139 imdb_1.bbox_original = self.bbox_original[:num_1]140 imdb_1.bbox_min = self.bbox_min[:num_1]141 imdb_1.bbox_max = self.bbox_max[:num_1]142 imdb_1.minsize = self.minsize[:num_1]143 imdb_1.maxsize = self.maxsize[:num_1]144 imdb_2.imgpath = self.imgpath[num_1:]145 imdb_2.bbox_original = self.bbox_original[num_1:]146 imdb_2.bbox_min = self.bbox_min[num_1:]147 imdb_2.bbox_max = self.bbox_max[num_1:]148 imdb_2.minsize = self.minsize[num_1:]149 imdb_2.maxsize = self.maxsize[num_1:]150 if hasattr(self, 'imgsize'):151 imdb_1.imgsize = self.imgsize[:num_1]152 imdb_2.imgsize = self.imgsize[num_1:]153 if hasattr(self, 'angle'):154 imdb_1.angle = self.angle[:num_1]155 imdb_2.angle = self.angle[num_1:]156 if hasattr(self, 'points'):157 imdb_1.points = self.points[:num_1]158 imdb_2.points = self.points[num_1:]159 160 if hasattr(self, 'lab'):161 imdb_1.lab = self.lab[:num_1]162 imdb_2.lab = self.lab[num_1:]163 if hasattr(self, 'partname'):164 imdb_1.partname = self.partname[:num_1]165 imdb_2.partname = self.partname[num_1:]166 if hasattr(self, 'level'):167 imdb_1.level = self.level[:num_1]168 imdb_2.level = self.level[num_1:]169 if hasattr(self, 'bbox_mask'):170 imdb_1.bbox_mask = self.bbox_mask[:num_1]171 imdb_2.bbox_mask = self.bbox_mask[num_1:]172 return imdb_1, imdb_2 173 def _check_cache(self, cfg):174 assert not self.ignore_cache175 with open(self.cache_filename, 'rb') as f:176 cache = pickle.load(f)177 return cache178 def _save_cache(self):179 os.makedirs(self.cfg.cache_path, 
exist_ok=True)180 with open(self.cache_filename, 'wb') as f:181 cache_data = {}182 for attr in self.cache_attr:183 if hasattr(self, attr):184 cache_data[attr] = getattr(self, attr)185 pickle.dump(cache_data, f)186class Union(Imdb):187 def _set(self, kwargs):188 self.train = kwargs['train']189 self.imdbs = dict()190 self.weights = dict()191 self.total = 0192 def merge(self, imdb, weight=0.0):193 assert self.virtual194 self.imdbs[imdb.name] = imdb195 self.weights[imdb.name] = weight196 def initialize(self):197 if self.train:198 total_candidates = [199 int(len(self.imdbs[imdb_name].imgpath) / self.weights[imdb_name]) \200 for imdb_name in self.imdbs.keys()]201 self.total = np.array(total_candidates).min()202 else:203 self.total = 0204 def reset(self):205 self.imgpath = np.empty(0)206 self.imgsize = np.empty((0, 2), dtype=np.int32)207 self.bbox_original = np.empty((0, 4), dtype=np.int32)208 self.bbox_min = np.empty((0, 4), dtype=np.int32)209 self.bbox_max = np.empty((0, 4), dtype=np.int32)210 self.minsize = np.empty(0, dtype=np.int32)211 self.maxsize = np.empty(0, dtype=np.int32)212 # For UMDFaces213 self.angle = np.empty((0, 3), dtype=np.float32) # yaw, pitch, roll214 self.points = np.empty((0, 21, 2), dtype=np.int32) # 21 points215 self.lab = np.empty((0, 3), dtype=np.float32) # mean LAB of face region216 # For Photoshop217 self.partname = np.empty(0)218 self.level = np.empty(0, dtype=np.int32)219 self.bbox_mask = np.empty((0, 4), dtype=np.int32)220 for imdb_name in self.imdbs.keys():221 imdb = self.imdbs[imdb_name]222 length = len(imdb.imgpath)223 weight = self.weights[imdb_name]224 if self.train:225 num_select = int(self.total * weight)226 select = np.random.choice(length, size=num_select, replace=False)227 else:228 select = np.arange(length)229 num_select = len(select)230 self.imgpath = np.append(self.imgpath, imdb.imgpath[select], axis=0)231 self.bbox_original = np.append(self.bbox_original, imdb.bbox_original[select], axis=0)232 self.bbox_min = 
np.append(self.bbox_min, imdb.bbox_min[select], axis=0)233 self.bbox_max = np.append(self.bbox_max, imdb.bbox_max[select], axis=0)234 self.minsize = np.append(self.minsize, imdb.minsize[select], axis=0)235 self.maxsize = np.append(self.maxsize, imdb.maxsize[select], axis=0)236 if hasattr(imdb, 'imgsize') and len(imdb.imgsize) == length:237 self.imgsize = np.append(self.imgsize, imdb.imgsize[select], axis=0)238 else:239 self.imgsize = np.append(self.imgsize, np.zeros((num_select, 2)), axis=0)240 if hasattr(imdb, 'angle') and len(imdb.angle) == length:241 self.angle = np.append(self.angle, imdb.angle[select], axis=0)242 else:243 self.angle = np.append(self.angle, np.ones((num_select, 3)) * (-360), axis=0)244 245 if hasattr(imdb, 'points') and len(imdb.points) == length:246 self.points = np.append(self.points, imdb.points[select], axis=0)247 else:248 self.points = np.append(self.points, np.zeros((num_select, 21, 2)), axis=0)249 250 if hasattr(imdb, 'lab') and len(imdb.lab) == length:251 self.lab = np.append(self.lab, imdb.lab[select], axis=0)252 else:253 self.lab = np.append(self.lab, np.zeros((num_select, 3)), axis=0)254class AFLW(Imdb):255 def _set(self, kwargs):256 raise NotImplementedError()257 def _parse_annotation(self):258 root = os.path.join(self.cfg.data_path, self.name)259 import sqlite3 as sq260 aflw_sq_path = os.path.join(root, 'data/aflw.sqlite') 261 aflw_sq = sq.connect(aflw_sq_path)262 aflw_cur = aflw_sq.cursor()263 aflw_cur.execute("SELECT name FROM sqlite_master WHERE type='table';")264 table_name = aflw_cur.fetchall()265 # fetch image_name, face_rect and feature coordinates from db# fetch i 266 faces = aflw_cur.execute("SELECT * FROM Faces")267 face_ids = faces.fetchall()268 face_names = []269 face_rects = []270 face_features = []271 imgpath = list()272 bbox = list()273 for i in range(len(face_ids)): 274 # get face_id and file_id275 face_id = face_ids[i][0]276 file_id_sqlite = "SELECT file_id FROM Faces WHERE face_id ='" + str(face_id) + "'"277 
file_id = aflw_cur.execute(file_id_sqlite).fetchall()278 file_id = file_id[0][0] # 'img00035.jpg'279 if len(file_id) < 1:280 continue281 282 # get file_path283 face_name_query = "SELECT filepath FROM FaceImages WHERE file_id = '"+ file_id + "'"284 face_name = aflw_cur.execute(face_name_query).fetchall()285 face_name = face_name[0][0] # '3/image00035.jpg'286 # rect287 feature_rect_query = "SELECT FaceRect.x,FaceRect.y,FaceRect.w,FaceRect.h FROM FaceRect WHERE face_id ='" + str(face_id) + "'"288 feature_rect = aflw_cur.execute(feature_rect_query).fetchall() # [(62, 64, 348, 348)]289 if len(feature_rect) < 1:290 continue291 292 feature_rect = feature_rect[0]293 x = feature_rect[0]294 y = feature_rect[1]295 w = feature_rect[2]296 h = feature_rect[3]297 298 # coor (normalize to 0~1)299 feature_coor_query = "SELECT descr,FeatureCoords.x,FeatureCoords.y FROM FeatureCoords,FeatureCoordTypes WHERE face_id ='" + str(face_id) + "' AND FeatureCoords.feature_id = FeatureCoordTypes.feature_id"300 feature_coor = aflw_cur.execute(feature_coor_query).fetchall() 301 coor_x = [-1 for k in range(5)]302 coor_y = [-1 for k in range(5)]303 for j in range(len(feature_coor)):304 if feature_coor[j][0] == 'LeftEyeCenter':305 coor_x[0] = feature_coor[j][1]306 coor_y[0] = feature_coor[j][2]307 elif feature_coor[j][0] == 'RightEyeCenter':308 coor_x[1] = feature_coor[j][1]309 coor_y[1] = feature_coor[j][2]310 elif feature_coor[j][0] == 'NoseCenter':311 coor_x[2] = feature_coor[j][1]312 coor_y[2] = feature_coor[j][2]313 elif feature_coor[j][0] == 'MouthLeftCorner':314 coor_x[3] = feature_coor[j][1]315 coor_y[3] = feature_coor[j][2]316 elif feature_coor[j][0] == 'MouthRightCorner':317 coor_x[4] = feature_coor[j][1]318 coor_y[4] = feature_coor[j][2]319 320 coor = []321 coor.append(coor_x)322 coor.append(coor_y)323 imgpath.append(os.path.join(self.name, 'aflw/data/flickr', face_name))324 bbox.append([y, x, y + h, x + w])325 # self.coor.append(coor)326 aflw_cur.close()327 aflw_sq.close()328 
self.imgpath = np.array(imgpath)329 self.bbox_original = np.array(bbox, dtype=np.int32)330 def _set_bbox_range(self):331 top, left, bottom, right = np.split(self.bbox_original, np.arange(1, 4), axis=1)332 h = bottom - top333 w = right - left334 size_min = np.max((h, w), 0) * 1.2335 size_max = np.max((h, w), 0) * 1.4336 cx = np.mean((left, right), 0)337 cy = np.mean((top, bottom), 0)338 cx_min = cx339 cx_max = cx340 cy_min = cy - 0.15 * size_min341 cy_max = cy - 0.15 * size_max342 self.bbox_min = np.hstack((343 cy_min - 0.5 * size_min,344 cx_min - 0.4 * size_min,345 cy_min + 0.5 * size_min,346 cx_min + 0.4 * size_min)).astype(np.int32)347 self.bbox_max = np.hstack((348 cy_max - 0.5 * size_max,349 cx_max - 0.5 * size_max,350 cy_max + 0.5 * size_max,351 cx_max + 0.5 * size_max)).astype(np.int32)352class CelebA(Imdb):353 354 def _set(self, kwargs):355 raise NotImplementedError()356 def _parse_annotation(self):357 root = os.path.join(self.cfg.data_path, self.name)358 anno_file = open(os.path.join(root, 'Anno/list_bbox_celeba.txt'), 'r')359 lines = anno_file.readlines()360 for line in lines[2:]:361 assert '.jpg' in line362 elem = list(filter(lambda a: a != '', line.strip().split(' ')))363 imgname = elem[0]364 left, top, width, height = tuple(elem[1:])365 left = int(left)366 top = int(top)367 width = int(width)368 height = int(height)369 bbox = [top, left, top + height, left + width]370 relpath = os.path.join(self.name, 'Img/img_celeba')371 self.imgpath.append(os.path.join(relpath, imgname))372 self.bbox_original.append(bbox)373 anno_file.close()374 375 def _set_bbox_range(self):376 for bb_o in self.bbox_original:377 top, left, bottom, right = bb_o378 h = bottom - top379 w = right - left380 center_x = (left + right) / 2381 w_min = 0.8 * w382 top_min = int(top + 0.1 * h)383 left_min = int(center_x - w_min / 2)384 bottom_min = int(bottom - 0.25 * h)385 right_min = int(center_x + w_min / 2)386 center_y = (top_min + bottom_min) / 2387 h_new = 2 * (bottom_min - top_min)388 
w_new = 2 * (right_min - left_min)389 top_pad = int(center_y - h_new / 2)390 left_pad = int(center_x - w_new / 2)391 bottom_pad = int(center_y + h_new / 2)392 right_pad = int(center_x + w_new / 2)393 bbox_min = [top_min, left_min, bottom_min, right_min]394 bbox_max = [top_pad, left_pad, bottom_pad, right_pad]395 self.bbox_min.append(bbox_min)396 self.bbox_max.append(bbox_max)397class CelebA_HQ(Imdb):398 def _set(self, kwargs):399 self.num_img = 30 * 1000400 def _parse_annotation(self):401 imgpath = list()402 imgsize = list()403 bbox = list()404 for i in range(self.num_img):405 imgname = '{:06d}.png'.format(i)406 imgpath.append(os.path.join(self.name, imgname))407 imgsize.append([1024, 1024])408 bbox.append([0, 0, 1024, 1024])409 410 self.imgpath = np.array(imgpath)411 self.imgsize = np.array(imgsize, dtype=np.int32)412 self.bbox_original = np.array(bbox, dtype=np.int32)413 414 def _set_bbox_range(self):415 self.bbox_min = np.copy(self.bbox_original)416 self.bbox_max = np.copy(self.bbox_original)417 # offset_x = 212418 # offset_y = 170419 # min_size_x = 600420 # min_size_y = 650421 # top = offset_y422 # bottom = top + min_size_y423 # left = offset_x424 # right = left + min_size_x425 # length = len(self.bbox_original)426 # self.bbox_min = np.array([[top, left, bottom, right]] * length, dtype=np.int32)427 # self.bbox_max = np.array([[0, 0, 1024, 1024]] * length, dtype=np.int32)428class IJB_C(Imdb):429 def _set(self, kwargs):430 raise NotImplementedError()431 def _parse_annotation(self):432 root = os.path.join(self.cfg.data_path, self.name)433 anno_file = open(os.path.join(root, 'protocols/ijbc_face_detection_ground_truth.csv'))434 lines = anno_file.readlines()435 for line in lines[1:]:436 imgpath, left, top, width, height, ignore = tuple(line.strip().split(','))437 if imgpath.split('/')[0] != 'nonfaces':438 left = int(left)439 top = int(top)440 width = int(width)441 height = int(height)442 ignore = int(ignore)443 imgpath = os.path.join(self.name, 'images', imgpath)444 
bbox = [top, left, top + height, left + width]445 self.imgpath.append(imgpath)446 self.bbox_original.append(bbox)447 # self.ignore.append(ignore)448 anno_file.close()449 def _set_bbox_range(self):450 for bb_o in self.bbox_original:451 top, left, bottom, right = bb_o452 center_y = (top + bottom) / 2453 center_x = (left + right) / 2454 h = bottom - top455 w = right - left456 # top_min = int(top)457 # left_min = int(left + 0.1 * w)458 # bottom_min = int(bottom)459 # right_min = int(right - 0.1 * w)460 h_new = 2 * h461 w_new = 2 * w462 top_pad = int(center_y - h_new / 2)463 left_pad = int(center_x - w_new / 2)464 bottom_pad = int(center_y + h_new / 2)465 right_pad = int(center_x + w_new / 2)466 # bbox_min = [top_min, left_min, bottom_min, right_min]467 bbox_min = bb_o468 bbox_max = [top_pad, left_pad, bottom_pad, right_pad]469 self.bbox_min.append(bbox_min)470 self.bbox_max.append(bbox_max)471class UMDFaces(Imdb):472 473 def _parse_annotation(self):474 with open(os.path.join(self.cfg.cache_path, 'UMDFaces_img.csv')) as f:475 imgpath = list()476 imgsize = list()477 bbox = list()478 angle = list() # yaw, pitch, roll479 points = list() # 21 points480 lab = list()481 rdr = csv.DictReader(f)482 for row in rdr:483 imgpath.append(os.path.join(self.name, 'umdfaces_batch{}'.format(int(row['fold'])), row['FILE']))484 imgsize.append([int(row['img_h']), int(row['img_w'])])485 bbox.append([486 float(row['FACE_Y']),487 float(row['FACE_X']),488 float(row['FACE_Y']) + float(row['FACE_HEIGHT']),489 float(row['FACE_X']) + float(row['FACE_WIDTH'])])490 angle.append([491 float(row['YAW']),492 float(row['PITCH']),493 float(row['ROLL'])])494 pts = []495 for i in range(21):496 pts.append([497 float(row['P{}X'.format(i + 1)]),498 float(row['P{}Y'.format(i + 1)])499 # int(float(row['VIS{}'.format(i + 1)]))500 ])501 points.append(pts)502 lab.append([503 float(row['meanL']) * 100 / 255,504 float(row['meanA']) - 128,505 float(row['meanB']) - 128])506 self.imgpath = np.array(imgpath)507 
self.imgsize = np.array(imgsize, dtype=np.int32)508 self.bbox_original = np.array(bbox, dtype=np.int32)509 self.angle = np.array(angle, dtype=np.float32)510 self.points = np.array(points, dtype=np.int32)511 self.lab = np.array(lab, dtype=np.float32)512 def _set_bbox_range(self):513 top, left, bottom, right = np.split(self.bbox_original, np.arange(1, 4), axis=1)514 h = bottom - top515 w = right - left516 size_min = np.max((h, w), 0) * 1.3517 size_max = np.max((h, w), 0) * 1.7518 cx = np.mean((left, right), 0)519 cy = np.mean((top, bottom), 0)520 th_x, th_y = 0.2, 0.2521 yaw, pitch, roll = np.split(self.angle, 3, axis=1)522 # x523 cx_delta_min = 0.3 * np.sin(np.radians(yaw))524 cx_delta_max = 0.3 * np.sin(np.radians(yaw))525 cx_min = cx - w * np.clip(cx_delta_min, -th_x, th_x)526 cx_max = cx - w * np.clip(cx_delta_max, -th_x, th_x)527 # y528 cy_delta_min = 0.1 * h * np.sin(np.radians(pitch)) + 0.15 * size_min529 cy_delta_max = 0.1 * h * np.sin(np.radians(pitch)) + 0.15 * size_max530 cy_min = cy - np.clip(cy_delta_min, -th_y * size_min, th_y * size_min)531 cy_max = cy - np.clip(cy_delta_max, -th_y * size_max, th_y * size_max)532 533 self.bbox_min = np.hstack((534 cy_min - 0.5 * size_min,535 cx_min - 0.4 * size_min,536 cy_min + 0.5 * size_min,537 cx_min + 0.4 * size_min)).astype(np.int32)538 self.bbox_max = np.hstack((539 cy_max - 0.5 * size_max,540 cx_max - 0.5 * size_max,541 cy_max + 0.5 * size_max,542 cx_max + 0.5 * size_max)).astype(np.int32)543class PGGAN_published_100k(CelebA_HQ):544 def _set(self, kwargs):545 self.num_img = 100 * 1000546class PGGAN_published_nGPUs(Imdb):547 def _parse_annotation(self):548 root = os.path.join(self.cfg.data_path, self.name)549 imgpath = list()550 bbox_original = list()551 imgsize = list()552 for dirpath, dirnames, filenames in os.walk(root):553 if len(filenames) > 0 and len(dirnames) == 0:554 for fn in filenames:555 if 'tick=' in fn:556 p = re.compile('_tick=\d+_')557 tick = int(p.search(fn).group()[6:-1])558 # tick = 
int(fn.split('_')[-2].split('=')[-1])559 resolution = 2 ** int((tick - 1 + 3000) / 1200)560 bbox = [0, 0, resolution, resolution]561 _dirpath = dirpath[len(self.cfg.data_path) + 1:]562 imgpath.append(os.path.join(_dirpath, fn))563 bbox_original.append(bbox)564 imgsize.append([resolution, resolution])565 self.imgpath = np.array(imgpath)566 self.bbox_original = np.array(bbox_original, dtype=np.int32)567 self.imgsize = np.array(imgsize, dtype=np.int32)568 569 def _set_bbox_range(self):570 self.bbox_min = np.copy(self.bbox_original)571 self.bbox_max = np.copy(self.bbox_original)572 # ox_ratio = 212 / 1024573 # oy_ratio = 170 / 1024574 # minx_ratio = 600 / 1024575 # miny_ratio = 650 / 1024576 # for i in range(len(self.bbox_original)):577 # h = self.bbox_original[i][2] - self.bbox_original[i][0]578 # w = self.bbox_original[i][3] - self.bbox_original[i][1]579 # if h >= 512 and w >= 512:580 # top = int(h * ox_ratio)581 # left = int(w * oy_ratio)582 # height = int(h * minx_ratio)583 # width = int(w * miny_ratio)584 # self.bbox_min.append([top, left, top + height, left + width])585 # self.bbox_max.append(self.bbox_original[i])586 # else:587 # self.bbox_min.append(self.bbox_original[i])588 # self.bbox_max.append(self.bbox_original[i])589 # self.bbox_min = np.array(self.bbox_min, dtype=np.int32)590 # self.bbox_max = np.array(self.bbox_max, dtype=np.int32)591class PGGAN_trained(PGGAN_published_nGPUs):592 pass593class Glow(Imdb):594 def _set(self, kwargs):595 if 'test' in kwargs:596 self.test = kwargs['test']597 else:598 self.test = None599 def _parse_annotation(self):600 if self.test:601 imglist = glob(os.path.join(self.cfg.data_path, self.name, 'test', '*.jpg'))602 else:603 imglist = glob(os.path.join(self.cfg.data_path, self.name, 'train', '*.jpg'))604 imglist = ['/'.join(_.split('/')[-3:]) for _ in imglist]605 self.imgpath = np.array(imglist)606 self.bbox_original = np.array([[0, 0, 256, 256]] * len(imglist), dtype=np.int32)607 self.imgsize = np.array([[256, 256]] * 
len(imglist), dtype=np.int32)608 609 def _set_bbox_range(self):610 self.bbox_min = np.copy(self.bbox_original)611 self.bbox_max = np.copy(self.bbox_original)612class Glow_CelebA_HQ(Glow):613 pass614class Glow_UMDFaces(Glow):615 pass616class StarGAN_CelebA(Glow):617 pass618class BEGAN(Imdb):619 def _set(self, kwargs):620 raise NotImplementedError()621 def _parse_annotation(self):622 root = os.path.join(self.cfg.data_path, self.name)623 for dirpath, dirnames, filenames in os.walk(root):624 if len(filenames) > 0 and len(dirnames) == 0:625 for fn in filenames:626 p = re.compile('_size=\d+_')627 size = int(p.search(fn).group()[6:-1])628 bbox = [0, 0, size, size]629 _dirpath = dirpath[len(self.cfg.data_path) + 1:]630 self.imgpath.append(os.path.join(_dirpath, fn))631 self.bbox_original.append(bbox)632 633 def _set_bbox_range(self):634 self.bbox_min = np.copy(self.bbox_original)635 self.bbox_max = np.copy(self.bbox_original)636class DCGAN(Imdb):637 def _set(self, kwargs):638 raise NotImplementedError()639 640 def _parse_annotation(self):641 root = os.path.join(self.cfg.data_path, self.name)642 for dirpath, dirnames, filenames in os.walk(root):643 if len(filenames) > 0 and len(dirnames) == 0:644 for fn in filenames:645 bbox = [0, 0, 64, 64]646 _dirpath = dirpath[len(self.cfg.data_path) + 1:]647 self.imgpath.append(os.path.join(_dirpath, fn))648 self.bbox_original.append(bbox)649 650 def _set_bbox_range(self):651 self.bbox_min = np.copy(self.bbox_original)652 self.bbox_max = np.copy(self.bbox_original)653class LSGAN(DCGAN):654 pass655class DRAGAN(DCGAN):656 pass657class WGAN_GP(DCGAN):658 pass659class Photoshop(Imdb):660 def __init__(self, cfg, name=None, virtual=False, logger=None, **kwargs):661 super(Photoshop, self).__init__(cfg, name=name, virtual=virtual, logger=logger, **kwargs)662 weight = 0663 for k, v in self.cfg.dataset.swap_parts.items():664 weight += v665 assert math.isclose(weight, 1.0), 'Sum of swap_parts weight should be 1.0, but {}'.format(weight)666 def 
_parse_annotation(self):667 with open(os.path.join(self.cfg.data_path, 'Photoshop', 'Photoshop_result.csv')) as f:668 imgpath = list()669 imgsize = list()670 bbox = list()671 partname = list()672 level = list()673 bbox_mask = list()674 rdr = csv.DictReader(f)675 for row in rdr:676 imgpath.append(os.path.join(677 'Photoshop', 678 'Photoshop_' + self.cfg.dataset.photoshop_ext, 679 row['filename'] + '.' + self.cfg.dataset.photoshop_ext)680 )681 imgsize.append([int(row['h']), int(row['w'])])682 bbox.append([683 int(row['face_y']),684 int(row['face_x']),685 int(row['face_y']) + int(row['face_h']),686 int(row['face_x']) + int(row['face_w'])])687 partname.append(row['partname'])688 level.append(int(row['level']))689 bbox_mask.append([690 int(row['mask_y']),691 int(row['mask_x']),692 int(row['mask_y']) + int(row['mask_h']),693 int(row['mask_x']) + int(row['mask_w'])694 ])695 self.imgpath = np.array(imgpath)696 self.imgsize = np.array(imgsize, dtype=np.int32)697 self.bbox_original = np.array(bbox, dtype=np.int32)698 self.partname = np.array(partname)699 self.level = np.array(level, dtype=np.int32)700 self.bbox_mask = np.array(bbox_mask, dtype=np.int32)701 def _filter(self):702 super(Photoshop, self)._filter()703 partname_valid = [key for key, value in self.cfg.dataset.swap_parts.items() if value > 0] 704 mask_part = np.zeros(len(self.imgpath), dtype=np.bool)705 for pv in partname_valid:706 mask_part = mask_part | (self.partname == pv)707 708 mask = mask_part709 self.imgpath = self.imgpath[mask]710 self.imgsize = self.imgsize[mask]711 self.bbox_original = self.bbox_original[mask]712 self.bbox_min = self.bbox_min[mask]713 self.bbox_max = self.bbox_max[mask]714 self.minsize = self.minsize[mask]715 self.maxsize = self.maxsize[mask]716 if hasattr(self, 'angle'):717 self.angle = self.angle[mask]718 if hasattr(self, 'points'):719 self.points = self.points[mask]720 if hasattr(self, 'lab'):721 self.lab = self.lab[mask]722 if hasattr(self, 'partname'):723 self.partname = 
self.partname[mask]724 if hasattr(self, 'level'):725 self.level = self.level[mask]726 if hasattr(self, 'bbox_mask'):727 self.bbox_mask = self.bbox_mask[mask]728 def _set_bbox_range(self):729 top, left, bottom, right = np.split(self.bbox_original, np.arange(1, 4), axis=1)730 h = bottom - top731 w = right - left732 size_min = np.max((h, w), 0) * 1.1733 size_max = np.max((h, w), 0) * 1.5734 cx = np.mean((left, right), 0)735 cy = np.mean((top, bottom), 0) - 0.1 * h736 self.bbox_min = np.hstack((737 cy - 0.5 * size_min,738 cx - 0.4 * size_min,739 cy + 0.5 * size_min,740 cx + 0.4 * size_min)).astype(np.int32)741 self.bbox_max = np.hstack((742 cy - 0.5 * size_max,743 cx - 0.5 * size_max,744 cy + 0.5 * size_max,745 cx + 0.5 * size_max)).astype(np.int32)746class Sample_1_M1_Real(Imdb):747 ''' 201~400 real images from Sample_1, task 1 '''748 def _parse_annotation(self):749 with open(os.path.join(self.cfg.data_path, 'RnDChallenge', 'Sample_1_M1_imsize.csv'), 'r') as f:750 rdr = csv.DictReader(f)751 imgpath = list()752 bbox_original = list()753 imgsize = list()754 for row in rdr:755 idx = int(re.findall('\d{5}', row['filename'])[0])756 if idx > 200:757 imgpath.append(os.path.join('RnDChallenge', 'Sample_1', row['filename']))758 h = int(row['h'])759 w = int(row['w'])760 bbox_original.append([0, 0, h, w])761 imgsize.append([h, w])762 763 self.imgpath = np.array(imgpath)764 self.bbox_original = np.array(bbox_original, dtype=np.int32)765 self.imgsize = np.array(imgsize, dtype=np.int32)766 def _set_bbox_range(self):767 self.bbox_min = np.copy(self.bbox_original)768 self.bbox_max = np.copy(self.bbox_original)769class Sample_1_M2_Real(Imdb):770 ''' Real images from Sample_1, task 2 '''771 def _set(self, kwargs):772 self.ignore_cache = True773 self.real_fake = 'real'774 self.subdir = 'Sample_1'775 self.anno_file = 'Sample_1_M2_bbox.csv'776 def _parse_annotation(self):777 root = os.path.join(self.cfg.data_path, 'RnDChallenge')778 with open(os.path.join(root, self.anno_file)) as f:779 
imgpath = list()780 imgsize = list()781 bbox = list()782 rdr = csv.DictReader(f)783 for row in rdr:784 if row['real_fake'] != self.real_fake:785 continue786 imgpath.append(os.path.join('RnDChallenge', self.subdir, row['filename']))787 imgsize.append([int(row['h']), int(row['w'])])788 bbox.append([789 int(row['face_y']),790 int(row['face_x']),791 int(row['face_y']) + int(row['face_h']),792 int(row['face_x']) + int(row['face_w'])])793 self.imgpath = np.array(imgpath)794 self.imgsize = np.array(imgsize, dtype=np.int32)795 self.bbox_original = np.array(bbox, dtype=np.int32) 796 def _set_bbox_range(self):797 top, left, bottom, right = np.split(self.bbox_original, np.arange(1, 4), axis=1)798 h = bottom - top799 w = right - left800 size_min = np.max((h, w), 0) * 1.1801 size_max = np.max((h, w), 0) * 1.5802 cx = np.mean((left, right), 0)803 cy = np.mean((top, bottom), 0) - 0.1 * h804 self.bbox_min = np.hstack((805 cy - 0.5 * size_min,806 cx - 0.4 * size_min,807 cy + 0.5 * size_min,808 cx + 0.4 * size_min)).astype(np.int32)809 self.bbox_max = np.hstack((810 cy - 0.5 * size_max,811 cx - 0.5 * size_max,812 cy + 0.5 * size_max,813 cx + 0.5 * size_max)).astype(np.int32)814class Sample_1_M2_Syn(Sample_1_M2_Real):815 ''' Fake images from Sample_1, task 2 '''816 def _set(self, kwargs):817 self.ignore_cache = True818 self.real_fake = 'fake'819 self.subdir='Sample_1'820 self.anno_file = 'Sample_1_M2_bbox.csv'821class Sample_1_M1_GAN(Imdb):822 ''' 1~200 fake images from Sample_1, task 1 '''823 def _parse_annotation(self):824 with open(os.path.join(self.cfg.data_path, 'RnDChallenge', 'Sample_1_M1_imsize.csv'), 'r') as f:825 rdr = csv.DictReader(f)826 imgpath = list()827 bbox_original = list()828 imgsize = list()829 for row in rdr:830 idx = int(re.findall('\d{5}', row['filename'])[0])831 if idx <= 200:832 imgpath.append(os.path.join('RnDChallenge', 'Sample_1', row['filename']))833 h = int(row['h'])834 w = int(row['w'])835 bbox_original.append([0, 0, h, w])836 imgsize.append([h, w])837 
838 self.imgpath = np.array(imgpath)839 self.bbox_original = np.array(bbox_original, dtype=np.int32)840 self.imgsize = np.array(imgsize, dtype=np.int32)841 def _set_bbox_range(self):842 self.bbox_min = np.copy(self.bbox_original)843 self.bbox_max = np.copy(self.bbox_original)844class Sample_2_GAN(Imdb):845 ''' 60 gan images from Sample_2 '''846 def _parse_annotation(self):847 root = os.path.join(self.cfg.data_path, 'RnDChallenge', 'Sample_2', 'gan_jpg')848 imglist = glob(os.path.join(root, 'gan_*.jpg'))849 imgpath = [os.path.join('RnDChallenge', 'Sample_2', 'gan_jpg', os.path.basename(_)) for _ in imglist]850 bbox_original = [[0, 0, 128, 128]] * len(imgpath)851 imgsize = [[0, 0, 128, 128]] * len(imgpath)852 assert len(imgpath) > 0853 self.imgpath = np.array(imgpath)854 self.bbox_original = np.array(bbox_original, dtype=np.int32)855 self.imgsize = np.ones((self.imgpath.shape[0], 2), dtype=np.int32) * 128856 def _set_bbox_range(self):857 self.bbox_min = np.copy(self.bbox_original)858 self.bbox_max = np.copy(self.bbox_original)859class Sample_2_Syn(Imdb):860 ''' Fake images from Sample_2 '''861 def _set(self, kwargs):862 self.ignore_cache = True863 def _parse_annotation(self):864 root = os.path.join(self.cfg.data_path, 'RnDChallenge', 'Sample_2', 'syn')865 imgpath = glob(os.path.join(root, '*.jpg'))866 imgpath = ['/'.join(_.split('/')[-4:]) for _ in imgpath]867 bbox_original = [[0, 0, 128, 128]] * len(imgpath)868 assert len(imgpath) > 0869 self.imgpath = np.array(imgpath)870 self.bbox_original = np.array(bbox_original, dtype=np.int32) 871 self.imgsize = np.ones((self.imgpath.shape[0], 2), dtype=np.int32) * 128872 def _set_bbox_range(self):873 self.bbox_min = np.copy(self.bbox_original)...
# test_interfaceless.py
# Source: test_interfaceless.py
# NOTE(review): this span was recovered from a scraped listing in which the
# original line numbers had been fused into the code text (e.g. "49def ...");
# they are stripped here. The first statements are the tail of a truncated
# test function whose header is not visible in this chunk.
print(ts.to_json(True))
assert 0 == len(ts.inputs)
assert 0 == len(ts.outputs)
# TODO: not tested on func and metadata because that is supposed to change to something else


def test__parse_annotation():
    """_parse_annotation maps a typing annotation to dict(data_type=..., nullable=...)."""
    # Missing / empty / Any annotations all degrade to a nullable object.
    assert dict(data_type=object, nullable=True) == _parse_annotation(None)
    assert dict(data_type=object, nullable=True) == _parse_annotation(
        inspect.Parameter.empty)
    assert dict(data_type=object, nullable=True) == _parse_annotation(Any)
    # Concrete types are non-nullable; generics collapse to their origin type.
    assert dict(data_type=int, nullable=False) == _parse_annotation(int)
    assert dict(data_type=dict, nullable=False) == _parse_annotation(Dict[str, Any])
    # Optional[X] and Union[..., None] mark the parsed type as nullable.
    assert dict(data_type=object, nullable=True) == _parse_annotation(
        Optional[Any])
    assert dict(data_type=str, nullable=True) == _parse_annotation(
        Optional[str])
    assert dict(data_type=dict, nullable=True) == _parse_annotation(
        Optional[Dict[str, Any]])
    assert dict(data_type=dict, nullable=True) == _parse_annotation(
        Union[None, Dict[str, Any]])
    assert dict(data_type=dict, nullable=True) == _parse_annotation(
        Union[Dict[str, Any], None])
    assert dict(data_type=dict, nullable=True) == _parse_annotation(
        Union[Dict[str, Any], None, None])
    # A Union of a single type is just that type, non-nullable.
    assert dict(data_type=dict, nullable=False) == _parse_annotation(
        Union[Dict[str, Any]])
    # Ambiguous multi-type unions and bare NoneType are rejected.
    raises(TypeError, lambda: _parse_annotation(Union[Dict[str, Any], List[str]]))
    raises(TypeError, lambda: _parse_annotation(Union[Dict[str, Any], List[str], None]))
    raises(TypeError, lambda: _parse_annotation(Union[None]))
    raises(TypeError, lambda: _parse_annotation(Union[None, None]))
    raises(TypeError, lambda: _parse_annotation(type(None)))


def test__get_origin_type():
    """_get_origin_type unwraps typing generics to their runtime origin class."""
    assert _get_origin_type(Any) is object
    assert _get_origin_type(Dict[str, Any]) is dict
    assert _get_origin_type(List[str]) is list
    assert _get_origin_type(List[Any]) is list
    assert _get_origin_type(Tuple[int, str]) is tuple
    # With the second argument False, a Union is returned unflattened.
    assert _get_origin_type(Union[int, str], False) is Union
    assert _get_origin_type(Union[None]) is type(None)
    assert _get_origin_type(int) is int


# Fixture functions exercising annotation parsing, including string ("lazy")
# annotations and a parameter with no annotation at all.
def f1(a: Optional[int], b: "int", c: str, d: "Optional[str]" = "x") -> "int":
    return a + b


def f2(a: int, b, c: int) -> "Tuple[int,Optional[str]]":
    return a + c


def f3(a: int, b: str, c: int):
    ...  # body truncated in the source chunk
lidc_adapter.py
Source: lidc_adapter.py
# NOTE(review): recovered from a scraped listing with fused line numbers; the
# imports preceding these two lines and the tail of _get_pixel are truncated
# in this chunk.
from pydicom.pixel_data_handlers.numpy_handler import pack_bits

from .adapter_base import AdapterBase


class LIDCAdapter(AdapterBase):
    """Adapter that merges LIDC-IDRI XML nodule annotations into DICOM datasets."""

    @classmethod
    def _parse_annotation(cls, dataset: Dataset, annotation_path: str) -> np.ndarray:
        """Rasterize matching nodule edge maps into a Rows x Columns 0/1 mask.

        Only ROIs under the first ``readingSession`` whose ``imageSOP_UID``
        equals the dataset's SOPInstanceUID are used; each edge point sets a
        single pixel of the mask to 1.
        """
        tree = etree.parse(annotation_path, etree.XMLParser())
        root = tree.getroot()
        reading_session = root.find(path='readingSession', namespaces=root.nsmap)
        roi_list = reading_session.findall(
            f'unblindedReadNodule[characteristics]/roi[imageSOP_UID='
            f'"{dataset.SOPInstanceUID}"]', namespaces=root.nsmap)
        annotation_array = np.zeros([dataset.Rows, dataset.Columns])
        for roi in roi_list:
            edgemap_list = roi.findall('edgeMap', namespaces=root.nsmap)
            for edgemap in edgemap_list:
                xCoord = edgemap.find('xCoord', namespaces=root.nsmap).text
                yCoord = edgemap.find('yCoord', namespaces=root.nsmap).text
                # XML stores (x, y); numpy indexing is (row, col) = (y, x).
                annotation_array[int(yCoord), int(xCoord)] = 1
        return annotation_array

    @classmethod
    def _get_overlay(cls, dataset: Dataset, annotation_path: str) -> Dataset:
        """Attach the annotation mask to the dataset as a DICOM overlay plane.

        Populates the group-0x6000 overlay elements (type, rows, columns, bits
        allocated/position, origin, data) and returns the mutated dataset.
        """
        annotation = cls._parse_annotation(dataset, annotation_path)
        elem_overlay_type = pydicom.DataElement(0x60000040, VR='CS', value='GRAPHICS')
        dataset.add(elem_overlay_type)
        elem_overlay_rows = pydicom.DataElement(0x60000010, VR='US', value=dataset.Rows)
        dataset.add(elem_overlay_rows)
        elem_overlay_columns = pydicom.DataElement(0x60000011, VR='US', value=dataset.Columns)
        dataset.add(elem_overlay_columns)
        elem_overlay_bit_allocated = pydicom.DataElement(0x60000100, VR='US', value=1)
        dataset.add(elem_overlay_bit_allocated)
        elem_overlay_bit_position = pydicom.DataElement(0x60000102, VR='US', value=0)
        dataset.add(elem_overlay_bit_position)
        elem_overlay_origin = pydicom.DataElement(0x60000050, VR='SS', value=[1, 1])
        dataset.add(elem_overlay_origin)
        # Overlay data is 1 bit per pixel; pack_bits packs the 0/1 mask.
        elem_overlay_data = pydicom.DataElement(0x60003000, VR='OW', value=pack_bits(annotation))
        dataset.add(elem_overlay_data)
        return dataset

    @classmethod
    def _get_pixel(cls, dataset: Dataset, annotation_path: str) -> Dataset:
        """Replace the dataset's PixelData with the raw annotation mask bytes."""
        annotation = cls._parse_annotation(dataset, annotation_path)
        dataset.PixelData = annotation.tobytes()
        # ... remainder of this method is truncated in the source chunk
Learn to execute automation testing from scratch with the LambdaTest Learning Hub. From setting up the prerequisites and running your first automated test, to following best practices and diving deeper into advanced test scenarios, the LambdaTest Learning Hub compiles step-by-step guides to help you become proficient with different test automation frameworks, e.g. Selenium, Cypress, and TestNG.
You can also refer to the video tutorials on the LambdaTest YouTube channel for step-by-step demonstrations from industry experts.
Get 100 minutes of automation testing FREE!