Best JavaScript code snippet using devicefarmer-stf
ABuKLManager.py
Source:ABuKLManager.py
# -*- encoding:utf-8 -*-
"""
    Financial time-series (kline) management module
"""
from __future__ import print_function
from __future__ import absolute_import
from __future__ import division

import logging

from ..TradeBu import AbuBenchmark
from ..UtilBu import ABuDateUtil
from ..CoreBu.ABuEnv import EMarketDataSplitMode, EMarketDataFetchMode
from ..MarketBu import ABuSymbolPd
from ..MarketBu.ABuMarket import split_k_market
from ..CoreBu.ABuEnvProcess import add_process_env_sig, AbuEnvProcess
from ..CoreBu.ABuParallel import delayed, Parallel
from ..CoreBu import ABuEnv
from ..CoreBu.ABuEnv import EDataCacheType
from ..UtilBu.ABuProgress import AbuMulPidProgress
from ..UtilBu.ABuFileUtil import batch_h5s
# noinspection PyUnresolvedReferences
from ..CoreBu.ABuFixes import filter

__author__ = '阿布'
__weixin__ = 'abu_quant'


# noinspection PyUnusedLocal
@add_process_env_sig
def gen_dict_pick_time_kl_pd(target_symbols, capital, benchmark, show_progress=True):
    """
    Delegate method used by AbuKLManager.batch_get_pick_time_kl_pd to fetch pick-time
    (timing) kline series in parallel worker processes.
    :param target_symbols: symbols to request
    :param capital: AbuCapital instance (not used by the current implementation)
    :param benchmark: trading benchmark, an AbuBenchmark instance
    :param show_progress: whether to show a ui progress bar
    """
    # dict of fetched kline DataFrames to return
    pick_kl_pd_dict = dict()
    # prepare the argument for the batch_h5s decorator, see the batch_h5 decorator implementation
    h5s_fn = None
    if ABuEnv.g_data_cache_type == EDataCacheType.E_DATA_CACHE_HDF5 and ABuEnv.g_data_fetch_mode == \
            EMarketDataFetchMode.E_DATA_FETCH_FORCE_LOCAL:
        # only assign the h5s_fn path when caching with hdf5 and fetching from local data
        # noinspection PyProtectedMember
        h5s_fn = ABuEnv.g_project_kl_df_data

    @batch_h5s(h5s_fn)
    def _batch_gen_dict_pick_time_kl_pd():
        # start the multi-process progress bar
        with AbuMulPidProgress(len(target_symbols), 'gen kl_pd complete', show_progress=show_progress) as progress:
            for epoch, target_symbol in enumerate(target_symbols):
                progress.show(epoch + 1)
                # iterate target_symbols and fetch the corresponding kline series
                kl_pd = ABuSymbolPd.make_kl_df(target_symbol, data_mode=EMarketDataSplitMode.E_DATA_SPLIT_UNDO,
                                               benchmark=benchmark, n_folds=benchmark.n_folds)
                # add kl_pd to the result dict keyed by target_symbol
                pick_kl_pd_dict[target_symbol] = kl_pd
    _batch_gen_dict_pick_time_kl_pd()
    return pick_kl_pd_dict


class AbuKLManager(object):
    """Financial time-series management class"""

    def __init__(self, benchmark, capital):
        """
        :param benchmark: trading benchmark, an AbuBenchmark instance
        :param capital: AbuCapital instance
        """
        self.benchmark = benchmark
        self.capital = capital
        # pick-stock (stock selection) kline dict
        pick_stock_kl_pd_dict = dict()
        # pick-time (timing) kline dict
        pick_time_kl_pd_dict = dict()
        # wrap both dicts in the instance-level pick_kl_pd_dict
        self.pick_kl_pd_dict = {'pick_stock': pick_stock_kl_pd_dict, 'pick_time': pick_time_kl_pd_dict}

    def __str__(self):
        """Print form: pick_stock + pick_time keys, i.e. all symbols held"""
        keys = set(self.pick_kl_pd_dict['pick_stock'].keys()) | set(self.pick_kl_pd_dict['pick_time'].keys())
        return 'pick_stock + pick_time keys :{}'.format(keys)
    __repr__ = __str__

    def __len__(self):
        """Length: pick-stock dict length + pick-time dict length"""
        return len(self.pick_kl_pd_dict['pick_stock']) + len(self.pick_kl_pd_dict['pick_time'])

    def __contains__(self, item):
        """Membership test: present in either the pick-stock dict or the pick-time dict"""
        return item in self.pick_kl_pd_dict['pick_stock'] or item in self.pick_kl_pd_dict['pick_time']

    def __missing__(self, key):
        """Missing key: should fetch data via code_to_symbol from the key, not implemented yet"""
        # TODO fetch data via code_to_symbol based on key
        raise NotImplementedError('TODO AbuKLManager __missing__')

    def __getitem__(self, key):
        """Index access: look the key up in both the pick-stock and pick-time dicts and return both results"""
        pick_stock_item = None
        if key in self.pick_kl_pd_dict['pick_stock']:
            pick_stock_item = self.pick_kl_pd_dict['pick_stock'][key]
        pick_time_item = None
        if key in self.pick_kl_pd_dict['pick_time']:
            pick_time_item = self.pick_kl_pd_dict['pick_time'][key]
        return pick_stock_item, pick_time_item

    def __setitem__(self, key, value):
        """Index assignment: raise, external writes are not allowed"""
        raise AttributeError("AbuKLManager set value!!!")

    def _fetch_pick_stock_kl_pd(self, xd, target_symbol):
        """
        Fetch the pick-stock kline series for a symbol and selection period. This is more involved
        than the pick-time fetch because a pick-stock benchmark has to be built from the given
        conditions and cached on the instance.
        :param xd: selection period (defaults to one year of trading days)
        :param target_symbol: symbol to select on
        :return: pick-stock kline series
        """
        # the first date of the pick-time benchmark is the last date of the pick-stock period
        end = ABuDateUtil.timestamp_to_str(self.benchmark.kl_pd.index[0])
        if xd == ABuEnv.g_market_trade_year:
            # usually the default of one year; no begin_date needed, which is faster
            n_folds = 1
            pre_bc_key = 'pre_benchmark_{}'.format(n_folds)
            start = None
        else:
            # xd divided by the trading days in a year gives a float n_folds, eg: 0.88
            n_folds = float(xd / ABuEnv.g_market_trade_year)
            # xd is in trading days; convert to calendar days to compute start
            delay_day = 365 * n_folds
            start = ABuDateUtil.begin_date(delay_day, date_str=end, fix=False)
            # build the attribute key from start and end, eg: pre_benchmark_2011-09-09_2016-07-26
            pre_bc_key = 'pre_benchmark_{}-{}'.format(start, end)
        if hasattr(self, pre_bc_key):
            # reuse the cached pick-stock benchmark, eg: self.pre_benchmark_2011-09-09_2016-07-26
            pre_benchmark = getattr(self, pre_bc_key)
        else:
            # not cached yet: build an AbuBenchmark for the pick-stock period from n_folds and end
            pre_benchmark = AbuBenchmark(n_folds=n_folds, start=start, end=end)
            # cache the pick-stock benchmark on the instance
            setattr(self, pre_bc_key, pre_benchmark)
        # fetch the symbol's kline series using the pick-stock benchmark
        return ABuSymbolPd.make_kl_df(target_symbol, data_mode=EMarketDataSplitMode.E_DATA_SPLIT_UNDO,
                                      benchmark=pre_benchmark, n_folds=pre_benchmark.n_folds, start=start, end=end)

    def _fetch_pick_time_kl_pd(self, target_symbol):
        """Fetch the pick-time kline series for a symbol"""
        return ABuSymbolPd.make_kl_df(target_symbol, data_mode=EMarketDataSplitMode.E_DATA_SPLIT_UNDO,
                                      benchmark=self.benchmark, n_folds=self.benchmark.n_folds)

    def get_pick_time_kl_pd(self, target_symbol):
        """Public pick-time accessor: check the internal pick-time dict first; if absent, fetch via _fetch_pick_time_kl_pd and cache it"""
        if target_symbol in self.pick_kl_pd_dict['pick_time']:
            kl_pd = self.pick_kl_pd_dict['pick_time'][target_symbol]
            if kl_pd is not None:
                # copies made during multiprocessing lose the name attribute
                kl_pd.name = target_symbol
            return kl_pd
        # not in the dict: fetch, then store the result in the pick-time dict
        kl_pd = self._fetch_pick_time_kl_pd(target_symbol)
        self.pick_kl_pd_dict['pick_time'][target_symbol] = kl_pd
        return kl_pd

    def filter_pick_time_choice_symbols(self, choice_symbols):
        """
        Filter choice_symbols down to the symbols whose pick-time series is not yet in the internal pick-time dict
        :param choice_symbols: iterable of symbols
        :return: symbols not present in the internal pick-time dict
        """
        return list(filter(lambda target_symbol: target_symbol not in self.pick_kl_pd_dict['pick_time'],
                           choice_symbols))

    def batch_get_pick_time_kl_pd(self, choice_symbols, n_process=ABuEnv.g_cpu_cnt, show_progress=True):
        """
        Batch-fetch pick-time kline series and store them in the internal pick-time dict, using parallel processes
        :param choice_symbols: iterable of symbols
        :param n_process: number of worker processes, default 16; the work is io-heavy, so the cpu count is not considered
        :param show_progress: whether to show a ui progress bar
        """
        if len(choice_symbols) == 0:
            return
        if n_process <= 0:
            # n_process is checked against 1 below and drives split_k_market
            n_process = ABuEnv.g_cpu_cnt
        # TODO distinguish hdf5 from csv storage; csv storage allows parallel reads and writes
        # only E_DATA_FETCH_FORCE_LOCAL runs in multi-task mode, otherwise fall back to a single process (n_process = 1)
        if n_process > 1 and ABuEnv.g_data_fetch_mode != EMarketDataFetchMode.E_DATA_FETCH_FORCE_LOCAL:
            # 1. hdf5 is easily corrupted by concurrent writers
            # 2. on MAC OS 10.9+ parallel networking can crash or hang due to a numpy system bug
            logging.info('batch get only support E_DATA_FETCH_FORCE_LOCAL for Parallel!')
            n_process = 1
        # split choice_symbols across the n_process worker processes
        process_symbols = split_k_market(n_process, market_symbols=choice_symbols)
        # the split may leave a remainder, so adopt the actual number of chunks, eg 32 -> 33, 16 -> 17
        if n_process > 1:
            n_process = len(process_symbols)
        parallel = Parallel(
            n_jobs=n_process, verbose=0, pre_dispatch='2*n_jobs')
        # gen_dict_pick_time_kl_pd is wrapped by add_process_env_sig and needs the AbuEnvProcess
        # object copied into each process, see ABuEnvProcess.py
        p_nev = AbuEnvProcess()
        # run the parallel tasks
        out_pick_kl_pd_dict = parallel(delayed(gen_dict_pick_time_kl_pd)(target_symbols, self.capital, self.benchmark,
                                                                         show_progress=show_progress,
                                                                         env=p_nev)
                                       for target_symbols in process_symbols)
        for pick_kl_pd_dict in out_pick_kl_pd_dict:
            # merge each worker's result dict into the internal pick-time dict
            self.pick_kl_pd_dict['pick_time'].update(pick_kl_pd_dict)

    def get_pick_stock_kl_pd(self, target_symbol, xd=ABuEnv.g_market_trade_year,
                             min_xd=int(ABuEnv.g_market_trade_year / 2)):
        """
        Public pick-stock accessor: check the internal pick-stock dict first; if absent, fetch via
        _fetch_pick_stock_kl_pd and cache it in the pick-stock dict
        :param target_symbol: symbol to select on
        :param xd: selection period (defaults to one year of trading days)
        :param min_xd: filter threshold for fetched series, i.e. the minimum series length
        :return:
        """
        if target_symbol in self.pick_kl_pd_dict['pick_stock']:
            xd_dict = self.pick_kl_pd_dict['pick_stock'][target_symbol]
            if xd in xd_dict:
                # cache hit, shaped like: self.pick_kl_pd_dict['pick_stock']['usTSLA']['252']
                # noinspection PyTypeChecker
                kl_pd = xd_dict[xd]
                if kl_pd is not None:
                    # deep copies made during multiprocessing lose the name attribute
                    kl_pd.name = target_symbol
                return kl_pd
        # not in the dict: fetch it
        kl_pd = self._fetch_pick_stock_kl_pd(xd, target_symbol)
        """the pick-stock dict is two levels deep, one more than the pick-time dict, with the selection period xd as the second-level key"""
        if kl_pd is None or kl_pd.shape[0] == 0:
            self.pick_kl_pd_dict['pick_stock'][target_symbol] = {xd: None}
            return None
        """_fetch_pick_stock_kl_pd fetches kl_pd in benchmark-aligned ('ruler') mode, so min_xd only has a real effect when set above that length"""
        if kl_pd.shape[0] < min_xd:
            # the series has data but fewer than min_xd rows: discard it and store {xd: None}
            self.pick_kl_pd_dict['pick_stock'][target_symbol] = {xd: None}
            return None
        # second-level dict {xd: kl_pd}
        self.pick_kl_pd_dict['pick_stock'][target_symbol] = {xd: kl_pd}
...
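For context, a minimal usage sketch of the manager above. The import paths and the AbuCapital(1000000, benchmark) call are assumptions based on the abupy docstrings shown here, not verified against the installed library.

# A minimal sketch, assuming abupy exports AbuBenchmark/AbuCapital and that
# this module lives at abupy.TradeBu.ABuKLManager (both are assumptions).
from abupy import AbuBenchmark, AbuCapital
from abupy.TradeBu.ABuKLManager import AbuKLManager

benchmark = AbuBenchmark(n_folds=2)            # two years of benchmark kline data
capital = AbuCapital(1000000, benchmark)       # assumed: initial cash + benchmark
kl_manager = AbuKLManager(benchmark, capital)

# Batch-fetch pick-time series for several symbols in parallel,
# then read one back from the internal cache.
kl_manager.batch_get_pick_time_kl_pd(['usTSLA', 'usAAPL'], n_process=4)
tsla_kl = kl_manager.get_pick_time_kl_pd('usTSLA')
print(tsla_kl is None or tsla_kl.shape)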
form2fit.py
Source:form2fit.py
1# coding=utf-82# Copyright 2020 The Google Research Authors.3#4# Licensed under the Apache License, Version 2.0 (the "License");5# you may not use this file except in compliance with the License.6# You may obtain a copy of the License at7#8# http://www.apache.org/licenses/LICENSE-2.09#10# Unless required by applicable law or agreed to in writing, software11# distributed under the License is distributed on an "AS IS" BASIS,12# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.13# See the License for the specific language governing permissions and14# limitations under the License.15#!/usr/bin/env python16"""Form-2-fit Agent (https://form2fit.github.io/)."""17import os18import cv219import numpy as np20from ravens import cameras21from ravens import utils22from ravens.models import Attention23from ravens.models import Matching24import tensorflow as tf25class Form2FitAgent:26 """Form-2-fit Agent (https://form2fit.github.io/)."""27 def __init__(self, name, task):28 self.name = name29 self.task = task30 self.total_iter = 031 self.num_rotations = 2432 self.descriptor_dim = 1633 self.pixel_size = 0.00312534 self.input_shape = (320, 160, 6)35 self.camera_config = cameras.RealSenseD415.CONFIG36 self.models_dir = os.path.join('checkpoints', self.name)37 self.bounds = np.array([[0.25, 0.75], [-0.5, 0.5], [0, 0.28]])38 self.pick_model = Attention(39 input_shape=self.input_shape,40 num_rotations=1,41 preprocess=self.preprocess,42 lite=True)43 self.place_model = Attention(44 input_shape=self.input_shape,45 num_rotations=1,46 preprocess=self.preprocess,47 lite=True)48 self.match_model = Matching(49 input_shape=self.input_shape,50 descriptor_dim=self.descriptor_dim,51 num_rotations=self.num_rotations,52 preprocess=self.preprocess,53 lite=True)54 def train(self, dataset, num_iter, writer, validation_dataset=None):55 """Train on dataset for a specific number of iterations."""56 del validation_dataset57 for i in range(num_iter):58 obs, act, _ = dataset.random_sample()59 # Get heightmap from RGB-D images.60 configs = act['camera_config']61 colormap, heightmap = self.get_heightmap(obs, configs)62 # Get training labels from data sample.63 pose0, pose1 = act['params']['pose0'], act['params']['pose1']64 p0_position, p0_rotation = pose0[0], pose0[1]65 p0 = utils.xyz_to_pix(p0_position, self.bounds, self.pixel_size)66 p0_theta = -np.float32(67 utils.quatXYZW_to_eulerXYZ(p0_rotation)[2])68 p1_position, p1_rotation = pose1[0], pose1[1]69 p1 = utils.xyz_to_pix(p1_position, self.bounds, self.pixel_size)70 p1_theta = -np.float32(71 utils.quatXYZW_to_eulerXYZ(p1_rotation)[2])72 p1_theta = p1_theta - p0_theta73 p0_theta = 074 # Concatenate color with depth images.75 input_image = np.concatenate((colormap, heightmap[Ellipsis, None],76 heightmap[Ellipsis, None], heightmap[Ellipsis, None]),77 axis=2)78 # Do data augmentation (perturb rotation and translation).79 input_image, _, roundedpixels, _ = utils.perturb(input_image, [p0, p1])80 p0, p1 = roundedpixels81 # Compute training loss.82 loss0 = self.pick_model.train(input_image, p0, theta=0)83 loss1 = self.place_model.train(input_image, p1, theta=0)84 loss2 = self.match_model.train(input_image, p0, p1, theta=p1_theta)85 with writer.as_default():86 tf.summary.scalar(87 'pick_loss',88 self.pick_model.metric.result(),89 step=self.total_iter + i)90 tf.summary.scalar(91 'place_loss',92 self.place_model.metric.result(),93 step=self.total_iter + i)94 tf.summary.scalar(95 'match_loss',96 self.match_model.metric.result(),97 step=self.total_iter + i)98 print(99 
f'Train Iter: {self.total_iter + i} Loss: {loss0:.4f} {loss1:.4f} {loss2:.4f}'100 )101 self.total_iter += num_iter102 self.save()103 def act(self, obs, info):104 """Run inference and return best action given visual observations."""105 del info106 act = {'camera_config': self.camera_config, 'primitive': None}107 if not obs:108 return act109 # Get heightmap from RGB-D images.110 colormap, heightmap = self.get_heightmap(obs, self.camera_config)111 # Concatenate color with depth images.112 input_image = np.concatenate(113 (colormap, heightmap[Ellipsis, None], heightmap[Ellipsis, None], heightmap[Ellipsis,114 None]),115 axis=2)116 # Get top-k pixels from pick and place heatmaps.117 k = 100118 pick_heatmap = self.pick_model.forward(119 input_image, apply_softmax=True).squeeze()120 place_heatmap = self.place_model.forward(121 input_image, apply_softmax=True).squeeze()122 descriptors = np.float32(self.match_model.forward(input_image))123 # V4124 pick_heatmap = cv2.GaussianBlur(pick_heatmap, (49, 49), 0)125 place_heatmap = cv2.GaussianBlur(place_heatmap, (49, 49), 0)126 pick_topk = np.int32(127 np.unravel_index(128 np.argsort(pick_heatmap.reshape(-1))[-k:], pick_heatmap.shape)).T129 pick_pixel = pick_topk[-1, :]130 from skimage.feature import peak_local_max # pylint: disable=g-import-not-at-top131 place_peaks = peak_local_max(place_heatmap, num_peaks=1)132 distances = np.ones((place_peaks.shape[0], self.num_rotations)) * 10133 pick_descriptor = descriptors[0, pick_pixel[0],134 pick_pixel[1], :].reshape(1, -1)135 for i in range(place_peaks.shape[0]):136 peak = place_peaks[i, :]137 place_descriptors = descriptors[:, peak[0], peak[1], :]138 distances[i, :] = np.linalg.norm(139 place_descriptors - pick_descriptor, axis=1)140 ibest = np.unravel_index(np.argmin(distances), shape=distances.shape)141 p0_pixel = pick_pixel142 p0_theta = 0143 p1_pixel = place_peaks[ibest[0], :]144 p1_theta = ibest[1] * (2 * np.pi / self.num_rotations)145 # # V3146 # pick_heatmap = cv2.GaussianBlur(pick_heatmap, (49, 49), 0)147 # place_heatmap = cv2.GaussianBlur(place_heatmap, (49, 49), 0)148 # pick_topk = np.int32(149 # np.unravel_index(150 # np.argsort(pick_heatmap.reshape(-1))[-k:], pick_heatmap.shape)).T151 # place_topk = np.int32(152 # np.unravel_index(153 # np.argsort(place_heatmap.reshape(-1))[-k:],154 # place_heatmap.shape)).T155 # pick_pixel = pick_topk[-1, :]156 # place_pixel = place_topk[-1, :]157 # pick_descriptor = descriptors[0, pick_pixel[0],158 # pick_pixel[1], :].reshape(1, -1)159 # place_descriptor = descriptors[:, place_pixel[0], place_pixel[1], :]160 # distances = np.linalg.norm(place_descriptor - pick_descriptor, axis=1)161 # irotation = np.argmin(distances)162 # p0_pixel = pick_pixel163 # p0_theta = 0164 # p1_pixel = place_pixel165 # p1_theta = irotation * (2 * np.pi / self.num_rotations)166 # # V2167 # pick_topk = np.int32(168 # np.unravel_index(169 # np.argsort(pick_heatmap.reshape(-1))[-k:], pick_heatmap.shape)).T170 # place_topk = np.int32(171 # np.unravel_index(172 # np.argsort(place_heatmap.reshape(-1))[-k:],173 # place_heatmap.shape)).T174 # pick_pixel = pick_topk[-1, :]175 # pick_descriptor = descriptors[0, pick_pixel[0],176 # pick_pixel[1], :].reshape(1, 1, 1, -1)177 # distances = np.linalg.norm(descriptors - pick_descriptor, axis=3)178 # distances = np.transpose(distances, [1, 2, 0])179 # max_distance = int(np.round(np.max(distances)))180 # for i in range(self.num_rotations):181 # distances[:, :, i] = cv2.circle(distances[:, :, i],182 # (pick_pixel[1], pick_pixel[0]), 50,183 # max_distance, 
-1)184 # ibest = np.unravel_index(np.argmin(distances), shape=distances.shape)185 # p0_pixel = pick_pixel186 # p0_theta = 0187 # p1_pixel = ibest[:2]188 # p1_theta = ibest[2] * (2 * np.pi / self.num_rotations)189 # # V1190 # pick_topk = np.int32(191 # np.unravel_index(192 # np.argsort(pick_heatmap.reshape(-1))[-k:], pick_heatmap.shape)).T193 # place_topk = np.int32(194 # np.unravel_index(195 # np.argsort(place_heatmap.reshape(-1))[-k:],196 # place_heatmap.shape)).T197 # distances = np.zeros((k, k, self.num_rotations))198 # for ipick in range(k):199 # pick_descriptor = descriptors[0, pick_topk[ipick, 0],200 # pick_topk[ipick, 1], :].reshape(1, -1)201 # for iplace in range(k):202 # place_descriptors = descriptors[:, place_topk[iplace, 0],203 # place_topk[iplace, 1], :]204 # distances[ipick, iplace, :] = np.linalg.norm(205 # place_descriptors - pick_descriptor, axis=1)206 # ibest = np.unravel_index(np.argmin(distances), shape=distances.shape)207 # p0_pixel = pick_topk[ibest[0], :]208 # p0_theta = 0209 # p1_pixel = place_topk[ibest[1], :]210 # p1_theta = ibest[2] * (2 * np.pi / self.num_rotations)211 # Pixels to end effector poses.212 p0_position = utils.pix_to_xyz(p0_pixel, heightmap, self.bounds,213 self.pixel_size)214 p1_position = utils.pix_to_xyz(p1_pixel, heightmap, self.bounds,215 self.pixel_size)216 p0_rotation = utils.eulerXYZ_to_quatXYZW((0, 0, -p0_theta))217 p1_rotation = utils.eulerXYZ_to_quatXYZW((0, 0, -p1_theta))218 act['primitive'] = 'pick_place'219 if self.task == 'sweeping':220 act['primitive'] = 'sweep'221 elif self.task == 'pushing':222 act['primitive'] = 'push'223 params = {224 'pose0': (p0_position, p0_rotation),225 'pose1': (p1_position, p1_rotation)226 }227 act['params'] = params228 return act229 #-------------------------------------------------------------------------230 # Helper Functions231 #-------------------------------------------------------------------------232 def preprocess(self, image):233 """Pre-process images (subtract mean, divide by std)."""234 color_mean = 0.18877631235 depth_mean = 0.00509261236 color_std = 0.07276466237 depth_std = 0.00903967238 image[:, :, :3] = (image[:, :, :3] / 255 - color_mean) / color_std239 image[:, :, 3:] = (image[:, :, 3:] - depth_mean) / depth_std240 return image241 def get_heightmap(self, obs, configs):242 """Reconstruct orthographic heightmaps with segmentation masks."""243 heightmaps, colormaps = utils.reconstruct_heightmaps(244 obs['color'], obs['depth'], configs, self.bounds, self.pixel_size)245 colormaps = np.float32(colormaps)246 heightmaps = np.float32(heightmaps)247 # Fuse maps from different views.248 valid = np.sum(colormaps, axis=3) > 0249 repeat = np.sum(valid, axis=0)250 repeat[repeat == 0] = 1251 colormap = np.sum(colormaps, axis=0) / repeat[Ellipsis, None]252 colormap = np.uint8(np.round(colormap))253 heightmap = np.sum(heightmaps, axis=0) / repeat254 return colormap, heightmap255 def load(self, num_iter):256 """Load pre-trained models."""257 pick_fname = 'pick-ckpt-%d.h5' % num_iter258 place_fname = 'place-ckpt-%d.h5' % num_iter259 match_fname = 'match-ckpt-%d.h5' % num_iter260 pick_fname = os.path.join(self.models_dir, pick_fname)261 place_fname = os.path.join(self.models_dir, place_fname)262 match_fname = os.path.join(self.models_dir, match_fname)263 self.pick_model.load(pick_fname)264 self.place_model.load(place_fname)265 self.match_model.load(match_fname)266 self.total_iter = num_iter267 def save(self):268 """Save models."""269 if not os.path.exists(self.models_dir):270 os.makedirs(self.models_dir)271 
pick_fname = 'pick-ckpt-%d.h5' % self.total_iter272 place_fname = 'place-ckpt-%d.h5' % self.total_iter273 match_fname = 'match-ckpt-%d.h5' % self.total_iter274 pick_fname = os.path.join(self.models_dir, pick_fname)275 place_fname = os.path.join(self.models_dir, place_fname)276 match_fname = os.path.join(self.models_dir, match_fname)277 self.pick_model.save(pick_fname)278 self.place_model.save(place_fname)...
tools_matrix.py
Source:tools_matrix.py
1import sys2from operator import itemgetter3import numpy as np4import cv25'''6Function:7 change rectangles into squares (matrix version)8Input:9 rectangles: rectangles[i][0:3] is the position, rectangles[i][4] is score10Output:11 squares: same as input12'''13def rect2square(rectangles):14 w = rectangles[:,2] - rectangles[:,0]15 h = rectangles[:,3] - rectangles[:,1]16 l = np.maximum(w,h).T17 rectangles[:,0] = rectangles[:,0] + w*0.5 - l*0.518 rectangles[:,1] = rectangles[:,1] + h*0.5 - l*0.5 19 rectangles[:,2:4] = rectangles[:,0:2] + np.repeat([l], 2, axis = 0).T 20 return rectangles21'''22Function:23 apply NMS(non-maximum suppression) on ROIs in same scale(matrix version)24Input:25 rectangles: rectangles[i][0:3] is the position, rectangles[i][4] is score26Output:27 rectangles: same as input28'''29def NMS(rectangles,threshold,type):30 if len(rectangles)==0:31 return rectangles32 boxes = np.array(rectangles)33 x1 = boxes[:,0]34 y1 = boxes[:,1]35 x2 = boxes[:,2]36 y2 = boxes[:,3]37 s = boxes[:,4]38 area = np.multiply(x2-x1+1, y2-y1+1)39 I = np.array(s.argsort())40 pick = []41 while len(I)>0:42 xx1 = np.maximum(x1[I[-1]], x1[I[0:-1]]) #I[-1] have hightest prob score, I[0:-1]->others43 yy1 = np.maximum(y1[I[-1]], y1[I[0:-1]])44 xx2 = np.minimum(x2[I[-1]], x2[I[0:-1]])45 yy2 = np.minimum(y2[I[-1]], y2[I[0:-1]])46 w = np.maximum(0.0, xx2 - xx1 + 1)47 h = np.maximum(0.0, yy2 - yy1 + 1)48 inter = w * h49 if type == 'iom':50 o = inter / np.minimum(area[I[-1]], area[I[0:-1]])51 else:52 o = inter / (area[I[-1]] + area[I[0:-1]] - inter)53 pick.append(I[-1])54 I = I[np.where(o<=threshold)[0]]55 result_rectangle = boxes[pick].tolist()56 return result_rectangle57'''58Function:59 Detect face position and calibrate bounding box on 12net feature map(matrix version)60Input:61 cls_prob : softmax feature map for face classify62 roi : feature map for regression63 out_side : feature map's largest size64 scale : current input image scale in multi-scales65 width : image's origin width66 height : image's origin height67 threshold: 0.6 can have 99% recall rate68'''69def detect_face_12net(cls_prob,roi,out_side,scale,width,height,threshold):70 in_side = 2*out_side+1171 stride = 072 if out_side != 1:73 stride = float(in_side-12)/(out_side-1)74 (x,y) = np.where(cls_prob>=threshold)75 boundingbox = np.array([x,y]).T76 bb1 = np.fix((stride * (boundingbox) + 0 ) * scale)77 bb2 = np.fix((stride * (boundingbox) + 11) * scale)78 boundingbox = np.concatenate((bb1,bb2),axis = 1)79 dx1 = roi[0][x,y]80 dx2 = roi[1][x,y]81 dx3 = roi[2][x,y]82 dx4 = roi[3][x,y]83 score = np.array([cls_prob[x,y]]).T84 offset = np.array([dx1,dx2,dx3,dx4]).T85 boundingbox = boundingbox + offset*12.0*scale86 rectangles = np.concatenate((boundingbox,score),axis=1)87 rectangles = rect2square(rectangles)88 pick = []89 for i in range(len(rectangles)):90 x1 = int(max(0 ,rectangles[i][0]))91 y1 = int(max(0 ,rectangles[i][1]))92 x2 = int(min(width ,rectangles[i][2]))93 y2 = int(min(height,rectangles[i][3]))94 sc = rectangles[i][4]95 if x2>x1 and y2>y1:96 pick.append([x1,y1,x2,y2,sc])97 return NMS(pick,0.3,'iou')98'''99Function:100 Filter face position and calibrate bounding box on 12net's output101Input:102 cls_prob : softmax feature map for face classify103 roi_prob : feature map for regression104 rectangles: 12net's predict105 width : image's origin width106 height : image's origin height107 threshold : 0.6 can have 97% recall rate108Output:109 rectangles: possible face positions110'''111def 
filter_face_24net(cls_prob,roi,rectangles,width,height,threshold):112 prob = cls_prob[:,1]113 pick = np.where(prob>=threshold)114 rectangles = np.array(rectangles)115 x1 = rectangles[pick,0]116 y1 = rectangles[pick,1]117 x2 = rectangles[pick,2]118 y2 = rectangles[pick,3]119 sc = np.array([prob[pick]]).T120 dx1 = roi[pick,0]121 dx2 = roi[pick,1]122 dx3 = roi[pick,2]123 dx4 = roi[pick,3]124 w = x2-x1125 h = y2-y1126 x1 = np.array([(x1+dx1*w)[0]]).T127 y1 = np.array([(y1+dx2*h)[0]]).T128 x2 = np.array([(x2+dx3*w)[0]]).T129 y2 = np.array([(y2+dx4*h)[0]]).T130 rectangles = np.concatenate((x1,y1,x2,y2,sc),axis=1)131 rectangles = rect2square(rectangles)132 pick = []133 for i in range(len(rectangles)):134 x1 = int(max(0 ,rectangles[i][0]))135 y1 = int(max(0 ,rectangles[i][1]))136 x2 = int(min(width ,rectangles[i][2]))137 y2 = int(min(height,rectangles[i][3]))138 sc = rectangles[i][4]139 if x2>x1 and y2>y1:140 pick.append([x1,y1,x2,y2,sc])141 return NMS(pick,0.3,'iou')142'''143Function:144 Filter face position and calibrate bounding box on 12net's output145Input:146 cls_prob : cls_prob[1] is face possibility147 roi : roi offset148 pts : 5 landmark149 rectangles: 12net's predict, rectangles[i][0:3] is the position, rectangles[i][4] is score150 width : image's origin width151 height : image's origin height152 threshold : 0.7 can have 94% recall rate on CelebA-database153Output:154 rectangles: face positions and landmarks155'''156def filter_face_48net(cls_prob,roi,pts,rectangles,width,height,threshold):157 prob = cls_prob[:,1]158 pick = np.where(prob>=threshold)159 rectangles = np.array(rectangles)160 x1 = rectangles[pick,0]161 y1 = rectangles[pick,1]162 x2 = rectangles[pick,2]163 y2 = rectangles[pick,3]164 sc = np.array([prob[pick]]).T165 dx1 = roi[pick,0]166 dx2 = roi[pick,1]167 dx3 = roi[pick,2]168 dx4 = roi[pick,3]169 w = x2-x1170 h = y2-y1171 pts0= np.array([(w*pts[pick,0]+x1)[0]]).T172 pts1= np.array([(h*pts[pick,5]+y1)[0]]).T173 pts2= np.array([(w*pts[pick,1]+x1)[0]]).T174 pts3= np.array([(h*pts[pick,6]+y1)[0]]).T175 pts4= np.array([(w*pts[pick,2]+x1)[0]]).T176 pts5= np.array([(h*pts[pick,7]+y1)[0]]).T177 pts6= np.array([(w*pts[pick,3]+x1)[0]]).T178 pts7= np.array([(h*pts[pick,8]+y1)[0]]).T179 pts8= np.array([(w*pts[pick,4]+x1)[0]]).T180 pts9= np.array([(h*pts[pick,9]+y1)[0]]).T181 # pts0 = np.array([(w * pts[pick, 0] + x1)[0]]).T182 # pts1 = np.array([(h * pts[pick, 1] + y1)[0]]).T183 # pts2 = np.array([(w * pts[pick, 2] + x1)[0]]).T184 # pts3 = np.array([(h * pts[pick, 3] + y1)[0]]).T185 # pts4 = np.array([(w * pts[pick, 4] + x1)[0]]).T186 # pts5 = np.array([(h * pts[pick, 5] + y1)[0]]).T187 # pts6 = np.array([(w * pts[pick, 6] + x1)[0]]).T188 # pts7 = np.array([(h * pts[pick, 7] + y1)[0]]).T189 # pts8 = np.array([(w * pts[pick, 8] + x1)[0]]).T190 # pts9 = np.array([(h * pts[pick, 9] + y1)[0]]).T191 x1 = np.array([(x1+dx1*w)[0]]).T192 y1 = np.array([(y1+dx2*h)[0]]).T193 x2 = np.array([(x2+dx3*w)[0]]).T194 y2 = np.array([(y2+dx4*h)[0]]).T195 rectangles=np.concatenate((x1,y1,x2,y2,sc,pts0,pts1,pts2,pts3,pts4,pts5,pts6,pts7,pts8,pts9),axis=1)196 pick = []197 for i in range(len(rectangles)):198 x1 = int(max(0 ,rectangles[i][0]))199 y1 = int(max(0 ,rectangles[i][1]))200 x2 = int(min(width ,rectangles[i][2]))201 y2 = int(min(height,rectangles[i][3]))202 if x2>x1 and y2>y1:203 pick.append([x1,y1,x2,y2,rectangles[i][4],204 rectangles[i][5],rectangles[i][6],rectangles[i][7],rectangles[i][8],rectangles[i][9],rectangles[i][10],rectangles[i][11],rectangles[i][12],rectangles[i][13],rectangles[i][14]])205 
return NMS(pick,0.3,'iom')206'''207Function:208 calculate multi-scale and limit the maxinum side to 1000 209Input: 210 img: original image211Output:212 pr_scale: limit the maxinum side to 1000, < 1.0213 scales : Multi-scale214'''215def calculateScales(img):216 caffe_img = img.copy()217 pr_scale = 1.0218 h,w,ch = caffe_img.shape219 if min(w,h)>500:220 pr_scale = 500.0/min(h,w)221 w = int(w*pr_scale)222 h = int(h*pr_scale)223 elif max(w,h)<500:224 pr_scale = 500.0/max(h,w)225 w = int(w*pr_scale)226 h = int(h*pr_scale)227 #multi-scale228 scales = []229 factor = 0.709 # rmaria: why this factor?230 factor_count = 0231 minl = min(h,w)232 #print("minl: ",minl)233 while minl >= 12:234 scales.append(pr_scale*pow(factor, factor_count))235 minl *= factor236 #print("minl2: ",minl)237 factor_count += 1238 return scales239# '''240# Function:241# calculate switch definition of landmark point to new def242# Input:243# pts: old definition pts244# Output:245# pts_new: new def pts246# '''247# def pts_def_rectify(pts):248# pts_new = np.zeros_like(pts)249# pts_new[:, 0]= pts[:,0]250# pts_new[:, 1]= pts[:,5]251# pts_new[:, 2]= pts[:,1]252# pts_new[:, 3]= pts[:,6]253# pts_new[:, 4]= pts[:,2]254# pts_new[:, 5]= pts[:,7]255# pts_new[:, 6]= pts[:,3]256# pts_new[:, 7]= pts[:,8]257# pts_new[:, 8]= pts[:,4]258# pts_new[:, 9]= pts[:,9]259# return pts_new260'''261Function:262 calculate landmark point , new def263Input:264 cls_prob : cls_prob[1] is face possibility265 roi : roi offset266 pts : 5 landmark267 rectangles: 12net's predict, rectangles[i][0:3] is the position, rectangles[i][4] is score268 width : image's origin width269 height : image's origin height270 threshold : 0.7 can have 94% recall rate on CelebA-database271Output:272 rectangles: face positions and landmarks273'''274def filter_face_48net_newdef(cls_prob,roi,pts,rectangles,width,height,threshold):275 prob = cls_prob[:,1]276 pick = np.where(prob>=threshold)277 rectangles = np.array(rectangles)278 x1 = rectangles[pick,0]279 y1 = rectangles[pick,1]280 x2 = rectangles[pick,2]281 y2 = rectangles[pick,3]282 sc = np.array([prob[pick]]).T283 dx1 = roi[pick,0]284 dx2 = roi[pick,1]285 dx3 = roi[pick,2]286 dx4 = roi[pick,3]287 w = x2-x1288 h = y2-y1289 pts0= np.array([(w*pts[pick,0]+x1)[0]]).T290 pts1= np.array([(h*pts[pick,1]+y1)[0]]).T291 pts2= np.array([(w*pts[pick,2]+x1)[0]]).T292 pts3= np.array([(h*pts[pick,3]+y1)[0]]).T293 pts4= np.array([(w*pts[pick,4]+x1)[0]]).T294 pts5= np.array([(h*pts[pick,5]+y1)[0]]).T295 pts6= np.array([(w*pts[pick,6]+x1)[0]]).T296 pts7= np.array([(h*pts[pick,7]+y1)[0]]).T297 pts8= np.array([(w*pts[pick,8]+x1)[0]]).T298 pts9= np.array([(h*pts[pick,9]+y1)[0]]).T299 x1 = np.array([(x1+dx1*w)[0]]).T300 y1 = np.array([(y1+dx2*h)[0]]).T301 x2 = np.array([(x2+dx3*w)[0]]).T302 y2 = np.array([(y2+dx4*h)[0]]).T303 rectangles=np.concatenate((x1,y1,x2,y2,sc,pts0,pts1,pts2,pts3,pts4,pts5,pts6,pts7,pts8,pts9),axis=1)304 # print (pts0,pts1,pts2,pts3,pts4,pts5,pts6,pts7,pts8,pts9)305 pick = []306 for i in range(len(rectangles)):307 x1 = int(max(0 ,rectangles[i][0]))308 y1 = int(max(0 ,rectangles[i][1]))309 x2 = int(min(width ,rectangles[i][2]))310 y2 = int(min(height,rectangles[i][3]))311 if x2>x1 and y2>y1:312 pick.append([x1,y1,x2,y2,rectangles[i][4],313 rectangles[i][5],rectangles[i][6],rectangles[i][7],rectangles[i][8],rectangles[i][9],rectangles[i][10],rectangles[i][11],rectangles[i][12],rectangles[i][13],rectangles[i][14]])314 return NMS(pick,0.3,'idsom')315'''316Function:317 calculate mean value of img_list for double checck img 
quality318Input:319 img_nparray: numpy array of input320Output:321 img_nparray: numpy array of img mean value322'''323def imglist_meanvalue(img_nparray):324 img_mean_array = np.mean(img_nparray ,axis=(1,2,3))...
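A small, self-contained check of the rect2square and NMS helpers defined in tools_matrix.py above, using hand-made boxes in the [x1, y1, x2, y2, score] layout the docstrings describe. The box values are illustrative only, and the import assumes the file is on the path under its listed name.

# Illustrative use of the helpers above; values are made up for the example.
import numpy as np
from tools_matrix import NMS, rect2square   # module name taken from the listing

boxes = [
    [10, 10, 60, 60, 0.95],
    [12, 12, 58, 62, 0.90],    # heavy overlap with the first box
    [200, 200, 260, 250, 0.80],
]

kept = NMS(boxes, 0.3, 'iou')
print(kept)                    # the two overlapping boxes collapse to the higher-scoring one

squares = rect2square(np.array(boxes, dtype=float))
print(squares[:, 2] - squares[:, 0])   # widths now equal heights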
div_rank.py
Source:div_rank.py
#!/usr/bin/env python
# coding: utf-8

import pandas as pd  # we want pandas 0.25 here. The code has not been tested with pandas 1.0
import numpy as np
import os, sys
import json


def read_rank_data(config):
    path = config['path']
    rank_file = config['rank_file']
    sample_col = str(config['sample_name'])
    mol_rank_df = pd.read_csv(os.path.join(path, rank_file), low_memory=False)
    mol_rank_df.set_index(sample_col, inplace=True)
    if config['score_column'] != 'score':
        mol_rank_df['score'] = mol_rank_df[config['score_column']]
    if 'random_id_column' in config:
        mol_rank_df['random_id'] = mol_rank_df[config['random_id_column']]
    else:
        mol_ids = np.arange(len(mol_rank_df))
        np.random.shuffle(mol_ids)
        mol_rank_df['random_id'] = mol_ids
    return(mol_rank_df)


def get_individual_class_df(path, file, sep, sample_name, class_id, min_size, score_data):
    df = pd.read_table(os.path.join(path, 'cluster_data', file), sep=sep).rename(
        columns={sample_name: 'sample_name', class_id: 'class_id'})[['sample_name', 'class_id']]
    df = df.join(score_data, on='sample_name', how='inner')
    df['class_size'] = df.groupby('class_id')['class_id'].transform('size')
    # we now determine from which picking round on a class may be unlocked. The membership quorum for unlocking
    # a class at a given round is defined per class type in the configuration file
    df['unlock_round'] = -1
    for rnd, class_size in reversed(list(enumerate(min_size))):
        df.loc[df['class_size'] >= class_size, 'unlock_round'] = rnd
    # remove any records without an unlock round. This will happen if the membership quorum is always above 1 for all rounds
    df = df[df['unlock_round'] >= 0]
    print('class records: {0}, classified_mols: {1}, number of classes:{2} ({3})'.format(
        len(df), df['sample_name'].nunique(), df['class_id'].nunique(), file))
    return(df)


# assemble the overall class data from the individual files
def get_global_class_df(config, mol_rank_df):
    score_data = mol_rank_df[['score', 'random_id']]
    class_types_config = config['class_data']
    class_data = pd.concat({class_type: get_individual_class_df(path=config['path'], score_data=score_data, **cf)
                            for class_type, cf in class_types_config.items()}, names=['class_type'])
    class_data = class_data.reset_index()[['class_type', 'class_id', 'sample_name', 'score', 'random_id', 'unlock_round']]
    class_data['global_class_id'] = class_data.groupby(['class_type', 'class_id']).ngroup()
    missing = ~mol_rank_df.index.isin(class_data['sample_name'])
    print('Number of samples without any class record: {0}. These will be picked last. '
          'There should not be a significant number of samples here'.format(missing.sum()))
    return(class_data)


# execute the ranking. This may modify mol_rank_df and class_data in place, depending on
# whether pandas performs a copy operation or not

def execute_div_rank(mol_rank_df, class_data, target_picking_quorum):
    # create columns to record the div rank results
    mol_rank_df['pick_seq'] = 0
    mol_rank_df['pick_round'] = 0
    mol_rank_df = mol_rank_df.reindex(columns=(list(mol_rank_df.columns) + list(class_data['class_type'].unique())), fill_value=0)
    # add a picked flag column to class data
    class_data['picked'] = False
    # initialize counters
    current_pick_round = 1
    current_pick_seq = 0
    current_picking_quorum = 0.0
    # main picking loop
    while (mol_rank_df['pick_round'] == 0).any() and (current_picking_quorum < target_picking_quorum):
        print('Picking round:{0}'.format(current_pick_round))
        # get class data for molecules not yet picked and the classes already unlocked for this round
        pick_candidates = class_data[~class_data['picked'] & (class_data['unlock_round'] < current_pick_round)].copy()
        # group them by global class id
        cand_gp = pick_candidates.groupby('global_class_id')
        # rank them within group by their score
        pick_candidates['pick_rank'] = cand_gp['score'].rank(method='dense').astype(int)
        # retain only the top ranking class members
        # we make a copy as we will manipulate this subframe
        pick_candidates = pick_candidates[pick_candidates['pick_rank'] == 1].copy()
        # if this is not the first picking round, we determine how many compounds with the best picking score
        # or better have already been picked for this class. If there are more than current_pick_round,
        # we skip the class
        if current_pick_round > 1:
            # find best pickable scores
            current_pick_score = cand_gp['score'].min().rename('current_pick_score')
            # join these to the already picked classes by global_class_id
            class_data_picked = class_data[class_data['picked']].join(current_pick_score, on=('global_class_id'), how='inner')
            # for each class count the already picked compounds with a score better than or equal to the current one
            # include the unlock round in the grouping (which is constant over a class id) to have it available after the grouping
            pick_count_data = class_data_picked[class_data_picked['score'] <= class_data_picked['current_pick_score']].groupby(
                ['global_class_id', 'unlock_round']).size()
            pick_count_data.name = 'pick_count'
            pick_count_data = pick_count_data.reset_index(level='unlock_round', drop=False)
            # generate list of global_class_ids to be skipped as they are already above par
            # par means the picked number of members is larger than current_pick_round minus the unlock_round for the class
            # if a class is unlocked at round 1, and at the beginning of round 3 we have picked 2 members, we are already above par as 2 >= 3-1
            pick_exclude = pick_count_data[pick_count_data['pick_count'] >= (current_pick_round - pick_count_data['unlock_round'])].index
            # remove these classes from the pick candidates
            pick_candidates = pick_candidates[~pick_candidates['global_class_id'].isin(pick_exclude)].copy()
        # in the remaining candidates count now the number of class types a molecule covers
        pick_candidates['class_type_counts'] = pick_candidates.groupby('random_id')['class_type'].transform('nunique')
        # sort by score, then by class_type_count, and then by the random id
        # the last sort criterion ensures that if several classes have a molecule in common which ranks first according to
        # the first two criteria we indeed pick the same one, but otherwise choose molecules randomly
        pick_candidates.sort_values(by=['score', 'class_type_counts', 'random_id'], ascending=[True, False, True], inplace=True)
        # now we keep only the first molecule per class
        pick_candidates.drop_duplicates(subset='global_class_id', keep='first', inplace=True)
        # the picked molecules are the unique list of sample names for the top molecules per class
        # as the pick candidates are sorted by score, so are the molecules
        pick_mols = pick_candidates['sample_name'].drop_duplicates()
        print('Picked {0} molecules covering {1} classes'.format(len(pick_mols), len(pick_candidates)))
        # record the pick in the mol_rank_df
        mol_rank_df.loc[pick_mols, 'pick_round'] = current_pick_round
        mol_rank_df.loc[pick_mols, 'pick_seq'] = np.arange(current_pick_seq, current_pick_seq + len(pick_mols))
        # record also the class_type which led to the pick
        pick_type_count = pick_candidates.groupby(['sample_name', 'class_type']).size().unstack('class_type', fill_value=0)
        mol_rank_df.loc[pick_type_count.index, pick_type_count.columns] = pick_type_count
        # update the 'picked' column in class_data
        class_data.loc[class_data['sample_name'].isin(pick_mols), 'picked'] = True
        # update the round counter
        current_pick_round += 1
        current_pick_seq += len(pick_mols)
        current_picking_quorum = float(current_pick_seq) / len(mol_rank_df)
        print('Current picking quorum: {0}'.format(current_picking_quorum))
    # wrap up: assign a pick rank to the compounds not yet picked. These will be ranked last, ordered by score
    mol_rank_df.loc[mol_rank_df['pick_round'] == 0, 'pick_round'] = current_pick_round
    pick_seq = mol_rank_df.loc[mol_rank_df['pick_round'] == current_pick_round, 'score'].rank(method='first').astype(int) + current_pick_seq
    mol_rank_df.loc[pick_seq.index, 'pick_seq'] = pick_seq
    return(mol_rank_df)


def main():
    try:
        config_file = sys.argv[1]
    except:
        raise ValueError('Usage: div_rank.py <config_file>')
    with open(config_file, 'r') as jsonf:
        config = json.load(jsonf)
    print('read in config data')
    mol_rank_df = read_rank_data(config)
    print('finished reading in rank data')
    class_data = get_global_class_df(config, mol_rank_df)
    print('finished reading in class data')
    mol_rank_df = execute_div_rank(mol_rank_df, class_data, target_picking_quorum=config['target_picking_quorum'])
    print('finished picking')
    mol_rank_df.to_csv(os.path.join(config['path'], config['outfile']))
    print('written output')
    print('DONE')


if __name__ == "__main__":
...
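A hypothetical configuration for div_rank.py, with key names taken from read_rank_data(), get_individual_class_df() and main() above; the paths, file names and values are placeholders, not taken from the original project.

# Sketch of a config file div_rank.py could consume; all values are illustrative.
import json

config = {
    "path": "/data/diversity_run",
    "rank_file": "scored_molecules.csv",
    "sample_name": "molecule_id",
    "score_column": "docking_score",         # copied into the 'score' column
    "class_data": {
        "scaffold": {                        # one entry per class type
            "file": "scaffold_clusters.tsv", # read from <path>/cluster_data/
            "sep": "\t",
            "sample_name": "molecule_id",
            "class_id": "cluster",
            "min_size": [5, 3, 1],           # membership quorum per picking round
        }
    },
    "target_picking_quorum": 0.25,           # stop once 25% of molecules are picked
    "outfile": "div_ranked.csv",
}

with open("div_rank_config.json", "w") as fh:
    json.dump(config, fh, indent=2)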
pick.py
Source:pick.py
# __author:"zonglr"
# date:2020/12/9
# !/usr/bin/env python3
# _*_ coding: utf-8 _*_
import time, datetime, re

from common import logger, request
from faker import Faker

log = logger.Log()
faker = Faker(locale='zh_CN')


# pick a single item, one pass only
def pickOne(goodsId, lotNum, serialNumber, storageLocationId, pickOrderId):
    body = {
        'goodsId': goodsId,
        'lotNum': lotNum,
        'pickOrderId': pickOrderId,
        'serialNumber': serialNumber,
        'storageLocationId': storageLocationId
    }
    pick_response = request.put_body('/pickOrder/picking', body=body)
    return pick_response


def finishPick(pickOrderId):
    # query the pick order detail by id
    detail = request.get('/pickOrder/detail/%s' % pickOrderId)
    assert detail['msg'] == '请求成功'  # "request succeeded"
    goodsDetail = detail['data']['goodsDetail']
    kitDetail = detail['data']['kitDetail']
    toolsKitDetail = detail['data']['toolsKitDetail']
    # goods list
    for goods in goodsDetail:
        # goods udi
        udi = goods['udi']
        new_udi = re.sub(r'\D', "", udi)
        # material code
        materialCode = goods['materialCode']
        # storage location id
        storageLocationId = goods['storageLocationId']
        # quantity still to pick
        unpickedQuantity = int(goods['quantity']) - int(goods['pickedQuantity'])
        # scan the code to get lot information
        gs1Decode = request.get('/goods/gs1Decode?code=%s' % new_udi)
        # serial number
        serialNumber = gs1Decode['data']['serialNumber']
        # goods id
        goodsId = gs1Decode['data']['goodsId']
        # lot number
        lotNum = gs1Decode['data']['lotNum']
        body = {
            'goodsId': goodsId,
            'lotNum': lotNum,
            'pickOrderId': pickOrderId,
            'serialNumber': serialNumber,
            'storageLocationId': storageLocationId
        }
        # when more than one unit of an item is unpicked, pick repeatedly
        num = 0
        while num < unpickedQuantity:
            pick_response = request.put_body('/pickOrder/picking', body=body)
            assert pick_response['msg'] == '请求成功'  # "request succeeded"
            num += 1
        log.info('goods %s picked' % materialCode)
    # kit list TODO
    for kit in kitDetail:
        print(kit)
    # tool kit list
    for tools in toolsKitDetail:
        # tool kit barcode
        operatorBarcode = tools['operatorBarcode']
        # kitStockId
        kitStockId = tools['kitStockId']
        # storage location id
        storageLocationId = tools['storageLocationId']
        # quantity still to pick
        unpickedQuantity = int(tools['quantity']) - int(tools['pickedQuantity'])
        body = {
            'kitStockId': kitStockId,
            'pickOrderId': pickOrderId,
            'storageLocationId': storageLocationId
        }
        num = 0
        while num < unpickedQuantity:
            pick_response = request.put_body('/pickOrder/picking', body=body)
            assert pick_response['msg'] == '请求成功'  # "request succeeded"
            num += 1
        log.info('tool kit %s picked' % operatorBarcode)
    # finish the pick order
    body2 = {
        'pickOrderId': pickOrderId,
        'imagePath': ['/file/2020/11/16/ac110bd6-ff1f-41ed-b645-a570a8c34df9/提货委托书.jpg']
    }
    finishPick = request.put_body('/pickOrder/pickFinished', body=body2)
    log.info('pick order finished %s' % finishPick)
...
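A minimal driver sketch for the helpers above. It assumes the in-house common.request wrapper is already configured with the service base URL and authentication; the pick order id and item fields are placeholders.

# Hypothetical invocation of pickOne/finishPick; all values are placeholders.
from pick import pickOne, finishPick

PICK_ORDER_ID = "PO-20201209-0001"   # placeholder id

# Pick one known item once, then walk the whole order to completion.
pickOne(goodsId="G001", lotNum="L2020", serialNumber="SN001",
        storageLocationId="A-01-01", pickOrderId=PICK_ORDER_ID)
finishPick(PICK_ORDER_ID)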
Pick_Game.py
Source:Pick_Game.py
import util.Util as Util
import random
import datetime


def randdict_list_del(onedict):
    Pick_List = []
    while len(Pick_List) < 12:
        Total_Weight = sum(onedict.values())
        ra = random.randint(0, Total_Weight - 1)
        curr_sum = 0
        keys = onedict.keys()
        for k in keys:
            curr_sum = curr_sum + onedict[k]
            if ra < curr_sum:
                multiply = k
                Pick_List.append(multiply)
                del onedict[k]
                break
    return Pick_List


starttime = datetime.datetime.now()
Test_Time = 500000
time = 0
Pick_Number = 0
All_Win_Mul = 0
More_pick = {0: 0.3, 1: 0.3, 2: 0.3}

JackPot_hit = {1: 0, 2: 0, 3: 0, 4: 0}


while time < Test_Time:
    time = time + 1
    if time % (Test_Time / 10) == 0:
        print(time)

    Prize_Pool = {1: 1, 2: 10, 3: 50, 4: 100, 5: 1000, 6: 1000, 7: 1000, 8: 1000,
                  9: 1000, 10: 1000, 11: 1000, 12: 1000, 13: 1000, 14: 1000, 15: 1000}
    JackPot_Weight = {1: 1, 2: 5, 3: 20, 4: 50, 5: 30, 6: 40, 7: 50, 8: 100,
                      9: 100, 10: 200, 11: 200, 12: 300, 13: 300, 14: 500, 15: 500}
    Corresponding_Ward = {1: 2000, 2: 500, 3: 50, 4: 10, 5: 20, 6: 18, 7: 15, 8: 12,
                          9: 10, 10: 8, 11: 8, 12: 5, 13: 5, 14: 3, 15: 3}

    Pick_List = randdict_list_del(Prize_Pool)

    Pick_List_Weight = {}

    for reward in Pick_List:
        Pick_List_Weight[reward] = JackPot_Weight[reward]

    Pick_Time = 3
    Reward_Gotten = []
    Reward_Mul_Gotten = []

    while Pick_Time > 0:
        Pick_Time = Pick_Time - 1

        # print(Pick_Time)
        # print(Pick_List_Weight)

        Pick_Get = Util.randdict(Pick_List_Weight)
        del Pick_List_Weight[Pick_Get]
        Reward_Gotten.append(Pick_Get)

        random_num1 = random.random()
        if random_num1 < More_pick[Pick_Time]:
            Extra_Pick_Get_1 = Util.randdict(Pick_List_Weight)
            del Pick_List_Weight[Extra_Pick_Get_1]
            Reward_Gotten.append(Extra_Pick_Get_1)

            Extra_Pick_Get_2 = Util.randdict(Pick_List_Weight)
            del Pick_List_Weight[Extra_Pick_Get_2]
            Reward_Gotten.append(Extra_Pick_Get_2)

    # print(Reward_Gotten)
    for element in Reward_Gotten:
        Reward_Mul_Gotten.append(Corresponding_Ward[element])

        if element in JackPot_hit.keys():
            JackPot_hit[element] += 1

    Pick_Number = Pick_Number + len(Reward_Mul_Gotten)
    All_Win_Mul = All_Win_Mul + sum(Reward_Mul_Gotten)


endtime = datetime.datetime.now()
average_pick_number = Pick_Number / Test_Time
average_mul = All_Win_Mul / Test_Time
print('average picks per game: ' + str(average_pick_number))
print('average win multiplier: ' + str(average_mul))
...
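Pick_Game.py relies on Util.randdict, which is not shown in this listing. A hypothetical implementation consistent with how the script uses it (draw one key from a dict whose values are integer weights) could look like this; randdict_list_del above is the same idea applied repeatedly, deleting each drawn key so the 12 picks are distinct.

# Hypothetical stand-in for util.Util.randdict, matching its usage above.
import random

def randdict(weight_dict):
    total_weight = sum(weight_dict.values())
    ra = random.randint(0, total_weight - 1)
    curr_sum = 0
    for key, weight in weight_dict.items():
        curr_sum += weight
        if ra < curr_sum:
            return key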
strategies.py
Source:strategies.py
...
from .rpsls import RPSLS


# Fixed pick Game Strategy
def fixed_strategy(pick_value):
    pick_RPSLS = pick_value

    def pick():
        return pick_RPSLS
    return pick


# Random pick Game Strategy
def random_strategy():
    def pick():
        pick_RPSLS = random.choice(list(RPSLS))
        return pick_RPSLS
    return pick


# Iterative pick Game Strategy
def iterative_generator(value):
    while True:
        yield value
        value += 1
        value = value % len(RPSLS)


def iterative_strategy():
    pick_generator = iterative_generator(0)

    def pick():
        pick_RPSLS = RPSLS(next(pick_generator))
        return pick_RPSLS
    return pick
...
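A usage sketch for the strategy factories above, assuming it runs where the factories and the RPSLS enum from .rpsls are importable. RPSLS is assumed to be an enum of the five moves with values 0 to 4, which matches how list(RPSLS) and RPSLS(next(pick_generator)) are used; the import paths below are hypothetical.

# Hypothetical imports; adjust to the package layout of the real project.
from strategies import fixed_strategy, random_strategy, iterative_strategy
from rpsls import RPSLS

fixed = fixed_strategy(RPSLS(0))    # always returns the same move
rand = random_strategy()            # uniform random move
cycle = iterative_strategy()        # cycles through the moves in order

for _ in range(3):
    print(fixed(), rand(), cycle())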
__init__.py
Source:__init__.py
from __future__ import absolute_import

from .ABuPickBase import AbuPickTimeWorkBase, AbuPickStockWorkBase
from .ABuPickStockMaster import AbuPickStockMaster
from .ABuPickStockWorker import AbuPickStockWorker
from .ABuPickTimeWorker import AbuPickTimeWorker
from .ABuPickTimeMaster import AbuPickTimeMaster
from . import ABuPickStockExecute
from . import ABuPickTimeExecute
# noinspection all
from . import ABuAlpha as alpha

__all__ = [
    'AbuPickTimeWorkBase',
    'AbuPickStockWorkBase',
    'AbuPickStockMaster',
    'AbuPickStockWorker',
    'AbuPickTimeWorker',
    'AbuPickTimeMaster',
    'ABuPickStockExecute',
    'ABuPickTimeExecute',
    'alpha'
...
Using AI Code Generation
var devicefarmer = require('devicefarmer-stf');
var device = new devicefarmer.Device();
device.pick(function(err, device) {
  if (err) {
    console.log(err);
  } else {
    console.log(device);
  }
});

// example output (truncated):
// { serial: 'ZX1G22F4JZ',
Using AI Code Generation
var pick = require('devicefarmer-stf-api').pick;
var device = pick('device');
console.log(device);
Using AI Code Generation
var stf = require("devicefarmer-stf");
stf.pick().then(function(device){
  console.log(device);
});
Using AI Code Generation
var devicefarmer = require('devicefarmer-stf');
var pick = devicefarmer.pick;
var device = pick();
console.log('device is ' + device);

// package.json (contents elided in the original)
{
  "scripts": {
  },
  "dependencies": {
  }
}

// example output (truncated):
// device is { serial: 'emulator-5554',
//   { id: 0,
//     { health: 'good',
//       voltage: 0.0 },
//   { connected: false,
Using AI Code Generation
// Note: the snippet calls methods on `client` without showing how it is created from the module.
var stf = require('devicefarmer-stf-client');

client.pick('device-id').then(function(device){
  console.log(device);
});

client.release('device-id').then(function(device){
  console.log(device);
});

client.connect('device-id').then(function(device){
  console.log(device);
});

client.disconnect('device-id').then(function(device){
  console.log(device);
});

client.use('device-id').then(function(device){
  console.log(device);
});

client.unuse('device-id').then(function(device){
  console.log(device);
});

client.getDevice('device-id').then(function(device){
  console.log(device);
});

client.getDevices().then(function(devices){
  console.log(devices);
});
Using AI Code Generation
const devicefarmer = require('devicefarmer-stf-client');
const pick = devicefarmer.pick;
pick('device-id').then(function(device) {
  console.log(device);
});
Using AI Code Generation
// Note: the snippet calls methods on `device` without showing how it is created from the module.
var stf = require('devicefarmer-stf-api');

var pick = device.pick('deviceid');
pick.then(function(data){
  console.log(data);
});

var release = device.release('deviceid');
release.then(function(data){
  console.log(data);
});

var get = device.get('deviceid');
get.then(function(data){
  console.log(data);
});

var remove = device.remove('deviceid');
remove.then(function(data){
  console.log(data);
});

var getDeviceList = device.getDeviceList();
getDeviceList.then(function(data){
  console.log(data);
});

var getDeviceListByOwner = device.getDeviceListByOwner('owner');
getDeviceListByOwner.then(function(data){
  console.log(data);
});

var getDeviceListByGroup = device.getDeviceListByGroup('group');
getDeviceListByGroup.then(function(data){
  console.log(data);
});
Using AI Code Generation
const {pick} = require('devicefarmer-stf-client');
pick('device-serial-number').then(device => {
  device.release();
});
Using AI Code Generation
var pick = require('devicefarmer-stf').pick;
pick('device-id', function(err, picked){
  if (err) {
    console.log('Error: ' + err);
    return;
  }
});

var unPick = require('devicefarmer-stf').unPick;
unPick('device-id', function(err, unpicked){
  if (err) {
    console.log('Error: ' + err);
    return;
  }
});

var getDevices = require('devicefarmer-stf').getDevices;
getDevices(function(err, devices){
  if (err) {
    console.log('Error: ' + err);
    return;
  }
});

var getDevice = require('devicefarmer-stf').getDevice;
getDevice('device-id', function(err, device){
  if (err) {
    console.log('Error: ' + err);
    return;
  }
});
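The snippets above are AI-generated and lean on third-party npm wrappers whose exact APIs are not verified here. A wrapper-free alternative is the DeviceFarmer/STF REST API; the sketch below assumes the commonly documented endpoints (GET /api/v1/devices, POST and DELETE on /api/v1/user/devices) and uses placeholder values for the server URL, access token and serial.

# A minimal sketch against the STF REST API; URL, token and device filter are placeholders.
import requests

STF_URL = "https://stf.example.com"          # placeholder server
TOKEN = "xxxx-access-token-xxxx"             # generated in the STF settings UI
HEADERS = {"Authorization": "Bearer " + TOKEN}

# List devices and take the serial of the first one that is present and unowned.
devices = requests.get(STF_URL + "/api/v1/devices", headers=HEADERS).json()["devices"]
serial = next(d["serial"] for d in devices if d.get("present") and not d.get("owner"))

# Reserve ("pick") the device for the current user, then release it again.
requests.post(STF_URL + "/api/v1/user/devices", headers=HEADERS, json={"serial": serial})
requests.delete(STF_URL + "/api/v1/user/devices/" + serial, headers=HEADERS)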