Best Python code snippet using yandex-tank
Source: plugin.py
...
                df['value'] = df[column]
                result_df = self.filter_df_by_case(df, case_name)
                case_metric_obj.put(result_df)

    def upload_monitoring(self, data):
        for metric_name, df in self.monitoring_data_to_dfs(data).items():
            if metric_name not in self.monitoring_metrics:
                panel, metric = metric_name.split(':', 1)
                try:
                    group, name = metric.split('_', 1)
                except ValueError:
                    name = metric
                    group = '_OTHER_'
                self.monitoring_metrics[metric_name] = \
                    self.data_session.new_true_metric(
                        meta=dict(self.meta,
                                  name=name,
                                  group=group,
                                  host=panel,
                                  type='monitoring'))
            self.monitoring_metrics[metric_name].put(df)

    def upload_planned_rps(self):
        """ Uploads planned rps as a raw metric """
        df = self.parse_stpd()
        if not df.empty:
            self.rps_metrics['planned_rps_metrics_obj'] = self.data_session.new_true_metric(
                meta=dict(self.meta, name=self.PLANNED_RPS_METRICS_NAME, source='tank'),
                raw=True, aggregate=False, parent=None, case=None)
            self.rps_metrics['planned_rps_metrics_obj'].put(df)

    def upload_actual_rps(self, data, last_piece=False):
        """ Upload actual rps metric """
        if self.rps_metrics['actual_rps_metrics_obj'] is None:
            self.rps_metrics['actual_rps_metrics_obj'] = self.data_session.new_true_metric(
                meta=dict(self.meta, name=self.ACTUAL_RPS_METRICS_NAME),
                raw=True, aggregate=False, parent=None, case=None
            )
        df = self.count_actual_rps(data, last_piece)
        if not df.empty:
            self.rps_metrics['actual_rps_metrics_obj'].put(df)

    def parse_stpd(self):
        """ Reads rps plan from stpd file """
        stpd_file = self.core.info.get_value(['stepper', 'stpd_file'])
        if not stpd_file:
            logger.info('No stpd found, no planned_rps metrics')
            return pandas.DataFrame()
        rows_list = []
        test_start = int(self.core.info.get_value(['generator', 'test_start'], 0) * 10 ** 3)
        pattern = r'^\d+ (\d+)\s*.*$'
        regex = re.compile(pattern)
        try:
            with open(stpd_file) as stpd:
                for line in stpd:
                    if regex.match(line):
                        timestamp = int((int(line.split(' ')[1]) + test_start) / 1e3)  # seconds
                        rows_list.append(timestamp)
        except Exception:
            logger.warning('Failed to parse stpd file')
            logger.debug('', exc_info=True)
            return pandas.DataFrame()
        return self.rps_series_to_df(pandas.Series(rows_list))

    def count_actual_rps(self, data, last_piece):
        """ Counts actual rps on base of input chunk. Uses buffer for latest timestamp in df. """
        if not last_piece and not data.empty:
            concat_ts = pandas.concat([(data.ts / 1e6).astype(int), self.rps_metrics['actual_rps_latest']])
            # keep the newest (possibly incomplete) second in the buffer, send the rest
            self.rps_metrics['actual_rps_latest'] = concat_ts.loc[lambda s: s == concat_ts.max()]
            series_to_send = concat_ts.loc[lambda s: s < concat_ts.max()]
            df = self.rps_series_to_df(series_to_send) if series_to_send.any() else pandas.DataFrame([])
        else:
            # last piece: flush whatever is still buffered
            df = self.rps_series_to_df(self.rps_metrics['actual_rps_latest'])
            self.rps_metrics['actual_rps_latest'] = pandas.Series()
        return df

    @staticmethod
    def monitoring_data_to_dfs(data):
        panels = {}
        for chunk in data:
            for panel_name, content in chunk['data'].items():
                if panel_name in panels:
                    for metric_name, value in content['metrics'].items():
                        if metric_name in panels[panel_name]:
                            panels[panel_name][metric_name]['value'].append(value)
                            panels[panel_name][metric_name]['ts'].append(chunk['timestamp'])
                        else:
                            panels[panel_name][metric_name] = {'value': [value], 'ts': [chunk['timestamp']]}
                else:
                    panels[panel_name] = {name: {'value': [value], 'ts': [chunk['timestamp']]}
                                          for name, value in content['metrics'].items()}
        # one DataFrame per 'panel:metric' key; timestamps are converted to microseconds
        return {'{}:{}'.format(panelk, name): pandas.DataFrame({'ts': [ts * 1000000 for ts in values['ts']],
                                                                'value': values['value']})
                for panelk, panelv in panels.items() for name, values in panelv.items()}
...
Source: test_neuploader.py
...
    ])
    def test_df_num_and_cols(self, mon_data, length):
        with open(mon_data) as f:
            jsondata = json.load(f)
        dfs = Plugin.monitoring_data_to_dfs(jsondata)
        assert len(dfs) == length
        assert all([list(df.columns) == ['ts', 'value'] for df in dfs.values()])

    @pytest.mark.parametrize('mon_data, names', [
        (os.path.join(PATH, 'monitoring_data/monitoring1.json'),
         ()),
    ])
    def test_metrics_names(self, mon_data, names):
        with open(mon_data) as f:
            jsondata = json.load(f)
        dfs = Plugin.monitoring_data_to_dfs(jsondata)
        assert set(dfs.keys()) == {'{}:{}'.format(panelk, name)
                                   for i in jsondata
                                   for panelk, panelv in i['data'].items()
                                   for name in panelv['metrics'].keys()}


DF = pd.DataFrame({'ts': [0, 1, 2, 3, 4, 5, 6, 7, 8, 9],
                   'value': [43, 75, 12, 65, 24, 65, 41, 87, 15, 62],
                   'tag': ['foo', 'bar', 'foo', '', '', 'null', '', 'not_null', '', 'foo']})


@pytest.mark.parametrize('df, case, expected', [
    (DF, '__overall__', DF[['ts', 'value']]),
    (DF, 'foo', pd.DataFrame({'ts': [0, 2, 9],
                              'value': [43, 12, 62]})),
    (DF, 'null', pd.DataFrame({'ts': [5],
                               'value': [65]}))
])
def test_filter_df_by_case(df, case, expected):
...
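The excerpt ends before the body of filter_df_by_case, so the function under test is not shown. A minimal standalone sketch that would satisfy the parametrized expectations above (a hypothetical re-implementation for illustration, not the plugin's actual code) could look like this:

# Hypothetical stand-in for Plugin.filter_df_by_case, written only to match
# the test cases above; the real implementation may differ.
import pandas as pd

def filter_df_by_case(df, case):
    """Return ts/value rows for one case; '__overall__' keeps every row."""
    if case == '__overall__':
        return df[['ts', 'value']]
    return df.loc[df['tag'] == case, ['ts', 'value']].reset_index(drop=True)

DF = pd.DataFrame({'ts': [0, 1, 2, 3, 4, 5, 6, 7, 8, 9],
                   'value': [43, 75, 12, 65, 24, 65, 41, 87, 15, 62],
                   'tag': ['foo', 'bar', 'foo', '', '', 'null', '', 'not_null', '', 'foo']})

print(filter_df_by_case(DF, 'foo'))   # rows with ts 0, 2, 9
print(filter_df_by_case(DF, 'null'))  # single row with ts 5

The '__overall__' sentinel keeps all rows while any other case name selects rows by exact match on the tag column, which is consistent with 'null' matching only the row tagged 'null' and not the one tagged 'not_null'.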