Best Syzkaller code snippet using main.orderedStats
queries.go
Source:queries.go
...52type monitoringQuery struct {53 query string54 orderedResourceLabels []string55 orderedMetricLabels []string56 orderedStats []queryStat57 Enabled func(c *Config) bool58}59var queries = []monitoringQuery{60 {61 query: "SELECT HOST, SUM(CASE WHEN ACTIVE_STATUS = 'YES' THEN 1 ELSE 0 END) AS active_services, SUM(CASE WHEN ACTIVE_STATUS = 'YES' THEN 0 ELSE 1 END) AS inactive_services FROM SYS.M_SERVICES GROUP BY HOST",62 orderedResourceLabels: []string{"host"},63 orderedStats: []queryStat{64 {65 key: "active_services",66 addMetricFunction: func(mb *metadata.MetricsBuilder, now pcommon.Timestamp, val string,67 row map[string]string) {68 mb.RecordSaphanaServiceCountDataPoint(now, val, metadata.AttributeServiceStatusActive)69 },70 },71 {72 key: "inactive_services",73 addMetricFunction: func(mb *metadata.MetricsBuilder, now pcommon.Timestamp, val string,74 row map[string]string) {75 mb.RecordSaphanaServiceCountDataPoint(now, val, metadata.AttributeServiceStatusInactive)76 },77 },78 },79 Enabled: func(c *Config) bool {80 return c.Metrics.SaphanaServiceCount.Enabled81 },82 },83 {84 query: "SELECT HOST, SUM(CASE WHEN IS_ACTIVE = 'TRUE' THEN 1 ELSE 0 END) AS active_threads, SUM(CASE WHEN IS_ACTIVE = 'TRUE' THEN 0 ELSE 1 END) AS inactive_threads FROM SYS.M_SERVICE_THREADS GROUP BY HOST",85 orderedResourceLabels: []string{"host"},86 orderedStats: []queryStat{87 {88 key: "active_threads",89 addMetricFunction: func(mb *metadata.MetricsBuilder, now pcommon.Timestamp, val string,90 row map[string]string) {91 mb.RecordSaphanaServiceThreadCountDataPoint(now, val, metadata.AttributeThreadStatusActive)92 },93 },94 {95 key: "inactive_threads",96 addMetricFunction: func(mb *metadata.MetricsBuilder, now pcommon.Timestamp, val string,97 row map[string]string) {98 mb.RecordSaphanaServiceThreadCountDataPoint(now, val, metadata.AttributeThreadStatusInactive)99 },100 },101 },102 Enabled: func(c *Config) bool {103 return c.Metrics.SaphanaServiceThreadCount.Enabled104 },105 },106 {107 query: 
"SELECT HOST, SUM(MAIN_MEMORY_SIZE_IN_DATA) AS \"mem_main_data\", SUM(MAIN_MEMORY_SIZE_IN_DICT) AS \"mem_main_dict\", SUM(MAIN_MEMORY_SIZE_IN_INDEX) AS \"mem_main_index\", SUM(MAIN_MEMORY_SIZE_IN_MISC) AS \"mem_main_misc\", SUM(DELTA_MEMORY_SIZE_IN_DATA) AS \"mem_delta_data\", SUM(DELTA_MEMORY_SIZE_IN_DICT) AS \"mem_delta_dict\", SUM(DELTA_MEMORY_SIZE_IN_INDEX) AS \"mem_delta_index\", SUM(DELTA_MEMORY_SIZE_IN_MISC) AS \"mem_delta_misc\" FROM M_CS_ALL_COLUMNS GROUP BY HOST",108 orderedResourceLabels: []string{"host"},109 orderedStats: []queryStat{110 {111 key: "main_data",112 addMetricFunction: func(mb *metadata.MetricsBuilder, now pcommon.Timestamp, val string,113 row map[string]string) {114 mb.RecordSaphanaColumnMemoryUsedDataPoint(now, val, metadata.AttributeColumnMemoryTypeMain, metadata.AttributeColumnMemorySubtypeData)115 },116 },117 {118 key: "main_dict",119 addMetricFunction: func(mb *metadata.MetricsBuilder, now pcommon.Timestamp, val string,120 row map[string]string) {121 mb.RecordSaphanaColumnMemoryUsedDataPoint(now, val, metadata.AttributeColumnMemoryTypeMain, metadata.AttributeColumnMemorySubtypeDict)122 },123 },124 {125 key: "main_index",126 addMetricFunction: func(mb *metadata.MetricsBuilder, now pcommon.Timestamp, val string,127 row map[string]string) {128 mb.RecordSaphanaColumnMemoryUsedDataPoint(now, val, metadata.AttributeColumnMemoryTypeMain, metadata.AttributeColumnMemorySubtypeIndex)129 },130 },131 {132 key: "main_misc",133 addMetricFunction: func(mb *metadata.MetricsBuilder, now pcommon.Timestamp, val string,134 row map[string]string) {135 mb.RecordSaphanaColumnMemoryUsedDataPoint(now, val, metadata.AttributeColumnMemoryTypeMain, metadata.AttributeColumnMemorySubtypeMisc)136 },137 },138 {139 key: "delta_data",140 addMetricFunction: func(mb *metadata.MetricsBuilder, now pcommon.Timestamp, val string,141 row map[string]string) {142 mb.RecordSaphanaColumnMemoryUsedDataPoint(now, val, metadata.AttributeColumnMemoryTypeDelta, 
metadata.AttributeColumnMemorySubtypeData)143 },144 },145 {146 key: "delta_dict",147 addMetricFunction: func(mb *metadata.MetricsBuilder, now pcommon.Timestamp, val string,148 row map[string]string) {149 mb.RecordSaphanaColumnMemoryUsedDataPoint(now, val, metadata.AttributeColumnMemoryTypeDelta, metadata.AttributeColumnMemorySubtypeDict)150 },151 },152 {153 key: "delta_index",154 addMetricFunction: func(mb *metadata.MetricsBuilder, now pcommon.Timestamp, val string,155 row map[string]string) {156 mb.RecordSaphanaColumnMemoryUsedDataPoint(now, val, metadata.AttributeColumnMemoryTypeDelta, metadata.AttributeColumnMemorySubtypeIndex)157 },158 },159 {160 key: "delta_misc",161 addMetricFunction: func(mb *metadata.MetricsBuilder, now pcommon.Timestamp, val string,162 row map[string]string) {163 mb.RecordSaphanaColumnMemoryUsedDataPoint(now, val, metadata.AttributeColumnMemoryTypeDelta, metadata.AttributeColumnMemorySubtypeMisc)164 },165 },166 },167 Enabled: func(c *Config) bool {168 return c.Metrics.SaphanaColumnMemoryUsed.Enabled169 },170 },171 {172 query: "SELECT HOST, SUM(USED_FIXED_PART_SIZE) fixed, SUM(USED_VARIABLE_PART_SIZE) variable FROM SYS.M_RS_TABLES GROUP BY HOST",173 orderedResourceLabels: []string{"host"},174 orderedStats: []queryStat{175 {176 key: "fixed",177 addMetricFunction: func(mb *metadata.MetricsBuilder, now pcommon.Timestamp, val string,178 row map[string]string) {179 mb.RecordSaphanaRowStoreMemoryUsedDataPoint(now, val, metadata.AttributeRowMemoryTypeFixed)180 },181 },182 {183 key: "variable",184 addMetricFunction: func(mb *metadata.MetricsBuilder, now pcommon.Timestamp, val string,185 row map[string]string) {186 mb.RecordSaphanaRowStoreMemoryUsedDataPoint(now, val, metadata.AttributeRowMemoryTypeVariable)187 },188 },189 },190 Enabled: func(c *Config) bool {191 return c.Metrics.SaphanaRowStoreMemoryUsed.Enabled192 },193 },194 {195 query: "SELECT HOST, COMPONENT, sum(USED_MEMORY_SIZE) used_mem_size FROM SYS.M_SERVICE_COMPONENT_MEMORY GROUP BY HOST, 
COMPONENT",196 orderedResourceLabels: []string{"host"},197 orderedStats: []queryStat{198 {199 key: "used_mem_size",200 addMetricFunction: func(mb *metadata.MetricsBuilder, now pcommon.Timestamp, val string,201 row map[string]string) {202 mb.RecordSaphanaComponentMemoryUsedDataPoint(now, val, row["component"])203 },204 },205 },206 Enabled: func(c *Config) bool {207 return c.Metrics.SaphanaComponentMemoryUsed.Enabled208 },209 },210 {211 query: "SELECT HOST, CONNECTION_STATUS, COUNT(*) AS connections FROM SYS.M_CONNECTIONS WHERE CONNECTION_STATUS != '' GROUP BY HOST, CONNECTION_STATUS",212 orderedResourceLabels: []string{"host"},213 orderedMetricLabels: []string{"connection_status"},214 orderedStats: []queryStat{215 {216 key: "connections",217 addMetricFunction: func(mb *metadata.MetricsBuilder, now pcommon.Timestamp, val string,218 row map[string]string) {219 mb.RecordSaphanaConnectionCountDataPoint(now, val,220 metadata.MapAttributeConnectionStatus[strings.ToLower(row["connection_status"])])221 },222 },223 },224 Enabled: func(c *Config) bool {225 return c.Metrics.SaphanaConnectionCount.Enabled226 },227 },228 {229 query: "SELECT seconds_between(CURRENT_TIMESTAMP, UTC_START_TIME) age FROM SYS.M_BACKUP_CATALOG WHERE STATE_NAME = 'successful' ORDER BY UTC_START_TIME DESC LIMIT 1",230 orderedMetricLabels: []string{},231 orderedStats: []queryStat{232 {233 key: "age",234 addMetricFunction: func(mb *metadata.MetricsBuilder, now pcommon.Timestamp, val string,235 row map[string]string) {236 mb.RecordSaphanaBackupLatestDataPoint(now, val)237 },238 },239 },240 Enabled: func(c *Config) bool {241 return c.Metrics.SaphanaBackupLatest.Enabled242 },243 },244 {245 query: "SELECT HOST, SYSTEM_ID, DATABASE_NAME, seconds_between(START_TIME, CURRENT_TIMESTAMP) age FROM SYS.M_DATABASE",246 orderedResourceLabels: []string{"host"},247 orderedMetricLabels: []string{"system", "database"},248 orderedStats: []queryStat{249 {250 key: "age",251 addMetricFunction: func(mb *metadata.MetricsBuilder, 
now pcommon.Timestamp, val string,252 row map[string]string) {253 mb.RecordSaphanaUptimeDataPoint(now, val, row["system"], row["database"])254 },255 },256 },257 Enabled: func(c *Config) bool {258 return c.Metrics.SaphanaUptime.Enabled259 },260 },261 {262 query: "SELECT ALERT_RATING, COUNT(*) AS alerts FROM _SYS_STATISTICS.STATISTICS_CURRENT_ALERTS GROUP BY ALERT_RATING",263 orderedMetricLabels: []string{"alert_rating"},264 orderedStats: []queryStat{265 {266 key: "alerts",267 addMetricFunction: func(mb *metadata.MetricsBuilder, now pcommon.Timestamp, val string,268 row map[string]string) {269 mb.RecordSaphanaAlertCountDataPoint(now, val, row["alert_rating"])270 },271 },272 },273 Enabled: func(c *Config) bool {274 return c.Metrics.SaphanaAlertCount.Enabled275 },276 },277 {278 query: "SELECT HOST, SUM(UPDATE_TRANSACTION_COUNT) updates, SUM(COMMIT_COUNT) commits, SUM(ROLLBACK_COUNT) rollbacks FROM SYS.M_WORKLOAD GROUP BY HOST",279 orderedResourceLabels: []string{"host"},280 orderedStats: []queryStat{281 {282 key: "updates",283 addMetricFunction: func(mb *metadata.MetricsBuilder, now pcommon.Timestamp, val string,284 row map[string]string) {285 mb.RecordSaphanaTransactionCountDataPoint(now, val, metadata.AttributeTransactionTypeUpdate)286 },287 },288 {289 key: "commits",290 addMetricFunction: func(mb *metadata.MetricsBuilder, now pcommon.Timestamp, val string,291 row map[string]string) {292 mb.RecordSaphanaTransactionCountDataPoint(now, val, metadata.AttributeTransactionTypeCommit)293 },294 },295 {296 key: "rollbacks",297 addMetricFunction: func(mb *metadata.MetricsBuilder, now pcommon.Timestamp, val string,298 row map[string]string) {299 mb.RecordSaphanaTransactionCountDataPoint(now, val, metadata.AttributeTransactionTypeRollback)300 },301 },302 },303 Enabled: func(c *Config) bool {304 return c.Metrics.SaphanaTransactionCount.Enabled305 },306 },307 {308 query: "SELECT HOST, COUNT(*) blocks FROM SYS.M_BLOCKED_TRANSACTIONS GROUP BY HOST",309 orderedResourceLabels: 
[]string{"host"},310 orderedStats: []queryStat{311 {312 key: "blocks",313 addMetricFunction: func(mb *metadata.MetricsBuilder, now pcommon.Timestamp, val string,314 row map[string]string) {315 mb.RecordSaphanaTransactionBlockedDataPoint(now, val)316 },317 },318 },319 Enabled: func(c *Config) bool {320 return c.Metrics.SaphanaTransactionBlocked.Enabled321 },322 },323 {324 query: "SELECT HOST, \"PATH\", USAGE_TYPE, TOTAL_SIZE-USED_SIZE free_size, USED_SIZE FROM SYS.M_DISKS",325 orderedResourceLabels: []string{"host"},326 orderedMetricLabels: []string{"path", "usage_type"},327 orderedStats: []queryStat{328 {329 key: "free_size",330 addMetricFunction: func(mb *metadata.MetricsBuilder, now pcommon.Timestamp, val string,331 row map[string]string) {332 mb.RecordSaphanaDiskSizeCurrentDataPoint(now, val, row["path"], row["usage_type"], metadata.AttributeDiskStateUsedFreeFree)333 },334 },335 {336 key: "used_size",337 addMetricFunction: func(mb *metadata.MetricsBuilder, now pcommon.Timestamp, val string,338 row map[string]string) {339 mb.RecordSaphanaDiskSizeCurrentDataPoint(now, val, row["path"], row["usage_type"], metadata.AttributeDiskStateUsedFreeUsed)340 },341 },342 },343 Enabled: func(c *Config) bool {344 return c.Metrics.SaphanaDiskSizeCurrent.Enabled345 },346 },347 {348 query: "SELECT SYSTEM_ID, PRODUCT_NAME, PRODUCT_LIMIT, PRODUCT_USAGE, seconds_between(CURRENT_TIMESTAMP, EXPIRATION_DATE) expiration FROM SYS.M_LICENSES",349 orderedMetricLabels: []string{"system", "product"},350 orderedStats: []queryStat{351 {352 key: "limit",353 addMetricFunction: func(mb *metadata.MetricsBuilder, now pcommon.Timestamp, val string,354 row map[string]string) {355 mb.RecordSaphanaLicenseLimitDataPoint(now, val, row["system"], row["product"])356 },357 },358 {359 key: "usage",360 addMetricFunction: func(mb *metadata.MetricsBuilder, now pcommon.Timestamp, val string,361 row map[string]string) {362 mb.RecordSaphanaLicensePeakDataPoint(now, val, row["system"], row["product"])363 },364 },365 
{366 key: "expiration",367 addMetricFunction: func(mb *metadata.MetricsBuilder, now pcommon.Timestamp, val string,368 row map[string]string) {369 mb.RecordSaphanaLicenseExpirationTimeDataPoint(now, val, row["system"], row["product"])370 },371 },372 },373 Enabled: func(c *Config) bool {374 return c.Metrics.SaphanaLicenseExpirationTime.Enabled ||375 c.Metrics.SaphanaLicenseLimit.Enabled ||376 c.Metrics.SaphanaLicensePeak.Enabled377 },378 },379 {380 query: "SELECT HOST, PORT, SECONDARY_HOST, REPLICATION_MODE, BACKLOG_SIZE, BACKLOG_TIME, TO_VARCHAR(TO_DECIMAL(IFNULL(MAP(SHIPPED_LOG_BUFFERS_COUNT, 0, 0, SHIPPED_LOG_BUFFERS_DURATION / SHIPPED_LOG_BUFFERS_COUNT), 0), 10, 2)) avg_replication_time FROM SYS.M_SERVICE_REPLICATION",381 orderedMetricLabels: []string{"host", "port", "secondary", "mode"},382 orderedStats: []queryStat{383 {384 key: "backlog_size",385 addMetricFunction: func(mb *metadata.MetricsBuilder, now pcommon.Timestamp, val string,386 row map[string]string) {387 mb.RecordSaphanaReplicationBacklogSizeDataPoint(now, val, row["host"], row["secondary"], row["port"], row["mode"])388 },389 },390 {391 key: "backlog_time",392 addMetricFunction: func(mb *metadata.MetricsBuilder, now pcommon.Timestamp, val string,393 row map[string]string) {394 mb.RecordSaphanaReplicationBacklogTimeDataPoint(now, val, row["host"], row["secondary"], row["port"], row["mode"])395 },396 },397 {398 key: "average_time",399 addMetricFunction: func(mb *metadata.MetricsBuilder, now pcommon.Timestamp, val string,400 row map[string]string) {401 mb.RecordSaphanaReplicationAverageTimeDataPoint(now, val, row["host"], row["secondary"], row["port"], row["mode"])402 },403 },404 },405 Enabled: func(c *Config) bool {406 return c.Metrics.SaphanaReplicationAverageTime.Enabled ||407 c.Metrics.SaphanaReplicationBacklogSize.Enabled ||408 c.Metrics.SaphanaReplicationBacklogTime.Enabled409 },410 },411 {412 query: "SELECT HOST, SUM(FINISHED_NON_INTERNAL_REQUEST_COUNT) \"external\", 
SUM(ALL_FINISHED_REQUEST_COUNT-FINISHED_NON_INTERNAL_REQUEST_COUNT) internal, SUM(ACTIVE_REQUEST_COUNT) active, SUM(PENDING_REQUEST_COUNT) pending, TO_VARCHAR(TO_DECIMAL(AVG(RESPONSE_TIME), 10, 2)) avg_time FROM SYS.M_SERVICE_STATISTICS WHERE ACTIVE_REQUEST_COUNT > -1 GROUP BY HOST",413 orderedResourceLabels: []string{"host"},414 orderedStats: []queryStat{415 {416 key: "external",417 addMetricFunction: func(mb *metadata.MetricsBuilder, now pcommon.Timestamp, val string,418 row map[string]string) {419 mb.RecordSaphanaNetworkRequestFinishedCountDataPoint(now, val, metadata.AttributeInternalExternalRequestTypeExternal)420 },421 },422 {423 key: "internal",424 addMetricFunction: func(mb *metadata.MetricsBuilder, now pcommon.Timestamp, val string,425 row map[string]string) {426 mb.RecordSaphanaNetworkRequestFinishedCountDataPoint(now, val, metadata.AttributeInternalExternalRequestTypeInternal)427 },428 },429 {430 key: "active",431 addMetricFunction: func(mb *metadata.MetricsBuilder, now pcommon.Timestamp, val string,432 row map[string]string) {433 mb.RecordSaphanaNetworkRequestCountDataPoint(now, val, metadata.AttributeActivePendingRequestStateActive)434 },435 },436 {437 key: "pending",438 addMetricFunction: func(mb *metadata.MetricsBuilder, now pcommon.Timestamp, val string,439 row map[string]string) {440 mb.RecordSaphanaNetworkRequestCountDataPoint(now, val, metadata.AttributeActivePendingRequestStatePending)441 },442 },443 {444 key: "avg_time",445 addMetricFunction: func(mb *metadata.MetricsBuilder, now pcommon.Timestamp, val string,446 row map[string]string) {447 mb.RecordSaphanaNetworkRequestAverageTimeDataPoint(now, val)448 },449 },450 },451 Enabled: func(c *Config) bool {452 return c.Metrics.SaphanaNetworkRequestFinishedCount.Enabled ||453 c.Metrics.SaphanaNetworkRequestCount.Enabled ||454 c.Metrics.SaphanaNetworkRequestAverageTime.Enabled455 },456 },457 {458 query: "SELECT HOST, \"PATH\", \"TYPE\", SUM(TOTAL_READS) \"reads\", SUM(TOTAL_WRITES) writes, 
SUM(TOTAL_READ_SIZE) read_size, SUM(TOTAL_WRITE_SIZE) write_size, SUM(TOTAL_READ_TIME) read_time, SUM(TOTAL_WRITE_TIME) write_time FROM SYS.M_VOLUME_IO_TOTAL_STATISTICS GROUP BY HOST, \"PATH\", \"TYPE\"",459 orderedResourceLabels: []string{"host"},460 orderedMetricLabels: []string{"path", "type"},461 orderedStats: []queryStat{462 {463 key: "reads",464 addMetricFunction: func(mb *metadata.MetricsBuilder, now pcommon.Timestamp, val string,465 row map[string]string) {466 mb.RecordSaphanaVolumeOperationCountDataPoint(now, val, row["path"], row["type"], metadata.AttributeVolumeOperationTypeRead)467 },468 },469 {470 key: "writes",471 addMetricFunction: func(mb *metadata.MetricsBuilder, now pcommon.Timestamp, val string,472 row map[string]string) {473 mb.RecordSaphanaVolumeOperationCountDataPoint(now, val, row["path"], row["type"], metadata.AttributeVolumeOperationTypeWrite)474 },475 },476 {477 key: "read_size",478 addMetricFunction: func(mb *metadata.MetricsBuilder, now pcommon.Timestamp, val string,479 row map[string]string) {480 mb.RecordSaphanaVolumeOperationSizeDataPoint(now, val, row["path"], row["type"], metadata.AttributeVolumeOperationTypeRead)481 },482 },483 {484 key: "write_size",485 addMetricFunction: func(mb *metadata.MetricsBuilder, now pcommon.Timestamp, val string,486 row map[string]string) {487 mb.RecordSaphanaVolumeOperationSizeDataPoint(now, val, row["path"], row["type"], metadata.AttributeVolumeOperationTypeWrite)488 },489 },490 {491 key: "read_time",492 addMetricFunction: func(mb *metadata.MetricsBuilder, now pcommon.Timestamp, val string,493 row map[string]string) {494 mb.RecordSaphanaVolumeOperationTimeDataPoint(now, val, row["path"], row["type"], metadata.AttributeVolumeOperationTypeRead)495 },496 },497 {498 key: "write_time",499 addMetricFunction: func(mb *metadata.MetricsBuilder, now pcommon.Timestamp, val string,500 row map[string]string) {501 mb.RecordSaphanaVolumeOperationTimeDataPoint(now, val, row["path"], row["type"], 
metadata.AttributeVolumeOperationTypeWrite)502 },503 },504 },505 Enabled: func(c *Config) bool {506 return c.Metrics.SaphanaVolumeOperationCount.Enabled ||507 c.Metrics.SaphanaVolumeOperationSize.Enabled ||508 c.Metrics.SaphanaVolumeOperationTime.Enabled509 },510 },511 {512 query: "SELECT HOST, SERVICE_NAME, LOGICAL_MEMORY_SIZE, PHYSICAL_MEMORY_SIZE, CODE_SIZE, STACK_SIZE, HEAP_MEMORY_ALLOCATED_SIZE-HEAP_MEMORY_USED_SIZE heap_free, HEAP_MEMORY_USED_SIZE, SHARED_MEMORY_ALLOCATED_SIZE-SHARED_MEMORY_USED_SIZE shared_free, SHARED_MEMORY_USED_SIZE, COMPACTORS_ALLOCATED_SIZE, COMPACTORS_FREEABLE_SIZE, ALLOCATION_LIMIT, EFFECTIVE_ALLOCATION_LIMIT FROM SYS.M_SERVICE_MEMORY",513 orderedResourceLabels: []string{"host"},514 orderedMetricLabels: []string{"service"},515 orderedStats: []queryStat{516 {517 key: "logical_used",518 addMetricFunction: func(mb *metadata.MetricsBuilder, now pcommon.Timestamp, val string,519 row map[string]string) {520 mb.RecordSaphanaServiceMemoryUsedDataPoint(now, val, row["service"], metadata.AttributeServiceMemoryUsedTypeLogical)521 },522 },523 {524 key: "physical_used",525 addMetricFunction: func(mb *metadata.MetricsBuilder, now pcommon.Timestamp, val string,526 row map[string]string) {527 mb.RecordSaphanaServiceMemoryUsedDataPoint(now, val, row["service"], metadata.AttributeServiceMemoryUsedTypePhysical)528 },529 },530 {531 key: "code_size",532 addMetricFunction: func(mb *metadata.MetricsBuilder, now pcommon.Timestamp, val string,533 row map[string]string) {534 mb.RecordSaphanaServiceCodeSizeDataPoint(now, val, row["service"])535 },536 },537 {538 key: "stack_size",539 addMetricFunction: func(mb *metadata.MetricsBuilder, now pcommon.Timestamp, val string,540 row map[string]string) {541 mb.RecordSaphanaServiceStackSizeDataPoint(now, val, row["service"])542 },543 },544 {545 key: "heap_free",546 addMetricFunction: func(mb *metadata.MetricsBuilder, now pcommon.Timestamp, val string,547 row map[string]string) {548 
mb.RecordSaphanaServiceMemoryHeapCurrentDataPoint(now, val, row["service"], metadata.AttributeMemoryStateUsedFreeFree)549 },550 },551 {552 key: "heap_used",553 addMetricFunction: func(mb *metadata.MetricsBuilder, now pcommon.Timestamp, val string,554 row map[string]string) {555 mb.RecordSaphanaServiceMemoryHeapCurrentDataPoint(now, val, row["service"], metadata.AttributeMemoryStateUsedFreeUsed)556 },557 },558 {559 key: "shared_free",560 addMetricFunction: func(mb *metadata.MetricsBuilder, now pcommon.Timestamp, val string,561 row map[string]string) {562 mb.RecordSaphanaServiceMemorySharedCurrentDataPoint(now, val, row["service"], metadata.AttributeMemoryStateUsedFreeFree)563 },564 },565 {566 key: "shared_used",567 addMetricFunction: func(mb *metadata.MetricsBuilder, now pcommon.Timestamp, val string,568 row map[string]string) {569 mb.RecordSaphanaServiceMemorySharedCurrentDataPoint(now, val, row["service"], metadata.AttributeMemoryStateUsedFreeUsed)570 },571 },572 {573 key: "compactors_allocated",574 addMetricFunction: func(mb *metadata.MetricsBuilder, now pcommon.Timestamp, val string,575 row map[string]string) {576 mb.RecordSaphanaServiceMemoryCompactorsAllocatedDataPoint(now, val, row["service"])577 },578 },579 {580 key: "compactors_freeable",581 addMetricFunction: func(mb *metadata.MetricsBuilder, now pcommon.Timestamp, val string,582 row map[string]string) {583 mb.RecordSaphanaServiceMemoryCompactorsFreeableDataPoint(now, val, row["service"])584 },585 },586 {587 key: "allocation_limit",588 addMetricFunction: func(mb *metadata.MetricsBuilder, now pcommon.Timestamp, val string,589 row map[string]string) {590 mb.RecordSaphanaServiceMemoryLimitDataPoint(now, val, row["service"])591 },592 },593 {594 key: "effective_limit",595 addMetricFunction: func(mb *metadata.MetricsBuilder, now pcommon.Timestamp, val string,596 row map[string]string) {597 mb.RecordSaphanaServiceMemoryEffectiveLimitDataPoint(now, val, row["service"])598 },599 },600 },601 Enabled: func(c *Config) 
bool {602 return c.Metrics.SaphanaServiceMemoryUsed.Enabled ||603 c.Metrics.SaphanaServiceCodeSize.Enabled ||604 c.Metrics.SaphanaServiceStackSize.Enabled ||605 c.Metrics.SaphanaServiceMemoryHeapCurrent.Enabled ||606 c.Metrics.SaphanaServiceMemorySharedCurrent.Enabled ||607 c.Metrics.SaphanaServiceMemoryCompactorsAllocated.Enabled ||608 c.Metrics.SaphanaServiceMemoryCompactorsFreeable.Enabled ||609 c.Metrics.SaphanaServiceMemoryLimit.Enabled ||610 c.Metrics.SaphanaServiceMemoryEffectiveLimit.Enabled611 },612 },613 {614 query: "SELECT HOST, SCHEMA_NAME, SUM(ESTIMATED_MAX_MEMORY_SIZE_IN_TOTAL) estimated_max, SUM(LAST_COMPRESSED_RECORD_COUNT) last_compressed, SUM(READ_COUNT) \"reads\", SUM(WRITE_COUNT) writes, SUM(MERGE_COUNT) merges, SUM(MEMORY_SIZE_IN_MAIN) mem_main, SUM(MEMORY_SIZE_IN_DELTA) mem_delta, SUM(MEMORY_SIZE_IN_HISTORY_MAIN) mem_hist_main, SUM(MEMORY_SIZE_IN_HISTORY_DELTA) mem_hist_delta, SUM(RAW_RECORD_COUNT_IN_MAIN) records_main, SUM(RAW_RECORD_COUNT_IN_DELTA) records_delta, SUM(RAW_RECORD_COUNT_IN_HISTORY_MAIN) records_hist_main, SUM(RAW_RECORD_COUNT_IN_HISTORY_DELTA) records_hist_delta FROM SYS.M_CS_TABLES GROUP BY HOST, SCHEMA_NAME",615 orderedResourceLabels: []string{"host"},616 orderedMetricLabels: []string{"schema"},617 orderedStats: []queryStat{618 {619 key: "estimated_max",620 addMetricFunction: func(mb *metadata.MetricsBuilder, now pcommon.Timestamp, val string,621 row map[string]string) {622 mb.RecordSaphanaSchemaMemoryUsedMaxDataPoint(now, val, row["schema"])623 },624 },625 {626 key: "last_compressed",627 addMetricFunction: func(mb *metadata.MetricsBuilder, now pcommon.Timestamp, val string,628 row map[string]string) {629 mb.RecordSaphanaSchemaRecordCompressedCountDataPoint(now, val, row["schema"])630 },631 },632 {633 key: "reads",634 addMetricFunction: func(mb *metadata.MetricsBuilder, now pcommon.Timestamp, val string,635 row map[string]string) {636 mb.RecordSaphanaSchemaOperationCountDataPoint(now, val, row["schema"], 
metadata.AttributeSchemaOperationTypeRead)637 },638 },639 {640 key: "writes",641 addMetricFunction: func(mb *metadata.MetricsBuilder, now pcommon.Timestamp, val string,642 row map[string]string) {643 mb.RecordSaphanaSchemaOperationCountDataPoint(now, val, row["schema"], metadata.AttributeSchemaOperationTypeWrite)644 },645 },646 {647 key: "merges",648 addMetricFunction: func(mb *metadata.MetricsBuilder, now pcommon.Timestamp, val string,649 row map[string]string) {650 mb.RecordSaphanaSchemaOperationCountDataPoint(now, val, row["schema"], metadata.AttributeSchemaOperationTypeMerge)651 },652 },653 {654 key: "mem_main",655 addMetricFunction: func(mb *metadata.MetricsBuilder, now pcommon.Timestamp, val string,656 row map[string]string) {657 mb.RecordSaphanaSchemaMemoryUsedCurrentDataPoint(now, val, row["schema"], metadata.AttributeSchemaMemoryTypeMain)658 },659 },660 {661 key: "mem_delta",662 addMetricFunction: func(mb *metadata.MetricsBuilder, now pcommon.Timestamp, val string,663 row map[string]string) {664 mb.RecordSaphanaSchemaMemoryUsedCurrentDataPoint(now, val, row["schema"], metadata.AttributeSchemaMemoryTypeDelta)665 },666 },667 {668 key: "mem_history_main",669 addMetricFunction: func(mb *metadata.MetricsBuilder, now pcommon.Timestamp, val string,670 row map[string]string) {671 mb.RecordSaphanaSchemaMemoryUsedCurrentDataPoint(now, val, row["schema"], metadata.AttributeSchemaMemoryTypeHistoryMain)672 },673 },674 {675 key: "mem_history_delta",676 addMetricFunction: func(mb *metadata.MetricsBuilder, now pcommon.Timestamp, val string,677 row map[string]string) {678 mb.RecordSaphanaSchemaMemoryUsedCurrentDataPoint(now, val, row["schema"], metadata.AttributeSchemaMemoryTypeHistoryDelta)679 },680 },681 {682 key: "records_main",683 addMetricFunction: func(mb *metadata.MetricsBuilder, now pcommon.Timestamp, val string,684 row map[string]string) {685 mb.RecordSaphanaSchemaRecordCountDataPoint(now, val, row["schema"], metadata.AttributeSchemaRecordTypeMain)686 },687 },688 
{689 key: "records_delta",690 addMetricFunction: func(mb *metadata.MetricsBuilder, now pcommon.Timestamp, val string,691 row map[string]string) {692 mb.RecordSaphanaSchemaRecordCountDataPoint(now, val, row["schema"], metadata.AttributeSchemaRecordTypeDelta)693 },694 },695 {696 key: "records_history_main",697 addMetricFunction: func(mb *metadata.MetricsBuilder, now pcommon.Timestamp, val string,698 row map[string]string) {699 mb.RecordSaphanaSchemaRecordCountDataPoint(now, val, row["schema"], metadata.AttributeSchemaRecordTypeHistoryMain)700 },701 },702 {703 key: "records_history_delta",704 addMetricFunction: func(mb *metadata.MetricsBuilder, now pcommon.Timestamp, val string,705 row map[string]string) {706 mb.RecordSaphanaSchemaRecordCountDataPoint(now, val, row["schema"], metadata.AttributeSchemaRecordTypeHistoryDelta)707 },708 },709 },710 Enabled: func(c *Config) bool {711 return c.Metrics.SaphanaSchemaMemoryUsedMax.Enabled ||712 c.Metrics.SaphanaSchemaRecordCompressedCount.Enabled ||713 c.Metrics.SaphanaSchemaOperationCount.Enabled ||714 c.Metrics.SaphanaSchemaMemoryUsedCurrent.Enabled ||715 c.Metrics.SaphanaSchemaRecordCount.Enabled716 },717 },718 {719 query: "SELECT HOST, FREE_PHYSICAL_MEMORY, USED_PHYSICAL_MEMORY, FREE_SWAP_SPACE, USED_SWAP_SPACE, INSTANCE_TOTAL_MEMORY_USED_SIZE, INSTANCE_TOTAL_MEMORY_PEAK_USED_SIZE, INSTANCE_TOTAL_MEMORY_ALLOCATED_SIZE-INSTANCE_TOTAL_MEMORY_USED_SIZE total_free, INSTANCE_CODE_SIZE, INSTANCE_SHARED_MEMORY_ALLOCATED_SIZE, TOTAL_CPU_USER_TIME, TOTAL_CPU_SYSTEM_TIME, TOTAL_CPU_WIO_TIME, TOTAL_CPU_IDLE_TIME FROM SYS.M_HOST_RESOURCE_UTILIZATION",720 orderedResourceLabels: []string{"host"},721 orderedStats: []queryStat{722 {723 key: "free_physical_memory",724 addMetricFunction: func(mb *metadata.MetricsBuilder, now pcommon.Timestamp, val string,725 row map[string]string) {726 mb.RecordSaphanaHostMemoryCurrentDataPoint(now, val, metadata.AttributeMemoryStateUsedFreeFree)727 },728 },729 {730 key: "used_physical_memory",731 
addMetricFunction: func(mb *metadata.MetricsBuilder, now pcommon.Timestamp, val string,732 row map[string]string) {733 mb.RecordSaphanaHostMemoryCurrentDataPoint(now, val, metadata.AttributeMemoryStateUsedFreeUsed)734 },735 },736 {737 key: "free_swap_space",738 addMetricFunction: func(mb *metadata.MetricsBuilder, now pcommon.Timestamp, val string,739 row map[string]string) {740 mb.RecordSaphanaHostSwapCurrentDataPoint(now, val, metadata.AttributeHostSwapStateFree)741 },742 },743 {744 key: "used_swap_space",745 addMetricFunction: func(mb *metadata.MetricsBuilder, now pcommon.Timestamp, val string,746 row map[string]string) {747 mb.RecordSaphanaHostSwapCurrentDataPoint(now, val, metadata.AttributeHostSwapStateUsed)748 },749 },750 {751 key: "instance_total_used",752 addMetricFunction: func(mb *metadata.MetricsBuilder, now pcommon.Timestamp, val string,753 row map[string]string) {754 mb.RecordSaphanaInstanceMemoryCurrentDataPoint(now, val, metadata.AttributeMemoryStateUsedFreeUsed)755 },756 },757 {758 key: "instance_total_used_peak",759 addMetricFunction: func(mb *metadata.MetricsBuilder, now pcommon.Timestamp, val string,760 row map[string]string) {761 mb.RecordSaphanaInstanceMemoryUsedPeakDataPoint(now, val)762 },763 },764 {765 key: "instance_total_free",766 addMetricFunction: func(mb *metadata.MetricsBuilder, now pcommon.Timestamp, val string,767 row map[string]string) {768 mb.RecordSaphanaInstanceMemoryCurrentDataPoint(now, val, metadata.AttributeMemoryStateUsedFreeFree)769 },770 },771 {772 key: "instance_code_size",773 addMetricFunction: func(mb *metadata.MetricsBuilder, now pcommon.Timestamp, val string,774 row map[string]string) {775 mb.RecordSaphanaInstanceCodeSizeDataPoint(now, val)776 },777 },778 {779 key: "instance_shared_memory_allocated",780 addMetricFunction: func(mb *metadata.MetricsBuilder, now pcommon.Timestamp, val string,781 row map[string]string) {782 mb.RecordSaphanaInstanceMemorySharedAllocatedDataPoint(now, val)783 },784 },785 {786 key: 
"cpu_user",787 addMetricFunction: func(mb *metadata.MetricsBuilder, now pcommon.Timestamp, val string,788 row map[string]string) {789 mb.RecordSaphanaCPUUsedDataPoint(now, val, metadata.AttributeCPUTypeUser)790 },791 },792 {793 key: "cpu_system",794 addMetricFunction: func(mb *metadata.MetricsBuilder, now pcommon.Timestamp, val string,795 row map[string]string) {796 mb.RecordSaphanaCPUUsedDataPoint(now, val, metadata.AttributeCPUTypeSystem)797 },798 },799 {800 key: "cpu_io_wait",801 addMetricFunction: func(mb *metadata.MetricsBuilder, now pcommon.Timestamp, val string,802 row map[string]string) {803 mb.RecordSaphanaCPUUsedDataPoint(now, val, metadata.AttributeCPUTypeIoWait)804 },805 },806 {807 key: "cpu_idle",808 addMetricFunction: func(mb *metadata.MetricsBuilder, now pcommon.Timestamp, val string,809 row map[string]string) {810 mb.RecordSaphanaCPUUsedDataPoint(now, val, metadata.AttributeCPUTypeIdle)811 },812 },813 },814 Enabled: func(c *Config) bool {815 return c.Metrics.SaphanaHostMemoryCurrent.Enabled ||816 c.Metrics.SaphanaHostSwapCurrent.Enabled ||817 c.Metrics.SaphanaInstanceMemoryCurrent.Enabled ||818 c.Metrics.SaphanaInstanceMemoryUsedPeak.Enabled ||819 c.Metrics.SaphanaInstanceCodeSize.Enabled ||820 c.Metrics.SaphanaInstanceMemorySharedAllocated.Enabled ||821 c.Metrics.SaphanaCPUUsed.Enabled822 },823 },824}825func (m *monitoringQuery) CollectMetrics(ctx context.Context, s *sapHanaScraper, client client, now pcommon.Timestamp,826 errs *scrapererror.ScrapeErrors) {827 rows, err := client.collectDataFromQuery(ctx, m)828 if err != nil {829 errs.AddPartial(len(m.orderedStats), err)830 return831 }832 for _, data := range rows {833 for _, stat := range m.orderedStats {834 if err := stat.collectStat(s, m, now, data); err != nil {835 errs.AddPartial(1, err)836 }837 }838 }839}...
orderedStats
Using AI Code Generation
1import (2func main() {3 stats = orderedStats{[]int{1, 2, 3, 4, 5, 6, 7, 8, 9, 10}}4 fmt.Println("Average:", stats.average())5 fmt.Println("Median:", stats.median())6 fmt.Println("Mode:", stats.mode())7 fmt.Println("Sum:", stats.sum())8 fmt.Println("Variance:", stats.variance())9 fmt.Println("Std Dev:", stats.stdDev())10 fmt.Println("Max:", stats.max())11 fmt.Println("Min:", stats.min())12 fmt.Println("Range:", stats.range())13 fmt.Println("Length:", stats.length())14 fmt.Println("Count:", stats.count())15 fmt.Println("Percentile:", stats.percentile(80))16 fmt.Println("Q1:", stats.q1())17 fmt.Println("Q3:", stats.q3())18 fmt.Println("IQR:", stats.iqr())19 fmt.Println("Z-Score:", stats.zScore(90))20 fmt.Println("Z-Score:", stats.zScore(30))21 fmt.Println("Z-Score:", stats.zScore(50))22 fmt.Println("Z-Score:", stats.zScore(70))23 fmt.Println("Z-Score:", stats.zScore(90))24 fmt.Println("Z-Score:", stats.zScore(110))25 fmt.Println("Z-Score:", stats.zScore(130))26 fmt.Println("Z-Score:", stats.zScore(150))27 fmt.Println("Z-Score:", stats.zScore(170))28 fmt.Println("Z-Score:", stats.zScore(190))29 fmt.Println("Z-Score:", stats.zScore(210))30 fmt.Println("Z-Score:", stats.zScore(230))31 fmt.Println("Z-Score:", stats.zScore(250))32 fmt.Println("Z-Score:", stats.zScore(270))33 fmt.Println("Z-Score:", stats.zScore(290))34 fmt.Println("Z-Score:", stats.zScore(310))35 fmt.Println("Z-Score:", stats.zScore(330))36 fmt.Println("Z-Score:", stats.zScore(350))37 fmt.Println("Z-Score:", stats.zScore(370))38 fmt.Println("Z-Score:", stats.zScore(390))39 fmt.Println("Z-Score
orderedStats
Using AI Code Generation
// orderedStats holds a working copy of integer samples for ordered
// statistics. Reconstructed from the fragment's usage: the original
// text repeated six near-identical programs, each with its own
// func main and calls on an undefined orderedStats type, so none of
// it compiled.
type orderedStats struct {
	// data is the sorted copy of the most recently supplied values.
	data []int
}

// orderedStats stores a sorted (ascending) copy of v and returns it.
// The caller's slice is never mutated.
func (o *orderedStats) orderedStats(v []int) []int {
	o.data = append([]int(nil), v...) // clone so sorting cannot alias v
	sort.Ints(o.data)
	return o.data
}

func main() {
	// The six input sets from the original fragment, folded into a
	// single valid program (Go allows only one main per package).
	inputs := [][]int{
		{5, 3, 1, 2, 4},
		{1, 2, 3, 4, 5},
		{1, 1, 1, 1, 1},
		{5, 4, 3, 2, 1},
		{1, 2, 3, 4, 5, 6, 7, 8, 9, 10},
		{10, 9, 8, 7, 6, 5, 4, 3, 2, 1},
	}
	for _, in := range inputs {
		a := new(orderedStats)
		fmt.Println(a.orderedStats(in))
	}
}
orderedStats
Using AI Code Generation
// orderedStats prints the five values of stats in ascending order and
// returns the sorted result. stats is an array, so it is received by
// value and the caller's copy is left untouched. The return value is
// new but backward compatible: existing statement-style calls
// (orderedStats(x)) still compile, since Go lets callers discard
// results.
func orderedStats(stats [5]int) [5]int {
	sort.Ints(stats[:]) // sorts the local copy in place via a slice view
	fmt.Println(stats)
	return stats
}

// sortArray is the manual sort.Interface adapter from the original
// fragment. Its Swap body was empty and Less had no return statement
// there (both compile errors); they are implemented here so that
// sort.Sort(sortArray(s)) behaves exactly like sort.Ints(s).
type sortArray []int

func (s sortArray) Len() int           { return len(s) }
func (s sortArray) Swap(i, j int)      { s[i], s[j] = s[j], s[i] }
func (s sortArray) Less(i, j int) bool { return s[i] < s[j] }

func main() {
	// Single entry point: the fragment declared func main twice,
	// which cannot compile within one package.
	stats := [5]int{1, 2, 3, 4, 5}
	orderedStats(stats)
}
orderedStats
Using AI Code Generation
1stats := new(Stats)2orderedStats := stats.orderedStats()3fmt.Println(orderedStats)4stats := new(Stats)5orderedStats := stats.orderedStats()6fmt.Println(orderedStats)7stats := new(Stats)8orderedStats := stats.orderedStats()9fmt.Println(orderedStats)10stats := new(Stats)11orderedStats := stats.orderedStats()12fmt.Println(orderedStats)13stats := new(Stats)14orderedStats := stats.orderedStats()15fmt.Println(orderedStats)16stats := new(Stats)17orderedStats := stats.orderedStats()18fmt.Println(orderedStats)19stats := new(Stats)20orderedStats := stats.orderedStats()21fmt.Println(orderedStats)22stats := new(Stats)23orderedStats := stats.orderedStats()24fmt.Println(orderedStats)25stats := new(Stats)
orderedStats
Using AI Code Generation
1import java.io.*;2import java.util.*;3import java.util.ArrayList;4import java.util.Collections;5import java.util.Comparator;6import java.util.List;7import java.util.Map;8import java.util.HashMap;9import java.util.Map.Entry;10import java.util.Scanner;11import java.util.Set;12import java.util.TreeMap;13import java.util.TreeSet;14public class OrderedStats {15public static void main(String[] args) {16 OrderedStats os = new OrderedStats();17 Scanner input = new Scanner(System.in);18 System.out.println("Enter the name of the file to be read: ");19 String fileName = input.nextLine();20 File file = new File(fileName);21 Scanner fileInput = new Scanner(file);22 TreeMap<String, Integer> map = new TreeMap<String, Integer>();23 while (fileInput.hasNextLine()) {24 String line = fileInput.nextLine();25 String[] words = line.split(" ");26 for (int i = 0; i < words.length; i++) {27 String word = words[i];28 if (!map.containsKey(word)) {29 map.put(word, 1);30 } else {31 int count = map.get(word);32 count++;33 map.put(word, count);34 }35 }36 }37 fileInput.close();38 ArrayList<String> list = new ArrayList<String>();39 for (Map.Entry<String, Integer> entry : map.entrySet()) {
orderedStats
Using AI Code Generation
1import java.util.*;2public class Path {3public static void main(String[] args) {4Scanner sc = new Scanner(System.in);5String path = sc.nextLine();6String[] file = path.split("/");7String fileName = file[file.length - 1];8String[] fileNameSplit = fileName.split("\\.");9String name = fileNameSplit[0];10String extension = fileNameSplit[1];11String[] splitPath = path.split(name + "." + extension);12String directory = splitPath[0];13System.out.println("Directory : " + directory);14System.out.println("File name : " + name);15System.out.println("Extension : " + extension);16}17}18import java.util.*;19public class Path {20public static void main(String[] args) {21Scanner sc = new Scanner(System.in);22String path = sc.nextLine();23String[] file = path.split("/");24String fileName = file[file.length - 1];25String[] fileNameSplit = fileName.split("\\.");26String name = fileNameSplit[0];27String extension = fileNameSplit[1];28String[] splitPath = path.split(name + "." + extension);29String directory = splitPath[0];30System.out.println("Directory : " + directory);31System.out.println("File name : " + name);32System.out.println("Extension : " + extension);33}34}35import java.util.*;36public class Path {37public static void main(String[] args) {38Scanner sc = new Scanner(System.in);39String path = sc.nextLine();40String[] file = path.split("/");41String fileName = file[file.length - 1];42String[] fileNameSplit = fileName.split("\\.");43String name = fileNameSplit[0];44String extension = fileNameSplit[1];45String[] splitPath = path.split(name + "." + extension);46String directory = splitPath[0];47System.out.println("Directory : " + directory);48System.out.println("File name : " + name);49System.out.println("Extension : " + extension);50}51}52import java.util.*;53public class Path {54public static void main(String[] args) {55Scanner sc = new Scanner(System
orderedStats
Using AI Code Generation
1import java.util.Scanner;2import java.util.Arrays;3public class OrderedStats {4 public static void main(String[] args) {5 int[] array = new int[10];6 Scanner input = new Scanner(System.in);7 System.out.print("Enter 10 numbers: ");8 for (int i = 0; i < array.length; i++) {9 array[i] = input.nextInt();10 }11 Stats stats = new Stats();12 int[] orderedArray = stats.orderedStats(array);13 System.out.println("The ordered stats of the array are: " + Arrays.toString(orderedArray));14 }15}16import java.util.Scanner;17import java.util.Arrays;18public class GetStats {19 public static void main(String[] args) {20 int[] array = new int[10];21 Scanner input = new Scanner(System.in);22 System.out.print("Enter 10 numbers: ");23 for (int i = 0; i < array.length; i++) {24 array[i] = input.nextInt();25 }26 Stats stats = new Stats();27 int[] statsArray = stats.getStats(array);28 System.out.println("The stats of the array are: " + Arrays.toString(statsArray));29 }30}31import
Learn to execute automation testing from scratch with the LambdaTest Learning Hub. Right from setting up the prerequisites to running your first automation test, to following best practices and diving deeper into advanced test scenarios, the LambdaTest Learning Hub compiles step-by-step guides to help you become proficient with different test automation frameworks, such as Selenium, Cypress, and TestNG.
You can also refer to the video tutorials on the LambdaTest YouTube channel for step-by-step demonstrations from industry experts.
Get 100 minutes of automation testing for FREE!