How to use the run method of the metrics package

Best K6 code snippet using metrics.run
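None of the snippets below is a complete program on its own, so here is a minimal, self-contained sketch of the pattern they all revolve around: a value that holds some counters and a run method that updates or reports them. Every name in it (the metrics struct, its requests field, the run method body) is illustrative and not taken from any of the projects quoted below.

package main

import "fmt"

// metrics is a hypothetical holder for whatever counters a program tracks.
type metrics struct {
    requests int
}

// run performs one round of metric collection or reporting.
func (m *metrics) run() {
    m.requests++
    fmt.Printf("requests so far: %d\n", m.requests)
}

func main() {
    m := &metrics{}
    m.run()
    m.run()
}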

metrics_reporter_test.go

Source: metrics_reporter_test.go (GitHub)


...
    workflow := util.NewWorkflow(&workflowapi.Workflow{
        ObjectMeta: metav1.ObjectMeta{
            Namespace: "MY_NAMESPACE",
            Name: "MY_NAME",
            UID: types.UID("run-1"),
        },
        Status: workflowapi.WorkflowStatus{
            Nodes: map[string]workflowapi.NodeStatus{
                "node-1": workflowapi.NodeStatus{
                    ID: "node-1",
                    Phase: workflowapi.NodeRunning,
                },
            },
        },
    })
    err := reporter.ReportMetrics(workflow)
    assert.Nil(t, err)
    assert.Nil(t, pipelineFake.GetReportedMetricsRequest())
}

func TestReportMetrics_NoRunID_NoOP(t *testing.T) {
    pipelineFake := client.NewPipelineClientFake()
    reporter := NewMetricsReporter(pipelineFake)
    workflow := util.NewWorkflow(&workflowapi.Workflow{
        ObjectMeta: metav1.ObjectMeta{
            Namespace: "MY_NAMESPACE",
            Name: "MY_NAME",
            UID: types.UID("run-1"),
        },
        Status: workflowapi.WorkflowStatus{
            Nodes: map[string]workflowapi.NodeStatus{
                "node-1": workflowapi.NodeStatus{
                    ID: "node-1",
                    Phase: workflowapi.NodeSucceeded,
                },
            },
        },
    })
    err := reporter.ReportMetrics(workflow)
    assert.Nil(t, err)
    assert.Nil(t, pipelineFake.GetReadArtifactRequest())
    assert.Nil(t, pipelineFake.GetReportedMetricsRequest())
}

func TestReportMetrics_NoArtifact_NoOP(t *testing.T) {
    pipelineFake := client.NewPipelineClientFake()
    reporter := NewMetricsReporter(pipelineFake)
    workflow := util.NewWorkflow(&workflowapi.Workflow{
        ObjectMeta: metav1.ObjectMeta{
            Namespace: "MY_NAMESPACE",
            Name: "MY_NAME",
            UID: types.UID("run-1"),
            Labels: map[string]string{util.LabelKeyWorkflowRunId: "run-1"},
        },
        Status: workflowapi.WorkflowStatus{
            Nodes: map[string]workflowapi.NodeStatus{
                "node-1": workflowapi.NodeStatus{
                    ID: "node-1",
                    Phase: workflowapi.NodeSucceeded,
                },
            },
        },
    })
    err := reporter.ReportMetrics(workflow)
    assert.Nil(t, err)
    assert.Nil(t, pipelineFake.GetReadArtifactRequest())
    assert.Nil(t, pipelineFake.GetReportedMetricsRequest())
}

func TestReportMetrics_NoMetricsArtifact_NoOP(t *testing.T) {
    pipelineFake := client.NewPipelineClientFake()
    reporter := NewMetricsReporter(pipelineFake)
    workflow := util.NewWorkflow(&workflowapi.Workflow{
        ObjectMeta: metav1.ObjectMeta{
            Namespace: "MY_NAMESPACE",
            Name: "MY_NAME",
            UID: types.UID("run-1"),
            Labels: map[string]string{util.LabelKeyWorkflowRunId: "run-1"},
        },
        Status: workflowapi.WorkflowStatus{
            Nodes: map[string]workflowapi.NodeStatus{
                "node-1": workflowapi.NodeStatus{
                    ID: "node-1",
                    Phase: workflowapi.NodeSucceeded,
                    Outputs: &workflowapi.Outputs{
                        Artifacts: []workflowapi.Artifact{{Name: "mlpipeline-ui-metadata"}},
                    },
                },
            },
        },
    })
    err := reporter.ReportMetrics(workflow)
    assert.Nil(t, err)
    assert.Nil(t, pipelineFake.GetReadArtifactRequest())
    assert.Nil(t, pipelineFake.GetReportedMetricsRequest())
}

func TestReportMetrics_Succeed(t *testing.T) {
    pipelineFake := client.NewPipelineClientFake()
    reporter := NewMetricsReporter(pipelineFake)
    workflow := util.NewWorkflow(&workflowapi.Workflow{
        ObjectMeta: metav1.ObjectMeta{
            Namespace: "MY_NAMESPACE",
            Name: "MY_NAME",
            UID: types.UID("run-1"),
            Labels: map[string]string{util.LabelKeyWorkflowRunId: "run-1"},
        },
        Status: workflowapi.WorkflowStatus{
            Nodes: map[string]workflowapi.NodeStatus{
                "node-1": workflowapi.NodeStatus{
                    ID: "node-1",
                    Phase: workflowapi.NodeSucceeded,
                    Outputs: &workflowapi.Outputs{
                        Artifacts: []workflowapi.Artifact{{Name: "mlpipeline-metrics"}},
                    },
                },
            },
        },
    })
    metricsJSON := `{"metrics": [{"name": "accuracy", "numberValue": 0.77}, {"name": "logloss", "numberValue": 1.2}]}`
    artifactData, _ := util.ArchiveTgz(map[string]string{"file": metricsJSON})
    pipelineFake.StubArtifact(
        &api.ReadArtifactRequest{
            RunId: "run-1",
            NodeId: "node-1",
            ArtifactName: "mlpipeline-metrics",
        },
        &api.ReadArtifactResponse{
            Data: []byte(artifactData),
        })
    pipelineFake.StubReportRunMetrics(&api.ReportRunMetricsResponse{
        Results: []*api.ReportRunMetricsResponse_ReportRunMetricResult{},
    }, nil)
    err := reporter.ReportMetrics(workflow)
    assert.Nil(t, err)
    expectedMetricsRequest := &api.ReportRunMetricsRequest{
        RunId: "run-1",
        Metrics: []*api.RunMetric{
            {
                Name: "accuracy",
                NodeId: "node-1",
                Value: &api.RunMetric_NumberValue{NumberValue: 0.77},
            },
            {
                Name: "logloss",
                NodeId: "node-1",
                Value: &api.RunMetric_NumberValue{NumberValue: 1.2},
            },
        },
    }
    got := pipelineFake.GetReportedMetricsRequest()
    if diff := cmp.Diff(expectedMetricsRequest, got, cmpopts.EquateEmpty(), protocmp.Transform()); diff != "" {
        t.Errorf("parseRuntimeInfo() = %+v, want %+v\nDiff (-want, +got)\n%s", got, expectedMetricsRequest, diff)
        s, _ := json.MarshalIndent(expectedMetricsRequest, "", " ")
        fmt.Printf("Want %s", s)
    }
}

func TestReportMetrics_EmptyArchive_Fail(t *testing.T) {
    pipelineFake := client.NewPipelineClientFake()
    reporter := NewMetricsReporter(pipelineFake)
    workflow := util.NewWorkflow(&workflowapi.Workflow{
        ObjectMeta: metav1.ObjectMeta{
            Namespace: "MY_NAMESPACE",
            Name: "MY_NAME",
            UID: types.UID("run-1"),
            Labels: map[string]string{util.LabelKeyWorkflowRunId: "run-1"},
        },
        Status: workflowapi.WorkflowStatus{
            Nodes: map[string]workflowapi.NodeStatus{
                "node-1": workflowapi.NodeStatus{
                    ID: "node-1",
                    Phase: workflowapi.NodeSucceeded,
                    Outputs: &workflowapi.Outputs{
                        Artifacts: []workflowapi.Artifact{{Name: "mlpipeline-metrics"}},
                    },
                },
            },
        },
    })
    artifactData, _ := util.ArchiveTgz(map[string]string{})
    pipelineFake.StubArtifact(
        &api.ReadArtifactRequest{
            RunId: "run-1",
            NodeId: "node-1",
            ArtifactName: "mlpipeline-metrics",
        },
        &api.ReadArtifactResponse{
            Data: []byte(artifactData),
        })
    err := reporter.ReportMetrics(workflow)
    assert.NotNil(t, err)
    assert.True(t, util.HasCustomCode(err, util.CUSTOM_CODE_PERMANENT))
    // Verify that ReportRunMetrics is not called.
    assert.Nil(t, pipelineFake.GetReportedMetricsRequest())
}

func TestReportMetrics_MultipleFilesInArchive_Fail(t *testing.T) {
    pipelineFake := client.NewPipelineClientFake()
    reporter := NewMetricsReporter(pipelineFake)
    workflow := util.NewWorkflow(&workflowapi.Workflow{
        ObjectMeta: metav1.ObjectMeta{
            Namespace: "MY_NAMESPACE",
            Name: "MY_NAME",
            UID: types.UID("run-1"),
            Labels: map[string]string{util.LabelKeyWorkflowRunId: "run-1"},
        },
        Status: workflowapi.WorkflowStatus{
            Nodes: map[string]workflowapi.NodeStatus{
                "node-1": workflowapi.NodeStatus{
                    ID: "node-1",
                    Phase: workflowapi.NodeSucceeded,
                    Outputs: &workflowapi.Outputs{
                        Artifacts: []workflowapi.Artifact{{Name: "mlpipeline-metrics"}},
                    },
                },
            },
        },
    })
    validMetricsJSON := `{"metrics": [{"name": "accuracy", "numberValue": 0.77}, {"name": "logloss", "numberValue": 1.2}]}`
    invalidMetricsJSON := `invalid JSON`
    artifactData, _ := util.ArchiveTgz(map[string]string{"file1": validMetricsJSON, "file2": invalidMetricsJSON})
    pipelineFake.StubArtifact(
        &api.ReadArtifactRequest{
            RunId: "run-1",
            NodeId: "node-1",
            ArtifactName: "mlpipeline-metrics",
        },
        &api.ReadArtifactResponse{
            Data: []byte(artifactData),
        })
    err := reporter.ReportMetrics(workflow)
    assert.NotNil(t, err)
    assert.True(t, util.HasCustomCode(err, util.CUSTOM_CODE_PERMANENT))
    // Verify that ReportRunMetrics is not called.
    assert.Nil(t, pipelineFake.GetReportedMetricsRequest())
}

func TestReportMetrics_InvalidMetricsJSON_Fail(t *testing.T) {
    pipelineFake := client.NewPipelineClientFake()
    reporter := NewMetricsReporter(pipelineFake)
    workflow := util.NewWorkflow(&workflowapi.Workflow{
        ObjectMeta: metav1.ObjectMeta{
            Namespace: "MY_NAMESPACE",
            Name: "MY_NAME",
            UID: types.UID("run-1"),
            Labels: map[string]string{util.LabelKeyWorkflowRunId: "run-1"},
        },
        Status: workflowapi.WorkflowStatus{
            Nodes: map[string]workflowapi.NodeStatus{
                "node-1": workflowapi.NodeStatus{
                    ID: "node-1",
                    Phase: workflowapi.NodeSucceeded,
                    Outputs: &workflowapi.Outputs{
                        Artifacts: []workflowapi.Artifact{{Name: "mlpipeline-metrics"}},
                    },
                },
            },
        },
    })
    metricsJSON := `invalid JSON`
    artifactData, _ := util.ArchiveTgz(map[string]string{"file": metricsJSON})
    pipelineFake.StubArtifact(
        &api.ReadArtifactRequest{
            RunId: "run-1",
            NodeId: "node-1",
            ArtifactName: "mlpipeline-metrics",
        },
        &api.ReadArtifactResponse{
            Data: []byte(artifactData),
        })
    err := reporter.ReportMetrics(workflow)
    assert.NotNil(t, err)
    assert.True(t, util.HasCustomCode(err, util.CUSTOM_CODE_PERMANENT))
    // Verify that ReportRunMetrics is not called.
    assert.Nil(t, pipelineFake.GetReportedMetricsRequest())
}

func TestReportMetrics_InvalidMetricsJSON_PartialFail(t *testing.T) {
    pipelineFake := client.NewPipelineClientFake()
    reporter := NewMetricsReporter(pipelineFake)
    workflow := util.NewWorkflow(&workflowapi.Workflow{
        ObjectMeta: metav1.ObjectMeta{
            Namespace: "MY_NAMESPACE",
            Name: "MY_NAME",
            UID: types.UID("run-1"),
            Labels: map[string]string{util.LabelKeyWorkflowRunId: "run-1"},
        },
        Status: workflowapi.WorkflowStatus{
            Nodes: map[string]workflowapi.NodeStatus{
                "node-1": workflowapi.NodeStatus{
                    ID: "node-1",
                    Phase: workflowapi.NodeSucceeded,
                    Outputs: &workflowapi.Outputs{
                        Artifacts: []workflowapi.Artifact{{Name: "mlpipeline-metrics"}},
                    },
                },
                "node-2": workflowapi.NodeStatus{
                    ID: "node-2",
                    Phase: workflowapi.NodeSucceeded,
                    Outputs: &workflowapi.Outputs{
                        Artifacts: []workflowapi.Artifact{{Name: "mlpipeline-metrics"}},
                    },
                },
            },
        },
    })
    validMetricsJSON := `{"metrics": [{"name": "accuracy", "numberValue": 0.77}, {"name": "logloss", "numberValue": 1.2}]}`
    invalidMetricsJSON := `invalid JSON`
    validArtifactData, _ := util.ArchiveTgz(map[string]string{"file": validMetricsJSON})
    invalidArtifactData, _ := util.ArchiveTgz(map[string]string{"file": invalidMetricsJSON})
    // Stub two artifacts, node-1 is invalid, node-2 is valid.
    pipelineFake.StubArtifact(
        &api.ReadArtifactRequest{
            RunId: "run-1",
            NodeId: "node-1",
            ArtifactName: "mlpipeline-metrics",
        },
        &api.ReadArtifactResponse{
            Data: []byte(invalidArtifactData),
        })
    pipelineFake.StubArtifact(
        &api.ReadArtifactRequest{
            RunId: "run-1",
            NodeId: "node-2",
            ArtifactName: "mlpipeline-metrics",
        },
        &api.ReadArtifactResponse{
            Data: []byte(validArtifactData),
        })
    err := reporter.ReportMetrics(workflow)
    // Partial failure is reported while valid metrics are reported.
    assert.NotNil(t, err)
    assert.True(t, util.HasCustomCode(err, util.CUSTOM_CODE_PERMANENT))
    expectedMetricsRequest := &api.ReportRunMetricsRequest{
        RunId: "run-1",
        Metrics: []*api.RunMetric{
            &api.RunMetric{
                Name: "accuracy",
                NodeId: "node-2",
                Value: &api.RunMetric_NumberValue{NumberValue: 0.77},
            },
            &api.RunMetric{
                Name: "logloss",
                NodeId: "node-2",
                Value: &api.RunMetric_NumberValue{NumberValue: 1.2},
            },
        },
    }
    got := pipelineFake.GetReportedMetricsRequest()
    if diff := cmp.Diff(expectedMetricsRequest, got, cmpopts.EquateEmpty(), protocmp.Transform()); diff != "" {
        t.Errorf("parseRuntimeInfo() = %+v, want %+v\nDiff (-want, +got)\n%s", got, expectedMetricsRequest, diff)
        s, _ := json.MarshalIndent(expectedMetricsRequest, "", " ")
        fmt.Printf("Want %s", s)
    }
}

func TestReportMetrics_CorruptedArchiveFile_Fail(t *testing.T) {
    pipelineFake := client.NewPipelineClientFake()
    reporter := NewMetricsReporter(pipelineFake)
    workflow := util.NewWorkflow(&workflowapi.Workflow{
        ObjectMeta: metav1.ObjectMeta{
            Namespace: "MY_NAMESPACE",
            Name: "MY_NAME",
            UID: types.UID("run-1"),
            Labels: map[string]string{util.LabelKeyWorkflowRunId: "run-1"},
        },
        Status: workflowapi.WorkflowStatus{
            Nodes: map[string]workflowapi.NodeStatus{
                "node-1": workflowapi.NodeStatus{
                    ID: "node-1",
                    Phase: workflowapi.NodeSucceeded,
                    Outputs: &workflowapi.Outputs{
                        Artifacts: []workflowapi.Artifact{{Name: "mlpipeline-metrics"}},
                    },
                },
            },
        },
    })
    pipelineFake.StubArtifact(
        &api.ReadArtifactRequest{
            RunId: "run-1",
            NodeId: "node-1",
            ArtifactName: "mlpipeline-metrics",
        },
        &api.ReadArtifactResponse{
            Data: []byte("invalid tgz content"),
        })
    err := reporter.ReportMetrics(workflow)
    assert.NotNil(t, err)
    assert.True(t, util.HasCustomCode(err, util.CUSTOM_CODE_PERMANENT))
    // Verify that ReportRunMetrics is not called.
    assert.Nil(t, pipelineFake.GetReportedMetricsRequest())
}

func TestReportMetrics_MultiplMetricErrors_TransientErrowWin(t *testing.T) {
    pipelineFake := client.NewPipelineClientFake()
    reporter := NewMetricsReporter(pipelineFake)
    workflow := util.NewWorkflow(&workflowapi.Workflow{
        ObjectMeta: metav1.ObjectMeta{
            Namespace: "MY_NAMESPACE",
            Name: "MY_NAME",
            UID: types.UID("run-1"),
            Labels: map[string]string{util.LabelKeyWorkflowRunId: "run-1"},
        },
        Status: workflowapi.WorkflowStatus{
            Nodes: map[string]workflowapi.NodeStatus{
                "node-1": workflowapi.NodeStatus{
                    ID: "node-1",
                    Phase: workflowapi.NodeSucceeded,
                    Outputs: &workflowapi.Outputs{
                        Artifacts: []workflowapi.Artifact{{Name: "mlpipeline-metrics"}},
                    },
                },
            },
        },
    })
    metricsJSON :=
        `{"metrics": [{"name": "accuracy", "numberValue": 0.77}, {"name": "log loss", "numberValue": 1.2}, {"name": "accuracy", "numberValue": 1.2}]}`
    artifactData, _ := util.ArchiveTgz(map[string]string{"file": metricsJSON})
    pipelineFake.StubArtifact(
        &api.ReadArtifactRequest{
            RunId: "run-1",
            NodeId: "node-1",
            ArtifactName: "mlpipeline-metrics",
        },
        &api.ReadArtifactResponse{
            Data: []byte(artifactData),
        })
    pipelineFake.StubReportRunMetrics(&api.ReportRunMetricsResponse{
        Results: []*api.ReportRunMetricsResponse_ReportRunMetricResult{
            &api.ReportRunMetricsResponse_ReportRunMetricResult{
                MetricNodeId: "node-1",
                MetricName: "accuracy",
                Status: api.ReportRunMetricsResponse_ReportRunMetricResult_OK,
            },
            // Invalid argument error triggers permanent error...
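The tests above feed the reporter a tgz archive whose single file holds a metrics JSON document of the form {"metrics": [{"name": ..., "numberValue": ...}]}. As a small, standalone sketch of that format, decoding it with only the standard library might look roughly like this; the runMetric and metricsDoc struct names are invented for illustration and are not the reporter's internal types.

package main

import (
    "encoding/json"
    "fmt"
)

// runMetric mirrors one entry of the mlpipeline-metrics JSON used in the tests above.
type runMetric struct {
    Name        string  `json:"name"`
    NumberValue float64 `json:"numberValue"`
}

// metricsDoc mirrors the top-level document.
type metricsDoc struct {
    Metrics []runMetric `json:"metrics"`
}

func main() {
    payload := `{"metrics": [{"name": "accuracy", "numberValue": 0.77}, {"name": "logloss", "numberValue": 1.2}]}`
    var doc metricsDoc
    if err := json.Unmarshal([]byte(payload), &doc); err != nil {
        panic(err) // the tests expect a permanent error for invalid JSON
    }
    for _, m := range doc.Metrics {
        fmt.Printf("%s = %v\n", m.Name, m.NumberValue)
    }
}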


compute_test.go

Source: compute_test.go (GitHub)


...10 "github.com/w3c/wptdashboard/shared"11)12var timeA = time.Unix(0, 0)13var timeB = time.Unix(0, 1)14var runA = shared.TestRun{15 "ABrowser",16 "1.0",17 "MyOS",18 "1.0",19 "abcd",20 "http://example.com/a_run.json",21 timeA,22}23var runB = shared.TestRun{24 "BBrowser",25 "1.0",26 "MyOS",27 "1.0",28 "dcba",29 "http://example.com/b_run.json",30 timeB,31}32func TestGatherResultsById_TwoRuns_SameTest(t *testing.T) {33 testName := "Do a thing"34 results := &[]metrics.TestRunResults{35 {36 &runA,37 &metrics.TestResults{38 "A test",39 "OK",40 &testName,41 []metrics.SubTest{},42 },43 },44 {45 &runB,46 &metrics.TestResults{47 "A test",48 "ERROR",49 &testName,50 []metrics.SubTest{},51 },52 },53 }54 gathered := GatherResultsById(results)55 assert.Equal(t, 1, len(gathered)) // Merged to single TestId: {"A test",""}.56 for testId, runStatusMap := range gathered {57 assert.Equal(t, metrics.TestId{"A test", ""}, testId)58 assert.Equal(t, 2, len(runStatusMap))59 assert.Equal(t, metrics.CompleteTestStatus{60 metrics.TestStatus_fromString("OK"),61 metrics.SubTestStatus_fromString("STATUS_UNKNOWN"),62 }, runStatusMap[runA])63 assert.Equal(t, metrics.CompleteTestStatus{64 metrics.TestStatus_fromString("ERROR"),65 metrics.SubTestStatus_fromString("STATUS_UNKNOWN"),66 }, runStatusMap[runB])67 }68}69func TestGatherResultsById_TwoRuns_DiffTests(t *testing.T) {70 testName := "Do a thing"71 results := &[]metrics.TestRunResults{72 {73 &runA,74 &metrics.TestResults{75 "A test",76 "OK",77 &testName,78 []metrics.SubTest{},79 },80 },81 {82 &runA,83 &metrics.TestResults{84 "Shared test",85 "ERROR",86 &testName,87 []metrics.SubTest{},88 },89 },90 {91 &runB,92 &metrics.TestResults{93 "Shared test",94 "OK",95 &testName,96 []metrics.SubTest{},97 },98 },99 {100 &runB,101 &metrics.TestResults{102 "B test",103 "ERROR",104 &testName,105 []metrics.SubTest{},106 },107 },108 }109 gathered := GatherResultsById(results)110 assert.Equal(t, 3, len(gathered)) // A, Shared, B.111 assert.Equal(t, 1, len(gathered[metrics.TestId{"A test", ""}]))112 assert.Equal(t, metrics.CompleteTestStatus{113 metrics.TestStatus_fromString("OK"),114 metrics.SubTestStatus_fromString("STATUS_UNKNOWN"),115 }, gathered[metrics.TestId{"A test", ""}][runA])116 assert.Equal(t, 2, len(gathered[metrics.TestId{"Shared test", ""}]))117 assert.Equal(t, metrics.CompleteTestStatus{118 metrics.TestStatus_fromString("ERROR"),119 metrics.SubTestStatus_fromString("STATUS_UNKNOWN"),120 }, gathered[metrics.TestId{"Shared test", ""}][runA])121 assert.Equal(t, metrics.CompleteTestStatus{122 metrics.TestStatus_fromString("OK"),123 metrics.SubTestStatus_fromString("STATUS_UNKNOWN"),124 }, gathered[metrics.TestId{"Shared test", ""}][runB])125 assert.Equal(t, 1, len(gathered[metrics.TestId{"B test", ""}]))126 assert.Equal(t, metrics.CompleteTestStatus{127 metrics.TestStatus_fromString("ERROR"),128 metrics.SubTestStatus_fromString("STATUS_UNKNOWN"),129 }, gathered[metrics.TestId{"B test", ""}][runB])130}131func TestGatherResultsById_OneRun_SubTest(t *testing.T) {132 testName := "Do a thing"133 subName1 := "First sub-test"134 subName2 := "Second sub-test"135 subStatus1 := "A-OK!"136 subStatus2 := "Oops..."137 results := &[]metrics.TestRunResults{138 {139 &runA,140 &metrics.TestResults{141 "A test",142 "OK",143 &testName,144 []metrics.SubTest{145 {146 subName1,147 "PASS",148 &subStatus1,149 },150 {151 subName2,152 "FAIL",153 &subStatus2,154 },155 },156 },157 },158 }159 gathered := GatherResultsById(results)160 assert.Equal(t, 3, len(gathered)) // Top-level test + 2 sub-tests.161 
testIds := make([]metrics.TestId, 0, len(gathered))162 for testId, _ := range gathered {163 testIds = append(testIds, testId)164 }165 assert.ElementsMatch(t, [...]metrics.TestId{166 {"A test", ""},167 {"A test", subName1},168 {"A test", subName2},169 }, testIds)170 assert.Equal(t, metrics.CompleteTestStatus{171 metrics.TestStatus_fromString("OK"),172 metrics.SubTestStatus_fromString("STATUS_UNKNOWN"),173 }, gathered[metrics.TestId{"A test", ""}][runA])174 assert.Equal(t, metrics.CompleteTestStatus{175 metrics.TestStatus_fromString("OK"),176 metrics.SubTestStatus_fromString("PASS"),177 }, gathered[metrics.TestId{"A test", subName1}][runA])178 assert.Equal(t, metrics.CompleteTestStatus{179 metrics.TestStatus_fromString("OK"),180 metrics.SubTestStatus_fromString("FAIL"),181 }, gathered[metrics.TestId{"A test", subName2}][runA])182}183func TestComputeTotals(t *testing.T) {184 statusz := make(TestRunsStatus)185 status1 := metrics.CompleteTestStatus{186 metrics.TestStatus_fromString("OK"),187 metrics.SubTestStatus_fromString("STATUS_UNKNOWN"),188 }189 status2 := metrics.CompleteTestStatus{190 metrics.TestStatus_fromString("ERROR"),191 metrics.SubTestStatus_fromString("STATUS_UNKNOWN"),192 }193 subStatus1 := metrics.CompleteTestStatus{194 metrics.TestStatus_fromString("OK"),195 metrics.SubTestStatus_fromString("PASS"),196 }197 subStatus2 := metrics.CompleteTestStatus{198 metrics.TestStatus_fromString("OK"),199 metrics.SubTestStatus_fromString("NOT_RUN"),200 }201 ab1 := metrics.TestId{"a/b/1", ""}202 ab2 := metrics.TestId{"a/b/2", ""}203 ac1 := metrics.TestId{"a/c/1", ""}204 ac1x := metrics.TestId{"a/c/1", "x"}205 ac1y := metrics.TestId{"a/c/1", "y"}206 ac1z := metrics.TestId{"a/c/1", "z"}207 statusz[ab1] = make(map[shared.TestRun]metrics.CompleteTestStatus)208 statusz[ab2] = make(map[shared.TestRun]metrics.CompleteTestStatus)209 statusz[ac1] = make(map[shared.TestRun]metrics.CompleteTestStatus)210 statusz[ac1x] = make(map[shared.TestRun]metrics.CompleteTestStatus)211 statusz[ac1y] = make(map[shared.TestRun]metrics.CompleteTestStatus)212 statusz[ac1z] = make(map[shared.TestRun]metrics.CompleteTestStatus)213 statusz[ab1][runA] = status1214 statusz[ab1][runB] = status2215 statusz[ab2][runB] = status1216 statusz[ac1][runA] = status1217 statusz[ac1x][runA] = subStatus1218 statusz[ac1y][runA] = subStatus2219 statusz[ac1z][runA] = subStatus2220 totals := ComputeTotals(&statusz)221 assert.Equal(t, 6, len(totals)) // a, a/b, a/c, a/b/1, a/b/2, a/c/1.222 assert.Equal(t, 6, totals["a"]) // a/b/1, a/b/2, a/c/1, a/c/1:x, a/c/1:y, a/c/1:z.223 assert.Equal(t, 2, totals["a/b"]) // a/b/1, a/b/2.224 assert.Equal(t, 1, totals["a/b/1"])225 assert.Equal(t, 1, totals["a/b/2"])226 assert.Equal(t, 4, totals["a/c"]) // a/c/1, a/c/1:x, a/c/1:y, a/c/1:z.227 assert.Equal(t, 4, totals["a/c/1"]) // a/c/1, a/c/1:x, a/c/1:y, a/c/1:z.228}...
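The TestComputeTotals assertions imply a roll-up rule: every test or sub-test entry increments a counter for each path prefix of its test ID, including the full path. Below is a minimal sketch of that rule, assuming nothing about the real ComputeTotals beyond what the test asserts; computePrefixTotals is an invented name, not the package's actual function.

package main

import (
    "fmt"
    "strings"
)

// computePrefixTotals increments a counter for every "/"-separated prefix of
// each test path, including the full path itself, which is the roll-up
// behaviour the TestComputeTotals assertions describe.
func computePrefixTotals(testPaths []string) map[string]int {
    totals := make(map[string]int)
    for _, p := range testPaths {
        parts := strings.Split(p, "/")
        for i := 1; i <= len(parts); i++ {
            totals[strings.Join(parts[:i], "/")]++
        }
    }
    return totals
}

func main() {
    // One entry per test or sub-test, mirroring the fixtures in TestComputeTotals:
    // a/b/1, a/b/2, and a/c/1 plus its three sub-tests.
    paths := []string{"a/b/1", "a/b/2", "a/c/1", "a/c/1", "a/c/1", "a/c/1"}
    fmt.Println(computePrefixTotals(paths))
    // map[a:6 a/b:2 a/b/1:1 a/b/2:1 a/c:4 a/c/1:4]
}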


metrics.go

Source: metrics.go (GitHub)


// Copyright 2018 Google Inc. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package metrics

import (
    "io/ioutil"
    "os"
    "strconv"

    "android/soong/ui/metrics/metrics_proto"
    "github.com/golang/protobuf/proto"
)

const (
    RunSetupTool = "setup"
    RunKati = "kati"
    RunSoong = "soong"
    PrimaryNinja = "ninja"
    TestRun = "test"
)

type Metrics struct {
    metrics soong_metrics_proto.MetricsBase
    TimeTracer TimeTracer
}

func New() (metrics *Metrics) {
    m := &Metrics{
        metrics: soong_metrics_proto.MetricsBase{},
        TimeTracer: &timeTracerImpl{},
    }
    return m
}

func (m *Metrics) SetTimeMetrics(perf soong_metrics_proto.PerfInfo) {
    switch perf.GetName() {
    case RunKati:
        m.metrics.KatiRuns = append(m.metrics.KatiRuns, &perf)
        break
    case RunSoong:
        m.metrics.SoongRuns = append(m.metrics.SoongRuns, &perf)
        break
    case PrimaryNinja:
        m.metrics.NinjaRuns = append(m.metrics.NinjaRuns, &perf)
        break
    default:
        // ignored
    }
}

func (m *Metrics) SetMetadataMetrics(metadata map[string]string) {
    for k, v := range metadata {
        switch k {
        case "BUILD_ID":
            m.metrics.BuildId = proto.String(v)
            break
        case "PLATFORM_VERSION_CODENAME":
            m.metrics.PlatformVersionCodename = proto.String(v)
            break
        case "TARGET_PRODUCT":
            m.metrics.TargetProduct = proto.String(v)
            break
        case "TARGET_BUILD_VARIANT":
            switch v {
            case "user":
                m.metrics.TargetBuildVariant = soong_metrics_proto.MetricsBase_USER.Enum()
            case "userdebug":
                m.metrics.TargetBuildVariant = soong_metrics_proto.MetricsBase_USERDEBUG.Enum()
            case "eng":
                m.metrics.TargetBuildVariant = soong_metrics_proto.MetricsBase_ENG.Enum()
            default:
                // ignored
            }
        case "TARGET_ARCH":
            m.metrics.TargetArch = m.getArch(v)
        case "TARGET_ARCH_VARIANT":
            m.metrics.TargetArchVariant = proto.String(v)
        case "TARGET_CPU_VARIANT":
            m.metrics.TargetCpuVariant = proto.String(v)
        case "HOST_ARCH":
            m.metrics.HostArch = m.getArch(v)
        case "HOST_2ND_ARCH":
            m.metrics.Host_2NdArch = m.getArch(v)
        case "HOST_OS":
            m.metrics.HostOs = proto.String(v)
        case "HOST_OS_EXTRA":
            m.metrics.HostOsExtra = proto.String(v)
        case "HOST_CROSS_OS":
            m.metrics.HostCrossOs = proto.String(v)
        case "HOST_CROSS_ARCH":
            m.metrics.HostCrossArch = proto.String(v)
        case "HOST_CROSS_2ND_ARCH":
            m.metrics.HostCross_2NdArch = proto.String(v)
        case "OUT_DIR":
            m.metrics.OutDir = proto.String(v)
        default:
            // ignored
        }
    }
}

func (m *Metrics) getArch(arch string) *soong_metrics_proto.MetricsBase_Arch {
    switch arch {
    case "arm":
        return soong_metrics_proto.MetricsBase_ARM.Enum()
    case "arm64":
        return soong_metrics_proto.MetricsBase_ARM64.Enum()
    case "x86":
        return soong_metrics_proto.MetricsBase_X86.Enum()
    case "x86_64":
        return soong_metrics_proto.MetricsBase_X86_64.Enum()
    default:
        return soong_metrics_proto.MetricsBase_UNKNOWN.Enum()
    }
}

func (m *Metrics) SetBuildDateTime(date_time string) {
    if date_time != "" {
        date_time_timestamp, err := strconv.ParseInt(date_time, 10, 64)
        if err != nil {
            panic(err)
        }
        m.metrics.BuildDateTimestamp = &date_time_timestamp
    }
}

func (m *Metrics) Serialize() (data []byte, err error) {
    return proto.Marshal(&m.metrics)
}

// exports the output to the file at outputPath
func (m *Metrics) Dump(outputPath string) (err error) {
    data, err := m.Serialize()
    if err != nil {
        return err
    }
    tempPath := outputPath + ".tmp"
    err = ioutil.WriteFile(tempPath, []byte(data), 0644)
    if err != nil {
        return err
    }
    err = os.Rename(tempPath, outputPath)
    if err != nil {
        return err
    }
    return nil
}
...
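A minimal sketch of how this type might be driven by a build tool, using only the exported functions shown above. The import path is assumed from the package's location in the Android source tree, and the metadata values and output path are invented for the example.

package main

// Illustrative driver for the Metrics type defined above; values are made up.
import "android/soong/ui/metrics"

func main() {
    m := metrics.New()
    m.SetMetadataMetrics(map[string]string{
        "BUILD_ID": "1234567",
        "TARGET_PRODUCT": "aosp_arm64",
        "TARGET_BUILD_VARIANT": "userdebug",
        "TARGET_ARCH": "arm64",
    })
    m.SetBuildDateTime("1546300800") // seconds since the epoch, as a decimal string
    if err := m.Dump("/tmp/soong_metrics"); err != nil {
        panic(err)
    }
}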


run

Using AI Code Generation


package main

import (
    "log"
    "net/http"

    "github.com/prometheus/client_golang/prometheus"
    "github.com/prometheus/client_golang/prometheus/promauto"
    "github.com/prometheus/client_golang/prometheus/promhttp"
)

// metrics bundles the collectors this example records into.
type metrics struct {
    requests *prometheus.CounterVec
}

func newMetrics() *metrics {
    return &metrics{
        // promauto registers the collector with the default registry, so no
        // separate prometheus.Register call is needed. The metric name and
        // help text are illustrative.
        requests: promauto.NewCounterVec(prometheus.CounterOpts{
            Name: "http_requests_total",
            Help: "HTTP requests by method and status.",
        }, []string{"method", "status"}),
    }
}

// run records one observation. Calling it on a nil *metrics, as the stack
// trace in the original snippet shows, panics with a nil pointer dereference.
func (m *metrics) run() {
    m.requests.WithLabelValues("GET", "200").Inc()
}

func main() {
    m := newMetrics()
    m.run()
    http.Handle("/metrics", promhttp.Handler())
    log.Fatal(http.ListenAndServe(":8080", nil))
}


run

Using AI Code Generation


package main

import (
    "fmt"
    "sync"
)

func main() {
    // Wait for ten goroutines to finish before exiting.
    var wg sync.WaitGroup
    wg.Add(10)
    for i := 0; i < 10; i++ {
        go func() {
            fmt.Println("code to use run method of metrics class")
            wg.Done()
        }()
    }
    wg.Wait()
}


run

Using AI Code Generation


package main

import (
    "log"
    "net/http"

    "github.com/prometheus/client_golang/prometheus/promhttp"
)

func main() {
    // Expose the default Prometheus registry on /metrics.
    http.Handle("/metrics", promhttp.Handler())
    log.Fatal(http.ListenAndServe(":8080", nil))
}


run

Using AI Code Generation


package main

// The original snippet repeats these two calls; it assumes a metrics package
// exposing NewMetrics() and Run(). The import path below is a placeholder for
// wherever that package actually lives.
import "example.com/metrics"

func main() {
    m := metrics.NewMetrics()
    m.Run()
}


run

Using AI Code Generation


package main

import (
    "log"
    "strings"

    "github.com/elastic/go-elasticsearch/v7"
)

func main() {
    log.SetFlags(0)
    cfg := elasticsearch.Config{
        Addresses: []string{
            "http://localhost:9200", // assumed address; the original value was stripped
        },
        // The original snippet configured a per-request metrics callback on the
        // transport; enabling the client's built-in transport metrics is used here instead.
        EnableMetrics: true,
    }
    es, err := elasticsearch.NewClient(cfg)
    if err != nil {
        log.Fatalf("Error creating the client: %s", err)
    }
    info, err := es.Info()
    if err != nil {
        log.Fatalf("Error getting response: %s", err)
    }
    log.Println("Elasticsearch:", info)
    res, err := es.Index(
        "twitter", // index name assumed from the Get/Delete calls below
        strings.NewReader(`{"user": "test", "message": "trying out Elasticsearch"}`),
        es.Index.WithDocumentID("1"),
    )
    if err != nil {
        log.Fatalf("Error getting response: %s", err)
    }
    defer res.Body.Close()
    if res.IsError() {
        log.Fatalf("Error indexing document: %s", res.String())
    }
    res, err = es.Get("twitter", "1")
    if err != nil {
        log.Fatalf("Error getting response: %s", err)
    }
    defer res.Body.Close()
    if res.IsError() {
        log.Fatalf("Error getting document: %s", res.String())
    }
    res, err = es.Delete("twitter", "1")
    if err != nil {
        log.Fatalf("Error getting response: %s", err)
    }
    defer res.Body.Close()
    if res.IsError() {
        log.Fatalf("Error deleting document: %s", res.String())
    }
    res, err = es.Search(
        es.Search.WithIndex("twitter"),
        es.Search.WithBody(strings.NewReader(`{"query": {"match_all": {}}}`)), // illustrative query body
    )
    if err != nil {
        log.Fatalf("Error getting response: %s", err)
    }
    defer res.Body.Close()
    log.Println(res)
    // Transport metrics are collected because EnableMetrics is set above.
    m, err := es.Metrics()
    if err != nil {
        log.Fatalf("Error getting metrics: %s", err)
    }
    log.Printf("Transport metrics: %+v", m)
}


run

Using AI Code Generation


1import "fmt"2func main() {3 fmt.Println("Hello, playground")4 metrics := Metrics{}5 metrics.run()6}7import "fmt"8type Metrics struct {9}10func (m Metrics) run() {11 fmt.Println("Hello, playground")12}13./1.go:7: cannot use metrics (type Metrics) as type Metrics in field value:14 Metrics does not implement Metrics (missing run method)15Traceback (most recent call last):16 print (x)17Traceback (most recent call last):18 print (x)19Traceback (most recent call last):20 print (soup)21Traceback (most recent call last):


Automation Testing Tutorials

Learn to execute automation testing from scratch with the LambdaTest Learning Hub, right from setting up the prerequisites and running your first automation test to following best practices and diving deeper into advanced test scenarios. The LambdaTest Learning Hub compiles step-by-step guides to help you become proficient with different test automation frameworks, e.g. Selenium, Cypress, and TestNG.


YouTube

You can also refer to the video tutorials on the LambdaTest YouTube channel for step-by-step demonstrations from industry experts.

Run K6 automation tests on LambdaTest cloud grid

Perform automation testing on 3000+ real desktop and mobile devices online.


