Best K6 code snippet using local.TestExecutionSchedulerRun
Source: local_test.go
...
    }()
    require.NoError(t, execScheduler.Init(ctx, samples))
    return ctx, cancel, execScheduler, samples
}

func TestExecutionSchedulerRun(t *testing.T) {
    t.Parallel()
    ctx, cancel, execScheduler, samples := newTestExecutionScheduler(t, nil, nil, lib.Options{})
    defer cancel()

    err := make(chan error, 1)
    go func() { err <- execScheduler.Run(ctx, ctx, samples) }()
    assert.NoError(t, <-err)
}

func TestExecutionSchedulerRunNonDefault(t *testing.T) {
    t.Parallel()

    testCases := []struct {
        name, script, expErr string
    }{
        {"defaultOK", `export default function () {}`, ""},
        {"nonDefaultOK", `
            export let options = {
                scenarios: {
                    per_vu_iters: {
                        executor: "per-vu-iterations",
                        vus: 1,
                        iterations: 1,
                        exec: "nonDefault",
                    },
                }
            }
            export function nonDefault() {}`, ""},
    }

    for _, tc := range testCases {
        tc := tc
        t.Run(tc.name, func(t *testing.T) {
            t.Parallel()
            logger := logrus.New()
            logger.SetOutput(testutils.NewTestOutput(t))
            registry := metrics.NewRegistry()
            builtinMetrics := metrics.RegisterBuiltinMetrics(registry)
            runner, err := js.New(
                &lib.RuntimeState{
                    Logger:         logger,
                    BuiltinMetrics: builtinMetrics,
                    Registry:       registry,
                }, &loader.SourceData{
                    URL: &url.URL{Path: "/script.js"}, Data: []byte(tc.script),
                }, nil)
            require.NoError(t, err)

            execScheduler, err := NewExecutionScheduler(runner, builtinMetrics, logger)
            require.NoError(t, err)

            ctx, cancel := context.WithCancel(context.Background())
            defer cancel()
            done := make(chan struct{})
            samples := make(chan metrics.SampleContainer)
            go func() {
                err := execScheduler.Init(ctx, samples)
                if tc.expErr != "" {
                    assert.EqualError(t, err, tc.expErr)
                } else {
                    assert.NoError(t, err)
                    assert.NoError(t, execScheduler.Run(ctx, ctx, samples))
                }
                close(done)
            }()
            for {
                select {
                case <-samples:
                case <-done:
                    return
                }
            }
        })
    }
}

func TestExecutionSchedulerRunEnv(t *testing.T) {
    t.Parallel()

    scriptTemplate := `
    import { Counter } from "k6/metrics";

    let errors = new Counter("errors");

    export let options = {
        scenarios: {
            executor: {
                executor: "%[1]s",
                gracefulStop: "0.5s",
                %[2]s
            }
        }
    }

    export default function () {
        if (__ENV.TESTVAR !== "%[3]s") {
            console.error('Wrong env var value. Expected: %[3]s, actual: ', __ENV.TESTVAR);
            errors.add(1);
        }
    }`

    executorConfigs := map[string]string{
        "constant-arrival-rate": `
            rate: 1,
            timeUnit: "0.5s",
            duration: "0.5s",
            preAllocatedVUs: 1,
            maxVUs: 2,`,
        "constant-vus": `
            vus: 1,
            duration: "0.5s",`,
        "externally-controlled": `
            vus: 1,
            duration: "0.5s",`,
        "per-vu-iterations": `
            vus: 1,
            iterations: 1,`,
        "shared-iterations": `
            vus: 1,
            iterations: 1,`,
        "ramping-arrival-rate": `
            startRate: 1,
            timeUnit: "0.5s",
            preAllocatedVUs: 1,
            maxVUs: 2,
            stages: [ { target: 1, duration: "0.5s" } ],`,
        "ramping-vus": `
            startVUs: 1,
            stages: [ { target: 1, duration: "0.5s" } ],`,
    }

    testCases := []struct{ name, script string }{}

    // Generate tests using global env and with env override
    for ename, econf := range executorConfigs {
        testCases = append(testCases, struct{ name, script string }{
            "global/" + ename, fmt.Sprintf(scriptTemplate, ename, econf, "global"),
        })
        configWithEnvOverride := econf + "env: { TESTVAR: 'overridden' }"
        testCases = append(testCases, struct{ name, script string }{
            "override/" + ename, fmt.Sprintf(scriptTemplate, ename, configWithEnvOverride, "overridden"),
        })
    }

    for _, tc := range testCases {
        tc := tc
        t.Run(tc.name, func(t *testing.T) {
            t.Parallel()
            logger := logrus.New()
            logger.SetOutput(testutils.NewTestOutput(t))
            registry := metrics.NewRegistry()
            builtinMetrics := metrics.RegisterBuiltinMetrics(registry)
            runner, err := js.New(
                &lib.RuntimeState{
                    Logger:         logger,
                    BuiltinMetrics: builtinMetrics,
                    Registry:       registry,
                    RuntimeOptions: lib.RuntimeOptions{Env: map[string]string{"TESTVAR": "global"}},
                }, &loader.SourceData{
                    URL:  &url.URL{Path: "/script.js"},
                    Data: []byte(tc.script),
                }, nil)
            require.NoError(t, err)

            execScheduler, err := NewExecutionScheduler(runner, builtinMetrics, logger)
            require.NoError(t, err)

            ctx, cancel := context.WithCancel(context.Background())
            defer cancel()
            done := make(chan struct{})
            samples := make(chan metrics.SampleContainer)
            go func() {
                assert.NoError(t, execScheduler.Init(ctx, samples))
                assert.NoError(t, execScheduler.Run(ctx, ctx, samples))
                close(done)
            }()
            for {
                select {
                case sample := <-samples:
                    if s, ok := sample.(metrics.Sample); ok && s.Metric.Name == "errors" {
                        assert.FailNow(t, "received error sample from test")
                    }
                case <-done:
                    return
                }
            }
        })
    }
}

func TestExecutionSchedulerSystemTags(t *testing.T) {
    t.Parallel()
    tb := httpmultibin.NewHTTPMultiBin(t)
    sr := tb.Replacer.Replace

    script := sr(`
    import http from "k6/http";

    export let options = {
        scenarios: {
            per_vu_test: {
                executor: "per-vu-iterations",
                gracefulStop: "0s",
                vus: 1,
                iterations: 1,
            },
            shared_test: {
                executor: "shared-iterations",
                gracefulStop: "0s",
                vus: 1,
                iterations: 1,
            }
        }
    }

    export default function () {
        http.get("HTTPBIN_IP_URL/");
    }`)

    logger := logrus.New()
    logger.SetOutput(testutils.NewTestOutput(t))
    registry := metrics.NewRegistry()
    builtinMetrics := metrics.RegisterBuiltinMetrics(registry)
    runner, err := js.New(
        &lib.RuntimeState{
            Logger:         logger,
            BuiltinMetrics: builtinMetrics,
            Registry:       registry,
        }, &loader.SourceData{
            URL:  &url.URL{Path: "/script.js"},
            Data: []byte(script),
        }, nil)
    require.NoError(t, err)

    require.NoError(t, runner.SetOptions(runner.GetOptions().Apply(lib.Options{
        SystemTags: &metrics.DefaultSystemTagSet,
    })))

    execScheduler, err := NewExecutionScheduler(runner, builtinMetrics, logger)
    require.NoError(t, err)

    ctx, cancel := context.WithTimeout(context.Background(), 3*time.Second)
    defer cancel()

    samples := make(chan metrics.SampleContainer)
    done := make(chan struct{})
    go func() {
        defer close(done)
        require.NoError(t, execScheduler.Init(ctx, samples))
        require.NoError(t, execScheduler.Run(ctx, ctx, samples))
    }()

    expCommonTrailTags := metrics.IntoSampleTags(&map[string]string{
        "group":             "",
        "method":            "GET",
        "name":              sr("HTTPBIN_IP_URL/"),
        "url":               sr("HTTPBIN_IP_URL/"),
        "proto":             "HTTP/1.1",
        "status":            "200",
        "expected_response": "true",
    })
    expTrailPVUTagsRaw := expCommonTrailTags.CloneTags()
    expTrailPVUTagsRaw["scenario"] = "per_vu_test"
    expTrailPVUTags := metrics.IntoSampleTags(&expTrailPVUTagsRaw)
    expTrailSITagsRaw := expCommonTrailTags.CloneTags()
    expTrailSITagsRaw["scenario"] = "shared_test"
    expTrailSITags := metrics.IntoSampleTags(&expTrailSITagsRaw)
    expNetTrailPVUTags := metrics.IntoSampleTags(&map[string]string{
        "group":    "",
        "scenario": "per_vu_test",
    })
    expNetTrailSITags := metrics.IntoSampleTags(&map[string]string{
        "group":    "",
        "scenario": "shared_test",
    })

    var gotCorrectTags int
    for {
        select {
        case sample := <-samples:
            switch s := sample.(type) {
            case *httpext.Trail:
                if s.Tags.IsEqual(expTrailPVUTags) || s.Tags.IsEqual(expTrailSITags) {
                    gotCorrectTags++
                }
            case *netext.NetTrail:
                if s.Tags.IsEqual(expNetTrailPVUTags) || s.Tags.IsEqual(expNetTrailSITags) {
                    gotCorrectTags++
                }
            }
        case <-done:
            require.Equal(t, 4, gotCorrectTags, "received wrong amount of samples with expected tags")
            return
        }
    }
}

func TestExecutionSchedulerRunCustomTags(t *testing.T) {
    t.Parallel()
    tb := httpmultibin.NewHTTPMultiBin(t)
    sr := tb.Replacer.Replace

    scriptTemplate := sr(`
    import http from "k6/http";

    export let options = {
        scenarios: {
            executor: {
                executor: "%s",
                gracefulStop: "0.5s",
                %s
            }
        }
    }

    export default function () {
        http.get("HTTPBIN_IP_URL/");
    }`)

    executorConfigs := map[string]string{
        "constant-arrival-rate": `
            rate: 1,
            timeUnit: "0.5s",
            duration: "0.5s",
            preAllocatedVUs: 1,
            maxVUs: 2,`,
        "constant-vus": `
            vus: 1,
            duration: "0.5s",`,
        "externally-controlled": `
            vus: 1,
            duration: "0.5s",`,
        "per-vu-iterations": `
            vus: 1,
            iterations: 1,`,
        "shared-iterations": `
            vus: 1,
            iterations: 1,`,
        "ramping-arrival-rate": `
            startRate: 5,
            timeUnit: "0.5s",
            preAllocatedVUs: 1,
            maxVUs: 2,
            stages: [ { target: 10, duration: "1s" } ],`,
        "ramping-vus": `
            startVUs: 1,
            stages: [ { target: 1, duration: "0.5s" } ],`,
    }

    testCases := []struct{ name, script string }{}

    // Generate tests using custom tags
    for ename, econf := range executorConfigs {
        configWithCustomTag := econf + "tags: { customTag: 'value' }"
        testCases = append(testCases, struct{ name, script string }{
            ename, fmt.Sprintf(scriptTemplate, ename, configWithCustomTag),
        })
    }

    for _, tc := range testCases {
        tc := tc
        t.Run(tc.name, func(t *testing.T) {
            t.Parallel()
            logger := logrus.New()
            logger.SetOutput(testutils.NewTestOutput(t))
            registry := metrics.NewRegistry()
            builtinMetrics := metrics.RegisterBuiltinMetrics(registry)
            runner, err := js.New(
                &lib.RuntimeState{
                    Logger:         logger,
                    BuiltinMetrics: builtinMetrics,
                    Registry:       registry,
                },
                &loader.SourceData{
                    URL:  &url.URL{Path: "/script.js"},
                    Data: []byte(tc.script),
                },
                nil)
            require.NoError(t, err)

            execScheduler, err := NewExecutionScheduler(runner, builtinMetrics, logger)
            require.NoError(t, err)

            ctx, cancel := context.WithTimeout(context.Background(), 3*time.Second)
            defer cancel()

            done := make(chan struct{})
            samples := make(chan metrics.SampleContainer)
            go func() {
                defer close(done)
                require.NoError(t, execScheduler.Init(ctx, samples))
                require.NoError(t, execScheduler.Run(ctx, ctx, samples))
            }()
            var gotTrailTag, gotNetTrailTag bool
            for {
                select {
                case sample := <-samples:
                    if trail, ok := sample.(*httpext.Trail); ok && !gotTrailTag {
                        tags := trail.Tags.CloneTags()
                        if v, ok := tags["customTag"]; ok && v == "value" {
                            gotTrailTag = true
                        }
                    }
                    if netTrail, ok := sample.(*netext.NetTrail); ok && !gotNetTrailTag {
                        tags := netTrail.Tags.CloneTags()
                        if v, ok := tags["customTag"]; ok && v == "value" {
                            gotNetTrailTag = true
                        }
                    }
                case <-done:
                    if !gotTrailTag || !gotNetTrailTag {
                        assert.FailNow(t, "a sample with expected tag wasn't received")
                    }
                    return
                }
            }
        })
    }
}

// Ensure that custom executor settings are unique per executor and
// that there's no "crossover"/"pollution" between executors.
// Also test that custom tags are properly set on checks and groups metrics.
func TestExecutionSchedulerRunCustomConfigNoCrossover(t *testing.T) {
    t.Parallel()
    tb := httpmultibin.NewHTTPMultiBin(t)

    script := tb.Replacer.Replace(`
    import http from "k6/http";
    import ws from 'k6/ws';
    import { Counter } from 'k6/metrics';
    import { check, group } from 'k6';

    let errors = new Counter('errors');

    export let options = {
        // Required for WS tests
        hosts: { 'httpbin.local': '127.0.0.1' },
        scenarios: {
            scenario1: {
                executor: 'per-vu-iterations',
                vus: 1,
                iterations: 1,
                gracefulStop: '0s',
                maxDuration: '1s',
                exec: 's1func',
                env: { TESTVAR1: 'scenario1' },
                tags: { testtag1: 'scenario1' },
            },
            scenario2: {
                executor: 'shared-iterations',
                vus: 1,
                iterations: 1,
                gracefulStop: '1s',
                startTime: '0.5s',
                maxDuration: '2s',
                exec: 's2func',
                env: { TESTVAR2: 'scenario2' },
                tags: { testtag2: 'scenario2' },
            },
            scenario3: {
                executor: 'per-vu-iterations',
                vus: 1,
                iterations: 1,
                gracefulStop: '1s',
                exec: 's3funcWS',
                env: { TESTVAR3: 'scenario3' },
                tags: { testtag3: 'scenario3' },
            },
        }
    }

    function checkVar(name, expected) {
        if (__ENV[name] !== expected) {
            console.error('Wrong ' + name + " env var value. Expected: '"
                + expected + "', actual: '" + __ENV[name] + "'");
            errors.add(1);
        }
    }

    export function s1func() {
        checkVar('TESTVAR1', 'scenario1');
        checkVar('TESTVAR2', undefined);
        checkVar('TESTVAR3', undefined);
        checkVar('TESTGLOBALVAR', 'global');

        // Intentionally try to pollute the env
        __ENV.TESTVAR2 = 'overridden';

        http.get('HTTPBIN_IP_URL/', { tags: { reqtag: 'scenario1' }});
    }

    export function s2func() {
        checkVar('TESTVAR1', undefined);
        checkVar('TESTVAR2', 'scenario2');
        checkVar('TESTVAR3', undefined);
        checkVar('TESTGLOBALVAR', 'global');

        http.get('HTTPBIN_IP_URL/', { tags: { reqtag: 'scenario2' }});
    }

    export function s3funcWS() {
        checkVar('TESTVAR1', undefined);
        checkVar('TESTVAR2', undefined);
        checkVar('TESTVAR3', 'scenario3');
        checkVar('TESTGLOBALVAR', 'global');

        const customTags = { wstag: 'scenario3' };
        group('wsgroup', function() {
            const response = ws.connect('WSBIN_URL/ws-echo', { tags: customTags },
                function (socket) {
                    socket.on('open', function() {
                        socket.send('hello');
                    });
                    socket.on('message', function(msg) {
                        if (msg != 'hello') {
                            console.error("Expected to receive 'hello' but got '" + msg + "' instead!");
                            errors.add(1);
                        }
                        socket.close()
                    });
                    socket.on('error', function (e) {
                        console.log('ws error: ' + e.error());
                        errors.add(1);
                    });
                }
            );
            check(response, { 'status is 101': (r) => r && r.status === 101 }, customTags);
        });
    }
`)

    logger := logrus.New()
    logger.SetOutput(testutils.NewTestOutput(t))
    registry := metrics.NewRegistry()
    builtinMetrics := metrics.RegisterBuiltinMetrics(registry)
    runner, err := js.New(
        &lib.RuntimeState{
            Logger:         logger,
            BuiltinMetrics: builtinMetrics,
            Registry:       registry,
            RuntimeOptions: lib.RuntimeOptions{Env: map[string]string{"TESTGLOBALVAR": "global"}},
        }, &loader.SourceData{
            URL:  &url.URL{Path: "/script.js"},
            Data: []byte(script),
        },
        nil)
    require.NoError(t, err)

    execScheduler, err := NewExecutionScheduler(runner, builtinMetrics, logger)
    require.NoError(t, err)

    ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second)
    defer cancel()

    samples := make(chan metrics.SampleContainer)
    go func() {
        assert.NoError(t, execScheduler.Init(ctx, samples))
        assert.NoError(t, execScheduler.Run(ctx, ctx, samples))
        close(samples)
    }()

    expectedTrailTags := []map[string]string{
        {"testtag1": "scenario1", "reqtag": "scenario1"},
        {"testtag2": "scenario2", "reqtag": "scenario2"},
    }
    expectedNetTrailTags := []map[string]string{
        {"testtag1": "scenario1"},
        {"testtag2": "scenario2"},
    }
    expectedConnSampleTags := map[string]string{
        "testtag3": "scenario3", "wstag": "scenario3",
    }
    expectedPlainSampleTags := []map[string]string{
        {"testtag3": "scenario3"},
        {"testtag3": "scenario3", "wstag": "scenario3"},
    }
    var gotSampleTags int
    for sample := range samples {
        switch s := sample.(type) {
        case metrics.Sample:
            if s.Metric.Name == "errors" {
                assert.FailNow(t, "received error sample from test")
            }
            if s.Metric.Name == "checks" || s.Metric.Name == "group_duration" {
                tags := s.Tags.CloneTags()
                for _, expTags := range expectedPlainSampleTags {
                    if reflect.DeepEqual(expTags, tags) {
                        gotSampleTags++
                    }
                }
            }
        case *httpext.Trail:
            tags := s.Tags.CloneTags()
            for _, expTags := range expectedTrailTags {
                if reflect.DeepEqual(expTags, tags) {
                    gotSampleTags++
                }
            }
        case *netext.NetTrail:
            tags := s.Tags.CloneTags()
            for _, expTags := range expectedNetTrailTags {
                if reflect.DeepEqual(expTags, tags) {
                    gotSampleTags++
                }
            }
        case metrics.ConnectedSamples:
            for _, sm := range s.Samples {
                tags := sm.Tags.CloneTags()
                if reflect.DeepEqual(expectedConnSampleTags, tags) {
                    gotSampleTags++
                }
            }
        }
    }
    require.Equal(t, 8, gotSampleTags, "received wrong amount of samples with expected tags")
}

func TestExecutionSchedulerSetupTeardownRun(t *testing.T) {
    t.Parallel()
    t.Run("Normal", func(t *testing.T) {
        t.Parallel()
        setupC := make(chan struct{})
        teardownC := make(chan struct{})
        runner := &minirunner.MiniRunner{
            SetupFn: func(ctx context.Context, out chan<- metrics.SampleContainer) ([]byte, error) {
                close(setupC)
                return nil, nil
            },
            TeardownFn: func(ctx context.Context, out chan<- metrics.SampleContainer) error {
                close(teardownC)
                return nil
            },
        }
        ctx, cancel, execScheduler, samples := newTestExecutionScheduler(t, runner, nil, lib.Options{})

        err := make(chan error, 1)
        go func() { err <- execScheduler.Run(ctx, ctx, samples) }()
        defer cancel()
        <-setupC
        <-teardownC
        assert.NoError(t, <-err)
    })
    t.Run("Setup Error", func(t *testing.T) {
        t.Parallel()
        runner := &minirunner.MiniRunner{
            SetupFn: func(ctx context.Context, out chan<- metrics.SampleContainer) ([]byte, error) {
                return nil, errors.New("setup error")
            },
        }
        ctx, cancel, execScheduler, samples := newTestExecutionScheduler(t, runner, nil, lib.Options{})
        defer cancel()
        assert.EqualError(t, execScheduler.Run(ctx, ctx, samples), "setup error")
    })
    t.Run("Don't Run Setup", func(t *testing.T) {
        t.Parallel()
        runner := &minirunner.MiniRunner{
            SetupFn: func(ctx context.Context, out chan<- metrics.SampleContainer) ([]byte, error) {
                return nil, errors.New("setup error")
            },
            TeardownFn: func(ctx context.Context, out chan<- metrics.SampleContainer) error {
                return errors.New("teardown error")
            },
        }
        ctx, cancel, execScheduler, samples := newTestExecutionScheduler(t, runner, nil, lib.Options{
            NoSetup:    null.BoolFrom(true),
            VUs:        null.IntFrom(1),
            Iterations: null.IntFrom(1),
        })
        defer cancel()
        assert.EqualError(t, execScheduler.Run(ctx, ctx, samples), "teardown error")
    })
    t.Run("Teardown Error", func(t *testing.T) {
        t.Parallel()
        runner := &minirunner.MiniRunner{
            SetupFn: func(ctx context.Context, out chan<- metrics.SampleContainer) ([]byte, error) {
                return nil, nil
            },
            TeardownFn: func(ctx context.Context, out chan<- metrics.SampleContainer) error {
                return errors.New("teardown error")
            },
        }
        ctx, cancel, execScheduler, samples := newTestExecutionScheduler(t, runner, nil, lib.Options{
            VUs:        null.IntFrom(1),
            Iterations: null.IntFrom(1),
        })
        defer cancel()

        assert.EqualError(t, execScheduler.Run(ctx, ctx, samples), "teardown error")
    })
    t.Run("Don't Run Teardown", func(t *testing.T) {
        t.Parallel()
        runner := &minirunner.MiniRunner{
            SetupFn: func(ctx context.Context, out chan<- metrics.SampleContainer) ([]byte, error) {
                return nil, nil
            },
            TeardownFn: func(ctx context.Context, out chan<- metrics.SampleContainer) error {
                return errors.New("teardown error")
            },
        }
        ctx, cancel, execScheduler, samples := newTestExecutionScheduler(t, runner, nil, lib.Options{
            NoTeardown: null.BoolFrom(true),
            VUs:        null.IntFrom(1),
            Iterations: null.IntFrom(1),
        })
        defer cancel()
        assert.NoError(t, execScheduler.Run(ctx, ctx, samples))
    })
}

func TestExecutionSchedulerStages(t *testing.T) {
    t.Parallel()
    testdata := map[string]struct {
        Duration time.Duration
        Stages   []lib.Stage
    }{
        "one": {
            1 * time.Second,
            []lib.Stage{{Duration: types.NullDurationFrom(1 * time.Second), Target: null.IntFrom(1)}},
        },
        "two": {
            2 * time.Second,
            []lib.Stage{
                {Duration: types.NullDurationFrom(1 * time.Second), Target: null.IntFrom(1)},
                {Duration: types.NullDurationFrom(1 * time.Second), Target: null.IntFrom(2)},
            },
        },
        "four": {
            4 * time.Second,
            []lib.Stage{
                {Duration: types.NullDurationFrom(1 * time.Second), Target: null.IntFrom(5)},
                {Duration: types.NullDurationFrom(3 * time.Second), Target: null.IntFrom(10)},
            },
        },
    }

    for name, data := range testdata {
        data := data
        t.Run(name, func(t *testing.T) {
            t.Parallel()
            runner := &minirunner.MiniRunner{
                Fn: func(ctx context.Context, _ *lib.State, out chan<- metrics.SampleContainer) error {
                    time.Sleep(100 * time.Millisecond)
                    return nil
                },
            }
            ctx, cancel, execScheduler, samples := newTestExecutionScheduler(t, runner, nil, lib.Options{
                VUs:    null.IntFrom(1),
                Stages: data.Stages,
            })
            defer cancel()
            assert.NoError(t, execScheduler.Run(ctx, ctx, samples))
            assert.True(t, execScheduler.GetState().GetCurrentTestRunDuration() >= data.Duration)
        })
    }
}

func TestExecutionSchedulerEndTime(t *testing.T) {
    t.Parallel()
    runner := &minirunner.MiniRunner{
        Fn: func(ctx context.Context, _ *lib.State, out chan<- metrics.SampleContainer) error {
            time.Sleep(100 * time.Millisecond)
            return nil
        },
    }
    ctx, cancel, execScheduler, samples := newTestExecutionScheduler(t, runner, nil, lib.Options{
        VUs:      null.IntFrom(10),
        Duration: types.NullDurationFrom(1 * time.Second),
    })
    defer cancel()

    endTime, isFinal := lib.GetEndOffset(execScheduler.GetExecutionPlan())
    assert.Equal(t, 31*time.Second, endTime) // because of the default 30s gracefulStop
    assert.True(t, isFinal)

    startTime := time.Now()
    assert.NoError(t, execScheduler.Run(ctx, ctx, samples))
    runTime := time.Since(startTime)
    assert.True(t, runTime > 1*time.Second, "test did not take 1s")
    assert.True(t, runTime < 10*time.Second, "took more than 10 seconds")
}

func TestExecutionSchedulerRuntimeErrors(t *testing.T) {
    t.Parallel()
    runner := &minirunner.MiniRunner{
        Fn: func(ctx context.Context, _ *lib.State, out chan<- metrics.SampleContainer) error {
            time.Sleep(10 * time.Millisecond)
            return errors.New("hi")
        },
        Options: lib.Options{
            VUs:      null.IntFrom(10),
            Duration: types.NullDurationFrom(1 * time.Second),
        },
    }
    logger, hook := logtest.NewNullLogger()
    ctx, cancel, execScheduler, samples := newTestExecutionScheduler(t, runner, logger, lib.Options{})
    defer cancel()
...
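All of the tests above follow the same life cycle: build a lib.Runner (either with js.New from a script, or a minirunner.MiniRunner stub), wrap it in an ExecutionScheduler, Init it, then Run it while draining the samples channel. The sketch below condenses that pattern to its minimum. Every call in it appears verbatim in the tests; the import paths are assumptions based on the k6 release this file appears to come from (go.k6.io/k6) and may differ in other versions.

// A condensed sketch of the pattern shared by the tests above. It belongs
// in the same package as local_test.go, since NewExecutionScheduler is
// referenced unqualified. Import paths are assumptions and may need
// adjusting to your k6 checkout.
package local

import (
    "context"
    "net/url"
    "testing"

    "github.com/sirupsen/logrus"
    "github.com/stretchr/testify/assert"
    "github.com/stretchr/testify/require"

    "go.k6.io/k6/js"
    "go.k6.io/k6/lib"
    "go.k6.io/k6/lib/testutils"
    "go.k6.io/k6/loader"
    "go.k6.io/k6/metrics"
)

func TestExecutionSchedulerRunSketch(t *testing.T) {
    t.Parallel()

    logger := logrus.New()
    logger.SetOutput(testutils.NewTestOutput(t))

    // All metrics go through a registry; the scheduler and the JS
    // runner both need the built-in metrics registered in it.
    registry := metrics.NewRegistry()
    builtinMetrics := metrics.RegisterBuiltinMetrics(registry)

    // Compile a trivial script into a runner; the defaultOK test case
    // above runs exactly this shape with no explicit options.
    runner, err := js.New(
        &lib.RuntimeState{Logger: logger, BuiltinMetrics: builtinMetrics, Registry: registry},
        &loader.SourceData{URL: &url.URL{Path: "/script.js"}, Data: []byte(`export default function () {}`)},
        nil)
    require.NoError(t, err)

    execScheduler, err := NewExecutionScheduler(runner, builtinMetrics, logger)
    require.NoError(t, err)

    ctx, cancel := context.WithCancel(context.Background())
    defer cancel()

    // Init and Run emit metrics on an unbuffered channel, so it must
    // be drained concurrently or the scheduler will block.
    samples := make(chan metrics.SampleContainer)
    done := make(chan struct{})
    go func() {
        defer close(done)
        assert.NoError(t, execScheduler.Init(ctx, samples))
        // Run takes a global and a per-run context; the tests in this
        // file simply pass the same context for both.
        assert.NoError(t, execScheduler.Run(ctx, ctx, samples))
    }()
    for {
        select {
        case <-samples:
        case <-done:
            return
        }
    }
}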
TestExecutionSchedulerRun
Using AI Code Generation
TestExecutionSchedulerRun is a Go test function defined in local_test.go, so it cannot be imported or called from a main package: functions in _test.go files are only visible to the test binary. The way to execute it is through go test, selecting it by name so that the similarly named tests (TestExecutionSchedulerRunNonDefault, TestExecutionSchedulerRunEnv, and so on) don't also match:

    go test -run 'TestExecutionSchedulerRun$' ./core/local/

(The ./core/local/ package path matches k6 releases of this file's vintage; newer releases moved the scheduler code, so adjust the path to wherever local_test.go lives in your checkout.)
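If you want to exercise the scheduler programmatically rather than through a compiled JS script, the tests show a second route: a minirunner.MiniRunner stub passed through the file's newTestExecutionScheduler helper (defined above, with its body partially elided). Below is a minimal sketch of that route; it assumes the file's existing imports (including the guregu null package used elsewhere in it) and the helper's signature as shown in the tests.

// A minimal sketch, assuming newTestExecutionScheduler and the imports
// already present in local_test.go. It mirrors the structure of the
// setup/teardown subtests above.
func TestExecutionSchedulerMiniRunnerSketch(t *testing.T) {
    t.Parallel()

    // A stub runner: a no-op setup hook plus a no-op iteration body.
    runner := &minirunner.MiniRunner{
        SetupFn: func(ctx context.Context, out chan<- metrics.SampleContainer) ([]byte, error) {
            return nil, nil // no setup data
        },
        Fn: func(ctx context.Context, _ *lib.State, out chan<- metrics.SampleContainer) error {
            return nil // the "iteration" body
        },
    }

    // The helper applies the options, calls Init, and returns everything
    // needed to Run, as its visible tail above shows.
    ctx, cancel, execScheduler, samples := newTestExecutionScheduler(t, runner, nil, lib.Options{
        VUs:        null.IntFrom(1),
        Iterations: null.IntFrom(1),
    })
    defer cancel()

    assert.NoError(t, execScheduler.Run(ctx, ctx, samples))
}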