Best K6 code snippet using tests.newTestExecutionScheduler
local_test.go
Source: local_test.go
...46 "github.com/loadimpact/k6/lib/types"47 "github.com/loadimpact/k6/loader"48 "github.com/loadimpact/k6/stats"49)50func newTestExecutionScheduler(51 t *testing.T, runner lib.Runner, logger *logrus.Logger, opts lib.Options,52) (ctx context.Context, cancel func(), execScheduler *ExecutionScheduler, samples chan stats.SampleContainer) {53 if runner == nil {54 runner = &minirunner.MiniRunner{}55 }56 ctx, cancel = context.WithCancel(context.Background())57 newOpts, err := executor.DeriveScenariosFromShortcuts(lib.Options{58 MetricSamplesBufferSize: null.NewInt(200, false),59 }.Apply(runner.GetOptions()).Apply(opts))60 require.NoError(t, err)61 require.Empty(t, newOpts.Validate())62 require.NoError(t, runner.SetOptions(newOpts))63 if logger == nil {64 logger = logrus.New()65 logger.SetOutput(testutils.NewTestOutput(t))66 }67 execScheduler, err = NewExecutionScheduler(runner, logger)68 require.NoError(t, err)69 samples = make(chan stats.SampleContainer, newOpts.MetricSamplesBufferSize.Int64)70 go func() {71 for {72 select {73 case <-samples:74 case <-ctx.Done():75 return76 }77 }78 }()79 require.NoError(t, execScheduler.Init(ctx, samples))80 return ctx, cancel, execScheduler, samples81}82func TestExecutionSchedulerRun(t *testing.T) {83 t.Parallel()84 ctx, cancel, execScheduler, samples := newTestExecutionScheduler(t, nil, nil, lib.Options{})85 defer cancel()86 err := make(chan error, 1)87 go func() { err <- execScheduler.Run(ctx, ctx, samples) }()88 assert.NoError(t, <-err)89}90func TestExecutionSchedulerRunNonDefault(t *testing.T) {91 t.Parallel()92 testCases := []struct {93 name, script, expErr string94 }{95 {"defaultOK", `export default function () {}`, ""},96 {"nonDefaultOK", `97 export let options = {98 scenarios: {99 per_vu_iters: {100 executor: "per-vu-iterations",101 vus: 1,102 iterations: 1,103 exec: "nonDefault",104 },105 }106 }107 export function nonDefault() {}`, ""},108 }109 for _, tc := range testCases {110 tc := tc111 t.Run(tc.name, func(t *testing.T) 
{112 runner, err := js.New(&loader.SourceData{113 URL: &url.URL{Path: "/script.js"}, Data: []byte(tc.script)},114 nil, lib.RuntimeOptions{})115 require.NoError(t, err)116 logger := logrus.New()117 logger.SetOutput(testutils.NewTestOutput(t))118 execScheduler, err := NewExecutionScheduler(runner, logger)119 require.NoError(t, err)120 ctx, cancel := context.WithCancel(context.Background())121 defer cancel()122 done := make(chan struct{})123 samples := make(chan stats.SampleContainer)124 go func() {125 err := execScheduler.Init(ctx, samples)126 if tc.expErr != "" {127 assert.EqualError(t, err, tc.expErr)128 } else {129 assert.NoError(t, err)130 assert.NoError(t, execScheduler.Run(ctx, ctx, samples))131 }132 close(done)133 }()134 for {135 select {136 case <-samples:137 case <-done:138 return139 }140 }141 })142 }143}144func TestExecutionSchedulerRunEnv(t *testing.T) {145 t.Parallel()146 scriptTemplate := `147 import { Counter } from "k6/metrics";148 let errors = new Counter("errors");149 export let options = {150 scenarios: {151 executor: {152 executor: "%[1]s",153 gracefulStop: "0.5s",154 %[2]s155 }156 }157 }158 export default function () {159 if (__ENV.TESTVAR !== "%[3]s") {160 console.error('Wrong env var value. 
Expected: %[3]s, actual: ', __ENV.TESTVAR);161 errors.add(1);162 }163 }`164 executorConfigs := map[string]string{165 "constant-arrival-rate": `166 rate: 1,167 timeUnit: "0.5s",168 duration: "0.5s",169 preAllocatedVUs: 1,170 maxVUs: 2,`,171 "constant-vus": `172 vus: 1,173 duration: "0.5s",`,174 "externally-controlled": `175 vus: 1,176 duration: "0.5s",`,177 "per-vu-iterations": `178 vus: 1,179 iterations: 1,`,180 "shared-iterations": `181 vus: 1,182 iterations: 1,`,183 "ramping-arrival-rate": `184 startRate: 1,185 timeUnit: "0.5s",186 preAllocatedVUs: 1,187 maxVUs: 2,188 stages: [ { target: 1, duration: "0.5s" } ],`,189 "ramping-vus": `190 startVUs: 1,191 stages: [ { target: 1, duration: "0.5s" } ],`,192 }193 testCases := []struct{ name, script string }{}194 // Generate tests using global env and with env override195 for ename, econf := range executorConfigs {196 testCases = append(testCases, struct{ name, script string }{197 "global/" + ename, fmt.Sprintf(scriptTemplate, ename, econf, "global")})198 configWithEnvOverride := econf + "env: { TESTVAR: 'overridden' }"199 testCases = append(testCases, struct{ name, script string }{200 "override/" + ename, fmt.Sprintf(scriptTemplate, ename, configWithEnvOverride, "overridden")})201 }202 for _, tc := range testCases {203 tc := tc204 t.Run(tc.name, func(t *testing.T) {205 runner, err := js.New(&loader.SourceData{206 URL: &url.URL{Path: "/script.js"},207 Data: []byte(tc.script)},208 nil, lib.RuntimeOptions{Env: map[string]string{"TESTVAR": "global"}})209 require.NoError(t, err)210 logger := logrus.New()211 logger.SetOutput(testutils.NewTestOutput(t))212 execScheduler, err := NewExecutionScheduler(runner, logger)213 require.NoError(t, err)214 ctx, cancel := context.WithCancel(context.Background())215 defer cancel()216 done := make(chan struct{})217 samples := make(chan stats.SampleContainer)218 go func() {219 assert.NoError(t, execScheduler.Init(ctx, samples))220 assert.NoError(t, execScheduler.Run(ctx, ctx, samples))221 
close(done)222 }()223 for {224 select {225 case sample := <-samples:226 if s, ok := sample.(stats.Sample); ok && s.Metric.Name == "errors" {227 assert.FailNow(t, "received error sample from test")228 }229 case <-done:230 return231 }232 }233 })234 }235}236func TestExecutionSchedulerSystemTags(t *testing.T) {237 t.Parallel()238 tb := httpmultibin.NewHTTPMultiBin(t)239 defer tb.Cleanup()240 sr := tb.Replacer.Replace241 script := sr(`242 import http from "k6/http";243 export let options = {244 scenarios: {245 per_vu_test: {246 executor: "per-vu-iterations",247 gracefulStop: "0s",248 vus: 1,249 iterations: 1,250 },251 shared_test: {252 executor: "shared-iterations",253 gracefulStop: "0s",254 vus: 1,255 iterations: 1,256 }257 }258 }259 export default function () {260 http.get("HTTPBIN_IP_URL/");261 }`)262 runner, err := js.New(&loader.SourceData{263 URL: &url.URL{Path: "/script.js"},264 Data: []byte(script)},265 nil, lib.RuntimeOptions{})266 require.NoError(t, err)267 require.NoError(t, runner.SetOptions(runner.GetOptions().Apply(lib.Options{268 SystemTags: &stats.DefaultSystemTagSet,269 })))270 logger := logrus.New()271 logger.SetOutput(testutils.NewTestOutput(t))272 execScheduler, err := NewExecutionScheduler(runner, logger)273 require.NoError(t, err)274 ctx, cancel := context.WithTimeout(context.Background(), 3*time.Second)275 defer cancel()276 samples := make(chan stats.SampleContainer)277 done := make(chan struct{})278 go func() {279 defer close(done)280 require.NoError(t, execScheduler.Init(ctx, samples))281 require.NoError(t, execScheduler.Run(ctx, ctx, samples))282 }()283 expCommonTrailTags := stats.IntoSampleTags(&map[string]string{284 "group": "",285 "method": "GET",286 "name": sr("HTTPBIN_IP_URL/"),287 "url": sr("HTTPBIN_IP_URL/"),288 "proto": "HTTP/1.1",289 "status": "200",290 })291 expTrailPVUTagsRaw := expCommonTrailTags.CloneTags()292 expTrailPVUTagsRaw["scenario"] = "per_vu_test"293 expTrailPVUTags := stats.IntoSampleTags(&expTrailPVUTagsRaw)294 
expTrailSITagsRaw := expCommonTrailTags.CloneTags()295 expTrailSITagsRaw["scenario"] = "shared_test"296 expTrailSITags := stats.IntoSampleTags(&expTrailSITagsRaw)297 expNetTrailPVUTags := stats.IntoSampleTags(&map[string]string{298 "group": "",299 "scenario": "per_vu_test",300 })301 expNetTrailSITags := stats.IntoSampleTags(&map[string]string{302 "group": "",303 "scenario": "shared_test",304 })305 var gotCorrectTags int306 for {307 select {308 case sample := <-samples:309 switch s := sample.(type) {310 case *httpext.Trail:311 if s.Tags.IsEqual(expTrailPVUTags) || s.Tags.IsEqual(expTrailSITags) {312 gotCorrectTags++313 }314 case *netext.NetTrail:315 if s.Tags.IsEqual(expNetTrailPVUTags) || s.Tags.IsEqual(expNetTrailSITags) {316 gotCorrectTags++317 }318 }319 case <-done:320 require.Equal(t, 4, gotCorrectTags, "received wrong amount of samples with expected tags")321 return322 }323 }324}325func TestExecutionSchedulerRunCustomTags(t *testing.T) {326 t.Parallel()327 tb := httpmultibin.NewHTTPMultiBin(t)328 defer tb.Cleanup()329 sr := tb.Replacer.Replace330 scriptTemplate := sr(`331 import http from "k6/http";332 export let options = {333 scenarios: {334 executor: {335 executor: "%s",336 gracefulStop: "0.5s",337 %s338 }339 }340 }341 export default function () {342 http.get("HTTPBIN_IP_URL/");343 }`)344 executorConfigs := map[string]string{345 "constant-arrival-rate": `346 rate: 1,347 timeUnit: "0.5s",348 duration: "0.5s",349 preAllocatedVUs: 1,350 maxVUs: 2,`,351 "constant-vus": `352 vus: 1,353 duration: "0.5s",`,354 "externally-controlled": `355 vus: 1,356 duration: "0.5s",`,357 "per-vu-iterations": `358 vus: 1,359 iterations: 1,`,360 "shared-iterations": `361 vus: 1,362 iterations: 1,`,363 "ramping-arrival-rate": `364 startRate: 5,365 timeUnit: "0.5s",366 preAllocatedVUs: 1,367 maxVUs: 2,368 stages: [ { target: 10, duration: "1s" } ],`,369 "ramping-vus": `370 startVUs: 1,371 stages: [ { target: 1, duration: "0.5s" } ],`,372 }373 testCases := []struct{ name, script 
string }{}374 // Generate tests using custom tags375 for ename, econf := range executorConfigs {376 configWithCustomTag := econf + "tags: { customTag: 'value' }"377 testCases = append(testCases, struct{ name, script string }{378 ename, fmt.Sprintf(scriptTemplate, ename, configWithCustomTag)})379 }380 for _, tc := range testCases {381 tc := tc382 t.Run(tc.name, func(t *testing.T) {383 runner, err := js.New(&loader.SourceData{384 URL: &url.URL{Path: "/script.js"},385 Data: []byte(tc.script)},386 nil, lib.RuntimeOptions{})387 require.NoError(t, err)388 logger := logrus.New()389 logger.SetOutput(testutils.NewTestOutput(t))390 execScheduler, err := NewExecutionScheduler(runner, logger)391 require.NoError(t, err)392 ctx, cancel := context.WithTimeout(context.Background(), 3*time.Second)393 defer cancel()394 done := make(chan struct{})395 samples := make(chan stats.SampleContainer)396 go func() {397 defer close(done)398 require.NoError(t, execScheduler.Init(ctx, samples))399 require.NoError(t, execScheduler.Run(ctx, ctx, samples))400 }()401 var gotTrailTag, gotNetTrailTag bool402 for {403 select {404 case sample := <-samples:405 if trail, ok := sample.(*httpext.Trail); ok && !gotTrailTag {406 tags := trail.Tags.CloneTags()407 if v, ok := tags["customTag"]; ok && v == "value" {408 gotTrailTag = true409 }410 }411 if netTrail, ok := sample.(*netext.NetTrail); ok && !gotNetTrailTag {412 tags := netTrail.Tags.CloneTags()413 if v, ok := tags["customTag"]; ok && v == "value" {414 gotNetTrailTag = true415 }416 }417 case <-done:418 if !gotTrailTag || !gotNetTrailTag {419 assert.FailNow(t, "a sample with expected tag wasn't received")420 }421 return422 }423 }424 })425 }426}427// Ensure that custom executor settings are unique per executor and428// that there's no "crossover"/"pollution" between executors.429// Also test that custom tags are properly set on checks and groups metrics.430func TestExecutionSchedulerRunCustomConfigNoCrossover(t *testing.T) {431 t.Parallel()432 tb := 
httpmultibin.NewHTTPMultiBin(t)433 defer tb.Cleanup()434 script := tb.Replacer.Replace(`435 import http from "k6/http";436 import ws from 'k6/ws';437 import { Counter } from 'k6/metrics';438 import { check, group } from 'k6';439 let errors = new Counter('errors');440 export let options = {441 // Required for WS tests442 hosts: { 'httpbin.local': '127.0.0.1' },443 scenarios: {444 scenario1: {445 executor: 'per-vu-iterations',446 vus: 1,447 iterations: 1,448 gracefulStop: '0s',449 maxDuration: '1s',450 exec: 's1func',451 env: { TESTVAR1: 'scenario1' },452 tags: { testtag1: 'scenario1' },453 },454 scenario2: {455 executor: 'shared-iterations',456 vus: 1,457 iterations: 1,458 gracefulStop: '1s',459 startTime: '0.5s',460 maxDuration: '2s',461 exec: 's2func',462 env: { TESTVAR2: 'scenario2' },463 tags: { testtag2: 'scenario2' },464 },465 scenario3: {466 executor: 'per-vu-iterations',467 vus: 1,468 iterations: 1,469 gracefulStop: '1s',470 exec: 's3funcWS',471 env: { TESTVAR3: 'scenario3' },472 tags: { testtag3: 'scenario3' },473 },474 }475 }476 function checkVar(name, expected) {477 if (__ENV[name] !== expected) {478 console.error('Wrong ' + name + " env var value. 
Expected: '"479 + expected + "', actual: '" + __ENV[name] + "'");480 errors.add(1);481 }482 }483 export function s1func() {484 checkVar('TESTVAR1', 'scenario1');485 checkVar('TESTVAR2', undefined);486 checkVar('TESTVAR3', undefined);487 checkVar('TESTGLOBALVAR', 'global');488 // Intentionally try to pollute the env489 __ENV.TESTVAR2 = 'overridden';490 http.get('HTTPBIN_IP_URL/', { tags: { reqtag: 'scenario1' }});491 }492 export function s2func() {493 checkVar('TESTVAR1', undefined);494 checkVar('TESTVAR2', 'scenario2');495 checkVar('TESTVAR3', undefined);496 checkVar('TESTGLOBALVAR', 'global');497 http.get('HTTPBIN_IP_URL/', { tags: { reqtag: 'scenario2' }});498 }499 export function s3funcWS() {500 checkVar('TESTVAR1', undefined);501 checkVar('TESTVAR2', undefined);502 checkVar('TESTVAR3', 'scenario3');503 checkVar('TESTGLOBALVAR', 'global');504 const customTags = { wstag: 'scenario3' };505 group('wsgroup', function() {506 const response = ws.connect('WSBIN_URL/ws-echo', { tags: customTags },507 function (socket) {508 socket.on('open', function() {509 socket.send('hello');510 });511 socket.on('message', function(msg) {512 if (msg != 'hello') {513 console.error("Expected to receive 'hello' but got '" + msg + "' instead!");514 errors.add(1);515 }516 socket.close()517 });518 socket.on('error', function (e) {519 console.log('ws error: ' + e.error());520 errors.add(1);521 });522 }523 );524 check(response, { 'status is 101': (r) => r && r.status === 101 }, customTags);525 });526 }527`)528 runner, err := js.New(&loader.SourceData{529 URL: &url.URL{Path: "/script.js"},530 Data: []byte(script)},531 nil, lib.RuntimeOptions{Env: map[string]string{"TESTGLOBALVAR": "global"}})532 require.NoError(t, err)533 logger := logrus.New()534 logger.SetOutput(testutils.NewTestOutput(t))535 execScheduler, err := NewExecutionScheduler(runner, logger)536 require.NoError(t, err)537 ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second)538 defer cancel()539 samples := 
make(chan stats.SampleContainer)540 go func() {541 assert.NoError(t, execScheduler.Init(ctx, samples))542 assert.NoError(t, execScheduler.Run(ctx, ctx, samples))543 close(samples)544 }()545 expectedTrailTags := []map[string]string{546 {"testtag1": "scenario1", "reqtag": "scenario1"},547 {"testtag2": "scenario2", "reqtag": "scenario2"},548 }549 expectedNetTrailTags := []map[string]string{550 {"testtag1": "scenario1"},551 {"testtag2": "scenario2"},552 }553 expectedConnSampleTags := map[string]string{554 "testtag3": "scenario3", "wstag": "scenario3",555 }556 expectedPlainSampleTags := []map[string]string{557 {"testtag3": "scenario3"},558 {"testtag3": "scenario3", "wstag": "scenario3"},559 }560 var gotSampleTags int561 for sample := range samples {562 switch s := sample.(type) {563 case stats.Sample:564 if s.Metric.Name == "errors" {565 assert.FailNow(t, "received error sample from test")566 }567 if s.Metric.Name == "checks" || s.Metric.Name == "group_duration" {568 tags := s.Tags.CloneTags()569 for _, expTags := range expectedPlainSampleTags {570 if reflect.DeepEqual(expTags, tags) {571 gotSampleTags++572 }573 }574 }575 case *httpext.Trail:576 tags := s.Tags.CloneTags()577 for _, expTags := range expectedTrailTags {578 if reflect.DeepEqual(expTags, tags) {579 gotSampleTags++580 }581 }582 case *netext.NetTrail:583 tags := s.Tags.CloneTags()584 for _, expTags := range expectedNetTrailTags {585 if reflect.DeepEqual(expTags, tags) {586 gotSampleTags++587 }588 }589 case stats.ConnectedSamples:590 for _, sm := range s.Samples {591 tags := sm.Tags.CloneTags()592 if reflect.DeepEqual(expectedConnSampleTags, tags) {593 gotSampleTags++594 }595 }596 }597 }598 require.Equal(t, 8, gotSampleTags, "received wrong amount of samples with expected tags")599}600func TestExecutionSchedulerSetupTeardownRun(t *testing.T) {601 t.Parallel()602 t.Run("Normal", func(t *testing.T) {603 setupC := make(chan struct{})604 teardownC := make(chan struct{})605 runner := &minirunner.MiniRunner{606 
SetupFn: func(ctx context.Context, out chan<- stats.SampleContainer) ([]byte, error) {607 close(setupC)608 return nil, nil609 },610 TeardownFn: func(ctx context.Context, out chan<- stats.SampleContainer) error {611 close(teardownC)612 return nil613 },614 }615 ctx, cancel, execScheduler, samples := newTestExecutionScheduler(t, runner, nil, lib.Options{})616 err := make(chan error, 1)617 go func() { err <- execScheduler.Run(ctx, ctx, samples) }()618 defer cancel()619 <-setupC620 <-teardownC621 assert.NoError(t, <-err)622 })623 t.Run("Setup Error", func(t *testing.T) {624 runner := &minirunner.MiniRunner{625 SetupFn: func(ctx context.Context, out chan<- stats.SampleContainer) ([]byte, error) {626 return nil, errors.New("setup error")627 },628 }629 ctx, cancel, execScheduler, samples := newTestExecutionScheduler(t, runner, nil, lib.Options{})630 defer cancel()631 assert.EqualError(t, execScheduler.Run(ctx, ctx, samples), "setup error")632 })633 t.Run("Don't Run Setup", func(t *testing.T) {634 runner := &minirunner.MiniRunner{635 SetupFn: func(ctx context.Context, out chan<- stats.SampleContainer) ([]byte, error) {636 return nil, errors.New("setup error")637 },638 TeardownFn: func(ctx context.Context, out chan<- stats.SampleContainer) error {639 return errors.New("teardown error")640 },641 }642 ctx, cancel, execScheduler, samples := newTestExecutionScheduler(t, runner, nil, lib.Options{643 NoSetup: null.BoolFrom(true),644 VUs: null.IntFrom(1),645 Iterations: null.IntFrom(1),646 })647 defer cancel()648 assert.EqualError(t, execScheduler.Run(ctx, ctx, samples), "teardown error")649 })650 t.Run("Teardown Error", func(t *testing.T) {651 runner := &minirunner.MiniRunner{652 SetupFn: func(ctx context.Context, out chan<- stats.SampleContainer) ([]byte, error) {653 return nil, nil654 },655 TeardownFn: func(ctx context.Context, out chan<- stats.SampleContainer) error {656 return errors.New("teardown error")657 },658 }659 ctx, cancel, execScheduler, samples := 
newTestExecutionScheduler(t, runner, nil, lib.Options{660 VUs: null.IntFrom(1),661 Iterations: null.IntFrom(1),662 })663 defer cancel()664 assert.EqualError(t, execScheduler.Run(ctx, ctx, samples), "teardown error")665 })666 t.Run("Don't Run Teardown", func(t *testing.T) {667 runner := &minirunner.MiniRunner{668 SetupFn: func(ctx context.Context, out chan<- stats.SampleContainer) ([]byte, error) {669 return nil, nil670 },671 TeardownFn: func(ctx context.Context, out chan<- stats.SampleContainer) error {672 return errors.New("teardown error")673 },674 }675 ctx, cancel, execScheduler, samples := newTestExecutionScheduler(t, runner, nil, lib.Options{676 NoTeardown: null.BoolFrom(true),677 VUs: null.IntFrom(1),678 Iterations: null.IntFrom(1),679 })680 defer cancel()681 assert.NoError(t, execScheduler.Run(ctx, ctx, samples))682 })683}684func TestExecutionSchedulerStages(t *testing.T) {685 t.Parallel()686 testdata := map[string]struct {687 Duration time.Duration688 Stages []lib.Stage689 }{690 "one": {691 1 * time.Second,692 []lib.Stage{{Duration: types.NullDurationFrom(1 * time.Second), Target: null.IntFrom(1)}},693 },694 "two": {695 2 * time.Second,696 []lib.Stage{697 {Duration: types.NullDurationFrom(1 * time.Second), Target: null.IntFrom(1)},698 {Duration: types.NullDurationFrom(1 * time.Second), Target: null.IntFrom(2)},699 },700 },701 "four": {702 4 * time.Second,703 []lib.Stage{704 {Duration: types.NullDurationFrom(1 * time.Second), Target: null.IntFrom(5)},705 {Duration: types.NullDurationFrom(3 * time.Second), Target: null.IntFrom(10)},706 },707 },708 }709 for name, data := range testdata {710 data := data711 t.Run(name, func(t *testing.T) {712 t.Parallel()713 runner := &minirunner.MiniRunner{714 Fn: func(ctx context.Context, out chan<- stats.SampleContainer) error {715 time.Sleep(100 * time.Millisecond)716 return nil717 },718 }719 ctx, cancel, execScheduler, samples := newTestExecutionScheduler(t, runner, nil, lib.Options{720 VUs: null.IntFrom(1),721 Stages: 
data.Stages,722 })723 defer cancel()724 assert.NoError(t, execScheduler.Run(ctx, ctx, samples))725 assert.True(t, execScheduler.GetState().GetCurrentTestRunDuration() >= data.Duration)726 })727 }728}729func TestExecutionSchedulerEndTime(t *testing.T) {730 t.Parallel()731 runner := &minirunner.MiniRunner{732 Fn: func(ctx context.Context, out chan<- stats.SampleContainer) error {733 time.Sleep(100 * time.Millisecond)734 return nil735 },736 }737 ctx, cancel, execScheduler, samples := newTestExecutionScheduler(t, runner, nil, lib.Options{738 VUs: null.IntFrom(10),739 Duration: types.NullDurationFrom(1 * time.Second),740 })741 defer cancel()742 endTime, isFinal := lib.GetEndOffset(execScheduler.GetExecutionPlan())743 assert.Equal(t, 31*time.Second, endTime) // because of the default 30s gracefulStop744 assert.True(t, isFinal)745 startTime := time.Now()746 assert.NoError(t, execScheduler.Run(ctx, ctx, samples))747 runTime := time.Since(startTime)748 assert.True(t, runTime > 1*time.Second, "test did not take 1s")749 assert.True(t, runTime < 10*time.Second, "took more than 10 seconds")750}751func TestExecutionSchedulerRuntimeErrors(t *testing.T) {752 t.Parallel()753 runner := &minirunner.MiniRunner{754 Fn: func(ctx context.Context, out chan<- stats.SampleContainer) error {755 time.Sleep(10 * time.Millisecond)756 return errors.New("hi")757 },758 Options: lib.Options{759 VUs: null.IntFrom(10),760 Duration: types.NullDurationFrom(1 * time.Second),761 },762 }763 logger, hook := logtest.NewNullLogger()764 ctx, cancel, execScheduler, samples := newTestExecutionScheduler(t, runner, logger, lib.Options{})765 defer cancel()766 endTime, isFinal := lib.GetEndOffset(execScheduler.GetExecutionPlan())767 assert.Equal(t, 31*time.Second, endTime) // because of the default 30s gracefulStop768 assert.True(t, isFinal)769 startTime := time.Now()770 assert.NoError(t, execScheduler.Run(ctx, ctx, samples))771 runTime := time.Since(startTime)772 assert.True(t, runTime > 1*time.Second, "test did 
not take 1s")773 assert.True(t, runTime < 10*time.Second, "took more than 10 seconds")774 assert.NotEmpty(t, hook.Entries)775 for _, e := range hook.Entries {776 assert.Equal(t, "hi", e.Message)777 }778}779func TestExecutionSchedulerEndErrors(t *testing.T) {780 t.Parallel()781 exec := executor.NewConstantVUsConfig("we_need_hard_stop")782 exec.VUs = null.IntFrom(10)783 exec.Duration = types.NullDurationFrom(1 * time.Second)784 exec.GracefulStop = types.NullDurationFrom(0 * time.Second)785 runner := &minirunner.MiniRunner{786 Fn: func(ctx context.Context, out chan<- stats.SampleContainer) error {787 <-ctx.Done()788 return errors.New("hi")789 },790 Options: lib.Options{791 Scenarios: lib.ScenarioConfigs{exec.GetName(): exec},792 },793 }794 logger, hook := logtest.NewNullLogger()795 ctx, cancel, execScheduler, samples := newTestExecutionScheduler(t, runner, logger, lib.Options{})796 defer cancel()797 endTime, isFinal := lib.GetEndOffset(execScheduler.GetExecutionPlan())798 assert.Equal(t, 1*time.Second, endTime) // because of the 0s gracefulStop799 assert.True(t, isFinal)800 startTime := time.Now()801 assert.NoError(t, execScheduler.Run(ctx, ctx, samples))802 runTime := time.Since(startTime)803 assert.True(t, runTime > 1*time.Second, "test did not take 1s")804 assert.True(t, runTime < 10*time.Second, "took more than 10 seconds")805 assert.Empty(t, hook.Entries)806}807func TestExecutionSchedulerEndIterations(t *testing.T) {808 t.Parallel()809 metric := &stats.Metric{Name: "test_metric"}810 options, err := executor.DeriveScenariosFromShortcuts(lib.Options{811 VUs: null.IntFrom(1),812 Iterations: null.IntFrom(100),813 })814 require.NoError(t, err)815 require.Empty(t, options.Validate())816 var i int64817 runner := &minirunner.MiniRunner{818 Fn: func(ctx context.Context, out chan<- stats.SampleContainer) error {819 select {820 case <-ctx.Done():821 default:822 atomic.AddInt64(&i, 1)823 }824 out <- stats.Sample{Metric: metric, Value: 1.0}825 return nil826 },827 Options: 
options,828 }829 ctx, cancel := context.WithCancel(context.Background())830 defer cancel()831 logger := logrus.New()832 logger.SetOutput(testutils.NewTestOutput(t))833 execScheduler, err := NewExecutionScheduler(runner, logger)834 require.NoError(t, err)835 samples := make(chan stats.SampleContainer, 300)836 require.NoError(t, execScheduler.Init(ctx, samples))837 require.NoError(t, execScheduler.Run(ctx, ctx, samples))838 assert.Equal(t, uint64(100), execScheduler.GetState().GetFullIterationCount())839 assert.Equal(t, uint64(0), execScheduler.GetState().GetPartialIterationCount())840 assert.Equal(t, int64(100), i)841 require.Equal(t, 100, len(samples)) // TODO: change to 200 https://github.com/loadimpact/k6/issues/1250842 for i := 0; i < 100; i++ {843 mySample, ok := <-samples844 require.True(t, ok)845 assert.Equal(t, stats.Sample{Metric: metric, Value: 1.0}, mySample)846 }847}848func TestExecutionSchedulerIsRunning(t *testing.T) {849 t.Parallel()850 runner := &minirunner.MiniRunner{851 Fn: func(ctx context.Context, out chan<- stats.SampleContainer) error {852 <-ctx.Done()853 return nil854 },855 }856 ctx, cancel, execScheduler, _ := newTestExecutionScheduler(t, runner, nil, lib.Options{})857 state := execScheduler.GetState()858 err := make(chan error)859 go func() { err <- execScheduler.Run(ctx, ctx, nil) }()860 for !state.HasStarted() {861 time.Sleep(10 * time.Microsecond)862 }863 cancel()864 for !state.HasEnded() {865 time.Sleep(10 * time.Microsecond)866 }867 assert.NoError(t, <-err)868}869func TestRealTimeAndSetupTeardownMetrics(t *testing.T) {870 if runtime.GOOS == "windows" {...
newTestExecutionScheduler
Using AI Code Generation
1import (2func main() {3 gomega.RegisterFailHandler(ginkgo.Fail)4 ginkgo.RunSpecsWithDefaultAndCustomReporters(t, "Test Suite", []ginkgo.Reporter{reporters.NewJUnitReporter("junit.xml")})5}6var _ = ginkgo.SynchronizedBeforeSuite(func() []byte {7 fmt.Println("SynchronizedBeforeSuite - Setup")8}, func(data []byte) {9 fmt.Println("SynchronizedBeforeSuite - Setup - Done")10})11var _ = ginkgo.SynchronizedAfterSuite(func() {12 fmt.Println("SynchronizedAfterSuite - Teardown")13}, func() {14 fmt.Println("SynchronizedAfterSuite - Teardown - Done")15})16var _ = ginkgo.Describe("Test Suite", func() {17 ginkgo.It("Test Case 1", func() {18 fmt.Println("Test Case 1")19 })20 ginkgo.It("Test Case 2", func() {21 fmt.Println("Test Case 2")22 })23 ginkgo.It("Test Case 3", func() {24 fmt.Println("Test Case 3")25 })26 ginkgo.It("Test Case 4", func() {27 fmt.Println("Test Case 4")28 })29 ginkgo.It("Test Case 5", func() {30 fmt.Println("Test Case 5")31 })32})33import (
newTestExecutionScheduler
Using AI Code Generation
1import (2func main() {3 fmt.Println("Hello, playground")4}5import (6func main() {7 fmt.Println("Hello, playground")8}9import (10func main() {11 fmt.Println("Hello, playground")12}13import (14func main() {15 fmt.Println("Hello, playground")16}17import (18func main() {19 fmt.Println("Hello, playground")20}21import (22func main() {23 fmt.Println("Hello, playground")24}25import (26func main() {27 fmt.Println("Hello, playground")28}29import (30func main() {31 fmt.Println("Hello, playground")32}33import (34func main() {35 fmt.Println("Hello, playground")36}37import (38func main() {39 fmt.Println("Hello, playground")40}41import (42func main() {43 fmt.Println("Hello, playground")44}45import (46func main() {47 fmt.Println("Hello, playground")48}49import (
newTestExecutionScheduler
Using AI Code Generation
1func main() {2 tests := newTestExecutionScheduler()3 tests.runTests()4}5func newTestExecutionScheduler() *tests {6 return &tests{}7}8func (t *tests) runTests() {9 fmt.Println("tests are running")10}11type tests struct {12}13type TestExecutionScheduler interface {14 runTests()15}16Your name to display (optional):17Your name to display (optional):18func main()19func main() {20}21func main() {22}23func main() {24}25func main() {26}27func main() {28}
newTestExecutionScheduler
Using AI Code Generation
1import (2func main() {3 testScheduler := test.NewTestExecutionScheduler()4 fmt.Println(testScheduler)5}6import (7func main() {8 testScheduler := test.NewTestExecutionScheduler()9 fmt.Println(testScheduler)10}11&{[]}12&{[]}
newTestExecutionScheduler
Using AI Code Generation
1import (2func main() {3 c := cron.New()4 c.AddFunc("0 0 0 * * *", func() { fmt.Println("Every day at midnight") })5 c.AddFunc("@hourly", func() { fmt.Println("Every hour") })6 c.AddFunc("@every 1m30s", func() { fmt.Println("Every minute thirty") })7 c.Start()8 time.Sleep(5 * time.Second)9 c.Stop()10}
newTestExecutionScheduler
Using AI Code Generation
1func main() {2 tests := make([]*Test, 0)3 tests = append(tests, NewTest("Test 1", 5))4 tests = append(tests, NewTest("Test 2", 10))5 tests = append(tests, NewTest("Test 3", 20))6 tests = append(tests, NewTest("Test 4", 5))7 tests = append(tests, NewTest("Test 5", 10))8 tests = append(tests, NewTest("Test 6", 20))9 tests = append(tests, NewTest("Test 7", 5))10 tests = append(tests, NewTest("Test 8", 10))11 tests = append(tests, NewTest("Test 9", 20))12 tests = append(tests, NewTest("Test 10", 5))13 tests = append(tests, NewTest("Test 11", 10))14 tests = append(tests, NewTest("Test 12", 20))15 testExecutionScheduler := NewTestExecutionScheduler(tests)16 testExecutionScheduler.run()17}18import (19type Test struct {20}21func NewTest(name string, executionTime int) *Test {22 return &Test{name: name, executionTime: executionTime}23}24type TestExecutionScheduler struct {25}26func NewTestExecutionScheduler(tests []*Test) *TestExecutionScheduler {27 return &TestExecutionScheduler{tests: tests}28}29func (t *TestExecutionScheduler) run() {30 fmt.Println("test execution started")31 for _, test := range t.tests {32 fmt.Printf("test %s execution started33 t.wg.Add(1)34 go func(test *Test) {35 time.Sleep(time.Duration(test.executionTime) * time.Second)36 fmt.Printf("test %s execution completed37 t.wg.Done()38 }(test)39 }40 t.wg.Wait()41 fmt.Println("test execution completed")42}
newTestExecutionScheduler
Using AI Code Generation
1import (2func main() {3 fmt.Println("Hello, playground")4}5import (6func main() {7 fmt.Println("Hello, playground")8 tests.NewTestExecutionScheduler()9}10./1.go:11: cannot use cmd.NewTestExecutionScheduler() (type *tests.TestExecutionScheduler) as type "github.com/openshift/origin/pkg/oc/cli/cmd/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/util".Factory in return argument:11 *tests.TestExecutionScheduler does not implement "github.com/openshift/origin/pkg/oc/cli/cmd/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/util".Factory (wrong type for NewBuilder method)12 have NewBuilder() *"github.com/openshift/origin/pkg/oc/cli/cmd/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/util".Builder13 want NewBuilder() *"github.com/openshift/origin/pkg/oc/cli/cmd/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/util".Builder14./1.go:11: cannot use cmd.NewTestExecutionScheduler() (type *tests.TestExecutionScheduler) as type "github.com/openshift/origin/pkg/oc/cli/cmd/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/util".Factory in return argument:15 *tests.TestExecutionScheduler does not implement "github.com/openshift/origin/pkg/oc/cli/cmd/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/util".Factory (wrong type for NewBuilder method)16 have NewBuilder() *"github.com/openshift/origin/pkg/oc/cli/cmd/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/util".Builder17 want NewBuilder() *"github.com/openshift/origin/pkg/oc/cli/cmd/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/util".Builder
newTestExecutionScheduler
Using AI Code Generation
1import (2func main() {3 tests := newTestExecutionScheduler()4 tests.addTest("Test 1", func() {5 time.Sleep(2 * time.Second)6 fmt.Println("Test 1")7 })8 tests.addTest("Test 2", func() {9 time.Sleep(1 * time.Second)10 fmt.Println("Test 2")11 })12 tests.addTest("Test 3", func() {13 time.Sleep(3 * time.Second)14 fmt.Println("Test 3")15 })16 tests.runTests()17}
newTestExecutionScheduler
Using AI Code Generation
1var testExecutionScheduler = tests.newTestExecutionScheduler();2var testExecution = testExecutionScheduler.createNewTestExecution();3testExecution.setTestSuite(testSuite);4testExecution.setTestEnvironment(testEnvironment);5testExecution.setTestExecutionType(testExecutionType);6testExecution.setTestExecutionMode(testExecutionMode);7testExecution.setTestExecutionName(testExecutionName);8testExecution.setTestExecutionDescription(testExecutionDescription);9testExecution.setTestExecutionOwner(testExecutionOwner);10testExecution.setTestExecutionStartDate(testExecutionStartDate);11testExecution.setTestExecutionEndDate(testExecutionEndDate);12testExecution.setTestExecutionStatus(testExecutionStatus);13testExecution.setTestExecutionResult(testExecutionResult);14testExecution.setTestExecutionResultSummary(testExecutionResultSummary);15testExecution.setTestExecutionResultDetails(test
Learn to execute automation testing from scratch with the LambdaTest Learning Hub — right from setting up the prerequisites and running your first automation test to following best practices and diving deeper into advanced test scenarios. The LambdaTest Learning Hub compiles step-by-step guides to help you become proficient with different test automation frameworks, e.g. Selenium, Cypress, and TestNG.
You can also refer to the video tutorials on the LambdaTest YouTube channel to get step-by-step demonstrations from industry experts.
Get 100 automation test minutes FREE!