Best Testkube code snippet using workerpool.TestWorkerPool
clusterautoscaler.go
Source:clusterautoscaler.go
1// Copyright (c) 2020 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file2//3// Licensed under the Apache License, Version 2.0 (the "License");4// you may not use this file except in compliance with the License.5// You may obtain a copy of the License at6//7// http://www.apache.org/licenses/LICENSE-2.08//9// Unless required by applicable law or agreed to in writing, software10// distributed under the License is distributed on an "AS IS" BASIS,11// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.12// See the License for the specific language governing permissions and13// limitations under the License.14/**15 Overview16 - Tests Shoot cluster autoscaling17 AfterSuite18 - Cleanup Workload in Shoot19 Test:20 1. Create a Deployment with affinity that does not allow Pods to be co-located in the same Node21 2. Scale up the Deployment and see one Node added (because of the Pod affinity)22 3. 
Scale down the Deployment and see one Node removed (after spec.kubernetes.clusterAutoscaler.scaleDownUnneededTime|scaleDownDelayAfterAdd)23 Expected Output24 - Scale-up/down should work properly25 **/26package operations27import (28 "context"29 "time"30 corev1beta1 "github.com/gardener/gardener/pkg/apis/core/v1beta1"31 "github.com/gardener/gardener/pkg/client/kubernetes"32 kutil "github.com/gardener/gardener/pkg/utils/kubernetes"33 "github.com/gardener/gardener/test/framework"34 "github.com/gardener/gardener/test/framework/resources/templates"35 "github.com/onsi/ginkgo"36 "github.com/onsi/gomega"37 appsv1 "k8s.io/api/apps/v1"38 corev1 "k8s.io/api/core/v1"39 metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"40 "sigs.k8s.io/controller-runtime/pkg/client"41)42const (43 podAntiAffinityDeploymentName = "pod-anti-affinity"44 podAntiAffinityDeploymentNamespace = metav1.NamespaceDefault45 scaleDownDelayAfterAdd = 1 * time.Minute46 scaleDownUnneededTime = 1 * time.Minute47 testTimeout = 60 * time.Minute48 scaleUpTimeout = 20 * time.Minute49 scaleDownTimeout = 20 * time.Minute50 cleanupTimeout = 20 * time.Minute51)52var _ = ginkgo.Describe("Shoot clusterautoscaler testing", func() {53 var (54 f = framework.NewShootFramework(nil)55 testWorkerPoolName = "ca-test"56 origClusterAutoscalerConfig *corev1beta1.ClusterAutoscaler57 origWorkers []corev1beta1.Worker58 origMinWorkers int3259 origMaxWorkers int3260 )61 f.Default().Serial().CIt("should autoscale a single worker group", func(ctx context.Context) {62 var (63 shoot = f.Shoot64 workerPoolName = shoot.Spec.Provider.Workers[0].Name65 )66 origClusterAutoscalerConfig = shoot.Spec.Kubernetes.ClusterAutoscaler.DeepCopy()67 origMinWorkers = shoot.Spec.Provider.Workers[0].Minimum68 origMaxWorkers = shoot.Spec.Provider.Workers[0].Maximum69 ginkgo.By("updating shoot spec for test")70 // set clusterautoscaler params to lower values so we don't have to wait too long71 // and ensure the worker pool has maximum > minimum72 err := 
f.UpdateShoot(ctx, func(s *corev1beta1.Shoot) error {73 if s.Spec.Kubernetes.ClusterAutoscaler == nil {74 s.Spec.Kubernetes.ClusterAutoscaler = &corev1beta1.ClusterAutoscaler{}75 }76 s.Spec.Kubernetes.ClusterAutoscaler.ScaleDownDelayAfterAdd = &metav1.Duration{Duration: scaleDownDelayAfterAdd}77 s.Spec.Kubernetes.ClusterAutoscaler.ScaleDownUnneededTime = &metav1.Duration{Duration: scaleDownUnneededTime}78 if origMaxWorkers != origMinWorkers+1 {79 s.Spec.Provider.Workers[0].Maximum = origMinWorkers + 180 }81 return nil82 })83 framework.ExpectNoError(err)84 ginkgo.By("creating pod-anti-affinity deployment")85 values := podAntiAffinityValues{86 Name: podAntiAffinityDeploymentName,87 Namespace: podAntiAffinityDeploymentNamespace,88 Replicas: origMinWorkers,89 WorkerPool: workerPoolName,90 }91 err = f.RenderAndDeployTemplate(ctx, f.ShootClient, templates.PodAntiAffinityDeploymentName, values)92 framework.ExpectNoError(err)93 err = f.WaitUntilDeploymentIsReady(ctx, values.Name, values.Namespace, f.ShootClient)94 framework.ExpectNoError(err)95 ginkgo.By("scaling up pod-anti-affinity deployment")96 err = kubernetes.ScaleDeployment(ctx, f.ShootClient.Client(), client.ObjectKey{Namespace: values.Namespace, Name: values.Name}, origMinWorkers+1)97 framework.ExpectNoError(err)98 ginkgo.By("one node should be added to the worker pool")99 err = framework.WaitForNNodesToBeHealthyInWorkerPool(ctx, f.ShootClient, int(origMinWorkers+1), &workerPoolName, scaleUpTimeout)100 framework.ExpectNoError(err)101 ginkgo.By("pod-anti-affinity deployment should get healthy again")102 err = f.WaitUntilDeploymentIsReady(ctx, values.Name, values.Namespace, f.ShootClient)103 framework.ExpectNoError(err)104 ginkgo.By("scaling down pod-anti-affinity deployment")105 err = kubernetes.ScaleDeployment(ctx, f.ShootClient.Client(), client.ObjectKey{Namespace: values.Namespace, Name: values.Name}, origMinWorkers)106 framework.ExpectNoError(err)107 ginkgo.By("one node should be removed from the worker 
pool")108 err = framework.WaitForNNodesToBeHealthyInWorkerPool(ctx, f.ShootClient, int(origMinWorkers), &workerPoolName, scaleDownTimeout)109 framework.ExpectNoError(err)110 }, testTimeout, framework.WithCAfterTest(func(ctx context.Context) {111 ginkgo.By("reverting shoot spec changes by test")112 err := f.UpdateShoot(ctx, func(s *corev1beta1.Shoot) error {113 s.Spec.Kubernetes.ClusterAutoscaler = origClusterAutoscalerConfig114 s.Spec.Provider.Workers[0].Maximum = origMaxWorkers115 return nil116 })117 framework.ExpectNoError(err)118 ginkgo.By("deleting pod-anti-affinity deployment")119 err = kutil.DeleteObject(ctx, f.ShootClient.Client(), &appsv1.Deployment{120 ObjectMeta: metav1.ObjectMeta{121 Name: podAntiAffinityDeploymentName,122 Namespace: podAntiAffinityDeploymentNamespace,123 },124 })125 framework.ExpectNoError(err)126 }, cleanupTimeout))127 f.Default().Serial().CIt("should autoscale a single worker group to/from zero", func(ctx context.Context) {128 var shoot = f.Shoot129 origClusterAutoscalerConfig = shoot.Spec.Kubernetes.ClusterAutoscaler.DeepCopy()130 origWorkers = shoot.Spec.Provider.Workers131 if shoot.Spec.Provider.Type != "aws" && shoot.Spec.Provider.Type != "azure" {132 ginkgo.Skip("not applicable")133 }134 // Create a dedicated worker-pool for cluster autoscaler.135 testWorkerPool := origWorkers[0]136 testWorkerPool.Name = testWorkerPoolName137 testWorkerPool.Minimum = 0138 testWorkerPool.Maximum = 2139 testWorkerPool.Taints = []corev1.Taint{140 {141 Key: testWorkerPoolName,142 Effect: corev1.TaintEffectNoSchedule,143 Value: testWorkerPoolName,144 },145 }146 ginkgo.By("updating shoot spec for test")147 err := f.UpdateShoot(ctx, func(s *corev1beta1.Shoot) error {148 s.Spec.Provider.Workers = append(shoot.Spec.Provider.Workers, testWorkerPool)149 if s.Spec.Kubernetes.ClusterAutoscaler == nil {150 s.Spec.Kubernetes.ClusterAutoscaler = &corev1beta1.ClusterAutoscaler{}151 }152 s.Spec.Kubernetes.ClusterAutoscaler.ScaleDownDelayAfterAdd = 
&metav1.Duration{Duration: scaleDownDelayAfterAdd}153 s.Spec.Kubernetes.ClusterAutoscaler.ScaleDownUnneededTime = &metav1.Duration{Duration: scaleDownUnneededTime}154 return nil155 })156 framework.ExpectNoError(err)157 nodeList, err := framework.GetAllNodesInWorkerPool(ctx, f.ShootClient, &testWorkerPoolName)158 framework.ExpectNoError(err)159 nodeCount := len(nodeList.Items)160 gomega.Expect(nodeCount).To(gomega.BeEquivalentTo(testWorkerPool.Minimum), "shoot should have minimum node count before the test")161 ginkgo.By("creating pod-anti-affinity deployment")162 values := podAntiAffinityValues{163 Name: podAntiAffinityDeploymentName,164 Namespace: podAntiAffinityDeploymentNamespace,165 Replicas: 0, // This is to test the scale-from-zero.166 WorkerPool: testWorkerPoolName,167 TolerationKey: testWorkerPoolName,168 }169 err = f.RenderAndDeployTemplate(ctx, f.ShootClient, templates.PodAntiAffinityDeploymentName, values)170 framework.ExpectNoError(err)171 err = f.WaitUntilDeploymentIsReady(ctx, values.Name, values.Namespace, f.ShootClient)172 framework.ExpectNoError(err)173 ginkgo.By("scaling up pod-anti-affinity deployment")174 err = kubernetes.ScaleDeployment(ctx, f.ShootClient.Client(), client.ObjectKey{Namespace: values.Namespace, Name: values.Name}, 1)175 framework.ExpectNoError(err)176 ginkgo.By("one node should be added to the worker pool")177 err = framework.WaitForNNodesToBeHealthyInWorkerPool(ctx, f.ShootClient, 1, &testWorkerPoolName, scaleUpTimeout)178 framework.ExpectNoError(err)179 ginkgo.By("pod-anti-affinity deployment should get healthy again")180 err = f.WaitUntilDeploymentIsReady(ctx, values.Name, values.Namespace, f.ShootClient)181 framework.ExpectNoError(err)182 ginkgo.By("scaling down pod-anti-affinity deployment")183 err = kubernetes.ScaleDeployment(ctx, f.ShootClient.Client(), client.ObjectKey{Namespace: values.Namespace, Name: values.Name}, 0)184 framework.ExpectNoError(err)185 ginkgo.By("worker pool should be scaled-down to 0")186 err = 
framework.WaitForNNodesToBeHealthyInWorkerPool(ctx, f.ShootClient, 0, &testWorkerPoolName, scaleDownTimeout)187 framework.ExpectNoError(err)188 }, testTimeout, framework.WithCAfterTest(func(ctx context.Context) {189 ginkgo.By("reverting shoot spec changes by test")190 err := f.UpdateShoot(ctx, func(s *corev1beta1.Shoot) error {191 s.Spec.Kubernetes.ClusterAutoscaler = origClusterAutoscalerConfig192 for i, worker := range s.Spec.Provider.Workers {193 if worker.Name == testWorkerPoolName {194 // Remove the dedicated ca-test workerpool195 s.Spec.Provider.Workers[i] = s.Spec.Provider.Workers[len(s.Spec.Provider.Workers)-1]196 s.Spec.Provider.Workers = s.Spec.Provider.Workers[:len(s.Spec.Provider.Workers)-1]197 break198 }199 }200 return nil201 })202 framework.ExpectNoError(err)203 ginkgo.By("deleting pod-anti-affinity deployment")204 err = kutil.DeleteObject(ctx, f.ShootClient.Client(), &appsv1.Deployment{205 ObjectMeta: metav1.ObjectMeta{206 Name: podAntiAffinityDeploymentName,207 Namespace: podAntiAffinityDeploymentNamespace,208 },209 })210 framework.ExpectNoError(err)211 }, cleanupTimeout))212})213type podAntiAffinityValues struct {214 Name string215 Namespace string216 Replicas int32217 WorkerPool string218 TolerationKey string219}...
worker-pool_test.go
Source:worker-pool_test.go
1package tools2import "testing"3func TestWorkerPool(t *testing.T) {4 t.Run("should Not create a worker if number of workers to create is less than one", func(t *testing.T) {5 numberOfWorkers := 06 _, err := NewWorker(uint8(numberOfWorkers))7 if err == nil {8 t.Fatalf("should not have createe an instance of the worker %v", err)9 }10 })11 t.Run("should Acquire an worker and successfully release it back to the pool", func(t *testing.T) {12 numberOfWorkers := 513 workerPool, err := NewWorker(uint8(numberOfWorkers))14 if err != nil {15 t.Fatalf("failed to create an instance of the worker %v", err)16 }17 releaseFirstWorker := workerPool.Acquire()...
workerpool_suite_test.go
Source:workerpool_suite_test.go
1package workerpool_test2import (3 "testing"4 . "github.com/onsi/ginkgo"5 . "github.com/onsi/gomega"6)7func TestWorkerpool(t *testing.T) {8 RegisterFailHandler(Fail)9 RunSpecs(t, "Workerpool Suite")10}...
TestWorkerPool
Using AI Code Generation
1import (2func main() {3 wp := workerpool.New(10)4 for i := 0; i < 100; i++ {5 wp.Submit(func() {6 time.Sleep(1 * time.Second)7 fmt.Println("task", i)8 })9 }10 wp.StopWait()11}
TestWorkerPool
Using AI Code Generation
1import (2func main() {3 var wp = workerpool.New(10)4 for i := 0; i < 10; i++ {5 wp.Submit(func() {6 fmt.Println("Hello, World")7 })8 }9 wp.StopWait()10}11WorkerPool.Submit() Method12type WorkerPool struct {13}14func (wp *WorkerPool) Submit(task func())15WorkerPool.Submit() Method Example16import (17func main() {18 var wp = workerpool.New(10)19 for i := 0; i < 10; i++ {20 wp.Submit(func() {21 fmt.Println("Hello, World")22 })23 }24 wp.StopWait()25}26WorkerPool.StopWait() Method27type WorkerPool struct {28}
TestWorkerPool
Using AI Code Generation
1import (2func main() {3 fmt.Println("Hello World!")4 workerpool.TestWorkerPool()5}6import (7type WorkerPool struct {8}9type Job struct {10 Payload interface{}11}12func NewWorkerPool(maxWorker int, maxQueue int) *WorkerPool {13 return &WorkerPool{14 Worker: make(chan chan Job, maxWorker),15 Queue: make(chan Job, maxQueue),16 }17}18func (wp *WorkerPool) Start() {19 for i := 0; i < wp.MaxWorker; i++ {20 worker := NewWorker(wp.Worker)21 worker.Start()22 }23 go wp.dispatch()24}25func (wp *WorkerPool) dispatch() {26 for {27 select {28 go func(job Job) {29 }(job)30 }31 }32}33func (wp *WorkerPool) Run(job Job) {34}35func (wp *WorkerPool) Stop() {36 close(wp.Queue)37 close(wp.Worker)38}39type Worker struct {40}41func NewWorker(workerPool chan chan Job) Worker {42 return Worker{43 JobChannel: make(chan Job),44 quit: make(chan bool),45 }46}47func (w *Worker) Start() {48 go func() {49 for {50 select {51 fmt.Println("Worker: received a job")52 time.Sleep(time.Second * 1)53 fmt.Println("Worker: processed job")
TestWorkerPool
Using AI Code Generation
1import (2func main() {3 wp := workerpool.New(10)4 for i := 0; i < 20; i++ {5 wp.Submit(func() {6 time.Sleep(10 * time.Second)7 fmt.Println("done")8 })9 }10 wp.StopWait()11}12import (13func main() {14 wp := workerpool.New(10)15 for i := 0; i < 20; i++ {16 wp.Submit(func() {17 time.Sleep(10 * time.Second)18 fmt.Println("done")19 })20 }21 wp.StopWait()22}23import (24func main() {25 wp := workerpool.New(10)26 for i := 0; i < 20; i++ {27 wp.Submit(func() {28 time.Sleep(10 * time.Second)29 fmt.Println("done")30 })31 }32 wp.StopWait()33}34import (
TestWorkerPool
Using AI Code Generation
1import (2func main() {3 workerpool.TestWorkerPool()4 fmt.Println("Hello World")5}6import (7func TestWorkerPool() {8 pool := New(2, 100)9 for i := 0; i < 20; i++ {10 job := func() {11 fmt.Println("Hello World")12 }13 }14 pool.Shutdown()15}16import (17type WorkerPool struct {18}19type Job func()20func New(workerCount, jobQueueSize int) *WorkerPool {21 p := &WorkerPool{22 JobQueue: make(chan Job, jobQueueSize),23 quit: make(chan bool),24 }25 p.wg.Add(workerCount)26 for i := 0; i < workerCount; i++ {27 go p.worker(i)28 }29}30func (p *WorkerPool) worker(id int) {31 defer p.wg.Done()32 for {33 select {34 job()35 }36 }37}38func (p *WorkerPool) Shutdown() {39 close(p.quit)40 p.wg.Wait()41}42func TestWorkerPool() {43 pool := New(2, 100)44 for i := 0; i < 20; i++ {45 job := func() {46 fmt.Println("Hello World")47 }
TestWorkerPool
Using AI Code Generation
1import (2func main() {3 wp := workerpool.New(10)4 for i := 0; i < 100; i++ {5 wp.Submit(func() {6 fmt.Println("task", i, "started")7 fmt.Println("task", i, "finished")8 })9 }10 wp.StopWait()11}
TestWorkerPool
Using AI Code Generation
1import (2func main() {3 wp := workerpool.NewWorkerPool(2)4 tasks := make([]workerpool.Task, 5)5 for i := 0; i < 5; i++ {6 tasks[i] = workerpool.Task{7 Name: fmt.Sprintf("Task %d", i),8 Func: func() error {9 fmt.Println("Running task")10 },11 }12 }13 wp.TestWorkerPool(tasks)14}
TestWorkerPool
Using AI Code Generation
1import (2func main() {3 wp := workerpool.NewWorkerPool(10)4 ch := make(chan int)5 wp.TestWorkerPool(ch)6 for i := range ch {7 fmt.Println("workerpool output:", i)8 }9}
TestWorkerPool
Using AI Code Generation
1func main() {2 wg.Add(1)3 jobs = make(chan Job, 5)4 wp = NewWorkerPool(jobs, 5)5 wp.Start()6 go TestWorkerPool(wp, &wg)7 wg.Wait()8}9func main() {10 wg.Add(1)11 jobs = make(chan Job, 5)12 wp = NewWorkerPool(jobs, 5)13 wp.Start()14 go TestWorkerPool(wp, &wg)15 wg.Wait()16}17func main() {18 wg.Add(1)19 jobs = make(chan Job, 5)20 wp = NewWorkerPool(jobs, 5)21 wp.Start()22 go TestWorkerPool(wp, &wg)23 wg.Wait()24}25func main() {26 wg.Add(1)27 jobs = make(chan Job, 5)28 wp = NewWorkerPool(jobs, 5)29 wp.Start()30 go TestWorkerPool(wp, &wg)31 wg.Wait()32}33func main() {34 wg.Add(1)35 jobs = make(chan Job, 5)36 wp = NewWorkerPool(jobs, 5)37 wp.Start()38 go TestWorkerPool(wp, &wg)
Learn to execute automation testing from scratch with the LambdaTest Learning Hub — right from setting up the prerequisites and running your first automation test, through following best practices, to diving deeper into advanced test scenarios. The LambdaTest Learning Hub compiles step-by-step guides to help you become proficient with different test automation frameworks, e.g., Selenium, Cypress, and TestNG.
You can also refer to the video tutorials on the LambdaTest YouTube channel for step-by-step demonstrations from industry experts.
Get 100 minutes of automation testing for FREE!