Best Python code snippet using Kiwi_python
test_optimizers.py
Source:test_optimizers.py
# NOTE(review): reconstructed from a scraped page that fused the original
# line numbers into the code. The two methods directly below are the tail of
# class _TestOptimizer, whose real header lies before this chunk; a stub
# header is added here only to keep the excerpt syntactically valid.
class _TestOptimizer(object):  # stub — original header outside this chunk

    def test_set_initial(self):
        """A manually supplied initial point is stored with shape (1, D)."""
        self.optimizer.set_initial([1, 1])
        self.assertTupleEqual(self.optimizer._initial.shape, (1, 2), msg="Invalid shape of initial points array")
        self.assertTrue(np.allclose(self.optimizer._initial, 1), msg="Specified initial point not loaded.")

    def test_set_domain(self):
        """Assigning a new domain replaces the old one and recenters the initial point."""
        self.optimizer.domain = gpflowopt.domain.UnitCube(3)
        self.assertNotEqual(self.optimizer.domain, self.domain)
        self.assertEqual(self.optimizer.domain, gpflowopt.domain.UnitCube(3))
        self.assertTrue(np.allclose(self.optimizer.get_initial(), 0.5))


class TestCandidateOptimizer(_TestOptimizer, GPflowOptTestCase):
    """Tests for CandidateOptimizer: optimization over a fixed candidate set."""

    def setUp(self):
        super(TestCandidateOptimizer, self).setUp()
        design = gpflowopt.design.FactorialDesign(4, self.domain)
        self.optimizer = gpflowopt.optim.CandidateOptimizer(self.domain, design.generate())

    def test_default_initial(self):
        self.assertTupleEqual(self.optimizer._initial.shape, (0, 2), msg="Invalid shape of initial points array")

    def test_set_initial(self):
        # When run separately this test works, however when calling nose to
        # run all tests on python 2.7 this records no warnings.
        with warnings.catch_warnings(record=True) as w:
            self.optimizer.set_initial([1, 1])
            assert len(w) == 1
            assert issubclass(w[-1].category, UserWarning)

    def test_object_integrity(self):
        self.assertTupleEqual(self.optimizer.candidates.shape, (16, 2), msg="Invalid shape of candidate property.")
        self.assertTupleEqual(self.optimizer._get_eval_points().shape, (16, 2))
        self.assertTupleEqual(self.optimizer.get_initial().shape, (0, 2), msg="Invalid shape of initial points")
        self.assertFalse(self.optimizer.gradient_enabled(), msg="CandidateOptimizer supports no gradients.")

    def test_set_domain(self):
        # The parent behaviour (3D UnitCube) must be rejected; only a domain
        # of matching dimensionality is accepted and candidates are rescaled.
        with self.assertRaises(AssertionError):
            super(TestCandidateOptimizer, self).test_set_domain()
        self.optimizer.domain = gpflowopt.domain.UnitCube(2)
        self.assertNotEqual(self.optimizer.domain, self.domain)
        self.assertEqual(self.optimizer.domain, gpflowopt.domain.UnitCube(2))
        rescaled_candidates = gpflowopt.design.FactorialDesign(4, gpflowopt.domain.UnitCube(2)).generate()
        self.assertTrue(np.allclose(self.optimizer.candidates, rescaled_candidates))

    def test_optimize(self):
        # Append the known optimum (0, 0) so the candidate search can find it.
        self.optimizer.candidates = np.vstack((self.optimizer.candidates, np.zeros((1, 2))))
        result = self.optimizer.optimize(parabola2d)
        self.assertTrue(result.success, msg="Optimization should succeed.")
        self.assertTrue(np.allclose(result.x, 0), msg="Optimum should be identified")
        self.assertTrue(np.allclose(result.fun, 0), msg="Function value in optimum is 0")
        self.assertEqual(result.nfev, 17, msg="Number of function evaluations equals candidates + initial points")

    def test_optimize_second(self):
        # Without the appended optimum, the best candidate is strictly worse than 0.
        result = self.optimizer.optimize(parabola2d)
        self.assertGreater(result.fun, 0, msg="Optimum is not amongst candidates and initial points")
        self.assertLess(result.fun, 2, msg="Function value not reachable within domain")


class TestSciPyOptimizer(_TestOptimizer, GPflowOptTestCase):
    """Tests for the SciPy (L-BFGS-B) based optimizer wrapper."""

    def setUp(self):
        super(TestSciPyOptimizer, self).setUp()
        self.optimizer = gpflowopt.optim.SciPyOptimizer(self.domain, maxiter=10)

    def test_object_integrity(self):
        self.assertDictEqual(self.optimizer.config,
                             {'tol': None, 'method': 'L-BFGS-B',
                              'options': {'maxiter': 10, 'disp': False}},
                             msg="Config dict contains invalid entries.")
        self.assertTrue(self.optimizer.gradient_enabled(), msg="Gradient is supported.")

    def test_optimize(self):
        self.optimizer.set_initial([-1, -1])
        result = self.optimizer.optimize(parabola2d)
        self.assertTrue(result.success)
        self.assertLessEqual(result.nit, 10, "Only 10 Iterations permitted")
        self.assertLessEqual(result.nfev, 20, "Max 20 evaluations permitted")
        self.assertTrue(np.allclose(result.x, 0), msg="Optimizer failed to find optimum")
        self.assertTrue(np.allclose(result.fun, 0), msg="Incorrect function value returned")

    def test_optimizer_interrupt(self):
        # KeyboardRaiser raises KeyboardInterrupt after N evaluations; the
        # optimizer must return a non-successful result rather than crash.
        self.optimizer.set_initial([-1, -1])
        result = self.optimizer.optimize(KeyboardRaiser(2, parabola2d))
        self.assertFalse(result.success, msg="After one evaluation, a keyboard interrupt is raised, "
                                             "non-succesfull result expected.")
        self.assertFalse(np.allclose(result.x, 0), msg="After one iteration, the optimum will not be found")


class TestStagedOptimizer(_TestOptimizer, GPflowOptTestCase):
    """Tests for StagedOptimizer: a pipeline of optimizers run in sequence."""

    def setUp(self):
        super(TestStagedOptimizer, self).setUp()
        self.optimizer = gpflowopt.optim.StagedOptimizer(
            [gpflowopt.optim.MCOptimizer(self.domain, 5),
             gpflowopt.optim.MCOptimizer(self.domain, 5),
             gpflowopt.optim.SciPyOptimizer(self.domain, maxiter=10)])

    def test_default_initial(self):
        self.assertTupleEqual(self.optimizer.optimizers[0]._initial.shape, (0, 2))

    def test_set_initial(self):
        # Only the last (gradient) stage receives the user-supplied initial point.
        self.optimizer.set_initial([1, 1])
        self.assertTupleEqual(self.optimizer.optimizers[0]._initial.shape, (0, 2))
        self.assertTupleEqual(self.optimizer.optimizers[1]._initial.shape, (0, 2))
        self.assertTupleEqual(self.optimizer.optimizers[2]._initial.shape, (1, 2))
        self.assertTupleEqual(self.optimizer.get_initial().shape, (0, 2))

    def test_object_integrity(self):
        # FIX(review): original message said "Two optimizers expected" while
        # asserting a length of 3 — message corrected to match the assertion.
        self.assertEqual(len(self.optimizer.optimizers), 3, msg="Three optimizers expected in optimizerlist")
        self.assertFalse(self.optimizer.gradient_enabled(), msg="MCOptimizer supports no gradients => neither "
                                                                "does stagedoptimizer.")

    def test_optimize(self):
        with warnings.catch_warnings():
            warnings.filterwarnings("ignore", category=UserWarning)
            result = self.optimizer.optimize(parabola2d)
        self.assertTrue(result.success)
        self.assertLessEqual(result.nfev, 20, "Only 20 Iterations permitted")
        self.assertTrue(np.allclose(result.x, 0), msg="Optimizer failed to find optimum")
        self.assertTrue(np.allclose(result.fun, 0), msg="Incorrect function value returned")

    def test_optimizer_interrupt(self):
        # Interrupt at different evaluation counts to verify which stage was
        # running (nstages) and how many evaluations completed (nfev).
        with warnings.catch_warnings():
            warnings.filterwarnings("ignore", category=UserWarning)
            result = self.optimizer.optimize(KeyboardRaiser(0, parabola2d))
            self.assertFalse(result.success, msg="non-succesfull result expected.")
            self.assertEqual(result.nstages, 1, msg="Stage 2 should be in progress during interrupt")
            self.assertEqual(result.nfev, 0)
            result = self.optimizer.optimize(KeyboardRaiser(3, parabola2d))
            self.assertFalse(result.success, msg="non-succesfull result expected.")
            self.assertFalse(np.allclose(result.x, 0.0), msg="The optimum will not be found")
            self.assertEqual(result.nstages, 2, msg="Stage 2 should be in progress during interrupt")
            self.assertEqual(result.nfev, 5)
            result = self.optimizer.optimize(KeyboardRaiser(12, parabola2d))
            self.assertFalse(result.success, msg="non-succesfull result expected.")
            self.assertEqual(result.nfev, 12)
            self.assertFalse(np.allclose(result.x[0, :], 0.0), msg="The optimum should not be found yet")
            self.assertEqual(result.nstages, 3, msg="Stage 3 should be in progress during interrupt")

    def test_set_domain(self):
        # A domain change must propagate to every stage.
        super(TestStagedOptimizer, self).test_set_domain()
        for opt in self.optimizer.optimizers:
            self.assertEqual(opt.domain, gpflowopt.domain.UnitCube(3))


class TestBayesianOptimizer(_TestOptimizer, GPflowOptTestCase):
    """Tests for the BayesianOptimizer driver (acquisition-based optimization)."""

    def setUp(self):
        super(TestBayesianOptimizer, self).setUp()
        acquisition = gpflowopt.acquisition.ExpectedImprovement(create_parabola_model(self.domain))
        self.optimizer = gpflowopt.BayesianOptimizer(self.domain, acquisition)

    def test_default_initial(self):
        self.assertTupleEqual(self.optimizer._initial.shape, (0, 2), msg="Invalid shape of initial points array")

    def test_optimize(self):
        for verbose in [False, True]:
            with self.test_session():
                acquisition = gpflowopt.acquisition.ExpectedImprovement(create_parabola_model(self.domain))
                optimizer = gpflowopt.BayesianOptimizer(self.domain, acquisition, verbose=verbose)
                result = optimizer.optimize(lambda X: parabola2d(X)[0], n_iter=20)
                self.assertTrue(result.success)
                self.assertEqual(result.nfev, 20, "Only 20 evaluations permitted")
                self.assertTrue(np.allclose(result.x, 0), msg="Optimizer failed to find optimum")
                self.assertTrue(np.allclose(result.fun, 0), msg="Incorrect function value returned")

    def test_optimize_multi_objective(self):
        for verbose in [False, True]:
            with self.test_session():
                m1, m2 = create_vlmop2_model()
                acquisition = (gpflowopt.acquisition.ExpectedImprovement(m1)
                               + gpflowopt.acquisition.ExpectedImprovement(m2))
                optimizer = gpflowopt.BayesianOptimizer(self.domain, acquisition, verbose=verbose)
                result = optimizer.optimize(vlmop2, n_iter=2)
                self.assertTrue(result.success)
                self.assertEqual(result.nfev, 2, "Only 2 evaluations permitted")
                self.assertTupleEqual(result.x.shape, (7, 2))
                self.assertTupleEqual(result.fun.shape, (7, 2))
                # All returned points must lie on the Pareto front (rank 0).
                _, dom = gpflowopt.pareto.non_dominated_sort(result.fun)
                self.assertTrue(np.all(dom == 0))

    def test_optimize_constraint(self):
        for verbose in [False, True]:
            with self.test_session():
                # threshold=-1: nothing is feasible -> failure with all points reported.
                acquisition = gpflowopt.acquisition.ProbabilityOfFeasibility(
                    create_parabola_model(self.domain), threshold=-1)
                optimizer = gpflowopt.BayesianOptimizer(self.domain, acquisition, verbose=verbose)
                result = optimizer.optimize(lambda X: parabola2d(X)[0], n_iter=1)
                self.assertFalse(result.success)
                self.assertEqual(result.message, 'No evaluations satisfied all the constraints')
                self.assertEqual(result.nfev, 1, "Only 1 evaluations permitted")
                self.assertTupleEqual(result.x.shape, (17, 2))
                self.assertTupleEqual(result.fun.shape, (17, 0))
                self.assertTupleEqual(result.constraints.shape, (17, 1))
                # threshold=0.3: some evaluations are feasible -> success.
                acquisition = gpflowopt.acquisition.ProbabilityOfFeasibility(
                    create_parabola_model(self.domain), threshold=0.3)
                optimizer = gpflowopt.BayesianOptimizer(self.domain, acquisition, verbose=verbose)
                result = optimizer.optimize(lambda X: parabola2d(X)[0], n_iter=1)
                self.assertTrue(result.success)
                self.assertEqual(result.nfev, 1, "Only 1 evaluation permitted")
                self.assertTupleEqual(result.x.shape, (5, 2))
                self.assertTupleEqual(result.fun.shape, (5, 0))
                self.assertTupleEqual(result.constraints.shape, (5, 1))

    def test_optimizer_interrupt(self):
        with self.test_session():
            result = self.optimizer.optimize(KeyboardRaiser(3, lambda X: parabola2d(X)[0]), n_iter=20)
            self.assertFalse(result.success, msg="After 2 evaluations, a keyboard interrupt is raised, "
                                                 "failed result expected.")
            self.assertTrue(np.allclose(result.x, 0.0), msg="The optimum will not be identified nonetheless")

    def test_failsafe(self):
        with self.test_session():
            X, Y = self.optimizer.acquisition.data[0], self.optimizer.acquisition.data[1]
            # Provoke Cholesky failure by forcing a negative likelihood variance.
            self.optimizer.acquisition.optimize_restarts = 1
            self.optimizer.acquisition.models[0].likelihood.variance.transform = gpflow.transforms.Identity()
            self.optimizer.acquisition.models[0].likelihood.variance = -5.0
            self.optimizer.acquisition.models[0]._needs_recompile = True
            with self.assertRaises(RuntimeError) as e:
                with self.optimizer.failsafe():
                    self.optimizer.acquisition.set_data(X, Y)
                    self.optimizer.acquisition.evaluate(X)
            # The failsafe dumps the data to an .npz file keyed by exception id.
            fname = 'failed_bopt_{0}.npz'.format(id(e.exception))
            self.assertTrue(os.path.isfile(fname))
            with np.load(fname) as data:
                np.testing.assert_almost_equal(data['X'], X)
                np.testing.assert_almost_equal(data['Y'], Y)
            os.remove(fname)

    def test_set_domain(self):
        with self.test_session():
            with self.assertRaises(AssertionError):
                super(TestBayesianOptimizer, self).test_set_domain()
            domain = gpflowopt.domain.ContinuousParameter("x1", -2.0, 2.0) + \
                     gpflowopt.domain.ContinuousParameter("x2", -2.0, 2.0)
            self.optimizer.domain = domain
            # Model data should be rescaled into the new domain.
            expected = gpflowopt.design.LatinHyperCube(16, self.domain).generate() / 4 + 0.5
            self.assertTrue(np.allclose(expected, self.optimizer.acquisition.models[0].wrapped.X.value))


class TestBayesianOptimizerConfigurations(GPflowOptTestCase):
    """Tests for alternate BayesianOptimizer configurations (initial designs, ...)."""

    def setUp(self):
        self.domain = gpflowopt.domain.ContinuousParameter("x1", 0.0, 1.0) + \
                      gpflowopt.domain.ContinuousParameter("x2", 0.0, 1.0)
        self.acquisition = gpflowopt.acquisition.ExpectedImprovement(create_parabola_model(self.domain))

    def test_initial_design(self):
        with self.test_session():
            design = gpflowopt.design.RandomDesign(5, self.domain)
            optimizer = gpflowopt.BayesianOptimizer(self.domain, self.acquisition, initial=design)
            # NOTE(review): the remainder of this test is truncated in the
            # scraped source; the assertions that followed are not visible here.
test1.py
Source:test1.py
# coding: utf-8
# Demo script: build a small synthetic transfer-learning dataset and
# instantiate the tl_algs transfer learners against it.
# NOTE(review): reconstructed from a scraped page that fused the original
# line numbers into the code text.
import pandas as pd
from sklearn.datasets.samples_generator import make_blobs
from sklearn.ensemble import RandomForestClassifier
import random
from tl_algs import peters, tnb, trbag, tl_baseline, burak, tca, tca_plus

RAND_SEED = 2016
random.seed(RAND_SEED)  # change this to see new random data!

# Randomly generate some data: 15 points in 3 blobs, blob id = domain index.
X, domain_index = make_blobs(n_samples=15, centers=3, n_features=2, cluster_std=5)

# Randomly assign domain and label.
all_instances = pd.DataFrame({"x_coord": [x[0] for x in X],
                              "y_coord": [x[1] for x in X],
                              "domain_index": domain_index,
                              "label": [random.choice([True, False]) for _ in X]},
                             columns=['x_coord', 'y_coord', 'domain_index', 'label'])

# Arbitrarily set domain index 0 as target.
test_set_domain = 0

# We are going to set the first three instances as test data.
# Note that this means that some of the training set has target instances!
test_set = all_instances[all_instances.domain_index == test_set_domain].sample(3, random_state=RAND_SEED)
test_set_X = test_set.loc[:, ["x_coord", "y_coord"]].reset_index(drop=True)
test_set_y = test_set.loc[:, ["label"]].reset_index(drop=True)

# Gather all non-test indexes.
train_pool = all_instances.iloc[all_instances.index.difference(test_set.index), ]
train_pool_X = train_pool.loc[:, ["x_coord", "y_coord"]].reset_index(drop=True)
train_pool_y = train_pool["label"].reset_index(drop=True)
train_pool_domain = train_pool.domain_index

# We don't have much training data, but we got some predictions with confidence levels!
transfer_learners = [
    tca_plus.TCAPlus(test_set_X=test_set_X,
                     test_set_domain=test_set_domain,
                     train_pool_X=train_pool_X,
                     train_pool_y=train_pool_y,
                     train_pool_domain=train_pool_domain,
                     Base_Classifier=RandomForestClassifier,
                     rand_seed=RAND_SEED),
    tca.TCA(test_set_X=test_set_X,
            test_set_domain=test_set_domain,
            train_pool_X=train_pool_X,
            train_pool_y=train_pool_y,
            train_pool_domain=train_pool_domain,
            Base_Classifier=RandomForestClassifier,
            rand_seed=RAND_SEED),
    tl_baseline.Source_Baseline(test_set_X=test_set_X,
                                test_set_domain=test_set_domain,
                                train_pool_X=train_pool_X,
                                train_pool_y=train_pool_y,
                                train_pool_domain=train_pool_domain,
                                Base_Classifier=RandomForestClassifier,
                                rand_seed=RAND_SEED),
    burak.Burak(test_set_X=test_set_X,
                test_set_domain=test_set_domain,
                train_pool_X=train_pool_X,
                train_pool_y=train_pool_y,
                train_pool_domain=train_pool_domain,
                cluster_factor=15,
                k=2,
                Base_Classifier=RandomForestClassifier,
                rand_seed=RAND_SEED),
    peters.Peters(test_set_X=test_set_X,
                  test_set_domain=test_set_domain,
                  train_pool_X=train_pool_X,
                  train_pool_y=train_pool_y,
                  train_pool_domain=train_pool_domain,
                  cluster_factor=15,
                  Base_Classifier=RandomForestClassifier,
                  rand_seed=RAND_SEED),
    tnb.TransferNaiveBayes(test_set_X=test_set_X,
                           test_set_domain=test_set_domain,
                           train_pool_X=train_pool_X,
                           train_pool_y=train_pool_y,
                           train_pool_domain=train_pool_domain,
                           rand_seed=RAND_SEED),
    trbag.TrBag(test_set_X=test_set_X,
                test_set_domain=test_set_domain,
                train_pool_X=train_pool_X,
                train_pool_y=train_pool_y,
                train_pool_domain=train_pool_domain,
                Base_Classifier=RandomForestClassifier,
                sample_size=test_set_y.shape[0],
                rand_seed=RAND_SEED),
    tl_baseline.Hybrid_Baseline(test_set_X=test_set_X,
                                test_set_domain=test_set_domain,
                                train_pool_X=train_pool_X,
                                train_pool_y=train_pool_y,
                                train_pool_domain=train_pool_domain,
                                Base_Classifier=RandomForestClassifier,
                                rand_seed=RAND_SEED),
    tl_baseline.Target_Baseline(test_set_X=test_set_X,
                                test_set_domain=test_set_domain,
                                train_pool_X=train_pool_X,
                                train_pool_y=train_pool_y,
                                train_pool_domain=train_pool_domain,
                                Base_Classifier=RandomForestClassifier,
                                rand_seed=RAND_SEED),
]

for transfer_learner in transfer_learners:
    ...  # NOTE(review): loop body truncated in the scraped source
Learn to execute automation testing from scratch with the LambdaTest Learning Hub. Right from setting up the prerequisites to running your first automation test, to following best practices and diving deeper into advanced test scenarios, the LambdaTest Learning Hub compiles step-by-step guides to help you become proficient with different test automation frameworks, i.e. Selenium, Cypress, TestNG, etc.
You can also refer to the video tutorials on the LambdaTest YouTube channel to get step-by-step demonstrations from industry experts.
Get 100 minutes of automation testing FREE!