Best Python code snippet using SeleniumBase
test_log_parser.py
Source: test_log_parser.py
import os
import numpy as np
import pytest
import copy
from alchemistry_toolkit.parsers.log_parser import *

current_path = os.path.dirname(os.path.abspath(__file__))
data_path = os.path.join(current_path, 'sample_inputs')
file_lambda_MetaD = data_path + '/lambda_MetaD.log'
file_EXE_updating = data_path + '/EXE_updating.log'
file_EXE_equilibrated = data_path + '/EXE_equilibrated.log'
file_EXE_fixed = data_path + '/EXE_fixed.log'

# Test 1: lambda-MetaD simulation
test_1 = EXE_LogInfo(file_lambda_MetaD)
# Test 2: EXE with weights being updated by the WL algorithm (not yet equilibrated)
test_2 = EXE_LogInfo(file_EXE_updating)
# Test 3: EXE with weights being updated by the WL algorithm (equilibrated)
test_3 = EXE_LogInfo(file_EXE_equilibrated)
# Test 4: EXE with fixed weights
test_4 = EXE_LogInfo(file_EXE_fixed)


class Test_EXE_LogInfo:

    def test_init(self):
        expected_1 = {'init_w': [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
                      'input': file_lambda_MetaD, 'dt': 0.002,
                      'nstlog': 1000, 'N_states': 9, 'fixed': False, 'cutoff': 0.001,
                      'wl_scale': 0.8, 'wl_ratio': 0.8, 'init_wl': 0.5, 'temp': 298.0,
                      'plumed_ver': '2.7.0-dev', 'type': 'lambda-MetaD', 'start': 506}
        # the metadata of EXE_updating.log and EXE_equilibrated.log are the same
        expected_2 = {'init_w': [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
                      'input': file_EXE_updating, 'dt': 0.002,
                      'nstlog': 1000, 'N_states': 9, 'fixed': False, 'cutoff': 0.001,
                      'wl_scale': 0.8, 'wl_ratio': 0.8, 'init_wl': 0.5, 'temp': 298.0,
                      'start': 455, 'type': 'expanded_ensemble'}

        expected_3 = copy.deepcopy(expected_2)
        expected_3['input'] = file_EXE_equilibrated
        expected_4 = {'init_w': [0.0, 7.67256, 13.8818, 16.9028, 18.8082, 20.5498, 21.2318, 17.6905, 14.8862],
                      'input': file_EXE_fixed, 'dt': 0.002,
                      'nstlog': 1000, 'N_states': 9, 'fixed': True, 'wl_scale': 0.8,
                      'wl_ratio': 0.8, 'init_wl': 1, 'temp': 298.0, 'start': 454,
                      'type': 'expanded_ensemble'}
        assert vars(test_1) == expected_1
        assert vars(test_2) == expected_2
        assert vars(test_3) == expected_3
        assert vars(test_4) == expected_4

    def test_get_final_data(self):
        c1 = np.array([31385, 29431, 27909, 27179, 26740, 26431, 26432, 26926, 27567])
        w1 = np.array([0, 7.74194, 14.13403, 17.32355, 19.28278, 20.68103, 20.67645, 18.44879, 15.61786])
        c2 = np.array([435, 370, 218, 222, 272, 313, 343, 197, 135])
        w2 = np.array([0, 7.82554, 14.33914, 17.30932, 18.97291, 20.22864, 20.66241, 18.78573, 16.29028])
        c3 = np.array([13776, 11901, 11877, 13516, 12248, 14788, 15091, 6630, 6415])
        w3 = np.array([0, 7.67256, 13.88177, 16.90285, 18.80824, 20.54981, 21.23185, 17.69051, 14.88619])
        c4 = np.array([35254, 29835, 28314, 31311, 28662, 34624, 37771, 13790, 10439])
        w4 = copy.deepcopy(w3)
        np.testing.assert_array_almost_equal(c1, test_1.get_final_data()[0], 10)
        np.testing.assert_array_almost_equal(w1, test_1.get_final_data()[1], 10)
        np.testing.assert_array_almost_equal(c2, test_2.get_final_data()[0], 10)
        np.testing.assert_array_almost_equal(w2, test_2.get_final_data()[1], 10)
        np.testing.assert_array_almost_equal(c3, test_3.get_final_data()[0], 10)
        np.testing.assert_array_almost_equal(w3, test_3.get_final_data()[1], 10)
        np.testing.assert_array_almost_equal(c4, test_4.get_final_data()[0], 10)
        np.testing.assert_array_almost_equal(w4, test_4.get_final_data()[1], 10)
        assert test_1.final_t == 5000
        assert test_2.final_t == 1000
        assert test_3.final_t == 5000
        assert test_4.final_t == 5000
        assert test_1.err_kt_f == 0.12971
        assert test_1.err_kcal_f == 0.07681
        assert test_2.err_kt_f == 1.17007
        assert test_2.err_kcal_f == 0.6929
        assert test_3.err_kt_f == 0.76429
        assert test_3.err_kcal_f == 0.4526
        assert test_4.err_kt_f == 1.21703
        assert test_4.err_kcal_f == 0.72071
        assert test_2.EXE_status == 'updating'

    def test_get_WL_data(self):
        # Test 1: EXE_updating
        t1 = np.array([0, 0.01758, 0.02574, 0.04432, 0.062, 0.07464, 0.10316,
                       0.12102, 0.1402, 0.1864, 0.24034, 0.26614, 0.31396,
                       0.32938, 0.42008, 0.5014, 0.56918, 0.6576, 0.7434, 0.9499])
        w1 = np.array([0.5, 0.4, 0.32, 0.256, 0.2048, 0.16384, 0.131072, 0.1048576,
                       0.0838861, 0.0671089, 0.0536871, 0.0429497, 0.0343597,
                       0.0274878, 0.0219902, 0.0175922, 0.0140737, 0.011259,
                       0.0090072, 0.0072058])
        np.testing.assert_array_almost_equal(t1, test_2.get_WL_data()[0], 10)
        np.testing.assert_array_almost_equal(w1, test_2.get_WL_data()[1], 10)
        assert test_2.EXE_status == 'updating'
        # Test 2: EXE_equilibrated
        t2 = np.array([0, 0.01498, 0.02138, 0.0353, 0.0536, 0.07438, 0.09064,
                       0.11154, 0.13876, 0.18484, 0.21016, 0.26386, 0.31684, 0.33002,
                       0.3672, 0.43502, 0.50318, 0.58672, 0.67496, 0.7618, 0.89586,
                       0.9931, 1.175, 1.27582, 1.57056, 1.66066, 1.90268, 2.19516,
                       2.87514])
        w2 = np.array([0.5, 0.4, 0.32, 0.256, 0.2048, 0.16384, 0.131072, 0.1048576,
                       0.0838861, 0.0671089, 0.0536871, 0.0429497, 0.0343597, 0.0274878,
                       0.0219902, 0.0175922, 0.0140737, 0.011259, 0.0090072, 0.0072058,
                       0.0057646, 0.0046117, 0.0036893, 0.0029515, 0.0023612, 0.0018889,
                       0.0015112, 0.0012089, 0.0009671])
        equil_c = np.array([3158.0, 3449.0, 3599.0, 3654.0, 3723.0, 3576.0, 3408.0, 4653.0, 4722.0])
        equil_w = np.array([0.0, 7.67256, 13.88177, 16.90285, 18.80824, 20.54981, 21.23185, 17.69051, 14.88619])
        np.testing.assert_array_almost_equal(t2, test_3.get_WL_data()[0], 10)
        np.testing.assert_array_almost_equal(w2, test_3.get_WL_data()[1], 10)
        np.testing.assert_array_almost_equal(equil_c, test_3.equil_c, 10)
        np.testing.assert_array_almost_equal(equil_w, test_3.equil_w, 10)
        assert test_3.EXE_status == 'equilibrated'
        assert test_3.equil_t == 2.87516
        assert test_3.max_Nratio == 1.25208
        assert test_3.min_Nratio == 0.83737
        assert test_3.err_kt_eq == 0.40229
        assert test_3.err_kcal_eq == 0.23823

    def test_log_avg_weights(self):
        warning_msg_1 = 'Warning: The starting point of the weights average calculation is less than 0!'
        warning_msg_2 = 'Warning: The method does not apply to the simulation being analyzed!'
        warning_msg_3 = 'Warning: Invalid parameter specified!'

        # Test 1: lambda_MetaD
        # 3 cases with the final method:
        # (1) 0 avg_len (last time frame), (2) avg_len = 0.01 ns and (3) avg_len that makes avg_start < 0
        expected_1 = np.array([0, 7.64644, 13.94576, 17.01108, 18.99344, 20.40077,
                               20.41216, 18.10093, 15.26629])
        f1 = np.array([14.72797, 14.8941, 15.22571, 15.53723, 15.59485, 15.61786])
        np.testing.assert_array_almost_equal(test_1.get_avg_weights(0)[0], test_1.get_final_data()[1], 10)
        np.testing.assert_array_almost_equal(test_1.get_avg_weights(0)[1], test_1.get_final_data()[1][-1], 10)

        np.testing.assert_array_almost_equal(test_1.get_avg_weights(0.01)[0], expected_1)
        np.testing.assert_array_almost_equal(test_1.get_avg_weights(0.01)[1], f1)

        with pytest.raises(ParameterError) as excinfo:
            test_1.get_avg_weights(10)
        assert warning_msg_1 in str(excinfo.value)
        # equilibrated method with lambda-MetaD
        with pytest.raises(SimulationTypeError) as excinfo:
            test_1.get_avg_weights(0.5, method='equilibrated')
        assert warning_msg_2 in str(excinfo.value)
        # invalid parameter
        with pytest.raises(ParameterError) as excinfo:
            test_1.get_avg_weights(0.5, method='test')
        assert warning_msg_3 in str(excinfo.value)
        # Test 2: EXE_equilibrated with the final method (same 3 cases)
        expected_2 = np.array([0, 7.67256, 13.88177, 16.90285, 18.80824, 20.54981,
                               21.23185, 17.69051, 14.88619])
        f2 = np.array([14.88619, 14.88619, 14.88619, 14.88619, 14.88619, 14.88619])
        np.testing.assert_array_almost_equal(test_3.get_avg_weights(0)[0], test_3.get_final_data()[1], 10)
        np.testing.assert_array_almost_equal(test_3.get_avg_weights(0)[1], test_3.get_final_data()[1][-1], 10)
        np.testing.assert_array_almost_equal(test_3.get_avg_weights(0.01)[0], expected_2)
        np.testing.assert_array_almost_equal(test_3.get_avg_weights(0.01)[1], f2)
        with pytest.raises(ParameterError) as excinfo:
            test_3.get_avg_weights(10)
        assert warning_msg_1 in str(excinfo.value)
        # Test 3: EXE_equilibrated with the equilibrated method
        expected_3 = np.array([0, 7.55507, 13.70642, 16.71106, 18.61573, 20.35754,
                               21.03571, 17.49389, 14.68957])
        f3 = np.array([14.62137, 14.62379, 14.6516, 14.73141, 14.81968])
        test_3.get_WL_data()
        np.testing.assert_array_almost_equal(test_3.get_avg_weights(0, method='equilibrated')[0], test_3.equil_w, 10)
        np.testing.assert_array_almost_equal(test_3.get_avg_weights(0, method='equilibrated')[1], test_3.equil_w[-1], 10)
        np.testing.assert_array_almost_equal(test_3.get_avg_weights(0.01, method='equilibrated')[0], expected_3)
        np.testing.assert_array_almost_equal(test_3.get_avg_weights(0.01, method='equilibrated')[1], f3)
        with pytest.raises(ParameterError) as excinfo:
            test_3.get_avg_weights(5, method='equilibrated')
...
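Read on its own, the test above also documents the EXE_LogInfo workflow. The following is a minimal usage sketch inferred only from the calls exercised in the test, not from the parser's documentation; it assumes the same alchemistry_toolkit package is installed and a sample log such as sample_inputs/EXE_equilibrated.log is available.

# Sketch only: every call below appears in the test above; return shapes are inferred from it.
from alchemistry_toolkit.parsers.log_parser import EXE_LogInfo

log = EXE_LogInfo('sample_inputs/EXE_equilibrated.log')   # parses metadata from the log header
counts, weights = log.get_final_data()                    # final histogram counts and final weights
print(log.final_t, counts, weights)

wl_time, wl_incrementor = log.get_WL_data()               # Wang-Landau incrementor history
if log.EXE_status == 'equilibrated':
    print(log.equil_t, log.equil_w)                       # equilibration time and equilibrated weights

avg_w = log.get_avg_weights(0.01)[0]                      # weights averaged over the last 0.01 ns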
test_transactions_autocommit_2.py
Source: test_transactions_autocommit_2.py
#coding:utf-8
"""
ID: gtcs.transactions-autocommit-02
TITLE: Changes within AUTO COMMIT must be cancelled when an exception is raised in a TRIGGER
DESCRIPTION:
    Original test:
    https://github.com/FirebirdSQL/fbtcs/blob/master/GTCS/tests/AUTO_COMMIT.2.ESQL.script
    The test creates three tables (test_1, test_2 and test_3) and an AFTER INSERT trigger on test_1.
    This trigger inserts into test_2 and test_3.
    For test_3 we create a UNIQUE index that prevents insertion of duplicates, and we then add one
    record with value = 1000 to test_3.
    Finally, we insert a record into test_1; its trigger then attempts to insert into test_2 and
    test_3. The latter insert fails with a unique-key violation (the value inserted into test_1 is
    chosen precisely so that this exception is raised).
    Expected result: NONE of the inserts just performed may remain in the database. The only record
    left must be the one added to test_3 during the initial phase.
    NB: we use a custom TPB with fdb.isc_tpb_autocommit in order to start DML transactions in
    AUTOCOMMIT=1 mode.
FBTEST: functional.gtcs.transactions_autocommit_2
"""
import pytest
from firebird.qa import *

db = db_factory()
act = python_act('db', substitutions=[('[ \t]+', ' ')])

expected_stdout = """
    mon$auto_commit: 1
    exception occured, gdscode: 335544349
    test_3 1000
"""

@pytest.mark.skip('FIXME: Not IMPLEMENTED')
@pytest.mark.version('>=3')
def test_1(act: Action):
    pytest.fail("Not IMPLEMENTED")

# test_script_1
#---
#
# import os
# import sys
# import subprocess
# import inspect
# import time
#
# os.environ["ISC_USER"] = user_name
# os.environ["ISC_PASSWORD"] = user_password
# db_conn.close()
#
# #--------------------------------------------
#
# def flush_and_close( file_handle ):
#     # https://docs.python.org/2/library/os.html#os.fsync
#     # If you're starting with a Python file object f,
#     # first do f.flush(), and
#     # then do os.fsync(f.fileno()), to ensure that all internal buffers associated with f are written to disk.
#     global os
#
#     file_handle.flush()
#     if file_handle.mode not in ('r', 'rb') and file_handle.name != os.devnull:
#         # otherwise: "OSError: [Errno 9] Bad file descriptor"!
#         os.fsync(file_handle.fileno())
#     file_handle.close()
#
# #--------------------------------------------
#
# def cleanup( f_names_list ):
#     global os
#     for f in f_names_list:
#         if type(f) == file:
#             del_name = f.name
#         elif type(f) == str:
#             del_name = f
#         else:
#             print('Unrecognized type of element:', f, ' - can not be treated as file.')
#             del_name = None
#
#         if del_name and os.path.isfile( del_name ):
#             os.remove( del_name )
#
# #--------------------------------------------
#
# sql_init='''
#     set bail on;
#     recreate table test_1 (x integer);
#     recreate table test_2 (x integer);
#     recreate table test_3 (x integer);
#     create unique index test_3_x_uniq on test_3 (x);
#     commit;
#     set term ^;
#     create or alter trigger trg_test1_ai for test_1 active after insert position 0 as
#     begin
#         insert into test_2 values (new.x * 10);
#         insert into test_3 values (new.x * 100);
#     end ^
#     set term ;^
#
#     insert into test_3 values (1000);
#     commit;
# '''
#
# f_init_sql = open( os.path.join(context['temp_directory'],'tmp_gtcs_tx_ac2.sql'), 'w', buffering = 0)
# f_init_sql.write( sql_init )
# flush_and_close( f_init_sql )
#
# f_init_log = open( '.'.join( (os.path.splitext( f_init_sql.name )[0], 'log') ), 'w', buffering = 0)
# f_init_err = open( '.'.join( (os.path.splitext( f_init_sql.name )[0], 'err') ), 'w', buffering = 0)
#
# # This can take about 25-30 seconds:
# ####################################
# subprocess.call( [ context['isql_path'], dsn, '-q', '-i', f_init_sql.name ], stdout = f_init_log, stderr = f_init_err)
#
# flush_and_close( f_init_log )
# flush_and_close( f_init_err )
#
# #CUSTOM_TX_PARAMS = ( [ fdb.isc_tpb_read_committed, fdb.isc_tpb_no_rec_version, fdb.isc_tpb_nowait, fdb.isc_tpb_autocommit ] )
# CUSTOM_TX_PARAMS = ( [ fdb.isc_tpb_nowait, fdb.isc_tpb_autocommit ] )
#
# con = fdb.connect( dsn = dsn )
# tx = con.trans( default_tpb = CUSTOM_TX_PARAMS )
#
# tx.begin()
# cx = tx.cursor()
#
# cx.execute('select mon$auto_commit from mon$transactions where mon$transaction_id = current_transaction')
# for r in cx:
#     print( 'mon$auto_commit:', r[0] )
#
# try:
#     cx.execute( 'insert into test_1 values(?)', (10,) )  # this leads to PK/UK violation in the table 'test_3'
# except Exception as e:
#     #print('exception in ', inspect.stack()[0][3], ': ', sys.exc_info()[0])
#     print('exception occured, gdscode:', e[2])
#
# tx.commit()
#
# cx.execute("select 'test_1' tab_name, x from test_1 union all select 'test_2', x from test_2 union all select 'test_3', x from test_3")
# for r in cx:
#     print( r[0], r[1] )
#
# cx.close()
# tx.close()
# con.close()
#
# # cleanup
# #########
# time.sleep(1)
# cleanup( ( f_init_sql, f_init_log, f_init_err) )
#
...
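The commented-out block above is the pre-firebird-qa implementation. Stripped of the file and subprocess scaffolding, its core could be sketched roughly as below. This is not a drop-in replacement: it assumes the fdb driver, a hypothetical dsn for a database already initialized with the SQL in sql_init, and credentials taken from ISC_USER / ISC_PASSWORD as in the original script.

# Rough sketch of the legacy script's core logic (all calls mirror the commented block above).
import fdb

dsn = 'localhost:/path/to/test.fdb'   # hypothetical DSN; the real harness supplies its own database
con = fdb.connect(dsn=dsn)            # credentials assumed to come from ISC_USER / ISC_PASSWORD
tx = con.trans(default_tpb=[fdb.isc_tpb_nowait, fdb.isc_tpb_autocommit])
tx.begin()
cur = tx.cursor()

try:
    # The AFTER INSERT trigger on test_1 inserts new.x * 100 = 1000 into test_3,
    # which violates its unique index even though autocommit is on.
    cur.execute('insert into test_1 values(?)', (10,))
except Exception as e:
    print('exception occured, gdscode:', e)

tx.commit()
cur.execute('select x from test_3')
print(cur.fetchall())                 # only the pre-existing row (1000,) should remain

cur.close()
tx.close()
con.close()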
gen_plist.py
Source: gen_plist.py
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import collections
import datetime
import os
import dictdumper
ROOT = os.path.dirname(os.path.realpath(__file__))
dumper_0 = dictdumper.PLIST(os.path.join(ROOT, '..', 'plist', 'test_0.py2.plist'))
test_1 = collections.OrderedDict()
test_1['foo'] = -1
test_1['bar'] = u'Hello, world!'
test_1['boo'] = collections.OrderedDict()
test_1['boo']['foo_again'] = True
test_1['boo']['bar_again'] = memoryview(b'bytes')
test_1['boo']['boo_again'] = None
dumper_1 = dictdumper.PLIST(os.path.join(ROOT, 'plist', 'test_1.py2.plist'))
dumper_1(test_1, 'test_1')
test_2 = collections.OrderedDict()
test_2['foo'] = [1, 2.0, 3]
test_2['bar'] = (1.0, bytearray(b'a long long bytes'), 3.0)
test_2['boo'] = collections.OrderedDict()
test_2['boo']['foo_again'] = b'bytestring'
test_2['boo']['bar_again'] = datetime.datetime(2020, 1, 31, 20, 15, 10, 163010)
test_2['boo']['boo_again'] = float('-inf')
dumper_2 = dictdumper.PLIST(os.path.join(ROOT, 'plist', 'test_2.py2.plist'))
dumper_2(test_1, 'test_1')
dumper_2(test_2, 'test_2')
test_3 = collections.OrderedDict()
test_3['foo'] = u"stringstringstringstringstringstringstringstringstringstring"
test_3['bar'] = [
    u"s1", False, u"s3",
]
test_3['boo'] = [
    u"s4", collections.OrderedDict(), u"s6"
]
test_3['boo'][1]['s'] = u"5"
test_3['boo'][1]['j'] = u"5"
test_3['far'] = collections.OrderedDict()
test_3['far']['far_foo'] = [u"s1", u"s2", u"s3"]
test_3['far']['far_var'] = u"s4"
test_3['biu'] = float('nan')
dumper_3 = dictdumper.PLIST(os.path.join(ROOT, 'plist', 'test_3.py2.plist'))
dumper_3(test_1, 'test_1')
dumper_3(test_2, 'test_2')
...
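A quick way to inspect the generated files is to read one back with the standard library's plistlib. This is only a sanity-check sketch: it assumes the script above has already produced plist/test_1.py2.plist next to it, and values with no native plist type (e.g. None) appear however dictdumper chose to encode them.

# Sanity-check sketch: read one generated plist back with the stdlib (Python 3).
import os
import plistlib

ROOT = os.path.dirname(os.path.realpath(__file__))
with open(os.path.join(ROOT, 'plist', 'test_1.py2.plist'), 'rb') as fp:
    data = plistlib.load(fp)
print(data)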
gen_json.py
Source: gen_json.py
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import collections
import datetime
import os
import dictdumper
ROOT = os.path.dirname(os.path.realpath(__file__))
dumper_0 = dictdumper.JSON(os.path.join(ROOT, '..', 'json', 'test_0.py2.json'))
test_1 = collections.OrderedDict()
test_1['foo'] = -1
test_1['bar'] = u'Hello, world!'
test_1['boo'] = collections.OrderedDict()
test_1['boo']['foo_again'] = True
test_1['boo']['bar_again'] = memoryview(b'bytes')
test_1['boo']['boo_again'] = None
dumper_1 = dictdumper.JSON(os.path.join(ROOT, 'json', 'test_1.py2.json'))
dumper_1(test_1, 'test_1')
test_2 = collections.OrderedDict()
test_2['foo'] = [1, 2.0, 3]
test_2['bar'] = (1.0, bytearray(b'a long long bytes'), 3.0)
test_2['boo'] = collections.OrderedDict()
test_2['boo']['foo_again'] = b'bytestring'
test_2['boo']['bar_again'] = datetime.datetime(2020, 1, 31, 20, 15, 10, 163010)
test_2['boo']['boo_again'] = float('-inf')
dumper_2 = dictdumper.JSON(os.path.join(ROOT, 'json', 'test_2.py2.json'))
dumper_2(test_1, 'test_1')
dumper_2(test_2, 'test_2')
test_3 = collections.OrderedDict()
test_3['foo'] = u"stringstringstringstringstringstringstringstringstringstring"
test_3['bar'] = [
    u"s1", False, u"s3",
]
test_3['boo'] = [
    u"s4", collections.OrderedDict(), u"s6"
]
test_3['boo'][1]['s'] = u"5"
test_3['boo'][1]['j'] = u"5"
test_3['far'] = collections.OrderedDict()
test_3['far']['far_foo'] = [u"s1", u"s2", u"s3"]
test_3['far']['far_var'] = u"s4"
test_3['biu'] = float('nan')
dumper_3 = dictdumper.JSON(os.path.join(ROOT, 'json', 'test_3.py2.json'))
dumper_3(test_1, 'test_1')
dumper_3(test_2, 'test_2')
...
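The JSON output can be checked the same way with the stdlib json module. Again this is only a sketch: it assumes json/test_1.py2.json exists after running the script above, and the exact representation of non-JSON values (bytes, NaN, -inf) depends on how dictdumper serialized them.

# Sanity-check sketch: read one generated JSON file back with the stdlib.
import json
import os

ROOT = os.path.dirname(os.path.realpath(__file__))
with open(os.path.join(ROOT, 'json', 'test_1.py2.json')) as fp:
    data = json.load(fp)
print(json.dumps(data, indent=2, default=str))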