Best Python code snippet using localstack_python
AC_process1.py
Source:AC_process1.py
1#!/usr/bin/env python22# -*- coding: utf-8 -*-3"""4Created on Fri Mar 1 10:16:31 20195@author: happywjt6"""7#GPU setting8import os9os.environ['CUDA_VISIBLE_DEVICES']='1'10import sys11import layer as l12import unit 13import numpy as np14import tensorflow as tf15from random import shuffle16from copy import deepcopy as dc17from PIL import Image18import matplotlib.pylab as plt19import scipy.io as sio20from sklearn.metrics import confusion_matrix,classification_report21#function to plot confusion matrix22def plotconfusion(cm,title,num_classes,cmap = plt.cm.binary):23 plt.figure()24 plt.imshow(cm,interpolation='nearest',cmap=cmap)25 plt.title(title)26 plt.colorbar()27 xlocations = np.array(range(num_classes))28 plt.xticks(xlocations,xlocations,rotation=90)29 plt.yticks(xlocations,xlocations)30 plt.ylabel('True label')31 plt.xlabel('predicted label')32 savename=title+'.png'33 plt.savefig(savename, format='png')34#import matplotlib.pyplot as plt35#parameter setting36MODEL_INIT = np.load('./bvlc_alexnet_new.npy').item() #Loading pre-trained imagenet model, a pre-downloaded model file are required37CLASS_NUM = 20 #Amount of classes to be classify38BATCH_SIZE = 20 #Batch size39EPOCH = 50 #Epoch of initialization stage40EPOCH_all = 30 #è¿å
¥ä¸»å¨å¦ä¹ åï¼æ¯æ¬¡è¿ä»£è®ç»çepoch个æ°41trainset_num = 3 #Sample amount of 42ACTIVE_TIME = 5 #Query round for active learning43QUEUE_SIZE = 20 #High entropy sample amount in every query round44GOOD_SIZE = 20 #High conficence sample amount in every query round45TRAIN_TIME = 30046TEST = 547'''Car Fine grained'''48MODEL_PATH = './model/' #model saved path49FILE_PATH = './cars_list/'50Experiment_NAME = 'comps_sv_20(3)_2' #Experiments name, your model saved files will be named under this name51#Experiment_NAME_m = 'comps_sv_20(3)_3'52PRETRAIN_MODEL_NAME = MODEL_PATH + Experiment_NAME +'.ckpt'53#Data selection, source directory, target directory and selected car serial number54data_dir = '/home/happywjt/carpictures/image/' #Data directory, the format of data storage is in readme file55item_id = [1,15,20,35,48,60,73,86,111,124,137,162,175,188,212,225,238,250,263,276] #This is the serial number of 20 types of cars selected randomly56#item_id = range(1,101) 57item_label = [0,4,8,15,16,17,18,19,1,2,3,5,6,7,9,10,11,12,13,14]58#item_label = range(CLASS_NUM)59global active_list_name60global good_list_name61assert(len(item_label)==len(item_id) and len(item_label)==CLASS_NUM)62##############################################################################63##############################################################################64#Model defination65def SharePart(input, drop_out_rate):66 67 def pre_process(input):68 rgb_scaled = input69 Mean = [103.939,116.779,123.68]70 71 red,green,blue = tf.split(rgb_scaled,3,3)72 bgr = tf.concat([73 red - Mean[2],74 green - Mean[1],75 blue - Mean[0]],3)76 return bgr77 78 input = pre_process(input)79 80 with tf.variable_scope('Share_Part'):81 82 conv1 = l.conv2d('conv1',input,(11,11),96,strides = [1,4,4,1],decay = (0.0,0.0),pad='VALID',Init = MODEL_INIT['conv1'])83 maxpool1 = l.max_pooling('maxpool',conv1,3,2)84 norm1 = tf.nn.lrn(maxpool1,depth_radius=2,alpha=2e-05,beta=0.75,name='conv1')85 86 conv2 = l.conv2d_with_group('conv2',norm1,(5,5),256,2,decay = (0.0,0.0),pad = 'SAME', Init = MODEL_INIT['conv2'])87 maxpool2 = l.max_pooling('maxpool2',conv2,3,2)88 norm2 = tf.nn.lrn(maxpool2,depth_radius=2,alpha=2e-05,beta=0.75,name='conv2')89 conv3 = l.conv2d('conv3',norm2,(3,3),384,pad = 'SAME',Init = MODEL_INIT['conv3'])90 91 92 conv4 = l.conv2d_with_group('conv4',conv3,(3,3),384,2,pad = 'SAME',Init = MODEL_INIT['conv4'])93 94 conv5 = l.conv2d_with_group('conv5',conv4,(3,3),256,2,pad = 'SAME',Init = MODEL_INIT['conv5'])95 maxpool5 = l.max_pooling('maxpool5',conv5,3,2)96 print maxpool5.shape97 98 dim=199 shape = maxpool5.get_shape().as_list()100 for d in shape[1:]:101 dim*=d102 103 reshape = tf.reshape(maxpool5,[-1,dim])104 105 fc6 = l.fully_connect('fc6',reshape,4096,Init = MODEL_INIT['fc6'])106 fc6 = l.dropout('drop_6',fc6,drop_out_rate)107 fc7 = l.fully_connect('fc7',fc6,4096,Init = MODEL_INIT['fc7'])108 fc7 = l.dropout('drop_7',fc7,drop_out_rate)109 110 return fc7111#Mission layer112def MissionPart(input):113 114 with tf.variable_scope('Classifier'):115 result = l.fully_connect('classifier',input,CLASS_NUM,active=None)116 return result117#loss function118def SoftmaxWithLoss(logistic,label):119 120 label = tf.one_hot(label,depth = CLASS_NUM)121 loss = tf.losses.softmax_cross_entropy(label,logistic)122 123 return loss124#training125def train_net(loss,base_lr=0.00001):126 127 128 var_list = tf.trainable_variables()129 trn_list = []130 for i in var_list:131 if 'conv1' not in i.name and 'conv2' not in i.name:132 trn_list.append(i)133 tf.summary.histogram('weight',i)134 135 loss = tf.add_n(tf.get_collection('losses'),name='all_loss')136 opt = tf.train.AdamOptimizer(base_lr).minimize(loss,var_list=trn_list)137 return opt138#test139def Test(logistic,label):140 141 result = tf.cast(tf.argmax(logistic,axis = 1),tf.uint8)142 compare = tf.cast(tf.equal(result,label),tf.float32)143 acc = tf.reduce_mean(compare)144 return acc145#############################################################################################################################146################################################################################################################################147#148#Step1: data processing. Generate training data list, test data list and unlabeled data list. The names of these files are related to Experiment_NAME.149def data_process():150 151 train_list,oracle_samples_list,test_samples_list = unit.GetListTvT(item_id,item_label,data_dir,trainset_num) 152 153 file_train = open(FILE_PATH + Experiment_NAME + '_x_train.txt','w')154 for fp in train_list:155 file_train.write(str(fp))156 file_train.write('\n')157 file_train.close()158 159 file_oracle = open(FILE_PATH + Experiment_NAME + '_x_oracle.txt','w')160 for fp in oracle_samples_list:161 file_oracle.write(str(fp))162 file_oracle.write('\n')163 file_oracle.close()164 165 file_test = open(FILE_PATH + Experiment_NAME + '_x_test.txt','w')166 for fp in test_samples_list:167 file_test.write(str(fp))168 file_test.write('\n')169 file_test.close()170#load data list171def load_process():172 file_train = FILE_PATH + Experiment_NAME + '_x_train.txt'173 file_oracle = FILE_PATH + Experiment_NAME + '_x_oracle.txt'174 file_test = FILE_PATH + Experiment_NAME + '_x_test.txt'175 TrainList = unit.LoadCarTxT(file_train)176 OracleList = unit.LoadCarTxT(file_oracle)177 TestList = unit.LoadCarTxT(file_test)178 TrainData, TrainLabels = unit.Getlist(TrainList)179 OracleData, OracleLabels = unit.Getlist(OracleList)180 TestData, TestLabels = unit.Getlist(TestList)181 UnlabelData = np.concatenate([OracleData, TestData])182 UndataLabels = np.concatenate([OracleLabels, TestLabels])183#Data of initialization and unlabeled data should be merged when training under full supervised manner and this will get the best model184# TrainData = np.concatenate([TrainData, OracleData]) 185# TrainLabels = np.concatenate([TrainLabels, OracleLabels])186 187 lenn_s = len(TrainData)/BATCH_SIZE188 lenn_t = len(TestData)/BATCH_SIZE189 lenn_u = len(UnlabelData)/BATCH_SIZE190 if len(TrainData)%BATCH_SIZE != 0:191 lenn_s += 1192 TrainData = np.concatenate((TrainData,TrainData[0:(lenn_s*BATCH_SIZE-len(TrainData))]))193 TrainLabels = np.concatenate((TrainLabels,TrainLabels[0:(lenn_s*BATCH_SIZE-len(TrainLabels))]))194 195 if len(TestData)%BATCH_SIZE != 0:196 lenn_t += 1197 TestData = np.concatenate((TestData,TestData[0:(lenn_t*BATCH_SIZE-len(TestData))]))198 TestLabels = np.concatenate((TestLabels,TestLabels[0:(lenn_t*BATCH_SIZE-len(TestLabels))]))199 200 if len(UnlabelData)%BATCH_SIZE != 0:201 lenn_u += 1202 UnlabelData = np.concatenate((UnlabelData,UnlabelData[0:(lenn_u*BATCH_SIZE-len(UnlabelData))]))203 UndataLabels = np.concatenate((UndataLabels,TestLabels[0:(lenn_u*BATCH_SIZE-len(UndataLabels))]))204 205 return TrainData, TrainLabels, OracleData, OracleLabels, TestData, TestLabels, UnlabelData, UndataLabels, lenn_s, lenn_t, lenn_u206 207######################################################################################208######################################################################################209#Model initialization: using initial dataset to pretrain network, and keep the best model in validation dataset. Model name is related to Experiment_NAME 210def pretrain():211 #load data212 TrainData, TrainLabels,_ ,_ ,\213 _, _, UnlabelData, UndataLabels,\214 lenn_s, _, lenn_u = load_process()215 216 batch = tf.placeholder(tf.float32,[None,unit.H,unit.W,unit.Channel])217 label = tf.placeholder(tf.uint8,[None])218 keep_prop = tf.placeholder(tf.float32)219 220 feature = SharePart(batch,keep_prop)221 result = MissionPart(feature)222 loss = SoftmaxWithLoss(result,label)223 acc = Test(result,label)224 opt = train_net(loss)225 226 saver = tf.train.Saver()227 config = tf.ConfigProto()228 config.gpu_options.per_process_gpu_memory_fraction = 0.9229 config.gpu_options.allow_growth = False230 init = tf.global_variables_initializer()231 sess = tf.Session(config = config)232 merged = tf.summary.merge_all()233 writer = tf.summary.FileWriter("logs/", sess.graph)234 sess.run(init)235 236 best_test_acc = 0237 238 train_queue = np.arange(len(TrainData)) #the id of training data239 test_queue = np.arange(len(UnlabelData)) #the id of test data240 #choose data through its id241 for i in range(EPOCH):242 shuffle(train_queue), shuffle(test_queue)243 train_accuracy = 0244 test_accuracy = 0245 test_cost = 0246 for j in range(lenn_s):247 immmg = j248 trainbatch = TrainData[train_queue[j*BATCH_SIZE:(j+1)*BATCH_SIZE]]249 trainlabels = TrainLabels[train_queue[j*BATCH_SIZE:(j+1)*BATCH_SIZE]]250 sess.run(opt,feed_dict={batch:trainbatch,label:trainlabels,keep_prop:0.5})251 train_accuracy += sess.run(acc,feed_dict={batch:trainbatch,label:trainlabels,keep_prop:1.0})252# for j in range(len(target_samples_list)/BATCH_SIZE + 1):253 for j in range(lenn_u):254 immmg = j255# targetbatch,targetlabels = unit.GetBatch(target_samples_list,BATCH_SIZE,j*BATCH_SIZE)256 testbatch = UnlabelData[test_queue[j*BATCH_SIZE:(j+1)*BATCH_SIZE]]257 testlabels = UndataLabels[test_queue[j*BATCH_SIZE:(j+1)*BATCH_SIZE]]258 test_accuracy+=sess.run(acc ,feed_dict={batch:testbatch,label:testlabels,keep_prop:1.0})259 test_cost+=sess.run(loss,feed_dict={batch:testbatch,label:testlabels,keep_prop:1.0})260 rs = sess.run(merged)261 writer.add_summary(rs, i)262 train_accuracy /= lenn_s263 test_accuracy /= lenn_u264 test_cost /= lenn_u265 266 print "this is the ",i," epoch"267 print "target accuracy is: ",test_accuracy268 print "source accuracy is: ",train_accuracy269 print "target cost is: ",test_cost270 271 if test_accuracy>best_test_acc:272 best_test_acc = test_accuracy273 saver.save(sess,MODEL_PATH+PRETRAIN_MODEL_NAME)274 print "the best test acc is:",best_test_acc275 else:276 print "the best test acc is:",best_test_acc 277 278 return279######################################################################################################################################280####################################################################################################################################281#active learning with random selection strategy282def random_active():283 #load data284 TrainData, TrainLabels, OracleData, OracleLabels,\285 TestData, TestLabels, _, _, lenn_s, lenn_t, _ = load_process()286 287 batch = tf.placeholder(tf.float32,[None,unit.H,unit.W,unit.Channel])288 label = tf.placeholder(tf.uint8,[None])289 keep_prop = tf.placeholder(tf.float32)290 291 feature = SharePart(batch,keep_prop)292 result = MissionPart(feature)293 loss = SoftmaxWithLoss(result,label)294 acc = Test(result,label)295 opt = train_net(loss)296 297 saver = tf.train.Saver(max_to_keep = ACTIVE_TIME)298 config = tf.ConfigProto()299 config.gpu_options.per_process_gpu_memory_fraction = 0.45300 config.gpu_options.allow_growth = False301 init = tf.global_variables_initializer()302 sess = tf.Session(config = config)303 sess.run(init)304 saver.restore(sess,PRETRAIN_MODEL_NAME) 305 306 def test():307 ACC = 0308 for i in range(lenn_t):309 test_batch = TestData[i*BATCH_SIZE:(i+1)*BATCH_SIZE]310 test_label = TestLabels[i*BATCH_SIZE:(i+1)*BATCH_SIZE]311 ACC+=sess.run(acc,feed_dict={batch:test_batch,label:test_label,keep_prop:1.0})312 313 return ACC/lenn_t314 315 '''Begin Active Learning!'''316 log_file = open('./simple_log/'+Experiment_NAME+'_ALRA.txt','w')317 pretrain_accuracy = test()318 print 'the pre train model accuracy is: ',pretrain_accuracy319 320 log_file.write("the pretrain model acc is " + str(pretrain_accuracy))321 log_file.write('\n')322 oracle_idx = np.arange(len(OracleData))323 for a in range(ACTIVE_TIME):324 shuffle(oracle_idx)325 tag_queue = oracle_idx[0:QUEUE_SIZE]326 oracle_idx = oracle_idx[QUEUE_SIZE:]327 328 if a == 0:329 TrainData = OracleData[tag_queue]330 TrainLabels = OracleLabels[tag_queue]331 else:332 TrainData = np.concatenate((TrainData,OracleData[tag_queue]))333 TrainLabels = np.concatenate((TrainLabels, OracleLabels[tag_queue]))334 335 train_queue = np.arange(len(TrainData))336 best = 0337 for i in range(EPOCH_all):338 shuffle(train_queue)339 for j in range(len(TrainData)/BATCH_SIZE):340 trainbatch = TrainData[train_queue[j*BATCH_SIZE:(j+1)*BATCH_SIZE]]341 trainlabels = TrainLabels[train_queue[j*BATCH_SIZE:(j+1)*BATCH_SIZE]]342 sess.run(opt, feed_dict = {batch:trainbatch, label:trainlabels,keep_prop:0.5})343 accuracy = test()344 print 'the ',a+1, 'time acmodel acc is:', accuracy345 if accuracy > best:346 best = accuracy347 saver.save(sess,MODEL_PATH+Experiment_NAME+'_ALRA_'+str(a+1)+'.ckpt')348 print 'the ',a+1,' time acmodel best acc is: ', best349 log_file.write("the " + str(a+1) + " time acmodel best acc is " + str(best))350 log_file.write("\n")351 log_file.close()352 return 353#############################################################################################################################################354############################################################################################################################################355#Active learning with high entropy selection strategy356def entropy_active():357 #load data358 TrainData, TrainLabels, OracleData, OracleLabels,\359 TestData, TestLabels, _, _, lenn_s, lenn_t, _ = load_process()360 361 batch = tf.placeholder(tf.float32,[None,unit.H,unit.W,unit.Channel])362 label = tf.placeholder(tf.uint8,[None])363 keep_prop = tf.placeholder(tf.float32)364 365 feature = SharePart(batch,keep_prop)366 result = MissionPart(feature)367 loss = SoftmaxWithLoss(result,label)368 acc = Test(result,label)369 opt = train_net(loss)370 371 softmax = tf.nn.softmax(result)372 entropy = tf.reduce_sum(-softmax * tf.log(softmax),1)373 predict_class = tf.cast(tf.argmax(softmax,axis = 1),tf.uint8)374 375 saver = tf.train.Saver(max_to_keep = ACTIVE_TIME)376 config = tf.ConfigProto()377 config.gpu_options.per_process_gpu_memory_fraction = 0.45378 config.gpu_options.allow_growth = False379 init = tf.global_variables_initializer()380 sess = tf.Session(config = config)381 sess.run(init)382 saver.restore(sess,PRETRAIN_MODEL_NAME) 383 384 def test():385 ACC = 0386 for i in range(lenn_t):387 test_batch = TestData[i*BATCH_SIZE:(i+1)*BATCH_SIZE]388 test_label = TestLabels[i*BATCH_SIZE:(i+1)*BATCH_SIZE]389 ACC+=sess.run(acc,feed_dict={batch:test_batch,label:test_label,keep_prop:1.0})390 391 return ACC/lenn_t392 393 '''Begin Active Learning!'''394 log_file = open('./simple_log/'+Experiment_NAME+'_ALST.txt','w')395 pretrain_accuracy = test()396 print 'the pre train model accuracy is : ', pretrain_accuracy397 log_file.write("the pre train model accuracy is " + str(pretrain_accuracy))398 log_file.write("\n")399 400#active learning 401 for a in range(ACTIVE_TIME):402 oracle_idx = np.arange(len(OracleData))403 oracle_que = []404 #calculate the entropy of each sample405 for i in oracle_idx:406 candidate_entropy = sess.run(entropy, feed_dict={batch:unit.changeshape_1(OracleData[i]),keep_prop:1.0})407 candidate_predict = sess.run(predict_class, feed_dict={batch:unit.changeshape_1(OracleData[i]),keep_prop:1.0})408 oracle_que.append((i,candidate_entropy[0],candidate_predict[0]))409 oracle_que = sorted(oracle_que, key = lambda candidate:candidate[1], reverse = True)410 #oracle_que contians 3 veriable, [image id; entropy; predicted label]411 #temp saves every entropy under every class and in every class entropies are sorted decreasingly412 temp = {}413 tag_queue = []414 for k in range(CLASS_NUM):415 temp[str(k)] = []416 for k in range(len(oracle_que)):417 temp[str(oracle_que[k][2])].append(oracle_que[k])418 for k in temp:419 temp[k] = sorted(temp[k], key=lambda x:x[1], reverse=True)420 #shuffle in class axis and pick utill QUEUE_SIZE421 idx = 0422 temp_class = 0423 temp_order = range(CLASS_NUM)424 shuffle(temp_order)425 while(idx<QUEUE_SIZE):426 if len(temp[str(temp_order[temp_class])]) != 0:427 tag_queue.append(temp[str(temp_order[temp_class])].pop(0)[0])428 idx += 1429 temp_class = (temp_class+1)%(CLASS_NUM)430 else:431 temp_class = (temp_class+1)%(CLASS_NUM)432 if a == 0 : 433 TrainData = OracleData[tag_queue]434 TrainLabels = OracleLabels[tag_queue]435 np.delete(OracleData, tag_queue), np.delete(OracleLabels,tag_queue) #labeled samples are deleted for unlabeled pool436 else:437 TrainData = np.concatenate((TrainData,OracleData[tag_queue]))438 TrainLabels = np.concatenate((TrainLabels, OracleLabels[tag_queue]))439 np.delete(OracleData, tag_queue), np.delete(OracleLabels,tag_queue)440 441 train_queue = np.arange(len(TrainData))442 best = 0443 for i in range(EPOCH_all):444 shuffle(train_queue)445 for j in range(len(TrainData)/BATCH_SIZE):446 trainbatch = TrainData[train_queue[j*BATCH_SIZE:(j+1)*BATCH_SIZE]]447 trainlabels = TrainLabels[train_queue[j*BATCH_SIZE:(j+1)*BATCH_SIZE]]448 sess.run(opt, feed_dict = {batch:trainbatch, label:trainlabels,keep_prop:0.5})449 accuracy = test()450 print 'the ',a+1, 'time acmodel acc is:', accuracy451 if accuracy > best:452 best = accuracy453 saver.save(sess,MODEL_PATH+Experiment_NAME+'_ALST_'+str(a+1)+'.ckpt')454 print 'the ',a+1,' time acmodel best acc is: ', best455 log_file.write("the " + str(a+1) + "time acmodel best acc is " + str(best))456 log_file.write("\n")457 return 458 459#################################################################################################################460###############################################################################################################461#Bi-direction active learning with high entropy samples and high confidence samples.462def bidirectional_active():463#load data464 TrainData, TrainLabels, OracleData, OracleLabels,\465 TestData, TestLabels, _, _, lenn_s, lenn_t, _ = load_process()466 467 batch = tf.placeholder(tf.float32,[None,unit.H,unit.W,unit.Channel])468 label = tf.placeholder(tf.uint8,[None])469 keep_prop = tf.placeholder(tf.float32)470 471 feature = SharePart(batch,keep_prop)472 result = MissionPart(feature)473 loss = SoftmaxWithLoss(result,label)474 acc = Test(result,label)475 opt = train_net(loss)476 477 softmax = tf.nn.softmax(result)478 entropy = tf.reduce_sum(-softmax * tf.log(softmax),1)479 predict_class = tf.cast(tf.argmax(softmax,axis = 1),tf.uint8)480 481 saver = tf.train.Saver(max_to_keep = ACTIVE_TIME)482 config = tf.ConfigProto()483 config.gpu_options.per_process_gpu_memory_fraction = 0.45484 config.gpu_options.allow_growth = False485 init = tf.global_variables_initializer()486 sess = tf.Session(config = config)487 sess.run(init)488 489 saver.restore(sess,PRETRAIN_MODEL_NAME) 490 491 def test():492 ACC = 0493 for i in range(lenn_t):494 test_batch = TestData[i*BATCH_SIZE:(i+1)*BATCH_SIZE]495 test_label = TestLabels[i*BATCH_SIZE:(i+1)*BATCH_SIZE]496 ACC+=sess.run(acc,feed_dict={batch:test_batch,label:test_label,keep_prop:1.0})497 498 return ACC/lenn_t499 500 '''Begin Active Learning!'''501 log_file = open('./simple_log/'+Experiment_NAME+'_ALBT.txt','w')502 pretrain_accuracy = test()503 print 'the pre train model accuracy is : ', pretrain_accuracy504 log_file.write("the pre train model accuracy is " + str(pretrain_accuracy))505 log_file.write("\n")506 507 for a in range(ACTIVE_TIME):508 oracle_idx = np.arange(len(OracleData))509 oracle_que = []510 for i in oracle_idx:511 candidate_entropy = sess.run(entropy, feed_dict={batch:unit.changeshape_1(OracleData[i]),keep_prop:1.0})512 candidate_predict = sess.run(predict_class, feed_dict={batch:unit.changeshape_1(OracleData[i]),keep_prop:1.0})513 oracle_que.append((i,candidate_entropy[0],candidate_predict[0]))514 oracle_que = sorted(oracle_que, key = lambda candidate:candidate[1], reverse = True)515 temp = {}516 tag_queue = []517 tag_queue2 = []518 tag_queue2_labels = []519 for k in range(CLASS_NUM):520 temp[str(k)] = []521 for k in range(len(oracle_que)):522 temp[str(oracle_que[k][2])].append(oracle_que[k])523 for k in temp:524 temp[k] = sorted(temp[k], key=lambda x:x[1], reverse=True)525 526 idx = 0527 temp_class = 0528 temp_order = range(CLASS_NUM)529 shuffle(temp_order)530 while(idx<QUEUE_SIZE):531 if len(temp[str(temp_order[temp_class])]) != 0:532 tag_queue.append(temp[str(temp_order[temp_class])].pop(0)[0])533 idx += 1534 temp_class = (temp_class+1)%(CLASS_NUM)535 else:536 temp_class = (temp_class+1)%(CLASS_NUM)537 idx = 0 538 temp_class = 0539 while(idx< GOOD_SIZE ):540 if len(temp[str(temp_order[temp_class])]) != 0:541 tag_temporary = temp[str(temp_order[temp_class])].pop()542 tag_queue2.append(tag_temporary[0])543 tag_queue2_labels.append(tag_temporary[2])544 idx += 1545 temp_class = (temp_class + 1)%(CLASS_NUM)546 else:547 temp_class = (temp_class + 1)%(CLASS_NUM)548 549#########################################'''not put back, x_train+x_oracle''' 550 551# TrainData = np.concatenate((TrainData,OracleData[tag_queue]))552# TrainData = np.concatenate((TrainData,OracleData[tag_queue2]))553# TrainLabels = np.concatenate((TrainLabels, OracleLabels[tag_queue]))554# TrainLabels = np.concatenate((TrainLabels, np.array(tag_queue2_labels))) 555# tag_queue2_rlabels = OracleLabels[tag_queue2]556# np.delete(OracleData, tag_queue + tag_queue2), np.delete(OracleLabels,tag_queue + tag_queue2)557 558###############################################''' put back , x_train+x_oracle'''559# TrainData0 = dc(TrainData)560# TrainLabels0 = dc(TrainLabels)561# TrainData0 = np.concatenate((TrainData0,OracleData[tag_queue]))562# TrainLabels0 = np.concatenate((TrainLabels0,OracleLabels[tag_queue]))563# TrainData = np.concatenate((TrainData0, OracleData[tag_queue2]))564# TrainLabels = np.concatenate((TrainLabels0, OracleLabels[tag_queue2]))565# tag_queue2_rlabels = OracleLabels[tag_queue2]566# np.delete(OracleData, tag_queue), np.delete(OracleLabels,tag_queue)567################################################not put back,x_oracle 568 if a == 0:569 TrainData = np.concatenate((OracleData[tag_queue],OracleData[tag_queue2]))570 TrainLabels = np.concatenate((OracleLabels[tag_queue], np.array(tag_queue2_labels)))571 else:572 TrainData = np.concatenate((TrainData,OracleData[tag_queue]))573 TrainData = np.concatenate((TrainData,OracleData[tag_queue2]))574 TrainLabels = np.concatenate((TrainLabels, OracleLabels[tag_queue]))575 TrainLabels = np.concatenate((TrainLabels, np.array(tag_queue2_labels))) 576 tag_queue2_rlabels = OracleLabels[tag_queue2]577 np.delete(OracleData, tag_queue + tag_queue2), np.delete(OracleLabels,tag_queue + tag_queue2)578############################################################################################################# 579 580 train_queue = np.arange(len(TrainData))581 best = 0582 for i in range(EPOCH_all):583 shuffle(train_queue)584 for j in range(len(TrainData)/BATCH_SIZE):585 trainbatch = TrainData[train_queue[j*BATCH_SIZE:(j+1)*BATCH_SIZE]]586 trainlabels = TrainLabels[train_queue[j*BATCH_SIZE:(j+1)*BATCH_SIZE]]587 sess.run(opt, feed_dict = {batch:trainbatch, label:trainlabels,keep_prop:0.5})588 accuracy = test()589 print 'the ',a+1, 'time acmodel acc is:', accuracy590 if accuracy > best:591 best = accuracy592 saver.save(sess,MODEL_PATH+Experiment_NAME+'_ALBT_'+str(a+1)+'.ckpt')593 print 'the ',a+1,' time acmodel best acc is: ', best594 cnn_acc = np.float(np.sum(np.equal(tag_queue2_rlabels,np.array(tag_queue2_labels)))) / GOOD_SIZE595 log_file.write("the " + str(a+1) + "time acmodel best acc is " + str(best))596 log_file.write("\n")597 log_file.write("the " + str(a+1) + "time cnn_que acc is " + str(cnn_acc))598 log_file.write("\n")599 log_file.close()600 return 601 602###################################################################################################################603##################################################################################################################604#active learning with strategy taking ground truth label as model predictionç¨çå®æ ç¾ä»£æ¿æ¨¡åæ ç¾çååå¦ä¹ æ¹æ³605def bidirectional_active_expert():606 TrainData, TrainLabels, OracleData, OracleLabels,\607 TestData, TestLabels, _, _, lenn_s, lenn_t, _ = load_process()608 609 batch = tf.placeholder(tf.float32,[None,unit.H,unit.W,unit.Channel])610 label = tf.placeholder(tf.uint8,[None])611 keep_prop = tf.placeholder(tf.float32)612 613 feature = SharePart(batch,keep_prop)614 result = MissionPart(feature)615 loss = SoftmaxWithLoss(result,label)616 acc = Test(result,label)617 opt = train_net(loss)618 619 softmax = tf.nn.softmax(result)620 entropy = tf.reduce_sum(-softmax * tf.log(softmax),1)621 predict_class = tf.cast(tf.argmax(softmax,axis = 1),tf.uint8)622 623 saver = tf.train.Saver(max_to_keep = ACTIVE_TIME)624 config = tf.ConfigProto()625 config.gpu_options.per_process_gpu_memory_fraction = 0.45626 config.gpu_options.allow_growth = False627 init = tf.global_variables_initializer()628 sess = tf.Session(config = config)629 sess.run(init)630 631 saver.restore(sess,PRETRAIN_MODEL_NAME) 632 633 def test():634 ACC = 0635 for i in range(lenn_t):636 test_batch = TestData[i*BATCH_SIZE:(i+1)*BATCH_SIZE]637 test_label = TestLabels[i*BATCH_SIZE:(i+1)*BATCH_SIZE]638 ACC+=sess.run(acc,feed_dict={batch:test_batch,label:test_label,keep_prop:1.0})639 640 return ACC/lenn_t641 642 '''Begin Active Learning!'''643 log_file = open('./simple_log/'+Experiment_NAME+'_ALBT_man.txt','w')644 pretrain_accuracy = test()645 print 'the pre train model accuracy is : ', pretrain_accuracy646 log_file.write("the pre train model accuracy is " + str(pretrain_accuracy))647 log_file.write("\n")648 649 for a in range(ACTIVE_TIME):650 oracle_idx = np.arange(len(OracleData))651 oracle_que = []652 for i in oracle_idx:653 candidate_entropy = sess.run(entropy, feed_dict={batch:unit.changeshape_1(OracleData[i]),keep_prop:1.0})654 candidate_predict = sess.run(predict_class, feed_dict={batch:unit.changeshape_1(OracleData[i]),keep_prop:1.0})655 oracle_que.append((i,candidate_entropy[0],candidate_predict[0]))656 oracle_que = sorted(oracle_que, key = lambda candidate:candidate[1], reverse = True)657 temp = {}658 tag_queue = []659 tag_queue2 = []660 tag_queue2_labels = []661 for k in range(CLASS_NUM):662 temp[str(k)] = []663 for k in range(len(oracle_que)):664 temp[str(oracle_que[k][2])].append(oracle_que[k])665 for k in temp:666 temp[k] = sorted(temp[k], key=lambda x:x[1], reverse=True)667 668 idx = 0669 temp_class = 0670 temp_order = range(CLASS_NUM)671 shuffle(temp_order)672 while(idx<QUEUE_SIZE):673 if len(temp[str(temp_order[temp_class])]) != 0:674 tag_queue.append(temp[str(temp_order[temp_class])].pop(0)[0])675 idx += 1676 temp_class = (temp_class+1)%(CLASS_NUM)677 else:678 temp_class = (temp_class+1)%(CLASS_NUM)679 idx = 0 680 temp_class = 0681 while(idx< GOOD_SIZE ):682 if len(temp[str(temp_order[temp_class])]) != 0:683 tag_temporary = temp[str(temp_order[temp_class])].pop()684 tag_queue2.append(tag_temporary[0])685 tag_queue2_labels.append(tag_temporary[2])686 idx += 1687 temp_class = (temp_class + 1)%(CLASS_NUM)688 else:689 temp_class = (temp_class + 1)%(CLASS_NUM)690 691##################################not put back ,x_train+x_oracle 692# TrainData = np.concatenate((TrainData,OracleData[tag_queue]))693# TrainData = np.concatenate((TrainData,OracleData[tag_queue2]))694# TrainLabels = np.concatenate((TrainLabels, OracleLabels[tag_queue]))695# TrainLabels = np.concatenate((TrainLabels, OracleLabels[tag_queue2])) 696# tag_queue2_rlabels = OracleLabels[tag_queue2]697# np.delete(OracleData, tag_queue + tag_queue2), np.delete(OracleLabels,tag_queue + tag_queue2)698##########################################put back, x_train+x_oracle 699# TrainData0 = dc(TrainData)700# TrainLabels0 = dc(TrainLabels)701# TrainData0 = np.concatenate((TrainData0,OracleData[tag_queue]))702# TrainLabels0 = np.concatenate((TrainLabels0,OracleLabels[tag_queue]))703# TrainData = np.concatenate((TrainData0, OracleData[tag_queue2]))704# TrainLabels = np.concatenate((TrainLabels0, OracleLabels[tag_queue2]))705# tag_queue2_rlabels = OracleLabels[tag_queue2]706# np.delete(OracleData, tag_queue), np.delete(OracleLabels,tag_queue)707 708################################################not put back,x_oracle709 if a == 0:710 TrainData = np.concatenate((OracleData[tag_queue],OracleData[tag_queue2]))711 TrainLabels = np.concatenate((OracleLabels[tag_queue], np.array(tag_queue2_labels)))712 else:713 TrainData = np.concatenate((TrainData,OracleData[tag_queue]))714 TrainData = np.concatenate((TrainData,OracleData[tag_queue2]))715 TrainLabels = np.concatenate((TrainLabels, OracleLabels[tag_queue]))716 TrainLabels = np.concatenate((TrainLabels, np.array(tag_queue2_labels))) 717 tag_queue2_rlabels = OracleLabels[tag_queue2]718 np.delete(OracleData, tag_queue + tag_queue2), np.delete(OracleLabels,tag_queue + tag_queue2)719 720 train_queue = np.arange(len(TrainData))721 best = 0722 for i in range(EPOCH_all):723 shuffle(train_queue)724 for j in range(len(TrainData)/BATCH_SIZE):725 trainbatch = TrainData[train_queue[j*BATCH_SIZE:(j+1)*BATCH_SIZE]]726 trainlabels = TrainLabels[train_queue[j*BATCH_SIZE:(j+1)*BATCH_SIZE]]727 sess.run(opt, feed_dict = {batch:trainbatch, label:trainlabels,keep_prop:0.5})728 accuracy = test()729 print 'the ',a+1, 'time acmodel acc is:', accuracy730 if accuracy > best:731 best = accuracy732 saver.save(sess,MODEL_PATH+Experiment_NAME+'_ALBT_man_'+str(a+1)+'.ckpt')733 print 'the ',a+1,' time acmodel best acc is: ', best734 cnn_acc = np.float(np.sum(np.equal(tag_queue2_rlabels,np.array(tag_queue2_labels)))) / GOOD_SIZE735 log_file.write("the " + str(a+1) + "time acmodel best acc is " + str(best))736 log_file.write("\n")737 log_file.write("the " + str(a+1) + "time cnn_que acc is " + str(cnn_acc))738 log_file.write("\n")739 log_file.close()740 return741#############################################################################################742#test and generate confusion matrixes/precision/recall/f1 for each model743def Test_model_process():744 file_test = FILE_PATH + Experiment_NAME + '_x_test.txt'745# file_test2 = FILE_PATH + Experiment_NAME + '_x_oracle.txt'746 ac_methods = ['_ALBT_','_ALBT_man_','_ALST_','_ALRA_'] #_ALBT_,_ALST_,_ALRA_747# OList = unit.LoadCarTxT(file_test2)748 TestList = unit.LoadCarTxT(file_test)749# OData, OLabels = unit.Getlist(OList)750 TestData, TestLabels = unit.Getlist(TestList)751# TestData = np.concatenate((TestData,OData))752# TestLabels = np.concatenate((TestLabels, OLabels))753 754 batch = tf.placeholder(tf.float32,[None,unit.H,unit.W,unit.Channel])755 label = tf.placeholder(tf.uint8,[None])756 keep_prop = tf.placeholder(tf.float32)757 758 feature = SharePart(batch,keep_prop)759 result = MissionPart(feature)760 loss = SoftmaxWithLoss(result,label)761 acc = Test(result,label)762 opt = train_net(loss)763 764 softmax = tf.nn.softmax(result)765 entropy = tf.reduce_sum(-softmax * tf.log(softmax),1)766 predict_class = tf.cast(tf.argmax(result,axis = 1),tf.uint8)767 768 saver = tf.train.Saver()769 config = tf.ConfigProto()770 config.gpu_options.per_process_gpu_memory_fraction = 0.9771 config.gpu_options.allow_growth = False772 init = tf.global_variables_initializer()773 sess = tf.Session(config = config)774 sess.run(init)775 776# confunsion_matix = []777# file_report = open(MODEL_PATH+'confusion_matrix/'+Experiment_NAME+'_pre_'+'report.txt','w')778# recall = []779# precision = []780# f1 = []781# support = []782# accuracy = 0783# c_m_t = np.zeros([CLASS_NUM,CLASS_NUM])784# saver.restore(sess,MODEL_PATH+PRETRAIN_MODEL_NAME)785# for i in range(len(TestData)):786# predict_label = sess.run(predict_class,feed_dict = { batch:unit.changeshape_1(TestData[i]),keep_prop:1.0})787# c_m_t[TestLabels[i],predict_label] += 1788# confunsion_matix.append(c_m_t)789# np.save(MODEL_PATH+'confusion_matrix/'+Experiment_NAME+'_pre'+'.npy',c_m_t)790# file_report.write(" stage " + "confusion_matrix with testsets\n" )791# file_report.write(" "+"precision".rjust(10,)+"recall".rjust(10,)+"f1-score".rjust(10,)+"support".rjust(10,)+'\n')792# plotconfusion(c_m_t,MODEL_PATH+'confusion_matrix/'+Experiment_NAME+'_pre',CLASS_NUM)793# for i in range(CLASS_NUM):794# accuracy += c_m_t[i,i]795# try:796# recall.append(round(c_m_t[i,i]/np.sum(c_m_t[i]),3))797# except:798# recall.apprend(round(0,3))799# try:800# precision.append(round(c_m_t[i,i]/np.sum(c_m_t[:,i]),3))801# except:802# precision.append(round(0,3))803# try:804# f1.append(round(2*recall[i]*precision[i]/(recall[i]+precision[i]),3))805# except:806# f1.append(round(0,3))807# support.append(np.sum(c_m_t[i]))808# file_report.write(str(i).rjust(10,)+str(precision[i]).rjust(10,)+str(recall[i]).rjust(10,)+str(f1[i]).rjust(10,)+str(support[i]).rjust(10,)+'\n')809# try:810# recall_avg = round(np.sum(np.array(recall))/CLASS_NUM,3)811# except:812# recall_avg = 0813# try:814# precision_avg = round(np.sum(np.array(precision))/CLASS_NUM,3)815# except:816# precision_avg = 0817# try:818# f1_avg = round(np.sum(np.array(f1))/CLASS_NUM,3)819# except:820# f1_avg = 0821# support_num = np.sum(np.array(support))822# accuracy = round(accuracy/support_num,5)823# file_report.write("average".rjust(10,)+str(precision_avg).rjust(10,)+str(recall_avg).rjust(10,)+str(f1_avg).rjust(10,)+str(support_num).rjust(10,)+'\n')824# file_report.write(" stage acc is " +str(accuracy))825# file_report.write("\n\n\n\n")826# file_report.close()827#828 for ac_method in ac_methods:829 file_report = open('./confusion_matrix/'+Experiment_NAME+ac_method+'report.txt','w')830 for a in range(ACTIVE_TIME):831 recall = []832 precision = []833 f1 = []834 support = []835 accuracy = 0836 c_m_t = np.zeros([CLASS_NUM,CLASS_NUM])837 saver.restore(sess,MODEL_PATH+Experiment_NAME+ac_method+str(a+1)+'.ckpt')838 for i in range(len(TestData)):839 predict_label = sess.run(predict_class,feed_dict = { batch:unit.changeshape_1(TestData[i]),keep_prop:1.0})840 c_m_t[TestLabels[i],predict_label] += 1841 confunsion_matix.append(c_m_t)842 np.save('./confusion_matrix/'+Experiment_NAME+ac_method+str(a+1)+'.npy',c_m_t)843# np.save('./confusion_matrix/'+Experiment_NAME+'_pre'+'.npy',c_m_t)844 file_report.write(str(a+1) + " stage " + "confusion_matrix with testsets\n" )845 file_report.write(" "+"precision".rjust(10,)+"recall".rjust(10,)+"f1-score".rjust(10,)+"support".rjust(10,)+'\n')846 plotconfusion(c_m_t,'./confusion_matrix/'+Experiment_NAME+ac_method+str(a+1),CLASS_NUM)847# plotconfusion(c_m_t,'./confusion_matrix/'+Experiment_NAME+'_pre',CLASS_NUM)848 for i in range(CLASS_NUM):849 accuracy += c_m_t[i,i]850 try:851 recall.append(round(c_m_t[i,i]/np.sum(c_m_t[i]),3))852 except:853 recall.apprend(round(0,3))854 try:855 precision.append(round(c_m_t[i,i]/np.sum(c_m_t[:,i]),3))856 except:857 precision.append(round(0,3))858 try:859 f1.append(round(2*recall[i]*precision[i]/(recall[i]+precision[i]),3))860 except:861 f1.append(round(0,3))862 support.append(np.sum(c_m_t[i]))863 file_report.write(str(i).rjust(10,)+str(precision[i]).rjust(10,)+str(recall[i]).rjust(10,)+str(f1[i]).rjust(10,)+str(support[i]).rjust(10,)+'\n')864 try:865 recall_avg = round(np.sum(np.array(recall))/CLASS_NUM,3)866 except:867 recall_avg = 0868 try:869 precision_avg = round(np.sum(np.array(precision))/CLASS_NUM,3)870 except:871 precision_avg = 0872 try:873 f1_avg = round(np.sum(np.array(f1))/CLASS_NUM,3)874 except:875 f1_avg = 0876 support_num = np.sum(np.array(support))877 accuracy = round(accuracy/support_num,5)878 file_report.write("average".rjust(10,)+str(precision_avg).rjust(10,)+str(recall_avg).rjust(10,)+str(f1_avg).rjust(10,)+str(support_num).rjust(10,)+'\n')879 file_report.write(str(a+1) + " stage acc is " +str(accuracy))880 file_report.write("\n\n\n\n")881 file_report.close()882 883 return 884 885if __name__=='__main__':886# data_process()887# pretrain()888# random_active()889# entropy_active()890# bidirectional_active()891# bidirectional_active_expert()...
book.py
Source:book.py
1#-*- coding: utf-8 -*-2import time3import Queue4import threading5import requests6from bs4 import BeautifulSoup7THREAD_NUM = 108PAGES_PER_TAG = 109class Book:10 def __init__(self):11 self.tag_list = []12 self.tag_queue = Queue.Queue()13 self.tag_url = "http://book.douban.com/tag/?view=cloud"14 self.book_url = "http://www.douban.com/tag/this_is_tag/book"15 self.headers1 = {16 'Host':'book.douban.com',17 'Connection':'keep-alive',18 'Accept-Language':'en-US,en;q=0.8,zh-CN;q=0.6,zh;q=0.4',19 'Referer':'http://book.douban.com/tag/?view=type&icn=index-sorttags-all',20 'Accept':'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8',21 'User-Agent':'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_4) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/44.0.2403.125 Safari/537.36',22 'Cookie':'bid="U5k+t1Oea4U"; ll="108296"; push_noty_num=0; push_doumail_num=0; ps=y; ap=1; _pk_ref.100001.3ac3=%5B%22%22%2C%22%22%2C1439183313%2C%22http%3A%2F%2Fwww.douban.com%2F%22%5D; __utmt_douban=1; __utmt=1; _pk_id.100001.3ac3=303b99e5664ea9f0.1439179156.2.1439184790.1439179283.; _pk_ses.100001.3ac3=*; __utma=30149280.318137177.1438926241.1439179150.1439183313.7; __utmb=30149280.12.10.1439183313; __utmc=30149280; __utmz=30149280.1438926241.1.1.utmcsr=(direct)|utmccn=(direct)|utmcmd=(none); __utmv=30149280.12292; __utma=81379588.1359695583.1439179155.1439179155.1439183313.2; __utmb=81379588.10.10.1439183313; __utmc=81379588; __utmz=81379588.1439179155.1.1.utmcsr=douban.com|utmccn=(referral)|utmcmd=referral|utmcct=/'23 }24 self.headers2 = {25 'Host':'www.douban.com',26 'Connection':'keep-alive',27 'Accept-Language':'en-US,en;q=0.8,zh-CN;q=0.6,zh;q=0.4',28 'Accept':'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8',29 'User-Agent':'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_4) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/44.0.2403.125 Safari/537.36',30 'Cookie':'bid="U5k+t1Oea4U"; ll="108296"; push_noty_num=0; push_doumail_num=0; ps=y; _pk_id.100001.8cb4=ae3aa95b83ce68de.1438926236.5.1439186874.1439179318.; __utma=30149280.318137177.1438926241.1439179150.1439183313.7; __utmc=30149280; __utmz=30149280.1438926241.1.1.utmcsr=(direct)|utmccn=(direct)|utmcmd=(none); __utmv=30149280.12292; ap=1'31 }32 def getTags(self):33 response = requests.get(self.tag_url, headers = self.headers1)34 soup = BeautifulSoup(response.content, "html.parser")35 div = soup.find('div', {'class':"indent tag_cloud"})36 for tag in div.find_all('a'):37 self.tag_queue.put(tag.contents[0].strip().encode('utf8'))38 self.tag_list.append(tag.contents[0].strip().encode('utf8'))39 open("books/Book_tags.txt", "w+").write('Tags number: ' + str(len(self.tag_list)) + '\n' + '\n'.join(self.tag_list))40 def getBooks(self):41 self.finished = 042 thread_list = []43 for i in range(THREAD_NUM):44 thread_list.append(threading.Thread(target = self.threadFunc))45 thread_list.append(threading.Thread(target = self.getTagsRemainCount))46 for t in thread_list:47 t.start()48 for t in range(THREAD_NUM):49 thread_list[t].join()50 self.finished = 151 def getTagsRemainCount(self):52 size = tag_queue_size = self.tag_queue.qsize()53 print "%d tags remained..." % size54 while not self.tag_queue.empty():55 if self.finished == 1:56 return57 if size != tag_queue_size:58 print "%d tags remained..." % size59 tag_queue_size = size60 time.sleep(1)61 size = self.tag_queue.qsize()62 def threadFunc(self):63 while not self.tag_queue.empty():64 tag = self.tag_queue.get()65 book_list = []66 url = self.book_url.replace("this_is_tag", tag)67 for i in range(PAGES_PER_TAG):68 params = {'start': i * 15}69 response = requests.get(url, params = params, headers = self.headers2)70 soup = BeautifulSoup(response.content, "html.parser")71 div = soup.find('div', {'class': 'mod book-list'})72 for info in div.find_all('dd'):73 book_id, book_title, book_desc, book_rate = '', '', '', '0.0'74 try:75 book_id = info.a['href'].split('/')[-2].encode('utf8')76 book_title = info.a.contents[0].strip().encode('utf8')77 try:78 book_desc = info.find('div', {'class':'desc'}).contents[0].strip().encode('utf8')79 book_rate = info.find('span', {'class':'rating_nums'}).contents[0].encode('utf8')80 except:81 pass82 except Exception, e:83 print e84 book_list.append((book_rate, book_id, book_title, book_desc))85 open("books/"+tag+".txt", "w+").write('Book number: ' + str(len(book_list)) + '\n' + '\n'.join(['\t'.join(b) for b in book_list]))86if __name__ =='__main__':87 b = Book()88 b.getTags()...
toggle_emph.py
Source:toggle_emph.py
1# Copyright (c) 2021. Kenneth A. Grady2# See BSD-2-Clause-Patent license in LICENSE.txt3# Additional licenses are in the license folder.4""" """5__author__ = "Kenneth A. Grady"6__version__ = "0.1.0a0"7__maintainer__ = "Kenneth A. Grady"8__email__ = "gradyken@msu.edu"9__date__ = "2020-8-17"10__name__ = "Contents.Library.control_words_symbols.toggle_emph"11# From standard libraries12import logging13from typing import Tuple14import build_output_file15log = logging.getLogger(__name__)16def processor(tag_info: dict, main_dict: dict) -> Tuple[dict, dict]:17 cw_text = tag_info["cw_text"]18 controlword = main_dict[cw_text]19 controlword_val = tag_info["cw_value"]20 tag_queue = main_dict["tag_queue"]21 if controlword == "open" and controlword_val == "" and not tag_queue:22 log.info(msg="1")23 main_dict["update_output"] = ""24 main_dict["tag_queue"].append(main_dict["tags"]["cw_text"][1])25 elif controlword == "open" and controlword_val == "" and tag_queue:26 log.info(msg="2")27 main_dict["update_output"] = ''.join(main_dict["tag_queue"][::-1])28 main_dict["tag_queue"] = []29 open_str_empty = main_dict["tags"]["toggle_emph"][0]30 open_str = open_str_empty.replace("zzz", tag_info["cw_text"])31 main_dict["update_output"] = main_dict["update_output"] + open_str32 main_dict["tag_queue"].append(main_dict["tags"]["toggle_emph"][1])33 elif controlword == "open" and controlword_val != "" and not tag_queue:34 log.info(msg="3")35 main_dict["update_output"] = main_dict["tags"]["toggle_emph"][1]36 main_dict["cw_text"] = "closed"37 elif controlword == "open" and controlword_val != "" and tag_queue:38 log.info(msg="4")39 main_dict["update_output"] = ''.join(main_dict["tag_queue"][::-1])40 main_dict["update_output"] = main_dict["tags"]["toggle_emph"][1]41 main_dict["tag_queue"] = []42 main_dict["cw_text"] = "closed"43 elif controlword == "closed" and controlword_val == "" and not tag_queue:44 log.info(msg="5")45 open_str_empty = main_dict["tags"]["toggle_emph"][0]46 open_str = open_str_empty.replace("zzz", tag_info["cw_text"])47 main_dict["update_output"] = open_str48 main_dict["tag_queue"].append(main_dict["tags"]["toggle_emph"][1])49 elif controlword == "closed" and controlword_val == "" and tag_queue:50 log.info(msg="6")51 main_dict["update_output"] = ''.join(main_dict["tag_queue"][::-1])52 main_dict["tag_queue"] = []53 open_str_empty = main_dict["tags"]["toggle_emph"][0]54 open_str = open_str_empty.replace("zzz", tag_info["cw_text"])55 main_dict["update_output"] = main_dict["update_output"] + open_str56 main_dict["tag_queue"].append(main_dict["tags"]["toggle_emph"][1])57 elif controlword == "closed" and controlword_val != "" and not tag_queue:58 log.debug(msg="7")59 main_dict["update_output"] = ""60 elif controlword == "closed" and controlword_val != "" and tag_queue:61 log.info(msg="8")62 main_dict["update_output"] = ''.join(main_dict["tag_queue"][::-1])63 main_dict["tag_queue"] = []64 build_output_file.processor(main_dict=main_dict)...
Learn to execute automation testing from scratch with LambdaTest Learning Hub. Right from setting up the prerequisites to run your first automation test, to following best practices and diving deeper into advanced test scenarios. LambdaTest Learning Hubs compile a list of step-by-step guides to help you be proficient with different test automation frameworks i.e. Selenium, Cypress, TestNG etc.
You could also refer to video tutorials over LambdaTest YouTube channel to get step by step demonstration from industry experts.
Get 100 minutes of automation test minutes FREE!!