Best Python code snippet using uiautomator
cosine_esamodel_test.py
Source: cosine_esamodel_test.py
#!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
The MIT License (MIT)
Copyright (c) 2012-2013 Karsten Jeschkies <jeskar@web.de>
Permission is hereby granted, free of charge, to any person obtaining a copy of
this software and associated documentation files (the "Software"), to deal in
the Software without restriction, including without limitation the rights to use,
copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the
Software, and to permit persons to whom the Software is furnished to do so,
subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,
INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
'''
'''
Created on 23.11.2012
@author: karsten jeschkies <jeskar@web.de>
'''
from cosine_esamodel import CosineEsaModel, DocumentTitles
from esamodel import EsaModel  #used below but missing from the original imports
from feature_extractor.extractors import (EsaFeatureExtractor,
                                          TfidfFeatureExtractor,
                                          LdaFeatureExtractor)
from gensim import utils, matutils
from gensim.corpora import Dictionary, MmCorpus
from gensim.models import tfidfmodel
import itertools
import logging
from models.mongodb_models import (Article, Features, User, UserModel,
                                   RankedArticle, ReadArticleFeedback)
from mongoengine import *
import numpy as np
import unittest
from utils.helper import load_config
from random import sample
from sets import Set
from smote import SMOTE, borderlineSMOTE
import sys

logger = logging.getLogger("unittesting")

# set up vars used in testing ("Deerwester" from the web tutorial)
texts = [['human', 'interface', 'computer'],                            #human interface
         ['survey', 'user', 'computer', 'system', 'response', 'time'],  #computer systems
         ['eps', 'user', 'interface', 'system'],                        #eps
         ['system', 'human', 'system', 'eps'],                          #human systems
         ['user', 'response', 'time'],                                  #response time
         ['trees'],                                                     #trees
         ['graph', 'trees'],                                            #graph
         ['graph', 'minors', 'trees'],                                  #minors trees
         ['graph', 'minors', 'survey']]                                 #minors survey
dictionary = Dictionary(texts)
corpus = [dictionary.doc2bow(text) for text in texts]
concepts = ['human interface', 'computer systems', 'eps', 'human systems',
            'response time', 'trees', 'graph', 'minors trees', 'minors survey']
test_corpus_texts = [['graph', 'minors', 'eps'],
                     ['human', 'system', 'computer'],
                     ['user', 'system', 'human']]
test_corpus = [dictionary.doc2bow(text) for text in test_corpus_texts]

UNREAD = 0
READ = 1


def get_features(article, extractor):
    '''
    Returns the full feature vector of an article.
    The article should be a mongodb model.
    '''
    #check if the features of the article are the current version
    try:
        feature_version = article.features.version
    except AttributeError as e:
        if str(e) == 'features':
            logger.error("Article %s does not have any features." %
                         article.id)
        #the article seems not to exist anymore, go on
        raise

    if feature_version != extractor.get_version():
        clean_content = article.clean_content

        #get new features
        features = extractor.get_features(clean_content)
    else:
        features = article.features.data

    #sparse2full converts a list of 2-tuples to a numpy array
    article_features_as_full_vec = matutils.sparse2full(features,
                                                        extractor.get_feature_number())

    return article_features_as_full_vec


def get_samples(extractor,
                read_article_ids,
                unread_article_ids,
                p_synthetic_samples=300,
                p_majority_samples=500,
                k=5):
    '''
    read_article_ids : Set
    unread_article_ids : Set
    p_synthetic_samples : percentage of synthetic samples, 300 for 300%
    p_majority_samples : percentage of the read count to which the unread
                         (majority) articles are under-sampled, 500 for 500%
    k : neighbourhood size for k nearest neighbours, default 5

    Returns
    -------
    array-like full vector samples, shape = [n_samples, n_features]
    array-like marks, shape = [n_samples]
    '''

    #Under-sample unread ids
    unread_article_ids = Set(sample(unread_article_ids,
                                    min(p_majority_samples / 100 * len(read_article_ids),
                                        len(unread_article_ids))))

    #Create unread article vectors
    unread_marks = np.empty(len(unread_article_ids))
    unread_marks.fill(UNREAD)
    unread_articles = np.empty(shape=(len(unread_article_ids),
                                      extractor.get_feature_number()))

    for i, article in enumerate(Article.objects(id__in=unread_article_ids)):
        try:
            article_features_as_full_vec = get_features(article, extractor)
            unread_articles[i, :] = article_features_as_full_vec[:]
        except AttributeError as e:
            logger.error("Article %s does not have attribute: %s." %
                         (article.id, e))

    #Create read article vectors
    read_marks = np.empty(len(read_article_ids))
    read_marks.fill(READ)
    read_articles = np.empty(shape=(len(read_article_ids),
                                    extractor.get_feature_number()))

    for i, article in enumerate(Article.objects(id__in=read_article_ids)):
        try:
            article_features_as_full_vec = get_features(article, extractor)
            read_articles[i, :] = article_features_as_full_vec[:]
        except AttributeError as e:
            logger.error("Article %s does not have attribute: %s." %
                         (article.id, e))

    #SMOTE sample minorities
    #synthetic_read_articles = SMOTE(read_articles, p_synthetic_samples, k)

    #borderlineSMOTE sample minorities
    X = np.concatenate((read_articles, unread_articles))
    y = np.concatenate((read_marks, unread_marks))
    new_read_articles, synthetic_read_articles, danger_read_articles = \
        borderlineSMOTE(X=X, y=y, minority_target=READ,
                        N=p_synthetic_samples, k=k)

    #Create synthetic read samples
    synthetic_marks = np.zeros(len(synthetic_read_articles))
    synthetic_marks.fill(READ)

    read_marks = np.empty(len(new_read_articles))
    read_marks.fill(READ)

    danger_read_marks = np.empty(len(danger_read_articles))
    danger_read_marks.fill(READ)

    logger.info("Use %d read, %d unread, %d danger reads and %d synthetic samples." %
                (len(read_marks), len(unread_marks),
                 len(danger_read_marks), len(synthetic_marks)))

    return (np.concatenate((new_read_articles,
                            synthetic_read_articles,
                            danger_read_articles,
                            unread_articles)),
            np.concatenate((read_marks,
                            synthetic_marks,
                            danger_read_marks,
                            unread_marks)))


class TestCosineESAModel(unittest.TestCase):

    def setUp(self):
        logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s',
                            level=logging.DEBUG)

        self.config_ = load_config(("/media/sdc1/Aptana Studio 3 Workspace/"
                                    "configs/config.yaml"),
                                   logger,
                                   exit_with_error=True)

        if self.config_ is None:
            logger.error("No config. Exit.")
            sys.exit(1)

    def tearDown(self):
        pass

    @unittest.skip("Skip small test")
    def test_constructor(self):
        #create tf-idf model
        tfidf_model = tfidfmodel.TfidfModel(corpus, normalize=True)

        #transform corpus
        tfidf_corpus = tfidf_model[corpus]

        #train esa model
        esa_model = CosineEsaModel(tfidf_corpus,
                                   document_titles=concepts,
                                   test_corpus=test_corpus,
                                   test_corpus_targets=[1, 2, 2],
                                   num_test_corpus=3,
                                   num_best_features=2,
                                   num_features=len(dictionary))

        test_doc = ['graph', 'minors', 'trees']  #['user', 'computer', 'time']
        tfidf_test_doc = tfidf_model[dictionary.doc2bow(test_doc)]

        #transform test doc to esa
        esa_test_doc = esa_model[tfidf_test_doc]

        print esa_test_doc
        #for concept_id, weight in sorted(esa_test_doc, key=lambda item: -item[1]):
        #    print "%s %.3f" % (esa_model.document_titles[concept_id], weight)

    #@unittest.skip("Skip bigger test")
    def test_constructor_with_file_wikicorpus(self):

        #load tf-idf model
        tfidf_model = tfidfmodel.TfidfModel.load("/media/sdc1/test_dump/result/test_tfidf.model")
        extractor = TfidfFeatureExtractor("/media/sdc1/test_dump/result/test")

        #load tf-idf corpus
        tfidf_corpus = MmCorpus('/media/sdc1/test_dump/result/test_tfidf_corpus.mm')

        #load lda corpus
        #lda_corpus = MmCorpus('/media/sdc1/test_dump/result/test_lda_corpus.mm')

        #load dictionary
        id2token = Dictionary.load("/media/sdc1/test_dump/result/test_wordids.dict")

        #load article titles
        document_titles = DocumentTitles.load("/media/sdc1/test_dump/result/test_articles.txt")

        #Connect to mongo database
        connect(self.config_['database']['db-name'],
                username=self.config_['database']['user'],
                password=self.config_['database']['passwd'],
                port=self.config_['database']['port'])

        #Load articles as test corpus
        user = User.objects(email=u"jeskar@web.de").first()

        ranked_article_ids = (a.article.id
                              for a
                              in RankedArticle.objects(user_id=user.id).only("article"))
        all_article_ids = Set(a.id
                              for a
                              in Article.objects(id__in=ranked_article_ids).only("id"))

        read_article_ids = Set(a.article.id
                               for a
                               in ReadArticleFeedback.objects(user_id=user.id).only("article"))

        unread_article_ids = all_article_ids - read_article_ids

        #sample test articles
        X, y = get_samples(extractor, read_article_ids, unread_article_ids)

        s, f = X.shape
        logger.debug("Training with %d samples, %d features, %d marks" %
                     (s, f, len(y)))

        #train esa model
        esa_model = CosineEsaModel(tfidf_corpus,
                                   document_titles=document_titles,
                                   test_corpus=X,
                                   test_corpus_targets=y,
                                   num_test_corpus=len(y),
                                   num_best_features=15,
                                   num_features=len(id2token))

        print esa_model

        esa_model.save('/media/sdc1/test_dump/result/test_cesa.model')

        tmp_esa = CosineEsaModel.load('/media/sdc1/test_dump/result/test_cesa.model')
        print tmp_esa

    @unittest.skip("too big")
    def test_constructor_with_big_file_wikicorpus(self):

        #load tf-idf corpus
        tfidf_corpus = MmCorpus('/media/sdc1/test_dump/result/wiki_tfidf_corpus.mm')

        #load lda corpus
        #lda_corpus = MmCorpus('/media/sdc1/test_dump/result/test_lda_corpus.mm')

        #load dictionary
        id2token = Dictionary.load("/media/sdc1/test_dump/result/wiki_wordids.dict")

        #load article titles
        document_titles = DocumentTitles.load("/media/sdc1/test_dump/result/wiki_articles.txt")

        #train esa model
        esa_model = EsaModel(tfidf_corpus, num_clusters=15,
                             document_titles=document_titles,
                             num_features=len(id2token))

        print esa_model

        esa_model.save('/media/sdc1/test_dump/result/wiki_cesa.model')

        tmp_esa = EsaModel.load('/media/sdc1/test_dump/result/wiki_cesa.model')
        print tmp_esa


if __name__ == "__main__":
    #import sys;sys.argv = ['', 'Test.testName']...
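The smote module imported above is project-specific and its source is not part of this listing. As a rough illustration of what the commented-out plain-SMOTE call relies on, the following minimal sketch (a hypothetical helper, not the project's API) places each synthetic minority sample a random fraction of the way between a real sample and one of its k nearest minority neighbours:

import numpy as np

def smote_sketch(minority, N=300, k=5, seed=42):
    '''Returns synthetic samples, roughly N percent of len(minority).'''
    rng = np.random.RandomState(seed)
    n_per_sample = N // 100                        #e.g. 300% -> 3 per sample
    synthetic = []
    for x in minority:
        #k nearest minority neighbours of x, excluding x itself
        dists = np.linalg.norm(minority - x, axis=1)
        neighbour_ids = np.argsort(dists)[1:k + 1]
        for _ in range(n_per_sample):
            neighbour = minority[rng.choice(neighbour_ids)]
            gap = rng.rand()                       #random point on the segment
            synthetic.append(x + gap * (neighbour - x))
    return np.array(synthetic)

#usage: 10 fake read articles with 4 features -> 30 synthetic ones for N=300
fake_reads = np.random.RandomState(0).rand(10, 4)
print(smote_sketch(fake_reads).shape)              #(30, 4)

The borderline variant used by get_samples additionally restricts interpolation to minority samples that lie near the class boundary (the "danger" reads returned alongside the synthetic ones).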
test_esa_model.py
Source: test_esa_model.py
#!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
The MIT License (MIT)
Copyright (c) 2012-2013 Karsten Jeschkies <jeskar@web.de>
Permission is hereby granted, free of charge, to any person obtaining a copy of
this software and associated documentation files (the "Software"), to deal in
the Software without restriction, including without limitation the rights to use,
copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the
Software, and to permit persons to whom the Software is furnished to do so,
subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,
INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
'''
'''
@author: Karsten Jeschkies <jeskar@web.de>

Unittests for the implementation of the ESA model.
The tests are not complete and were used for debugging.
'''
from esamodel import EsaModel, DocumentTitles
from gensim.corpora import Dictionary, MmCorpus
from gensim.models import tfidfmodel
import logging
import unittest

logger = logging.getLogger("unittesting")

# set up vars used in testing ("Deerwester" from the web tutorial)
texts = [['human', 'interface', 'computer'],                            #human interface
         ['survey', 'user', 'computer', 'system', 'response', 'time'],  #computer systems
         ['eps', 'user', 'interface', 'system'],                        #eps
         ['system', 'human', 'system', 'eps'],                          #human systems
         ['user', 'response', 'time'],                                  #response time
         ['trees'],                                                     #trees
         ['graph', 'trees'],                                            #graph
         ['graph', 'minors', 'trees'],                                  #minors trees
         ['graph', 'minors', 'survey']]                                 #minors survey
dictionary = Dictionary(texts)
corpus = [dictionary.doc2bow(text) for text in texts]
concepts = ['human interface', 'computer systems', 'eps', 'human systems',
            'response time', 'trees', 'graph', 'minors trees', 'minors survey']


class TestESAModel(unittest.TestCase):

    def setUp(self):
        logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s',
                            level=logging.DEBUG)

    def tearDown(self):
        pass

    @unittest.skip("Skip small test")
    def test_constructor(self):
        #create tf-idf model
        tfidf_model = tfidfmodel.TfidfModel(corpus, normalize=True)

        #transform corpus
        tfidf_corpus = tfidf_model[corpus]

        #train esa model
        esa_model = EsaModel(tfidf_corpus, num_clusters=9,
                             document_titles=concepts,
                             num_features=len(dictionary))

        print "%s\n" % str(esa_model)

        test_doc = ['user', 'computer', 'time']
        tfidf_test_doc = tfidf_model[dictionary.doc2bow(test_doc)]

        #transform test doc to esa
        esa_test_doc = esa_model[tfidf_test_doc]

        for concept_id, weight in sorted(esa_test_doc, key=lambda item: -item[1]):
            print "%s %.3f" % (esa_model.document_titles[concept_id], weight)

    #@unittest.skip("Skip bigger test")
    def test_constructor_with_file_wikicorpus(self):

        #load tf-idf corpus
        tfidf_corpus = MmCorpus('/media/sdc1/test_dump/result/test_tfidf_corpus.mm')

        #load lda corpus
        #lda_corpus = MmCorpus('/media/sdc1/test_dump/result/test_lda_corpus.mm')

        #load dictionary
        id2token = Dictionary.load("/media/sdc1/test_dump/result/test_wordids.dict")

        #load article titles
        document_titles = DocumentTitles.load("/media/sdc1/test_dump/result/test_articles.txt")

        #train esa model
        esa_model = EsaModel(tfidf_corpus, num_clusters=15,
                             document_titles=document_titles,
                             num_features=len(id2token))

        print esa_model

        esa_model.save('/media/sdc1/test_dump/result/wiki_esa.model')

        tmp_esa = EsaModel.load('/media/sdc1/test_dump/result/wiki_esa.model')
        print tmp_esa

    @unittest.skip("too big")
    def test_constructor_with_big_file_wikicorpus(self):

        #load tf-idf corpus
        tfidf_corpus = MmCorpus('/media/sdc1/test_dump/result/wiki_tfidf_corpus.mm')

        #load lda corpus
        #lda_corpus = MmCorpus('/media/sdc1/test_dump/result/test_lda_corpus.mm')

        #load dictionary
        id2token = Dictionary.load("/media/sdc1/test_dump/result/wiki_wordids.dict")

        #load article titles
        document_titles = DocumentTitles.load("/media/sdc1/test_dump/result/wiki_articles.txt")

        #train esa model
        esa_model = EsaModel(tfidf_corpus, num_clusters=15,
                             document_titles=document_titles,
                             num_features=len(id2token))

        print esa_model

        esa_model.save('/media/sdc1/test_dump/result/wiki_esa.model')

        tmp_esa = EsaModel.load('/media/sdc1/test_dump/result/wiki_esa.model')
        print tmp_esa


if __name__ == "__main__":
    #import sys;sys.argv = ['', 'Test.testName']...
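EsaModel itself lives in the project's esamodel module, which this listing does not include. For orientation, the sketch below reproduces the core Explicit Semantic Analysis idea these tests exercise, using only the gensim primitives the tests already import: a document is mapped to its cosine similarities against a row-normalized TF-IDF concept matrix, one dimension per concept title. The helper name and toy data are illustrative, not the project's API:

from gensim import matutils
from gensim.corpora import Dictionary
from gensim.models import tfidfmodel
import numpy as np

concept_texts = [['human', 'interface', 'computer'],
                 ['eps', 'user', 'interface', 'system'],
                 ['graph', 'minors', 'trees']]
concept_titles = ['human interface', 'eps', 'graph minors trees']

dictionary = Dictionary(concept_texts)
corpus = [dictionary.doc2bow(text) for text in concept_texts]
tfidf = tfidfmodel.TfidfModel(corpus, normalize=True)

#dense concept matrix, shape = [n_concepts, n_terms]
concept_matrix = np.array([matutils.sparse2full(tfidf[bow], len(dictionary))
                           for bow in corpus])

def to_esa(doc_bow):
    '''Maps a bag-of-words document to similarities against all concepts.'''
    doc_vec = matutils.sparse2full(tfidf[doc_bow], len(dictionary))
    return concept_matrix.dot(doc_vec)  #cosine, since all rows are unit length

esa_vec = to_esa(dictionary.doc2bow(['user', 'interface']))
for title, weight in sorted(zip(concept_titles, esa_vec), key=lambda t: -t[1]):
    print("%s %.3f" % (title, weight))

The tested EsaModel adds clustering on top of this (num_clusters) to reduce the concept space, but the transformation interface, model[tfidf_doc], is the same.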
test_pg2ogr.py
Source: test_pg2ogr.py
import os
import shutil
import tempfile
import unittest

import fiona
import pgdata

URL = 'postgresql://postgres:postgres@localhost:5432/pgdata'
DB = pgdata.connect(URL)
DB.execute('CREATE SCHEMA IF NOT EXISTS pgdata')
DATA_1 = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'data')
AIRPORTS = os.path.join(DATA_1, 'bc_airports.json')
# also test a path with spaces
DATA_2 = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'data space')
AIRPORTS_2 = os.path.join(DATA_2, 'bc_airports_one.json')


class ogrpg(unittest.TestCase):

    def setUp(self):
        self.tempdir = tempfile.mkdtemp()
        self.spaced_dir = tempfile.mkdtemp("spa ced")

    def test_ogr2pg(self):
        db = DB
        db.ogr2pg(AIRPORTS, in_layer='bc_airports', out_layer='bc_airports',
                  schema='pgdata')
        airports = db['pgdata.bc_airports']
        assert 'physical_address' in airports.columns
        assert sum(1 for _ in airports.all()) == 425

    def test_ogr2pg_noindex(self):
        db = DB
        db.ogr2pg(AIRPORTS, in_layer='bc_airports',
                  out_layer='bc_airports_noindex', schema='pgdata', index=False)
        airports = db['pgdata.bc_airports_noindex']
        assert len(airports.indexes) == 0

    def test_ogr2pg_sql(self):
        db = DB
        db.ogr2pg(AIRPORTS, in_layer='bc_airports', out_layer='bc_airports_sql',
                  schema='pgdata',
                  sql="AIRPORT_NAME='Terrace (Northwest Regional) Airport'")
        airports = db['pgdata.bc_airports_sql']
        assert 'physical_address' in airports.columns
        assert sum(1 for _ in airports.all()) == 1

    def test_ogr2pg_spaces(self):
        db = DB
        db.ogr2pg(AIRPORTS_2, in_layer='bc_airports', out_layer='bc_airports_spaced',
                  schema='pgdata')
        airports = db['pgdata.bc_airports_spaced']
        assert 'physical_address' in airports.columns
        assert sum(1 for _ in airports.all()) == 1

    def test_pg2ogr_spaces(self):
        db = DB
        db.pg2ogr(sql='SELECT * from pgdata.bc_airports_spaced', driver='GeoJSON',
                  outfile=os.path.join(self.spaced_dir, 'test_dump_spaced.json'))
        c = fiona.open(os.path.join(self.spaced_dir, 'test_dump_spaced.json'), 'r')
        assert len(c) == 1

    def test_pg2geojson(self):
        db = DB
        db.pg2ogr(sql='SELECT * FROM pgdata.bc_airports LIMIT 10', driver='GeoJSON',
                  outfile=os.path.join(self.tempdir, 'test_dump.json'))
        c = fiona.open(os.path.join(self.tempdir, 'test_dump.json'), 'r')
        assert len(c) == 10

    def test_pg2gpkg(self):
        db = DB
        db.pg2ogr(sql='SELECT * FROM pgdata.bc_airports LIMIT 10', driver='GPKG',
                  outfile=os.path.join(self.tempdir, 'test_dump.gpkg'),
                  outlayer='bc_airports')
        c = fiona.open(os.path.join(self.tempdir, 'test_dump.gpkg'), 'r')
        assert len(c) == 10

    def test_pg2gpkg_update(self):
        db = DB
        db.pg2ogr(sql='SELECT * FROM pgdata.bc_airports LIMIT 10', driver='GPKG',
                  outfile=os.path.join(self.tempdir, 'test_dump.gpkg'),
                  outlayer='bc_airports')
        db.pg2ogr(sql='SELECT * FROM pgdata.bc_airports LIMIT 10', driver='GPKG',
                  outfile=os.path.join(self.tempdir, 'test_dump.gpkg'),
                  outlayer='bc_airports_2')
        layers = fiona.listlayers(os.path.join(self.tempdir, 'test_dump.gpkg'))
        assert len(layers) == 2

    def test_pg2ogr_append(self):
        db = DB
        db.pg2ogr(sql='SELECT * FROM pgdata.bc_airports LIMIT 10', driver='GPKG',
                  outfile=os.path.join(self.tempdir, 'test_dump.gpkg'),
                  outlayer='bc_airports')
        db.pg2ogr(sql='SELECT * FROM pgdata.bc_airports LIMIT 10', driver='GPKG',
                  outfile=os.path.join(self.tempdir, 'test_dump.gpkg'),
                  outlayer='bc_airports', append=True)
        c = fiona.open(os.path.join(self.tempdir, 'test_dump.gpkg'), 'r')
        assert len(c) == 20

    def tearDown(self):
        shutil.rmtree(self.tempdir)
        shutil.rmtree(self.spaced_dir)


def test_tearDown():...
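Taken together, these tests exercise a PostgreSQL-to-OGR round trip. The condensed sketch below strings the same calls into one happy path, using only the pgdata and fiona calls that appear in the test module; the connection URL is copied from the tests, and the input file path is a placeholder:

import os
import tempfile

import fiona
import pgdata

db = pgdata.connect('postgresql://postgres:postgres@localhost:5432/pgdata')
db.execute('CREATE SCHEMA IF NOT EXISTS pgdata')

#load a GeoJSON layer into the pgdata schema (placeholder input path)
db.ogr2pg('bc_airports.json', in_layer='bc_airports',
          out_layer='bc_airports', schema='pgdata')

#dump a query result back out through OGR as a GeoPackage
outfile = os.path.join(tempfile.mkdtemp(), 'airports_dump.gpkg')
db.pg2ogr(sql='SELECT * FROM pgdata.bc_airports LIMIT 10',
          driver='GPKG', outfile=outfile, outlayer='bc_airports')

with fiona.open(outfile, 'r') as layer:
    print(len(layer))  #10 features written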