Best Python code snippet using autotest_python
workflow_example.py
Source: workflow_example.py
...15 'input_expected': False}16 conf_job4 = {'method_name': 'export_csv',17 'args': ['result']}18 twitter_plugin = JobCreator(plugin=PluginsEnum.TwitterPlugin)19 job1 = twitter_plugin.create_job(conf_job1)20 job2 = twitter_plugin.create_job(conf_job2)21 job3 = twitter_plugin.create_job(conf_job3)22 csv_plugin = JobCreator(plugin=PluginsEnum.CsvPlugin)23 job4 = csv_plugin.create_job(conf_job4)24 flow1 = FlowCreator(inputs=[job1], callbacks=[job2]).create_flow()25 flow2 = FlowCreator(inputs=[job3], callbacks=[job4]).create_flow()26 WorkflowCreator(flows=[flow1, flow2]).execute_workflow()27def visualization_workflow():28 conf = {'method_name': 'get_histogram',29 'args': [], 'input_expected': False}30 visualization_plugin = JobCreator(plugin=PluginsEnum.VisualizationPlugin)31 job = visualization_plugin.create_job(conf)32 flow = FlowCreator(inputs=[job]).create_flow()33 WorkflowCreator(flows=[flow]).execute_workflow()34# def execute_workflow_2(folder_name):35# csv_plugin = JobCreator(plugin=PluginsEnum.CsvPlugin)36# twitter_plugin = JobCreator(plugin=PluginsEnum.TwitterPlugin)37# emoji_plugin = JobCreator(plugin=PluginsEnum.EmojiFilterPlugin)38# ngram_plugin = JobCreator(plugin=PluginsEnum.NGramPlugin)39# converter_plugin = JobCreator(plugin=PluginsEnum.ConverterPlugin)40#41# files_names = FileUtilsPlugin().get_path_files('\\Users\\\willi\\\Documents\\SMM Emojis\\' + folder_name + '\\',42# 'csv')43#44# twitter_config = {'method_name': 'configure',45# 'args': [TWITTER_TOKENS]}46# j1 = twitter_plugin.create_job(twitter_config)47#48# twitter_auth = {'method_name': 'authenticate',49# 'args': [],50# 'input_expected': False}51# j2 = twitter_plugin.create_job(twitter_auth)52#53# get_tweets_by_ids = {'method_name': 'get_tweets_by_ids',54# 'args': []55# }56# j4 = twitter_plugin.create_job(get_tweets_by_ids)57#58# get_emojis = {'method_name': 'filter',59# 'args': ['full_text']60# }61# j5 = emoji_plugin.create_job(get_emojis)62#63# n_gram_frequences = {'method_name': 'extract_ngram_frequencies',64# 'args': [1]65# }66# j6 = ngram_plugin.create_job(n_gram_frequences)67#68# export_csv = {'method_name': 'export_csv',69# 'args': []70# }71# j7 = csv_plugin.create_job(export_csv)72#73# converter_1 = {'method_name': 'merge_lists_of_str_to_str',74# 'args': []}75# converter_2 = {'method_name': 'merge_list_of_strs',76# 'args': []}77# converter_3 = {'method_name': 'convert_list_of_lists_to_list_of_dicts',78# 'args': ['frequence']}79# j8 = converter_plugin.create_job(converter_1)80# j9 = converter_plugin.create_job(converter_2)81# j10 = converter_plugin.create_job(converter_3)82#83# flow_twitter_auth = FlowCreator(jobs=[j1], callback_jobs=[j2]).create_flow()84# flow_get_emojis = FlowCreator(jobs=[j5], callback_jobs=[j8]).create_flow()85# flow_export_csv = FlowCreator(jobs=[j7], callback_jobs=[]).create_flow()86#87# workflow_ls = []88#89# WorkflowCreator(flows=[flow_twitter_auth]).execute_workflow()90# for file_name in files_names:91# import_tweets_ids = {'method_name': 'reading_csv_to_list',92# 'args': [file_name, 'id'],93# 'input_expected': False}94# j3 = csv_plugin.create_job(import_tweets_ids)95# flow_get_tweet_ids = FlowCreator(jobs=[j3], callback_jobs=[j4]).create_flow()96# workflow_ls.append(WorkflowCreator(flows=[flow_get_tweet_ids, flow_get_emojis]).get_workflow())97#98# flow_join_emoji_results = FlowCreator(jobs=workflow_ls, callback_jobs=[j9]).create_flow()99# flow_get_frequences = FlowCreator(jobs=[j6], callback_jobs=[j10]).create_flow()100#101# return WorkflowCreator(flows=[flow_join_emoji_results, flow_get_frequences, flow_export_csv]).execute_workflow()102# def execute_workflow_3():103# csv_plugin = JobCreator(plugin=PluginsEnum.CsvPlugin)104# twitter_plugin = JobCreator(plugin=PluginsEnum.TwitterPlugin)105# emoji_plugin = JobCreator(plugin=PluginsEnum.EmojiFilterPlugin)106# ngram_plugin = JobCreator(plugin=PluginsEnum.NGramPlugin)107# converter_plugin = JobCreator(plugin=PluginsEnum.ConverterPlugin)108# filterdata_plugin = JobCreator(plugin=PluginsEnum.FilterDataPlugin)109#110# files_names = FileUtilsPlugin().get_path_files('\\Users\\\willi\\\Documents\\SMM Emojis\\Hawking Twitter\\', 'csv')111#112# # AUTENTICAÃÃO DO TWITTER113# twitter_config = {'method_name': 'configure',114# 'args': [TWITTER_TOKENS]}115# twitter_auth = {'method_name': 'authenticate',116# 'args': [],117# 'input_expected': False}118# j_twitter_auth1 = twitter_plugin.create_job(conf=twitter_config)119# j_twitter_auth2 = twitter_plugin.create_job(conf=twitter_auth)120#121# # WORKFLOW 1 - EXTRAÃÃO DE EMOJIS DOS TEXTOS DOS TWEETS NÃO TRUNCADOS (SEM RTs)122# filter_data_1 = {'method_name': 'filter_list_of_dicts',123# 'args': [{'text': None, 'truncated': 'FALSE', 'id': None, 'isRetweet': 'FALSE'}]}124# j_filter_1 = filterdata_plugin.create_job(conf=filter_data_1)125#126# filter_emojis = {'method_name': 'filter',127# 'args': ['text']128# }129# j_filter_emojis = emoji_plugin.create_job(conf=filter_emojis)130#131# converter_str1 = {'method_name': 'join_list_of_lists_to_str',132# 'args': []}133# j_results_str = converter_plugin.create_job(conf=converter_str1)134#135# # WORKFLOW 2 - EXTRAÃÃO DE EMOJIS DOS TEXTOS DOS TWEETS TRUNCADOS (SEM RTs) - USO DA API136# # pega apenas os 'truncated' que não são rt, para pegar o texto inteiro137# filter_data_2 = {'method_name': 'filter_list_of_dicts',138# 'args': [{'truncated': 'TRUE', 'id': None, 'isRetweet': 'FALSE'}]}139# j_filter_2 = filterdata_plugin.create_job(conf=filter_data_2)140#141# filter_data_3 = {'method_name': 'filter_list_of_dict_to_list',142# 'args': ['id']}143# j_filter_3 = filterdata_plugin.create_job(conf=filter_data_3)144#145# get_tweets = {'method_name': 'get_tweets_by_ids',146# 'args': []}147# j_get_tweets = twitter_plugin.create_job(conf=get_tweets)148#149# aggregate_results = {'method_name': 'join_list_to_str',150# 'args': []}151# j_aggregate_results = converter_plugin.create_job(conf=aggregate_results)152#153# n_gram_frequences = {'method_name': 'extract_ngram_frequencies',154# 'args': [1]155# }156# j_ngram = ngram_plugin.create_job(conf=n_gram_frequences)157#158# prepare_csv = {'method_name': 'list_of_lists_to_dict',159# 'args': ['frequence']}160# j_prepare_csv = converter_plugin.create_job(conf=prepare_csv)161#162# export_csv = {'method_name': 'export_csv',163# 'args': []164# }165# j_export_result = csv_plugin.create_job(export_csv)166#167# wfs_extract_emojis = []168#169# # flow de autenticacao do twitter170# f_twitter_auth = FlowCreator(jobs=[j_twitter_auth1], callback_jobs=[j_twitter_auth2]).create_flow()171#172# # flow de subworkflows173# f_filter_tweets_1 = FlowCreator(jobs=[j_filter_1]).create_flow()174# f_filter_tweets_2 = FlowCreator(jobs=[j_filter_2], callback_jobs=[j_filter_3]).create_flow()175# f_get_tweets = FlowCreator(jobs=[j_get_tweets]).create_flow()176#177# # flow comum aos dois subworkflows178# f_filter_converter_emojis = FlowCreator(jobs=[j_filter_emojis], callback_jobs=[j_results_str]).create_flow()179#180# f_process_ngrams = FlowCreator(jobs=[j_ngram], callback_jobs=[j_prepare_csv]).create_flow()181# f_export_csv = FlowCreator(jobs=[j_export_result]).create_flow()182#183# WorkflowCreator(flows=[f_twitter_auth]).execute_workflow()184# # Preparando workflows185# wf_not_truncated = WorkflowCreator(flows=[f_filter_tweets_1, f_filter_converter_emojis]).get_workflow()186# wf_truncated = WorkflowCreator(flows=[f_filter_tweets_2, f_get_tweets, f_filter_converter_emojis]).get_workflow()187# for file_name in files_names:188# read_csv = {'method_name': 'reading_csv_to_list',189# 'args': [file_name],190# 'input_expected': False}191# j_read_csv = csv_plugin.create_job(read_csv)192# flow_read_csv = FlowCreator(jobs=[j_read_csv], callback_jobs=[wf_not_truncated, wf_truncated]).create_flow()193# wfs_extract_emojis.append(WorkflowCreator(flows=[flow_read_csv]).get_workflow())194#195# f_emojis_results = FlowCreator(jobs=wfs_extract_emojis, callback_jobs=[j_aggregate_results]).create_flow()196# return WorkflowCreator(flows=[f_emojis_results, f_process_ngrams]).execute_workflow()197# # WorkflowCreator(flows=[f_emojis_results, f_process_ngrams, f_export_csv]).execute_workflow()198# def execute_workflow_4():199# queries = ['site:oglobo.globo.com bolsonaro']200# bing_plugin = JobCreator(plugin=PluginsEnum.BingPlugin)201# filter_data_plugin = JobCreator(plugin=PluginsEnum.FilterDataPlugin)202# converter_plugin = JobCreator(plugin=PluginsEnum.ConverterPlugin)203# stopwords_plugin = JobCreator(plugin=PluginsEnum.StopwordsPlugin)204#205# search_results = {'method_name': 'get_search_results',206# 'args': ['site:oglobo.globo.com bolsonaro', 'pt-BR', 20]}207# job_bing = bing_plugin.create_job(conf=search_results)208#209# filter_data_conf = {'method_name': 'filter_list_of_dict_to_list',210# 'args': ['snippet']}211# job_filter = filter_data_plugin.create_job(conf=filter_data_conf)212#213# converter_conf = {'method_name': 'join_list_to_str',214# 'args': []}215# job_converter = converter_plugin.create_job(conf=converter_conf)216#217# stopwords_conf = {'method_name': '',218# 'args': []}219#220# FlowCreator(jobs=[job_bing], callback_jobs=[job_filter])221# FlowCreator(jobs=[job_converter], callback_jobs=[])222def workflow_get_tweets():223 JobCreator(plugin=PluginsEnum.CsvPlugin)224 JobCreator(plugin=PluginsEnum.FilterDataPlugin)225 JobCreator(plugin=PluginsEnum.ConverterPlugin)226# execute_workflow_2('Hawking Twitter')...
test_project_db.py
Source: test_project_db.py
...12 public=True,13 )14 final_kwargs.update(kwargs)15 return Project(**final_kwargs)16def create_job(**kwargs):17 """ Create a ``Job`` with defaults """18 final_kwargs = dict(19 repo_fs='test',20 commit='test',21 )22 final_kwargs.update(kwargs)23 return Job(**final_kwargs)24class TestProjectsSummary(object):25 """ Ensure ``Project.get_status_summary`` behaves as expected """26 def setup_method(self, _):27 JobStageTmp.query.delete()28 Job.query.delete()29 def teardown_method(self, _):30 JobStageTmp.query.delete()31 Job.query.delete()32 @pytest.mark.parametrize('models,p_filters,exp_s,exp_f,exp_b,exp_i', [33 (34 (35 (create_project('p1'), create_job(result='success')),36 (create_project('p2'), create_job()),37 (create_project('u', utility=True), create_job()),38 ),39 None,40 1, 0, 0, 2,41 ),42 (43 (44 (create_project('p1'), create_job()),45 (create_project('p2'), create_job()),46 (create_project('u', utility=True), create_job(result='success')),47 ),48 None,49 1, 0, 0, 2,50 ),51 (52 (53 (create_project('p1'), create_job(result='success')),54 (create_project('p2'), create_job()),55 (create_project('u', utility=True), create_job(result='success')),56 ),57 None,58 2, 0, 0, 1,59 ),60 (61 (62 (create_project('p1'), create_job(result='fail')),63 (create_project('p2'), create_job()),64 (create_project('u', utility=True), create_job(result='success')),65 ),66 None,67 1, 1, 0, 1,68 ),69 (70 (71 (create_project('p1'), create_job(result='broken')),72 (create_project('p2'), create_job()),73 (create_project('u', utility=True), create_job(result='success')),74 ),75 None,76 1, 0, 1, 1,77 ),78 (79 (80 (create_project('p1'), create_job(result='broken')),81 (create_project('p2'), create_job()),82 (create_project('u', utility=True), create_job(result='success')),83 ),84 {'utility': False},85 0, 0, 1, 1,86 ),87 (88 (89 (create_project('p1'), create_job(result='broken')),90 (create_project('p2'), create_job()),91 (create_project('u', utility=True), create_job(result='success')),92 ),93 {'utility': True},94 1, 0, 0, 0,95 ),96 (97 (98 (create_project('p1'), create_job(result='broken')),99 (create_project('p2'), create_job(result='broken')),100 (create_project('u', utility=True), create_job(result='success')),101 ),102 None,103 1, 0, 2, 0,104 ),105 ])106 def test_it(self, db, models, p_filters, exp_s, exp_f, exp_b, exp_i):107 """ Commit models, assert status summary is accurate """108 for project, *jobs in models:109 DB.session.add(project)110 for job in jobs:111 job.project = project112 DB.session.add(job)113 DB.session.commit()114 assert Project.get_status_summary(p_filters) == dict(...
test_JobManager.py
Source: test_JobManager.py
1import time2from JobManager import *3def test_JobManager_get_jobs():4 jobManager = JobManager()5 id1 = jobManager.create_job("ping 127.0.0.1")6 id2 = jobManager.create_job("ping 127.0.0.1")7 id3 = jobManager.create_job("ping 127.0.0.1")8 id4 = jobManager.create_job("ping 127.0.0.1")9 idarr = jobManager.get_jobs()10 assert(len(idarr) == 4)11 # killed = jobManager.kill_job(id1)12 # assert(killed == True)13 # stdout = jobManager.get_job_output(id2)14 # running = jobManager.get_jobs_running()15 # while (len(running) > 0):16 # for job_id in running:17 # job = jobManager.get_job(job_id)18 # assert(job.status == JobStatus.RUNNING)19 # time.sleep(0.5)20 # assert(len(jobManager.get_jobs_completed()) == 3)21 # assert(len(jobManager.get_jobs_stopped()) == 1)22def test_JobManager_kill_jobs():23 jobManager = JobManager()24 id1 = jobManager.create_job("ping 127.0.0.1")25 id2 = jobManager.create_job("ping 127.0.0.1")26 id3 = jobManager.create_job("ping 127.0.0.1")27 id4 = jobManager.create_job("ping 127.0.0.1")28 time.sleep(2) # Give thread and proc a chance to start29 killed = jobManager.kill_job(id1)30 assert(killed == True)31 assert(len(jobManager.get_jobs_stopped()) == 1)32def test_JobManager_kill_get_jobs_running_completed_stopped():33 jobManager = JobManager()34 id1 = jobManager.create_job("ping 127.0.0.1")35 id2 = jobManager.create_job("ping 127.0.0.1")36 id3 = jobManager.create_job("ping 127.0.0.1")37 id4 = jobManager.create_job("ping 127.0.0.1")38 time.sleep(2) # Give thread and proc a chance to start39 killed = jobManager.kill_job(id1)40 assert(killed == True)41 assert(len(jobManager.get_jobs_stopped()) == 1)42 running = jobManager.get_jobs_running()43 while (len(running) > 0):44 for job_id in running:45 job = jobManager.get_job(job_id)46 assert (job.status == JobStatus.RUNNING)47 running = jobManager.get_jobs_running()48 time.sleep(0.5)49 assert (len(jobManager.get_jobs_completed()) == 3)...
Check out the latest blogs from LambdaTest on this topic:
Before we discuss Scala testing, let us understand the fundamentals of Scala and how this programming language is a preferred choice for your development requirements.The popularity and usage of Scala are rapidly rising, evident by the ever-increasing open positions for Scala developers.
So, now that the first installment of this two fold article has been published (hence you might have an idea of what Agile Testing is not in my opinion), I’ve started feeling the pressure to explain what Agile Testing actually means to me.
Did you know that according to Statista, the number of smartphone users will reach 18.22 billion by 2025? Let’s face it, digital transformation is skyrocketing and will continue to do so. This swamps the mobile app development market with various options and gives rise to the need for the best mobile app testing tools
As a developer, checking the cross browser compatibility of your CSS properties is of utmost importance when building your website. I have often found myself excited to use a CSS feature only to discover that it’s still not supported on all browsers. Even if it is supported, the feature might be experimental and not work consistently across all browsers. Ask any front-end developer about using a CSS feature whose support is still in the experimental phase in most prominent web browsers. ????
The count of mobile users is on a steep rise. According to the research, by 2025, it is expected to reach 7.49 billion users worldwide. 70% of all US digital media time comes from mobile apps, and to your surprise, the average smartphone owner uses ten apps per day and 30 apps each month.
Learn to execute automation testing from scratch with LambdaTest Learning Hub. Right from setting up the prerequisites to run your first automation test, to following best practices and diving deeper into advanced test scenarios. LambdaTest Learning Hubs compile a list of step-by-step guides to help you be proficient with different test automation frameworks i.e. Selenium, Cypress, TestNG etc.
You could also refer to video tutorials over LambdaTest YouTube channel to get step by step demonstration from industry experts.
Get 100 minutes of automation test minutes FREE!!