Best Python code snippet using autotest_python
Source: utils.py
# ... (module header and imports elided in the original excerpt; the imports
# below are inferred from usage)
import os
import os.path as osp
import pickle

import numpy as np
import pandas as pd
import torch

fields_user_num = ['followers_count', 'friends_count', 'listed_count', 'statuses_count',
                   'favourites_count', 'len_name', 'len_screen_name', 'len_description']
fields_user_cat = ['geo_enabled', 'verified', 'has_location']
fields_user = ['followers_count', 'friends_count', 'listed_count', 'statuses_count',
               'favourites_count', 'geo_enabled', 'verified', 'has_location']

def get_root_dir():
    """
    Change this to the root data directory
    :return: root directory
    """
    if os.name == "posix":
        return osp.join("..", "fake_news_data")
    return osp.join("C:\\Workspace", "FakeNews", "fake_news_data")

def get_processed_dir(exp_name=None):
    if exp_name is not None:
        processed_dir = osp.join(get_root_dir(), "back", exp_name)
        if not osp.exists(processed_dir):
            os.mkdir(processed_dir)
        return processed_dir
    else:
        return get_root_dir()

def load_tweets(dataset):
    return torch.load(osp.join(get_root_dir(), f"{dataset}_tweets.pt"))

def save_tweets(dataset, all_tweets_d, all_replies_d, all_tweets_score_d):
    torch.save((all_tweets_d, all_replies_d, all_tweets_score_d),
               osp.join(get_root_dir(), f"{dataset}_tweets.pt"))

def load_users(dataset):
    all_user_feat_d = torch.load(osp.join(get_root_dir(), f"{dataset}_users.pt"))
    return all_user_feat_d

def save_users(dataset, all_user_feat_d):
    torch.save(all_user_feat_d, osp.join(get_root_dir(), f"{dataset}_users.pt"))

def load_nx_graphs(dataset):
    all_Gu = torch.load(osp.join(get_root_dir(), f"{dataset}_Gu.pt"))
    return all_Gu

def save_Gu(dataset, all_Gu):
    torch.save(all_Gu, osp.join(get_root_dir(), f"{dataset}_Gu.pt"))

def load_labels(dataset):
    processed_dir = get_root_dir()
    labels_d = torch.load(osp.join(processed_dir, f"{dataset}_labels.pt"))
    return labels_d

def save_labels(dataset, labels_d):
    torch.save(labels_d, osp.join(get_root_dir(), f"{dataset}_labels.pt"))

def load_user_feat(dataset):
    # NOTE: loads the same f"{dataset}_Gu.pt" file as load_nx_graphs; kept
    # as-is since it may be intentional (features stored on the graphs)
    all_Gu = torch.load(osp.join(get_root_dir(), f"{dataset}_Gu.pt"))
    return all_Gu

def read_news_article_evidence(dataset):
    """
    format: claim_evi_pair, news_article, label
    :param dataset:
    :return:
    """
    data_dir = get_root_dir()
    path = osp.join(data_dir, f"{dataset}_news_article_evidence.pkl")
    with open(path, "rb") as f:
        examples = pickle.load(f)
    return examples

def read_tweets_and_scores(dataset):
    all_tweets_d, all_replies_d, all_tweets_score_d = torch.load(
        osp.join(get_root_dir(), f"{dataset}_tweets.pt"))
    return all_tweets_d, all_replies_d, all_tweets_score_d

def read_news_articles_text(global_news_article_d, dataset_name="politifact"):
    root = get_root_dir()
    # the `with` block closes the file; the original's trailing f.close() was redundant
    with open(osp.join(root, f"{dataset_name}_news_articles.txt"), 'r', encoding='utf-8') as f:
        for line in f:
            # split on the first tab only, in case the article text contains tabs
            filename, article = line.split("\t", 1)
            global_news_article_d[filename] = article.strip()

def read_news_articles_labels(dataset_name="politifact", n_samples=0):
    KEEP_EMPTY_RETWEETS_AND_REPLIES = True
    path = osp.join(get_root_dir(), f"{dataset_name}_news_articles.tsv")
    if n_samples > 0:
        # only read the first `n_samples` rows of the dataframe
        # (the original passed header=None here, which would leave the columns
        # unnamed and break the has_* filters below)
        news_article_df = pd.read_csv(path, sep='\t', iterator=True)
        news_article_df = news_article_df.get_chunk(n_samples)
    else:
        news_article_df = pd.read_csv(path, sep='\t')
    if KEEP_EMPTY_RETWEETS_AND_REPLIES:
        news_article_cleaned_df = news_article_df[
            (news_article_df.has_tweets == 1) & (news_article_df.has_news_article == 1)]
    else:
        news_article_cleaned_df = news_article_df[
            (news_article_df.has_tweets == 1) & (news_article_df.has_news_article == 1) &
            (news_article_df.has_retweets == 1) & (news_article_df.has_replies == 1)]
    return news_article_cleaned_df

def only_directories(path):
    return [name for name in os.listdir(path) if osp.isdir(osp.join(path, name))]

def get_data_list():
    politifact_fake = only_directories("politifact_fake")
    politifact_real = only_directories("politifact_real")
    gossipcop_fake = only_directories("gossipcop_fake")
    gossipcop_real = only_directories("gossipcop_real")
    data_list = {
        "politifact_real": politifact_real,
        "politifact_fake": politifact_fake,
        "gossipcop_fake": gossipcop_fake,
        "gossipcop_real": gossipcop_real
    }
    return data_list

def get_dataset_names(dataset):
    if dataset == "politifact":
        dataset_names = {
            "politifact": ["politifact_real", "politifact_fake"]
        }
    elif dataset == "gossipcop":
        dataset_names = {
            "gossipcop": ["gossipcop_real", "gossipcop_fake"]
        }
    elif dataset == "both":
        dataset_names = {
            "politifact": ["politifact_real", "politifact_fake"],
            "gossipcop": ["gossipcop_real", "gossipcop_fake"]
        }
    else:
        raise NotImplementedError
    return dataset_names

def filter_empty_dict_entry(d, filename, log=True):
    is_empty_json = False
    new_d = {}
    for k in d:
        if d[k] != []:
            new_d[k] = d[k]
    if new_d == {}:
        if log:
            print(f"\t{filename} json empty")
        is_empty_json = True
    return new_d, is_empty_json

# For a dict of dicts, filter empty entries, which are {}
def filter_empty_nested_dict(d):
    new_d, empty_li = {}, []
    for k, v in d.items():
        if v == {}:
            empty_li += [k]
        else:
            new_d[k] = d[k]
    return new_d, empty_li

def print_results(results, epoch, dataset_split_name="Train", enable_logging=True, args=None):
    log_str = (f"\n[{dataset_split_name}] Epoch {epoch}\n"
               f"\tPre: {results['pre']:.4f}, Rec: {results['rec']:.4f}\n"
               f"\tAcc: {results['acc']:.4f}, F1: {results['f1']:.4f}\n")
    print(log_str)
    if enable_logging:
        # `with` ensures the log file is closed (the original leaked the handle)
        with open(f"{args.outdir}/{dataset_split_name}_{args.max_len}_{args.evi_num}_results.txt", "a+") as f:
            f.write(log_str)

def load_tweet_df(filename, dataset):
    pd.options.display.max_columns = 20
    pd.set_option('display.precision', 20)
    # NOTE: reading the id columns as int64 is super important -- parsing them
    # as floats would lose precision on long tweet/user ids
    dtypes = {
        'root_tweet_id': np.int64,
        'tweet_id': np.int64,
        'root_user_id': np.int64,
        'user_id': np.int64,
    }
    path = osp.join(get_root_dir(), dataset, filename, "tweets_retweets_comments.tsv")
    if not osp.exists(path):
        print(f"\t SKIP {filename}: no tweets_retweets_comments.tsv")
        return None
    # the original defined `dtypes` without using it; pass it to read_csv so
    # the id columns are actually parsed as int64
    tweet_df = pd.read_csv(path, sep='\t', dtype=dtypes, float_precision='high')
    return tweet_df

def print_heading(dataset):
    # ... (rest of file elided in the original excerpt)
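Taken together, the save_*/load_* pairs cache each dataset's artifacts as torch-serialized files named f"{dataset}_*.pt" under the root directory. Below is a minimal usage sketch, assuming the .pt files have already been produced and that the label and tweet dicts are keyed by the same news ids (both assumptions; the excerpt does not confirm the key scheme):

dataset = "politifact"

labels_d = load_labels(dataset)
all_tweets_d, all_replies_d, all_tweets_score_d = load_tweets(dataset)

# assumed: both dicts are keyed by news id
usable = [news_id for news_id in labels_d if news_id in all_tweets_d]
print(f"{len(usable)} of {len(labels_d)} labeled items also have tweets")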
Source: window_utils.py
...11 """Retrieves the pref value under the given name key from the settings file"""12 return load_settings(SETTINGS_FILENAME).get(key)13def open_config_rc(window):14 """Opens the default .jsbeautifyrc file for editing in a new tab"""15 file_path = join(get_root_dir(), '.jsbeautifyrc.defaults.json')16 window.open_file(file_path)17def open_u_config_rc(window):18 """Opens the user's .jsbeautifyrc file for editing in a new tab"""19 defaults = "{\n}"20 old_jsbeautifyrc_path = join(get_root_dir(), '.jsbeautifyrc')21 old_jsbeautifyrc = read_text_from_file(old_jsbeautifyrc_path, defaults)22 file_path = join(get_user_dir(), '.jsbeautifyrc')23 window.open_file(ensure_file(file_path, default_contents=old_jsbeautifyrc))24def open_sublime_settings(window):25 """Opens the default plugin settings file for editing in a new tab"""26 file_path = join(get_root_dir(), SETTINGS_FILENAME)27 window.open_file(file_path)28def open_u_sublime_settings(window):29 """Opens the user's plugin settings file for editing in a new tab"""30 file_path = join(get_user_dir(), SETTINGS_FILENAME)31 window.open_file(ensure_file(file_path, default_contents="{\n}"))32def open_sublime_keymap(window, platform):33 """Opens the default plugin keyboard bindings file for editing in a new tab"""34 file_name = KEYMAP_FILENAME.replace("$PLATFORM", platform)35 file_path = join(get_root_dir(), file_name)36 window.open_file(file_path)37def open_u_sublime_keymap(window, platform):38 """Opens the user's plugin keyboard bindings file for editing in a new tab"""39 file_name = KEYMAP_FILENAME.replace("$PLATFORM", platform)40 file_path = join(get_user_dir(), file_name)...