Best Python code snippets using toolium
scrapinsta.py
Source:scrapinsta.py
import os
import json
import shutil  # NOTE(review): imported by the original file but unused in the visible code

import openpyxl
import instaloader

try:
    from instaloader import ConnectionException, Instaloader, Profile, Post
except ModuleNotFoundError:
    raise SystemExit("Instaloader not found.\n pip install [--user] instaloader")


class Scraper:
    """Scrape likes, comments, followers and @mentions of an Instagram post.

    Credentials are read from ``config.txt`` (line 1: username, line 2:
    password).  The comment-based methods expect that :meth:`scraperpost`
    has already downloaded the post metadata (a ``*.json`` file) into the
    ``test/`` directory.
    """

    def __init__(self):
        # First line of config.txt is the username, second is the password.
        try:
            with open('config.txt') as f:
                lines = f.readlines()
            self.user = lines[0].strip()
            self.password = lines[1].strip()
        except OSError:
            # The original did `return '<message>'` here, which raises
            # TypeError (__init__ must return None).  Exiting with the
            # intended message is the closest working behaviour.
            raise SystemExit('There is no config.txt file. Create it in the current path.try again')

    # ------------------------------------------------------------------
    # internal helpers
    # ------------------------------------------------------------------
    def _login(self):
        """Return an Instaloader session logged in with the stored credentials."""
        loader = Instaloader(download_pictures=False,
                             download_videos=False,
                             download_video_thumbnails=False)
        loader.login(self.user, self.password)
        return loader

    @staticmethod
    def _shortcode(link):
        """Extract the 11-character shortcode from a post URL (original logic:
        ``link[-40:][:11]``)."""
        return link[-40:][:11]

    @staticmethod
    def _load_comments():
        """Load the first comments ``*.json`` file found in ``test/``.

        Raises IndexError when no json file exists; callers treat any
        exception as "no data".
        """
        json_files = [name for name in os.listdir('test/') if name.endswith('.json')]
        with open('test/' + json_files[0], 'r') as fh:
            return json.load(fh)

    @staticmethod
    def _extract_mentions(text):
        """Return ``(joined, count)`` for the ``@mentions`` contained in *text*.

        ``joined`` keeps the original output format: every mention is
        prefixed with a comma (e.g. ``',@foo,@bar'``).  A mention runs from
        ``@`` up to (not including) the next space or end of string — the
        same scan the original performed with ``ord(c)==64``/``ord(c)!=32``.
        """
        mentions = []
        i = 0
        while i < len(text):
            if text[i] == '@':
                start = i
                i += 1
                while i < len(text) and text[i] != ' ':
                    i += 1
                mentions.append(text[start:i])
            i += 1
        return ''.join(',' + m for m in mentions), len(mentions)

    @staticmethod
    def _save_username_column(usernames, filename):
        """Write *usernames* into column A (header ``username``) and save."""
        book = openpyxl.Workbook()
        sheet = book.active  # replaces deprecated get_sheet_by_name('Sheet')
        sheet.cell(row=1, column=1).value = 'username'
        for row, name in enumerate(usernames, start=2):
            sheet.cell(row=row, column=1).value = name
        book.save(filename)
        return filename

    # ------------------------------------------------------------------
    # export methods (return the saved filename, or the error)
    # ------------------------------------------------------------------
    def Exportfilecomment(self, type, link):
        """Export the commenters' usernames to ``Sample.xlsx``."""
        try:
            data = self._load_comments()
            return self._save_username_column(
                [entry['owner']['username'] for entry in data], 'Sample.xlsx')
        except Exception as ex:
            return ex

    def Exportfilelike(self, type, link):
        """Export the likers' usernames to ``Samplelike.xlsx``."""
        try:
            loader = self._login()
            post = Post.from_shortcode(loader.context, self._shortcode(link))
            return self._save_username_column(
                (like.username for like in post.get_likes()), 'Samplelike.xlsx')
        except Exception as ex:
            return ex

    def Exportfilefollower(self, type, link):
        """Export the post owner's followers to ``Samplef.xlsx``."""
        try:
            loader = self._login()
            post = Post.from_shortcode(loader.context, self._shortcode(link))
            profile = instaloader.Profile.from_username(loader.context, post.owner_username)
            return self._save_username_column(
                (follower.username for follower in profile.get_followers()), 'Samplef.xlsx')
        except Exception:
            return ('Login to instagram fail.Please wait a few minutes before you try again')

    def Exportfilemention(self, type, link):
        """Export one row per commenter that used @mentions to
        ``Samplemention.xlsx`` (columns: Username, Mention, Count)."""
        try:
            rows = []
            for entry in self._load_comments():
                joined, count = self._extract_mentions(entry['text'])
                if count > 0:
                    rows.append((entry['owner']['username'], joined, count))
            book = openpyxl.Workbook()
            sheet = book.active
            sheet.cell(row=1, column=1).value = 'Username'
            sheet.cell(row=1, column=2).value = 'Mention'
            sheet.cell(row=1, column=3).value = 'Count'
            for row, (name, joined, count) in enumerate(rows, start=2):
                sheet.cell(row=row, column=1).value = name
                sheet.cell(row=row, column=2).value = joined
                sheet.cell(row=row, column=3).value = count
            book.save('Samplemention.xlsx')
            return ('Samplemention.xlsx')
        except Exception:
            return ('Login to instagram fail.Please wait a few minutes before you try again')

    # ------------------------------------------------------------------
    # scraping / counting
    # ------------------------------------------------------------------
    def scraperpost(self, shortpost):
        """Download a post's metadata (no media) into ``test/``; return the
        owner's username, or the exception on failure."""
        try:
            loader = self._login()
            post = Post.from_shortcode(loader.context, shortpost)
            loader.download_post(post, target='test')
            return post.owner_username
        except Exception as ex:
            return ex

    def textcomment(self):
        """Return the content of the last ``*.txt`` file in ``test/``
        (os.listdir order, as in the original), or False on any error."""
        try:
            txt_files = [name for name in os.listdir('test/') if name.endswith('.txt')]
            with open('test/' + txt_files[-1], 'r') as fh:
                return fh.read()
        except Exception:
            return False

    def countcomment(self, post):
        """Return the number of scraped comments, or False on error."""
        try:
            return len(self._load_comments())
        except Exception:
            return False

    def countlike(self, post):
        """Return the number of distinct likers of *post* (a URL), or False."""
        try:
            loader = self._login()
            liked = Post.from_shortcode(loader.context, self._shortcode(post))
            return len(set(liked.get_likes()))
        except Exception:
            return False

    def countf(self, shortpost):
        """Return the follower count of *shortpost* (a username), or False."""
        try:
            loader = self._login()
            profile = instaloader.Profile.from_username(loader.context, shortpost)
            return len(set(profile.get_followers()))
        except Exception:
            return False

    def countmention(self, post):
        """Return how many comments contain at least one @mention, or False."""
        try:
            data = self._load_comments()
            return sum(1 for entry in data
                       if self._extract_mentions(entry['text'])[1] > 0)
        except Exception:
            return False

    # ------------------------------------------------------------------
    # list accessors
    # ------------------------------------------------------------------
    def get_list_like(self, link):
        """Return the likers' usernames as a list, or the exception."""
        try:
            loader = self._login()
            post = Post.from_shortcode(loader.context, self._shortcode(link))
            return [like.username for like in post.get_likes()]
        except Exception as ex:
            return ex

    def get_list_follower(self, link):
        """Return the post owner's followers as a list of usernames."""
        try:
            loader = self._login()
            post = Post.from_shortcode(loader.context, self._shortcode(link))
            profile = instaloader.Profile.from_username(loader.context, post.owner_username)
            return [follower.username for follower in profile.get_followers()]
        except Exception as ex:
            return ex

    def get_list_comment(self, link):
        """Return the commenters' usernames as a list, or the exception."""
        try:
            return [entry['owner']['username'] for entry in self._load_comments()]
        except Exception as ex:
            return ex

    def get_list_mention(self, link):
        """Return the names of commenters that used @mentions; heavy
        mentioners are repeated once per 5 mentions."""
        try:
            names = []
            for entry in self._load_comments():
                joined, count = self._extract_mentions(entry['text'])
                if count > 0:
                    names.append(entry['owner']['username'])
                    # The original `i=1; for i in i<=c:` (c = count/5) is a
                    # SyntaxError; the apparent intent — one extra append per
                    # 5 mentions — is reconstructed here.
                    # TODO(review): confirm against the full source.
                    for _ in range(count // 5):
                        names.append(entry['owner']['username'])
            return names
        except Exception as ex:
            # NOTE(review): the source is truncated mid-handler here
            # ("except Exception as ex: ..."); returning ex matches the
            # sibling get_list_* methods.
            return ex
fb_main.py
Source:fb_main.py
import json

import fb_search as fb

if __name__ == '__main__':
    #### SEARCH PARAMETERS ####
    PG_SEARCH_LIMIT = 3   # pages examined per query
    VIDEO_LIMIT = 10      # videos fetched per verified page
    QUERY_LIST = []  # Enter list of query requests as strings

    #### DOWNLOAD PARAMETERS ####
    DOWNLOAD_VIDEOS = True
    # NOTE(review): AUDIO_ONLY is unused in the visible code; presumably it
    # is consumed by the truncated download section at the bottom — confirm.
    AUDIO_ONLY = True
    VIDEO_DIRECTORY_NAME = 'video_data'

    # Truncate the three output files so every run starts from scratch.
    open('pg_video.json', 'w').close()
    open('page_verified.json', 'w').close()
    open('page_to_be_verified.json', 'w').close()

    if DOWNLOAD_VIDEOS:
        fb.make_directory(VIDEO_DIRECTORY_NAME)

    # For every query, take the best-matching page and split the results
    # into verified and to-be-verified pages.
    page_verified = []
    page_to_be_verified = []
    for query in QUERY_LIST:
        page = fb.select_page(query, PG_SEARCH_LIMIT)
        if page is None:
            continue
        if fb.is_verified_page(page):
            page_verified.append(page)
        else:
            page_to_be_verified.append(page)

    with open('page_verified.json', 'a') as outfile:
        json.dump(page_verified, outfile, indent=4, sort_keys=True)
    with open('page_to_be_verified.json', 'a') as outfile:
        json.dump(page_to_be_verified, outfile, indent=4, sort_keys=True)

    # Collect videos from every verified page.
    valid_video = []
    for page in page_verified:
        videos = fb.search_page_videos(page['page_id'], VIDEO_LIMIT)
        valid_video.extend(videos)
    with open('pg_video.json', 'a') as outfile:
        json.dump(valid_video, outfile, indent=4, sort_keys=True)

    # The original used the Python 2 print statement, which is a SyntaxError
    # on Python 3; the function form emits the same text.
    print('Found ' + str(len(valid_video)) + ' related resources.')

    if DOWNLOAD_VIDEOS:
        for video in valid_video:
            page = video['page']['page_name'] + '-' + video['page']['page_id']
            # TODO(review): source truncated here — the download call that
            # uses `page` (and likely AUDIO_ONLY) is not visible.
main.py
Source:main.py
from download import download_youtube_files
from merge import merge_files
from open import open_file
from csv_file import make_file_csv
from mananger_sound import mananger_sounds
from sheet import sheet_url
from google_sheet import insert_row_col
from twitt import upload_tweet
from table_info import insert_info
import logging


def main():
    """Download the sheet-listed sounds, export CSV/sheet rows, merge the
    audio into one file and publish a tweet, logging each stage."""
    logging.basicConfig(
        level=logging.INFO,
        format="%(asctime)s [%(levelname)s] %(message)s",
        handlers=[logging.FileHandler("debug.log"), logging.StreamHandler()],
    )
    logger = logging.getLogger(__name__)
    logger.info('start the program')

    logger.info('getting the urls')
    file_url = open_file()
    logger.info('got the urls and ready to download the sounds')
    file_download = sheet_url()
    # Lazy %-args instead of f-strings: the argument is only rendered when
    # the record is actually emitted.
    logger.info('got %s, ready to download', file_url)

    download_videos = download_youtube_files(logger, file_download)
    logger.info('got %s, ready to merge', download_videos)

    # download_videos is a pair: [rows_for_csv, sound_list] — see the two
    # indexed accesses below.
    list_for_csv = download_videos[0]
    make_file_csv(list_for_csv)
    insert_row_col(list_for_csv)
    insert_info(logger, list_for_csv)
    logger.info('finished making the csv file')

    list_for_mananger = download_videos[1]
    mananger_sounds(list_for_mananger)
    merge_files(logger, list_for_mananger, 'merge_alarm.mp3')
    logger.info('finished merge the sounds')

    upload_tweet(list_for_mananger)
    # Original used f'...' with no placeholder; plain literal is equivalent.
    logger.info('end to the program')


if __name__ == '__main__':
    # NOTE(review): the source is truncated right after the guard; calling
    # main() is the only sensible body.
    main()
Learn to execute automation testing from scratch with the LambdaTest Learning Hub. Right from setting up the prerequisites and running your first automation test, to following best practices and diving deeper into advanced test scenarios, the LambdaTest Learning Hub compiles step-by-step guides to help you become proficient with different test automation frameworks, e.g. Selenium, Cypress, and TestNG.
You can also refer to the video tutorials on the LambdaTest YouTube channel to get step-by-step demonstrations from industry experts.
Get 100 minutes of automation testing FREE!