Best JavaScript code snippet using fast-check-monorepo
seyirTURK.py
Source:seyirTURK.py
...
if privacy == "true":
    mrl = root() + 'main/vip.php?dummy=dummy'

resp = root() + "check/check.php?mail=" + settings.getSetting( "mail" ) + "&parola=" + settings.getSetting( "sifre" )
membership = get_url(resp)
if ("okmember" in membership and settings.getSetting( "favori" ) == "true" ):
    erl = "&tip=favori&url=" + root() + "data/"+ hashlib.md5(settings.getSetting( "mail" )).hexdigest()+".xml"
elif settings.getSetting( "favori" ) != "true" :
    erl =""
else:
    erl=""
    showMessage("Favorilerim ozelligini kullanabilmek icin lutfen http://seyirturk.com a uye olunuz.")
listele(mrl+erl)
if ("okmember" in membership and settings.getSetting( 'favori' ) == "false" ):
    addDir('[COLOR orange][B][COLOR blue]* [/COLOR]'+ 'Favorilerim' +'[/B][/COLOR]'+'[COLOR blue]* [/COLOR]',mrl + "&tip=favori&url="+root()+"data/"+ hashlib.md5(settings.getSetting( "mail" )).hexdigest()+".xml",2,root()+'resimler/favori.png')
elif ("okmember" in membership and settings.getSetting( 'favori' ) == "true"):
    if settings.getSetting( 'Adult')!='true':
        portal_url = root() + 'main/vip.php?dummy=dummy'
    else :
        portal_url = root() + 'main/vip.php?filter=evet'
    addDir('[COLOR orange][B][COLOR blue]* [/COLOR]'+ 'Portallar' +'[/B][/COLOR]'+'[COLOR blue]* [/COLOR]',mrl,2,"resim")

def listele(url):

    searchstring=""
    if "&keyword" in url:
        keyboard = xbmc.Keyboard( '', "Film Arama", False )
        keyboard.doModal()
        if ( keyboard.isConfirmed() ):
            searchstring = keyboard.getText()
            url = url+searchstring
    if url == "seyirturk.xml":
        seyirturk_file = os.path.join(userdata, 'seyirturk.xml')
        if os.path.isfile(seyirturk_file) :
            g = open(seyirturk_file).read()
        else:
            showMessage("[COLOR blue][B]seyirTURK[/B][/COLOR]","[COLOR red][B]yerel listeniz yok![/B][/COLOR]")
    else :
        request = urllib2.Request(url, None, {'User-agent': 'Mozilla/5.0 seyirTURK_KODI (Windows; U; Windows NT 5.1; en-GB; rv:1.9.0.3) Gecko/2008092417 Firefox/3.0.3','Connection': 'Close'})
        f = urllib2.urlopen(request)
        g = get_url(url)
        print url
        print g
    if "Baslik" in g:
        js = json.load(f)
        for rs in js:
            baslik=rs['Baslik'].encode('utf-8')
            resim=rs['Resim'].encode('utf-8')
            playlist=rs['Playlist'].encode('utf-8')
            stream=rs['Stream'].encode('utf-8')
            aciklama=rs['Aciklama'].encode('utf-8')
            koruma=rs['Koruma']
            if playlist <> '':
                url= playlist
                if koruma == "False":
                    addDir('[COLOR orange][B][COLOR blue]* [/COLOR]'+ baslik +'[/B][/COLOR]',url+searchstring,2,resim)
                    #addDir('[COLOR orange][B][COLOR blue]* [/COLOR]'+ baslik +'[/B][/COLOR]',url,2,resim)
                else:
                    addDir('[COLOR red][B][COLOR blue]+18 > [/COLOR]'+ baslik +'[/B][/COLOR]',url,2,resim)
            else:
                url=stream
                addDir('[COLOR white][B][COLOR red]> [/COLOR]'+baslik+'[/B][/COLOR]',url,3,resim)
        if "PORTALLAR" in g:
            addDir('[COLOR orange][B][COLOR blue]* [/COLOR] Yerel Oynatma Listem[/B][/COLOR]',settings.getSetting( "yerelxml" ) ,2,root()+'resimler/xml.png')

    elif "title" in g:
        xmldoc = minidom.parseString(g)
        js = xmldoc.getElementsByTagName('channel')

        for rs in js:
            baslik = rs.getElementsByTagName("title")[0].firstChild.data.encode('utf-8')
            resim=rs.getElementsByTagName("logo_30x30")[0].firstChild.data.encode('utf-8')
            playlist_url = rs.getElementsByTagName("playlist_url")
            stream_url = rs.getElementsByTagName("stream_url")
            if len(playlist_url) > 0:
                playlist=playlist_url[0].firstChild.data.encode('utf-8')
                url = playlist
                stream =None
            elif len(stream_url)>0:
                stream=stream_url[0].firstChild.data.encode('utf-8')
                url = stream
                playlist =None
            else:
                playlist = None
                stream =None
            aciklama=rs.getElementsByTagName("description")[0].firstChild.data.encode('utf-8')
            aciklama = re.sub(r'<.*?>', '', aciklama)
            if playlist <> None:
                addDir('[COLOR orange][B][COLOR blue]* [/COLOR]'+ baslik +'[/B][/COLOR]',url,2,resim)
            else:
                addDir('[COLOR white][B][COLOR red]> [/COLOR]'+baslik+'[/B][/COLOR]',url,3,resim)

    else:
        showMessage("[COLOR blue][B]seyirTURK[/B][/COLOR]","[COLOR blue][B]Link Bulunamadi[/B][/COLOR]")

def oynat(url,baslik):
    playList.clear()
    url = str(url).encode('utf-8', 'ignore')
    if "vk.com" in url:
        url= VKoynat(url)
    elif "picasaweb" in url:
        url= Picasaweb(url)
    elif "ok.ru" in url:
        url= okru(url)
    elif "odnoklassniki.ru" in url:
        url= okru(url)
    elif "mail.ru" in url:
        url = Mailru(url)
    elif "youtube" in url:
        url= YoutubeOynat(url)
    elif "dailymotion" in url:
        url= dailyoynat(url)
    elif "epornik" in url:
        url= epornik(url)
    elif "veterok" in url:
        url= veterok(url)
    elif "vid.ag" in url:
        url= vidagoynat(url)
    elif "imdb" in url:
        url= imdb(url)
    elif "player.vimeo.com" in url:
        url= vimeo(url)
    elif "embed.myvideo.az" in url:
        url= myvideo(url)
    elif "watchcinema.ru" in url:
        url= watchcinema(url)
    elif "stagevu" in url:
        url= stagevu(url)
    elif "rutube" in url:
        url= rutube(url)
    elif "cloudy" in url:
        url= filekey1(url)
    elif "videoraj" in url:
        url= filekey1(url)
    elif "novamov" in url:
        url= filekey(url)
    elif "divxstage" in url:
        url= filekey(url)
    elif "embed.movshare" in url:
        url= kzd(url)
    elif "embed.nowvideo" in url:
        url= kzd(url)
    elif "plus.google.com" in url:
        url= google(url)
    elif "docs.google.com" in url:
        url= google(url)
    elif 'rtmp:' in url:
        url= url
    elif 'rtsp:' in url:
        url= url
    elif 'mms:' in url:
        url= url
    elif '.m3u8' in url:
        url= url
    elif url.endswith('.mp4'):
        url= url
    else:
        url1=urlresolver.resolve(url)
        if url1:
            url = url1
    if url:
        if ("vk.com" in url or "youtube.com" in url or "rutube" in url):
            oynat(url, baslik)
        else:
            addLink(baslik,url,'')
            listitem = xbmcgui.ListItem(baslik, iconImage="DefaultFolder.png", thumbnailImage='')
            listitem.setInfo('video', {'name': baslik } )
            playList.add(url,listitem=listitem)
            xbmcPlayer.play(playList)
    else:
        showMessage("[COLOR blue][B]seyirTURK[/B][/COLOR]","[COLOR blue][B]Link Bulunamadi[/B][/COLOR]")

def google(url):
    if 'plus.google.com' in url:
        if "oid" in url:
            oid = re.findall('oid=([0-9]+)',url)[0]
            pid = re.findall('pid=([0-9]+)',url)[0]
        else:
            ids = url.split("/")
            oid = ids[4]
            pid = ids[7]
        url = "https://picasaweb.google.com/data/feed/tiny/user/"+oid+"/photoid/"+pid+"?alt=jsonm"
    request = urllib2.Request(url, None, {'User-agent': 'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-GB; rv:1.9.0.3) Gecko/2008092417 Firefox/3.0.3','Connection': 'Close'})
    response = urllib2.urlopen(request)
    html = response.read().decode('unicode-escape')

    if 'picasaweb.google.com' in url:
        links_part = re.findall('"https://redirector(.*?)"', html)
        pre_link = 'https://redirector'
    if 'docs.google.com' in url:
        links_parts = re.findall('"fmt_stream_map","(.*?)"', html)[0]
        links_part = re.findall('\\|(.*?),', links_parts)
        pre_link =''
    videolist = []
    qualitylist = []
    for link_part in links_part:
        if link_part.encode('utf_8').find("itag=18") > -1:
            videolist.append(pre_link + link_part.encode('utf_8'))
            qualitylist.append("360p")
        if link_part.encode('utf_8').find("itag=22") > -1:
            videolist.append(pre_link + link_part.encode('utf_8'))
            qualitylist.append("720p")
        if link_part.encode('utf_8').find("itag=37") > -1:
            videolist.append(pre_link + link_part.encode('utf_8'))
            qualitylist.append("1080p")
    dialog = xbmcgui.Dialog()
    ret = dialog.select('kalite secin...',qualitylist)
    return videolist[ret]

def kzd(url):
    request = urllib2.Request(url, None, {'User-agent': 'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-GB; rv:1.9.0.3) Gecko/2008092417 Firefox/3.0.3','Connection': 'Close'})
    secondpage = urllib2.urlopen(request).read()
    link = re.findall('flashvars\\.file="(.*?)";', secondpage)
    key = re.findall('var fkzd="(.*?)";', secondpage)
    if 'embed.movshare' in url:
        video = 'http://www.movshare.net/api/player.api.php?file=' + link[0] + '&key=' + key[0]
    else:
        video = 'http://www.nowvideo.sx/api/player.api.php?file=' + link[0] + '&key=' + key[0]
    request2 = urllib2.Request(video, None, {'User-agent': 'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-GB; rv:1.9.0.3) Gecko/2008092417 Firefox/3.0.3','Connection': 'Close'})
    thirdpage = urllib2.urlopen(request2).read()
    return re.findall('url=(.*?flv)', thirdpage)[0]

def filekey(url):

    url = url.replace("http://embed.divxstage.eu/embed.php?v=","http://www.cloudtime.to/video/")
    url = url.replace("http://www.divxstage.eu/video/","http://www.cloudtime.to/video/")
    url = url.replace("http://www.divxstage.to/video/","http://www.cloudtime.to/video/")
    request = urllib2.Request(url, None, {'User-agent': 'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-GB; rv:1.9.0.3) Gecko/2008092417 Firefox/3.0.3','Connection': 'Close'})
    page = urllib2.urlopen(request).read()
    link = re.findall('flashvars.file="(.*?)";', page)
    key = re.findall('flashvars.filekey="(.*?)";', page)
    if 'novamov' in url:
        video = 'http://www.novamov.com/api/player.api.php?file=' + link[0] + '&key=' + key[0]
    else :
        video = 'http://www.cloudtime.to/api/player.api.php?file=' + link[0] + '&key=' + key[0]
    request2 = urllib2.Request(video, None, {'User-agent': 'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-GB; rv:1.9.0.3) Gecko/2008092417 Firefox/3.0.3','Connection': 'Close'})
    page2 = urllib2.urlopen(request2).read()
    return urllib.unquote(re.findall('url=(.*?)&', page2)[0])

def filekey1(url):

    url = url.replace ("http://www.videoraj.ch/v/","http://www.videoraj.ch/embed.php?id=")
    request = urllib2.Request(url, None, {'User-agent': 'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-GB; rv:1.9.0.3) Gecko/2008092417 Firefox/3.0.3','Connection': 'Close'})
    page = urllib2.urlopen(request).read()
    link = re.findall('file:"(.*?)",', page)
    key = re.findall('key: "(.*?)",', page)
    domain = re.findall('domain: "(.*?)",', page)
    if 'videoraj' in url:
        video = "http://www.videoraj.ch/api/player.api.php?file="+ link[0] + "&key="+key[0]
    else:
        video = 'http://www.cloudy.ec/api/player.api.php?user=undefined&codes=1&file=' + link[0] + '&pass=undefined&key=' + key[0]
    request2 = urllib2.Request(video, None, {'User-agent': 'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-GB; rv:1.9.0.3) Gecko/2008092417 Firefox/3.0.3','Connection': 'Close'})
    page2 = urllib2.urlopen(request2).read()
    return urllib.unquote(re.findall('url=(.*?)&', page2)[0])

def rutube(url):
    request = urllib2.Request(url, None, {'User-agent': 'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-GB; rv:1.9.0.3) Gecko/2008092417 Firefox/3.0.3','Connection': 'Close'})
    page = urllib2.urlopen(request).read()
    page = page.replace ("&quot;","\"")
    return re.findall('m3u8:(.*?)}', page)[0].replace("&amp;", "&")

def stagevu(url):
    request = urllib2.Request(url, None, {'User-agent': 'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-GB; rv:1.9.0.3) Gecko/2008092417 Firefox/3.0.3','Connection': 'Close'})
    page = urllib2.urlopen(request).read()
    return re.findall('"src" value="(.*?)"', page)[0]

def watchcinema(url):
    request = urllib2.Request(url, None, {'User-agent': 'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-GB; rv:1.9.0.3) Gecko/2008092417 Firefox/3.0.3','Connection': 'Close'})
    page = urllib2.urlopen(request).read()
    page = page.strip(' \t\n\r')
    regex = re.findall('<iframe src="(.*?)"', page)
    return "http://"+regex[0].replace("https:","").replace("http://","").replace("&amp;", "&").replace("vkontakte.ru", "vk.com").replace("watchcinema.ru", "vk.com")

def vimeo(url):
    request = urllib2.Request(url, None, {'User-agent': 'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-GB; rv:1.9.0.3) Gecko/2008092417 Firefox/3.0.3','Connection': 'Close'})
    page = urllib2.urlopen(request).read()
    qualitylist = re.findall('(hd|sd|mobile)":\\{"profile".*?"url":".*?["|&]', page)
    videolist = re.findall('":\\{"profile".*?"url":"(.*?)"', page)
    dialog = xbmcgui.Dialog()
    ret = dialog.select('kalite secin...',qualitylist)
    return videolist[ret]

def myvideo(url):
    request = urllib2.Request(url, None, {'User-agent': 'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-GB; rv:1.9.0.3) Gecko/2008092417 Firefox/3.0.3','Connection': 'Close'})
    response = urllib2.urlopen(request).read()
    a= re.findall("'file': '(.*?)'",response)
    request = urllib2.Request(a[0], None, {'User-agent': 'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-GB; rv:1.9.0.3) Gecko/2008092417 Firefox/3.0.3','Connection': 'Close'})
    response = urllib2.urlopen(request)
    video = response.geturl()
    response.close()
    return video

def imdb(url):
    request = urllib2.Request(url, None, {'User-agent': 'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-GB; rv:1.9.0.3) Gecko/2008092417 Firefox/3.0.3','Connection': 'Close'})
    page = urllib2.urlopen(request).read()
    return re.findall('"url":"(.*?)"', page)[0]

def veterok(url):
    request = urllib2.Request(url, None, {'User-agent': 'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-GB; rv:1.9.0.3) Gecko/2008092417 Firefox/3.0.3','Connection': 'Close'})
    page = urllib2.urlopen(request).read()
    return re.findall('<script>files.*?="(.*?)"', page)[0]

def epornik(url):
    request = urllib2.Request(url, None, {'User-agent': 'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-GB; rv:1.9.0.3) Gecko/2008092417 Firefox/3.0.3','Connection': 'Close'})
    page = urllib2.urlopen(request).read()
    return re.findall('file: "(.*?)"', page)[0]

def Mailru(url):
    url = url.replace(".html","")
    var_arr = url.split("/")
    url = "http://m.my.mail.ru/mail/" +var_arr[6] + "/video/" + var_arr[7] + "/" + var_arr[8] + ".html"
    request = urllib2.Request(url, None)
    page = urllib2.urlopen(request).read()
    return re.findall('data-src="(.*?)"' ,page)[0].replace("&amp;","&")

def VKoynat(url):
    url = url.replace('https', 'http').replace('http://www.', 'http://')
    sorgu = url.split('?', 1)[-1]
    sorgu = parse_qs(sorgu)
    url = 'http://api.vk.com/method/video.getEmbed?oid=%s&video_id=%s&embed_hash=%s' % (sorgu['oid'][0], sorgu['id'][0], sorgu['hash'][0])
    request = urllib2.Request(url, None, {'User-agent': 'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-GB; rv:1.9.0.3) Gecko/2008092417 Firefox/3.0.3','Connection': 'Close'})
    page = urllib2.urlopen(request).read()
    er = re.findall('error_msg":"([^"]+)', page)
    if er:
        url = 'http://vk.com/al_video.php?act=show_inline&al=1&video=%s_%s' % (sorgu['oid'][0], sorgu['id'][0])
        request = urllib2.Request(url, None, {'User-agent': 'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-GB; rv:1.9.0.3) Gecko/2008092417 Firefox/3.0.3','Connection': 'Close'})
        page = urllib2.urlopen(request).read()
        page = re.sub('\\\\', '', page)
    videolist = re.findall('"url[^"]+":"(http:[^"]+)', page)
    qualitylist = re.findall('"url([^"]+)":"http:[^"]+', page)
    dialog = xbmcgui.Dialog()
    ret = dialog.select('kalite secin...',qualitylist)
    return videolist[ret]

def Picasaweb(url):
    idd = re.findall('#(.*?)$', url)
    dert = '"streamIds".*?shared_group_'+idd[0]+'.*?content":(.*?)description'
    request = urllib2.Request(url, None, {'User-agent': 'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-GB; rv:1.9.0.3) Gecko/2008092417 Firefox/3.0.3','Connection': 'Close'})
    response = urllib2.urlopen(request)
    html = response.read()
    bundlelinks = re.findall(dert,html)[0]
    links=re.findall('https(.*?)"', bundlelinks)
    qualitylist = []
    videolist = []
    pre_link='https'
    for link_part in links:
        if link_part.find("=m18") > -1:
            videolist.append(pre_link + link_part)
            qualitylist.append("360p")
        if link_part.find("=m22") > -1:
            videolist.append(pre_link + link_part)
            qualitylist.append("720p")
        if link_part.find("=m37") > -1:
            videolist.append(pre_link + link_part)
            qualitylist.append("1080p")
    dialog = xbmcgui.Dialog()
    ret = dialog.select('kalite secin...',qualitylist)
    return videolist[ret]

def okru(url):
    id = re.search('\d+', url).group(0)
    json_url = 'http://ok.ru/dk?cmd=videoPlayerMetadata&mid=' + id
    req = urllib2.Request(json_url)
    response = urllib2.urlopen(req)
    source = response.read()
    response.close()
    json_source = json.loads(source)
    videolist = []
    qualitylist = []
    sources = []
    for source in json_source['videos']:
        name = source['name']
        link = source['url']
        videolist.append(link.decode('unicode_escape').encode('utf-8'))
        qualitylist.append(name)
    dialog = xbmcgui.Dialog()
    ret = dialog.select('kalite secin...',qualitylist)
    return videolist[ret]

def vidagoynat(url):
    page = get_url(url)
    vids = re.findall(',{file:"(.*?mp4)"', page)
    quals = re.findall('",label:"(.*?)"', page)
    videolist = [ reg.replace('\\','').replace('"','') for reg in vids]
    qualitylist = [ qual + '' for qual in quals ]
    dialog = xbmcgui.Dialog()
    ret = dialog.select('kalite secin...',qualitylist)
    return videolist[ret]

def YoutubeOynat(url):
    yt_id = url.replace("http://www.youtube.com/embed/","").replace("http://www.youtube.com/watch?v=","")
    url='plugin://plugin.video.youtube/?action=play_video&videoid=' + yt_id
    return url

def dailyoynat(url):
    qualitylist =[]
    videolist=[]
    url = url.replace('dailymotion.com/video/', 'dailymotion.com/embed/video/')
    page = get_url(url)
    array = re.findall('stream_h264_(?:hd1080_|ld_|hq_|hd_|)url":"(.*?H264-(.*?)\\\\/.*?)"', page)
    if array:
        for v, q in array:
            url = v.replace('\\', '')
            videolist.append(url)
            qualitylist.append(q+"p")
    array1 = re.findall('"(\\d+)":\\[{"type":"video\\\\\\/mp4","url":"([^"]+)"}]', page)
    if array1:
        for v, q in array1:
            url = q.replace('\\', '')
            videolist.append(url)
            qualitylist.append(v+"p")
    dialog = xbmcgui.Dialog()
    ret = dialog.select('kalite secin...',qualitylist)
    return videolist[ret]

def root():
    req = urllib2.Request(base64.b64decode("aHR0cDovL2hpdGl0LnRrL21haW4vZ2V0cm9vdC5waHA="), None, {'User-agent': 'Mozilla/5.0 seyirTURK_E2','Connection': 'Close'})
    return base64.b64decode(urllib2.urlopen(req).read())

def get_url(url):
    req = urllib2.Request(url)
    req.add_header('User-Agent', 'Mozilla/5.0 seyirTURK_KODI (Windows; U; Windows NT 5.1; en-GB; rv:1.9.0.3) Gecko/2008092417 Firefox/3.0.3')
    response = urllib2.urlopen(req)
    link=response.read()
    response.close()
    return link

def showMessage(heading='seyirTURK', message = '', times = 2000, pics = ''):
    try: xbmc.executebuiltin('XBMC.Notification("%s", "%s", %s, "%s")' % (heading, message, times, pics))
    except Exception, e:
        xbmc.log( '[%s]: showMessage: exec failed [%s]' % ('', e), 1 )

def addLink(name,url,iconimage):
    ok=True
    liz=xbmcgui.ListItem(name, iconImage="DefaultVideo.png", thumbnailImage=iconimage)
    liz.setInfo( type="Video", infoLabels={ "Title": name } )
...
test_url.py
Source:test_url.py
from django.template import RequestContext, TemplateSyntaxError
from django.test import RequestFactory, SimpleTestCase, override_settings
from django.urls import NoReverseMatch, resolve
from ..utils import setup


@override_settings(ROOT_URLCONF='template_tests.urls')
class UrlTagTests(SimpleTestCase):
    # Successes
    @setup({'url01': '{% url "client" client.id %}'})
    def test_url01(self):
        output = self.engine.render_to_string('url01', {'client': {'id': 1}})
        self.assertEqual(output, '/client/1/')
    @setup({'url02': '{% url "client_action" id=client.id action="update" %}'})
    def test_url02(self):
        output = self.engine.render_to_string('url02', {'client': {'id': 1}})
        self.assertEqual(output, '/client/1/update/')
    @setup({'url02a': '{% url "client_action" client.id "update" %}'})
    def test_url02a(self):
        output = self.engine.render_to_string('url02a', {'client': {'id': 1}})
        self.assertEqual(output, '/client/1/update/')
    @setup({'url02b': "{% url 'client_action' id=client.id action='update' %}"})
    def test_url02b(self):
        output = self.engine.render_to_string('url02b', {'client': {'id': 1}})
        self.assertEqual(output, '/client/1/update/')
    @setup({'url02c': "{% url 'client_action' client.id 'update' %}"})
    def test_url02c(self):
        output = self.engine.render_to_string('url02c', {'client': {'id': 1}})
        self.assertEqual(output, '/client/1/update/')
    @setup({'url03': '{% url "index" %}'})
    def test_url03(self):
        output = self.engine.render_to_string('url03')
        self.assertEqual(output, '/')
    @setup({'url04': '{% url "named.client" client.id %}'})
    def test_url04(self):
        output = self.engine.render_to_string('url04', {'client': {'id': 1}})
        self.assertEqual(output, '/named-client/1/')
    @setup({'url05': '{% url "метка_оператора" v %}'})
    def test_url05(self):
        output = self.engine.render_to_string('url05', {'v': 'Ω'})
        self.assertEqual(output, '/%D0%AE%D0%BD%D0%B8%D0%BA%D0%BE%D0%B4/%CE%A9/')
    @setup({'url06': '{% url "метка_оператора_2" tag=v %}'})
    def test_url06(self):
        output = self.engine.render_to_string('url06', {'v': 'Ω'})
        self.assertEqual(output, '/%D0%AE%D0%BD%D0%B8%D0%BA%D0%BE%D0%B4/%CE%A9/')
    @setup({'url08': '{% url "метка_оператора" v %}'})
    def test_url08(self):
        output = self.engine.render_to_string('url08', {'v': 'Ω'})
        self.assertEqual(output, '/%D0%AE%D0%BD%D0%B8%D0%BA%D0%BE%D0%B4/%CE%A9/')
    @setup({'url09': '{% url "метка_оператора_2" tag=v %}'})
    def test_url09(self):
        output = self.engine.render_to_string('url09', {'v': 'Ω'})
        self.assertEqual(output, '/%D0%AE%D0%BD%D0%B8%D0%BA%D0%BE%D0%B4/%CE%A9/')
    @setup({'url10': '{% url "client_action" id=client.id action="two words" %}'})
    def test_url10(self):
        output = self.engine.render_to_string('url10', {'client': {'id': 1}})
        self.assertEqual(output, '/client/1/two%20words/')
    @setup({'url11': '{% url "client_action" id=client.id action="==" %}'})
    def test_url11(self):
        output = self.engine.render_to_string('url11', {'client': {'id': 1}})
        self.assertEqual(output, '/client/1/==/')
    @setup({'url12': '{% url "client_action" id=client.id action="!$&\'()*+,;=~:@," %}'})
    def test_url12(self):
        output = self.engine.render_to_string('url12', {'client': {'id': 1}})
        self.assertEqual(output, '/client/1/!$&amp;&#x27;()*+,;=~:@,/')
    @setup({'url13': '{% url "client_action" id=client.id action=arg|join:"-" %}'})
    def test_url13(self):
        output = self.engine.render_to_string('url13', {'client': {'id': 1}, 'arg': ['a', 'b']})
        self.assertEqual(output, '/client/1/a-b/')
    @setup({'url14': '{% url "client_action" client.id arg|join:"-" %}'})
    def test_url14(self):
        output = self.engine.render_to_string('url14', {'client': {'id': 1}, 'arg': ['a', 'b']})
        self.assertEqual(output, '/client/1/a-b/')
    @setup({'url15': '{% url "client_action" 12 "test" %}'})
    def test_url15(self):
        output = self.engine.render_to_string('url15')
        self.assertEqual(output, '/client/12/test/')
    @setup({'url18': '{% url "client" "1,2" %}'})
    def test_url18(self):
        output = self.engine.render_to_string('url18')
        self.assertEqual(output, '/client/1,2/')
    @setup({'url19': '{% url named_url client.id %}'})
    def test_url19(self):
        output = self.engine.render_to_string(
            'url19', {'client': {'id': 1}, 'named_url': 'client'}
        )
        self.assertEqual(output, '/client/1/')
    @setup({'url20': '{% url url_name_in_var client.id %}'})
    def test_url20(self):
        output = self.engine.render_to_string('url20', {'client': {'id': 1}, 'url_name_in_var': 'named.client'})
        self.assertEqual(output, '/named-client/1/')
    @setup({'url21': '{% autoescape off %}'
                     '{% url "client_action" id=client.id action="!$&\'()*+,;=~:@," %}'
                     '{% endautoescape %}'})
    def test_url21(self):
        output = self.engine.render_to_string('url21', {'client': {'id': 1}})
        self.assertEqual(output, '/client/1/!$&\'()*+,;=~:@,/')
    # Failures
    @setup({'url-fail01': '{% url %}'})
    def test_url_fail01(self):
        with self.assertRaises(TemplateSyntaxError):
            self.engine.get_template('url-fail01')
    @setup({'url-fail02': '{% url "no_such_view" %}'})
    def test_url_fail02(self):
        with self.assertRaises(NoReverseMatch):
            self.engine.render_to_string('url-fail02')
    @setup({'url-fail03': '{% url "client" %}'})
    def test_url_fail03(self):
        with self.assertRaises(NoReverseMatch):
            self.engine.render_to_string('url-fail03')
    @setup({'url-fail04': '{% url "view" id, %}'})
    def test_url_fail04(self):
        with self.assertRaises(TemplateSyntaxError):
            self.engine.get_template('url-fail04')
    @setup({'url-fail05': '{% url "view" id= %}'})
    def test_url_fail05(self):
        with self.assertRaises(TemplateSyntaxError):
            self.engine.get_template('url-fail05')
    @setup({'url-fail06': '{% url "view" a.id=id %}'})
    def test_url_fail06(self):
        with self.assertRaises(TemplateSyntaxError):
            self.engine.get_template('url-fail06')
    @setup({'url-fail07': '{% url "view" a.id!id %}'})
    def test_url_fail07(self):
        with self.assertRaises(TemplateSyntaxError):
            self.engine.get_template('url-fail07')
    @setup({'url-fail08': '{% url "view" id="unterminatedstring %}'})
    def test_url_fail08(self):
        with self.assertRaises(TemplateSyntaxError):
            self.engine.get_template('url-fail08')
    @setup({'url-fail09': '{% url "view" id=", %}'})
    def test_url_fail09(self):
        with self.assertRaises(TemplateSyntaxError):
            self.engine.get_template('url-fail09')
    @setup({'url-fail11': '{% url named_url %}'})
    def test_url_fail11(self):
        with self.assertRaises(NoReverseMatch):
            self.engine.render_to_string('url-fail11')
    @setup({'url-fail12': '{% url named_url %}'})
    def test_url_fail12(self):
        with self.assertRaises(NoReverseMatch):
            self.engine.render_to_string('url-fail12', {'named_url': 'no_such_view'})
    @setup({'url-fail13': '{% url named_url %}'})
    def test_url_fail13(self):
        with self.assertRaises(NoReverseMatch):
            self.engine.render_to_string('url-fail13', {'named_url': 'template_tests.views.client'})
    @setup({'url-fail14': '{% url named_url id, %}'})
    def test_url_fail14(self):
        with self.assertRaises(TemplateSyntaxError):
            self.engine.render_to_string('url-fail14', {'named_url': 'view'})
    @setup({'url-fail15': '{% url named_url id= %}'})
    def test_url_fail15(self):
        with self.assertRaises(TemplateSyntaxError):
            self.engine.render_to_string('url-fail15', {'named_url': 'view'})
    @setup({'url-fail16': '{% url named_url a.id=id %}'})
    def test_url_fail16(self):
        with self.assertRaises(TemplateSyntaxError):
            self.engine.render_to_string('url-fail16', {'named_url': 'view'})
    @setup({'url-fail17': '{% url named_url a.id!id %}'})
    def test_url_fail17(self):
        with self.assertRaises(TemplateSyntaxError):
            self.engine.render_to_string('url-fail17', {'named_url': 'view'})
    @setup({'url-fail18': '{% url named_url id="unterminatedstring %}'})
    def test_url_fail18(self):
        with self.assertRaises(TemplateSyntaxError):
            self.engine.render_to_string('url-fail18', {'named_url': 'view'})
    @setup({'url-fail19': '{% url named_url id=", %}'})
    def test_url_fail19(self):
        with self.assertRaises(TemplateSyntaxError):
            self.engine.render_to_string('url-fail19', {'named_url': 'view'})
    # {% url ... as var %}
    @setup({'url-asvar01': '{% url "index" as url %}'})
    def test_url_asvar01(self):
        output = self.engine.render_to_string('url-asvar01')
        self.assertEqual(output, '')
    @setup({'url-asvar02': '{% url "index" as url %}{{ url }}'})
    def test_url_asvar02(self):
        output = self.engine.render_to_string('url-asvar02')
        self.assertEqual(output, '/')
    @setup({'url-asvar03': '{% url "no_such_view" as url %}{{ url }}'})
    def test_url_asvar03(self):
        output = self.engine.render_to_string('url-asvar03')
        self.assertEqual(output, '')
    @setup({'url-namespace01': '{% url "app:named.client" 42 %}'})
    def test_url_namespace01(self):
        request = RequestFactory().get('/')
        request.resolver_match = resolve('/ns1/')
        template = self.engine.get_template('url-namespace01')
        context = RequestContext(request)
        output = template.render(context)
        self.assertEqual(output, '/ns1/named-client/42/')
    @setup({'url-namespace02': '{% url "app:named.client" 42 %}'})
    def test_url_namespace02(self):
        request = RequestFactory().get('/')
        request.resolver_match = resolve('/ns2/')
        template = self.engine.get_template('url-namespace02')
        context = RequestContext(request)
        output = template.render(context)
        self.assertEqual(output, '/ns2/named-client/42/')
    @setup({'url-namespace03': '{% url "app:named.client" 42 %}'})
    def test_url_namespace03(self):
        request = RequestFactory().get('/')
        template = self.engine.get_template('url-namespace03')
        context = RequestContext(request)
        output = template.render(context)
        self.assertEqual(output, '/ns2/named-client/42/')
    @setup({'url-namespace-no-current-app': '{% url "app:named.client" 42 %}'})
    def test_url_namespace_no_current_app(self):
        request = RequestFactory().get('/')
        request.resolver_match = resolve('/ns1/')
        request.current_app = None
        template = self.engine.get_template('url-namespace-no-current-app')
        context = RequestContext(request)
        output = template.render(context)
        self.assertEqual(output, '/ns2/named-client/42/')
    @setup({'url-namespace-explicit-current-app': '{% url "app:named.client" 42 %}'})
    def test_url_namespace_explicit_current_app(self):
        request = RequestFactory().get('/')
        request.resolver_match = resolve('/ns1/')
        request.current_app = 'app'
        template = self.engine.get_template('url-namespace-explicit-current-app')
        context = RequestContext(request)
        output = template.render(context)
...
urlparse.py
Source:urlparse.py
...
    __slots__ = ()
    def __new__(cls, scheme, netloc, path, query, fragment):
        return BaseResult.__new__(
            cls, (scheme, netloc, path, query, fragment))
    def geturl(self):
        return urlunsplit(self)

class ParseResult(BaseResult):
    __slots__ = ()
    def __new__(cls, scheme, netloc, path, params, query, fragment):
        return BaseResult.__new__(
            cls, (scheme, netloc, path, params, query, fragment))
    @property
    def params(self):
        return self[3]
    def geturl(self):
        return urlunparse(self)

def urlparse(url, scheme='', allow_fragments=True):
    """Parse a URL into 6 components:
    <scheme>://<netloc>/<path>;<params>?<query>#<fragment>
    Return a 6-tuple: (scheme, netloc, path, params, query, fragment).
    Note that we don't break the components up in smaller bits
    (e.g. netloc is a single string) and we don't expand % escapes."""
    tuple = urlsplit(url, scheme, allow_fragments)
    scheme, netloc, url, query, fragment = tuple
    if scheme in uses_params and ';' in url:
        url, params = _splitparams(url)
    else:
        params = ''
    return ParseResult(scheme, netloc, url, params, query, fragment)
...
web_extractor.py
Source:web_extractor.py
...
        return data['Content-Disposition'].split(";")[1].split('"')[1]
    else:
        return re.search(r'(?<=\/)[^\/\?#]+(?=[^\/]*$)', url).group(0)

def redirect_url(url):
    web_domain = getDomain(url)
    if(web_domain == 'github'):
        return requests.head(url)
    else:
        with requests.Session() as session:
            return session.head(session.post(url).url)

domain_crawler_mapper = {
    "mediafire": mediafire,
    "google": google,
    "dropbox": dropbox,
    "github": github,
    "youtube": youtube,
    "facebook": facebook
}

def direct_link_generator(url):
    response = redirect_url(url)
    url = response.url
    if(response.status_code == 404 or response.status_code == 403 or response.status_code == 400):
        return "Invalid url"
    else:
        url_type, _ = mimetypes.guess_type(url)
        if url_type is None:
            url_type = response.headers
        if(('Content-Type' in url_type) and (url_type['Content-Type'] == 'text/html; charset=utf-8' or url_type['Content-Type'] == 'text/html; charset="utf-8"' or url_type['Content-Type'] == 'text/html; charset=UTF-8' )):  # True if this url is text/html, False if it is a file
            web_domain = getDomain(url)
            if web_domain in domain_crawler_mapper:
                direct_url = domain_crawler_mapper[web_domain](url)
                if(direct_url):
                    if(type(direct_url) is not tuple):
                        return (get_file_name(direct_url),direct_url)
...
url_resolver.py
Source:url_resolver.py
...
import resources.lib.kodion.simple_requests as requests

class AbstractResolver(object):
    def __init__(self):
        pass
    def supports_url(self, url, url_components):
        raise NotImplementedError()
    def resolve(self, url, url_components):
        raise NotImplementedError()
    pass

class YouTubeResolver(AbstractResolver):
    RE_USER_NAME = re.compile(r'http(s)?://(www.)?youtube.com/(?P<user_name>[a-zA-Z0-9]+)$')
    def __init__(self):
        AbstractResolver.__init__(self)
        pass
    def supports_url(self, url, url_components):
        if url_components.hostname == 'www.youtube.com' or url_components.hostname == 'youtube.com':
            if url_components.path.lower() in ['/redirect', '/user']:
                return True
            if url_components.path.lower().startswith('/user'):
                return True
            re_match = self.RE_USER_NAME.match(url)
            if re_match:
                return True
            pass
        return False
    def resolve(self, url, url_components):
        def _load_page(_url):
            # we try to extract the channel id from the html content. With the channel id we can construct a url we
            # already work with.
            # https://www.youtube.com/channel/<CHANNEL_ID>
            try:
                headers = {'Cache-Control': 'max-age=0',
                           'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8',
                           'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/41.0.2272.118 Safari/537.36',
                           'DNT': '1',
                           'Accept-Encoding': 'gzip, deflate',
                           'Accept-Language': 'en-US,en;q=0.8,de;q=0.6'}
                response = requests.get(url, headers=headers)
                if response.status_code == 200:
                    re_match = re.search(r'<meta itemprop="channelId" content="(?P<channel_id>.+)">', response.text)
                    if re_match:
                        channel_id = re_match.group('channel_id')
                        return 'https://www.youtube.com/channel/%s' % channel_id
                    pass
            except:
                # do nothing
                pass
            return _url
        if url_components.path.lower() == '/redirect':
            params = dict(urlparse.parse_qsl(url_components.query))
            return params['q']
        if url_components.path.lower().startswith('/user'):
            return _load_page(url)
        re_match = self.RE_USER_NAME.match(url)
        if re_match:
            return _load_page(url)
        return url
    pass

class CommonResolver(AbstractResolver, list):
    def __init__(self):
        AbstractResolver.__init__(self)
        pass
    def supports_url(self, url, url_components):
        return True
    def resolve(self, url, url_components):
        def _loop(_url, tries=5):
            if tries == 0:
                return _url
            try:
                headers = {'Cache-Control': 'max-age=0',
                           'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8',
                           'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/41.0.2272.118 Safari/537.36',
                           'DNT': '1',
                           'Accept-Encoding': 'gzip, deflate',
                           'Accept-Language': 'en-US,en;q=0.8,de;q=0.6'}
                response = requests.head(_url, headers=headers, allow_redirects=False)
                if response.status_code == 304:
                    return url
                if response.status_code in [301, 302, 303]:
                    headers = response.headers
                    location = headers.get('location', '')
                    # validate the location - some server returned garbage
                    _url_components = urlparse.urlparse(location)
                    if not _url_components.scheme and not _url_components.hostname:
                        return url
                    # some server return 301 for HEAD requests
                    # we just compare the new location - if it's equal we can return the url
                    if location == _url or location + '/' == _url or location == _url + '/':
                        return _url
                    if location:
                        return _loop(location, tries=tries - 1)
                    # just to be sure ;)
                    location = headers.get('Location', '')
                    if location:
                        return _loop(location, tries=tries - 1)
                pass
            except:
                # do nothing
                pass
            return _url
        resolved_url = _loop(url)
        return resolved_url
    pass

class UrlResolver(object):
    def __init__(self, context):
        self._context = context
        self._cache = {}
        self._youtube_resolver = YouTubeResolver()
        self._resolver = [
            self._youtube_resolver,
            CommonResolver()
        ]
        pass
    def clear(self):
        self._context.get_function_cache().clear()
        pass
    def _resolve(self, url):
        # try one of the resolver
        url_components = urlparse.urlparse(url)
        for resolver in self._resolver:
            if resolver.supports_url(url, url_components):
                resolved_url = resolver.resolve(url, url_components)
                self._cache[url] = resolved_url
                # one last check...sometimes the resolved url is YouTube-specific and can be resolved again or
                # simplified.
                url_components = urlparse.urlparse(resolved_url)
                if resolver is not self._youtube_resolver and self._youtube_resolver.supports_url(resolved_url,
                                                                                                  url_components):
                    return self._youtube_resolver.resolve(resolved_url, url_components)
                return resolved_url
            pass
        pass
    def resolve(self, url):
        function_cache = self._context.get_function_cache()
        resolved_url = function_cache.get(FunctionCache.ONE_DAY, self._resolve, url)
        if not resolved_url or resolved_url == '/':
            return url
        return resolved_url
...
requiredhosts.py
Source:requiredhosts.py
import urllib,urllib2,re,xbmcplugin,xbmcgui,xbmc,xbmcaddon,os,sys,time,shutil
import wizardmain as main
addon=main.addon; net=main.net; settings=main.settings;
UA='Mozilla/5.0 (X11; Ubuntu; Linux i686; rv:30.0) Gecko/20100101 Firefox/30.0'

def FireDrive(url):
    if url.startswith('firedrive://'): url=url.replace('firedrive://','http://www.firedrive.com/file/')
    if ('http://m.firedrive.com/file/' not in url) and ('https://m.firedrive.com/file/' not in url) and ('http://www.firedrive.com/file/' not in url) and ('http://firedrive.com/file/' not in url) and ('https://www.firedrive.com/file/' not in url) and ('https://firedrive.com/file/' not in url): return url
    #else:
    try:
        html=net.http_GET(url).content
        if ">This file doesn't exist, or has been removed.<" in html: return "[error] This file doesn't exist, or has been removed."
        elif ">File Does Not Exist | Firedrive<" in html: return "[error] File Does Not Exist."
        elif "404: This file might have been moved, replaced or deleted.<" in html: return "[error] 404: This file might have been moved, replaced or deleted."
        #print html;
        data={}; r=re.findall(r'<input\s+type="\D+"\s+name="(.+?)"\s+value="(.+?)"\s*/>',html);
        for name,value in r: data[name]=value
        #print data;
        if len(data)==0: return '[error] input data not found.'
        html=net.http_POST(url,data,headers={'User-Agent':UA,'Referer':url,'Host':'www.firedrive.com'}).content
        #print html
        r=re.search('<a\s+href="(.+?)"\s+target="_blank"\s+id=\'top_external_download\'\s+title=\'Download This File\'\s*>',html)
        if r: return urllib.unquote_plus(r.group(1))
        else: return url+'#[error]r'
    except: return url+'#[error]exception'

def ResolveOtherHosts(url):
    try:
        if url.startswith('host://'):
            url=url.replace('host://','http://')
            try:
                import urlresolver
                url=urlresolver.HostedMediaFile(url).resolve()
                return url
            except: return url+'#[error]urlresolver'
        else: return url
    except: return url+'#[error]exception'

def MrFile_dot_me(url):
    if url.startswith('mrfile://'): url=url.replace('mrfile://','http://mrfile.me/')
    if ('http://mrfile.me/' not in url) and ('http://www.mrfile.me/' not in url): return url
    try:
        html=net.http_GET(url).content
        #if '<h3 class="error_msg_title">Invalid or Deleted File.</h3>' in html: return "[error] This file doesn't exist, or has been removed."
        data={}; r=re.findall(r'<input type="hidden"\s*name="(.+?)"\s*value="(.*?)"',html)
        for name,value in r: data[name]=value
        data['referer']=''; data['submit']='Click here to Continue'; data['method_free']=''; data['method_premium']='';
        html=main.nolines(net.http_POST(url,data).content).replace('<br><br><br>','<br>\r\a<br>\n<br>')
        r=re.search('<a href="(http\D*://.+?\.zip)">Download .+?\.zip</a>\s*</span',html)
        if r: return urllib.unquote_plus(r.group(1))
        else: return url+'#[error]r'
    except: return url+'#[error]exception'

def PromptFile(url):
    if url.startswith('promptfile://'): url=url.replace('promptfile://','http://www.promptfile.com/l/')
    if url.startswith('http://promptfile.com/'): url=url.replace('http://promptfile.com/','http://www.promptfile.com/')
    if ('http://www.promptfile.com/l/' not in url): return url
    try:
        html=main.nolines(net.http_GET(url,headers={'User-Agent':UA}).content).replace('/>','/\n\r>').replace('</div>','</div\n\r>')
        #if '<h3 class="error_msg_title">Invalid or Deleted File.</h3>' in html: return "[error] This file doesn't exist, or has been removed."
        r=re.search('<a href="(http\D*://.+?)" class="green_btn download_btn">\s*Download File\s*</a',html)
        if not r:
            data={}; r=re.findall(r'<input type="hidden" name="(chash)" value="(.*?)"',html)
            for name,value in r: data[name]=value
            html=main.nolines(net.http_POST(url,data,headers={'User-Agent':UA,'Referer':url}).content).replace('</div>','</div\n\r>')
            r=re.search('<a href="(http\D*://.+?)" class="green_btn download_btn">Download File</a',html)
        if r: return urllib.unquote_plus(r.group(1))
        else: return url+'#[error]r'
    except: return url+'#[error]exception'

def CheckForHosts(url):
    #DefaultUrl=""+url
    #try:
    if 'https://' in url.lower(): url=url.replace('https://','http://')
    print {'incoming url':url}
    if url.startswith('host://'): url=ResolveOtherHosts(url)
    else:
        url=FireDrive(url)
        url=MrFile_dot_me(url)
        url=PromptFile(url)
    print {'returning url':url}
    return url
...
url_test.py
Source:url_test.py
...
import atom.url
import gdata.test_config as conf

class UrlTest(unittest.TestCase):
    def testParseUrl(self):
        url = atom.url.parse_url('http://www.google.com/calendar/feeds')
        self.assert_(url.protocol == 'http')
        self.assertTrue(url.port is None)
        self.assert_(url.host == 'www.google.com')
        self.assert_(url.path == '/calendar/feeds')
        self.assert_(url.params == {})
        url = atom.url.parse_url('http://example.com:6091/calendar/feeds')
        self.assert_(url.protocol == 'http')
        self.assert_(url.host == 'example.com')
        self.assert_(url.port == '6091')
        self.assert_(url.path == '/calendar/feeds')
        self.assert_(url.params == {})

        url = atom.url.parse_url('/calendar/feeds?foo=bar')
        self.assert_(url.protocol is None)
        self.assert_(url.host is None)
        self.assert_(url.path == '/calendar/feeds')
        self.assert_(len(url.params.keys()) == 1)
        self.assert_('foo' in url.params)
        self.assert_(url.params['foo'] == 'bar')

        url = atom.url.parse_url('/calendar/feeds?my+foo=bar%3Dx')
        self.assert_(len(url.params.keys()) == 1)
        self.assert_('my foo' in url.params)
        self.assert_(url.params['my foo'] == 'bar=x')

    def testUrlToString(self):
        url = atom.url.Url(port=80)
        url.host = 'example.com'
        self.assert_(str(url), '//example.com:80')
        url = atom.url.Url(protocol='http', host='example.com', path='/feed')
        url.params['has spaces'] = 'sneaky=values?&!'
        self.assert_(url.to_string() == (
            'http://example.com/feed?has+spaces=sneaky%3Dvalues%3F%26%21'))

    def testGetRequestUri(self):
        url = atom.url.Url(protocol='http', host='example.com', path='/feed')
...
url_frequency.py
Source:url_frequency.py
import db

def normalize(url):
    """ lowercase url
    remove 'http://' and 'https://' prefix
    remove '/' suffix """
    url = url.lower()
    if url[:7] == 'http://':
        url = url[7:]
    if url[:8] == 'https://':
        url = url[8:]
    if url[-1] == '/':
        url = url[:-1]
    return url

def map():
    """ create url -> (user_id, tweet_id) list map
    frequency of url = len(url_map[url])
    rank all urls = sorted(url_map.keys(), key = lambda url: len(url_map[url]), reverse = True) """
    table = db.execute(db.mk_connection(), 'select user_id, tweet_id, url, expanded_url from tweets_url')
    print(len(table), 'Table Entries')
    url_map = {}
    for user_id, tweet_id, url, expanded_url in table:
        url = normalize(url) if expanded_url is None or expanded_url == 'Request Error' else normalize(expanded_url)
        if url not in url_map:
            url_map[url] = []
        url_map[url].append((user_id, tweet_id))
...
Using AI Code Generation
// 'fast-check-monorepo' is the name of the GitHub repository rather than a published npm
// package, so in an installed project the import resolves from 'fast-check' itself. The two
// lines are alternatives: importing the same binding twice in one module is a syntax error.
import {url} from 'fast-check-monorepo'
// import {url} from 'fast-check'
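A minimal, self-contained property over generated URLs might look like the sketch below. It assumes the published fast-check package, where URL generation is exposed as fc.webUrl() (the url() name above is treated here as shorthand for that arbitrary), and a Node 18+ runtime where the WHATWG URL class is global.

// Sketch: every generated web URL should be accepted by the WHATWG URL parser.
const fc = require('fast-check');

fc.assert(
  fc.property(fc.webUrl(), (u) => {
    const parsed = new URL(u); // throws on malformed input, which fails the property
    return parsed.href.length > 0;
  })
);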
Using AI Code Generation
// `fc` must be imported alongside the arbitrary; the deep 'lib/check/arbitrary' path below is
// an internal module layout and may not exist in current fast-check releases.
import * as fc from 'fast-check';
import { url } from 'fast-check/lib/check/arbitrary/UrlArbitrary.js';

describe('UrlArbitrary', () => {
  it('should generate valid urls', () => {
    fc.assert(
      fc.property(url(), (u) => {
        const parsed = new URL(u);
        expect(parsed.protocol).not.toBe('');
        expect(parsed.hostname).not.toBe('');
        expect(parsed.pathname).not.toBe('');
      })
    );
  });
});
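The same style of Jest test can also constrain what gets generated. The sketch below assumes fast-check's documented webUrl() constraints object; the option name validSchemes is taken from the published API and should be checked against the installed version.

// Sketch: restrict generation to https URLs and check the parsed scheme.
const fc = require('fast-check');

describe('webUrl arbitrary', () => {
  it('only generates https URLs when constrained', () => {
    fc.assert(
      fc.property(fc.webUrl({ validSchemes: ['https'] }), (u) => {
        expect(new URL(u).protocol).toBe('https:');
      })
    );
  });
});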
Using AI Code Generation
// test.js — 'fast-check-monorepo' is the repository name and is not published to npm, so the
// first require only resolves if a package with that name is installed locally; the published
// arbitraries live in the 'fast-check' package.
const { url } = require('fast-check-monorepo');
const fc = require('fast-check');
const arb = url();
fc.assert(
  fc.property(arb, (value) => {
    console.log(value);
  })
);

// package.json skeleton (scripts and dependencies left empty):
{
  "scripts": {
  },
  "dependencies": {
  }
}
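When the property is handed to fc.assert, a second argument carries runner parameters; numRuns and seed are standard options in the published API, though their defaults vary by version. A small sketch:

// Sketch: run the property 500 times with a fixed seed so any failure is reproducible.
const fc = require('fast-check');

fc.assert(
  fc.property(fc.webUrl(), (value) => typeof value === 'string' && value.length > 0),
  { numRuns: 500, seed: 42 }
);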
Using AI Code Generation
// Note: fast-check arbitraries do not expose a .sample() method; sampling goes through
// fc.sample(arbitrary, n). The published package also exposes the web URL arbitrary as
// fc.webUrl() rather than fc.url().
const fc = require('fast-check');
const arb = fc.webUrl();
const samples = fc.sample(arb, 7); // draw seven example URLs from the arbitrary
console.log(samples);
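Beyond plain sampling, fast-check can report how generated values are distributed. The sketch below uses fc.statistics(), which classifies each generated value with a user-supplied function and prints the resulting distribution; the exact output format is version-dependent.

// Sketch: classify 1000 generated URLs by their scheme.
const fc = require('fast-check');

fc.statistics(
  fc.webUrl(),
  (u) => new URL(u).protocol, // e.g. 'http:' or 'https:'
  { numRuns: 1000 }
);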
Using AI Code Generation
// 'fast-check-monorepo/url' is not a published module path; in practice the arbitrary comes
// from the 'fast-check' package itself. Note also that the property's parameter shadows the
// `url` binding created by the require, which is legal but easy to misread.
const fc = require('fast-check');
const url = require('fast-check-monorepo/url');
fc.assert(
  fc.property(url(), url => {
    const parsed = new URL(url);
  })
);
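A slightly stronger property than "it parses" is that parsing is stable under re-serialisation. The sketch below assumes the withQueryParameters and withFragments constraint flags documented for fc.webUrl() in the published package.

// Sketch: serialising a parsed URL and parsing it again should give the same href.
const fc = require('fast-check');

fc.assert(
  fc.property(
    fc.webUrl({ withQueryParameters: true, withFragments: true }),
    (u) => {
      const once = new URL(u);
      const twice = new URL(once.href);
      return twice.href === once.href;
    }
  )
);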
Learn to execute automation testing from scratch with the LambdaTest Learning Hub, from setting up the prerequisites and running your first automation test to following best practices and diving into advanced test scenarios. The LambdaTest Learning Hub compiles step-by-step guides to help you become proficient with different test automation frameworks, e.g. Selenium, Cypress, and TestNG.
You can also refer to the video tutorials on the LambdaTest YouTube channel for step-by-step demonstrations from industry experts.