

Python cookielib.FileCookieJar Method: Code Examples

This article collects typical usage examples of the cookielib.FileCookieJar method in Python. If you are wondering what cookielib.FileCookieJar does, how to call it, or what real-world code using it looks like, the selected examples below should help. You can also explore further usage examples from the cookielib module.


Six code examples of the cookielib.FileCookieJar method are shown below, sorted by popularity by default.
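
Before the project examples, a brief note for context: cookielib.FileCookieJar is the base class for cookie jars backed by a file, and its load()/save() behaviour is only filled in by concrete subclasses such as LWPCookieJar and MozillaCookieJar. A minimal sketch of the usual round-trip (the filename and the ignore_* flags here are illustrative assumptions, not taken from the examples below):

import os
import cookielib

# LWPCookieJar is a concrete FileCookieJar subclass that can actually read and
# write its file; the FileCookieJar base class leaves load()/save() unimplemented.
jar = cookielib.LWPCookieJar('cookies.txt')
if os.path.exists('cookies.txt'):
    jar.load(ignore_discard=True, ignore_expires=True)
# ... drive requests through an opener or session that uses `jar` ...
jar.save(ignore_discard=True, ignore_expires=True)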

Example 1: __init__

# Required module import: import cookielib [as alias]
# Or: from cookielib import FileCookieJar [as alias]
def __init__(self, accessId, accessKey, endpoint=None, caBundle=None, cookieFile='cookies.txt'):
        self.session = requests.Session()
        self.session.auth = (accessId, accessKey)
        self.DEFAULT_VERSION = 'v1'
        self.session.headers = {'content-type': 'application/json', 'accept': 'application/json'}
        if caBundle is not None:
            self.session.verify = caBundle
        cj = cookielib.FileCookieJar(cookieFile)
        self.session.cookies = cj
        if endpoint is None:
            self.endpoint = self._get_endpoint()
        else:
            self.endpoint = endpoint
        if self.endpoint[-1:] == "/":
            raise Exception("Endpoint should not end with a slash character") 
Author: SumoLogic | Project: sumologic-python-sdk | Lines: 17 | Source: sumologic.py
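
Example 1 attaches the jar to a requests.Session. Because the FileCookieJar base class cannot actually write its file, a variation that persists cookies between runs would swap in a concrete subclass; a sketch of that variation (the filename and the ignore_* flags are assumptions, not part of the project above):

import os
import cookielib
import requests

session = requests.Session()
jar = cookielib.MozillaCookieJar('cookies.txt')  # Netscape-format cookie file
if os.path.exists('cookies.txt'):
    jar.load(ignore_discard=True, ignore_expires=True)
session.cookies = jar  # mirrors the assignment in the constructor above
# ... session.get(...) / session.post(...) calls go here ...
jar.save(ignore_discard=True, ignore_expires=True)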

Example 2: cxeSearch

# Required module import: import cookielib [as alias]
# Or: from cookielib import FileCookieJar [as alias]
def cxeSearch(go_inurl,go_site,go_cxe,go_ftype,maxc):
    uRLS = []
    counter = 0
    while counter < int(maxc):
        jar = cookielib.FileCookieJar("cookies")
        query = 'q='+go_inurl+'+'+go_site+'+'+go_ftype
        results_web = 'http://www.google.com/cse?'+go_cxe+'&'+query+'&num='+str(gnum)+'&hl=en&lr=&ie=UTF-8&start=' + repr(counter) + '&sa=N'
        request_web = urllib2.Request(results_web)
        agent = random.choice(header)
        request_web.add_header('User-Agent', agent)
        opener_web = urllib2.build_opener(urllib2.HTTPCookieProcessor(jar))
        text = opener_web.open(request_web).read()
        strreg = re.compile('(?<=href=")(.*?)(?=")')
        names = strreg.findall(text)
        counter += 100
        for name in names:
            if name not in uRLS:
                if re.search(r'\(', name) or re.search("<", name) or re.search("\A/", name) or re.search("\A(http://)\d", name):
                    pass
                elif re.search("google", name) or re.search("youtube", name) or re.search(".gov", name) or re.search("%", name):
                    pass
                else:
                    uRLS.append(name)
    tmpList = []; finalList = []
    print "[+] URLS (unsorted) :", len(uRLS)
    for entry in uRLS:
        try:
            t2host = entry.split("/",3)
            domain = t2host[2]
            if domain not in tmpList and "=" in entry:
                finalList.append(entry)
                tmpList.append(domain)
        except:
            pass
    print "[+] URLS (sorted)   :", len(finalList)
    return finalList
Author: knightmare2600 | Project: d4rkc0de | Lines: 38 | Source: dorkScan.py
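
Example 2 above (and Examples 5 and 6 below) builds a urllib2 opener around the jar so that cookies set by one response are sent with the next request. A condensed sketch of that shared pattern, with a placeholder URL and User-Agent string:

import cookielib
import urllib2

jar = cookielib.FileCookieJar("cookies")  # in-memory unless a subclass saves it
opener = urllib2.build_opener(urllib2.HTTPCookieProcessor(jar))

request = urllib2.Request('http://www.example.com/')  # placeholder URL
request.add_header('User-Agent', 'Mozilla/5.0')       # placeholder UA string
html = opener.open(request).read()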

Example 3: __init__

# Required module import: import cookielib [as alias]
# Or: from cookielib import FileCookieJar [as alias]
def __init__(self, persistent=False,
                 cookies_filename=None, cookies_type='LWPCookieJar'):
        """
        :param bool auto_logout: whether to logout automatically when
            :class:`.API` object is destroyed

                                 .. deprecated:: 0.6.0
                                     Call :meth:`.API.logout` explicitly

        :param bool persistent: whether to use persistent session that stores
            cookies on disk
        :param str cookies_filename: path to the cookies file, use default
            path (`~/.115cookies`) if None
        :param str cookies_type: a string representing
            :class:`cookielib.FileCookieJar` subclass,
            `LWPCookieJar` (default) or `MozillaCookieJar`
        """
        self.persistent = persistent
        self.cookies_filename = cookies_filename
        self.cookies_type = cookies_type
        self.passport = None
        self.http = RequestHandler()
        self.logger = logging.getLogger(conf.LOGGING_API_LOGGER)
        # Cache attributes to decrease API hits
        self._user_id = None
        self._username = None
        self._signatures = {}
        self._upload_url = None
        self._lixian_timestamp = None
        self._root_directory = None
        self._downloads_directory = None
        self._receiver_directory = None
        self._torrents_directory = None
        self._task_count = None
        self._task_quota = None
        if self.persistent:
            self.load_cookies() 
Author: shichao-an | Project: 115wangpan | Lines: 39 | Source: api.py
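
Example 3 keeps cookies_type as a string naming a cookielib.FileCookieJar subclass. One plausible way to turn that string into a jar instance is getattr on the cookielib module; this is only a sketch of the idea, not necessarily how the project's load_cookies is implemented:

import os.path
import cookielib

def make_cookie_jar(cookies_filename, cookies_type='LWPCookieJar'):
    # Map the class name string to the class object (e.g. LWPCookieJar or
    # MozillaCookieJar); fall back to LWPCookieJar if the name is unknown.
    jar_cls = getattr(cookielib, cookies_type, cookielib.LWPCookieJar)
    jar = jar_cls(cookies_filename)
    if os.path.exists(cookies_filename):
        jar.load(ignore_discard=True, ignore_expires=True)
    return jar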

Example 4: save_cookies

# Required module import: import cookielib [as alias]
# Or: from cookielib import FileCookieJar [as alias]
def save_cookies(self, ignore_discard=True, ignore_expires=True):
        """Save cookies to the file :attr:`.API.cookies_filename`"""
        if not isinstance(self.cookies, cookielib.FileCookieJar):
            m = 'Cookies must be a cookielib.FileCookieJar object to be saved.'
            raise APIError(m)
        self.cookies.save(ignore_discard=ignore_discard,
                          ignore_expires=ignore_expires) 
Author: shichao-an | Project: 115wangpan | Lines: 9 | Source: api.py
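
The isinstance check in Example 4 guards save(); a loading counterpart would typically mirror the same flags. A sketch of such a counterpart, reusing the attribute and exception names from the example above (it is not the project's actual load_cookies code):

def load_cookies(self, ignore_discard=True, ignore_expires=True):
    """Load cookies from :attr:`.API.cookies_filename`, if the jar supports it."""
    if not isinstance(self.cookies, cookielib.FileCookieJar):
        raise APIError('Cookies must be a cookielib.FileCookieJar object to be loaded.')
    self.cookies.load(ignore_discard=ignore_discard,
                      ignore_expires=ignore_expires)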

Example 5: gHarv

# Required module import: import cookielib [as alias]
# Or: from cookielib import FileCookieJar [as alias]
def gHarv(dork,site,dP,cxe,output,gnum,maxcount):
    global GoogleURLS, tmplist
    counter = 0;global gcount;gcount+=1;GoogleURLS = []
    try:
        CXr = CXdic[cxe]
        # User-Agent pool: random.choice needs a sequence of full strings,
        # not a single string (which would yield one character).
        header = ['Mozilla/5.0 (Windows; U; Windows NT 6.0; en-US; rv:1.9.0.6)']
        saveCount = len(GoogleURLS);cmpslptime = 0;lastlen = 0
        while counter < int(maxcount):
            jar = cookielib.FileCookieJar("cookies")
            query = dP+dork+'+site:'+site
            gnum = int(gnum)
            results_web = 'http://www.google.com/cse?cx='+CXr+'&q='+query+'&num='+repr(gnum)+'&hl=en&lr=&ie=UTF-8&start=' + repr(counter) + '&sa=N'
            request_web = urllib2.Request(results_web);agent = random.choice(header)
            request_web.add_header('User-Agent', agent);opener_web = urllib2.build_opener(urllib2.HTTPCookieProcessor(jar))
            text = opener_web.open(request_web).read();strreg = re.compile('(?<=href=")(.*?)(?=")')
            names = strreg.findall(text)
            for name in names:
                if name not in GoogleURLS:
                    if re.search(r'\(', name) or re.search("<", name) or re.search("\A/", name) or re.search("\A(http://)\d", name):
                        pass
                    elif re.search("google", name) or re.search("youtube", name) or re.search(".gov", name) or re.search("blackle", name):
                        pass
                    else:
                        if output == 1:
                            txtField.insert(END,name+'\n')
                        else:
                            pass
                        GoogleURLS.append(name)
            sleeptimer = random.choice(rSA);time.sleep(sleeptimer)
            cmpslptime += sleeptimer;counter += int(gnum)
            percent = int((1.0*counter/int(maxcount))*100)
            laststatstring = 'Current MaxCount : '+repr(counter)+' | Last Query Sleeptimer ('+repr(sleeptimer)+') | Percent Done : '+repr(percent)
            statList.append(laststatstring)
            modStatus()
        TestHost_bttn.configure(state=NORMAL,fg=fgCol)
        if iC == True:
            for entry in GoogleURLS:
                global tmplist
                if '=' in entry: tmplist.append(entry)
        else:
            pass
        for url in GoogleURLS:
            try:
                part = url.split('?')
                var = part[1].split('&')
                cod = ""
                for x in var:
                    strX = x.split("=")
                    cod += strX[0]
                    parmURL = part[0]+cod
                    if parmURL not in ParmURLS_List and url not in tmplist:
                        ParmURLS_List.append(parmURL)
                        tmplist.append(url)
            except:
                pass
        tmplist.sort()
        txtField.insert(END,'\nFound URLS: '+repr(len(GoogleURLS))+'\t\tTotal Parm-dupe Checked URLS: '+repr(len(tmplist)))
        txtField.insert(END,'\nGoogle Search Finished...\n')
    except IOError:
        pass
Author: knightmare2600 | Project: d4rkc0de | Lines: 62 | Source: simpleDorkGUi.py

Example 6: gharv

# Required module import: import cookielib [as alias]
# Or: from cookielib import FileCookieJar [as alias]
def gharv(magicWord):
    vUniq = []
    for site in sitearray:
        counter = 0;bcksp = 0
        try:
            CXname = CXdic.keys()[int(random.random()*len(CXdic.keys()))];CXr = CXdic[CXname]
            print "\n| Site : ", site, " | CSEngine : ", CXname+" | Progress : ",
            saveCount = len(targets);cmpslptime = 0;lastlen = 0
            while counter < maxcount:
                jar = cookielib.FileCookieJar("cookies")
                query = magicWord+'+'+dork+'+site:'+site
                results_web = 'http://www.google.com/cse?cx='+CXr+'&q='+query+'&num='+str(gnum)+'&hl=en&lr=&ie=UTF-8&start=' + repr(counter) + '&sa=N'
                request_web = urllib2.Request(results_web);agent = random.choice(header)
                request_web.add_header('User-Agent', agent);opener_web = urllib2.build_opener(urllib2.HTTPCookieProcessor(jar))
                text = opener_web.open(request_web).read();strreg = re.compile('(?<=href=")(.*?)(?=")')
                names = strreg.findall(text)
                for name in names:
                    if name not in targets:
                        if re.search(r'\(', name) or re.search("<", name) or re.search("\A/", name) or re.search("\A(http://)\d", name):
                            pass
                        elif re.search("google", name) or re.search("youtube", name) or re.search(".gov", name):
                            pass
                        else:
                            targets.append(name)
                sleeptimer = random.choice(rSA);time.sleep(sleeptimer)
                cmpslptime += sleeptimer;counter += gnum
                percent = int((1.0*counter/maxcount)*100)
                if bcksp == 1:
                    stroutlen = 0
                    while stroutlen < lastlen:
                        sys.stdout.write("\10");stroutlen += 1
                sys.stdout.write("%s(%s) - %s percent" % (counter,sleeptimer,percent))
                lastlen = len(str(counter)+str(sleeptimer)+str(percent))+13
                sys.stdout.flush()
                bcksp = 1
            sys.stdout.write(" | %s Strings received, in %s seconds" % (len(targets)-saveCount,cmpslptime))
        except IOError:
            sys.stdout.write(" | %s Strings received" % (len(targets)-saveCount))
    firstparm = '';uList = []
    for entry in targets:
        thost = entry.rsplit("=");t2host = entry.rsplit("/")
        try:
            firstparm = thost[1];domain = t2host[2]
            if domain not in uList:
                if '.'+dorkEXT+'?' in entry and firstparm.isdigit() == True:
                    uniqvictims.append(entry);uList.append(domain)
                    pass
                elif 'http://' in entry and 'index.' in entry and firstparm.isalpha() == True:
                    spidervictims.append(entry);uList.append(domain)
                    pass
                else:
                    miscVic.append(entry)
                    pass
        except:
            pass
# ScanQueue Builder 
Author: knightmare2600 | Project: d4rkc0de | Lines: 58 | Source: ACOi.py
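
All of the examples above are Python 2 code. In Python 3 the cookielib module was renamed to http.cookiejar, and FileCookieJar, LWPCookieJar and MozillaCookieJar live there with the same interface; a minimal compatibility sketch:

try:
    import cookielib                    # Python 2
except ImportError:
    import http.cookiejar as cookielib  # Python 3

jar = cookielib.LWPCookieJar('cookies.txt')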


Note: The cookielib.FileCookieJar examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are taken from projects contributed by open-source developers, and copyright remains with the original authors; consult each project's license before redistributing or reusing the code. Please do not republish this compilation without permission.