本文整理汇总了Python中urllib.parse.urlunsplit函数的典型用法代码示例。如果您正苦于以下问题:Python urlunsplit函数的具体用法?Python urlunsplit怎么用?Python urlunsplit使用的例子?那么恭喜您, 这里精选的函数代码示例或许可以为您提供帮助。
在下文中一共展示了urlunsplit函数的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: extract_password_row
def extract_password_row(self, row):
    """Format one browser password-store row as a CSV line.

    Row layout (observed from the indices used below):
    [origin_url, form_action_url, username_field, username,
     password_field, encrypted_password].

    Returns the CSV string produced by ``self.format_list_csv``; for
    non-password rows (e.g. blacklist entries) most fields stay blank.
    """
    username = ''
    password = ''
    user_field = ''
    pass_field = ''
    # Reduce both URLs to scheme://netloc (drop path/query/fragment).
    hostname_split = urlparse.urlsplit(row[0])
    website = urlparse.urlunsplit((hostname_split.scheme, hostname_split.netloc, "", "", "")).strip('\n')
    form_url_split = urlparse.urlsplit(row[1])
    form_url = urlparse.urlunsplit((form_url_split.scheme, form_url_split.netloc, "", "", "")).strip('\n')
    try:
        username = row[3]
        try:
            # FIX: the original also called decode_password() once
            # *before* this try block, so a bad value crashed the whole
            # method instead of being caught here.
            password = self.decode_password(row[5])
            self.num_passwords += 1
        except Exception:
            print('ERROR - password = ', row[5])
        user_field = row[2]
        pass_field = row[4]
    except Exception:
        # Rows without the expected fields are blacklist entries.
        print('non password entry (blacklists - ignoring)')
    res = self.format_list_csv([website, username, form_url, user_field, pass_field, password])
    return res
示例2: zoom_article
def zoom_article(self, ticket_id, article_id):
    """Return the full text of an OTRS ticket article as tupled text.

    If the article is already cached in the local DB as text, the
    stored representation is evaluated and returned directly.
    Otherwise the article is fetched from the OTRS agent interface,
    normalized, cached in the DB, and returned.
    """
    art_descr = self.__db.article_description(article_id)
    if art_descr[4] & ART_TEXT:
        # Cached as text: the DB stores repr() of the tupled text.
        # NOTE(review): eval() of DB content — acceptable only if the
        # local cache DB is fully trusted; confirm.
        return eval(self.__db.article_message(article_id))
    self.echo("Zoom article:", ticket_id, article_id)
    # Keep scheme/netloc/path of the configured site; the query part
    # is rebuilt below from the zoom parameters.
    url_beg = urlsplit(self.runtime.get("site"))[:3]
    params = (
        ("Action", "AgentTicketZoom"), ("Subaction", "ArticleUpdate"),
        ("TicketID", ticket_id), ("ArticleID", article_id),
        ("OTRSAgentInterface", self.runtime["OTRSAgentInterface"]))
    url = urlunsplit(url_beg + (urlencode(params), ""))
    pg = TicketsPage(self.core)
    page = pg.load(url)
    if page is None:
        # Page could not be loaded; implicitly returns None.
        return
    mail_header = page.get("mail_header", [])
    if "mail_src" in page:
        # Message body lives in a separate resource: resolve its
        # (possibly relative) URL against the site's scheme and host.
        url = urlunsplit(url_beg[:2] + urlsplit(page["mail_src"])[2:])
        self.echo("Get message:", url)
        pg = MessagePage(self.core)
        try:
            mail_text = pg.load(url)
        except LoginError:
            mail_text = pg.login()
    else:
        mail_text = page["message_text"]
    if mail_header:
        # Prepend "name\tvalue" header lines, then a blank separator.
        mail_text.insert(0, ("\n",))
        for i in reversed(mail_header):
            mail_text.insert(0, ("%s\t%s\n" % i,))
    shrink_tupled_text(mail_text)
    # Cache the normalized text for subsequent calls.
    self.__db.article_message(article_id, repr(mail_text))
    return mail_text
示例3: encode
def encode(self, path, parameters=None):
    '''
    @see: EncoderPath.encode
    '''
    assert isinstance(path, (Path, str)), 'Invalid path %s' % path
    if isinstance(path, Path):
        # Build the resource path from the converted path segments.
        segments = [self.root, '/'.join(path.toPaths(self.converterPath))]
        if self.extension:
            segments.append('.')
            segments.append(self.extension)
        elif path.node.isGroup:
            # Group nodes get a trailing slash.
            segments.append('/')
        query = urlencode(parameters) if parameters else ''
        return urlunsplit((self.scheme, self.host, ''.join(segments), query, ''))
    if not path.strip().startswith('/'):
        # TODO: improve the relative path detection
        # This is an absolute path so we will return it as it is.
        return path
    # The path is relative to this server so we will convert it in an absolute path
    parts = urlsplit(path)
    return urlunsplit((self.scheme, self.host, parts.path, parts.query, parts.fragment))
示例4: to_python
def to_python(self, value):
    def _split(url):
        """
        Return the url split into a list of parts, raising
        ValidationError for URLs that urlsplit cannot parse.
        """
        try:
            return list(urlsplit(url))
        except ValueError:
            # urlparse.urlsplit can raise a ValueError with some
            # misformatted URLs.
            raise ValidationError(self.error_messages['invalid'], code='invalid')

    value = super().to_python(value)
    if not value:
        return value
    parts = _split(value)
    if not parts[0]:
        # Default to http:// when no scheme was supplied.
        parts[0] = 'http'
    if not parts[1]:
        # No netloc: the path segment actually holds the domain, so
        # promote it and re-split to separate any trailing path.
        parts[1], parts[2] = parts[2], ''
        parts = _split(urlunsplit(parts))
    return urlunsplit(parts)
示例5: oauth
def oauth(self, req, credentials=None, params=None):
    """Sign *req* with OAuth, working around flickr's HTTPS handling.

    Flickr supports HTTPS endpoints but computes its signature base
    string as if they were accessed over plain HTTP.  To make our
    signature match, the scheme is temporarily rewritten to "http"
    before signing and restored to "https" afterwards, so the request
    itself still goes over HTTPS.
    """
    # FIX: mutable default argument ({}) replaced with a None sentinel;
    # the original default dict was shared across calls and handed to
    # super().oauth(), which could mutate it.
    if params is None:
        params = {}
    parts = urlsplit(req.url)
    https = parts.scheme == "https"
    if https:
        # Remove the HTTPS scheme so the base string matches flickr's.
        req = req._replace(url=urlunsplit(parts._replace(scheme="http")))
    signed = super().oauth(req, credentials, params)
    if https:
        # Add back the HTTPS scheme on the signed request.
        signed_parts = urlsplit(signed.url)
        signed = signed._replace(url=urlunsplit(signed_parts._replace(scheme="https")))
    return signed
示例6: clean_url
def clean_url(value):
    """
    Taken from Django's URLField, this helps to normalize URLs.

    Note: despite the original docstring's claim, no real validation is
    performed here — a ValueError would only propagate if ``urlsplit``
    itself cannot parse the value (e.g. a malformed IPv6 netloc).
    Non-ASCII characters are silently dropped.

    Example:
    >>> clean_url("www.google.com")
    'http://www.google.com/'
    >>> clean_url("")
    ''
    """
    if not value:
        # Falsy input (empty string / None) is returned unchanged.
        return value
    value = value.strip()
    # Drop any non-ASCII characters.
    value = value.encode('ascii', 'ignore').decode('utf-8')
    url_fields = list(urlsplit(value))
    if not url_fields[0]:
        # If no URL scheme given, assume http://
        url_fields[0] = 'http'
    if not url_fields[1]:
        # Assume that if no domain is provided, that the path segment
        # contains the domain.
        url_fields[1] = url_fields[2]
        url_fields[2] = ''
        # Rebuild the url_fields list, since the domain segment may now
        # contain the path too.
        url_fields = list(urlsplit(urlunsplit(url_fields)))
    if not url_fields[2]:
        # the path portion may need to be added before query params
        url_fields[2] = '/'
    return urlunsplit(url_fields)
示例7: encode
def encode(self, path, parameters=None):
    """
    @see: EncoderPath.encode
    """
    assert isinstance(path, (Path, str)), "Invalid path %s" % path
    if isinstance(path, Path):
        # Assemble the resource path, then percent-encode it.
        pieces = [self.root, "/".join(path.toPaths(self.converterPath))]
        if self.extension:
            pieces.append(".")
            pieces.append(self.extension)
        elif path.node.isGroup:
            # Group nodes get a trailing slash.
            pieces.append("/")
        query = urlencode(parameters) if parameters else ""
        return urlunsplit((self.scheme, self.host, quote("".join(pieces)), query, ""))
    if not path.strip().startswith("/"):
        # TODO: improve the relative path detection
        # This is an absolute path so we will return it as it is.
        return quote(path)
    # The path is relative to this server so we will convert it in an absolute path
    parts = urlsplit(path)
    return urlunsplit((self.scheme, self.host, quote(parts.path), parts.query, parts.fragment))
示例8: authorizeApplication
def authorizeApplication(app_id, username, password):
    '''Authorize an application to access a systems data
    and get the user_id.

    Walks the three-step Enlighten web flow (auth page, login form,
    confirmation form) with a cookie-keeping opener and returns the
    user id from the final response's headers, or None on failure.
    '''
    scheme = 'https'
    base_url = 'enlighten.enphaseenergy.com'
    action = 'app_user_auth/new'
    query = p.urlencode({'app_id': app_id})
    # Step 1: open the authorization page; cookies persist via opener.
    request1 = p.urlunsplit((scheme, base_url, action, query, ''))
    logging.debug(request1)
    opener = r.build_opener(r.HTTPCookieProcessor())
    opener.addheaders = [('User-agent', 'Mozilla/5.0')]
    r1 = opener.open(request1)
    # Step 2: submit credentials together with the form's hidden fields.
    action, hiddens = EnphaseInterface._processPage(r1)
    payload = {'user[email]': username, 'user[password]': password}
    hiddens.update(payload)
    request2 = p.urlunsplit((scheme, base_url, action, query, ''))
    r2 = opener.open(request2, p.urlencode(hiddens).encode(encoding='UTF-8'))
    # Step 3: confirm the application authorization.
    action, hiddens = EnphaseInterface._processPage(r2)
    request3 = p.urlunsplit((scheme, base_url, action, query, ''))
    r3 = opener.open(request3, p.urlencode(hiddens).encode(encoding='UTF-8'))
    if 'enlighten-api-user-id' not in r3.info():
        logging.critical('Failed to aquire user_id')
        # FIX: return early — previously execution fell through and the
        # header lookup below raised KeyError right after logging.
        return None
    logging.debug(r3.info()['enlighten-api-user-id'])
    return r3.info()['enlighten-api-user-id']
示例9: to_python
def to_python(self, value):
    def _split_url(url):
        """
        Return the url as a list of parts, raising ValidationError
        for URLs that urlsplit cannot parse.
        """
        try:
            return list(urlsplit(url))
        except ValueError:
            # urlparse.urlsplit can raise a ValueError with some
            # misformatted URLs.
            raise ValidationError(self.error_messages['invalid'])

    value = super(URLField, self).to_python(value)
    if not value:
        return value
    fields = _split_url(value)
    if not fields[0]:
        # Default to http:// when no scheme was supplied.
        fields[0] = 'http'
    if not fields[1]:
        # No netloc: the path segment actually holds the domain, so
        # promote it and re-split to separate any trailing path.
        fields[1], fields[2] = fields[2], ''
        fields = _split_url(urlunsplit(fields))
    if not fields[2]:
        # Ensure a path component precedes any query parameters.
        fields[2] = '/'
    return urlunsplit(fields)
示例10: items
def items(id:int=None) -> str:
    """Bottle view listing feed items with optional filters and paging.

    Query params: starred/read ('1'/'0'), channel (repeatable channel
    ids, plus the optional path *id*), since_id, max_id, count
    (default 25), page (default 1), q (substring search).
    Returns a dict for JSON clients, a rendered template otherwise.
    """
    valid_params = {'1': True, '0': False}
    starred = valid_params.get(request.query.getone('starred'))
    read = valid_params.get(request.query.getone('read'))
    channel_ids = [int(i) for i in request.query.getlist('channel')]
    channel_ids += [id] if id is not None else []
    since_id = request.query.since_id
    max_id = request.query.max_id
    count = int(request.query.count) if request.query.count else 25
    page = int(request.query.page) if request.query.page else 1
    search = request.query.q
    query = Item.select()
    if channel_ids:
        query = query.where(Item.channel << channel_ids)
    # FIX: compare with None — a False filter value ('?starred=0' /
    # '?read=0') is falsy, so the original `if starred:` skipped the
    # where() clause and returned unfiltered results instead of the
    # unstarred/unread items the client asked for.
    if starred is not None:
        query = query.where(Item.starred == starred)
    if read is not None:
        query = query.where(Item.read == read)
    if since_id:
        query = query.where(Item.id >= since_id)
    if max_id:
        query = query.where(Item.id <= max_id)
    if search:
        search = '%' + search + '%'
        query = query.where(Item.title ** search | Item.description ** search | Item.author ** search)
    if page and count:
        query = query.paginate(page, count)
    # Mark every item on this page as no longer "new".
    for it in query:
        it.new = False
        it.save()
    out = {'items': list(query.order_by(Item.updated.desc()).limit(count))}
    channels = Channel.select().order_by(Channel.title)
    for c in channels:
        c.filter = True if c.id in channel_ids else False
    # Build next/prev links preserving the current query string.
    params = {}
    for p in request.query.keys():
        params[p] = request.query.getall(p)
    params['page'] = page + 1
    out['next'] = urlunsplit(('', '', request.fullpath, urlencode(params, doseq=True), ''))
    params['page'] = page - 1 if page > 1 else 1
    out['prev'] = urlunsplit(('', '', request.fullpath, urlencode(params, doseq=True), '')) if page > 1 else None
    if request_accept_json():
        return out
    else:
        return template('index', out, is_active=is_active, favicon=favicon, date_format=date_format, channels=channels)
示例11: assert_redirects_to
def assert_redirects_to(self, response, url_name, status_code=302,
        target_status_code=200, host=None, msg_prefix='', *args, **kwargs):
    '''
    Assert that the response is a redirect to a resolved url and that the URL can be loaded.
    It differs from Django TestCase.assertRedirects on the following points:
    - Takes a resolvable url name as parameter
    - Query params are not taken into account for URL comparison, only for status code retrieval.
    '''
    if msg_prefix:
        msg_prefix += ": "
    if hasattr(response, 'redirect_chain'):
        # The request was a followed redirect
        self.assertTrue(len(response.redirect_chain) > 0,
            msg_prefix + "Response didn't redirect as expected: Response"
            " code was %d (expected %d)" %
            (response.status_code, status_code))
        self.assertEqual(response.redirect_chain[0][1], status_code,
            msg_prefix + "Initial response didn't redirect as expected:"
            " Response code was %d (expected %d)" %
            (response.redirect_chain[0][1], status_code))
        # The last hop of the chain supplies the URL compared at the end.
        url, status_code = response.redirect_chain[-1]
        self.assertEqual(response.status_code, target_status_code,
            msg_prefix + "Response didn't redirect as expected: Final"
            " Response code was %d (expected %d)" %
            (response.status_code, target_status_code))
    else:
        # Not a followed redirect
        self.assertEqual(response.status_code, status_code,
            msg_prefix + "Response didn't redirect as expected: Response"
            " code was %d (expected %d)" %
            (response.status_code, status_code))
        url = response['Location']
        # Strip query/fragment before comparing, per the docstring.
        scheme, netloc, path, query, fragment = urlsplit(url)
        url = urlunsplit((scheme, netloc, path, None, None))
        redirect_response = response.client.get(path, QueryDict(query))
        # Get the redirection page, using the same client that was used
        # to obtain the original response.
        self.assertEqual(redirect_response.status_code, target_status_code,
            msg_prefix + "Couldn't retrieve redirection page '%s':"
            " response code was %d (expected %d)" %
            (path, redirect_response.status_code, target_status_code))
    path = reverse(url_name, *args, **kwargs)
    expected_url = urlunsplit(('http', host or 'testserver', path, None, None))
    self.assertEqual(url, expected_url,
        msg_prefix + "Response redirected to '%s', expected '%s'" %
        (url, expected_url))
示例12: compute_url_from_payload
def compute_url_from_payload(self, data_item):
    """Build the fuzz target URL by injecting the test payload into the
    queued item's target location (whole URL, query, or fragment).

    data_item fields used: URL, TARGET ('url', 'query' or 'fragment'),
    PARAM (parameter name to inject into; may be empty) and TEST (the
    payload string).  Raises for unsupported or missing targets.
    """
    url = data_item[DomFuzzerQueueTable.URL]
    target = data_item[DomFuzzerQueueTable.TARGET]
    param = data_item[DomFuzzerQueueTable.PARAM]
    test = data_item[DomFuzzerQueueTable.TEST]
    if 'url' == target:
        if not param:
            # Append the payload directly to the raw URL.
            return url + test
        else:
            # TODO: fix me
            return url + test + '=X'
    splitted = urlparse.urlsplit(url)
    if 'fragment' == target:
        url_field = splitted.fragment
    elif 'query' == target:
        url_field = splitted.query
    else:
        raise Exception('unsupported target: %s' % (target))
    if not url_field:
        raise Exception('missing URL field in url: %s' % (url))
    else:
        # TODO: this duplicates previous work, so could consider pre-storing target urls?
        url_io = StringIO()
        # Split into name=value pairs, keeping the ';'/'&' delimiters
        # (re_delim is assumed to capture them — TODO confirm).
        pairs = self.re_delim.split(url_field)
        found = False
        for offset in range(0, len(pairs)):
            values = pairs[offset]
            if values == ';' or values == '&':
                # Pass delimiters through unchanged.
                url_io.write(values)
                continue
            if '=' in values:
                name, value = values.split('=', 1)
                separator = '='
            else:
                name, value = values, ''
                separator = ''
            if name == param:
                # Inject the payload into the matching parameter value.
                value += test
                found = True
            url_io.write(name)
            url_io.write(separator)
            url_io.write(value)
        if not found:
            # No matching parameter: append the payload at the end.
            url_io.write(test)
    if 'fragment' == target:
        target_url = urlparse.urlunsplit((splitted.scheme, splitted.netloc, splitted.path, splitted.query, url_io.getvalue()))
    elif 'query' == target:
        target_url = urlparse.urlunsplit((splitted.scheme, splitted.netloc, splitted.path, url_io.getvalue(), splitted.fragment))
    return target_url
示例13: doEncodePath
def doEncodePath(path):
    '''
    Do encode the path.
    '''
    assert isinstance(path, str), 'Invalid path %s' % path
    parts = urlsplit(path)
    if parts.scheme or parts.netloc:
        # Already an absolute URI: rebuild it unchanged.
        return urlunsplit((parts.scheme, parts.netloc, parts.path, parts.query, parts.fragment))
    # Is a relative URI so we append the scheme and host.
    return urlunsplit((scheme, host, parts.path, parts.query, parts.fragment))
示例14: children
def children(root, soup):
    """ Return a set of child URLs within a HTML soup,
    relative to the given root.

    Only http(s) links on root.domain (or its subdomains) are kept;
    the root URL itself (with or without a trailing "/") is excluded.
    """
    # Establish the root URL base parameters
    root_s = urlparse.urlsplit(root.url)
    root_url = urlparse.urlunsplit(root_s)
    # Root variant with an explicit "/" path, excluded alongside root_url.
    root_url_slash = urlparse.urlunsplit(
        (root_s.scheme, root_s.netloc, "/", root_s.query, "")
    )
    # Collect all interesting <a> tags from the soup and obtain their href-s:
    fetch = set()
    for link in soup.find_all("a"):
        href = link.get("href")
        if not href:
            continue
        # Split the href into its components
        s = urlparse.urlsplit(href)
        if s.scheme and s.scheme not in {"http", "https"}:
            # Not HTTP
            continue
        if s.netloc and not (
            s.netloc == root.domain or s.netloc.endswith("." + root.domain)
        ):
            # External domain - we're not interested
            continue
        # Seems to be a bug in urllib: fragments are put into the
        # path if there is no canonical path
        newpath = s.path
        if newpath.startswith("#") or newpath.startswith("/#"):
            newpath = ""
        if not newpath and not s.query:
            # No meaningful path info present
            continue
        # Make sure the newpath is properly urlencoded
        if newpath:
            newpath = urlparse.quote(newpath)
        # Fill in missing stuff from the root URL base parameters
        # (fragment is always dropped).
        newurl = (
            s.scheme or root_s.scheme,
            s.netloc or root_s.netloc,
            newpath,
            s.query,
            ""
        )
        # Make a complete new URL to fetch
        url = urlparse.urlunsplit(newurl)
        if url in {root_url, root_url_slash}:
            # Exclude the root URL
            continue
        # Looks legit: add to the fetch set
        fetch.add(url)
    return fetch
示例15: webfuzzer_populate_response_id
def webfuzzer_populate_response_id(self, Id):
    """Populate the standard web fuzzer form (method, base URL and
    request template) from a stored response row.

    The generated template uses ${method}, ${request_uri}, ${host} and
    ${user_agent} placeholders for the fuzzer to substitute.
    """
    self.clear_data_dictionary()
    row = self.Data.read_responses_by_id(self.cursor, Id)
    if not row:
        # Unknown response id; nothing to populate.
        return
    responseItems = interface.data_row_to_response_items(row)
    url = responseItems[ResponsesTable.URL]
    reqHeaders = responseItems[ResponsesTable.REQ_HEADERS].decode('utf-8', 'ignore')
    reqData = responseItems[ResponsesTable.REQ_DATA].decode('utf-8', 'ignore')
    method = responseItems[ResponsesTable.REQ_METHOD]
    splitted = urlparse.urlsplit(url)
    # Base URL keeps scheme/netloc/path; query and fragment move into
    # the rebuilt request line below.
    base_url = urlparse.urlunsplit((splitted[0], splitted[1], splitted[2], '', ''))
    req_loc = ("", "", "", splitted.query, splitted.fragment)
    useragent = self.framework.useragent()
    # NOTE(review): has_cookie is never used below.
    has_cookie = False
    template = StringIO()
    template.write('${method} ${request_uri}%s HTTP/1.1\n' % urlparse.urlunsplit(req_loc))
    first = True
    for line in reqHeaders.splitlines():
        if not line:
            # Blank line terminates the header block.
            break
        if first and self.re_request.match(line):
            # Skip the original request line; it was rebuilt above.
            first = False
            continue
        if ':' in line:
            name, value = [v.strip() for v in line.split(':', 1)]
            lname = name.lower()
            if 'host' == lname:
                if splitted.hostname and value == splitted.hostname:
                    # Replace a matching Host header with a placeholder.
                    template.write('Host: ${host}\n')
                    continue
            elif 'user-agent' == lname:
                if useragent == value:
                    # Replace the framework's own UA with a placeholder.
                    template.write('User-Agent: ${user_agent}\n')
                    continue
        template.write(line)
        template.write('\n')
    template.write('\n')
    template.write(reqData)
    self.set_combo_box_text(self.mainWindow.stdFuzzerReqMethod, method.upper())
    self.mainWindow.wfStdUrlEdit.setText(base_url)
    self.mainWindow.wfStdEdit.setPlainText(template.getvalue())