This article collects typical usage examples of the read method in Python's urllib.request. If you are wondering what request.read does, how to call it, or where to find examples of it, the curated code samples below may help. You can also explore further usage examples of the containing module, urllib.request.
The following presents 15 code examples of the request.read method, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code samples.
Example 1: load_remote_manifest
# Required import: from urllib import request [as alias]
# Or: from urllib.request import read [as alias]
def load_remote_manifest(url: str) -> Dict[str, Any]:
    """
    Converts a remote yaml file into a Python dictionary
    """
    tmp_dir, _ = get_tmp_dir()
    try:
        request = urllib.request.urlopen(url, timeout=30)
    except urllib.error.HTTPError as e:  # type: ignore
        e.msg += " " + url
        raise
    manifest_path = os.path.join(tmp_dir, str(uuid.uuid4()) + ".yaml")
    with open(manifest_path, "wb") as manifest:
        while True:
            buffer = request.read(BLOCK_SIZE)
            if not buffer:
                # There is nothing more to read
                break
            manifest.write(buffer)
    try:
        result = load_local_manifest(manifest_path)
    finally:
        os.remove(manifest_path)
    return result
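For reference, the same download-and-parse flow can be sketched without the project-specific helpers used above (get_tmp_dir, BLOCK_SIZE, load_local_manifest). This standalone variant is only a sketch under the assumption that PyYAML is available; it parses the body in memory instead of via a temp file:

import urllib.request
from typing import Any, Dict

import yaml  # PyYAML; an external dependency assumed for this sketch


def load_remote_manifest_simple(url: str) -> Dict[str, Any]:
    """Fetch a remote YAML file and parse it in memory (no temp file)."""
    with urllib.request.urlopen(url, timeout=30) as response:
        # read() with no size argument returns the whole body as bytes;
        # yaml.safe_load accepts bytes directly.
        return yaml.safe_load(response.read())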
Example 2: search
# Required import: from urllib import request [as alias]
# Or: from urllib.request import read [as alias]
def search(self, query):
    request = urllib.request.urlopen(urllib.request.Request(
        'https://api.apidomain.info/list?' + urllib.parse.urlencode({
            'sort': 'relevance',
            'quality': '720p,1080p,3d',
            'page': 1,
            'keywords': query,
        }),
        headers={
            'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 '
                          '(KHTML, like Gecko) Chrome/62.0.3202.94 Safari/537.36'
        })
    )
    results = json.loads(request.read())
    return results
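The pattern in this example (encode query parameters, send a browser-like User-Agent, decode the JSON body) generalizes beyond this particular API. A minimal self-contained sketch, with a hypothetical endpoint:

import json
import urllib.parse
import urllib.request


def get_json(base_url, **params):
    """GET a JSON endpoint and return the decoded body."""
    req = urllib.request.Request(
        base_url + '?' + urllib.parse.urlencode(params),
        headers={'User-Agent': 'Mozilla/5.0'})
    with urllib.request.urlopen(req, timeout=10) as resp:
        return json.loads(resp.read())

# Hypothetical usage; the endpoint is a placeholder, not a real service.
# results = get_json('https://example.com/list', sort='relevance', page=1)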
Example 3: get_segList
# Required import: from urllib import request [as alias]
# Or: from urllib.request import read [as alias]
def get_segList(stocknumber):
    segList = []
    for pageNum in range(1, 21):
        urlPage = 'http://guba.eastmoney.com/list,' + \
            str(stocknumber) + '_' + str(pageNum) + '.html'
        stockPageRequest = urllib.request.urlopen(urlPage)
        htmlTitleContent = str(stockPageRequest.read(), 'utf-8')
        titlePattern = re.compile(
            r'<span class="l3">(.*?)title="(.*?)"(.*?)<span class="l6">(\d\d)-(\d\d)</span>', re.S)
        gotTitle = re.findall(titlePattern, htmlTitleContent)
        for i in range(len(gotTitle)):
            # dateCount is not defined here; it appears to come from elsewhere
            # in the original module.
            for j in range(len(dateCount)):
                if int(gotTitle[i][3]) == dateCount[j][0] and int(gotTitle[i][4]) == dateCount[j][1]:
                    segSentence = list(jieba.cut(gotTitle[i][1], cut_all=True))
                    segList.append(segSentence)
    return segList
# Classifier construction and data persistence
Example 4: _download_img
# Required import: from urllib import request [as alias]
# Or: from urllib.request import read [as alias]
def _download_img(self, image_url: str):
    """Download single image.

    Args:
        image_url (str): Image url.

    Returns:
        Union[str, None]: Image path if image was successfully downloaded. Otherwise, None.
    """
    image_name = self._encode_image_name(image_url)
    image_path = join(self.dest_dir, image_name)
    if not isfile(image_path):
        try:
            # TODO use request.get with accept jpg?
            request = urllib.request.urlopen(image_url, timeout=5)
            image = request.read()
            if imghdr.what("", image) == "jpeg":
                with open(image_path, "wb") as f:
                    f.write(image)
        except Exception as e:
            print("Error downloading {}: {}".format(image_url, e), file=sys.stderr)
            return None
    return image_path
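One caveat worth noting: the imghdr module used above was deprecated in Python 3.11 and removed in 3.13 (PEP 594). On newer interpreters, the JPEG check can be approximated by inspecting the file's magic bytes; a small sketch of that substitution:

def looks_like_jpeg(data: bytes) -> bool:
    """Rough stand-in for imghdr.what("", data) == "jpeg".

    JPEG files begin with the bytes FF D8 FF (the SOI marker plus the
    start of the next marker); this is a heuristic, not a full format check.
    """
    return data[:3] == b"\xff\xd8\xff"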
Example 5: execute
# Required import: from urllib import request [as alias]
# Or: from urllib.request import read [as alias]
def execute(_):
    """Run integration tests."""
    command = 'run_server'
    indicator = b'Booting worker'
    try:
        lines = []
        server = common.execute_async(
            'python -u butler.py {} --skip-install-deps'.format(command))
        test_utils.wait_for_emulator_ready(
            server,
            command,
            indicator,
            timeout=RUN_SERVER_TIMEOUT,
            output_lines=lines)

        # Sleep a small amount of time to ensure the server is definitely ready.
        time.sleep(1)

        # Call setup ourselves instead of passing --bootstrap since we have no idea
        # when that finishes.
        # TODO(ochang): Make bootstrap a separate butler command and just call that.
        common.execute(
            ('python butler.py run setup '
             '--non-dry-run --local --config-dir={config_dir}'
            ).format(config_dir=constants.TEST_CONFIG_DIR),
            exit_on_error=False)

        request = urllib.request.urlopen('http://' + constants.DEV_APPSERVER_HOST)
        request.read()  # Raises exception on error
    except Exception:
        print('Error occurred:')
        print(b''.join(lines))
        raise
    finally:
        server.terminate()

    # TODO(ochang): Test that bot runs, and do a basic fuzzing session to ensure
    # things work end to end.
    print('All end-to-end integration tests passed.')
Example 6: fetch_schedule_actions_s3
# Required import: from urllib import request [as alias]
# Or: from urllib.request import read [as alias]
def fetch_schedule_actions_s3(url):
    source = parse_s3_url(url)
    print(source)
    s3 = boto3.client('s3')
    try:
        element = s3.get_object(**source)
    except Exception:
        print('Couldn\'t read %s' % url)
        return '[]'
    return element['Body'].read().decode('utf-8')
Example 7: fetch_schedule_actions_from_url
# Required import: from urllib import request [as alias]
# Or: from urllib.request import read [as alias]
def fetch_schedule_actions_from_url(url):
    request = urllib.request.urlopen(url)
    try:
        content = request.read().decode('utf-8')
    except Exception:
        content = None
    finally:
        request.close()
    return content
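Since the response object returned by urlopen is a context manager, the explicit try/finally close above can also be written with a with block. A sketch of the equivalent, with the broad exception handling narrowed to the errors that urlopen and decode actually raise:

import urllib.error
import urllib.request


def fetch_text(url, timeout=10):
    """Return the decoded response body, or None on failure."""
    try:
        with urllib.request.urlopen(url, timeout=timeout) as resp:
            return resp.read().decode('utf-8')
    except (urllib.error.URLError, UnicodeDecodeError):
        return None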
Example 8: get_html
# Required import: from urllib import request [as alias]
# Or: from urllib.request import read [as alias]
def get_html(url):
    tries = 5
    req = urllib.request.Request(url)
    req.add_header('User-agent', 'Mozilla/5.0 (Linux x86_64)')
    # Add DoNotTrack header, do the right thing even if nobody cares
    req.add_header('DNT', '1')
    request = None
    while tries > 0:
        try:
            request = urllib.request.urlopen(req)
            break
        except socket.timeout:
            if debug:
                raise
            tries -= 1
        # HTTPError is a subclass of URLError, so it must be caught first
        except urllib.error.HTTPError as e:
            if debug:
                raise
            print("HTTP Error " + str(e.code) + ": " + e.reason)
            print("Aborting...")
            exit()
        except urllib.error.URLError as e:
            if debug:
                raise
            print("URL Error: " + str(e.reason))
            print("Aborting...")
            exit()
    if request is None:
        print("Connection timed out repeatedly")
        print("Aborting...")
        exit()
    # html.parser generates problems, I could fix them, but switching to lxml
    # is easier and faster
    soup = BeautifulSoup(request.read(), "lxml")
    return soup
Example 9: get_cover
# Required import: from urllib import request [as alias]
# Or: from urllib.request import read [as alias]
def get_cover(cover_url):
    print(cover_url)
    tries = 5
    while tries > 0:
        try:
            req = urllib.request.Request(cover_url)
            req.add_header('User-agent', 'Mozilla/5.0 (Linux x86_64)')
            request = urllib.request.urlopen(req)
            temp = request.read()
            with open('cover.jpg', 'wb') as f:
                f.write(temp)
            return 1
        except Exception as error:
            tries -= 1
            print("Can't retrieve the cover")
            print(error)
    return 0
###############################################################################
# TODO: Remove this block when appropriate
# Workaround for bug in ebooklib 0.15.
# Something goes wrong when adding an image as a cover, and we need to work
# around it by replacing the get_template function with our own that takes care
# of properly encoding the template as utf8.
Example 10: wms
# Required import: from urllib import request [as alias]
# Or: from urllib.request import read [as alias]
def wms(minx, miny, maxx, maxy, service, lyr, epsg, style, img, w, h):
    """Retrieve a WMS map image from the specified service
    and save it as a JPEG."""
    wms = service
    wms += "?SERVICE=WMS&VERSION=1.1.1&REQUEST=GetMap&"
    wms += "LAYERS={}".format(lyr)
    wms += "&STYLES={}&".format(style)
    wms += "SRS=EPSG:{}&".format(epsg)
    wms += "BBOX={},{},{},{}&".format(minx, miny, maxx, maxy)
    wms += "WIDTH={}&".format(w)
    wms += "HEIGHT={}&".format(h)
    wms += "FORMAT=image/jpeg"
    wmsmap = urllib.request.urlopen(wms)
    with open(img + ".jpg", "wb") as f:
        f.write(wmsmap.read())
Author: PacktPublishing, Project: Learning-Geospatial-Analysis-with-Python-Third-Edition, Lines: 17, Source file: B13346_10_01-gpx-reporter.py
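A hypothetical call for illustration; the endpoint, layer, and style below are placeholders, and a real WMS service will define its own layer names and supported EPSG codes:

# All arguments here are made-up placeholders.
wms(-90.0, 30.0, -89.0, 31.0,
    "https://example.com/geoserver/wms",  # hypothetical service URL
    "basemap",                            # hypothetical layer name
    4326,                                 # WGS 84
    "",                                   # default style
    "map_output", 600, 600)               # saves map_output.jpg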
Example 11: play
# Required import: from urllib import request [as alias]
# Or: from urllib.request import read [as alias]
def play(self, url):
    request = urllib.request.urlopen(
        'http://{}:{}/play/'.format(self.server, self.port),
        data=urllib.parse.urlencode({
            'url': url
        }).encode()
    )
    self.state = PlayerState.PLAY.value
    return request.read()
Example 12: pause
# Required import: from urllib import request [as alias]
# Or: from urllib.request import read [as alias]
def pause(self):
    http = urllib3.PoolManager()
    request = http.request('POST',
                           'http://{}:{}/pause/'.format(self.server, self.port))
    self.state = PlayerState.PAUSE.value
    # Note: urllib3 preloads the response body by default; the documented
    # accessor for it is request.data rather than request.read().
    return request.read()
Example 13: stop
# Required import: from urllib import request [as alias]
# Or: from urllib.request import read [as alias]
def stop(self):
    http = urllib3.PoolManager()
    request = http.request('POST',
                           'http://{}:{}/stop/'.format(self.server, self.port))
    self.state = PlayerState.STOP.value
    return request.read()
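Both urllib3 examples above call .read() on the pooled response; since urllib3 preloads the body by default, the conventional accessor is the .data attribute. A sketch that also reuses a single PoolManager instead of creating one per call:

import urllib3

http = urllib3.PoolManager()  # one shared pool, rather than one per request


def post_command(server, port, action):
    """POST to a player endpoint and return the response body as bytes."""
    resp = http.request('POST', 'http://{}:{}/{}/'.format(server, port, action))
    return resp.data  # body is preloaded by default; .data holds it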
Example 14: dicopinionResult
# Required import: from urllib import request [as alias]
# Or: from urllib.request import read [as alias]
def dicopinionResult(request):
    dicStockNum = request.GET['dicStockNum']
    dateCount = setDate()
    stock_name = get_stock_name(dicStockNum)
    for pageNum in range(1, 10):
        urlPage = 'http://guba.eastmoney.com/list,' + \
            str(dicStockNum) + ',f_' + str(pageNum) + '.html'
        stockPageRequest = urllib.request.urlopen(urlPage)
        htmlTitleContent = str(stockPageRequest.read(), 'utf-8')
        titlePattern = re.compile(
            r'<span class="l3">(.*?)title="(.*?)"(.*?)<span class="l6">(\d\d)-(\d\d)</span>', re.S)
        gotTitle = re.findall(titlePattern, htmlTitleContent)
        print(type(gotTitle))
        for i in range(len(gotTitle)):
            for j in range(len(dateCount)):
                if int(gotTitle[i][3]) == dateCount[j][0] and int(gotTitle[i][4]) == dateCount[j][1]:
                    dateCount[j][5] += 1
                    segList = list(jieba.cut(gotTitle[i][1], cut_all=True))
                    # print(tx_npl(gotTitle[i][1]))
                    for eachItem in segList:
                        if eachItem != ' ':
                            if eachItem in positiveWord:
                                dateCount[j][2] += 1
                                continue
                            elif eachItem in negativeWord:
                                dateCount[j][3] += 1
                                continue
                            elif eachItem in neutralWord:
                                dateCount[j][4] += 1
    return render(request, 'dicopinionResult.html', {'stock_name': stock_name, 'dateCount': json.dumps(dateCount)})
Example 15: nbopinionResult
# Required import: from urllib import request [as alias]
# Or: from urllib.request import read [as alias]
def nbopinionResult(request):
    Nb_stock_number = request.GET['Nb_stock_number']
    dateCount = setDate()
    stock_name = get_stock_name(Nb_stock_number)
    homedir = os.getcwd()
    clf = joblib.load(homedir + '/StockVisualData/Clf.pkl')
    vectorizer = joblib.load(homedir + '/StockVisualData/Vect')
    transformer = joblib.load(homedir + '/StockVisualData/Tfidf')
    for pageNum in range(1, 21):
        urlPage = 'http://guba.eastmoney.com/list,' + \
            str(Nb_stock_number) + '_' + str(pageNum) + '.html'
        stockPageRequest = urllib.request.urlopen(urlPage)
        htmlTitleContent = str(stockPageRequest.read(), 'utf-8')
        titlePattern = re.compile(
            r'<span class="l3">(.*?)title="(.*?)"(.*?)<span class="l6">(\d\d)-(\d\d)</span>', re.S)
        gotTitle = re.findall(titlePattern, htmlTitleContent)
        for i in range(len(gotTitle)):
            text_predict = []
            for j in range(len(dateCount)):
                if int(gotTitle[i][3]) == dateCount[j][0] and int(gotTitle[i][4]) == dateCount[j][1]:
                    dateCount[j][5] += 1
                    seg_list = list(jieba.cut(gotTitle[i][1], cut_all=True))
                    seg_text = " ".join(seg_list)
                    text_predict.append(seg_text)
                    text_predict = np.array(text_predict)
                    text_frequency = vectorizer.transform(text_predict)
                    new_tfidf = transformer.transform(text_frequency)
                    predicted = clf.predict(new_tfidf)
                    if predicted == '积极':    # "positive"
                        dateCount[j][2] += 1
                        continue
                    elif predicted == '消极':  # "negative"
                        dateCount[j][3] += 1
                        continue
                    elif predicted == '中立':  # "neutral"
                        dateCount[j][4] += 1
    return render(request, 'nbopinionResult.html', {'stock_name': stock_name, 'dateCount': json.dumps(dateCount)})
# Set up the date array