This article collects typical usage examples of the Python class google.cloud.vision.ImageAnnotatorClient. If you are wondering exactly what vision.ImageAnnotatorClient does and how to use it, the curated code samples below may help. You can also explore the containing module, google.cloud.vision, for further usage examples.
The following presents 15 code examples of vision.ImageAnnotatorClient, ordered by popularity by default.
Example 1: annotate
# Required import: from google.cloud import vision [as alias]
# Or: from google.cloud.vision import ImageAnnotatorClient [as alias]
def annotate(path):
    """Returns web annotations given the path to an image."""
    # [START vision_web_detection_tutorial_annotate]
    client = vision.ImageAnnotatorClient()

    if path.startswith('http') or path.startswith('gs:'):
        image = types.Image()
        image.source.image_uri = path
    else:
        with io.open(path, 'rb') as image_file:
            content = image_file.read()

        image = types.Image(content=content)

    web_detection = client.web_detection(image=image).web_detection
    # [END vision_web_detection_tutorial_annotate]

    return web_detection
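The WebDetection message returned by annotate() bundles several result lists. A minimal sketch of reading them, assuming a hypothetical image URL (the field names follow the Vision API's WebDetection message):

annotations = annotate('https://example.com/photo.jpg')  # hypothetical URL

for label in annotations.best_guess_labels:
    print('Best guess label: {}'.format(label.label))

for entity in annotations.web_entities:
    print('{} (score: {})'.format(entity.description, entity.score))

for page in annotations.pages_with_matching_images:
    print('Page with matching image: {}'.format(page.url))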
Example 2: detect_labels_uri
# Required import: from google.cloud import vision [as alias]
# Or: from google.cloud.vision import ImageAnnotatorClient [as alias]
def detect_labels_uri(uri):
    """Detects labels in the file located in Google Cloud Storage or on the
    Web."""
    from google.cloud import vision
    client = vision.ImageAnnotatorClient()
    image = vision.types.Image()
    image.source.image_uri = uri

    response = client.label_detection(image=image)
    labels = response.label_annotations
    print('Labels:')

    for label in labels:
        print(label.description)

    if response.error.message:
        raise Exception(
            '{}\nFor more info on error messages, check: '
            'https://cloud.google.com/apis/design/errors'.format(
                response.error.message))
# [END vision_label_detection_gcs]

# [START vision_landmark_detection]
Example 3: detect_landmarks_uri
# Required import: from google.cloud import vision [as alias]
# Or: from google.cloud.vision import ImageAnnotatorClient [as alias]
def detect_landmarks_uri(uri):
    """Detects landmarks in the file located in Google Cloud Storage or on the
    Web."""
    from google.cloud import vision
    client = vision.ImageAnnotatorClient()
    image = vision.types.Image()
    image.source.image_uri = uri

    response = client.landmark_detection(image=image)
    landmarks = response.landmark_annotations
    print('Landmarks:')

    for landmark in landmarks:
        print(landmark.description)

    if response.error.message:
        raise Exception(
            '{}\nFor more info on error messages, check: '
            'https://cloud.google.com/apis/design/errors'.format(
                response.error.message))
# [END vision_landmark_detection_gcs]

# [START vision_logo_detection]
Example 4: localize_objects_uri
# Required import: from google.cloud import vision [as alias]
# Or: from google.cloud.vision import ImageAnnotatorClient [as alias]
def localize_objects_uri(uri):
    """Localize objects in the image on Google Cloud Storage

    Args:
        uri: The path to the file in Google Cloud Storage (gs://...)
    """
    from google.cloud import vision
    client = vision.ImageAnnotatorClient()

    image = vision.types.Image()
    image.source.image_uri = uri

    objects = client.object_localization(
        image=image).localized_object_annotations

    print('Number of objects found: {}'.format(len(objects)))
    for object_ in objects:
        print('\n{} (confidence: {})'.format(object_.name, object_.score))
        print('Normalized bounding polygon vertices: ')
        for vertex in object_.bounding_poly.normalized_vertices:
            print(' - ({}, {})'.format(vertex.x, vertex.y))
# [END vision_localize_objects_gcs]
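The vertices printed above are normalized to the range [0, 1]. A minimal sketch of scaling them back to pixel coordinates and drawing the boxes, assuming a local copy of the image and that Pillow is installed (neither is part of the original sample):

from PIL import Image, ImageDraw  # assumption: Pillow is available

def draw_object_boxes(local_path, objects, out_path='annotated.jpg'):
    """Scale normalized vertices to pixel coordinates and outline each object."""
    img = Image.open(local_path)
    draw = ImageDraw.Draw(img)
    width, height = img.size
    for object_ in objects:
        # Each vertex is a fraction of the image width/height.
        box = [(vertex.x * width, vertex.y * height)
               for vertex in object_.bounding_poly.normalized_vertices]
        draw.polygon(box, outline='red')
    img.save(out_path)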
Example 5: detect_face
# Required import: from google.cloud import vision [as alias]
# Or: from google.cloud.vision import ImageAnnotatorClient [as alias]
def detect_face(face_file, max_results=4):
    """Uses the Vision API to detect faces in the given file.

    Args:
        face_file: A file-like object containing an image with faces.

    Returns:
        An array of Face objects with information about the picture.
    """
    # [START vision_face_detection_tutorial_client]
    client = vision.ImageAnnotatorClient()
    # [END vision_face_detection_tutorial_client]

    content = face_file.read()
    image = types.Image(content=content)

    return client.face_detection(
        image=image, max_results=max_results).face_annotations
# [END vision_face_detection_tutorial_send_request]

# [START vision_face_detection_tutorial_process_response]
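A minimal usage sketch for the helper above, assuming a hypothetical local image path; joy_likelihood and bounding_poly are standard fields of the returned FaceAnnotation objects:

with open('resources/faces.jpg', 'rb') as image_file:  # hypothetical path
    faces = detect_face(image_file, max_results=4)

print('Found {} face(s)'.format(len(faces)))
for face in faces:
    # joy_likelihood is a Likelihood enum value such as VERY_LIKELY.
    print('Joy likelihood: {}'.format(face.joy_likelihood))
    vertices = ['({},{})'.format(v.x, v.y)
                for v in face.bounding_poly.vertices]
    print('Face bounds: {}'.format(','.join(vertices)))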
Example 6: detect_labels_uri
# Required import: from google.cloud import vision [as alias]
# Or: from google.cloud.vision import ImageAnnotatorClient [as alias]
def detect_labels_uri(uri):
    """Detects labels in the image file located in Google Cloud Storage or on
    the Web and returns a comma-separated list of labels. This will return an
    empty string if not passed a valid image file.

    Args:
        uri: a string link to a photo in gcs or on the web

    (Adapted from:
    https://github.com/GoogleCloudPlatform/python-docs-samples/blob/master/vision/cloud-client/detect/detect.py)
    """
    # Initialize the Cloud Vision API client and image object.
    client = vision.ImageAnnotatorClient()
    image = types.Image()
    image.source.image_uri = uri

    # Send an API call for this image, extract label descriptions
    # and return a comma-space separated string.
    response = client.label_detection(image=image)
    labels = response.label_annotations
    label_list = [l.description for l in labels]
    return ', '.join(label_list)
Example 7: getImageTags
# Required import: from google.cloud import vision [as alias]
# Or: from google.cloud.vision import ImageAnnotatorClient [as alias]
def getImageTags(bucketName, fileName):
    client = vision.ImageAnnotatorClient()
    image_uri = "gs://" + bucketName + "/" + fileName

    request = {
        "image": {"source": {"image_uri": image_uri}},
        "features": [
            {
                "type": vision.enums.Feature.Type.LABEL_DETECTION,
                "max_results": 6
            },
            {
                "type": vision.enums.Feature.Type.LANDMARK_DETECTION,
                "max_results": 3
            }
        ],
    }

    response = client.annotate_image(request)
    return getTagsFromResponse(response)
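getTagsFromResponse is referenced but not included in this snippet. A minimal sketch of what such a helper might do, assuming it simply collects the label and landmark descriptions from the AnnotateImageResponse:

def getTagsFromResponse(response):
    """Hypothetical helper: gather label and landmark descriptions as tags."""
    tags = [label.description for label in response.label_annotations]
    tags += [landmark.description for landmark in response.landmark_annotations]
    return tags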
Example 8: __init__
# Required import: from google.cloud import vision [as alias]
# Or: from google.cloud.vision import ImageAnnotatorClient [as alias]
def __init__(
        self,
        bounds=(0, 255),
        channel_axis=3,
        preprocessing=(0, 1)):
    from google.cloud import vision

    super(GoogleSafeSearchModel, self).__init__(
        bounds=bounds,
        channel_axis=channel_axis,
        preprocessing=preprocessing)

    self._task = 'cls'
    self.model = vision.ImageAnnotatorClient()
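Only the constructor of GoogleSafeSearchModel is shown here. As an illustration of how the stored client might be queried with an in-memory image (the helper name, the use of Pillow for PNG encoding, and the returned fields are all assumptions, not part of the original class):

import io
import numpy as np
from PIL import Image  # assumption: Pillow is available
from google.cloud import vision

def safe_search_scores(client, image_array):
    """Hypothetical helper: run safe-search detection on a numpy image array."""
    buffer = io.BytesIO()
    Image.fromarray(np.uint8(image_array)).save(buffer, format='PNG')
    image = vision.types.Image(content=buffer.getvalue())
    annotation = client.safe_search_detection(image=image).safe_search_annotation
    # Each field is a Likelihood enum value from UNKNOWN (0) to VERY_LIKELY (5).
    return {'adult': annotation.adult, 'racy': annotation.racy}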
Example 9: pic_to_text
# Required import: from google.cloud import vision [as alias]
# Or: from google.cloud.vision import ImageAnnotatorClient [as alias]
def pic_to_text(infile):
    """Detects text in an image file

    ARGS
    infile: path to image file

    RETURNS
    String of text detected in image
    """
    # Instantiates a client
    client = vision.ImageAnnotatorClient()

    # Opens the input image file
    with io.open(infile, 'rb') as image_file:
        content = image_file.read()

    image = vision.types.Image(content=content)

    # For dense text, use document_text_detection
    # For less dense text, use text_detection
    response = client.document_text_detection(image=image)
    text = response.full_text_annotation.text

    return text
# [END translate_hybrid_vision]

# [START translate_hybrid_create_glossary]
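The comments above point out text_detection as the alternative for sparse text. A minimal sketch of that variant, assuming a hypothetical image path:

client = vision.ImageAnnotatorClient()
with io.open('resources/sign.jpg', 'rb') as image_file:  # hypothetical path
    image = vision.types.Image(content=image_file.read())

response = client.text_detection(image=image)
if response.text_annotations:
    # The first annotation holds the full block of detected text.
    print(response.text_annotations[0].description)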
Example 10: run_quickstart
# Required import: from google.cloud import vision [as alias]
# Or: from google.cloud.vision import ImageAnnotatorClient [as alias]
def run_quickstart():
    # [START vision_quickstart]
    import io
    import os

    # Imports the Google Cloud client library
    # [START vision_python_migration_import]
    from google.cloud import vision
    from google.cloud.vision import types
    # [END vision_python_migration_import]

    # Instantiates a client
    # [START vision_python_migration_client]
    client = vision.ImageAnnotatorClient()
    # [END vision_python_migration_client]

    # The name of the image file to annotate
    file_name = os.path.abspath('resources/wakeupcat.jpg')

    # Loads the image into memory
    with io.open(file_name, 'rb') as image_file:
        content = image_file.read()

    image = types.Image(content=content)

    # Performs label detection on the image file
    response = client.label_detection(image=image)
    labels = response.label_annotations

    print('Labels:')
    for label in labels:
        print(label.description)
    # [END vision_quickstart]
Example 11: set_endpoint
# Required import: from google.cloud import vision [as alias]
# Or: from google.cloud.vision import ImageAnnotatorClient [as alias]
def set_endpoint():
    """Change your endpoint"""
    # [START vision_set_endpoint]
    from google.cloud import vision

    client_options = {'api_endpoint': 'eu-vision.googleapis.com'}

    client = vision.ImageAnnotatorClient(client_options=client_options)
    # [END vision_set_endpoint]
    image_source = vision.types.ImageSource(
        image_uri='gs://cloud-samples-data/vision/text/screen.jpg')
    image = vision.types.Image(source=image_source)

    response = client.text_detection(image=image)

    print('Texts:')
    for text in response.text_annotations:
        print('{}'.format(text.description))
        vertices = ['({},{})'.format(vertex.x, vertex.y)
                    for vertex in text.bounding_poly.vertices]
        print('bounds: {}\n'.format(','.join(vertices)))

    if response.error.message:
        raise Exception(
            '{}\nFor more info on error messages, check: '
            'https://cloud.google.com/apis/design/errors'.format(
                response.error.message))
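The dict passed above is accepted directly; if you prefer an explicit options object, google.api_core exposes ClientOptions, which should be equivalent for setting the endpoint (a minimal sketch):

from google.api_core.client_options import ClientOptions
from google.cloud import vision

# Same effect as the dict form used in set_endpoint() above.
client = vision.ImageAnnotatorClient(
    client_options=ClientOptions(api_endpoint='eu-vision.googleapis.com'))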
Example 12: detect_labels
# Required import: from google.cloud import vision [as alias]
# Or: from google.cloud.vision import ImageAnnotatorClient [as alias]
def detect_labels(path):
    """Detects labels in the file."""
    from google.cloud import vision
    import io
    client = vision.ImageAnnotatorClient()

    # [START vision_python_migration_label_detection]
    with io.open(path, 'rb') as image_file:
        content = image_file.read()

    image = vision.types.Image(content=content)

    response = client.label_detection(image=image)
    labels = response.label_annotations
    print('Labels:')

    for label in labels:
        print(label.description)

    if response.error.message:
        raise Exception(
            '{}\nFor more info on error messages, check: '
            'https://cloud.google.com/apis/design/errors'.format(
                response.error.message))
    # [END vision_python_migration_label_detection]
# [END vision_label_detection]

# [START vision_label_detection_gcs]
Example 13: detect_landmarks
# Required import: from google.cloud import vision [as alias]
# Or: from google.cloud.vision import ImageAnnotatorClient [as alias]
def detect_landmarks(path):
    """Detects landmarks in the file."""
    from google.cloud import vision
    import io
    client = vision.ImageAnnotatorClient()

    # [START vision_python_migration_landmark_detection]
    with io.open(path, 'rb') as image_file:
        content = image_file.read()

    image = vision.types.Image(content=content)

    response = client.landmark_detection(image=image)
    landmarks = response.landmark_annotations
    print('Landmarks:')

    for landmark in landmarks:
        print(landmark.description)
        for location in landmark.locations:
            lat_lng = location.lat_lng
            print('Latitude {}'.format(lat_lng.latitude))
            print('Longitude {}'.format(lat_lng.longitude))

    if response.error.message:
        raise Exception(
            '{}\nFor more info on error messages, check: '
            'https://cloud.google.com/apis/design/errors'.format(
                response.error.message))
    # [END vision_python_migration_landmark_detection]
# [END vision_landmark_detection]

# [START vision_landmark_detection_gcs]
Example 14: detect_logos
# Required import: from google.cloud import vision [as alias]
# Or: from google.cloud.vision import ImageAnnotatorClient [as alias]
def detect_logos(path):
    """Detects logos in the file."""
    from google.cloud import vision
    import io
    client = vision.ImageAnnotatorClient()

    # [START vision_python_migration_logo_detection]
    with io.open(path, 'rb') as image_file:
        content = image_file.read()

    image = vision.types.Image(content=content)

    response = client.logo_detection(image=image)
    logos = response.logo_annotations
    print('Logos:')

    for logo in logos:
        print(logo.description)

    if response.error.message:
        raise Exception(
            '{}\nFor more info on error messages, check: '
            'https://cloud.google.com/apis/design/errors'.format(
                response.error.message))
    # [END vision_python_migration_logo_detection]
# [END vision_logo_detection]

# [START vision_logo_detection_gcs]
Example 15: detect_safe_search
# Required import: from google.cloud import vision [as alias]
# Or: from google.cloud.vision import ImageAnnotatorClient [as alias]
def detect_safe_search(path):
    """Detects unsafe features in the file."""
    from google.cloud import vision
    import io
    client = vision.ImageAnnotatorClient()

    # [START vision_python_migration_safe_search_detection]
    with io.open(path, 'rb') as image_file:
        content = image_file.read()

    image = vision.types.Image(content=content)

    response = client.safe_search_detection(image=image)
    safe = response.safe_search_annotation

    # Names of likelihood from google.cloud.vision.enums
    likelihood_name = ('UNKNOWN', 'VERY_UNLIKELY', 'UNLIKELY', 'POSSIBLE',
                       'LIKELY', 'VERY_LIKELY')
    print('Safe search:')

    print('adult: {}'.format(likelihood_name[safe.adult]))
    print('medical: {}'.format(likelihood_name[safe.medical]))
    print('spoofed: {}'.format(likelihood_name[safe.spoof]))
    print('violence: {}'.format(likelihood_name[safe.violence]))
    print('racy: {}'.format(likelihood_name[safe.racy]))

    if response.error.message:
        raise Exception(
            '{}\nFor more info on error messages, check: '
            'https://cloud.google.com/apis/design/errors'.format(
                response.error.message))
    # [END vision_python_migration_safe_search_detection]
# [END vision_safe_search_detection]

# [START vision_safe_search_detection_gcs]