本文整理汇总了Java中com.google.cloud.language.v1.Entity类的典型用法代码示例。如果您正苦于以下问题:Java Entity类的具体用法?Java Entity怎么用?Java Entity使用的例子?那么, 这里精选的类代码示例或许可以为您提供帮助。
Entity类属于com.google.cloud.language.v1包,在下文中一共展示了Entity类的6个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Java代码示例。
示例1: analyzeEntitiesText
import com.google.cloud.language.v1.Entity; //导入依赖的package包/类
/**
 * Identifies entities in the string {@code text} and prints each entity's name,
 * salience, metadata, and mentions to standard output.
 *
 * @param text the plain-text content to analyze
 * @throws Exception if the Language client cannot be created or the RPC fails
 */
public static void analyzeEntitiesText(String text) throws Exception {
  // [START analyze_entities_text]
  // Instantiate the Language client com.google.cloud.language.v1.LanguageServiceClient
  try (LanguageServiceClient language = LanguageServiceClient.create()) {
    Document doc = Document.newBuilder()
        .setContent(text)
        .setType(Type.PLAIN_TEXT)
        .build();
    AnalyzeEntitiesRequest request = AnalyzeEntitiesRequest.newBuilder()
        .setDocument(doc)
        // UTF16 offsets line up with Java String indexing
        .setEncodingType(EncodingType.UTF16)
        .build();
    AnalyzeEntitiesResponse response = language.analyzeEntities(request);
    // Print the response
    for (Entity entity : response.getEntitiesList()) {
      // Bug fix: "Entity: %s" was missing '\n', merging this line with the next
      System.out.printf("Entity: %s\n", entity.getName());
      System.out.printf("Salience: %.3f\n", entity.getSalience());
      System.out.println("Metadata: ");
      for (Map.Entry<String, String> entry : entity.getMetadataMap().entrySet()) {
        // Bug fix: "%s : %s" was missing '\n', merging all metadata entries onto one line
        System.out.printf("%s : %s\n", entry.getKey(), entry.getValue());
      }
      for (EntityMention mention : entity.getMentionsList()) {
        System.out.printf("Begin offset: %d\n", mention.getText().getBeginOffset());
        System.out.printf("Content: %s\n", mention.getText().getContent());
        System.out.printf("Type: %s\n\n", mention.getType());
      }
    }
  }
  // [END analyze_entities_text]
}
示例2: analyzeEntitiesFile
import com.google.cloud.language.v1.Entity; //导入依赖的package包/类
/**
 * Identifies entities in the contents of the object at the given GCS {@code gcsUri}
 * and prints each entity's name, salience, metadata, and mentions to standard output.
 *
 * @param gcsUri the Cloud Storage URI ("gs://bucket/object") of the file to analyze
 * @throws Exception if the Language client cannot be created or the RPC fails
 */
public static void analyzeEntitiesFile(String gcsUri) throws Exception {
  // [START analyze_entities_gcs]
  // Instantiate the Language client com.google.cloud.language.v1.LanguageServiceClient
  try (LanguageServiceClient language = LanguageServiceClient.create()) {
    // set the GCS Content URI path to the file to be analyzed
    Document doc = Document.newBuilder()
        .setGcsContentUri(gcsUri)
        .setType(Type.PLAIN_TEXT)
        .build();
    AnalyzeEntitiesRequest request = AnalyzeEntitiesRequest.newBuilder()
        .setDocument(doc)
        // UTF16 offsets line up with Java String indexing
        .setEncodingType(EncodingType.UTF16)
        .build();
    AnalyzeEntitiesResponse response = language.analyzeEntities(request);
    // Print the response
    for (Entity entity : response.getEntitiesList()) {
      // Bug fix: "Entity: %s" was missing '\n', merging this line with the next
      System.out.printf("Entity: %s\n", entity.getName());
      System.out.printf("Salience: %.3f\n", entity.getSalience());
      System.out.println("Metadata: ");
      for (Map.Entry<String, String> entry : entity.getMetadataMap().entrySet()) {
        // Bug fix: "%s : %s" was missing '\n', merging all metadata entries onto one line
        System.out.printf("%s : %s\n", entry.getKey(), entry.getValue());
      }
      for (EntityMention mention : entity.getMentionsList()) {
        System.out.printf("Begin offset: %d\n", mention.getText().getBeginOffset());
        System.out.printf("Content: %s\n", mention.getText().getContent());
        System.out.printf("Type: %s\n\n", mention.getType());
      }
    }
  }
  // [END analyze_entities_gcs]
}
示例3: entitySentimentText
import com.google.cloud.language.v1.Entity; //导入依赖的package包/类
/**
 * Detects per-entity sentiment in the string {@code text} using the Language Beta API
 * and prints each entity with its salience, sentiment, and mentions.
 */
public static void entitySentimentText(String text) throws Exception {
  // [START entity_sentiment_text]
  // Instantiate the Language client com.google.cloud.language.v1.LanguageServiceClient
  try (LanguageServiceClient client = LanguageServiceClient.create()) {
    Document document =
        Document.newBuilder().setType(Type.PLAIN_TEXT).setContent(text).build();
    AnalyzeEntitySentimentRequest sentimentRequest =
        AnalyzeEntitySentimentRequest.newBuilder()
            .setEncodingType(EncodingType.UTF16)
            .setDocument(document)
            .build();
    // detect entity sentiments in the given string
    AnalyzeEntitySentimentResponse sentimentResponse =
        client.analyzeEntitySentiment(sentimentRequest);
    // Print the response
    for (Entity detectedEntity : sentimentResponse.getEntitiesList()) {
      System.out.printf("Entity: %s\n", detectedEntity.getName());
      System.out.printf("Salience: %.3f\n", detectedEntity.getSalience());
      System.out.printf("Sentiment : %s\n", detectedEntity.getSentiment());
      for (EntityMention entityMention : detectedEntity.getMentionsList()) {
        System.out.printf("Begin offset: %d\n", entityMention.getText().getBeginOffset());
        System.out.printf("Content: %s\n", entityMention.getText().getContent());
        System.out.printf("Magnitude: %.3f\n", entityMention.getSentiment().getMagnitude());
        System.out.printf("Sentiment score : %.3f\n", entityMention.getSentiment().getScore());
        System.out.printf("Type: %s\n\n", entityMention.getType());
      }
    }
  }
  // [END entity_sentiment_text]
}
示例4: entitySentimentFile
import com.google.cloud.language.v1.Entity; //导入依赖的package包/类
/**
 * Identifies per-entity sentiment in the GCS-hosted file at {@code gcsUri} using the
 * Language Beta API and prints each entity with its salience, sentiment, and mentions.
 */
public static void entitySentimentFile(String gcsUri) throws Exception {
  // [START entity_sentiment_file]
  // Instantiate the Language client com.google.cloud.language.v1.LanguageServiceClient
  try (LanguageServiceClient client = LanguageServiceClient.create()) {
    Document document =
        Document.newBuilder().setType(Type.PLAIN_TEXT).setGcsContentUri(gcsUri).build();
    AnalyzeEntitySentimentRequest sentimentRequest =
        AnalyzeEntitySentimentRequest.newBuilder()
            .setEncodingType(EncodingType.UTF16)
            .setDocument(document)
            .build();
    // Detect entity sentiments in the given file
    AnalyzeEntitySentimentResponse sentimentResponse =
        client.analyzeEntitySentiment(sentimentRequest);
    // Print the response
    for (Entity detectedEntity : sentimentResponse.getEntitiesList()) {
      System.out.printf("Entity: %s\n", detectedEntity.getName());
      System.out.printf("Salience: %.3f\n", detectedEntity.getSalience());
      System.out.printf("Sentiment : %s\n", detectedEntity.getSentiment());
      for (EntityMention entityMention : detectedEntity.getMentionsList()) {
        System.out.printf("Begin offset: %d\n", entityMention.getText().getBeginOffset());
        System.out.printf("Content: %s\n", entityMention.getText().getContent());
        System.out.printf("Magnitude: %.3f\n", entityMention.getSentiment().getMagnitude());
        System.out.printf("Sentiment score : %.3f\n", entityMention.getSentiment().getScore());
        System.out.printf("Type: %s\n\n", entityMention.getType());
      }
    }
  }
  // [END entity_sentiment_file]
}
示例5: processElement
import com.google.cloud.language.v1.Entity; //导入依赖的package包/类
/**
 * Augments the incoming {@code ContentIndexSummary} with one tag per entity returned
 * by the Cloud Natural Language API (entities come back sorted by salience) and emits
 * the result. On failure the error is logged and nothing is emitted for this element.
 */
@ProcessElement
public void processElement(ProcessContext c) {
  ContentIndexSummary is = c.element();
  try {
    if (this.languageClient == null)
      throw new Exception("CNLP client not initialized");
    com.google.cloud.language.v1.Document doc = com.google.cloud.language.v1.Document.newBuilder()
        .setContent(is.doc.text).setType(Type.PLAIN_TEXT).build();
    AnalyzeEntitiesRequest request = AnalyzeEntitiesRequest.newBuilder()
        .setDocument(doc).setEncodingType(EncodingType.UTF16).build();
    AnalyzeEntitiesResponse response = languageClient.analyzeEntities(request);
    // Take ALL returned entities. (A previous variant capped this at the number of
    // Sirocco-based tags: Math.min(is.doc.tags.length, response.getEntitiesList().size()).)
    int entitiesToGet = response.getEntitiesList().size();
    DocumentTag[] newTags = new DocumentTag[entitiesToGet];
    // Create additional Document Tags and add them to the output index summary
    for (int idx = 0; idx < entitiesToGet; idx++) {
      // Entities are sorted by salience in the response list, so pick the first ones
      Entity entity = response.getEntitiesList().get(idx);
      DocumentTag dt = new DocumentTag();
      String tag = IndexerPipelineUtils.CNLP_TAG_PREFIX + entity.getName();
      Float weight = entity.getSalience();
      Boolean goodAsTopic = null; // unknown suitability; downstream treats null as "undecided"
      dt.initialize(tag, weight, goodAsTopic);
      newTags[idx] = dt;
    }
    if (entitiesToGet > 0) {
      // Copy-on-write: append the CNLP tags to a copy so the input element stays immutable
      ContentIndexSummary iscopy = is.copy();
      DocumentTag[] combinedTags = new DocumentTag[newTags.length + iscopy.doc.tags.length];
      System.arraycopy(iscopy.doc.tags, 0, combinedTags, 0, iscopy.doc.tags.length);
      System.arraycopy(newTags, 0, combinedTags, iscopy.doc.tags.length, newTags.length);
      iscopy.doc.tags = combinedTags;
      c.output(iscopy);
    } else {
      c.output(is);
    }
  } catch (Exception e) {
    // Bug fix: pass the exception so the stack trace is preserved, not just the message
    LOG.warn(e.getMessage(), e);
  }
}
示例6: processElement
import com.google.cloud.language.v1.Entity; //导入依赖的package包/类
/**
 * Augments the incoming {@code ContentIndexSummary} with CNLP entity tags, capped at
 * the number of existing Sirocco-based tags (entities come back sorted by salience, so
 * the most salient ones are kept), and emits the result. On failure the error is
 * logged and nothing is emitted for this element.
 */
@ProcessElement
public void processElement(ProcessContext c) {
  ContentIndexSummary is = c.element();
  try {
    if (this.languageClient == null)
      throw new Exception("CNLP client not initialized");
    com.google.cloud.language.v1.Document doc = com.google.cloud.language.v1.Document.newBuilder()
        .setContent(is.doc.text).setType(Type.PLAIN_TEXT).build();
    AnalyzeEntitiesRequest request = AnalyzeEntitiesRequest.newBuilder()
        .setDocument(doc).setEncodingType(EncodingType.UTF16).build();
    AnalyzeEntitiesResponse response = languageClient.analyzeEntities(request);
    // get at most as many entities as we have tags in the Sirocco-based output
    int entitiesToGet = Math.min(is.doc.tags.length, response.getEntitiesList().size());
    DocumentTag[] newTags = new DocumentTag[entitiesToGet];
    // Create additional Document Tags and add them to the output index summary
    for (int idx = 0; idx < entitiesToGet; idx++) {
      // Entities are sorted by salience in the response list, so pick the first ones
      Entity entity = response.getEntitiesList().get(idx);
      DocumentTag dt = new DocumentTag();
      String tag = IndexerPipelineUtils.CNLP_TAG_PREFIX + entity.getName();
      Float weight = entity.getSalience();
      Boolean goodAsTopic = null; // unknown suitability; downstream treats null as "undecided"
      dt.initialize(tag, weight, goodAsTopic);
      newTags[idx] = dt;
    }
    if (entitiesToGet > 0) {
      // Copy-on-write: append the CNLP tags to a copy so the input element stays immutable
      ContentIndexSummary iscopy = is.copy();
      DocumentTag[] combinedTags = new DocumentTag[newTags.length + iscopy.doc.tags.length];
      System.arraycopy(iscopy.doc.tags, 0, combinedTags, 0, iscopy.doc.tags.length);
      System.arraycopy(newTags, 0, combinedTags, iscopy.doc.tags.length, newTags.length);
      iscopy.doc.tags = combinedTags;
      c.output(iscopy);
    } else {
      c.output(is);
    }
  } catch (Exception e) {
    // Bug fix: pass the exception so the stack trace is preserved, not just the message
    LOG.warn(e.getMessage(), e);
  }
}