本文整理汇总了Java中org.apache.hadoop.hive.ql.session.SessionState.LogHelper类的典型用法代码示例。如果您正苦于以下问题:Java LogHelper类的具体用法?Java LogHelper怎么用?Java LogHelper使用的例子?那么, 这里精选的类代码示例或许可以为您提供帮助。
LogHelper类属于org.apache.hadoop.hive.ql.session.SessionState包,在下文中一共展示了LogHelper类的4个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Java代码示例。
示例1: processGrantDDL
import org.apache.hadoop.hive.ql.session.SessionState.LogHelper; //导入依赖的package包/类
/**
 * Handles a GRANT DDL statement by delegating to the shared
 * grant/revoke processor with the grant flag set.
 *
 * @param console console helper used for user-facing output
 * @param sentryClient client connected to the Sentry policy service
 * @param subject the requesting user
 * @param server the server the privileges apply to
 * @param desc descriptor carrying principals, privileges and grant option
 * @return the status code produced by processGrantRevokeDDL
 * @throws SentryUserException if the Sentry service rejects the request
 */
private int processGrantDDL(LogHelper console,
    SentryPolicyServiceClient sentryClient, String subject,
    String server, GrantDesc desc) throws SentryUserException {
  // true selects the GRANT path in the shared processor.
  final boolean isGrant = true;
  return processGrantRevokeDDL(console, sentryClient, subject, server,
      isGrant, desc.getPrincipals(), desc.getPrivileges(),
      desc.getPrivilegeSubjectDesc(), desc.isGrantOption());
}
示例2: processRevokeDDL
import org.apache.hadoop.hive.ql.session.SessionState.LogHelper; //导入依赖的package包/类
/**
 * Handles a REVOKE DDL statement by delegating to the shared
 * grant/revoke processor with the grant flag cleared.
 *
 * @param console console helper used for user-facing output
 * @param sentryClient client connected to the Sentry policy service
 * @param subject the requesting user
 * @param server the server the privileges apply to
 * @param desc descriptor carrying principals and privileges to revoke
 * @return the status code produced by processGrantRevokeDDL
 * @throws SentryUserException if the Sentry service rejects the request
 */
private int processRevokeDDL(LogHelper console,
    SentryPolicyServiceClient sentryClient, String subject,
    String server, RevokeDesc desc) throws SentryUserException {
  // false selects the REVOKE path; grant-option is not applicable here.
  final boolean isGrant = false;
  return processGrantRevokeDDL(console, sentryClient, subject, server,
      isGrant, desc.getPrincipals(), desc.getPrivileges(),
      desc.getPrivilegeSubjectDesc(), null);
}
示例3: verifyDynamoDBWriteThroughput
import org.apache.hadoop.hive.ql.session.SessionState.LogHelper; //导入依赖的package包/类
/**
 * Warns (once) when the cluster's maximum map-task capacity exceeds the
 * DynamoDB table's provisioned write throughput, since that imbalance can
 * cause a write outage on the table.
 *
 * The check is skipped when no Configuration is available, when the warning
 * has already been printed, or when the table name property is absent.
 *
 * @param conf Hadoop configuration; may be null (Hive often creates SerDes
 *             with a null conf, in which case cluster status is unavailable)
 * @param tbl  table properties; expected to carry the DynamoDB table name
 *             and region under DynamoDBConstants keys
 */
private void verifyDynamoDBWriteThroughput(Configuration conf, Properties tbl) {
  // Without a conf there is no way to query the cluster status.
  if (conf == null || warningPrinted) {
    return;
  }
  String tableName = tbl.getProperty(DynamoDBConstants.TABLE_NAME);
  // Hive initializes serde's from partition metadata too; the table name is
  // null at column level, and the throughput check is not needed there.
  if (tableName == null) {
    return;
  }
  log.info("Table Properties:" + tbl);
  String region = tbl.getProperty(DynamoDBConstants.REGION);
  DynamoDBClient client = new DynamoDBClient(conf, region);
  long provisionedWrites = client.describeTable(tableName)
      .getProvisionedThroughput().getWriteCapacityUnits();
  long clusterMapCapacity;
  try {
    clusterMapCapacity = new JobClient(new JobConf(conf))
        .getClusterStatus().getMaxMapTasks();
  } catch (IOException e) {
    throw new RuntimeException("Could not get cluster capacity.", e);
  }
  if (clusterMapCapacity > provisionedWrites) {
    String message = "WARNING: Configured write throughput of the dynamodb table "
        + tableName + " is less than the cluster map capacity." + " ClusterMapCapacity: "
        + clusterMapCapacity + " WriteThroughput: " + provisionedWrites + "\nWARNING: Writes to this "
        + "table might result in a write outage on the table.";
    LogHelper console = SessionState.getConsole();
    if (console != null) {
      console.printInfo(message);
    }
    log.warn(message);
    // Only surface this warning once per serde instance.
    warningPrinted = true;
  }
}
示例4: PantheraCliDriver
import org.apache.hadoop.hive.ql.session.SessionState.LogHelper; //导入依赖的package包/类
/**
 * Builds the CLI driver: adopts the current session's configuration when a
 * SessionState exists (falling back to a fresh Configuration otherwise) and
 * wires up a console LogHelper backed by a "PantheraCliDriver" logger.
 */
public PantheraCliDriver() {
  SessionState sessionState = SessionState.get();
  if (sessionState != null) {
    conf = sessionState.getConf();
  } else {
    // No active session — start from an empty Hadoop configuration.
    conf = new Configuration();
  }
  Log driverLog = LogFactory.getLog("PantheraCliDriver");
  console = new LogHelper(driverLog);
}