本文整理汇总了Java中org.apache.accumulo.core.client.mapreduce.InputFormatBase.setInputTableName方法的典型用法代码示例。如果您正苦于以下问题:Java InputFormatBase.setInputTableName方法的具体用法?Java InputFormatBase.setInputTableName怎么用?Java InputFormatBase.setInputTableName使用的例子?那么恭喜您,这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类org.apache.accumulo.core.client.mapreduce.InputFormatBase的用法示例。
在下文中一共展示了InputFormatBase.setInputTableName方法的4个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Java代码示例。
示例1: setupAccumuloInput
import org.apache.accumulo.core.client.mapreduce.InputFormatBase; //导入方法依赖的package包/类
/**
 * Configures the MapReduce job's input side to read from an Accumulo Rya table.
 *
 * <p>Selects the input format (direct scans vs. reading files from HDFS), sets the
 * connector credentials, the input table for the configured layout, scan
 * authorizations, the instance (mock or ZooKeeper-backed), an optional age-off
 * iterator, and the common regex filter iterators.
 *
 * @param job the job whose input configuration is populated
 * @throws AccumuloSecurityException if the connector credentials are rejected
 */
@Override
protected void setupAccumuloInput(final Job job) throws AccumuloSecurityException {
    // Choose between reading the table's files directly from HDFS and normal Accumulo scans.
    if (hdfsInput) {
        job.setInputFormatClass(AccumuloHDFSFileInputFormat.class);
    } else {
        job.setInputFormatClass(AccumuloInputFormat.class);
    }

    // Credentials, input table (derived from layout + prefix), and scan authorizations.
    AbstractInputFormat.setConnectorInfo(job, userName, new PasswordToken(pwd));
    final String inputTable = RdfCloudTripleStoreUtils.layoutPrefixToTable(rdfTableLayout, tablePrefix);
    InputFormatBase.setInputTableName(job, inputTable);
    AbstractInputFormat.setScanAuthorizations(job, authorizations);

    // Mock instances are in-memory (tests); real instances need ZooKeeper hosts.
    if (mock) {
        AbstractInputFormat.setMockInstance(job, instance);
    } else {
        final ClientConfiguration zkConfig = new ClientConfiguration().withInstance(instance).withZkHosts(zk);
        AbstractInputFormat.setZooKeeperInstance(job, zkConfig);
    }

    // Optionally attach an age-off filter so entries older than the TTL are skipped.
    // NOTE(review): ttl is parsed as a Long; presumably milliseconds — confirm against AgeOffFilter docs.
    if (ttl != null) {
        final IteratorSetting ageOff = new IteratorSetting(1, "fi", AgeOffFilter.class);
        AgeOffFilter.setTTL(ageOff, Long.valueOf(ttl));
        InputFormatBase.addIterator(job, ageOff);
    }

    // Apply the regex filters shared by all Rya scans.
    for (final IteratorSetting filterSetting : AccumuloRyaUtils.COMMON_REG_EX_FILTER_SETTINGS) {
        InputFormatBase.addIterator(job, filterSetting);
    }
}
示例2: getEdgeRDD
import org.apache.accumulo.core.client.mapreduce.InputFormatBase; //导入方法依赖的package包/类
/**
 * Builds a Spark RDD of GraphX edges by reading the Rya SPO-layout table through
 * {@code GraphXEdgeInputFormat}.
 *
 * <p>Reads Accumulo connection parameters from {@code conf}, mirrors them into the
 * alternative {@code ConfigUtils} property names for consistency, validates the
 * required parameters, and configures a Hadoop {@link Job} used only as a carrier
 * for the input-format settings.
 *
 * @param sc   the active Spark context (its app name is reused as the job name)
 * @param conf Hadoop configuration holding the Accumulo/Rya connection properties
 * @return an RDD of (vertex id, edge) tuples over the SPO table
 * @throws IOException if the job configuration cannot be created
 * @throws AccumuloSecurityException if the connector credentials are rejected
 */
public RDD<Tuple2<Object, Edge>> getEdgeRDD(SparkContext sc, Configuration conf) throws IOException, AccumuloSecurityException{
    // Load configuration parameters
    zk = MRUtils.getACZK(conf);
    instance = MRUtils.getACInstance(conf);
    userName = MRUtils.getACUserName(conf);
    pwd = MRUtils.getACPwd(conf);
    mock = MRUtils.getACMock(conf, false);
    tablePrefix = MRUtils.getTablePrefix(conf);
    // Set authorizations if specified; otherwise scan with the catch-all authorizations.
    String authString = conf.get(MRUtils.AC_AUTH_PROP);
    if (authString != null && !authString.isEmpty()) {
        authorizations = new Authorizations(authString.split(","));
        conf.set(ConfigUtils.CLOUDBASE_AUTHS, authString); // for consistency
    }
    else {
        authorizations = AccumuloRdfConstants.ALL_AUTHORIZATIONS;
    }
    // Set table prefix to the default if not set
    if (tablePrefix == null) {
        tablePrefix = RdfCloudTripleStoreConstants.TBL_PRFX_DEF;
        MRUtils.setTablePrefix(conf, tablePrefix);
    }
    // Check for required configuration parameters
    Preconditions.checkNotNull(instance, "Accumulo instance name [" + MRUtils.AC_INSTANCE_PROP + "] not set.");
    Preconditions.checkNotNull(userName, "Accumulo username [" + MRUtils.AC_USERNAME_PROP + "] not set.");
    Preconditions.checkNotNull(pwd, "Accumulo password [" + MRUtils.AC_PWD_PROP + "] not set.");
    Preconditions.checkNotNull(tablePrefix, "Table prefix [" + MRUtils.TABLE_PREFIX_PROPERTY + "] not set.");
    RdfCloudTripleStoreConstants.prefixTables(tablePrefix);
    // If connecting to real accumulo, zookeepers are required — fail fast with a clear
    // message instead of letting a null zk reach conf.set()/ClientConfiguration below.
    if (!mock) {
        Preconditions.checkNotNull(zk, "Zookeeper hosts not set (required for a non-mock Accumulo instance).");
        conf.set(ConfigUtils.CLOUDBASE_ZOOKEEPERS, zk); // for consistency
    }
    // Ensure consistency between alternative configuration properties
    conf.set(ConfigUtils.CLOUDBASE_INSTANCE, instance);
    conf.set(ConfigUtils.CLOUDBASE_USER, userName);
    conf.set(ConfigUtils.CLOUDBASE_PASSWORD, pwd);
    conf.setBoolean(ConfigUtils.USE_MOCK_INSTANCE, mock);
    conf.set(RdfCloudTripleStoreConfiguration.CONF_TBL_PREFIX, tablePrefix);
    // The Job is never submitted; it only carries input-format configuration.
    Job job = Job.getInstance(conf, sc.appName());
    ClientConfiguration clientConfig = new ClientConfiguration().with(ClientProperty.INSTANCE_NAME, instance).with(ClientProperty.INSTANCE_ZK_HOST, zk);
    RyaInputFormat.setTableLayout(job, TABLE_LAYOUT.SPO);
    RyaInputFormat.setConnectorInfo(job, userName, new PasswordToken(pwd));
    RyaInputFormat.setZooKeeperInstance(job, clientConfig);
    RyaInputFormat.setScanAuthorizations(job, authorizations);
    String tableName = RdfCloudTripleStoreUtils.layoutPrefixToTable(TABLE_LAYOUT.SPO, tablePrefix);
    InputFormatBase.setInputTableName(job, tableName);
    return sc.newAPIHadoopRDD(job.getConfiguration(), GraphXEdgeInputFormat.class, Object.class, Edge.class);
}
示例3: run
import org.apache.accumulo.core.client.mapreduce.InputFormatBase; //导入方法依赖的package包/类
/**
 * Runs one merge job per configured table: for each parent table, merges the
 * corresponding child table (parent prefix replaced by the child prefix) into it.
 *
 * In file-input mode, the child data is first imported into a temporary table in
 * the parent instance, and any temporary scan authorizations granted to the parent
 * user are revoked after each job completes.
 *
 * @param strings command-line arguments (unused here; configuration comes from {@code conf})
 * @return 0 if every table's job succeeds; the first non-zero job exit code otherwise
 * @throws Exception if setup, import, or job execution fails
 */
@Override
public int run(final String[] strings) throws Exception {
useMergeFileInput = conf.getBoolean(USE_MERGE_FILE_INPUT, false);
log.info("Setting up Merge Tool...");
setup();
if (useMergeFileInput) {
// When using file input mode the child instance will use a temporary table in the parent instance to
// store the child table data. The two tables will then be merged together.
copyParentPropertiesToChild(conf);
}
for (final String table : tables) {
// Derive the child table name by swapping the parent prefix for the child prefix.
final String childTable = table.replaceFirst(tablePrefix, childTablePrefix);
final String jobName = "Merge Tool, merging Child Table: " + childTable + ", into Parent Table: " + table + ", " + System.currentTimeMillis();
log.info("Initializing job: " + jobName);
conf.set(MRUtils.JOB_NAME_PROP, jobName);
conf.set(TABLE_NAME_PROP, table);
final Job job = Job.getInstance(conf);
job.setJarByClass(MergeTool.class);
if (useMergeFileInput) {
// Stage the child's exported files into the temporary table before the merge job reads it.
importChildFilesToTempParentTable(childTable);
}
setupAccumuloInput(job);
InputFormatBase.setInputTableName(job, table);
// Set input output of the particular job
job.setMapOutputKeyClass(Text.class);
job.setMapOutputValueClass(Mutation.class);
job.setOutputKeyClass(Text.class);
job.setOutputValueClass(Mutation.class);
setupAccumuloOutput(job, table);
// Set mapper and reducer classes
job.setMapperClass(MergeToolMapper.class);
job.setReducerClass(Reducer.class);
// Submit the job and block until it finishes.
final Date beginTime = new Date();
log.info("Job for table \"" + table + "\" started: " + beginTime);
final int exitCode = job.waitForCompletion(true) ? 0 : 1;
if (useMergeFileInput && StringUtils.isNotBlank(tempChildAuths)) {
// Clear any of the temporary child auths given to the parent.
// Done regardless of job outcome so temporary grants never outlive the run.
final AccumuloRdfConfiguration parentAccumuloRdfConfiguration = new AccumuloRdfConfiguration(conf);
parentAccumuloRdfConfiguration.setTablePrefix(tablePrefix);
final Connector parentConnector = AccumuloRyaUtils.setupConnector(parentAccumuloRdfConfiguration);
final SecurityOperations secOps = parentConnector.securityOperations();
AccumuloRyaUtils.removeUserAuths(userName, secOps, tempChildAuths);
}
if (exitCode == 0) {
final Date endTime = new Date();
log.info("Job for table \"" + table + "\" finished: " + endTime);
log.info("The job took " + (endTime.getTime() - beginTime.getTime()) / 1000 + " seconds.");
} else {
// Abort on the first failed table; remaining tables are not processed.
log.error("Job for table \"" + table + "\" Failed!!!");
return exitCode;
}
}
return 0;
}
示例4: setTableName
import org.apache.accumulo.core.client.mapreduce.InputFormatBase; //导入方法依赖的package包/类
/**
 * Sets the Accumulo table this instance's {@code job} will read as its input.
 *
 * @param tableName name of the Accumulo input table
 */
public void setTableName(String tableName) {
InputFormatBase.setInputTableName(job, tableName);
}