This article collects typical usage examples of the Java method org.apache.accumulo.core.client.mapreduce.AccumuloInputFormat.setInputTableName. If you are unsure what AccumuloInputFormat.setInputTableName does or how to use it, the curated code samples below should help; you can also explore further usage of its containing class, org.apache.accumulo.core.client.mapreduce.AccumuloInputFormat.
The following shows 15 code examples of AccumuloInputFormat.setInputTableName, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better Java examples.
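Before the numbered examples, here is a minimal, self-contained sketch of the pattern they all share: configure the connection, then call setInputTableName to select the table the job scans. The connection details ("myInstance", "zk1:2181", "user", "secret", "myTable") are placeholder values for illustration, not taken from any of the projects below.

import org.apache.accumulo.core.client.ClientConfiguration;
import org.apache.accumulo.core.client.mapreduce.AccumuloInputFormat;
import org.apache.accumulo.core.client.security.tokens.PasswordToken;
import org.apache.accumulo.core.security.Authorizations;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.mapreduce.Job;

public class ScanTableJobSketch {
  public static void main(String[] args) throws Exception {
    Job job = Job.getInstance(new Configuration(), "scan-table-sketch");
    job.setInputFormatClass(AccumuloInputFormat.class);
    // Connection settings (placeholder values).
    AccumuloInputFormat.setZooKeeperInstance(job, ClientConfiguration.loadDefault()
        .withInstance("myInstance").withZkHosts("zk1:2181"));
    AccumuloInputFormat.setConnectorInfo(job, "user", new PasswordToken("secret"));
    // The method this page documents: select the input table for the job.
    AccumuloInputFormat.setInputTableName(job, "myTable");
    AccumuloInputFormat.setScanAuthorizations(job, new Authorizations());
    // A real job would also set its mapper, output format, and so on,
    // then call job.waitForCompletion(true).
  }
}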
Example 1: setAccumuloConfigs
import org.apache.accumulo.core.client.mapreduce.AccumuloInputFormat; // import the package/class the method depends on
@Override
public void setAccumuloConfigs(Job job) throws AccumuloSecurityException {
  super.setAccumuloConfigs(job);
  final String principal = getPrincipal(), tableName = getTableName();
  if (tokenFile.isEmpty()) {
    AuthenticationToken token = getToken();
    AccumuloInputFormat.setConnectorInfo(job, principal, token);
    AccumuloOutputFormat.setConnectorInfo(job, principal, token);
  } else {
    AccumuloInputFormat.setConnectorInfo(job, principal, tokenFile);
    AccumuloOutputFormat.setConnectorInfo(job, principal, tokenFile);
  }
  AccumuloInputFormat.setInputTableName(job, tableName);
  AccumuloInputFormat.setScanAuthorizations(job, auths);
  AccumuloOutputFormat.setCreateTables(job, true);
  AccumuloOutputFormat.setDefaultTableName(job, tableName);
}
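Note the two setConnectorInfo overloads in this example: passing an AuthenticationToken serializes the credentials into the job configuration, while passing a token file name instead distributes the credentials to tasks as a file, keeping the secret out of the configuration object itself.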
Example 2: setupAccumuloInput
import org.apache.accumulo.core.client.mapreduce.AccumuloInputFormat; // import the package/class the method depends on
/**
 * Sets up Accumulo input for a job: the job receives
 * ({@link org.apache.accumulo.core.data.Key},
 * {@link org.apache.accumulo.core.data.Value}) pairs from the table
 * specified by the configuration (using
 * {@link MRUtils#TABLE_PREFIX_PROPERTY} and
 * {@link MRUtils#TABLE_LAYOUT_PROP}).
 * @param job MapReduce Job to configure
 * @throws AccumuloSecurityException if connecting to Accumulo with the
 *         given username and password fails.
 */
protected void setupAccumuloInput(Job job) throws AccumuloSecurityException {
  // set up Accumulo input
  if (!hdfsInput) {
    job.setInputFormatClass(AccumuloInputFormat.class);
  } else {
    job.setInputFormatClass(AccumuloHDFSFileInputFormat.class);
  }
  AccumuloInputFormat.setConnectorInfo(job, userName, new PasswordToken(pwd));
  String tableName = RdfCloudTripleStoreUtils.layoutPrefixToTable(rdfTableLayout, tablePrefix);
  AccumuloInputFormat.setInputTableName(job, tableName);
  AccumuloInputFormat.setScanAuthorizations(job, authorizations);
  if (mock) {
    AccumuloInputFormat.setMockInstance(job, instance);
  } else {
    ClientConfiguration clientConfig = ClientConfiguration.loadDefault()
        .withInstance(instance).withZkHosts(zk);
    AccumuloInputFormat.setZooKeeperInstance(job, clientConfig);
  }
  if (ttl != null) {
    IteratorSetting setting = new IteratorSetting(1, "fi", AgeOffFilter.class.getName());
    AgeOffFilter.setTTL(setting, Long.valueOf(ttl));
    AccumuloInputFormat.addIterator(job, setting);
  }
}
Example 3: getExpectedLoadJob
import org.apache.accumulo.core.client.mapreduce.AccumuloInputFormat; // import the package/class the method depends on
public Job getExpectedLoadJob(String inst, String zookeepers, String user,
    String password, String table, String start, String end,
    Authorizations authorizations,
    List<Pair<Text, Text>> columnFamilyColumnQualifierPairs)
    throws IOException {
  Collection<Range> ranges = new LinkedList<Range>();
  ranges.add(new Range(start, end));
  Job expected = new Job(new Configuration());
  try {
    AccumuloInputFormat.setConnectorInfo(expected, user,
        new PasswordToken(password));
  } catch (AccumuloSecurityException e) {
    Assert.fail(e.getMessage());
  }
  AccumuloInputFormat.setInputTableName(expected, table);
  AccumuloInputFormat.setScanAuthorizations(expected, authorizations);
  AccumuloInputFormat.setZooKeeperInstance(expected, inst, zookeepers);
  AccumuloInputFormat.fetchColumns(expected, columnFamilyColumnQualifierPairs);
  AccumuloInputFormat.setRanges(expected, ranges);
  return expected;
}
Example 4: configure
import org.apache.accumulo.core.client.mapreduce.AccumuloInputFormat; // import the package/class the method depends on
/**
 * Configure properties needed to connect to a Fluo application.
 *
 * @param conf Job configuration
 * @param config use {@link org.apache.fluo.api.config.FluoConfiguration} to configure
 *        programmatically
 */
@SuppressWarnings("deprecation")
public static void configure(Job conf, SimpleConfiguration config) {
  try {
    FluoConfiguration fconfig = new FluoConfiguration(config);
    try (Environment env = new Environment(fconfig)) {
      long ts =
          env.getSharedResources().getTimestampTracker().allocateTimestamp().getTxTimestamp();
      conf.getConfiguration().setLong(TIMESTAMP_CONF_KEY, ts);
      ByteArrayOutputStream baos = new ByteArrayOutputStream();
      config.save(baos);
      conf.getConfiguration().set(PROPS_CONF_KEY,
          new String(baos.toByteArray(), StandardCharsets.UTF_8));
      AccumuloInputFormat.setZooKeeperInstance(conf, fconfig.getAccumuloInstance(),
          fconfig.getAccumuloZookeepers());
      AccumuloInputFormat.setConnectorInfo(conf, fconfig.getAccumuloUser(),
          new PasswordToken(fconfig.getAccumuloPassword()));
      AccumuloInputFormat.setInputTableName(conf, env.getTable());
      AccumuloInputFormat.setScanAuthorizations(conf, env.getAuthorizations());
    }
  } catch (Exception e) {
    throw new RuntimeException(e);
  }
}
Example 5: configure
import org.apache.accumulo.core.client.mapreduce.AccumuloInputFormat; // import the package/class the method depends on
/**
 * Configure properties needed to connect to a Fluo application.
 *
 * @param conf Job configuration
 * @param config use {@link FluoConfiguration} to configure programmatically
 */
@SuppressWarnings("deprecation")
public static void configure(Job conf, SimpleConfiguration config) {
  try {
    FluoConfiguration fconfig = new FluoConfiguration(config);
    try (Environment env = new Environment(fconfig)) {
      long ts =
          env.getSharedResources().getTimestampTracker().allocateTimestamp().getTxTimestamp();
      conf.getConfiguration().setLong(TIMESTAMP_CONF_KEY, ts);
      ByteArrayOutputStream baos = new ByteArrayOutputStream();
      config.save(baos);
      conf.getConfiguration().set(PROPS_CONF_KEY,
          new String(baos.toByteArray(), StandardCharsets.UTF_8));
      AccumuloInputFormat.setZooKeeperInstance(conf, fconfig.getAccumuloInstance(),
          fconfig.getAccumuloZookeepers());
      AccumuloInputFormat.setConnectorInfo(conf, fconfig.getAccumuloUser(),
          new PasswordToken(fconfig.getAccumuloPassword()));
      AccumuloInputFormat.setInputTableName(conf, env.getTable());
      AccumuloInputFormat.setScanAuthorizations(conf, env.getAuthorizations());
    }
  } catch (Exception e) {
    throw new RuntimeException(e);
  }
}
Example 6: getDataFromAccumulo
import org.apache.accumulo.core.client.mapreduce.AccumuloInputFormat; // import the package/class the method depends on
public DataSet<Tuple2<Key, Value>> getDataFromAccumulo(ExecutionEnvironment env) throws IOException, AccumuloSecurityException {
  job = Job.getInstance(new Configuration(), "HighScoreJob");
  AccumuloInputFormat.setConnectorInfo(job, accumuloUser, new PasswordToken(accumuloPassword));
  AccumuloInputFormat.setScanAuthorizations(job, new Authorizations("standard"));
  ClientConfiguration clientConfig = new ClientConfiguration();
  clientConfig.withInstance(accumuloInstanceName);
  clientConfig.withZkHosts(accumuloZookeeper);
  AccumuloInputFormat.setZooKeeperInstance(job, clientConfig);
  AccumuloInputFormat.setInputTableName(job, inTable);
  return env.createHadoopInput(new AccumuloInputFormat(), Key.class, Value.class, job);
}
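Examples 6 through 9 all follow the same Flink integration pattern: the Job object only carries the Accumulo scan configuration, and env.createHadoopInput wraps the Hadoop AccumuloInputFormat so the scan results arrive as a DataSet<Tuple2<Key, Value>> that downstream Flink operators can consume.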
Example 7: getDataFromAccumulo
import org.apache.accumulo.core.client.mapreduce.AccumuloInputFormat; // import the package/class the method depends on
/**
 * Makes Accumulo input accessible to the Flink DataSet API.
 * @param env the Flink execution environment
 * @return the scan results as a DataSet of (Key, Value) tuples
 * @throws IOException
 * @throws AccumuloSecurityException
 */
public DataSet<Tuple2<Key, Value>> getDataFromAccumulo(ExecutionEnvironment env) throws IOException, AccumuloSecurityException {
  job = Job.getInstance(new Configuration(), jobName);
  AccumuloInputFormat.setConnectorInfo(job, accumuloUser, new PasswordToken(accumuloPassword));
  AccumuloInputFormat.setScanAuthorizations(job, new Authorizations(AccumuloIdentifiers.AUTHORIZATION.toString()));
  ClientConfiguration clientConfig = new ClientConfiguration();
  clientConfig.withInstance(accumuloInstanceName);
  clientConfig.withZkHosts(accumuloZookeeper);
  AccumuloInputFormat.setZooKeeperInstance(job, clientConfig);
  AccumuloInputFormat.setInputTableName(job, inTable);
  return env.createHadoopInput(new AccumuloInputFormat(), Key.class, Value.class, job);
}
Example 8: getDataFromAccumulo
import org.apache.accumulo.core.client.mapreduce.AccumuloInputFormat; // import the package/class the method depends on
/**
 * Makes Accumulo input accessible to the Flink DataSet API.
 * @param env the Flink execution environment
 * @return the scan results as a DataSet of (Key, Value) tuples
 * @throws IOException
 * @throws AccumuloSecurityException
 */
// TODO make private after testing
public DataSet<Tuple2<Key, Value>> getDataFromAccumulo(ExecutionEnvironment env) throws IOException, AccumuloSecurityException {
  job = Job.getInstance(new Configuration(), "areaCalculationJob");
  AccumuloInputFormat.setConnectorInfo(job, accumuloUser, new PasswordToken(accumuloPassword));
  AccumuloInputFormat.setScanAuthorizations(job, new Authorizations("standard"));
  ClientConfiguration clientConfig = new ClientConfiguration();
  clientConfig.withInstance(accumuloInstanceName);
  clientConfig.withZkHosts(accumuloZookeeper);
  AccumuloInputFormat.setZooKeeperInstance(job, clientConfig);
  AccumuloInputFormat.setInputTableName(job, inTable);
  return env.createHadoopInput(new AccumuloInputFormat(), Key.class, Value.class, job);
}
Example 9: getDataFromAccumulo
import org.apache.accumulo.core.client.mapreduce.AccumuloInputFormat; // import the package/class the method depends on
/**
 * Makes Accumulo input accessible to the Flink DataSet API.
 * @param env the Flink execution environment
 * @return the scan results as a DataSet of (Key, Value) tuples
 * @throws IOException
 * @throws AccumuloSecurityException
 */
// TODO make private after testing
public DataSet<Tuple2<Key, Value>> getDataFromAccumulo(ExecutionEnvironment env) throws IOException, AccumuloSecurityException {
  job = Job.getInstance(new Configuration(), "pathCalculationJob");
  AccumuloInputFormat.setConnectorInfo(job, accumuloUser, new PasswordToken(accumuloPassword));
  AccumuloInputFormat.setScanAuthorizations(job, new Authorizations("standard"));
  ClientConfiguration clientConfig = new ClientConfiguration();
  clientConfig.withInstance(accumuloInstanceName);
  clientConfig.withZkHosts(accumuloZookeeper);
  AccumuloInputFormat.setZooKeeperInstance(job, clientConfig);
  AccumuloInputFormat.setInputTableName(job, inTable);
  return env.createHadoopInput(new AccumuloInputFormat(), Key.class, Value.class, job);
}
Example 10: setAccumuloConfigs
import org.apache.accumulo.core.client.mapreduce.AccumuloInputFormat; // import the package/class the method depends on
@Override
public void setAccumuloConfigs(Job job) throws AccumuloSecurityException {
  super.setAccumuloConfigs(job);
  final String tableName = getTableName();
  final String principal = getPrincipal();
  final AuthenticationToken token = getToken();
  AccumuloInputFormat.setConnectorInfo(job, principal, token);
  AccumuloInputFormat.setInputTableName(job, tableName);
  AccumuloInputFormat.setScanAuthorizations(job, auths);
  AccumuloOutputFormat.setConnectorInfo(job, principal, token);
  AccumuloOutputFormat.setCreateTables(job, true);
  AccumuloOutputFormat.setDefaultTableName(job, tableName);
}
Example 11: setAccumuloConfigs
import org.apache.accumulo.core.client.mapreduce.AccumuloInputFormat; // import the package/class the method depends on
public void setAccumuloConfigs(Job job) throws AccumuloSecurityException {
  AccumuloInputFormat.setConnectorInfo(job, getPrincipal(), this.getToken());
  AccumuloOutputFormat.setConnectorInfo(job, getPrincipal(), this.getToken());
  AccumuloInputFormat.setInputTableName(job, getTableName());
  AccumuloInputFormat.setScanAuthorizations(job, this.auths);
  AccumuloOutputFormat.setCreateTables(job, true);
  AccumuloOutputFormat.setDefaultTableName(job, getTableName());
  job.getConfiguration().set("MRMigrationBase.config.prefix", configPrefix);
  job.getConfiguration().set("MRMigrationBase.output.tableName", getTableName());
}
Example 12: setLocation
import org.apache.accumulo.core.client.mapreduce.AccumuloInputFormat; // import the package/class the method depends on
@Override
public void setLocation(final String location, final Job job) throws IOException {
  if (logger.isDebugEnabled()) {
    logger.debug("Set Location[" + location + "] for job[" + job.getJobName() + "]");
  }
  conf = job.getConfiguration();
  setLocationFromUri(location, job);
  if (!ConfiguratorBase.isConnectorInfoSet(AccumuloInputFormat.class, conf)) {
    try {
      AccumuloInputFormat.setConnectorInfo(job, user, new PasswordToken(userP.getBytes(StandardCharsets.UTF_8)));
    } catch (final AccumuloSecurityException e) {
      throw new RuntimeException(e);
    }
    AccumuloInputFormat.setInputTableName(job, table);
    AccumuloInputFormat.setScanAuthorizations(job, authorizations);
    if (!mock) {
      AccumuloInputFormat.setZooKeeperInstance(job, inst, zookeepers);
    } else {
      AccumuloInputFormat.setMockInstance(job, inst);
    }
  }
  if (columnFamilyColumnQualifierPairs.size() > 0) {
    AccumuloInputFormat.fetchColumns(job, columnFamilyColumnQualifierPairs);
  }
  logger.info("Set ranges[" + ranges + "] for job[" + job.getJobName() + "] on table[" + table + "] " +
      "for columns[" + columnFamilyColumnQualifierPairs + "] with authorizations[" + authorizations + "]");
  if (ranges.size() == 0) {
    throw new IOException("Accumulo Range must be specified");
  }
  AccumuloInputFormat.setRanges(job, ranges);
}
Example 13: run
import org.apache.accumulo.core.client.mapreduce.AccumuloInputFormat; // import the package/class the method depends on
@Override
public int run(String[] strings) throws Exception {
  conf.set(MRUtils.JOB_NAME_PROP, "Upgrade to Rya 3.2.2");
  // faster
  init();
  Job job = new Job(conf);
  job.setJarByClass(Upgrade322Tool.class);
  setupAccumuloInput(job);
  AccumuloInputFormat.setInputTableName(job, MRUtils.getTablePrefix(conf) + TBL_OSP_SUFFIX);
  // we do not need to change any row that is a string, custom, or URI type
  IteratorSetting regex = new IteratorSetting(30, "regex", RegExFilter.class);
  RegExFilter.setRegexs(regex, "\\w*" + TYPE_DELIM + "[\u0003|\u0008|\u0002]", null, null, null, false);
  RegExFilter.setNegate(regex, true);
  AccumuloInputFormat.addIterator(job, regex); // attach the filter; without this the setting above is never used
  // set input and output of the particular job
  job.setMapOutputKeyClass(Text.class);
  job.setMapOutputValueClass(Mutation.class);
  setupAccumuloOutput(job, MRUtils.getTablePrefix(conf) + TBL_SPO_SUFFIX);
  // set mapper and reducer classes
  job.setMapperClass(Upgrade322Mapper.class);
  job.setReducerClass(Reducer.class);
  // Submit the job
  return job.waitForCompletion(true) ? 0 : 1;
}
Example 14: initTableMRJob
import org.apache.accumulo.core.client.mapreduce.AccumuloInputFormat; // import the package/class the method depends on
public static void initTableMRJob(Job job, String intable, String outtable, String auths) throws AccumuloSecurityException {
  Configuration conf = job.getConfiguration();
  String username = conf.get(USERNAME);
  String password = conf.get(PASSWORD);
  String instance = conf.get(INSTANCE);
  String zookeepers = conf.get(ZOOKEEPERS);
  System.out.println("Zookeepers are " + zookeepers);
  if (zookeepers != null) {
    AccumuloInputFormat.setZooKeeperInstance(job, instance, zookeepers);
    AccumuloOutputFormat.setZooKeeperInstance(job, instance, zookeepers);
  } else {
    throw new IllegalArgumentException("Must specify either mock or zookeepers");
  }
  // INPUT
  AccumuloInputFormat.setConnectorInfo(job, username, new PasswordToken(password));
  AccumuloInputFormat.setScanAuthorizations(job, new Authorizations(auths));
  AccumuloInputFormat.setInputTableName(job, intable);
  job.setInputFormatClass(AccumuloInputFormat.class);
  job.setMapOutputKeyClass(Text.class);
  job.setMapOutputValueClass(IntWritable.class);
  // OUTPUT
  AccumuloOutputFormat.setConnectorInfo(job, username, new PasswordToken(password));
  AccumuloOutputFormat.setDefaultTableName(job, outtable);
  job.setOutputFormatClass(AccumuloOutputFormat.class);
  job.setOutputKeyClass(Text.class);
  job.setOutputValueClass(Mutation.class);
}
Example 15: initTabToSeqFileJob
import org.apache.accumulo.core.client.mapreduce.AccumuloInputFormat; // import the package/class the method depends on
public static void initTabToSeqFileJob(Job job, String intable, String outpath, String auths) throws AccumuloSecurityException {
  Configuration conf = job.getConfiguration();
  String username = conf.get(USERNAME);
  String password = conf.get(PASSWORD);
  String instance = conf.get(INSTANCE);
  String zookeepers = conf.get(ZOOKEEPERS);
  System.out.println("Zookeepers are " + zookeepers);
  if (zookeepers != null) {
    AccumuloInputFormat.setZooKeeperInstance(job, instance, zookeepers);
  } else {
    throw new IllegalArgumentException("Must specify either mock or zookeepers");
  }
  // INPUT
  AccumuloInputFormat.setConnectorInfo(job, username, new PasswordToken(password));
  AccumuloInputFormat.setScanAuthorizations(job, new Authorizations(auths));
  AccumuloInputFormat.setInputTableName(job, intable);
  job.setInputFormatClass(AccumuloInputFormat.class);
  job.setMapOutputKeyClass(CompositeType.class);
  job.setMapOutputValueClass(TripleCard.class);
  // OUTPUT
  SequenceFileOutputFormat.setOutputPath(job, new Path(outpath));
  job.setOutputFormatClass(SequenceFileOutputFormat.class);
  job.setOutputKeyClass(CompositeType.class);
  job.setOutputValueClass(TripleCard.class);
}