

Java AccumuloInputFormat.setZooKeeperInstance Method Code Examples

This article collects typical code examples of the Java method org.apache.accumulo.core.client.mapreduce.AccumuloInputFormat.setZooKeeperInstance. If you are wondering what exactly AccumuloInputFormat.setZooKeeperInstance does, how to call it, or what real-world uses of it look like, the curated examples below should help. You can also explore further usage examples of its enclosing class, org.apache.accumulo.core.client.mapreduce.AccumuloInputFormat.


The sections below show 15 code examples of the AccumuloInputFormat.setZooKeeperInstance method, ordered by popularity by default.
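Before the individual examples, here is a minimal, self-contained sketch of the two call patterns that recur below: the ClientConfiguration overload and the older (instanceName, zooKeepers) overload, which is deprecated in recent 1.x releases. The instance name, ZooKeeper hosts, credentials, and table name are placeholders, not values taken from any of the projects cited below.

import org.apache.accumulo.core.client.ClientConfiguration;
import org.apache.accumulo.core.client.mapreduce.AccumuloInputFormat;
import org.apache.accumulo.core.client.security.tokens.PasswordToken;
import org.apache.accumulo.core.security.Authorizations;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.mapreduce.Job;

public class SetZooKeeperInstanceSketch {
    public static Job configure() throws Exception {
        Job job = Job.getInstance(new Configuration(), "setZooKeeperInstance-sketch");

        // Preferred overload: describe the instance with a ClientConfiguration
        ClientConfiguration clientConfig = ClientConfiguration.loadDefault()
                .withInstance("myInstance")     // placeholder instance name
                .withZkHosts("zkhost1:2181");   // placeholder ZooKeeper hosts
        AccumuloInputFormat.setZooKeeperInstance(job, clientConfig);

        // Older overload (deprecated): pass the instance name and ZooKeeper hosts directly
        // AccumuloInputFormat.setZooKeeperInstance(job, "myInstance", "zkhost1:2181");

        // The remaining input settings used throughout the examples below
        AccumuloInputFormat.setConnectorInfo(job, "user", new PasswordToken("secret"));
        AccumuloInputFormat.setInputTableName(job, "myTable");
        AccumuloInputFormat.setScanAuthorizations(job, new Authorizations());
        job.setInputFormatClass(AccumuloInputFormat.class);
        return job;
    }
}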

Example 1: setupAccumuloInput

import org.apache.accumulo.core.client.mapreduce.AccumuloInputFormat; // import the package/class this method depends on
/**
 * Sets up Accumulo input for a job: the job receives
 * ({@link org.apache.accumulo.core.data.Key},
 * {@link org.apache.accumulo.core.data.Value}) pairs from the table
 * specified by the configuration (using
 * {@link MRUtils#TABLE_PREFIX_PROPERTY} and
 * {@link MRUtils#TABLE_LAYOUT_PROP}).
 * @param   job     MapReduce Job to configure
 * @throws  AccumuloSecurityException if connecting to Accumulo with the
 *          given username and password fails.
 */
protected void setupAccumuloInput(Job job) throws AccumuloSecurityException {
    // set up accumulo input
    if (!hdfsInput) {
        job.setInputFormatClass(AccumuloInputFormat.class);
    } else {
        job.setInputFormatClass(AccumuloHDFSFileInputFormat.class);
    }
    AccumuloInputFormat.setConnectorInfo(job, userName, new PasswordToken(pwd));
    String tableName = RdfCloudTripleStoreUtils.layoutPrefixToTable(rdfTableLayout, tablePrefix);
    AccumuloInputFormat.setInputTableName(job, tableName);
    AccumuloInputFormat.setScanAuthorizations(job, authorizations);
    if (mock) {
        AccumuloInputFormat.setMockInstance(job, instance);
    } else {
        ClientConfiguration clientConfig = ClientConfiguration.loadDefault()
                .withInstance(instance).withZkHosts(zk);
        AccumuloInputFormat.setZooKeeperInstance(job, clientConfig);
    }
    if (ttl != null) {
        IteratorSetting setting = new IteratorSetting(1, "fi", AgeOffFilter.class.getName());
        AgeOffFilter.setTTL(setting, Long.valueOf(ttl));
        AccumuloInputFormat.addIterator(job, setting);
    }
}
 
Developer ID: apache, Project: incubator-rya, Lines of code: 36, Source: AbstractAccumuloMRTool.java

Example 2: getExpectedLoadJob

import org.apache.accumulo.core.client.mapreduce.AccumuloInputFormat; // import the package/class this method depends on
public Job getExpectedLoadJob(String inst, String zookeepers, String user,
        String password, String table, String start, String end,
        Authorizations authorizations,
        List<Pair<Text, Text>> columnFamilyColumnQualifierPairs)
        throws IOException {
    Collection<Range> ranges = new LinkedList<Range>();
    ranges.add(new Range(start, end));

    Job expected = new Job(new Configuration());

    try {
        AccumuloInputFormat.setConnectorInfo(expected, user,
                new PasswordToken(password));
    } catch (AccumuloSecurityException e) {
        Assert.fail(e.getMessage());
    }
    AccumuloInputFormat.setInputTableName(expected, table);
    AccumuloInputFormat.setScanAuthorizations(expected, authorizations);
    AccumuloInputFormat.setZooKeeperInstance(expected, inst, zookeepers);
    AccumuloInputFormat.fetchColumns(expected,
            columnFamilyColumnQualifierPairs);
    AccumuloInputFormat.setRanges(expected, ranges);

    return expected;
}
 
Developer ID: sigmoidanalytics, Project: spork, Lines of code: 26, Source: TestAbstractAccumuloStorage.java

Example 3: configure

import org.apache.accumulo.core.client.mapreduce.AccumuloInputFormat; // import the package/class this method depends on
/**
 * Configure properties needed to connect to a Fluo application
 *
 * @param conf Job configuration
 * @param config use {@link org.apache.fluo.api.config.FluoConfiguration} to configure
 *        programmatically
 */
@SuppressWarnings("deprecation")
public static void configure(Job conf, SimpleConfiguration config) {
  try {
    FluoConfiguration fconfig = new FluoConfiguration(config);
    try (Environment env = new Environment(fconfig)) {
      long ts =
          env.getSharedResources().getTimestampTracker().allocateTimestamp().getTxTimestamp();
      conf.getConfiguration().setLong(TIMESTAMP_CONF_KEY, ts);

      ByteArrayOutputStream baos = new ByteArrayOutputStream();
      config.save(baos);
      conf.getConfiguration().set(PROPS_CONF_KEY,
          new String(baos.toByteArray(), StandardCharsets.UTF_8));

      AccumuloInputFormat.setZooKeeperInstance(conf, fconfig.getAccumuloInstance(),
          fconfig.getAccumuloZookeepers());
      AccumuloInputFormat.setConnectorInfo(conf, fconfig.getAccumuloUser(),
          new PasswordToken(fconfig.getAccumuloPassword()));
      AccumuloInputFormat.setInputTableName(conf, env.getTable());
      AccumuloInputFormat.setScanAuthorizations(conf, env.getAuthorizations());
    }
  } catch (Exception e) {
    throw new RuntimeException(e);
  }
}
 
Developer ID: apache, Project: fluo, Lines of code: 33, Source: FluoRowInputFormat.java

Example 4: configure

import org.apache.accumulo.core.client.mapreduce.AccumuloInputFormat; // import the package/class this method depends on
/**
 * Configure properties needed to connect to a Fluo application
 *
 * @param conf Job configuration
 * @param config use {@link FluoConfiguration} to configure programmatically
 */
@SuppressWarnings("deprecation")
public static void configure(Job conf, SimpleConfiguration config) {
  try {
    FluoConfiguration fconfig = new FluoConfiguration(config);
    try (Environment env = new Environment(fconfig)) {
      long ts =
          env.getSharedResources().getTimestampTracker().allocateTimestamp().getTxTimestamp();
      conf.getConfiguration().setLong(TIMESTAMP_CONF_KEY, ts);

      ByteArrayOutputStream baos = new ByteArrayOutputStream();
      config.save(baos);
      conf.getConfiguration().set(PROPS_CONF_KEY,
          new String(baos.toByteArray(), StandardCharsets.UTF_8));

      AccumuloInputFormat.setZooKeeperInstance(conf, fconfig.getAccumuloInstance(),
          fconfig.getAccumuloZookeepers());
      AccumuloInputFormat.setConnectorInfo(conf, fconfig.getAccumuloUser(),
          new PasswordToken(fconfig.getAccumuloPassword()));
      AccumuloInputFormat.setInputTableName(conf, env.getTable());
      AccumuloInputFormat.setScanAuthorizations(conf, env.getAuthorizations());
    }
  } catch (Exception e) {
    throw new RuntimeException(e);
  }
}
 
Developer ID: apache, Project: fluo, Lines of code: 32, Source: FluoEntryInputFormat.java

Example 5: getDataFromAccumulo

import org.apache.accumulo.core.client.mapreduce.AccumuloInputFormat; // import the package/class this method depends on
public DataSet<Tuple2<Key,Value>> getDataFromAccumulo(ExecutionEnvironment env) throws IOException, AccumuloSecurityException {
    job = Job.getInstance(new Configuration(), "HighScoreJob");
    AccumuloInputFormat.setConnectorInfo(job, accumuloUser, new PasswordToken(accumuloPassword));
    AccumuloInputFormat.setScanAuthorizations(job, new Authorizations("standard"));
    ClientConfiguration clientConfig = new ClientConfiguration();
    clientConfig.withInstance(accumuloInstanceName);
    clientConfig.withZkHosts(accumuloZookeeper);
    AccumuloInputFormat.setZooKeeperInstance(job, clientConfig);
    AccumuloInputFormat.setInputTableName(job, inTable);
    return env.createHadoopInput(new AccumuloInputFormat(), Key.class, Value.class, job);
}
 
Developer ID: IIDP, Project: OSTMap, Lines of code: 12, Source: HighscoreController.java

Example 6: getDataFromAccumulo

import org.apache.accumulo.core.client.mapreduce.AccumuloInputFormat; // import the package/class this method depends on
/**
 * Makes Accumulo input accessible through the Flink DataSet API.
 * @param env the Flink execution environment
 * @return a DataSet of (Key, Value) pairs read from Accumulo
 * @throws IOException
 * @throws AccumuloSecurityException
 */
public DataSet<Tuple2<Key,Value>> getDataFromAccumulo(ExecutionEnvironment env) throws IOException, AccumuloSecurityException {
    job = Job.getInstance(new Configuration(), jobName);
    AccumuloInputFormat.setConnectorInfo(job, accumuloUser, new PasswordToken(accumuloPassword));
    AccumuloInputFormat.setScanAuthorizations(job, new Authorizations(AccumuloIdentifiers.AUTHORIZATION.toString()));
    ClientConfiguration clientConfig = new ClientConfiguration();
    clientConfig.withInstance(accumuloInstanceName);
    clientConfig.withZkHosts(accumuloZookeeper);
    AccumuloInputFormat.setZooKeeperInstance(job, clientConfig);
    AccumuloInputFormat.setInputTableName(job, inTable);
    return env.createHadoopInput(new AccumuloInputFormat(), Key.class, Value.class, job);
}
 
Developer ID: IIDP, Project: OSTMap, Lines of code: 19, Source: FlinkEnvManager.java
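As a rough usage sketch of the method above: once getDataFromAccumulo has built the job configuration, the returned DataSet can be consumed like any other Flink DataSet. This fragment is hypothetical and not taken from the OSTMap source; it assumes "manager" is an already-configured FlinkEnvManager instance.

// Hypothetical driver fragment; "manager" is assumed to be configured elsewhere.
ExecutionEnvironment env = ExecutionEnvironment.getExecutionEnvironment();
DataSet<Tuple2<Key, Value>> data = manager.getDataFromAccumulo(env);
long entries = data.count(); // triggers execution and counts the scanned Accumulo entries
System.out.println("Scanned " + entries + " Accumulo entries");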

Example 7: getDataFromAccumulo

import org.apache.accumulo.core.client.mapreduce.AccumuloInputFormat; // import the package/class this method depends on
/**
 * Makes Accumulo input accessible through the Flink DataSet API.
 * @param env the Flink execution environment
 * @return a DataSet of (Key, Value) pairs read from Accumulo
 * @throws IOException
 * @throws AccumuloSecurityException
 */
// TODO make private after testing
public DataSet<Tuple2<Key,Value>> getDataFromAccumulo(ExecutionEnvironment env) throws IOException, AccumuloSecurityException {
    job = Job.getInstance(new Configuration(), "areaCalculationJob");
    AccumuloInputFormat.setConnectorInfo(job, accumuloUser, new PasswordToken(accumuloPassword));
    AccumuloInputFormat.setScanAuthorizations(job, new Authorizations("standard"));
    ClientConfiguration clientConfig = new ClientConfiguration();
    clientConfig.withInstance(accumuloInstanceName);
    clientConfig.withZkHosts(accumuloZookeeper);
    AccumuloInputFormat.setZooKeeperInstance(job, clientConfig);
    AccumuloInputFormat.setInputTableName(job, inTable);
    return env.createHadoopInput(new AccumuloInputFormat(), Key.class, Value.class, job);
}
 
Developer ID: IIDP, Project: OSTMap, Lines of code: 20, Source: Calculator.java

Example 8: getDataFromAccumulo

import org.apache.accumulo.core.client.mapreduce.AccumuloInputFormat; // import the package/class this method depends on
/**
 * Makes Accumulo input accessible through the Flink DataSet API.
 * @param env the Flink execution environment
 * @return a DataSet of (Key, Value) pairs read from Accumulo
 * @throws IOException
 * @throws AccumuloSecurityException
 */
// TODO make private after testing
public DataSet<Tuple2<Key,Value>> getDataFromAccumulo(ExecutionEnvironment env) throws IOException, AccumuloSecurityException {
    job = Job.getInstance(new Configuration(), "pathCalculationJob");
    AccumuloInputFormat.setConnectorInfo(job, accumuloUser, new PasswordToken(accumuloPassword));
    AccumuloInputFormat.setScanAuthorizations(job, new Authorizations("standard"));
    ClientConfiguration clientConfig = new ClientConfiguration();
    clientConfig.withInstance(accumuloInstanceName);
    clientConfig.withZkHosts(accumuloZookeeper);
    AccumuloInputFormat.setZooKeeperInstance(job, clientConfig);
    AccumuloInputFormat.setInputTableName(job, inTable);
    return env.createHadoopInput(new AccumuloInputFormat(), Key.class, Value.class, job);
}
 
Developer ID: IIDP, Project: OSTMap, Lines of code: 20, Source: PathCalculator.java

Example 9: setLocation

import org.apache.accumulo.core.client.mapreduce.AccumuloInputFormat; // import the package/class this method depends on
@Override
public void setLocation(final String location, final Job job) throws IOException {
    if (logger.isDebugEnabled()) {
        logger.debug("Set Location[" + location + "] for job[" + job.getJobName() + "]");
    }
    conf = job.getConfiguration();
    setLocationFromUri(location, job);

    if (!ConfiguratorBase.isConnectorInfoSet(AccumuloInputFormat.class, conf)) {
        try {
            AccumuloInputFormat.setConnectorInfo(job, user, new PasswordToken(userP.getBytes(StandardCharsets.UTF_8)));
        } catch (final AccumuloSecurityException e) {
            throw new RuntimeException(e);
        }
        AccumuloInputFormat.setInputTableName(job, table);
        AccumuloInputFormat.setScanAuthorizations(job, authorizations);
        if (!mock) {
            AccumuloInputFormat.setZooKeeperInstance(job, inst, zookeepers);
        } else {
            AccumuloInputFormat.setMockInstance(job, inst);
        }
    }
    if (columnFamilyColumnQualifierPairs.size() > 0) {
        AccumuloInputFormat.fetchColumns(job, columnFamilyColumnQualifierPairs);
    }
    logger.info("Set ranges[" + ranges + "] for job[" + job.getJobName() + "] on table[" + table + "] " +
            "for columns[" + columnFamilyColumnQualifierPairs + "] with authorizations[" + authorizations + "]");

    if (ranges.size() == 0) {
        throw new IOException("Accumulo Range must be specified");
    }
    AccumuloInputFormat.setRanges(job, ranges);
}
 
Developer ID: apache, Project: incubator-rya, Lines of code: 34, Source: AccumuloStorage.java

Example 10: initTableMRJob

import org.apache.accumulo.core.client.mapreduce.AccumuloInputFormat; // import the package/class this method depends on
public static void initTableMRJob(Job job, String intable, String outtable, String auths) throws AccumuloSecurityException {
  Configuration conf = job.getConfiguration();
  String username = conf.get(USERNAME);
  String password = conf.get(PASSWORD);
  String instance = conf.get(INSTANCE);
  String zookeepers = conf.get(ZOOKEEPERS);

  System.out.println("Zookeepers are " + auths);

  if (zookeepers != null) {
    AccumuloInputFormat.setZooKeeperInstance(job, instance, zookeepers);
    AccumuloOutputFormat.setZooKeeperInstance(job, instance, zookeepers);
  } else {
    throw new IllegalArgumentException("Must specify either mock or zookeepers");
  }

  AccumuloInputFormat.setConnectorInfo(job, username, new PasswordToken(password));
  AccumuloInputFormat.setScanAuthorizations(job, new Authorizations(auths));
  AccumuloInputFormat.setInputTableName(job, intable);
  job.setInputFormatClass(AccumuloInputFormat.class);
  job.setMapOutputKeyClass(Text.class);
  job.setMapOutputValueClass(IntWritable.class);

  // OUTPUT
  AccumuloOutputFormat.setConnectorInfo(job, username, new PasswordToken(password));
  AccumuloOutputFormat.setDefaultTableName(job, outtable);
  job.setOutputFormatClass(AccumuloOutputFormat.class);
  job.setOutputKeyClass(Text.class);
  job.setOutputValueClass(Mutation.class);

}
 
Developer ID: apache, Project: incubator-rya, Lines of code: 32, Source: JoinSelectStatsUtil.java

Example 11: initTabToSeqFileJob

import org.apache.accumulo.core.client.mapreduce.AccumuloInputFormat; // import the package/class this method depends on
public static void initTabToSeqFileJob(Job job, String intable, String outpath, String auths) throws AccumuloSecurityException {

    Configuration conf = job.getConfiguration();
    String username = conf.get(USERNAME);
    String password = conf.get(PASSWORD);
    String instance = conf.get(INSTANCE);
    String zookeepers = conf.get(ZOOKEEPERS);

    System.out.println("Zookeepers are " + auths);

    if (zookeepers != null) {
      AccumuloInputFormat.setZooKeeperInstance(job, instance, zookeepers);
    } else {
      throw new IllegalArgumentException("Must specify either mock or zookeepers");
    }

    AccumuloInputFormat.setConnectorInfo(job, username, new PasswordToken(password));
    AccumuloInputFormat.setScanAuthorizations(job, new Authorizations(auths));
    AccumuloInputFormat.setInputTableName(job, intable);
    job.setInputFormatClass(AccumuloInputFormat.class);
    job.setMapOutputKeyClass(CompositeType.class);
    job.setMapOutputValueClass(TripleCard.class);

    // OUTPUT
    SequenceFileOutputFormat.setOutputPath(job, new Path(outpath));
    job.setOutputFormatClass(SequenceFileOutputFormat.class);
    job.setOutputKeyClass(CompositeType.class);
    job.setOutputValueClass(TripleCard.class);

  }
 
Developer ID: apache, Project: incubator-rya, Lines of code: 31, Source: JoinSelectStatsUtil.java

Example 12: initMRJob

import org.apache.accumulo.core.client.mapreduce.AccumuloInputFormat; // import the package/class this method depends on
public static void initMRJob(final Job job, final String table, final String outtable, final String[] auths) throws AccumuloSecurityException {
    final Configuration conf = job.getConfiguration();
    final String username = conf.get(USERNAME);
    final String password = conf.get(PASSWORD);
    final String instance = conf.get(INSTANCE);
    final String zookeepers = conf.get(ZOOKEEPERS);
    final String mock = conf.get(MOCK);

    //input
    if (Boolean.parseBoolean(mock)) {
        AccumuloInputFormat.setMockInstance(job, instance);
        AccumuloOutputFormat.setMockInstance(job, instance);
    } else if (zookeepers != null) {
        AccumuloInputFormat.setZooKeeperInstance(job, instance, zookeepers);
        AccumuloOutputFormat.setZooKeeperInstance(job, instance, zookeepers);
    } else {
        throw new IllegalArgumentException("Must specify either mock or zookeepers");
    }

    AccumuloInputFormat.setConnectorInfo(job, username, new PasswordToken(password.getBytes(StandardCharsets.UTF_8)));
    AccumuloInputFormat.setInputTableName(job, table);
    job.setInputFormatClass(AccumuloInputFormat.class);
    AccumuloInputFormat.setScanAuthorizations(job, new Authorizations(auths));

    // OUTPUT
    job.setOutputFormatClass(AccumuloOutputFormat.class);
    job.setOutputKeyClass(Text.class);
    job.setOutputValueClass(Mutation.class);
    AccumuloOutputFormat.setConnectorInfo(job, username, new PasswordToken(password.getBytes(StandardCharsets.UTF_8)));
    AccumuloOutputFormat.setDefaultTableName(job, outtable);
}
 
Developer ID: apache, Project: incubator-rya, Lines of code: 32, Source: ProspectorUtils.java

Example 13: setAccumuloConfigs

import org.apache.accumulo.core.client.mapreduce.AccumuloInputFormat; // import the package/class this method depends on
public void setAccumuloConfigs(Job job) throws AccumuloSecurityException {
  AccumuloInputFormat.setZooKeeperInstance(job, this.getClientConfiguration());
  AccumuloOutputFormat.setZooKeeperInstance(job, this.getClientConfiguration());
}
 
Developer ID: apache, Project: accumulo-examples, Lines of code: 5, Source: MapReduceClientOpts.java

Example 14: setLocation

import org.apache.accumulo.core.client.mapreduce.AccumuloInputFormat; // import the package/class this method depends on
@Override
public void setLocation(String location, Job job) throws IOException {
    setLocationFromUri(location);

    loadDependentJars(job.getConfiguration());

    Map<String, String> entries = getInputFormatEntries(job
            .getConfiguration());
    unsetEntriesFromConfiguration(job.getConfiguration(), entries);

    try {
        AccumuloInputFormat.setConnectorInfo(job, user, new PasswordToken(
                password));
    } catch (AccumuloSecurityException e) {
        throw new IOException(e);
    }

    AccumuloInputFormat.setInputTableName(job, table);
    AccumuloInputFormat.setScanAuthorizations(job, authorizations);
    AccumuloInputFormat.setZooKeeperInstance(job, inst, zookeepers);

    List<Pair<Text, Text>> inputFormatColumns = new LinkedList<Pair<Text, Text>>();
    int colfamPrefix = 0;

    for (Column c : columns) {
        switch (c.getType()) {
        case LITERAL:
            // Pull the colf[:colq] individually
            inputFormatColumns.add(makePair(c.getColumnFamily(),
                    c.getColumnQualifier()));
            break;
        case COLFAM_PREFIX:
            // Some colfams
            colfamPrefix++;
            break;
        case COLQUAL_PREFIX:
            // Some colquals in a given colfam
            inputFormatColumns.add(makePair(c.getColumnFamily(), null));
            break;
        default:
            log.info("Ignoring unhandled column type");
            break;
        }
    }

    // If we have colfam prefixes, we have to pull all columns and filter on
    // client-side
    // TODO Create an iterator that lets us push-down *all* of the filter
    // logic
    if (0 == colfamPrefix && !inputFormatColumns.isEmpty()) {
        AccumuloInputFormat.fetchColumns(job, inputFormatColumns);
    }

    Collection<Range> ranges = Collections.singleton(new Range(start, end));

    log.info("Scanning Accumulo for " + ranges + " for table " + table);

    AccumuloInputFormat.setRanges(job, ranges);

    configureInputFormat(job);
}
 
Developer ID: sigmoidanalytics, Project: spork, Lines of code: 62, Source: AbstractAccumuloStorage.java

Example 15: run

import org.apache.accumulo.core.client.mapreduce.AccumuloInputFormat; // import the package/class this method depends on
/**
 * The run method which sets the configuration and starts the MapReduce job
 */
public int run(String[] args) throws Exception {

    if (USE_MINI_ACCUMULO) {
        Connector connector = LocalEnvUtil.getConnector(userPass);
        userName = "root";
        instanceName = connector.getInstance().getInstanceName();
        zookeepers = connector.getInstance().getZooKeepers();
    }

    // Create and initialize a MapReduce Job
    Job job = Job.getInstance(getConf(), "tweetIndexer");
    job.setJarByClass(IndexedDocIndexer.class);

    // Set the AccumuloInputFormat so the mapper can read from Accumulo
    AccumuloInputFormat.setConnectorInfo(job, userName, new PasswordToken(userPass));
    AccumuloInputFormat.setInputTableName(job, twitterDataTable);
    AccumuloInputFormat.setScanAuthorizations(job, new Authorizations());

    ClientConfiguration clientConfig = new ClientConfiguration();
    clientConfig.withInstance(instanceName);
    clientConfig.withZkHosts(zookeepers);
    AccumuloInputFormat.setZooKeeperInstance(job, clientConfig);

    AccumuloOutputFormat.setConnectorInfo(job, userName, new PasswordToken(userPass));
    AccumuloOutputFormat.setCreateTables(job, createTables);
    AccumuloOutputFormat.setDefaultTableName(job, tweetDocIndex);
    AccumuloOutputFormat.setZooKeeperInstance(job, clientConfig);

    // Set the map and reduce classes
    job.setMapperClass(TweetMapper.class);
    job.setReducerClass(TweetReducer.class);

    // Set the output key and value class for the mapper
    job.setMapOutputKeyClass(Text.class);
    job.setMapOutputValueClass(Text.class);

    // Set the output key and value class for the reducer
    job.setOutputKeyClass(Text.class);
    job.setOutputValueClass(Mutation.class);

    // Set the InputFormat and OutputFormat for the job
    job.setInputFormatClass(AccumuloInputFormat.class);
    job.setOutputFormatClass(AccumuloOutputFormat.class);

    // Run the MapReduce job and return 0 for success, 1 otherwise
    return job.waitForCompletion(true) ? 0 : 1;
}
 
Developer ID: adamjshook, Project: accumulo-training, Lines of code: 65, Source: IndexedDocIndexer.java


Note: The org.apache.accumulo.core.client.mapreduce.AccumuloInputFormat.setZooKeeperInstance examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other open-source code and documentation platforms. The code snippets were selected from open-source projects contributed by their respective developers, and copyright of the source code remains with the original authors. Please consult each project's license before distributing or reusing the code; do not reproduce this compilation without permission.