

Java Configuration.setStrings Method Code Examples

This article collects typical usage examples of the Java method org.apache.hadoop.conf.Configuration.setStrings. If you are wondering what Configuration.setStrings does, how to call it, or what real-world uses of it look like, the hand-picked code examples below may help. You can also explore further usage examples of the enclosing class, org.apache.hadoop.conf.Configuration.


A total of 15 code examples of the Configuration.setStrings method are shown below, sorted by popularity by default.
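
Before the examples, here is a minimal sketch of the API itself, for orientation: Configuration.setStrings(String name, String... values) stores the given values under a single key as a comma-separated list, and getStrings(String name) reads them back as a String[]. The key "example.hosts" and its values below are made up purely for illustration.

import org.apache.hadoop.conf.Configuration;

public class SetStringsSketch {
  public static void main(String[] args) {
    Configuration conf = new Configuration();
    // Store several values under one key; Configuration joins them with commas.
    conf.setStrings("example.hosts", "host1", "host2", "host3");

    // The raw property value is now "host1,host2,host3".
    System.out.println(conf.get("example.hosts"));

    // getStrings() splits the stored value back into a String[].
    for (String host : conf.getStrings("example.hosts")) {
      System.out.println(host);
    }
  }
}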

Example 1: setupBeforeClass

import org.apache.hadoop.conf.Configuration; // import the package/class this method depends on
@BeforeClass
public static void setupBeforeClass() throws Exception {
  Configuration conf = TEST_UTIL.getConfiguration();
  conf.setStrings(CoprocessorHost.WAL_COPROCESSOR_CONF_KEY,
      SampleRegionWALObserver.class.getName(), SampleRegionWALObserver.Legacy.class.getName());
  conf.set(CoprocessorHost.REGION_COPROCESSOR_CONF_KEY,
      SampleRegionWALObserver.class.getName());
  conf.setBoolean("dfs.support.append", true);
  conf.setInt("dfs.client.block.recovery.retries", 2);

  TEST_UTIL.startMiniCluster(1);
  Path hbaseRootDir = TEST_UTIL.getDFSCluster().getFileSystem()
      .makeQualified(new Path("/hbase"));
  LOG.info("hbase.rootdir=" + hbaseRootDir);
  FSUtils.setRootDir(conf, hbaseRootDir);
}
 
Developer ID: fengchen8086, Project: ditb, Lines of code: 17, Source: TestWALObserver.java

Example 2: configureSuperUserIPAddresses

import org.apache.hadoop.conf.Configuration; // import the package/class this method depends on
private static void configureSuperUserIPAddresses(Configuration conf,
    String superUserShortName) throws IOException {
  ArrayList<String> ipList = new ArrayList<String>();
  Enumeration<NetworkInterface> netInterfaceList = NetworkInterface
      .getNetworkInterfaces();
  while (netInterfaceList.hasMoreElements()) {
    NetworkInterface inf = netInterfaceList.nextElement();
    Enumeration<InetAddress> addrList = inf.getInetAddresses();
    while (addrList.hasMoreElements()) {
      InetAddress addr = addrList.nextElement();
      ipList.add(addr.getHostAddress());
    }
  }
  StringBuilder builder = new StringBuilder();
  for (String ip : ipList) {
    builder.append(ip);
    builder.append(',');
  }
  builder.append("127.0.1.1,");
  builder.append(InetAddress.getLocalHost().getCanonicalHostName());
  LOG.info("Local Ip addresses: " + builder.toString());
  conf.setStrings(DefaultImpersonationProvider.getTestProvider().
          getProxySuperuserIpConfKey(superUserShortName),
      builder.toString());
}
 
Developer ID: naver, Project: hadoop, Lines of code: 26, Source: TestDelegationTokenForProxyUser.java

Example 3: testErrorLogOnContainerExitWithCustomPattern

import org.apache.hadoop.conf.Configuration; // import the package/class this method depends on
@Test
public void testErrorLogOnContainerExitWithCustomPattern() throws Exception {
  Configuration conf = new Configuration();
  conf.setStrings(YarnConfiguration.NM_CONTAINER_STDERR_PATTERN,
      "{*stderr*,*log*}");
  verifyTailErrorLogOnContainerExit(conf, "/error.log", false);
}
 
Developer ID: naver, Project: hadoop, Lines of code: 8, Source: TestContainerLaunch.java

Example 4: testCustomPattern

import org.apache.hadoop.conf.Configuration; // import the package/class this method depends on
@Test
public void testCustomPattern() {
  // 1x0 1x1
  Configuration conf = new Configuration();
  conf.setStrings("test.custom." + IPC_CALLQUEUE_WRRMUX_WEIGHTS_KEY,
    "1", "1");

  mux = new WeightedRoundRobinMultiplexer(2, "test.custom", conf);
  assertEquals(mux.getAndAdvanceCurrentIndex(), 0);
  assertEquals(mux.getAndAdvanceCurrentIndex(), 1);
  assertEquals(mux.getAndAdvanceCurrentIndex(), 0);
  assertEquals(mux.getAndAdvanceCurrentIndex(), 1);

  // 1x0 3x1 2x2
  conf.setStrings("test.custom." + IPC_CALLQUEUE_WRRMUX_WEIGHTS_KEY,
    "1", "3", "2");

  mux = new WeightedRoundRobinMultiplexer(3, "test.custom", conf);

  for(int i = 0; i < 5; i++) {
    assertEquals(mux.getAndAdvanceCurrentIndex(), 0);
    assertEquals(mux.getAndAdvanceCurrentIndex(), 1);
    assertEquals(mux.getAndAdvanceCurrentIndex(), 1);
    assertEquals(mux.getAndAdvanceCurrentIndex(), 1);
    assertEquals(mux.getAndAdvanceCurrentIndex(), 2);
    assertEquals(mux.getAndAdvanceCurrentIndex(), 2);
  } // Ensure pattern repeats

}
 
Developer ID: naver, Project: hadoop, Lines of code: 30, Source: TestWeightedRoundRobinMultiplexer.java

Example 5: testRealUserAuthorizationSuccess

import org.apache.hadoop.conf.Configuration; // import the package/class this method depends on
@Test(timeout=4000)
public void testRealUserAuthorizationSuccess() throws IOException {
  final Configuration conf = new Configuration();
  configureSuperUserIPAddresses(conf, REAL_USER_SHORT_NAME);
  conf.setStrings(DefaultImpersonationProvider.getTestProvider().
          getProxySuperuserGroupConfKey(REAL_USER_SHORT_NAME),
      "group1");
  Server server = new RPC.Builder(conf).setProtocol(TestProtocol.class)
      .setInstance(new TestImpl()).setBindAddress(ADDRESS).setPort(0)
      .setNumHandlers(2).setVerbose(false).build();

  refreshConf(conf);
  try {
    server.start();

    UserGroupInformation realUserUgi = UserGroupInformation
        .createRemoteUser(REAL_USER_NAME);
    checkRemoteUgi(server, realUserUgi, conf);

    UserGroupInformation proxyUserUgi = UserGroupInformation
        .createProxyUserForTesting(PROXY_USER_NAME, realUserUgi, GROUP_NAMES);
    checkRemoteUgi(server, proxyUserUgi, conf);
  } catch (Exception e) {
    e.printStackTrace();
    Assert.fail();
  } finally {
    server.stop();
    if (proxy != null) {
      RPC.stopProxy(proxy);
    }
  }
}
 
Developer ID: nucypher, Project: hadoop-oss, Lines of code: 33, Source: TestDoAsEffectiveUser.java

Example 6: setUpBeforeClass

import org.apache.hadoop.conf.Configuration; // import the package/class this method depends on
@BeforeClass
public static void setUpBeforeClass() throws Exception {
  Configuration conf = HBaseConfiguration.create();
  conf.setStrings(CoprocessorHost.USER_REGION_COPROCESSOR_CONF_KEY,
      TestCoprocessor.class.getName());
  util = new HBaseTestingUtility(conf);
  util.startMiniCluster();
}
 
Developer ID: fengchen8086, Project: ditb, Lines of code: 9, Source: TestRegionObserverBypass.java

Example 7: testInstantiateIllegalMux

import org.apache.hadoop.conf.Configuration; // import the package/class this method depends on
@Test(expected=IllegalArgumentException.class)
public void testInstantiateIllegalMux() {
  Configuration conf = new Configuration();
  conf.setStrings("namespace." + IPC_CALLQUEUE_WRRMUX_WEIGHTS_KEY,
    "1", "2", "3");

  // ask for 3 weights with 2 queues
  mux = new WeightedRoundRobinMultiplexer(2, "namespace", conf);
}
 
Developer ID: nucypher, Project: hadoop-oss, Lines of code: 10, Source: TestWeightedRoundRobinMultiplexer.java

Example 8: testLegalInstantiation

import org.apache.hadoop.conf.Configuration; // import the package/class this method depends on
@Test
public void testLegalInstantiation() {
  Configuration conf = new Configuration();
  conf.setStrings("namespace." + IPC_CALLQUEUE_WRRMUX_WEIGHTS_KEY,
    "1", "2", "3");

  // ask for 3 weights with 3 queues
  mux = new WeightedRoundRobinMultiplexer(3, "namespace.", conf);
}
 
Developer ID: nucypher, Project: hadoop-oss, Lines of code: 10, Source: TestWeightedRoundRobinMultiplexer.java

Example 9: runMiniBenchmarkWithDelegationToken

import org.apache.hadoop.conf.Configuration; // import the package/class this method depends on
/**
 * Run MiniBenchmark using delegation token authentication.
 * 
 * @param conf - configuration
 * @param count - connect this many times
 * @param keytabKey - key for keytab file in the configuration
 * @param userNameKey - key for user name in the configuration
 * @return average time to connect
 * @throws IOException
 */
long runMiniBenchmarkWithDelegationToken(Configuration conf,
                                         int count,
                                         String keytabKey,
                                         String userNameKey)
throws IOException {
  // get login information
  String user = System.getProperty("user.name");
  if(userNameKey != null)
    user = conf.get(userNameKey, user);
  String keytabFile = null;
  if(keytabKey != null)
    keytabFile = conf.get(keytabKey, keytabFile);
  MiniServer miniServer = null;
  UserGroupInformation.setConfiguration(conf);
  String shortUserName =
    UserGroupInformation.createRemoteUser(user).getShortUserName();
  try {
    conf.setStrings(DefaultImpersonationProvider.getTestProvider().
            getProxySuperuserGroupConfKey(shortUserName), GROUP_NAME_1);
    configureSuperUserIPAddresses(conf, shortUserName);
    // start the server
    miniServer = new MiniServer(conf, user, keytabFile);
    InetSocketAddress addr = miniServer.getAddress();

    connectToServerAndGetDelegationToken(conf, addr);
    // connect to the server count times
    setLoggingLevel(logLevel);
    long elapsed = 0L;
    for(int idx = 0; idx < count; idx ++) {
      elapsed += connectToServerUsingDelegationToken(conf, addr);
    }
    return elapsed;
  } finally {
    if(miniServer != null) miniServer.stop();
  }
}
 
Developer ID: nucypher, Project: hadoop-oss, Lines of code: 47, Source: MiniRPCBenchmark.java

Example 10: getConfigurationWithoutSharedEdits

import org.apache.hadoop.conf.Configuration; // import the package/class this method depends on
/**
 * Clone the supplied configuration but remove the shared edits dirs.
 *
 * @param conf Supplies the original configuration.
 * @return Cloned configuration without the shared edit dirs.
 * @throws IOException on failure to generate the configuration.
 */
private static Configuration getConfigurationWithoutSharedEdits(
    Configuration conf)
    throws IOException {
  List<URI> editsDirs = FSNamesystem.getNamespaceEditsDirs(conf, false);
  String editsDirsString = Joiner.on(",").join(editsDirs);

  Configuration confWithoutShared = new Configuration(conf);
  confWithoutShared.unset(DFSConfigKeys.DFS_NAMENODE_SHARED_EDITS_DIR_KEY);
  confWithoutShared.setStrings(DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_KEY,
      editsDirsString);
  return confWithoutShared;
}
 
Developer ID: naver, Project: hadoop, Lines of code: 20, Source: NameNode.java

Example 11: testPermMask

import org.apache.hadoop.conf.Configuration; // import the package/class this method depends on
@Test
public void testPermMask() throws Exception {

  Configuration conf = HBaseConfiguration.create();
  FileSystem fs = FileSystem.get(conf);

  // default fs permission
  FsPermission defaultFsPerm = FSUtils.getFilePermissions(fs, conf,
      HConstants.DATA_FILE_UMASK_KEY);
  // 'hbase.data.umask.enable' is false. We will get default fs permission.
  assertEquals(FsPermission.getFileDefault(), defaultFsPerm);

  conf.setBoolean(HConstants.ENABLE_DATA_FILE_UMASK, true);
  // first check that we don't crash if we don't have perms set
  FsPermission defaultStartPerm = FSUtils.getFilePermissions(fs, conf,
      HConstants.DATA_FILE_UMASK_KEY);
  // default 'hbase.data.umask' is 000, and this umask will be used when
  // 'hbase.data.umask.enable' is true.
  // Therefore we will not get the real fs default in this case.
  // Instead we will get the starting point FULL_RWX_PERMISSIONS
  assertEquals(new FsPermission(FSUtils.FULL_RWX_PERMISSIONS), defaultStartPerm);

  conf.setStrings(HConstants.DATA_FILE_UMASK_KEY, "077");
  // now check that we get the right perms
  FsPermission filePerm = FSUtils.getFilePermissions(fs, conf,
      HConstants.DATA_FILE_UMASK_KEY);
  assertEquals(new FsPermission("700"), filePerm);

  // then that the correct file is created
  Path p = new Path("target" + File.separator + UUID.randomUUID().toString());
  try {
    FSDataOutputStream out = FSUtils.create(conf, fs, p, filePerm, null);
    out.close();
    FileStatus stat = fs.getFileStatus(p);
    assertEquals(new FsPermission("700"), stat.getPermission());
    // and then cleanup
  } finally {
    fs.delete(p, true);
  }
}
 
Developer ID: fengchen8086, Project: ditb, Lines of code: 41, Source: TestFSUtils.java

Example 12: initTableMapperJob

import org.apache.hadoop.conf.Configuration; // import the package/class this method depends on
/**
 * Use this before submitting a TableMap job. It will appropriately set up
 * the job.
 *
 * @param table  The table name to read from.
 * @param scan  The scan instance with the columns, time range etc.
 * @param mapper  The mapper class to use.
 * @param outputKeyClass  The class of the output key.
 * @param outputValueClass  The class of the output value.
 * @param job  The current job to adjust.  Make sure the passed job is
 * carrying all necessary HBase configuration.
 * @param addDependencyJars upload HBase jars and jars for any of the configured
 *           job classes via the distributed cache (tmpjars).
 * @param initCredentials whether to initialize hbase auth credentials for the job
 * @param inputFormatClass the input format
 * @throws IOException When setting up the details fails.
 */
public static void initTableMapperJob(String table, Scan scan,
    Class<? extends TableMapper> mapper,
    Class<?> outputKeyClass,
    Class<?> outputValueClass, Job job,
    boolean addDependencyJars, boolean initCredentials,
    Class<? extends InputFormat> inputFormatClass)
throws IOException {
  job.setInputFormatClass(inputFormatClass);
  if (outputValueClass != null) job.setMapOutputValueClass(outputValueClass);
  if (outputKeyClass != null) job.setMapOutputKeyClass(outputKeyClass);
  job.setMapperClass(mapper);
  if (Put.class.equals(outputValueClass)) {
    job.setCombinerClass(PutCombiner.class);
  }
  Configuration conf = job.getConfiguration();
  HBaseConfiguration.merge(conf, HBaseConfiguration.create(conf));
  conf.set(TableInputFormat.INPUT_TABLE, table);
  conf.set(TableInputFormat.SCAN, convertScanToString(scan));
  conf.setStrings("io.serializations", conf.get("io.serializations"),
      MutationSerialization.class.getName(), ResultSerialization.class.getName(),
      KeyValueSerialization.class.getName());
  if (addDependencyJars) {
    addDependencyJars(job);
  }
  if (initCredentials) {
    initCredentials(job);
  }
}
 
Developer ID: fengchen8086, Project: ditb, Lines of code: 46, Source: TableMapReduceUtil.java
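
For reference, a hypothetical caller of the overload shown above could look like the sketch below. The table name "my_table" and the mapper class MyMapper (a user-defined TableMapper) are assumptions for illustration only, not part of the original example.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
import org.apache.hadoop.hbase.mapreduce.TableInputFormat;
import org.apache.hadoop.hbase.mapreduce.TableMapReduceUtil;
import org.apache.hadoop.mapreduce.Job;

public static void submitScanJob() throws Exception {
  Configuration conf = HBaseConfiguration.create();
  Job job = Job.getInstance(conf, "scan-my-table");
  job.setJarByClass(MyMapper.class);  // MyMapper is an assumed user-defined TableMapper

  Scan scan = new Scan();
  scan.setCaching(500);        // fetch more rows per RPC for a full-table MapReduce scan
  scan.setCacheBlocks(false);  // avoid filling the block cache from a one-off scan

  // Read "my_table" with TableInputFormat, emit (ImmutableBytesWritable, Result) pairs,
  // and ship dependency jars and HBase credentials with the job.
  TableMapReduceUtil.initTableMapperJob("my_table", scan, MyMapper.class,
      ImmutableBytesWritable.class, Result.class, job,
      true, true, TableInputFormat.class);
}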

Example 13: checkSnapshotSupport

import org.apache.hadoop.conf.Configuration; // import the package/class this method depends on
/**
 * Called at startup, to verify if snapshot operation is supported, and to avoid
 * starting the master if there're snapshots present but the cleaners needed are missing.
 * Otherwise we can end up with snapshot data loss.
 * @param conf The {@link Configuration} object to use
 * @param mfs The MasterFileSystem to use
 * @throws IOException in case of file-system operation failure
 * @throws UnsupportedOperationException in case cleaners are missing and
 *         there're snapshot in the system
 */
private void checkSnapshotSupport(final Configuration conf, final MasterFileSystem mfs)
    throws IOException, UnsupportedOperationException {
  // Verify if snapshot is disabled by the user
  String enabled = conf.get(HBASE_SNAPSHOT_ENABLED);
  boolean snapshotEnabled = conf.getBoolean(HBASE_SNAPSHOT_ENABLED, false);
  boolean userDisabled = (enabled != null && enabled.trim().length() > 0 && !snapshotEnabled);

  // Extract cleaners from conf
  Set<String> hfileCleaners = new HashSet<String>();
  String[] cleaners = conf.getStrings(HFileCleaner.MASTER_HFILE_CLEANER_PLUGINS);
  if (cleaners != null) Collections.addAll(hfileCleaners, cleaners);

  Set<String> logCleaners = new HashSet<String>();
  cleaners = conf.getStrings(HConstants.HBASE_MASTER_LOGCLEANER_PLUGINS);
  if (cleaners != null) Collections.addAll(logCleaners, cleaners);

  // check if an older version of snapshot directory was present
  Path oldSnapshotDir = new Path(mfs.getRootDir(), HConstants.OLD_SNAPSHOT_DIR_NAME);
  FileSystem fs = mfs.getFileSystem();
  List<SnapshotDescription> ss = getCompletedSnapshots(new Path(rootDir, oldSnapshotDir));
  if (ss != null && !ss.isEmpty()) {
    LOG.error("Snapshots from an earlier release were found under: " + oldSnapshotDir);
    LOG.error("Please rename the directory as " + HConstants.SNAPSHOT_DIR_NAME);
  }

  // If the user has enabled the snapshot, we force the cleaners to be present
  // otherwise we still need to check if cleaners are enabled or not and verify
  // that there're no snapshot in the .snapshot folder.
  if (snapshotEnabled) {
    // Inject snapshot cleaners, if snapshot.enable is true
    hfileCleaners.add(SnapshotHFileCleaner.class.getName());
    hfileCleaners.add(HFileLinkCleaner.class.getName());
    logCleaners.add(SnapshotLogCleaner.class.getName());

    // Set cleaners conf
    conf.setStrings(HFileCleaner.MASTER_HFILE_CLEANER_PLUGINS,
      hfileCleaners.toArray(new String[hfileCleaners.size()]));
    conf.setStrings(HConstants.HBASE_MASTER_LOGCLEANER_PLUGINS,
      logCleaners.toArray(new String[logCleaners.size()]));
  } else {
    // Verify if cleaners are present
    snapshotEnabled = logCleaners.contains(SnapshotLogCleaner.class.getName()) &&
      hfileCleaners.contains(SnapshotHFileCleaner.class.getName()) &&
      hfileCleaners.contains(HFileLinkCleaner.class.getName());

    // Warn if the cleaners are enabled but the snapshot.enabled property is false/not set.
    if (snapshotEnabled) {
      LOG.warn("Snapshot log and hfile cleaners are present in the configuration, " +
        "but the '" + HBASE_SNAPSHOT_ENABLED + "' property " +
        (userDisabled ? "is set to 'false'." : "is not set."));
    }
  }

  // Mark snapshot feature as enabled if cleaners are present and user has not disabled it.
  this.isSnapshotSupported = snapshotEnabled && !userDisabled;

  // If cleaners are not enabled, verify that there're no snapshot in the .snapshot folder
  // otherwise we end up with snapshot data loss.
  if (!snapshotEnabled) {
    LOG.info("Snapshot feature is not enabled, missing log and hfile cleaners.");
    Path snapshotDir = SnapshotDescriptionUtils.getSnapshotsDir(mfs.getRootDir());
    if (fs.exists(snapshotDir)) {
      FileStatus[] snapshots = FSUtils.listStatus(fs, snapshotDir,
        new SnapshotDescriptionUtils.CompletedSnaphotDirectoriesFilter(fs));
      if (snapshots != null) {
        LOG.error("Snapshots are present, but cleaners are not enabled.");
        checkSnapshotSupport();
      }
    }
  }
}
 
Developer ID: fengchen8086, Project: ditb, Lines of code: 82, Source: SnapshotManager.java

Example 14: testRealUserGroupAuthorizationFailure

import org.apache.hadoop.conf.Configuration; // import the package/class this method depends on
@Test
public void testRealUserGroupAuthorizationFailure() throws IOException {
  final Configuration conf = new Configuration();
  configureSuperUserIPAddresses(conf, REAL_USER_SHORT_NAME);
  conf.setStrings(DefaultImpersonationProvider.getTestProvider().
          getProxySuperuserGroupConfKey(REAL_USER_SHORT_NAME),
      "group3");
  Server server = new RPC.Builder(conf).setProtocol(TestProtocol.class)
      .setInstance(new TestImpl()).setBindAddress(ADDRESS).setPort(0)
      .setNumHandlers(2).setVerbose(false).build();
  
  refreshConf(conf);

  try {
    server.start();

    final InetSocketAddress addr = NetUtils.getConnectAddress(server);

    UserGroupInformation realUserUgi = UserGroupInformation
        .createRemoteUser(REAL_USER_NAME);

    UserGroupInformation proxyUserUgi = UserGroupInformation
        .createProxyUserForTesting(PROXY_USER_NAME, realUserUgi, GROUP_NAMES);
    String retVal = proxyUserUgi
        .doAs(new PrivilegedExceptionAction<String>() {
          @Override
          public String run() throws IOException {
            proxy = RPC.getProxy(TestProtocol.class,
                TestProtocol.versionID, addr, conf);
            String ret = proxy.aMethod();
            return ret;
          }
        });

    Assert.fail("The RPC must have failed " + retVal);
  } catch (Exception e) {
    e.printStackTrace();
  } finally {
    server.stop();
    if (proxy != null) {
      RPC.stopProxy(proxy);
    }
  }
}
 
Developer ID: nucypher, Project: hadoop-oss, Lines of code: 45, Source: TestDoAsEffectiveUser.java

Example 15: initTableReducerJob

import org.apache.hadoop.conf.Configuration; // import the package/class this method depends on
/**
 * Use this before submitting a TableReduce job. It will
 * appropriately set up the JobConf.
 *
 * @param table  The output table.
 * @param reducer  The reducer class to use.
 * @param job  The current job to adjust.  Make sure the passed job is
 * carrying all necessary HBase configuration.
 * @param partitioner  Partitioner to use. Pass <code>null</code> to use
 * default partitioner.
 * @param quorumAddress Distant cluster to write to; default is null for
 * output to the cluster that is designated in <code>hbase-site.xml</code>.
 * Set this String to the zookeeper ensemble of an alternate remote cluster
 * when you would have the reduce write a cluster that is other than the
 * default; e.g. copying tables between clusters, the source would be
 * designated by <code>hbase-site.xml</code> and this param would have the
 * ensemble address of the remote cluster.  The format to pass is particular.
 * Pass <code> &lt;hbase.zookeeper.quorum&gt;:&lt;
 *             hbase.zookeeper.client.port&gt;:&lt;zookeeper.znode.parent&gt;
 * </code> such as <code>server,server2,server3:2181:/hbase</code>.
 * @param serverClass redefined hbase.regionserver.class
 * @param serverImpl redefined hbase.regionserver.impl
 * @param addDependencyJars upload HBase jars and jars for any of the configured
 *           job classes via the distributed cache (tmpjars).
 * @throws IOException When determining the region count fails.
 */
public static void initTableReducerJob(String table,
  Class<? extends TableReducer> reducer, Job job,
  Class partitioner, String quorumAddress, String serverClass,
  String serverImpl, boolean addDependencyJars) throws IOException {

  Configuration conf = job.getConfiguration();
  HBaseConfiguration.merge(conf, HBaseConfiguration.create(conf));
  job.setOutputFormatClass(TableOutputFormat.class);
  if (reducer != null) job.setReducerClass(reducer);
  conf.set(TableOutputFormat.OUTPUT_TABLE, table);
  conf.setStrings("io.serializations", conf.get("io.serializations"),
      MutationSerialization.class.getName(), ResultSerialization.class.getName());
  // If passed a quorum/ensemble address, pass it on to TableOutputFormat.
  if (quorumAddress != null) {
    // Calling this will validate the format
    ZKConfig.validateClusterKey(quorumAddress);
    conf.set(TableOutputFormat.QUORUM_ADDRESS,quorumAddress);
  }
  if (serverClass != null && serverImpl != null) {
    conf.set(TableOutputFormat.REGION_SERVER_CLASS, serverClass);
    conf.set(TableOutputFormat.REGION_SERVER_IMPL, serverImpl);
  }
  job.setOutputKeyClass(ImmutableBytesWritable.class);
  job.setOutputValueClass(Writable.class);
  if (partitioner == HRegionPartitioner.class) {
    job.setPartitionerClass(HRegionPartitioner.class);
    int regions = MetaTableAccessor.getRegionCount(conf, TableName.valueOf(table));
    if (job.getNumReduceTasks() > regions) {
      job.setNumReduceTasks(regions);
    }
  } else if (partitioner != null) {
    job.setPartitionerClass(partitioner);
  }

  if (addDependencyJars) {
    addDependencyJars(job);
  }

  initCredentials(job);
}
 
Developer ID: fengchen8086, Project: ditb, Lines of code: 67, Source: TableMapReduceUtil.java
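
And a hypothetical caller of the reducer-side helper above, illustrating the quorumAddress format described in its Javadoc. The reducer class MyTableReducer, the table "target_table", and the ZooKeeper hosts are made-up names, not part of the original example.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.mapreduce.TableMapReduceUtil;
import org.apache.hadoop.mapreduce.Job;

public static void submitCopyJob() throws Exception {
  Configuration conf = HBaseConfiguration.create();
  Job job = Job.getInstance(conf, "copy-to-remote-cluster");
  job.setJarByClass(MyTableReducer.class);  // MyTableReducer is an assumed user-defined TableReducer

  // Write the reduce output to "target_table" on a remote cluster, identified by
  // <hbase.zookeeper.quorum>:<hbase.zookeeper.client.port>:<zookeeper.znode.parent>.
  TableMapReduceUtil.initTableReducerJob("target_table", MyTableReducer.class, job,
      null,                        // use the default partitioner
      "zk1,zk2,zk3:2181:/hbase",   // remote ensemble address, in the format described above
      null, null,                  // keep the default region server class/impl
      true);                       // upload dependency jars via the distributed cache
}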


Note: The org.apache.hadoop.conf.Configuration.setStrings method examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets were selected from open-source projects contributed by various developers, and the source code copyright remains with the original authors. Please consult the corresponding project's license before redistributing or reusing the code, and do not reproduce this article without permission.