

Java Configuration.setStrings Method Code Examples

This article collects typical usage examples of the Java method org.apache.hadoop.conf.Configuration.setStrings. If you are wondering how Configuration.setStrings is used in practice, or looking for concrete examples of it, the curated code samples below may help. You can also explore further usage examples of the enclosing class, org.apache.hadoop.conf.Configuration.


A total of 15 code examples of the Configuration.setStrings method are shown below, sorted by popularity by default.
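Before the examples, here is a minimal standalone sketch (not taken from any of the projects below; the property name example.plugins is made up for illustration) of what setStrings does: it joins its variadic String arguments into a single comma-separated property value, which getStrings later splits back into an array.

import org.apache.hadoop.conf.Configuration;

public class SetStringsSketch {
  public static void main(String[] args) {
    Configuration conf = new Configuration();

    // setStrings joins the values into one comma-separated property value.
    conf.setStrings("example.plugins", "PluginA", "PluginB", "PluginC");

    // The stored raw value is "PluginA,PluginB,PluginC".
    System.out.println(conf.get("example.plugins"));

    // getStrings splits the value back into a String array.
    for (String plugin : conf.getStrings("example.plugins")) {
      System.out.println(plugin);
    }
  }
}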

Example 1: setupBeforeClass

import org.apache.hadoop.conf.Configuration; // import the class the method depends on
@BeforeClass
public static void setupBeforeClass() throws Exception {
  Configuration conf = TEST_UTIL.getConfiguration();
  conf.setStrings(CoprocessorHost.WAL_COPROCESSOR_CONF_KEY,
      SampleRegionWALObserver.class.getName(), SampleRegionWALObserver.Legacy.class.getName());
  conf.set(CoprocessorHost.REGION_COPROCESSOR_CONF_KEY,
      SampleRegionWALObserver.class.getName());
  conf.setBoolean("dfs.support.append", true);
  conf.setInt("dfs.client.block.recovery.retries", 2);

  TEST_UTIL.startMiniCluster(1);
  Path hbaseRootDir = TEST_UTIL.getDFSCluster().getFileSystem()
      .makeQualified(new Path("/hbase"));
  LOG.info("hbase.rootdir=" + hbaseRootDir);
  FSUtils.setRootDir(conf, hbaseRootDir);
}
 
Developer ID: fengchen8086, Project: ditb, Lines: 17, Source: TestWALObserver.java

Example 2: configureSuperUserIPAddresses

import org.apache.hadoop.conf.Configuration; // import the class the method depends on
private static void configureSuperUserIPAddresses(Configuration conf,
    String superUserShortName) throws IOException {
  ArrayList<String> ipList = new ArrayList<String>();
  Enumeration<NetworkInterface> netInterfaceList = NetworkInterface
      .getNetworkInterfaces();
  while (netInterfaceList.hasMoreElements()) {
    NetworkInterface inf = netInterfaceList.nextElement();
    Enumeration<InetAddress> addrList = inf.getInetAddresses();
    while (addrList.hasMoreElements()) {
      InetAddress addr = addrList.nextElement();
      ipList.add(addr.getHostAddress());
    }
  }
  StringBuilder builder = new StringBuilder();
  for (String ip : ipList) {
    builder.append(ip);
    builder.append(',');
  }
  builder.append("127.0.1.1,");
  builder.append(InetAddress.getLocalHost().getCanonicalHostName());
  LOG.info("Local Ip addresses: " + builder.toString());
  conf.setStrings(DefaultImpersonationProvider.getTestProvider().
          getProxySuperuserIpConfKey(superUserShortName),
      builder.toString());
}
 
Developer ID: naver, Project: hadoop, Lines: 26, Source: TestDelegationTokenForProxyUser.java

Example 3: testErrorLogOnContainerExitWithCustomPattern

import org.apache.hadoop.conf.Configuration; // import the class the method depends on
@Test
public void testErrorLogOnContainerExitWithCustomPattern() throws Exception {
  Configuration conf = new Configuration();
  conf.setStrings(YarnConfiguration.NM_CONTAINER_STDERR_PATTERN,
      "{*stderr*,*log*}");
  verifyTailErrorLogOnContainerExit(conf, "/error.log", false);
}
 
Developer ID: naver, Project: hadoop, Lines: 8, Source: TestContainerLaunch.java

Example 4: testCustomPattern

import org.apache.hadoop.conf.Configuration; // import the class the method depends on
@Test
public void testCustomPattern() {
  // 1x0 1x1
  Configuration conf = new Configuration();
  conf.setStrings("test.custom." + IPC_CALLQUEUE_WRRMUX_WEIGHTS_KEY,
    "1", "1");

  mux = new WeightedRoundRobinMultiplexer(2, "test.custom", conf);
  assertEquals(mux.getAndAdvanceCurrentIndex(), 0);
  assertEquals(mux.getAndAdvanceCurrentIndex(), 1);
  assertEquals(mux.getAndAdvanceCurrentIndex(), 0);
  assertEquals(mux.getAndAdvanceCurrentIndex(), 1);

  // 1x0 3x1 2x2
  conf.setStrings("test.custom." + IPC_CALLQUEUE_WRRMUX_WEIGHTS_KEY,
    "1", "3", "2");

  mux = new WeightedRoundRobinMultiplexer(3, "test.custom", conf);

  for(int i = 0; i < 5; i++) {
    assertEquals(mux.getAndAdvanceCurrentIndex(), 0);
    assertEquals(mux.getAndAdvanceCurrentIndex(), 1);
    assertEquals(mux.getAndAdvanceCurrentIndex(), 1);
    assertEquals(mux.getAndAdvanceCurrentIndex(), 1);
    assertEquals(mux.getAndAdvanceCurrentIndex(), 2);
    assertEquals(mux.getAndAdvanceCurrentIndex(), 2);
  } // Ensure pattern repeats

}
 
Developer ID: naver, Project: hadoop, Lines: 30, Source: TestWeightedRoundRobinMultiplexer.java

Example 5: testRealUserAuthorizationSuccess

import org.apache.hadoop.conf.Configuration; // import the class the method depends on
@Test(timeout=4000)
public void testRealUserAuthorizationSuccess() throws IOException {
  final Configuration conf = new Configuration();
  configureSuperUserIPAddresses(conf, REAL_USER_SHORT_NAME);
  conf.setStrings(DefaultImpersonationProvider.getTestProvider().
          getProxySuperuserGroupConfKey(REAL_USER_SHORT_NAME),
      "group1");
  Server server = new RPC.Builder(conf).setProtocol(TestProtocol.class)
      .setInstance(new TestImpl()).setBindAddress(ADDRESS).setPort(0)
      .setNumHandlers(2).setVerbose(false).build();

  refreshConf(conf);
  try {
    server.start();

    UserGroupInformation realUserUgi = UserGroupInformation
        .createRemoteUser(REAL_USER_NAME);
    checkRemoteUgi(server, realUserUgi, conf);

    UserGroupInformation proxyUserUgi = UserGroupInformation
        .createProxyUserForTesting(PROXY_USER_NAME, realUserUgi, GROUP_NAMES);
    checkRemoteUgi(server, proxyUserUgi, conf);
  } catch (Exception e) {
    e.printStackTrace();
    Assert.fail();
  } finally {
    server.stop();
    if (proxy != null) {
      RPC.stopProxy(proxy);
    }
  }
}
 
Developer ID: nucypher, Project: hadoop-oss, Lines: 33, Source: TestDoAsEffectiveUser.java

Example 6: setUpBeforeClass

import org.apache.hadoop.conf.Configuration; // import the class the method depends on
@BeforeClass
public static void setUpBeforeClass() throws Exception {
  Configuration conf = HBaseConfiguration.create();
  conf.setStrings(CoprocessorHost.USER_REGION_COPROCESSOR_CONF_KEY,
      TestCoprocessor.class.getName());
  util = new HBaseTestingUtility(conf);
  util.startMiniCluster();
}
 
Developer ID: fengchen8086, Project: ditb, Lines: 9, Source: TestRegionObserverBypass.java

Example 7: testInstantiateIllegalMux

import org.apache.hadoop.conf.Configuration; // import the class the method depends on
@Test(expected=IllegalArgumentException.class)
public void testInstantiateIllegalMux() {
  Configuration conf = new Configuration();
  conf.setStrings("namespace." + IPC_CALLQUEUE_WRRMUX_WEIGHTS_KEY,
    "1", "2", "3");

  // ask for 3 weights with 2 queues
  mux = new WeightedRoundRobinMultiplexer(2, "namespace", conf);
}
 
Developer ID: nucypher, Project: hadoop-oss, Lines: 10, Source: TestWeightedRoundRobinMultiplexer.java

Example 8: testLegalInstantiation

import org.apache.hadoop.conf.Configuration; // import the class the method depends on
@Test
public void testLegalInstantiation() {
  Configuration conf = new Configuration();
  conf.setStrings("namespace." + IPC_CALLQUEUE_WRRMUX_WEIGHTS_KEY,
    "1", "2", "3");

  // ask for 3 weights with 3 queues
  mux = new WeightedRoundRobinMultiplexer(3, "namespace.", conf);
}
 
Developer ID: nucypher, Project: hadoop-oss, Lines: 10, Source: TestWeightedRoundRobinMultiplexer.java

Example 9: runMiniBenchmarkWithDelegationToken

import org.apache.hadoop.conf.Configuration; // import the class the method depends on
/**
 * Run MiniBenchmark using delegation token authentication.
 * 
 * @param conf - configuration
 * @param count - connect this many times
 * @param keytabKey - key for keytab file in the configuration
 * @param userNameKey - key for user name in the configuration
 * @return average time to connect
 * @throws IOException
 */
long runMiniBenchmarkWithDelegationToken(Configuration conf,
                                         int count,
                                         String keytabKey,
                                         String userNameKey)
throws IOException {
  // get login information
  String user = System.getProperty("user.name");
  if(userNameKey != null)
    user = conf.get(userNameKey, user);
  String keytabFile = null;
  if(keytabKey != null)
    keytabFile = conf.get(keytabKey, keytabFile);
  MiniServer miniServer = null;
  UserGroupInformation.setConfiguration(conf);
  String shortUserName =
    UserGroupInformation.createRemoteUser(user).getShortUserName();
  try {
    conf.setStrings(DefaultImpersonationProvider.getTestProvider().
            getProxySuperuserGroupConfKey(shortUserName), GROUP_NAME_1);
    configureSuperUserIPAddresses(conf, shortUserName);
    // start the server
    miniServer = new MiniServer(conf, user, keytabFile);
    InetSocketAddress addr = miniServer.getAddress();

    connectToServerAndGetDelegationToken(conf, addr);
    // connect to the server count times
    setLoggingLevel(logLevel);
    long elapsed = 0L;
    for(int idx = 0; idx < count; idx ++) {
      elapsed += connectToServerUsingDelegationToken(conf, addr);
    }
    return elapsed;
  } finally {
    if(miniServer != null) miniServer.stop();
  }
}
 
Developer ID: nucypher, Project: hadoop-oss, Lines: 47, Source: MiniRPCBenchmark.java

Example 10: getConfigurationWithoutSharedEdits

import org.apache.hadoop.conf.Configuration; // import the class the method depends on
/**
 * Clone the supplied configuration but remove the shared edits dirs.
 *
 * @param conf Supplies the original configuration.
 * @return Cloned configuration without the shared edit dirs.
 * @throws IOException on failure to generate the configuration.
 */
private static Configuration getConfigurationWithoutSharedEdits(
    Configuration conf)
    throws IOException {
  List<URI> editsDirs = FSNamesystem.getNamespaceEditsDirs(conf, false);
  String editsDirsString = Joiner.on(",").join(editsDirs);

  Configuration confWithoutShared = new Configuration(conf);
  confWithoutShared.unset(DFSConfigKeys.DFS_NAMENODE_SHARED_EDITS_DIR_KEY);
  confWithoutShared.setStrings(DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_KEY,
      editsDirsString);
  return confWithoutShared;
}
 
Developer ID: naver, Project: hadoop, Lines: 20, Source: NameNode.java

Example 11: testPermMask

import org.apache.hadoop.conf.Configuration; // import the class the method depends on
@Test
public void testPermMask() throws Exception {

  Configuration conf = HBaseConfiguration.create();
  FileSystem fs = FileSystem.get(conf);

  // default fs permission
  FsPermission defaultFsPerm = FSUtils.getFilePermissions(fs, conf,
      HConstants.DATA_FILE_UMASK_KEY);
  // 'hbase.data.umask.enable' is false. We will get default fs permission.
  assertEquals(FsPermission.getFileDefault(), defaultFsPerm);

  conf.setBoolean(HConstants.ENABLE_DATA_FILE_UMASK, true);
  // first check that we don't crash if we don't have perms set
  FsPermission defaultStartPerm = FSUtils.getFilePermissions(fs, conf,
      HConstants.DATA_FILE_UMASK_KEY);
  // default 'hbase.data.umask' is 000, and this umask will be used when
  // 'hbase.data.umask.enable' is true.
  // Therefore we will not get the real fs default in this case.
  // Instead we will get the starting point FULL_RWX_PERMISSIONS
  assertEquals(new FsPermission(FSUtils.FULL_RWX_PERMISSIONS), defaultStartPerm);

  conf.setStrings(HConstants.DATA_FILE_UMASK_KEY, "077");
  // now check that we get the right perms
  FsPermission filePerm = FSUtils.getFilePermissions(fs, conf,
      HConstants.DATA_FILE_UMASK_KEY);
  assertEquals(new FsPermission("700"), filePerm);

  // then that the correct file is created
  Path p = new Path("target" + File.separator + UUID.randomUUID().toString());
  try {
    FSDataOutputStream out = FSUtils.create(conf, fs, p, filePerm, null);
    out.close();
    FileStatus stat = fs.getFileStatus(p);
    assertEquals(new FsPermission("700"), stat.getPermission());
    // and then cleanup
  } finally {
    fs.delete(p, true);
  }
}
 
Developer ID: fengchen8086, Project: ditb, Lines: 41, Source: TestFSUtils.java

Example 12: initTableMapperJob

import org.apache.hadoop.conf.Configuration; // import the class the method depends on
/**
 * Use this before submitting a TableMap job. It will appropriately set up
 * the job.
 *
 * @param table  The table name to read from.
 * @param scan  The scan instance with the columns, time range etc.
 * @param mapper  The mapper class to use.
 * @param outputKeyClass  The class of the output key.
 * @param outputValueClass  The class of the output value.
 * @param job  The current job to adjust.  Make sure the passed job is
 * carrying all necessary HBase configuration.
 * @param addDependencyJars upload HBase jars and jars for any of the configured
 *           job classes via the distributed cache (tmpjars).
 * @param initCredentials whether to initialize hbase auth credentials for the job
 * @param inputFormatClass the input format
 * @throws IOException When setting up the details fails.
 */
public static void initTableMapperJob(String table, Scan scan,
    Class<? extends TableMapper> mapper,
    Class<?> outputKeyClass,
    Class<?> outputValueClass, Job job,
    boolean addDependencyJars, boolean initCredentials,
    Class<? extends InputFormat> inputFormatClass)
throws IOException {
  job.setInputFormatClass(inputFormatClass);
  if (outputValueClass != null) job.setMapOutputValueClass(outputValueClass);
  if (outputKeyClass != null) job.setMapOutputKeyClass(outputKeyClass);
  job.setMapperClass(mapper);
  if (Put.class.equals(outputValueClass)) {
    job.setCombinerClass(PutCombiner.class);
  }
  Configuration conf = job.getConfiguration();
  HBaseConfiguration.merge(conf, HBaseConfiguration.create(conf));
  conf.set(TableInputFormat.INPUT_TABLE, table);
  conf.set(TableInputFormat.SCAN, convertScanToString(scan));
  conf.setStrings("io.serializations", conf.get("io.serializations"),
      MutationSerialization.class.getName(), ResultSerialization.class.getName(),
      KeyValueSerialization.class.getName());
  if (addDependencyJars) {
    addDependencyJars(job);
  }
  if (initCredentials) {
    initCredentials(job);
  }
}
 
Developer ID: fengchen8086, Project: ditb, Lines: 46, Source: TableMapReduceUtil.java

Example 13: checkSnapshotSupport

import org.apache.hadoop.conf.Configuration; // import the class the method depends on
/**
 * Called at startup, to verify if snapshot operation is supported, and to avoid
 * starting the master if there're snapshots present but the cleaners needed are missing.
 * Otherwise we can end up with snapshot data loss.
 * @param conf The {@link Configuration} object to use
 * @param mfs The MasterFileSystem to use
 * @throws IOException in case of file-system operation failure
 * @throws UnsupportedOperationException in case cleaners are missing and
 *         there're snapshot in the system
 */
private void checkSnapshotSupport(final Configuration conf, final MasterFileSystem mfs)
    throws IOException, UnsupportedOperationException {
  // Verify if snapshot is disabled by the user
  String enabled = conf.get(HBASE_SNAPSHOT_ENABLED);
  boolean snapshotEnabled = conf.getBoolean(HBASE_SNAPSHOT_ENABLED, false);
  boolean userDisabled = (enabled != null && enabled.trim().length() > 0 && !snapshotEnabled);

  // Extract cleaners from conf
  Set<String> hfileCleaners = new HashSet<String>();
  String[] cleaners = conf.getStrings(HFileCleaner.MASTER_HFILE_CLEANER_PLUGINS);
  if (cleaners != null) Collections.addAll(hfileCleaners, cleaners);

  Set<String> logCleaners = new HashSet<String>();
  cleaners = conf.getStrings(HConstants.HBASE_MASTER_LOGCLEANER_PLUGINS);
  if (cleaners != null) Collections.addAll(logCleaners, cleaners);

  // check if an older version of snapshot directory was present
  Path oldSnapshotDir = new Path(mfs.getRootDir(), HConstants.OLD_SNAPSHOT_DIR_NAME);
  FileSystem fs = mfs.getFileSystem();
  List<SnapshotDescription> ss = getCompletedSnapshots(new Path(rootDir, oldSnapshotDir));
  if (ss != null && !ss.isEmpty()) {
    LOG.error("Snapshots from an earlier release were found under: " + oldSnapshotDir);
    LOG.error("Please rename the directory as " + HConstants.SNAPSHOT_DIR_NAME);
  }

  // If the user has enabled the snapshot, we force the cleaners to be present
  // otherwise we still need to check if cleaners are enabled or not and verify
  // that there're no snapshot in the .snapshot folder.
  if (snapshotEnabled) {
    // Inject snapshot cleaners, if snapshot.enable is true
    hfileCleaners.add(SnapshotHFileCleaner.class.getName());
    hfileCleaners.add(HFileLinkCleaner.class.getName());
    logCleaners.add(SnapshotLogCleaner.class.getName());

    // Set cleaners conf
    conf.setStrings(HFileCleaner.MASTER_HFILE_CLEANER_PLUGINS,
      hfileCleaners.toArray(new String[hfileCleaners.size()]));
    conf.setStrings(HConstants.HBASE_MASTER_LOGCLEANER_PLUGINS,
      logCleaners.toArray(new String[logCleaners.size()]));
  } else {
    // Verify if cleaners are present
    snapshotEnabled = logCleaners.contains(SnapshotLogCleaner.class.getName()) &&
      hfileCleaners.contains(SnapshotHFileCleaner.class.getName()) &&
      hfileCleaners.contains(HFileLinkCleaner.class.getName());

    // Warn if the cleaners are enabled but the snapshot.enabled property is false/not set.
    if (snapshotEnabled) {
      LOG.warn("Snapshot log and hfile cleaners are present in the configuration, " +
        "but the '" + HBASE_SNAPSHOT_ENABLED + "' property " +
        (userDisabled ? "is set to 'false'." : "is not set."));
    }
  }

  // Mark snapshot feature as enabled if cleaners are present and user has not disabled it.
  this.isSnapshotSupported = snapshotEnabled && !userDisabled;

  // If cleaners are not enabled, verify that there're no snapshot in the .snapshot folder
  // otherwise we end up with snapshot data loss.
  if (!snapshotEnabled) {
    LOG.info("Snapshot feature is not enabled, missing log and hfile cleaners.");
    Path snapshotDir = SnapshotDescriptionUtils.getSnapshotsDir(mfs.getRootDir());
    if (fs.exists(snapshotDir)) {
      FileStatus[] snapshots = FSUtils.listStatus(fs, snapshotDir,
        new SnapshotDescriptionUtils.CompletedSnaphotDirectoriesFilter(fs));
      if (snapshots != null) {
        LOG.error("Snapshots are present, but cleaners are not enabled.");
        checkSnapshotSupport();
      }
    }
  }
}
 
Developer ID: fengchen8086, Project: ditb, Lines: 82, Source: SnapshotManager.java

Example 14: testRealUserGroupAuthorizationFailure

import org.apache.hadoop.conf.Configuration; // import the class the method depends on
@Test
public void testRealUserGroupAuthorizationFailure() throws IOException {
  final Configuration conf = new Configuration();
  configureSuperUserIPAddresses(conf, REAL_USER_SHORT_NAME);
  conf.setStrings(DefaultImpersonationProvider.getTestProvider().
          getProxySuperuserGroupConfKey(REAL_USER_SHORT_NAME),
      "group3");
  Server server = new RPC.Builder(conf).setProtocol(TestProtocol.class)
      .setInstance(new TestImpl()).setBindAddress(ADDRESS).setPort(0)
      .setNumHandlers(2).setVerbose(false).build();
  
  refreshConf(conf);

  try {
    server.start();

    final InetSocketAddress addr = NetUtils.getConnectAddress(server);

    UserGroupInformation realUserUgi = UserGroupInformation
        .createRemoteUser(REAL_USER_NAME);

    UserGroupInformation proxyUserUgi = UserGroupInformation
        .createProxyUserForTesting(PROXY_USER_NAME, realUserUgi, GROUP_NAMES);
    String retVal = proxyUserUgi
        .doAs(new PrivilegedExceptionAction<String>() {
          @Override
          public String run() throws IOException {
            proxy = RPC.getProxy(TestProtocol.class,
                TestProtocol.versionID, addr, conf);
            String ret = proxy.aMethod();
            return ret;
          }
        });

    Assert.fail("The RPC must have failed " + retVal);
  } catch (Exception e) {
    e.printStackTrace();
  } finally {
    server.stop();
    if (proxy != null) {
      RPC.stopProxy(proxy);
    }
  }
}
 
Developer ID: nucypher, Project: hadoop-oss, Lines: 45, Source: TestDoAsEffectiveUser.java

Example 15: initTableReducerJob

import org.apache.hadoop.conf.Configuration; // import the class the method depends on
/**
 * Use this before submitting a TableReduce job. It will
 * appropriately set up the JobConf.
 *
 * @param table  The output table.
 * @param reducer  The reducer class to use.
 * @param job  The current job to adjust.  Make sure the passed job is
 * carrying all necessary HBase configuration.
 * @param partitioner  Partitioner to use. Pass <code>null</code> to use
 * default partitioner.
 * @param quorumAddress Distant cluster to write to; default is null for
 * output to the cluster that is designated in <code>hbase-site.xml</code>.
 * Set this String to the zookeeper ensemble of an alternate remote cluster
 * when you would have the reduce write a cluster that is other than the
 * default; e.g. copying tables between clusters, the source would be
 * designated by <code>hbase-site.xml</code> and this param would have the
 * ensemble address of the remote cluster.  The format to pass is particular.
 * Pass <code> &lt;hbase.zookeeper.quorum&gt;:&lt;
 *             hbase.zookeeper.client.port&gt;:&lt;zookeeper.znode.parent&gt;
 * </code> such as <code>server,server2,server3:2181:/hbase</code>.
 * @param serverClass redefined hbase.regionserver.class
 * @param serverImpl redefined hbase.regionserver.impl
 * @param addDependencyJars upload HBase jars and jars for any of the configured
 *           job classes via the distributed cache (tmpjars).
 * @throws IOException When determining the region count fails.
 */
public static void initTableReducerJob(String table,
  Class<? extends TableReducer> reducer, Job job,
  Class partitioner, String quorumAddress, String serverClass,
  String serverImpl, boolean addDependencyJars) throws IOException {

  Configuration conf = job.getConfiguration();
  HBaseConfiguration.merge(conf, HBaseConfiguration.create(conf));
  job.setOutputFormatClass(TableOutputFormat.class);
  if (reducer != null) job.setReducerClass(reducer);
  conf.set(TableOutputFormat.OUTPUT_TABLE, table);
  conf.setStrings("io.serializations", conf.get("io.serializations"),
      MutationSerialization.class.getName(), ResultSerialization.class.getName());
  // If passed a quorum/ensemble address, pass it on to TableOutputFormat.
  if (quorumAddress != null) {
    // Calling this will validate the format
    ZKConfig.validateClusterKey(quorumAddress);
    conf.set(TableOutputFormat.QUORUM_ADDRESS,quorumAddress);
  }
  if (serverClass != null && serverImpl != null) {
    conf.set(TableOutputFormat.REGION_SERVER_CLASS, serverClass);
    conf.set(TableOutputFormat.REGION_SERVER_IMPL, serverImpl);
  }
  job.setOutputKeyClass(ImmutableBytesWritable.class);
  job.setOutputValueClass(Writable.class);
  if (partitioner == HRegionPartitioner.class) {
    job.setPartitionerClass(HRegionPartitioner.class);
    int regions = MetaTableAccessor.getRegionCount(conf, TableName.valueOf(table));
    if (job.getNumReduceTasks() > regions) {
      job.setNumReduceTasks(regions);
    }
  } else if (partitioner != null) {
    job.setPartitionerClass(partitioner);
  }

  if (addDependencyJars) {
    addDependencyJars(job);
  }

  initCredentials(job);
}
 
Developer ID: fengchen8086, Project: ditb, Lines: 67, Source: TableMapReduceUtil.java


Note: The org.apache.hadoop.conf.Configuration.setStrings method examples in this article were compiled by 純淨天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets were selected from open-source projects contributed by their respective developers; copyright of the source code remains with the original authors. Please consult each project's License before distributing or using the code, and do not repost without permission.