

Java Configuration Class Code Examples

This article compiles typical usage examples of the Java class org.apache.hadoop.conf.Configuration. If you are wondering what the Configuration class is for, how to use it, or what real-world usage looks like, the curated class examples below should help.


The Configuration class belongs to the org.apache.hadoop.conf package. The sections below present 15 code examples of the Configuration class, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Java code examples.
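Before the examples, here is a minimal, self-contained sketch of the basic pattern they all build on: create a Configuration, set typed values, and read them back with defaults. The keys used here are hypothetical, chosen purely for illustration.

import org.apache.hadoop.conf.Configuration;

public class ConfigurationBasics {
  public static void main(String[] args) {
    // By default a new Configuration loads core-default.xml and core-site.xml
    // from the classpath.
    Configuration conf = new Configuration();

    // Typed setters; all three keys are made up for this sketch.
    conf.set("example.name", "demo");
    conf.setInt("example.buffer.size", 4096);
    conf.setBoolean("example.feature.enabled", true);

    // Typed getters; the second argument is the default returned when the
    // key is absent.
    String name = conf.get("example.name", "unset");
    int bufferSize = conf.getInt("example.buffer.size", 1024);
    boolean enabled = conf.getBoolean("example.feature.enabled", false);

    System.out.println(name + " " + bufferSize + " " + enabled);
  }
}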

Example 1: createDBRecordReader

import org.apache.hadoop.conf.Configuration; // import the required package/class
/** {@inheritDoc} */
@Override
protected RecordReader<LongWritable, T> createDBRecordReader(
    DBInputSplit split, Configuration conf) throws IOException {

  DBConfiguration dbConf = getDBConf();
  Class<T> inputClass = (Class<T>) (dbConf.getInputClass());
  String dbProductName = getDBProductName();
  LOG.debug("Creating db record reader for db product: " + dbProductName);

  try {
    return new SQLServerDBRecordReader<T>(split, inputClass,
        conf, getConnection(), dbConf, dbConf.getInputConditions(),
        dbConf.getInputFieldNames(), dbConf.getInputTableName(),
        dbProductName);
  } catch (SQLException ex) {
    throw new IOException(ex);
  }
}
 
Developer ID: aliyun, Project: aliyun-maxcompute-data-collectors, Lines of code: 20, Source: SQLServerDBInputFormat.java
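As a rough sketch of how the Configuration handed to createDBRecordReader is usually populated, the standard DBConfiguration.configureDB helper stores the JDBC driver, URL, and credentials in the job configuration. The driver class, URL, and credentials below are placeholders.

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.lib.db.DBConfiguration;

public class DbJobSetupSketch {
  public static void main(String[] args) throws IOException {
    Configuration conf = new Configuration();
    // Stores the driver class, connection URL, and credentials under the
    // mapreduce.jdbc.* keys that DBInputFormat implementations read back.
    DBConfiguration.configureDB(conf,
        "com.microsoft.sqlserver.jdbc.SQLServerDriver", // placeholder driver
        "jdbc:sqlserver://localhost:1433",              // placeholder URL
        "user", "password");                            // placeholder credentials
    Job job = Job.getInstance(conf, "db-import");
    System.out.println(job.getJobName());
  }
}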

Example 2: open

import org.apache.hadoop.conf.Configuration; // import the required package/class
protected void open(Path dstPath, CompressionCodec codeC,
    CompressionType compType, Configuration conf, FileSystem hdfs)
        throws IOException {
  if (useRawLocalFileSystem) {
    if (hdfs instanceof LocalFileSystem) {
      hdfs = ((LocalFileSystem) hdfs).getRaw();
    } else {
      logger.warn("useRawLocalFileSystem is set to true but file system " +
          "is not of type LocalFileSystem: " + hdfs.getClass().getName());
    }
  }
  if (conf.getBoolean("hdfs.append.support", false) && hdfs.isFile(dstPath)) {
    outStream = hdfs.append(dstPath);
  } else {
    outStream = hdfs.create(dstPath);
  }
  writer = SequenceFile.createWriter(conf, outStream,
      serializer.getKeyClass(), serializer.getValueClass(), compType, codeC);

  registerCurrentStream(outStream, hdfs, dstPath);
}
 
Developer ID: Transwarp-DE, Project: Transwarp-Sample-Code, Lines of code: 23, Source: HDFSSequenceFile.java
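The conf and hdfs arguments to open are typically derived together: the FileSystem is resolved from the Configuration. A minimal sketch, assuming fs.defaultFS in the loaded configuration points at the target cluster; the destination path is a placeholder.

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class FileSystemFromConf {
  public static void main(String[] args) throws IOException {
    Configuration conf = new Configuration();
    // Resolves the file system named by fs.defaultFS (e.g. hdfs://namenode:8020).
    FileSystem hdfs = FileSystem.get(conf);
    Path dstPath = new Path("/tmp/example.seq"); // placeholder destination
    System.out.println(hdfs.getUri() + " " + dstPath);
  }
}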

Example 3: init

import org.apache.hadoop.conf.Configuration; // import the required package/class
@Override
public void init(Configuration configuration, RMProxy<T> rmProxy,
                  Class<T> protocol) {
  this.rmProxy = rmProxy;
  this.protocol = protocol;
  this.rmProxy.checkAllowedProtocols(this.protocol);
  this.conf = new YarnConfiguration(configuration);
  Collection<String> rmIds = HAUtil.getRMHAIds(conf);
  this.rmServiceIds = rmIds.toArray(new String[rmIds.size()]);
  conf.set(YarnConfiguration.RM_HA_ID, rmServiceIds[currentProxyIndex]);

  conf.setInt(CommonConfigurationKeysPublic.IPC_CLIENT_CONNECT_MAX_RETRIES_KEY,
      conf.getInt(YarnConfiguration.CLIENT_FAILOVER_RETRIES,
          YarnConfiguration.DEFAULT_CLIENT_FAILOVER_RETRIES));

  conf.setInt(CommonConfigurationKeysPublic.
      IPC_CLIENT_CONNECT_MAX_RETRIES_ON_SOCKET_TIMEOUTS_KEY,
      conf.getInt(YarnConfiguration.CLIENT_FAILOVER_RETRIES_ON_SOCKET_TIMEOUTS,
          YarnConfiguration.DEFAULT_CLIENT_FAILOVER_RETRIES_ON_SOCKET_TIMEOUTS));
}
 
Developer ID: naver, Project: hadoop, Lines of code: 21, Source: ConfiguredRMFailoverProxyProvider.java
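The retry settings that init copies come from the client-side failover keys. A minimal sketch of configuring them before wrapping the result in a YarnConfiguration; the RM ids and retry count are arbitrary example values.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.yarn.conf.YarnConfiguration;

public class FailoverConfSketch {
  public static void main(String[] args) {
    Configuration base = new Configuration();
    // Comma-separated ResourceManager ids for HA mode (placeholder ids).
    base.set(YarnConfiguration.RM_HA_IDS, "rm1,rm2");
    // Number of client failover retries (arbitrary value).
    base.setInt(YarnConfiguration.CLIENT_FAILOVER_RETRIES, 3);
    YarnConfiguration conf = new YarnConfiguration(base);
    System.out.println(conf.get(YarnConfiguration.RM_HA_IDS));
  }
}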

Example 4: getRecordWriter

import org.apache.hadoop.conf.Configuration; // import the required package/class
/**
 * Defines the output format for each record; the input is the data emitted
 * by each call to the reduce task's write method.
 */
@Override
public RecordWriter<BaseDimension, BaseStatsValueWritable> getRecordWriter(TaskAttemptContext context)
		throws IOException, InterruptedException {
	Configuration conf = context.getConfiguration();
	Connection conn = null;
	IDimensionConverter converter = new DimensionConverterImpl();
	try {
		conn = JdbcManager.getConnection(conf, GlobalConstants.WAREHOUSE_OF_REPORT);
		conn.setAutoCommit(false);
	} catch (SQLException e) {
		logger.error("獲取數據庫連接失敗", e);
		throw new IOException("獲取數據庫連接失敗", e);
	}
	return new TransformerRecordWriter(conn, conf, converter);
}
 
Developer ID: liuhaozzu, Project: big_data, Lines of code: 19, Source: TransformerOutputFormat.java
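JdbcManager is project-specific, but the pattern it presumably follows is to read the connection settings out of the job Configuration. A sketch under that assumption; all three key names are invented for illustration.

import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.SQLException;

import org.apache.hadoop.conf.Configuration;

public class JdbcFromConfSketch {
  // Hypothetical stand-in for JdbcManager.getConnection: pull the JDBC
  // settings from the Configuration, then open a plain JDBC connection.
  public static Connection getConnection(Configuration conf) throws SQLException {
    String url = conf.get("report.jdbc.url");           // hypothetical key
    String user = conf.get("report.jdbc.user");         // hypothetical key
    String password = conf.get("report.jdbc.password"); // hypothetical key
    return DriverManager.getConnection(url, user, password);
  }
}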

Example 5: initDirectoryScanner

import org.apache.hadoop.conf.Configuration; // import the required package/class
/**
 * See {@link DirectoryScanner}
 */
private synchronized void initDirectoryScanner(Configuration conf) {
  if (directoryScanner != null) {
    return;
  }
  String reason = null;
  if (conf.getInt(DFS_DATANODE_DIRECTORYSCAN_INTERVAL_KEY, 
                  DFS_DATANODE_DIRECTORYSCAN_INTERVAL_DEFAULT) < 0) {
    reason = "verification is turned off by configuration";
  } else if ("SimulatedFSDataset".equals(data.getClass().getSimpleName())) {
    reason = "verifcation is not supported by SimulatedFSDataset";
  } 
  if (reason == null) {
    directoryScanner = new DirectoryScanner(this, data, conf);
    directoryScanner.start();
  } else {
    LOG.info("Periodic Directory Tree Verification scan is disabled because " +
                 reason);
  }
}
 
Developer ID: naver, Project: hadoop, Lines of code: 23, Source: DataNode.java
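As the guard in initDirectoryScanner shows, a negative interval disables the scanner. A minimal sketch; the string literal used here matches the value of DFSConfigKeys.DFS_DATANODE_DIRECTORYSCAN_INTERVAL_KEY.

import org.apache.hadoop.conf.Configuration;

public class DisableDirectoryScanSketch {
  public static void main(String[] args) {
    Configuration conf = new Configuration();
    // A negative interval turns the periodic directory tree verification off.
    conf.setInt("dfs.datanode.directoryscan.interval", -1);
    System.out.println(conf.getInt("dfs.datanode.directoryscan.interval", 21600));
  }
}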

Example 6: testFailoverToNonExistantServiceFails

import org.apache.hadoop.conf.Configuration; // import the required package/class
@Test
public void testFailoverToNonExistantServiceFails() throws Exception {
  DummyHAService svc1 = new DummyHAService(HAServiceState.ACTIVE, svc1Addr);
  DummyHAService svc2 = spy(new DummyHAService(null, svc2Addr));
  Mockito.doThrow(new IOException("Failed to connect"))
    .when(svc2).getProxy(Mockito.<Configuration>any(),
        Mockito.anyInt());
  svc1.fencer = svc2.fencer = setupFencer(AlwaysSucceedFencer.class.getName());

  try {
    doFailover(svc1, svc2, false, false);
    fail("Failed over to a non-existant standby");
  } catch (FailoverFailedException ffe) {
    // Expected
  }

  assertEquals(HAServiceState.ACTIVE, svc1.state);
}
 
Developer ID: nucypher, Project: hadoop-oss, Lines of code: 19, Source: TestFailoverController.java

Example 7: testDelegationTokenWithFS

import org.apache.hadoop.conf.Configuration; // import the required package/class
@SuppressWarnings("deprecation")
private void testDelegationTokenWithFS(Class fileSystemClass)
  throws Exception {
  createHttpFSServer();
  Configuration conf = new Configuration();
  conf.set("fs.webhdfs.impl", fileSystemClass.getName());
  conf.set("fs.hdfs.impl.disable.cache", "true");
  URI uri = new URI("webhdfs://" +
      TestJettyHelper.getJettyURL().toURI().getAuthority());
  FileSystem fs = FileSystem.get(uri, conf);
  Token<?>[] tokens = fs.addDelegationTokens("foo", null);
  fs.close();
  Assert.assertEquals(1, tokens.length);
  fs = FileSystem.get(uri, conf);
  ((DelegationTokenRenewer.Renewable) fs).setDelegationToken(tokens[0]);
  fs.listStatus(new Path("/"));
  fs.close();
}
 
Developer ID: naver, Project: hadoop, Lines of code: 19, Source: TestHttpFSWithKerberos.java

Example 8: initializeStorageStateConf

import org.apache.hadoop.conf.Configuration; // import the required package/class
/**
 * Initialize {@link DFSConfigKeys#DFS_NAMENODE_NAME_DIR_KEY} and 
 * {@link DFSConfigKeys#DFS_DATANODE_DATA_DIR_KEY} with the specified 
 * number of directory entries. Also initialize dfs.blockreport.intervalMsec.
 */
public static Configuration initializeStorageStateConf(int numDirs,
                                                       Configuration conf) {
  StringBuffer nameNodeDirs =
    new StringBuffer(new File(TEST_ROOT_DIR, "name1").toString());
  StringBuffer dataNodeDirs =
    new StringBuffer(new File(TEST_ROOT_DIR, "data1").toString());
  for (int i = 2; i <= numDirs; i++) {
    nameNodeDirs.append("," + new File(TEST_ROOT_DIR, "name"+i));
    dataNodeDirs.append("," + new File(TEST_ROOT_DIR, "data"+i));
  }
  if (conf == null) {
    conf = new HdfsConfiguration();
  }
  conf.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY, nameNodeDirs.toString());
  conf.set(DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_KEY, nameNodeDirs.toString());
  conf.set(DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY, dataNodeDirs.toString());
  conf.setInt(DFSConfigKeys.DFS_BLOCKREPORT_INTERVAL_MSEC_KEY, 10000);
  return conf;
}
 
Developer ID: naver, Project: hadoop, Lines of code: 25, Source: UpgradeUtilities.java
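A usage sketch for the helper above, assuming the Hadoop HDFS test sources (which contain UpgradeUtilities) are on the classpath: passing null lets the method allocate a fresh HdfsConfiguration.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.UpgradeUtilities;

public class StorageStateConfSketch {
  public static void main(String[] args) {
    // Builds name1..name3 and data1..data3 under TEST_ROOT_DIR and wires
    // them into the name/edits/data directory keys.
    Configuration conf = UpgradeUtilities.initializeStorageStateConf(3, null);
    System.out.println(conf.get("dfs.namenode.name.dir"));
  }
}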

Example 9: MockContainer

import org.apache.hadoop.conf.Configuration; // import the required package/class
public MockContainer(ApplicationAttemptId appAttemptId,
    Dispatcher dispatcher, Configuration conf, String user,
    ApplicationId appId, int uniqId) throws IOException {

  this.user = user;
  this.recordFactory = RecordFactoryProvider.getRecordFactory(conf);
  this.id = BuilderUtils.newContainerId(recordFactory, appId, appAttemptId,
      uniqId);
  this.launchContext = recordFactory
      .newRecordInstance(ContainerLaunchContext.class);
  long currentTime = System.currentTimeMillis();
  this.containerTokenIdentifier =
      BuilderUtils.newContainerTokenIdentifier(BuilderUtils
        .newContainerToken(id, "127.0.0.1", 1234, user,
          BuilderUtils.newResource(1024, 1), currentTime + 10000, 123,
          "password".getBytes(), currentTime));
  this.state = ContainerState.NEW;
}
 
Developer ID: naver, Project: hadoop, Lines of code: 19, Source: MockContainer.java

Example 10: run

import org.apache.hadoop.conf.Configuration; // import the required package/class
@Override public int run(String[] args) throws Exception {
  if (args.length < 1) {
    System.err.println("Usage: Clean <output dir>");
    return -1;
  }

  Path p = new Path(args[0]);
  Configuration conf = getConf();
  TableName tableName = getTableName(conf);
  try (FileSystem fs = HFileSystem.get(conf);
      Connection conn = ConnectionFactory.createConnection(conf);
      Admin admin = conn.getAdmin()) {
    if (admin.tableExists(tableName)) {
      admin.disableTable(tableName);
      admin.deleteTable(tableName);
    }

    if (fs.exists(p)) {
      fs.delete(p, true);
    }
  }

  return 0;
}
 
Developer ID: fengchen8086, Project: ditb, Lines of code: 25, Source: IntegrationTestBigLinkedList.java
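The Configuration returned by getConf in run is injected by ToolRunner. A minimal sketch of how a Tool like this is typically launched; Clean here refers to the class containing the run method above and is assumed to be visible from the driver.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.util.ToolRunner;

public class CleanDriver {
  public static void main(String[] args) throws Exception {
    // ToolRunner parses generic options (-D key=value, -conf file, ...) into
    // the Configuration before invoking run(...).
    int exitCode = ToolRunner.run(new Configuration(), new Clean(), args);
    System.exit(exitCode);
  }
}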

Example 11: testContextSpecificServletFilterWhenInitThrowsException

import org.apache.hadoop.conf.Configuration; // import the required package/class
/**
 * Similar to the above test case, except that it uses a different API to add
 * the filter. Regression test for HADOOP-8786.
 */
@Test
public void testContextSpecificServletFilterWhenInitThrowsException()
    throws Exception {
  Configuration conf = new Configuration();
  HttpServer2 http = createTestServer(conf);
  HttpServer2.defineFilter(http.webAppContext,
      "ErrorFilter", ErrorFilter.class.getName(),
      null, null);
  try {
    http.start();
    fail("expecting exception");
  } catch (IOException e) {
    GenericTestUtils.assertExceptionContains(
        "Unable to initialize WebAppContext", e);
  }
}
 
Developer ID: naver, Project: hadoop, Lines of code: 21, Source: TestServletFilter.java

Example 12: setConf

import org.apache.hadoop.conf.Configuration; // import the required package/class
public void setConf(Configuration conf) {
  if (conf instanceof JobConf) {
    this.conf = (JobConf) conf;
  } else {
    this.conf = new JobConf(conf);
  }
  this.mapOutputFile = ReflectionUtils.newInstance(
      conf.getClass(MRConfig.TASK_LOCAL_OUTPUT_CLASS,
        MROutputFiles.class, MapOutputFile.class), conf);
  this.lDirAlloc = new LocalDirAllocator(MRConfig.LOCAL_DIR);
  // Add the static resolutions (required for JUnit test cases that simulate
  // multiple nodes on a single physical node).
  String[] hostToResolved = conf.getStrings(MRConfig.STATIC_RESOLUTIONS);
  if (hostToResolved != null) {
    for (String str : hostToResolved) {
      String name = str.substring(0, str.indexOf('='));
      String resolvedName = str.substring(str.indexOf('=') + 1);
      NetUtils.addStaticResolution(name, resolvedName);
    }
  }
}
 
Developer ID: naver, Project: hadoop, Lines of code: 23, Source: Task.java
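A sketch of the value format that the parsing loop in setConf expects: a list of hostname=address entries. The host names below are placeholders.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.mapreduce.MRConfig;

public class StaticResolutionSketch {
  public static void main(String[] args) {
    Configuration conf = new Configuration();
    // Each entry maps a simulated host name to the address it should
    // resolve to, matching the substring parsing in setConf above.
    conf.setStrings(MRConfig.STATIC_RESOLUTIONS, "node1=127.0.0.1", "node2=127.0.0.1");
    System.out.println(String.join(",", conf.getStrings(MRConfig.STATIC_RESOLUTIONS)));
  }
}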

Example 13: setupBeforeClass

import org.apache.hadoop.conf.Configuration; // import the required package/class
@BeforeClass
public static void setupBeforeClass() throws Exception {
  TEST_UTIL = new HBaseTestingUtility();
  Configuration conf = TEST_UTIL.getConfiguration();
  // Up the handlers; this test needs more than usual.
  conf.setInt(HConstants.REGION_SERVER_HIGH_PRIORITY_HANDLER_COUNT, 10);
  enableSecurity(conf);
  verifyConfiguration(conf);

  // We expect 0.98 scanning semantics
  conf.setBoolean(AccessControlConstants.CF_ATTRIBUTE_EARLY_OUT, false);

  TEST_UTIL.startMiniCluster();
  TEST_UTIL.waitTableEnabled(AccessControlLists.ACL_TABLE_NAME.getName(), 50000);

  READER = User.createUserForTesting(conf, "reader", new String[0]);
  LIMITED = User.createUserForTesting(conf, "limited", new String[0]);
  DENIED = User.createUserForTesting(conf, "denied", new String[0]);
}
 
Developer ID: fengchen8086, Project: ditb, Lines of code: 20, Source: TestAccessControlFilter.java

Example 14: getSplitRatio

import org.apache.hadoop.conf.Configuration; // import the required package/class
/**
 * Package private, for testability.
 *
 * @param nMaps The number of maps requested for.
 * @param nRecords The number of records to be copied.
 * @param conf The configuration set by users.
 * @return The number of splits each map should handle, ideally.
 */
static int getSplitRatio(int nMaps, int nRecords, Configuration conf) {
  int maxChunksIdeal = getMaxChunksIdeal(conf);
  int minRecordsPerChunk = getMinRecordsPerChunk(conf);
  int splitRatio = getSplitRatio(conf);

  if (nMaps == 1) {
    LOG.warn("nMaps == 1. Why use DynamicInputFormat?");
    return 1;
  }

  if (nMaps > maxChunksIdeal) {
    return splitRatio;
  }

  int nPickups = (int) Math.ceil((float) maxChunksIdeal / nMaps);
  int nRecordsPerChunk = (int) Math.ceil((float) nRecords / (nMaps * nPickups));

  return nRecordsPerChunk < minRecordsPerChunk ? splitRatio : nPickups;
}
 
Developer ID: HotelsDotCom, Project: circus-train, Lines of code: 28, Source: DynamicInputFormat.java
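A worked example with hypothetical values: assume maxChunksIdeal = 100 and minRecordsPerChunk = 5. With nMaps = 20 and nRecords = 10,000, nPickups = ceil(100 / 20) = 5 and nRecordsPerChunk = ceil(10,000 / (20 × 5)) = 100; since 100 ≥ 5, getSplitRatio returns 5, so each map ideally handles 5 splits.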

Example 15: testGetPassword

import org.apache.hadoop.conf.Configuration; // import the required package/class
@Test
public void testGetPassword() throws Exception {
  Configuration conf = HBaseConfiguration.create();
  conf.set(ReflectiveCredentialProviderClient.CREDENTIAL_PROVIDER_PATH, "jceks://file"
      + new File(UTIL.getDataTestDir().toUri().getPath(), "foo.jks").getCanonicalPath());
  ReflectiveCredentialProviderClient client = new ReflectiveCredentialProviderClient();
  if (client.isHadoopCredentialProviderAvailable()) {
    char[] keyPass = { 'k', 'e', 'y', 'p', 'a', 's', 's' };
    char[] storePass = { 's', 't', 'o', 'r', 'e', 'p', 'a', 's', 's' };
    client.createEntry(conf, "ssl.keypass.alias", keyPass);
    client.createEntry(conf, "ssl.storepass.alias", storePass);

    String keypass = HBaseConfiguration.getPassword(conf, "ssl.keypass.alias", null);
    assertEquals(keypass, new String(keyPass));

    String storepass = HBaseConfiguration.getPassword(conf, "ssl.storepass.alias", null);
    assertEquals(storepass, new String(storePass));
  }
}
 
Developer ID: fengchen8086, Project: ditb, Lines of code: 20, Source: TestHBaseConfiguration.java


Note: The org.apache.hadoop.conf.Configuration class examples in this article were compiled by 純淨天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by many developers, and copyright of the source code remains with the original authors. For distribution and use, refer to the license of the corresponding project; do not republish without permission.