

Java Configuration.set Method Code Examples

This article collects typical usage examples of the org.apache.hadoop.conf.Configuration.set method in Java. If you are wondering what Configuration.set does in practice and how to call it, the selected code examples below should help. You can also explore further usage examples for the enclosing class, org.apache.hadoop.conf.Configuration.


A total of 15 code examples of the Configuration.set method are shown below, sorted by popularity by default.
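Before the examples, here is a minimal, self-contained sketch of how Configuration.set pairs with Configuration.get; the key names used here are illustrative only and are not taken from the examples below.

import org.apache.hadoop.conf.Configuration;

public class ConfigurationSetExample {
  public static void main(String[] args) {
    // An empty Configuration; no running Hadoop cluster is required.
    Configuration conf = new Configuration();

    // set(name, value) stores a key/value pair in memory, overriding
    // anything loaded from resources such as core-site.xml.
    conf.set("fs.defaultFS", "hdfs://localhost:9000");
    conf.set("my.custom.flag", "true");

    // get(name) returns the stored value; get(name, default) and
    // getBoolean(name, default) fall back to the default when the key is absent.
    System.out.println(conf.get("fs.defaultFS"));                  // hdfs://localhost:9000
    System.out.println(conf.getBoolean("my.custom.flag", false));  // true
    System.out.println(conf.get("missing.key", "fallback"));       // fallback
  }
}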

Example 1: T_initialize_3

import org.apache.hadoop.conf.Configuration; // import the package/class this method depends on
@Test
public void T_initialize_3() throws SerDeException{
  MDSSerde serde = new MDSSerde();
  Configuration conf = new Configuration();
  conf.set( ColumnProjectionUtils.READ_COLUMN_NAMES_CONF_STR , "num" );

  Properties table = new Properties();
  Properties part = new Properties();
  table.setProperty( serdeConstants.LIST_COLUMNS , "str,num,arry,nest" );
  table.setProperty( serdeConstants.LIST_COLUMN_TYPES , "string,int,array<string>,struct<a:string,b:int>" );

  serde.initialize( conf , table , part );
  StructObjectInspector inspector = (StructObjectInspector)( serde.getObjectInspector() );
  List<? extends StructField> fieldList = inspector.getAllStructFieldRefs();
  assertEquals( fieldList.get(0).getFieldName() , "num" );

  assertEquals( ( fieldList.get(0).getFieldObjectInspector() instanceof PrimitiveObjectInspector ) , true );
}
 
Developer: yahoojapan, Project: multiple-dimension-spread, Lines: 19, Source: TestMDSSerde.java

Example 2: testClusterSetDatanodeHostname

import org.apache.hadoop.conf.Configuration; // import the package/class this method depends on
/** MiniDFSCluster should not clobber dfs.datanode.hostname if requested */
@Test(timeout=100000)
public void testClusterSetDatanodeHostname() throws Throwable {
  assumeTrue(System.getProperty("os.name").startsWith("Linux"));
  Configuration conf = new HdfsConfiguration();
  conf.set(DFSConfigKeys.DFS_DATANODE_HOST_NAME_KEY, "MYHOST");
  File testDataCluster5 = new File(testDataPath, CLUSTER_5);
  String c5Path = testDataCluster5.getAbsolutePath();
  conf.set(MiniDFSCluster.HDFS_MINIDFS_BASEDIR, c5Path);
  MiniDFSCluster cluster5 = new MiniDFSCluster.Builder(conf)
    .numDataNodes(1)
    .checkDataNodeHostConfig(true)
    .build();
  try {
    assertEquals("DataNode hostname config not respected", "MYHOST",
        cluster5.getDataNodes().get(0).getDatanodeId().getHostName());
  } finally {
    MiniDFSCluster.shutdownCluster(cluster5);
  }
}
 
Developer: naver, Project: hadoop, Lines: 21, Source: TestMiniDFSCluster.java

Example 3: testAuthorityFromDefaultFS

import org.apache.hadoop.conf.Configuration; // import the package/class this method depends on
@Test
public void testAuthorityFromDefaultFS() throws Exception {
  Configuration config = new Configuration();
  String defaultFsKey = CommonConfigurationKeys.FS_DEFAULT_NAME_KEY;
  
  FileSystem fs = getVerifiedFS("myfs://host", "myfs://host.a.b:123", config);
  verifyPaths(fs, new String[]{ "myfs://" }, -1, false);

  config.set(defaultFsKey, "myfs://host");
  verifyPaths(fs, new String[]{ "myfs://" }, -1, true);

  config.set(defaultFsKey, "myfs2://host");
  verifyPaths(fs, new String[]{ "myfs://" }, -1, false);

  config.set(defaultFsKey, "myfs://host:123");
  verifyPaths(fs, new String[]{ "myfs://" }, -1, true);

  config.set(defaultFsKey, "myfs://host:456");
  verifyPaths(fs, new String[]{ "myfs://" }, -1, false);
}
 
Developer: nucypher, Project: hadoop-oss, Lines: 21, Source: TestFileSystemCanonicalization.java

Example 4: refreshCallQueue

import org.apache.hadoop.conf.Configuration; // import the package/class this method depends on
public int refreshCallQueue() throws IOException {
  // Get the current configuration
  Configuration conf = getConf();
  
  // for security authorization
  // server principal for this call   
  // should be NN's one.
  conf.set(CommonConfigurationKeys.HADOOP_SECURITY_SERVICE_USER_NAME_KEY, 
      conf.get(DFSConfigKeys.DFS_NAMENODE_KERBEROS_PRINCIPAL_KEY, ""));

  DistributedFileSystem dfs = getDFS();
  URI dfsUri = dfs.getUri();
  boolean isHaEnabled = HAUtil.isLogicalUri(conf, dfsUri);

  if (isHaEnabled) {
    // Run refreshCallQueue for all NNs if HA is enabled
    String nsId = dfsUri.getHost();
    List<ProxyAndInfo<RefreshCallQueueProtocol>> proxies =
        HAUtil.getProxiesForAllNameNodesInNameservice(conf, nsId,
            RefreshCallQueueProtocol.class);
    for (ProxyAndInfo<RefreshCallQueueProtocol> proxy : proxies) {
      proxy.getProxy().refreshCallQueue();
      System.out.println("Refresh call queue successful for "
          + proxy.getAddress());
    }
  } else {
    // Create the client
    RefreshCallQueueProtocol refreshProtocol =
        NameNodeProxies.createProxy(conf, FileSystem.getDefaultUri(conf),
            RefreshCallQueueProtocol.class).getProxy();

    // Refresh the call queue
    refreshProtocol.refreshCallQueue();
    System.out.println("Refresh call queue successful");
  }

  return 0;
}
 
Developer: naver, Project: hadoop, Lines: 39, Source: DFSAdmin.java

Example 5: testDeprecatedUmask

import org.apache.hadoop.conf.Configuration; // import the package/class this method depends on
public void testDeprecatedUmask() {
  Configuration conf = new Configuration();
  conf.set(FsPermission.DEPRECATED_UMASK_LABEL, "302"); // 302 = 0456
  FsPermission umask = FsPermission.getUMask(conf);

  assertEquals(0456, umask.toShort());
}
 
Developer: naver, Project: hadoop, Lines: 8, Source: TestFsPermission.java

Example 6: getHttpFSFileSystem

import org.apache.hadoop.conf.Configuration; // import the package/class this method depends on
protected FileSystem getHttpFSFileSystem() throws Exception {
  Configuration conf = new Configuration();
  conf.set("fs.webhdfs.impl", getFileSystemClass().getName());
  URI uri = new URI(getScheme() + "://" +
                    TestJettyHelper.getJettyURL().toURI().getAuthority());
  return FileSystem.get(uri, conf);
}
 
Developer: naver, Project: hadoop, Lines: 8, Source: BaseTestHttpFSWith.java

Example 7: testCommitter

import org.apache.hadoop.conf.Configuration; // import the package/class this method depends on
@SuppressWarnings("unchecked")
public void testCommitter() throws Exception {
  Job job = Job.getInstance();
  FileOutputFormat.setOutputPath(job, outDir);
  Configuration conf = job.getConfiguration();
  conf.set(MRJobConfig.TASK_ATTEMPT_ID, attempt);
  JobContext jContext = new JobContextImpl(conf, taskID.getJobID());
  TaskAttemptContext tContext = new TaskAttemptContextImpl(conf, taskID);
  FileOutputCommitter committer = new FileOutputCommitter(outDir, tContext);

  // setup
  committer.setupJob(jContext);
  committer.setupTask(tContext);

  // write output
  TextOutputFormat theOutputFormat = new TextOutputFormat();
  RecordWriter theRecordWriter = theOutputFormat.getRecordWriter(tContext);
  writeOutput(theRecordWriter, tContext);

  // do commit
  committer.commitTask(tContext);
  committer.commitJob(jContext);

  // validate output
  File expectedFile = new File(new Path(outDir, partFile).toString());
  StringBuffer expectedOutput = new StringBuffer();
  expectedOutput.append(key1).append('\t').append(val1).append("\n");
  expectedOutput.append(val1).append("\n");
  expectedOutput.append(val2).append("\n");
  expectedOutput.append(key2).append("\n");
  expectedOutput.append(key1).append("\n");
  expectedOutput.append(key2).append('\t').append(val2).append("\n");
  String output = UtilsForTests.slurp(expectedFile);
  assertEquals(output, expectedOutput.toString());
  FileUtil.fullyDelete(new File(outDir.toString()));
}
 
Developer: naver, Project: hadoop, Lines: 37, Source: TestMRCJCFileOutputCommitter.java

Example 8: FileSystemPlugin

import org.apache.hadoop.conf.Configuration; // import the package/class this method depends on
public FileSystemPlugin(FileSystemConfig config, DrillbitContext context, String name) throws ExecutionSetupException{
  try {
    this.config = config;
    this.context = context;

    fsConf = new Configuration();
    fsConf.set(FileSystem.FS_DEFAULT_NAME_KEY, config.connection);
    fsConf.set("fs.classpath.impl", ClassPathFileSystem.class.getName());
    fsConf.set("fs.drill-local.impl", LocalSyncableFileSystem.class.getName());

    formatPluginsByName = FormatCreator.getFormatPlugins(context, fsConf, config);
    List<FormatMatcher> matchers = Lists.newArrayList();
    formatPluginsByConfig = Maps.newHashMap();
    for (FormatPlugin p : formatPluginsByName.values()) {
      matchers.add(p.getMatcher());
      formatPluginsByConfig.put(p.getConfig(), p);
    }

    final boolean noWorkspace = config.workspaces == null || config.workspaces.isEmpty();
    List<WorkspaceSchemaFactory> factories = Lists.newArrayList();
    if (!noWorkspace) {
      for (Map.Entry<String, WorkspaceConfig> space : config.workspaces.entrySet()) {
        factories.add(new WorkspaceSchemaFactory(context.getConfig(), this, space.getKey(), name, space.getValue(), matchers));
      }
    }

    // if the "default" workspace is not given add one.
    if (noWorkspace || !config.workspaces.containsKey(DEFAULT_WS_NAME)) {
      factories.add(new WorkspaceSchemaFactory(context.getConfig(), this, DEFAULT_WS_NAME, name, WorkspaceConfig.DEFAULT, matchers));
    }

    this.schemaFactory = new FileSystemSchemaFactory(name, factories);
  } catch (IOException e) {
    throw new ExecutionSetupException("Failure setting up file system plugin.", e);
  }
}
 
Developer: skhalifa, Project: QDrill, Lines: 37, Source: FileSystemPlugin.java

Example 9: testValidKeyProvider

import org.apache.hadoop.conf.Configuration; // import the package/class this method depends on
@Test
public void testValidKeyProvider() throws Exception {
  Configuration conf = new Configuration();
  String account = "testacct";
  String key = "testkey";

  conf.set(SimpleKeyProvider.KEY_ACCOUNT_KEY_PREFIX + account, key);
  conf.setClass("fs.azure.account.keyprovider." + account,
      SimpleKeyProvider.class, KeyProvider.class);
  String result = AzureNativeFileSystemStore.getAccountKeyFromConfiguration(
      account, conf);
  assertEquals(key, result);
}
 
Developer: naver, Project: hadoop, Lines: 14, Source: TestWasbUriAndConfiguration.java

Example 10: testCheckPointDirsAreTrimmed

import org.apache.hadoop.conf.Configuration; // import the package/class this method depends on
/**
 * Test dfs.namenode.checkpoint.dir and dfs.namenode.checkpoint.edits.dir
 * should tolerate white space between values.
 */
@Test
public void testCheckPointDirsAreTrimmed() throws Exception {
  MiniDFSCluster cluster = null;
  SecondaryNameNode secondary = null;
  File checkpointNameDir1 = new File(base_dir, "chkptName1");
  File checkpointEditsDir1 = new File(base_dir, "chkptEdits1");
  File checkpointNameDir2 = new File(base_dir, "chkptName2");
  File checkpointEditsDir2 = new File(base_dir, "chkptEdits2");
  File nameDir = new File(base_dir, "name1");
  String whiteSpace = "  \n   \n  ";
  Configuration conf = new HdfsConfiguration();
  conf.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY, nameDir.getPath());
  conf.setStrings(DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_DIR_KEY, whiteSpace
      + checkpointNameDir1.getPath() + whiteSpace, whiteSpace
      + checkpointNameDir2.getPath() + whiteSpace);
  conf.setStrings(DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_EDITS_DIR_KEY,
      whiteSpace + checkpointEditsDir1.getPath() + whiteSpace, whiteSpace
          + checkpointEditsDir2.getPath() + whiteSpace);
  cluster = new MiniDFSCluster.Builder(conf).manageNameDfsDirs(false)
      .numDataNodes(3).build();
  try {
    cluster.waitActive();
    secondary = startSecondaryNameNode(conf);
    secondary.doCheckpoint();
    assertTrue(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY + " must be trimmed ",
        checkpointNameDir1.exists());
    assertTrue(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY + " must be trimmed ",
        checkpointNameDir2.exists());
    assertTrue(DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_EDITS_DIR_KEY
        + " must be trimmed ", checkpointEditsDir1.exists());
    assertTrue(DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_EDITS_DIR_KEY
        + " must be trimmed ", checkpointEditsDir2.exists());
  } finally {
    secondary.shutdown();
    cluster.shutdown();
  }
}
 
Developer: naver, Project: hadoop, Lines: 42, Source: TestNameEditsConfigs.java

Example 11: startServer

import org.apache.hadoop.conf.Configuration; // import the package/class this method depends on
public void startServer(boolean isTestSessionCookie) throws Exception {
  Configuration conf = new Configuration();
  if (isTestSessionCookie) {
    conf.set(HttpServer2.FILTER_INITIALIZER_PROPERTY,
          DummyFilterInitializer.class.getName());
  } else {
    conf.set(HttpServer2.FILTER_INITIALIZER_PROPERTY,
          Dummy2FilterInitializer.class.getName());
  }

  File base = new File(BASEDIR);
  FileUtil.fullyDelete(base);
  base.mkdirs();
  keystoresDir = new File(BASEDIR).getAbsolutePath();
  sslConfDir = KeyStoreTestUtil.getClasspathDir(TestSSLHttpServer.class);

  KeyStoreTestUtil.setupSSLConfig(keystoresDir, sslConfDir, conf, false);
  Configuration sslConf = KeyStoreTestUtil.getSslConfig();

  server = new HttpServer2.Builder()
          .setName("test")
          .addEndpoint(new URI("http://localhost"))
          .addEndpoint(new URI("https://localhost"))
          .setConf(conf)
          .keyPassword(sslConf.get("ssl.server.keystore.keypassword"))
          .keyStore(sslConf.get("ssl.server.keystore.location"),
                  sslConf.get("ssl.server.keystore.password"),
                  sslConf.get("ssl.server.keystore.type", "jks"))
          .trustStore(sslConf.get("ssl.server.truststore.location"),
                  sslConf.get("ssl.server.truststore.password"),
                  sslConf.get("ssl.server.truststore.type", "jks")).build();
  server.addServlet("echo", "/echo", TestHttpServer.EchoServlet.class);
  server.start();
}
 
Developer: nucypher, Project: hadoop-oss, Lines: 35, Source: TestAuthenticationSessionCookie.java

Example 12: addPlugin

import org.apache.hadoop.conf.Configuration; // import the package/class this method depends on
/**
 * Add the specified plugin class name to the configuration string
 * listing plugin classes.
 */
private static void addPlugin(Configuration conf, String pluginName) {
  String existingPlugins = conf.get(TOOL_PLUGINS_KEY);
  String newPlugins = null;
  if (null == existingPlugins || existingPlugins.length() == 0) {
    newPlugins = pluginName;
  } else {
    newPlugins = existingPlugins + "," + pluginName;
  }

  conf.set(TOOL_PLUGINS_KEY, newPlugins);
}
 
Developer: aliyun, Project: aliyun-maxcompute-data-collectors, Lines: 16, Source: SqoopTool.java

Example 13: testGetComparator

import org.apache.hadoop.conf.Configuration; // import the package/class this method depends on
/** Test that comparator is defined and configured. */
public static void testGetComparator() throws Exception {
  Configuration conf = new Configuration();

  // Without conf.
  WritableComparator frobComparator = WritableComparator.get(Frob.class);
  assert(frobComparator instanceof FrobComparator);
  assertNotNull(frobComparator.getConf());
  assertNull(frobComparator.getConf().get(TEST_CONFIG_PARAM));

  // With conf.
  conf.set(TEST_CONFIG_PARAM, TEST_CONFIG_VALUE);
  frobComparator = WritableComparator.get(Frob.class, conf);
  assert(frobComparator instanceof FrobComparator);
  assertNotNull(frobComparator.getConf());
  assertEquals(conf.get(TEST_CONFIG_PARAM), TEST_CONFIG_VALUE);

  // Without conf. should reuse configuration.
  frobComparator = WritableComparator.get(Frob.class);
  assert(frobComparator instanceof FrobComparator);
  assertNotNull(frobComparator.getConf());
  assertEquals(conf.get(TEST_CONFIG_PARAM), TEST_CONFIG_VALUE);

  // New conf. should use new configuration.
  frobComparator = WritableComparator.get(Frob.class, new Configuration());
  assert(frobComparator instanceof FrobComparator);
  assertNotNull(frobComparator.getConf());
  assertNull(frobComparator.getConf().get(TEST_CONFIG_PARAM));
}
 
Developer: naver, Project: hadoop, Lines: 30, Source: TestWritable.java

Example 14: testScan

import org.apache.hadoop.conf.Configuration; // import the package/class this method depends on
/**
 * Tests a MR scan using specific start and stop rows.
 *
 * @throws IOException
 * @throws ClassNotFoundException
 * @throws InterruptedException
 */
private void testScan(String start, String stop, String last)
    throws IOException, InterruptedException, ClassNotFoundException {
  String jobName =
      "Scan" + (start != null ? start.toUpperCase() : "Empty") + "To" +
          (stop != null ? stop.toUpperCase() : "Empty");
  LOG.info("Before map/reduce startup - job " + jobName);
  Configuration c = new Configuration(TEST_UTIL.getConfiguration());
  
  c.set(KEY_STARTROW, start != null ? start : "");
  c.set(KEY_LASTROW, last != null ? last : "");
  
  List<Scan> scans = new ArrayList<Scan>();
  
  for(int i=0; i<3; i++){
    Scan scan = new Scan();
    
    scan.addFamily(INPUT_FAMILY);
    scan.setAttribute(Scan.SCAN_ATTRIBUTES_TABLE_NAME, Bytes.toBytes(TABLE_NAME + i));
    
    if (start != null) {
      scan.setStartRow(Bytes.toBytes(start));
    }
    if (stop != null) {
      scan.setStopRow(Bytes.toBytes(stop));
    }
    
    scans.add(scan);
    
    LOG.info("scan before: " + scan);
  }
  
  Job job = new Job(c, jobName);

  TableMapReduceUtil.initTableMapperJob(scans, ScanMapper.class,
      ImmutableBytesWritable.class, ImmutableBytesWritable.class, job);
  job.setReducerClass(ScanReducer.class);
  job.setNumReduceTasks(1); // one to get final "first" and "last" key
  FileOutputFormat.setOutputPath(job,
    new Path(TEST_UTIL.getDataTestDirOnTestFS(), job.getJobName()));
  LOG.info("Started " + job.getJobName());
  job.waitForCompletion(true);
  assertTrue(job.isSuccessful());
  LOG.info("After map/reduce completion - job " + jobName);
}
 
Developer: fengchen8086, Project: ditb, Lines: 52, Source: TestMultiTableInputFormat.java

Example 15: testPreserveStatus

import org.apache.hadoop.conf.Configuration; // import the package/class this method depends on
@Test
public void testPreserveStatus() {
  TaskAttemptContext taskAttemptContext = getTaskAttemptContext(config);
  JobContext jobContext = new JobContextImpl(taskAttemptContext.getConfiguration(),
      taskAttemptContext.getTaskAttemptID().getJobID());
  Configuration conf = jobContext.getConfiguration();


  String sourceBase;
  String targetBase;
  FileSystem fs = null;
  try {
    OutputCommitter committer = new CopyCommitter(null, taskAttemptContext);
    fs = FileSystem.get(conf);
    FsPermission sourcePerm = new FsPermission((short) 511);
    FsPermission initialPerm = new FsPermission((short) 448);
    sourceBase = TestDistCpUtils.createTestSetup(fs, sourcePerm);
    targetBase = TestDistCpUtils.createTestSetup(fs, initialPerm);

    DistCpOptions options = new DistCpOptions(Arrays.asList(new Path(sourceBase)),
        new Path("/out"));
    options.preserve(FileAttribute.PERMISSION);
    options.appendToConf(conf);
    options.setTargetPathExists(false);
    
    CopyListing listing = new GlobbedCopyListing(conf, CREDENTIALS);
    Path listingFile = new Path("/tmp1/" + String.valueOf(rand.nextLong()));
    listing.buildListing(listingFile, options);

    conf.set(DistCpConstants.CONF_LABEL_TARGET_WORK_PATH, targetBase);

    committer.commitJob(jobContext);
    if (!checkDirectoryPermissions(fs, targetBase, sourcePerm)) {
      Assert.fail("Permission don't match");
    }

    //Test for idempotent commit
    committer.commitJob(jobContext);
    if (!checkDirectoryPermissions(fs, targetBase, sourcePerm)) {
      Assert.fail("Permission don't match");
    }

  } catch (IOException e) {
    LOG.error("Exception encountered while testing for preserve status", e);
    Assert.fail("Preserve status failure");
  } finally {
    TestDistCpUtils.delete(fs, "/tmp1");
    conf.unset(DistCpConstants.CONF_LABEL_PRESERVE_STATUS);
  }

}
 
Developer: naver, Project: hadoop, Lines: 52, Source: TestCopyCommitter.java


Note: The org.apache.hadoop.conf.Configuration.set examples in this article were compiled by 純淨天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are selected from open-source projects contributed by their respective developers; copyright remains with the original authors, and distribution and use are subject to the license of the corresponding project. Do not reproduce without permission.