Java Configuration.set Method Code Examples

This article collects typical usage examples of the Java method org.apache.hadoop.conf.Configuration.set. If you are wondering what Configuration.set does, how to call it, or what it looks like in real code, the curated examples below may help. You can also explore further usage examples of the containing class, org.apache.hadoop.conf.Configuration.


The following presents 15 code examples of the Configuration.set method, sorted by popularity by default.
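Before looking at the project examples, here is a minimal, self-contained sketch of the basic set/get round trip on a Configuration object. It is not taken from any of the projects below; the property key "fs.defaultFS" and the class name ConfigurationSetSketch are chosen purely for illustration.

import org.apache.hadoop.conf.Configuration;

public class ConfigurationSetSketch {
  public static void main(String[] args) {
    Configuration conf = new Configuration();

    // set(String name, String value) stores a property value as a string
    // (the key below is illustrative; any configuration property name works)
    conf.set("fs.defaultFS", "hdfs://localhost:9000");

    // get(String name, String defaultValue) reads it back, falling back to the default
    System.out.println("fs.defaultFS = " + conf.get("fs.defaultFS", "file:///"));
  }
}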

Example 1: T_initialize_3

import org.apache.hadoop.conf.Configuration; // import the package/class this method depends on
@Test
public void T_initialize_3() throws SerDeException{
  MDSSerde serde = new MDSSerde();
  Configuration conf = new Configuration();
  conf.set( ColumnProjectionUtils.READ_COLUMN_NAMES_CONF_STR , "num" );

  Properties table = new Properties();
  Properties part = new Properties();
  table.setProperty( serdeConstants.LIST_COLUMNS , "str,num,arry,nest" );
  table.setProperty( serdeConstants.LIST_COLUMN_TYPES , "string,int,array<string>,struct<a:string,b:int>" );

  serde.initialize( conf , table , part );
  StructObjectInspector inspector = (StructObjectInspector)( serde.getObjectInspector() );
  List<? extends StructField> fieldList = inspector.getAllStructFieldRefs();
  assertEquals( fieldList.get(0).getFieldName() , "num" );

  assertEquals( ( fieldList.get(0).getFieldObjectInspector() instanceof PrimitiveObjectInspector ) , true );
}
 
Developer ID: yahoojapan, Project: multiple-dimension-spread, Lines of code: 19, Source: TestMDSSerde.java

Example 2: testClusterSetDatanodeHostname

import org.apache.hadoop.conf.Configuration; // import the package/class this method depends on
/** MiniDFSCluster should not clobber dfs.datanode.hostname if requested */
@Test(timeout=100000)
public void testClusterSetDatanodeHostname() throws Throwable {
  assumeTrue(System.getProperty("os.name").startsWith("Linux"));
  Configuration conf = new HdfsConfiguration();
  conf.set(DFSConfigKeys.DFS_DATANODE_HOST_NAME_KEY, "MYHOST");
  File testDataCluster5 = new File(testDataPath, CLUSTER_5);
  String c5Path = testDataCluster5.getAbsolutePath();
  conf.set(MiniDFSCluster.HDFS_MINIDFS_BASEDIR, c5Path);
  MiniDFSCluster cluster5 = new MiniDFSCluster.Builder(conf)
    .numDataNodes(1)
    .checkDataNodeHostConfig(true)
    .build();
  try {
    assertEquals("DataNode hostname config not respected", "MYHOST",
        cluster5.getDataNodes().get(0).getDatanodeId().getHostName());
  } finally {
    MiniDFSCluster.shutdownCluster(cluster5);
  }
}
 
Developer ID: naver, Project: hadoop, Lines of code: 21, Source: TestMiniDFSCluster.java

Example 3: testAuthorityFromDefaultFS

import org.apache.hadoop.conf.Configuration; // import the package/class this method depends on
@Test
public void testAuthorityFromDefaultFS() throws Exception {
  Configuration config = new Configuration();
  String defaultFsKey = CommonConfigurationKeys.FS_DEFAULT_NAME_KEY;
  
  FileSystem fs = getVerifiedFS("myfs://host", "myfs://host.a.b:123", config);
  verifyPaths(fs, new String[]{ "myfs://" }, -1, false);

  config.set(defaultFsKey, "myfs://host");
  verifyPaths(fs, new String[]{ "myfs://" }, -1, true);

  config.set(defaultFsKey, "myfs2://host");
  verifyPaths(fs, new String[]{ "myfs://" }, -1, false);

  config.set(defaultFsKey, "myfs://host:123");
  verifyPaths(fs, new String[]{ "myfs://" }, -1, true);

  config.set(defaultFsKey, "myfs://host:456");
  verifyPaths(fs, new String[]{ "myfs://" }, -1, false);
}
 
Developer ID: nucypher, Project: hadoop-oss, Lines of code: 21, Source: TestFileSystemCanonicalization.java

Example 4: refreshCallQueue

import org.apache.hadoop.conf.Configuration; // import the package/class this method depends on
public int refreshCallQueue() throws IOException {
  // Get the current configuration
  Configuration conf = getConf();
  
  // For security authorization, the server principal for this call
  // should be the NameNode's.
  conf.set(CommonConfigurationKeys.HADOOP_SECURITY_SERVICE_USER_NAME_KEY, 
      conf.get(DFSConfigKeys.DFS_NAMENODE_KERBEROS_PRINCIPAL_KEY, ""));

  DistributedFileSystem dfs = getDFS();
  URI dfsUri = dfs.getUri();
  boolean isHaEnabled = HAUtil.isLogicalUri(conf, dfsUri);

  if (isHaEnabled) {
    // Run refreshCallQueue for all NNs if HA is enabled
    String nsId = dfsUri.getHost();
    List<ProxyAndInfo<RefreshCallQueueProtocol>> proxies =
        HAUtil.getProxiesForAllNameNodesInNameservice(conf, nsId,
            RefreshCallQueueProtocol.class);
    for (ProxyAndInfo<RefreshCallQueueProtocol> proxy : proxies) {
      proxy.getProxy().refreshCallQueue();
      System.out.println("Refresh call queue successful for "
          + proxy.getAddress());
    }
  } else {
    // Create the client
    RefreshCallQueueProtocol refreshProtocol =
        NameNodeProxies.createProxy(conf, FileSystem.getDefaultUri(conf),
            RefreshCallQueueProtocol.class).getProxy();

    // Refresh the call queue
    refreshProtocol.refreshCallQueue();
    System.out.println("Refresh call queue successful");
  }

  return 0;
}
 
Developer ID: naver, Project: hadoop, Lines of code: 39, Source: DFSAdmin.java

Example 5: testDeprecatedUmask

import org.apache.hadoop.conf.Configuration; // import the package/class this method depends on
public void testDeprecatedUmask() {
  Configuration conf = new Configuration();
  conf.set(FsPermission.DEPRECATED_UMASK_LABEL, "302"); // 302 = 0456
  FsPermission umask = FsPermission.getUMask(conf);

  assertEquals(0456, umask.toShort());
}
 
Developer ID: naver, Project: hadoop, Lines of code: 8, Source: TestFsPermission.java

Example 6: getHttpFSFileSystem

import org.apache.hadoop.conf.Configuration; // import the package/class this method depends on
protected FileSystem getHttpFSFileSystem() throws Exception {
  Configuration conf = new Configuration();
  conf.set("fs.webhdfs.impl", getFileSystemClass().getName());
  URI uri = new URI(getScheme() + "://" +
                    TestJettyHelper.getJettyURL().toURI().getAuthority());
  return FileSystem.get(uri, conf);
}
 
Developer ID: naver, Project: hadoop, Lines of code: 8, Source: BaseTestHttpFSWith.java

Example 7: testCommitter

import org.apache.hadoop.conf.Configuration; // import the package/class this method depends on
@SuppressWarnings("unchecked")
public void testCommitter() throws Exception {
  Job job = Job.getInstance();
  FileOutputFormat.setOutputPath(job, outDir);
  Configuration conf = job.getConfiguration();
  conf.set(MRJobConfig.TASK_ATTEMPT_ID, attempt);
  JobContext jContext = new JobContextImpl(conf, taskID.getJobID());
  TaskAttemptContext tContext = new TaskAttemptContextImpl(conf, taskID);
  FileOutputCommitter committer = new FileOutputCommitter(outDir, tContext);

  // setup
  committer.setupJob(jContext);
  committer.setupTask(tContext);

  // write output
  TextOutputFormat theOutputFormat = new TextOutputFormat();
  RecordWriter theRecordWriter = theOutputFormat.getRecordWriter(tContext);
  writeOutput(theRecordWriter, tContext);

  // do commit
  committer.commitTask(tContext);
  committer.commitJob(jContext);

  // validate output
  File expectedFile = new File(new Path(outDir, partFile).toString());
  StringBuffer expectedOutput = new StringBuffer();
  expectedOutput.append(key1).append('\t').append(val1).append("\n");
  expectedOutput.append(val1).append("\n");
  expectedOutput.append(val2).append("\n");
  expectedOutput.append(key2).append("\n");
  expectedOutput.append(key1).append("\n");
  expectedOutput.append(key2).append('\t').append(val2).append("\n");
  String output = UtilsForTests.slurp(expectedFile);
  assertEquals(output, expectedOutput.toString());
  FileUtil.fullyDelete(new File(outDir.toString()));
}
 
Developer ID: naver, Project: hadoop, Lines of code: 37, Source: TestMRCJCFileOutputCommitter.java

Example 8: FileSystemPlugin

import org.apache.hadoop.conf.Configuration; // import the package/class this method depends on
public FileSystemPlugin(FileSystemConfig config, DrillbitContext context, String name) throws ExecutionSetupException{
  try {
    this.config = config;
    this.context = context;

    fsConf = new Configuration();
    fsConf.set(FileSystem.FS_DEFAULT_NAME_KEY, config.connection);
    fsConf.set("fs.classpath.impl", ClassPathFileSystem.class.getName());
    fsConf.set("fs.drill-local.impl", LocalSyncableFileSystem.class.getName());

    formatPluginsByName = FormatCreator.getFormatPlugins(context, fsConf, config);
    List<FormatMatcher> matchers = Lists.newArrayList();
    formatPluginsByConfig = Maps.newHashMap();
    for (FormatPlugin p : formatPluginsByName.values()) {
      matchers.add(p.getMatcher());
      formatPluginsByConfig.put(p.getConfig(), p);
    }

    final boolean noWorkspace = config.workspaces == null || config.workspaces.isEmpty();
    List<WorkspaceSchemaFactory> factories = Lists.newArrayList();
    if (!noWorkspace) {
      for (Map.Entry<String, WorkspaceConfig> space : config.workspaces.entrySet()) {
        factories.add(new WorkspaceSchemaFactory(context.getConfig(), this, space.getKey(), name, space.getValue(), matchers));
      }
    }

    // If the "default" workspace is not given, add one.
    if (noWorkspace || !config.workspaces.containsKey(DEFAULT_WS_NAME)) {
      factories.add(new WorkspaceSchemaFactory(context.getConfig(), this, DEFAULT_WS_NAME, name, WorkspaceConfig.DEFAULT, matchers));
    }

    this.schemaFactory = new FileSystemSchemaFactory(name, factories);
  } catch (IOException e) {
    throw new ExecutionSetupException("Failure setting up file system plugin.", e);
  }
}
 
Developer ID: skhalifa, Project: QDrill, Lines of code: 37, Source: FileSystemPlugin.java

Example 9: testValidKeyProvider

import org.apache.hadoop.conf.Configuration; // import the package/class this method depends on
@Test
public void testValidKeyProvider() throws Exception {
  Configuration conf = new Configuration();
  String account = "testacct";
  String key = "testkey";

  conf.set(SimpleKeyProvider.KEY_ACCOUNT_KEY_PREFIX + account, key);
  conf.setClass("fs.azure.account.keyprovider." + account,
      SimpleKeyProvider.class, KeyProvider.class);
  String result = AzureNativeFileSystemStore.getAccountKeyFromConfiguration(
      account, conf);
  assertEquals(key, result);
}
 
Developer ID: naver, Project: hadoop, Lines of code: 14, Source: TestWasbUriAndConfiguration.java

Example 10: testCheckPointDirsAreTrimmed

import org.apache.hadoop.conf.Configuration; // import the package/class this method depends on
/**
 * Test that dfs.namenode.checkpoint.dir and dfs.namenode.checkpoint.edits.dir
 * tolerate whitespace around their values.
 */
@Test
public void testCheckPointDirsAreTrimmed() throws Exception {
  MiniDFSCluster cluster = null;
  SecondaryNameNode secondary = null;
  File checkpointNameDir1 = new File(base_dir, "chkptName1");
  File checkpointEditsDir1 = new File(base_dir, "chkptEdits1");
  File checkpointNameDir2 = new File(base_dir, "chkptName2");
  File checkpointEditsDir2 = new File(base_dir, "chkptEdits2");
  File nameDir = new File(base_dir, "name1");
  String whiteSpace = "  \n   \n  ";
  Configuration conf = new HdfsConfiguration();
  conf.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY, nameDir.getPath());
  conf.setStrings(DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_DIR_KEY, whiteSpace
      + checkpointNameDir1.getPath() + whiteSpace, whiteSpace
      + checkpointNameDir2.getPath() + whiteSpace);
  conf.setStrings(DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_EDITS_DIR_KEY,
      whiteSpace + checkpointEditsDir1.getPath() + whiteSpace, whiteSpace
          + checkpointEditsDir2.getPath() + whiteSpace);
  cluster = new MiniDFSCluster.Builder(conf).manageNameDfsDirs(false)
      .numDataNodes(3).build();
  try {
    cluster.waitActive();
    secondary = startSecondaryNameNode(conf);
    secondary.doCheckpoint();
    assertTrue(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY + " must be trimmed ",
        checkpointNameDir1.exists());
    assertTrue(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY + " must be trimmed ",
        checkpointNameDir2.exists());
    assertTrue(DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_EDITS_DIR_KEY
        + " must be trimmed ", checkpointEditsDir1.exists());
    assertTrue(DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_EDITS_DIR_KEY
        + " must be trimmed ", checkpointEditsDir2.exists());
  } finally {
    secondary.shutdown();
    cluster.shutdown();
  }
}
 
Developer ID: naver, Project: hadoop, Lines of code: 42, Source: TestNameEditsConfigs.java

Example 11: startServer

import org.apache.hadoop.conf.Configuration; // import the package/class this method depends on
public void startServer(boolean isTestSessionCookie) throws Exception {
  Configuration conf = new Configuration();
  if (isTestSessionCookie) {
    conf.set(HttpServer2.FILTER_INITIALIZER_PROPERTY,
          DummyFilterInitializer.class.getName());
  } else {
    conf.set(HttpServer2.FILTER_INITIALIZER_PROPERTY,
          Dummy2FilterInitializer.class.getName());
  }

  File base = new File(BASEDIR);
  FileUtil.fullyDelete(base);
  base.mkdirs();
  keystoresDir = new File(BASEDIR).getAbsolutePath();
  sslConfDir = KeyStoreTestUtil.getClasspathDir(TestSSLHttpServer.class);

  KeyStoreTestUtil.setupSSLConfig(keystoresDir, sslConfDir, conf, false);
  Configuration sslConf = KeyStoreTestUtil.getSslConfig();

  server = new HttpServer2.Builder()
          .setName("test")
          .addEndpoint(new URI("http://localhost"))
          .addEndpoint(new URI("https://localhost"))
          .setConf(conf)
          .keyPassword(sslConf.get("ssl.server.keystore.keypassword"))
          .keyStore(sslConf.get("ssl.server.keystore.location"),
                  sslConf.get("ssl.server.keystore.password"),
                  sslConf.get("ssl.server.keystore.type", "jks"))
          .trustStore(sslConf.get("ssl.server.truststore.location"),
                  sslConf.get("ssl.server.truststore.password"),
                  sslConf.get("ssl.server.truststore.type", "jks")).build();
  server.addServlet("echo", "/echo", TestHttpServer.EchoServlet.class);
  server.start();
}
 
Developer ID: nucypher, Project: hadoop-oss, Lines of code: 35, Source: TestAuthenticationSessionCookie.java

Example 12: addPlugin

import org.apache.hadoop.conf.Configuration; // import the package/class this method depends on
/**
 * Add the specified plugin class name to the configuration string
 * listing plugin classes.
 */
private static void addPlugin(Configuration conf, String pluginName) {
  String existingPlugins = conf.get(TOOL_PLUGINS_KEY);
  String newPlugins = null;
  if (null == existingPlugins || existingPlugins.length() == 0) {
    newPlugins = pluginName;
  } else {
    newPlugins = existingPlugins + "," + pluginName;
  }

  conf.set(TOOL_PLUGINS_KEY, newPlugins);
}
 
Developer ID: aliyun, Project: aliyun-maxcompute-data-collectors, Lines of code: 16, Source: SqoopTool.java

Example 13: testGetComparator

import org.apache.hadoop.conf.Configuration; // import the package/class this method depends on
/** Test that comparator is defined and configured. */
public static void testGetComparator() throws Exception {
  Configuration conf = new Configuration();

  // Without conf.
  WritableComparator frobComparator = WritableComparator.get(Frob.class);
  assert(frobComparator instanceof FrobComparator);
  assertNotNull(frobComparator.getConf());
  assertNull(frobComparator.getConf().get(TEST_CONFIG_PARAM));

  // With conf.
  conf.set(TEST_CONFIG_PARAM, TEST_CONFIG_VALUE);
  frobComparator = WritableComparator.get(Frob.class, conf);
  assert(frobComparator instanceof FrobComparator);
  assertNotNull(frobComparator.getConf());
  assertEquals(conf.get(TEST_CONFIG_PARAM), TEST_CONFIG_VALUE);

  // Without conf: should reuse the previously set configuration.
  frobComparator = WritableComparator.get(Frob.class);
  assert(frobComparator instanceof FrobComparator);
  assertNotNull(frobComparator.getConf());
  assertEquals(conf.get(TEST_CONFIG_PARAM), TEST_CONFIG_VALUE);

  // New conf: should use the new configuration.
  frobComparator = WritableComparator.get(Frob.class, new Configuration());
  assert(frobComparator instanceof FrobComparator);
  assertNotNull(frobComparator.getConf());
  assertNull(frobComparator.getConf().get(TEST_CONFIG_PARAM));
}
 
Developer ID: naver, Project: hadoop, Lines of code: 30, Source: TestWritable.java

Example 14: testScan

import org.apache.hadoop.conf.Configuration; // import the package/class this method depends on
/**
 * Tests a MR scan using specific start and stop rows.
 *
 * @throws IOException
 * @throws ClassNotFoundException
 * @throws InterruptedException
 */
private void testScan(String start, String stop, String last)
    throws IOException, InterruptedException, ClassNotFoundException {
  String jobName =
      "Scan" + (start != null ? start.toUpperCase() : "Empty") + "To" +
          (stop != null ? stop.toUpperCase() : "Empty");
  LOG.info("Before map/reduce startup - job " + jobName);
  Configuration c = new Configuration(TEST_UTIL.getConfiguration());
  
  c.set(KEY_STARTROW, start != null ? start : "");
  c.set(KEY_LASTROW, last != null ? last : "");
  
  List<Scan> scans = new ArrayList<Scan>();
  
  for(int i=0; i<3; i++){
    Scan scan = new Scan();
    
    scan.addFamily(INPUT_FAMILY);
    scan.setAttribute(Scan.SCAN_ATTRIBUTES_TABLE_NAME, Bytes.toBytes(TABLE_NAME + i));
    
    if (start != null) {
      scan.setStartRow(Bytes.toBytes(start));
    }
    if (stop != null) {
      scan.setStopRow(Bytes.toBytes(stop));
    }
    
    scans.add(scan);
    
    LOG.info("scan before: " + scan);
  }
  
  Job job = new Job(c, jobName);

  TableMapReduceUtil.initTableMapperJob(scans, ScanMapper.class,
      ImmutableBytesWritable.class, ImmutableBytesWritable.class, job);
  job.setReducerClass(ScanReducer.class);
  job.setNumReduceTasks(1); // one to get final "first" and "last" key
  FileOutputFormat.setOutputPath(job,
    new Path(TEST_UTIL.getDataTestDirOnTestFS(), job.getJobName()));
  LOG.info("Started " + job.getJobName());
  job.waitForCompletion(true);
  assertTrue(job.isSuccessful());
  LOG.info("After map/reduce completion - job " + jobName);
}
 
Developer ID: fengchen8086, Project: ditb, Lines of code: 52, Source: TestMultiTableInputFormat.java

Example 15: testPreserveStatus

import org.apache.hadoop.conf.Configuration; // import the package/class this method depends on
@Test
public void testPreserveStatus() {
  TaskAttemptContext taskAttemptContext = getTaskAttemptContext(config);
  JobContext jobContext = new JobContextImpl(taskAttemptContext.getConfiguration(),
      taskAttemptContext.getTaskAttemptID().getJobID());
  Configuration conf = jobContext.getConfiguration();


  String sourceBase;
  String targetBase;
  FileSystem fs = null;
  try {
    OutputCommitter committer = new CopyCommitter(null, taskAttemptContext);
    fs = FileSystem.get(conf);
    FsPermission sourcePerm = new FsPermission((short) 511);
    FsPermission initialPerm = new FsPermission((short) 448);
    sourceBase = TestDistCpUtils.createTestSetup(fs, sourcePerm);
    targetBase = TestDistCpUtils.createTestSetup(fs, initialPerm);

    DistCpOptions options = new DistCpOptions(Arrays.asList(new Path(sourceBase)),
        new Path("/out"));
    options.preserve(FileAttribute.PERMISSION);
    options.appendToConf(conf);
    options.setTargetPathExists(false);
    
    CopyListing listing = new GlobbedCopyListing(conf, CREDENTIALS);
    Path listingFile = new Path("/tmp1/" + String.valueOf(rand.nextLong()));
    listing.buildListing(listingFile, options);

    conf.set(DistCpConstants.CONF_LABEL_TARGET_WORK_PATH, targetBase);

    committer.commitJob(jobContext);
    if (!checkDirectoryPermissions(fs, targetBase, sourcePerm)) {
      Assert.fail("Permission don't match");
    }

    //Test for idempotent commit
    committer.commitJob(jobContext);
    if (!checkDirectoryPermissions(fs, targetBase, sourcePerm)) {
      Assert.fail("Permission don't match");
    }

  } catch (IOException e) {
    LOG.error("Exception encountered while testing for preserve status", e);
    Assert.fail("Preserve status failure");
  } finally {
    TestDistCpUtils.delete(fs, "/tmp1");
    conf.unset(DistCpConstants.CONF_LABEL_PRESERVE_STATUS);
  }

}
 
Developer ID: naver, Project: hadoop, Lines of code: 52, Source: TestCopyCommitter.java


Note: The org.apache.hadoop.conf.Configuration.set examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by their original authors, and copyright of the source code remains with those authors; please refer to each project's License before distributing or using the code, and do not republish without permission.