

Java FileSystem.setDefaultUri Method Code Examples

This article collects typical usage examples of the Java method org.apache.hadoop.fs.FileSystem.setDefaultUri. If you are asking what FileSystem.setDefaultUri does, how to use it, or where to find examples of it, the curated code samples below should help. You can also browse further usage examples for the enclosing class, org.apache.hadoop.fs.FileSystem.


The following 15 code examples of the FileSystem.setDefaultUri method are shown below, sorted by popularity by default.
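
Before diving into the examples, a minimal sketch of the method itself may help. FileSystem.setDefaultUri writes the default filesystem URI (the fs.defaultFS setting) into a Configuration, and has two overloads, one taking a String and one taking a java.net.URI; FileSystem.getDefaultUri reads the value back. The host and port below are placeholders, not taken from any of the examples:

import java.net.URI;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;

public class SetDefaultUriSketch {
  public static void main(String[] args) {
    Configuration conf = new Configuration();

    // String overload: the value is parsed into a URI internally.
    FileSystem.setDefaultUri(conf, "hdfs://namenode.example.com:8020");

    // URI overload: equivalent, for callers that already hold a URI.
    FileSystem.setDefaultUri(conf, URI.create("hdfs://namenode.example.com:8020"));

    // Read the default filesystem URI back from the configuration.
    URI defaultUri = FileSystem.getDefaultUri(conf);
    System.out.println("Default filesystem: " + defaultUri);
  }
}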

Example 1: testThrottler

import org.apache.hadoop.fs.FileSystem; // import the package/class the method depends on
@Test
public void testThrottler() throws IOException {
  Configuration conf = new HdfsConfiguration();
  FileSystem.setDefaultUri(conf, "hdfs://localhost:0");
  long bandwidthPerSec = 1024*1024L;
  final long TOTAL_BYTES = 6 * bandwidthPerSec;
  long bytesToSend = TOTAL_BYTES;
  long start = Time.monotonicNow();
  DataTransferThrottler throttler = new DataTransferThrottler(bandwidthPerSec);
  long bytesSent = 1024*512L; // 0.5MB
  throttler.throttle(bytesSent);
  bytesToSend -= bytesSent;
  bytesSent = 1024*768L; // 0.75MB
  throttler.throttle(bytesSent);
  bytesToSend -= bytesSent;
  try {
    Thread.sleep(1000);
  } catch (InterruptedException ignored) {}
  throttler.throttle(bytesToSend);
  long end = Time.monotonicNow();
  // effective throughput over the whole transfer must not exceed the cap
  assertTrue(TOTAL_BYTES * 1000 / (end - start) <= bandwidthPerSec);
}
 
Developer ID: naver, Project: hadoop, Lines: 24, Source: TestBlockReplacement.java

Example 2: testThatMatchingRPCandHttpPortsThrowException

import org.apache.hadoop.fs.FileSystem; // import the package/class the method depends on
/**
 * Tests that setting the RPC port to the same value as the HTTP port
 * throws an exception when the NameNode tries to reuse the port.
 */
@Test(expected = BindException.class, timeout = 300000)
public void testThatMatchingRPCandHttpPortsThrowException() 
    throws IOException {

  NameNode nameNode = null;
  try {
    Configuration conf = new HdfsConfiguration();
    File nameDir = new File(MiniDFSCluster.getBaseDirectory(), "name");
    conf.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY,
        nameDir.getAbsolutePath());

    Random rand = new Random();
    final int port = 30000 + rand.nextInt(30000);

    // set both of these to the same port. It should fail.
    FileSystem.setDefaultUri(conf, "hdfs://localhost:" + port);
    conf.set(DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY, "127.0.0.1:" + port);
    DFSTestUtil.formatNameNode(conf);
    nameNode = new NameNode(conf);
  } finally {
    if (nameNode != null) {
      nameNode.stop();
    }
  }
}
 
Developer ID: naver, Project: hadoop, Lines: 31, Source: TestValidateConfigurationSettings.java

Example 3: testFileSystemCloseAll

import org.apache.hadoop.fs.FileSystem; // import the package/class the method depends on
@Test
public void testFileSystemCloseAll() throws Exception {
  Configuration conf = getTestConfiguration();
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0).build();
  URI address = FileSystem.getDefaultUri(conf);

  try {
    FileSystem.closeAll();

    conf = getTestConfiguration();
    FileSystem.setDefaultUri(conf, address);
    FileSystem.get(conf);
    FileSystem.get(conf);
    FileSystem.closeAll();
  }
  finally {
    if (cluster != null) {cluster.shutdown();}
  }
}
 
Developer ID: naver, Project: hadoop, Lines: 20, Source: TestDistributedFileSystem.java
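
Example 3 exercises the FileSystem instance cache: FileSystem.get(conf) returns the same cached instance for a given URI and user until FileSystem.closeAll() closes and evicts all of them. Below is a minimal sketch of that behavior, using the local filesystem so it runs without a cluster; the class name is a placeholder:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;

public class FsCacheSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    FileSystem.setDefaultUri(conf, "file:///"); // local FS, no cluster needed

    // Both calls hit the cache, so the same instance comes back.
    FileSystem a = FileSystem.get(conf);
    FileSystem b = FileSystem.get(conf);
    System.out.println(a == b); // expected: true

    FileSystem.closeAll(); // closes and evicts every cached instance
  }
}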

Example 4: processGeneralOptions

import org.apache.hadoop.fs.FileSystem; // import the package/class the method depends on
/**
 * Modify the configuration according to user-specified generic options.
 * @param conf Configuration to be modified
 * @param line User-specified generic options
 */
private void processGeneralOptions(Configuration conf,
    CommandLine line) throws IOException {
  if (line.hasOption("fs")) {
    FileSystem.setDefaultUri(conf, line.getOptionValue("fs"));
  }

  if (line.hasOption("jt")) {
    String optionValue = line.getOptionValue("jt");
    if (optionValue.equalsIgnoreCase("local")) {
      conf.set("mapreduce.framework.name", optionValue);
    }

    conf.set("yarn.resourcemanager.address", optionValue, 
        "from -jt command line option");
  }
  if (line.hasOption("conf")) {
    String[] values = line.getOptionValues("conf");
    for(String value : values) {
      conf.addResource(new Path(value));
    }
  }

  if (line.hasOption('D')) {
    String[] property = line.getOptionValues('D');
    for(String prop : property) {
      String[] keyval = prop.split("=", 2);
      if (keyval.length == 2) {
        conf.set(keyval[0], keyval[1], "from command line");
      }
    }
  }

  if (line.hasOption("libjars")) {
    conf.set("tmpjars", 
             validateFiles(line.getOptionValue("libjars"), conf),
             "from -libjars command line option");
    //setting libjars in client classpath
    URL[] libjars = getLibJars(conf);
    if(libjars!=null && libjars.length>0) {
      conf.setClassLoader(new URLClassLoader(libjars, conf.getClassLoader()));
      Thread.currentThread().setContextClassLoader(
          new URLClassLoader(libjars, 
              Thread.currentThread().getContextClassLoader()));
    }
  }
  if (line.hasOption("files")) {
    conf.set("tmpfiles", 
             validateFiles(line.getOptionValue("files"), conf),
             "from -files command line option");
  }
  if (line.hasOption("archives")) {
    conf.set("tmparchives", 
              validateFiles(line.getOptionValue("archives"), conf),
              "from -archives command line option");
  }
  conf.setBoolean("mapreduce.client.genericoptionsparser.used", true);
  
  // tokensFile
  if(line.hasOption("tokenCacheFile")) {
    String fileName = line.getOptionValue("tokenCacheFile");
    // check if the local file exists
    FileSystem localFs = FileSystem.getLocal(conf);
    Path p = localFs.makeQualified(new Path(fileName));
    if (!localFs.exists(p)) {
        throw new FileNotFoundException("File "+fileName+" does not exist.");
    }
    if(LOG.isDebugEnabled()) {
      LOG.debug("setting conf tokensFile: " + fileName);
    }
    UserGroupInformation.getCurrentUser().addCredentials(
        Credentials.readTokenStorageFile(p, conf));
    conf.set("mapreduce.job.credentials.binary", p.toString(),
             "from -tokenCacheFile command line option");

  }
}
 
Developer ID: nucypher, Project: hadoop-oss, Lines: 82, Source: GenericOptionsParser.java
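
Example 4 is internal to GenericOptionsParser; application code usually reaches it through ToolRunner, which runs the generic options (-fs, -D, -libjars, ...) through the parser before invoking the tool. Below is a hedged sketch of that path with a placeholder class name; running it with "-fs hdfs://namenode.example.com:8020" should print that URI, because the parser applies the -fs value via FileSystem.setDefaultUri:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.conf.Configured;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.util.Tool;
import org.apache.hadoop.util.ToolRunner;

public class EchoDefaultFs extends Configured implements Tool {
  @Override
  public int run(String[] args) throws Exception {
    // By the time run() is called, ToolRunner has already applied the
    // generic options to getConf().
    System.out.println(FileSystem.getDefaultUri(getConf()));
    return 0;
  }

  public static void main(String[] args) throws Exception {
    System.exit(ToolRunner.run(new Configuration(), new EchoDefaultFs(), args));
  }
}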

Example 5: runTestNameNodePorts

import org.apache.hadoop.fs.FileSystem; // import the package/class the method depends on
/**
 * Verify namenode port usage.
 */
public void runTestNameNodePorts(boolean withService) throws Exception {
  NameNode nn = null;
  try {
    nn = startNameNode(withService);

    // start another namenode on the same port
    Configuration conf2 = new HdfsConfiguration(config);
    conf2.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY,
        fileAsURI(new File(hdfsDir, "name2")).toString());
    DFSTestUtil.formatNameNode(conf2);
    boolean started = canStartNameNode(conf2);
    assertFalse(started); // should fail

    // start on a different main port
    FileSystem.setDefaultUri(conf2, "hdfs://" + THIS_HOST);
    started = canStartNameNode(conf2);
    assertFalse(started); // should fail again

    // reset conf2 since NameNode modifies it
    FileSystem.setDefaultUri(conf2, "hdfs://" + THIS_HOST);
    // different http port
    conf2.set(DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY, THIS_HOST);
    started = canStartNameNode(conf2);

    if (withService) {
      assertFalse("Should've failed on service port", started);

      // reset conf2 since NameNode modifies it
      FileSystem.setDefaultUri(conf2, "hdfs://" + THIS_HOST);
      conf2.set(DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY, THIS_HOST);
      // Set Service address      
      conf2.set(DFSConfigKeys.DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY,  THIS_HOST);
      started = canStartNameNode(conf2);        
    }
    assertTrue(started);
  } finally {
    stopNameNode(nn);
  }
}
 
Developer ID: naver, Project: hadoop, Lines: 43, Source: TestHDFSServerPorts.java

Example 6: configureWordCount

import org.apache.hadoop.fs.FileSystem; // import the package/class the method depends on
static void configureWordCount(FileSystem fs, JobConf conf, String input,
    int numMaps, int numReduces, Path inDir, Path outDir) throws IOException {
  fs.delete(outDir, true);
  if (!fs.mkdirs(inDir)) {
    throw new IOException("Mkdirs failed to create " + inDir.toString());
  }
  DataOutputStream file = fs.create(new Path(inDir, "part-0"));
  file.writeBytes(input);
  file.close();
  FileSystem.setDefaultUri(conf, fs.getUri());
  conf.set(JTConfig.FRAMEWORK_NAME, JTConfig.YARN_FRAMEWORK_NAME);
  conf.setJobName("wordcount");
  conf.setInputFormat(TextInputFormat.class);
  
  // the keys are words (strings)
  conf.setOutputKeyClass(Text.class);
  // the values are counts (ints)
  conf.setOutputValueClass(IntWritable.class);
  
  conf.set("mapred.mapper.class", "testjar.ClassWordCount$MapClass");        
  conf.set("mapred.combine.class", "testjar.ClassWordCount$Reduce");
  conf.set("mapred.reducer.class", "testjar.ClassWordCount$Reduce");
  FileInputFormat.setInputPaths(conf, inDir);
  FileOutputFormat.setOutputPath(conf, outDir);
  conf.setNumMapTasks(numMaps);
  conf.setNumReduceTasks(numReduces);
  //set the tests jar file
  conf.setJarByClass(TestMiniMRClasspath.class);
}
 
Developer ID: naver, Project: hadoop, Lines: 30, Source: TestMiniMRClasspath.java

Example 7: MiniMRCluster

import org.apache.hadoop.fs.FileSystem; // import the package/class the method depends on
public MiniMRCluster(int jobTrackerPort, int taskTrackerPort,
    int numTaskTrackers, String namenode, int numDir, String[] racks,
    String[] hosts, UserGroupInformation ugi, JobConf conf,
    int numTrackerToExclude, Clock clock) throws IOException {
  if (conf == null) conf = new JobConf();
  FileSystem.setDefaultUri(conf, namenode);
  String identifier = this.getClass().getSimpleName() + "_"
      + Integer.toString(new Random().nextInt(Integer.MAX_VALUE));
  mrClientCluster = MiniMRClientClusterFactory.create(this.getClass(),
      identifier, numTaskTrackers, conf);
}
 
Developer ID: naver, Project: hadoop, Lines: 12, Source: MiniMRCluster.java

Example 8: setUp

import org.apache.hadoop.fs.FileSystem; // import the package/class the method depends on
@BeforeClass
public static void setUp() throws Exception {
  config = new Configuration();
  if ( DFS_BASE_DIR.exists() && !FileUtil.fullyDelete(DFS_BASE_DIR) ) {
    throw new IOException("Could not delete hdfs directory '" + DFS_BASE_DIR +
                          "'");
  }
  
  // Test has multiple name directories.
  // Format should not really prompt us if one of the directories exist,
  // but is empty. So in case the test hangs on an input, it means something
  // could be wrong in the format prompting code. (HDFS-1636)
  LOG.info("hdfsdir is " + DFS_BASE_DIR.getAbsolutePath());
  File nameDir1 = new File(DFS_BASE_DIR, "name1");
  File nameDir2 = new File(DFS_BASE_DIR, "name2");

  // To test multiple directory handling, we pre-create one of the name directories.
  nameDir1.mkdirs();

  // Set multiple name directories.
  config.set(DFS_NAMENODE_NAME_DIR_KEY, nameDir1.getPath() + "," + nameDir2.getPath());
  config.set(DFS_DATANODE_DATA_DIR_KEY, new File(DFS_BASE_DIR, "data").getPath());

  config.set(DFS_NAMENODE_CHECKPOINT_DIR_KEY,new File(DFS_BASE_DIR, "secondary").getPath());

  FileSystem.setDefaultUri(config, "hdfs://"+NAME_NODE_HOST + "0");
}
 
Developer ID: naver, Project: hadoop, Lines: 28, Source: TestAllowFormat.java

Example 9: startCluster

import org.apache.hadoop.fs.FileSystem; // import the package/class the method depends on
private void startCluster(Configuration  conf) throws Exception {
  if (System.getProperty("hadoop.log.dir") == null) {
    System.setProperty("hadoop.log.dir", "target/test-dir");
  }
  conf.set("dfs.block.access.token.enable", "false");
  conf.set("dfs.permissions", "true");
  conf.set("hadoop.security.authentication", "simple");
  String cp = conf.get(YarnConfiguration.YARN_APPLICATION_CLASSPATH,
      StringUtils.join(",",
          YarnConfiguration.DEFAULT_YARN_CROSS_PLATFORM_APPLICATION_CLASSPATH))
      + File.pathSeparator + classpathDir;
  conf.set(YarnConfiguration.YARN_APPLICATION_CLASSPATH, cp);
  dfsCluster = new MiniDFSCluster.Builder(conf).build();
  FileSystem fileSystem = dfsCluster.getFileSystem();
  fileSystem.mkdirs(new Path("/tmp"));
  fileSystem.mkdirs(new Path("/user"));
  fileSystem.mkdirs(new Path("/hadoop/mapred/system"));
  fileSystem.setPermission(
    new Path("/tmp"), FsPermission.valueOf("-rwxrwxrwx"));
  fileSystem.setPermission(
    new Path("/user"), FsPermission.valueOf("-rwxrwxrwx"));
  fileSystem.setPermission(
    new Path("/hadoop/mapred/system"), FsPermission.valueOf("-rwx------"));
  FileSystem.setDefaultUri(conf, fileSystem.getUri());
  mrCluster = MiniMRClientClusterFactory.create(this.getClass(), 1, conf);

  // so the minicluster conf is available to the containers.
  Writer writer = new FileWriter(classpathDir + "/core-site.xml");
  mrCluster.getConfig().writeXml(writer);
  writer.close();
}
 
Developer ID: naver, Project: hadoop, Lines: 32, Source: TestEncryptedShuffle.java

Example 10: setUpNameDirs

import org.apache.hadoop.fs.FileSystem; // import the package/class the method depends on
@Before
public void setUpNameDirs() throws Exception {
  config = new HdfsConfiguration();
  hdfsDir = new File(MiniDFSCluster.getBaseDirectory()).getCanonicalFile();
  if ( hdfsDir.exists() && !FileUtil.fullyDelete(hdfsDir) ) {
    throw new IOException("Could not delete hdfs directory '" + hdfsDir + "'");
  }
  
  hdfsDir.mkdirs();
  path1 = new File(hdfsDir, "name1");
  path2 = new File(hdfsDir, "name2");
  path3 = new File(hdfsDir, "name3");
  
  path1.mkdir(); path2.mkdir(); path3.mkdir();
  if(!path2.exists() ||  !path3.exists() || !path1.exists()) {
    throw new IOException("Couldn't create dfs.name dirs in " + hdfsDir.getAbsolutePath());
  }
  
  String dfs_name_dir = path1.getPath() + "," + path2.getPath();
  System.out.println("configuring hdfsdir is " + hdfsDir.getAbsolutePath() + 
      "; dfs_name_dir = "+ dfs_name_dir + ";dfs_name_edits_dir(only)=" + path3.getPath());
  
  config.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY, dfs_name_dir);
  config.set(DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_KEY, dfs_name_dir + "," + path3.getPath());

  config.set(DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_DIR_KEY,new File(hdfsDir, "secondary").getPath());
 
  FileSystem.setDefaultUri(config, "hdfs://"+NAME_NODE_HOST + "0");
  
  config.set(DFSConfigKeys.DFS_NAMENODE_SECONDARY_HTTP_ADDRESS_KEY, "0.0.0.0:0");
  
  // set the restore feature on
  config.setBoolean(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_RESTORE_KEY, true);
}
 
Developer ID: naver, Project: hadoop, Lines: 35, Source: TestStorageRestore.java

Example 11: getCustomSocketConfigs

import org.apache.hadoop.fs.FileSystem; // import the package/class the method depends on
private Configuration getCustomSocketConfigs(final int nameNodePort) {
  // Get another reference via network using a specific socket factory
  Configuration cconf = new Configuration();
  FileSystem.setDefaultUri(cconf, String.format("hdfs://localhost:%s/",
      nameNodePort + 10));
  cconf.set("hadoop.rpc.socket.factory.class.default",
      "org.apache.hadoop.ipc.DummySocketFactory");
  cconf.set("hadoop.rpc.socket.factory.class.ClientProtocol",
      "org.apache.hadoop.ipc.DummySocketFactory");
  cconf.set("hadoop.rpc.socket.factory.class.JobSubmissionProtocol",
      "org.apache.hadoop.ipc.DummySocketFactory");
  return cconf;
}
 
Developer ID: naver, Project: hadoop, Lines: 14, Source: TestMRCJCSocketFactory.java

Example 12: TestFileArgs

import org.apache.hadoop.fs.FileSystem; // import the package/class the method depends on
public TestFileArgs() throws IOException
{
  // Set up mini cluster
  conf = new Configuration();
  dfs = new MiniDFSCluster.Builder(conf).build();
  fileSys = dfs.getFileSystem();
  namenode = fileSys.getUri().getAuthority();
  mr  = new MiniMRCluster(1, namenode, 1);

  map = LS_PATH;
  FileSystem.setDefaultUri(conf, "hdfs://" + namenode);
  setTestDir(new File("/tmp/TestFileArgs"));
}
 
Developer ID: naver, Project: hadoop, Lines: 14, Source: TestFileArgs.java

Example 13: setUp

import org.apache.hadoop.fs.FileSystem; // import the package/class the method depends on
@Before
public void setUp() throws Exception {
  // We want to count additional events, so we reset here
  mockQueueConstructions = 0;
  mockQueuePuts = 0;
  int portRetries = 5;
  int nnPort;

  for (; portRetries > 0; --portRetries) {
    // Pick a random port in the range [30000,60000).
    nnPort = 30000 + rand.nextInt(30000);  
    config = new Configuration();
    callQueueConfigKey = "ipc." + nnPort + ".callqueue.impl";
    config.setClass(callQueueConfigKey,
        MockCallQueue.class, BlockingQueue.class);
    config.set("hadoop.security.authorization", "true");

    FileSystem.setDefaultUri(config, "hdfs://localhost:" + nnPort);
    fs = FileSystem.get(config);
    
    try {
      cluster = new MiniDFSCluster.Builder(config).nameNodePort(nnPort).build();
      cluster.waitActive();
      break;
    } catch (BindException be) {
      // Retry with a different port number.
    }
  }
  
  if (portRetries == 0) {
    // Bail if we get very unlucky with our choice of ports.
    fail("Failed to pick an ephemeral port for the NameNode RPC server.");
  }
}
 
Developer ID: naver, Project: hadoop, Lines: 35, Source: TestRefreshCallQueue.java

Example 14: setUp

import org.apache.hadoop.fs.FileSystem; // import the package/class the method depends on
@Before
public void setUp() throws Exception {
  config = new HdfsConfiguration();
  config.setBoolean(DFSConfigKeys.DFS_WEBHDFS_ENABLED_KEY, true);
  config.setLong(DFSConfigKeys.DFS_NAMENODE_DELEGATION_TOKEN_MAX_LIFETIME_KEY, 10000);
  config.setLong(DFSConfigKeys.DFS_NAMENODE_DELEGATION_TOKEN_RENEW_INTERVAL_KEY, 5000);
  config.setBoolean(DFSConfigKeys.DFS_NAMENODE_DELEGATION_TOKEN_ALWAYS_USE_KEY, true);
  config.set("hadoop.security.auth_to_local",
      "RULE:[2:[email protected]$0]([email protected]*FOO.COM)s/@.*//" + "DEFAULT");
  FileSystem.setDefaultUri(config, "hdfs://localhost:" + "0");
  cluster = new MiniDFSCluster.Builder(config).numDataNodes(0).build();
  cluster.waitActive();
  dtSecretManager = NameNodeAdapter.getDtSecretManager(
      cluster.getNamesystem());
}
 
Developer ID: naver, Project: hadoop, Lines: 16, Source: TestDelegationToken.java

Example 15: testThatDifferentRPCandHttpPortsAreOK

import org.apache.hadoop.fs.FileSystem; // import the package/class the method depends on
/**
 * Tests that setting the RPC port to a value different from the HTTP port
 * does NOT throw an exception.
 */
@Test(timeout = 300000)
public void testThatDifferentRPCandHttpPortsAreOK() 
    throws IOException {

  Configuration conf = new HdfsConfiguration();
  File nameDir = new File(MiniDFSCluster.getBaseDirectory(), "name");
  conf.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY,
      nameDir.getAbsolutePath());

  Random rand = new Random();

  // A few retries in case the ports we choose are in use.
  for (int i = 0; i < 5; ++i) {
    final int port1 = 30000 + rand.nextInt(10000);
    final int port2 = port1 + 1 + rand.nextInt(10000);

    FileSystem.setDefaultUri(conf, "hdfs://localhost:" + port1);
    conf.set(DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY, "127.0.0.1:" + port2);
    DFSTestUtil.formatNameNode(conf);
    NameNode nameNode = null;

    try {
      nameNode = new NameNode(conf); // should be OK!
      break;
    } catch(BindException be) {
      continue;     // Port in use? Try another.
    } finally {
      if (nameNode != null) {
        nameNode.stop();
      }
    }
  }
}
 
Developer ID: naver, Project: hadoop, Lines: 38, Source: TestValidateConfigurationSettings.java

