

Java FileSystem.getDefaultUri Method Code Examples

This article collects typical usage examples of the Java method org.apache.hadoop.fs.FileSystem.getDefaultUri. If you are wondering what FileSystem.getDefaultUri does, how to call it, or where to find it in real code, the curated examples below may help. You can also browse further usage examples for its enclosing class, org.apache.hadoop.fs.FileSystem.


The following presents 12 code examples of the FileSystem.getDefaultUri method, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Java code examples.
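Before the collected examples, here is a minimal, self-contained sketch of the method's basic contract: the static FileSystem.getDefaultUri(conf) reads the URI configured under fs.defaultFS (falling back to file:/// when nothing is set), and its companion FileSystem.setDefaultUri(conf, uri) writes that key. The NameNode address hdfs://namenode.example.com:8020 below is a hypothetical placeholder, not taken from any of the examples.

import java.net.URI;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;

public class GetDefaultUriDemo {
  public static void main(String[] args) {
    // A fresh Configuration loads core-default.xml/core-site.xml from the classpath.
    Configuration conf = new Configuration();

    // With no fs.defaultFS configured, this falls back to file:///
    System.out.println("default: " + FileSystem.getDefaultUri(conf));

    // Point the configuration at a hypothetical HDFS NameNode address...
    FileSystem.setDefaultUri(conf, URI.create("hdfs://namenode.example.com:8020"));

    // ...and getDefaultUri now returns the configured fs.defaultFS value.
    System.out.println("configured: " + FileSystem.getDefaultUri(conf));
  }
}

Most of the examples below follow the same round trip: read the default URI from a live cluster's Configuration, then hand it to another component (HdfsAdmin, DistCp, a har:// path) that needs the cluster's authority.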

Example 1: testFileSystemCloseAll

import org.apache.hadoop.fs.FileSystem; // import the package/class this method depends on
@Test
public void testFileSystemCloseAll() throws Exception {
  Configuration conf = getTestConfiguration();
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0).build();
  URI address = FileSystem.getDefaultUri(conf);

  try {
    FileSystem.closeAll();

    conf = getTestConfiguration();
    FileSystem.setDefaultUri(conf, address);
    FileSystem.get(conf);
    FileSystem.get(conf);
    FileSystem.closeAll();
  }
  finally {
    if (cluster != null) { cluster.shutdown(); }
  }
}
 
Developer ID: naver, Project: hadoop, Lines: 20, Source: TestDistributedFileSystem.java

Example 2: testHarUriWithHaUriWithNoPort

import org.apache.hadoop.fs.FileSystem; // import the package/class this method depends on
/**
 * Test that the HarFileSystem works with underlying HDFS URIs that have no
 * port specified, as is often the case with an HA setup.
 */
@Test
public void testHarUriWithHaUriWithNoPort() throws Exception {
  Configuration conf = new HdfsConfiguration();
  MiniDFSCluster cluster = null;
  try {
    cluster = new MiniDFSCluster.Builder(conf)
        .numDataNodes(1)
        .nnTopology(MiniDFSNNTopology.simpleHATopology())
        .build();
    cluster.transitionToActive(0);
    HATestUtil.setFailoverConfigurations(cluster, conf);
    
    createEmptyHarArchive(HATestUtil.configureFailoverFs(cluster, conf),
        TEST_HAR_PATH);
    
    URI failoverUri = FileSystem.getDefaultUri(conf);
    Path p = new Path("har://hdfs-" + failoverUri.getAuthority() + TEST_HAR_PATH);
    p.getFileSystem(conf);
  } finally {
    if (cluster != null) { cluster.shutdown(); }
  }
}
 
Developer ID: naver, Project: hadoop, Lines: 27, Source: TestHarFileSystemWithHA.java

Example 3: reconstructAuthorityIfNeeded

import org.apache.hadoop.fs.FileSystem; // import the package/class this method depends on
/**
 * Puts in the authority of the default file system if it is a WASB file
 * system and the given URI's authority is null.
 * 
 * @return The URI with reconstructed authority if necessary and possible.
 */
private static URI reconstructAuthorityIfNeeded(URI uri, Configuration conf) {
  if (null == uri.getAuthority()) {
    // If WASB is the default file system, get the authority from there
    URI defaultUri = FileSystem.getDefaultUri(conf);
    if (defaultUri != null && isWasbScheme(defaultUri.getScheme())) {
      try {
        // Reconstruct the URI with the authority from the default URI.
        return new URI(uri.getScheme(), defaultUri.getAuthority(),
            uri.getPath(), uri.getQuery(), uri.getFragment());
      } catch (URISyntaxException e) {
        // This should never happen.
        throw new Error("Bad URI construction", e);
      }
    }
  }
  return uri;
}
 
Developer ID: naver, Project: hadoop, Lines: 24, Source: NativeAzureFileSystem.java

Example 4: getInfoServer

import org.apache.hadoop.fs.FileSystem; // import the package/class this method depends on
/**
 * Returns the URL of the Jetty info server that the NameNode is listening on.
 */
private URL getInfoServer() throws IOException {
  URI fsName = FileSystem.getDefaultUri(conf);
  if (!HdfsConstants.HDFS_URI_SCHEME.equalsIgnoreCase(fsName.getScheme())) {
    throw new IOException("This is not a DFS");
  }

  final String scheme = DFSUtil.getHttpClientScheme(conf);
  URI address = DFSUtil.getInfoServerWithDefaultHost(fsName.getHost(), conf,
      scheme);
  LOG.debug("Will connect to NameNode at " + address);
  return address.toURL();
}
 
Developer ID: naver, Project: hadoop, Lines: 16, Source: SecondaryNameNode.java

Example 5: testReadWrite

import org.apache.hadoop.fs.FileSystem; // import the package/class this method depends on
@Test(timeout = 120000)
public void testReadWrite() throws Exception {
  final HdfsAdmin dfsAdmin =
      new HdfsAdmin(FileSystem.getDefaultUri(conf), conf);
  // Create a base file for comparison
  final Path baseFile = new Path("/base");
  final int len = 8192;
  DFSTestUtil.createFile(fs, baseFile, len, (short) 1, 0xFEED);
  // Create the first enc file
  final Path zone = new Path("/zone");
  fs.mkdirs(zone);
  dfsAdmin.createEncryptionZone(zone, TEST_KEY);
  final Path encFile1 = new Path(zone, "myfile");
  DFSTestUtil.createFile(fs, encFile1, len, (short) 1, 0xFEED);
  // Read them back in and compare byte-by-byte
  verifyFilesEqual(fs, baseFile, encFile1, len);
  // Roll the key of the encryption zone
  assertNumZones(1);
  String keyName = dfsAdmin.listEncryptionZones().next().getKeyName();
  cluster.getNamesystem().getProvider().rollNewVersion(keyName);
  // Read them back in and compare byte-by-byte
  verifyFilesEqual(fs, baseFile, encFile1, len);
  // Write a new enc file and validate
  final Path encFile2 = new Path(zone, "myfile2");
  DFSTestUtil.createFile(fs, encFile2, len, (short) 1, 0xFEED);
  // FEInfos should be different
  FileEncryptionInfo feInfo1 = getFileEncryptionInfo(encFile1);
  FileEncryptionInfo feInfo2 = getFileEncryptionInfo(encFile2);
  assertFalse("EDEKs should be different", Arrays
      .equals(feInfo1.getEncryptedDataEncryptionKey(),
          feInfo2.getEncryptedDataEncryptionKey()));
  assertNotEquals("Key was rolled, versions should be different",
      feInfo1.getEzKeyVersionName(), feInfo2.getEzKeyVersionName());
  // Contents still equal
  verifyFilesEqual(fs, encFile1, encFile2, len);
}
 
Developer ID: naver, Project: hadoop, Lines: 37, Source: TestEncryptionZones.java

Example 6: testHdfsAdminSetQuota

import org.apache.hadoop.fs.FileSystem; // import the package/class this method depends on
/**
 * Test that we can set and clear quotas via {@link HdfsAdmin}.
 */
@Test
public void testHdfsAdminSetQuota() throws Exception {
  HdfsAdmin dfsAdmin = new HdfsAdmin(
      FileSystem.getDefaultUri(conf), conf);
  FileSystem fs = null;
  try {
    fs = FileSystem.get(conf);
    assertTrue(fs.mkdirs(TEST_PATH));
    assertEquals(-1, fs.getContentSummary(TEST_PATH).getQuota());
    assertEquals(-1, fs.getContentSummary(TEST_PATH).getSpaceQuota());
    
    dfsAdmin.setSpaceQuota(TEST_PATH, 10);
    assertEquals(-1, fs.getContentSummary(TEST_PATH).getQuota());
    assertEquals(10, fs.getContentSummary(TEST_PATH).getSpaceQuota());
    
    dfsAdmin.setQuota(TEST_PATH, 10);
    assertEquals(10, fs.getContentSummary(TEST_PATH).getQuota());
    assertEquals(10, fs.getContentSummary(TEST_PATH).getSpaceQuota());
    
    dfsAdmin.clearSpaceQuota(TEST_PATH);
    assertEquals(10, fs.getContentSummary(TEST_PATH).getQuota());
    assertEquals(-1, fs.getContentSummary(TEST_PATH).getSpaceQuota());
    
    dfsAdmin.clearQuota(TEST_PATH);
    assertEquals(-1, fs.getContentSummary(TEST_PATH).getQuota());
    assertEquals(-1, fs.getContentSummary(TEST_PATH).getSpaceQuota());
  } finally {
    if (fs != null) {
      fs.close();
    }
  }
}
 
Developer ID: naver, Project: hadoop, Lines: 36, Source: TestHdfsAdmin.java

Example 7: doArchive

import org.apache.hadoop.fs.FileSystem; // import the package/class this method depends on
/**
 * doArchive: Workhorse function to archive log files.
 * @param logListURI : The URI that serves the list of log files to archive.
 * @param archiveDirectory : The directory in which to store the archived log files.
 * @throws IOException
 */
public void doArchive(String logListURI, String archiveDirectory)
    throws IOException {
  String destURL = FileSystem.getDefaultUri(fsConfig) + archiveDirectory;
  DistCpV1.copy(new JobConf(fsConfig), logListURI, destURL, null, true, false);
}
 
Developer ID: naver, Project: hadoop, Lines: 14, Source: Logalyzer.java

Example 8: getNNAddress

import org.apache.hadoop.fs.FileSystem; // import the package/class this method depends on
public static InetSocketAddress getNNAddress(Configuration conf) {
  URI filesystemURI = FileSystem.getDefaultUri(conf);
  return getNNAddressCheckLogical(conf, filesystemURI);
}
 
Developer ID: nucypher, Project: hadoop-oss, Lines: 5, Source: NuCypherExtUtilClient.java

Example 9: getAddress

import org.apache.hadoop.fs.FileSystem; // import the package/class this method depends on
public static InetSocketAddress getAddress(Configuration conf) {
  URI filesystemURI = FileSystem.getDefaultUri(conf);
  return getAddress(filesystemURI);
}
 
Developer ID: naver, Project: hadoop, Lines: 5, Source: NameNode.java

Example 10: testReadWriteUsingWebHdfs

import org.apache.hadoop.fs.FileSystem; // import the package/class this method depends on
@Test(timeout = 120000)
public void testReadWriteUsingWebHdfs() throws Exception {
  final HdfsAdmin dfsAdmin =
      new HdfsAdmin(FileSystem.getDefaultUri(conf), conf);
  final FileSystem webHdfsFs = WebHdfsTestUtil.getWebHdfsFileSystem(conf,
      WebHdfsFileSystem.SCHEME);

  final Path zone = new Path("/zone");
  fs.mkdirs(zone);
  dfsAdmin.createEncryptionZone(zone, TEST_KEY);

  /* Create an unencrypted file for comparison purposes. */
  final Path unencFile = new Path("/unenc");
  final int len = 8192;
  DFSTestUtil.createFile(webHdfsFs, unencFile, len, (short) 1, 0xFEED);

  /*
   * Create the same file via webhdfs, but this time encrypted. Compare it
   * using both webhdfs and DFS.
   */
  final Path encFile1 = new Path(zone, "myfile");
  DFSTestUtil.createFile(webHdfsFs, encFile1, len, (short) 1, 0xFEED);
  verifyFilesEqual(webHdfsFs, unencFile, encFile1, len);
  verifyFilesEqual(fs, unencFile, encFile1, len);

  /*
   * Same thing except this time create the encrypted file using DFS.
   */
  final Path encFile2 = new Path(zone, "myfile2");
  DFSTestUtil.createFile(fs, encFile2, len, (short) 1, 0xFEED);
  verifyFilesEqual(webHdfsFs, unencFile, encFile2, len);
  verifyFilesEqual(fs, unencFile, encFile2, len);

  /* Verify appending to files works correctly. */
  appendOneByte(fs, unencFile);
  appendOneByte(webHdfsFs, encFile1);
  appendOneByte(fs, encFile2);
  verifyFilesEqual(webHdfsFs, unencFile, encFile1, len);
  verifyFilesEqual(fs, unencFile, encFile1, len);
  verifyFilesEqual(webHdfsFs, unencFile, encFile2, len);
  verifyFilesEqual(fs, unencFile, encFile2, len);
}
 
Developer ID: naver, Project: hadoop, Lines: 43, Source: TestEncryptionZones.java

Example 11: testHftpAccessControl

import org.apache.hadoop.fs.FileSystem; // import the package/class this method depends on
public void testHftpAccessControl() throws Exception {
  MiniDFSCluster cluster = null;
  try {
    final UserGroupInformation DFS_UGI = createUGI("dfs", true); 
    final UserGroupInformation USER_UGI = createUGI("user", false); 

    //start cluster by DFS_UGI
    final Configuration dfsConf = new Configuration();
    cluster = new MiniDFSCluster.Builder(dfsConf).numDataNodes(2).build();
    cluster.waitActive();

    final String httpAdd = dfsConf.get("dfs.http.address");
    final URI nnURI = FileSystem.getDefaultUri(dfsConf);
    final String nnUri = nnURI.toString();
    FileSystem fs1 = DFS_UGI.doAs(new PrivilegedExceptionAction<FileSystem>() {
      public FileSystem run() throws IOException {
        return FileSystem.get(nnURI, dfsConf);
      }
    });
    final Path home = 
      createHomeDirectory(fs1, USER_UGI);
    
    //now, login as USER_UGI
    final Configuration userConf = new Configuration();
    final FileSystem fs = 
      USER_UGI.doAs(new PrivilegedExceptionAction<FileSystem>() {
      public FileSystem run() throws IOException {
        return FileSystem.get(nnURI, userConf);
      }
    });
    
    final Path srcrootpath = new Path(home, "src_root"); 
    final String srcrootdir =  srcrootpath.toString();
    final Path dstrootpath = new Path(home, "dst_root"); 
    final String dstrootdir =  dstrootpath.toString();
    final DistCpV1 distcp = USER_UGI.doAs(new PrivilegedExceptionAction<DistCpV1>() {
      public DistCpV1 run() {
        return new DistCpV1(userConf);
      }
    });

    FileSystem.mkdirs(fs, srcrootpath, new FsPermission((short)0700));
    final String[] args = {"hftp://"+httpAdd+srcrootdir, nnUri+dstrootdir};

    { //copy with permission 000, should fail
      fs.setPermission(srcrootpath, new FsPermission((short)0));
      USER_UGI.doAs(new PrivilegedExceptionAction<Void>() {
        public Void run() throws Exception {
          assertEquals(-3, ToolRunner.run(distcp, args));
          return null;
        }
      });
    }
  } finally {
    if (cluster != null) { cluster.shutdown(); }
  }
}
 
Developer ID: naver, Project: hadoop, Lines: 58, Source: TestCopyFiles.java

Example 12: testDelete

import org.apache.hadoop.fs.FileSystem; // import the package/class this method depends on
/** test -delete */
public void testDelete() throws Exception {
  final Configuration conf = new Configuration();
  conf.setInt("fs.trash.interval", 60);
  MiniDFSCluster cluster = null;
  try {
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
    final URI nnURI = FileSystem.getDefaultUri(conf);
    final String nnUri = nnURI.toString();
    final FileSystem fs = FileSystem.get(URI.create(nnUri), conf);

    final DistCpV1 distcp = new DistCpV1(conf);
    final FsShell shell = new FsShell(conf);  

    final String srcrootdir = "/src_root";
    final String dstrootdir = "/dst_root";

    {
      //create source files
      createFiles(nnURI, srcrootdir);
      String srcresults = execCmd(shell, "-lsr", srcrootdir);
      srcresults = removePrefix(srcresults, srcrootdir);
      System.out.println("srcresults=" +  srcresults);

      //create some files in dst
      createFiles(nnURI, dstrootdir);
      System.out.println("dstrootdir=" +  dstrootdir);
      shell.run(new String[]{"-lsr", dstrootdir});

      //run distcp
      ToolRunner.run(distcp,
          new String[]{"-delete", "-update", "-log", "/log",
                       nnUri+srcrootdir, nnUri+dstrootdir});

      //make sure src and dst contains the same files
      String dstresults = execCmd(shell, "-lsr", dstrootdir);
      dstresults = removePrefix(dstresults, dstrootdir);
      System.out.println("first dstresults=" +  dstresults);
      assertEquals(srcresults, dstresults);

      //create additional file in dst
      create(fs, new Path(dstrootdir, "foo"));
      create(fs, new Path(dstrootdir, "foobar"));

      //run distcp again
      ToolRunner.run(distcp,
          new String[]{"-delete", "-update", "-log", "/log2",
                       nnUri+srcrootdir, nnUri+dstrootdir});
      
      //make sure src and dst contains the same files
      dstresults = execCmd(shell, "-lsr", dstrootdir);
      dstresults = removePrefix(dstresults, dstrootdir);
      System.out.println("second dstresults=" +  dstresults);
      assertEquals(srcresults, dstresults);
      // verify that files removed in -delete were moved to the trash
      // regrettably, this test will break if Trash changes incompatibly
      assertTrue(fs.exists(new Path(fs.getHomeDirectory(),
              ".Trash/Current" + dstrootdir + "/foo")));
      assertTrue(fs.exists(new Path(fs.getHomeDirectory(),
              ".Trash/Current" + dstrootdir + "/foobar")));

      //cleanup
      deldir(fs, dstrootdir);
      deldir(fs, srcrootdir);
    }
  } finally {
    if (cluster != null) { cluster.shutdown(); }
  }
}
 
Developer ID: naver, Project: hadoop, Lines: 70, Source: TestCopyFiles.java


Note: The org.apache.hadoop.fs.FileSystem.getDefaultUri method examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets are drawn from open-source projects contributed by many developers; copyright of the source code remains with the original authors, and distribution and use are subject to each project's license. Do not reproduce without permission.