

Java FileSystem.get Method Code Examples

This article collects typical usage examples of the Java method org.apache.hadoop.fs.FileSystem.get. If you are wondering how FileSystem.get works, how to call it, or what it looks like in real code, the curated examples below may help. You can also explore further usage examples of the enclosing class, org.apache.hadoop.fs.FileSystem.


Below are 15 code examples of the FileSystem.get method, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Java code examples.
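
Before the examples, here is a minimal standalone sketch (written for this article, not taken from any of the projects below) showing the two most common overloads of FileSystem.get; the hdfs://master:8020 address is a placeholder, so running it against HDFS requires a real cluster address.

import java.net.URI;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class FileSystemGetDemo {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();

    // Overload 1: resolve the file system from fs.defaultFS in the Configuration.
    FileSystem defaultFs = FileSystem.get(conf);

    // Overload 2: resolve the file system from an explicit URI; the scheme
    // ("hdfs", "file", "webhdfs", ...) selects the implementation class.
    FileSystem hdfs = FileSystem.get(URI.create("hdfs://master:8020"), conf);

    // FileSystem.get returns a cached instance shared per scheme, authority,
    // and user; use FileSystem.newInstance(...) instead when you need a
    // private instance that is safe to close independently.
    System.out.println(defaultFs.exists(new Path("/")));
  }
}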

Example 1: setupCluster

import org.apache.hadoop.fs.FileSystem; // import the package/class this method depends on
private static void setupCluster(final int nNameNodes, final int nDataNodes)
    throws Exception {
  LOG.info("nNameNodes=" + nNameNodes + ", nDataNodes=" + nDataNodes);

  conf.setBoolean(DFSConfigKeys.DFS_WEBHDFS_ENABLED_KEY, true);

  cluster = new MiniDFSCluster.Builder(conf)
      .nnTopology(MiniDFSNNTopology.simpleFederatedTopology(nNameNodes))
      .numDataNodes(nDataNodes)
      .build();
  cluster.waitActive();
  
  webhdfs = new WebHdfsFileSystem[nNameNodes];
  for(int i = 0; i < webhdfs.length; i++) {
    final InetSocketAddress addr = cluster.getNameNode(i).getHttpAddress();
    final String uri = WebHdfsFileSystem.SCHEME + "://"
        + addr.getHostName() + ":" + addr.getPort() + "/";
    webhdfs[i] = (WebHdfsFileSystem)FileSystem.get(new URI(uri), conf);
  }
}
 
Developer: naver, Project: hadoop, Lines: 21, Source: TestWebHdfsWithMultipleNameNodes.java

Example 2: testHftpCustomDefaultPorts

import org.apache.hadoop.fs.FileSystem; // import the package/class this method depends on
@Test
public void testHftpCustomDefaultPorts() throws IOException {
  Configuration conf = new Configuration();
  conf.setInt(DFSConfigKeys.DFS_NAMENODE_HTTP_PORT_KEY, 123);

  URI uri = URI.create("hftp://localhost");
  HftpFileSystem fs = (HftpFileSystem) FileSystem.get(uri, conf);

  assertEquals(123, fs.getDefaultPort());

  assertEquals(uri, fs.getUri());

  // HFTP uses http to get the token so canonical service name should
  // return the http port.
  assertEquals("127.0.0.1:123", fs.getCanonicalServiceName());
}
 
Developer: naver, Project: hadoop, Lines: 17, Source: TestHftpFileSystem.java

Example 3: testWebHdfsCustomUriPortWithCustomDefaultPorts

import org.apache.hadoop.fs.FileSystem; // import the package/class this method depends on
@Test
public void testWebHdfsCustomUriPortWithCustomDefaultPorts() throws IOException {
  URI uri = URI.create("webhdfs://localhost:789");
  WebHdfsFileSystem fs = (WebHdfsFileSystem) FileSystem.get(uri, conf);

  assertEquals(123, fs.getDefaultPort());
  assertEquals(uri, fs.getUri());
  assertEquals("127.0.0.1:789", fs.getCanonicalServiceName());
}
 
Developer: naver, Project: hadoop, Lines: 10, Source: TestHttpFSPorts.java

Example 4: getSnapshotList

import org.apache.hadoop.fs.FileSystem; // import the package/class this method depends on
/**
 * Returns the list of available snapshots in the specified location
 * @param conf the {@link Configuration} to use
 * @return the list of snapshots
 */
public static List<SnapshotDescription> getSnapshotList(final Configuration conf)
    throws IOException {
  Path rootDir = FSUtils.getRootDir(conf);
  FileSystem fs = FileSystem.get(rootDir.toUri(), conf);
  Path snapshotDir = SnapshotDescriptionUtils.getSnapshotsDir(rootDir);
  FileStatus[] snapshots = fs.listStatus(snapshotDir,
    new SnapshotDescriptionUtils.CompletedSnaphotDirectoriesFilter(fs));
  List<SnapshotDescription> snapshotLists =
    new ArrayList<SnapshotDescription>(snapshots.length);
  for (FileStatus snapshotDirStat: snapshots) {
    snapshotLists.add(SnapshotDescriptionUtils.readSnapshotInfo(fs, snapshotDirStat.getPath()));
  }
  return snapshotLists;
}
 
Developer: fengchen8086, Project: ditb, Lines: 20, Source: SnapshotInfo.java

Example 5: testTimeout

import org.apache.hadoop.fs.FileSystem; // import the package/class this method depends on
@Test
public void testTimeout() throws IOException {
  Configuration conf = new Configuration();
  URI uri = URI.create("hftp://localhost");
  HftpFileSystem fs = (HftpFileSystem) FileSystem.get(uri, conf);
  URLConnection conn = fs.connectionFactory.openConnection(new URL(
      "http://localhost"));
  assertEquals(URLConnectionFactory.DEFAULT_SOCKET_TIMEOUT,
      conn.getConnectTimeout());
  assertEquals(URLConnectionFactory.DEFAULT_SOCKET_TIMEOUT,
      conn.getReadTimeout());
}
 
Developer: naver, Project: hadoop, Lines: 13, Source: TestHftpFileSystem.java

Example 6: testPreserveNothingOnDirectory

import org.apache.hadoop.fs.FileSystem; // import the package/class this method depends on
@Test
public void testPreserveNothingOnDirectory() throws IOException {
  FileSystem fs = FileSystem.get(config);
  EnumSet<FileAttribute> attributes = EnumSet.noneOf(FileAttribute.class);

  Path dst = new Path("/tmp/abc");
  Path src = new Path("/tmp/src");

  createDirectory(fs, src);
  createDirectory(fs, dst);

  fs.setPermission(src, fullPerm);
  fs.setOwner(src, "somebody", "somebody-group");
  fs.setTimes(src, 0, 0);

  fs.setPermission(dst, noPerm);
  fs.setOwner(dst, "nobody", "nobody-group");
  fs.setTimes(dst, 100, 100);

  CopyListingFileStatus srcStatus = new CopyListingFileStatus(fs.getFileStatus(src));

  DistCpUtils.preserve(fs, dst, srcStatus, attributes, false);

  CopyListingFileStatus dstStatus = new CopyListingFileStatus(fs.getFileStatus(dst));

  // FileStatus.equals only compares path field, must explicitly compare all fields
  Assert.assertFalse(srcStatus.getPermission().equals(dstStatus.getPermission()));
  Assert.assertFalse(srcStatus.getOwner().equals(dstStatus.getOwner()));
  Assert.assertFalse(srcStatus.getGroup().equals(dstStatus.getGroup()));
  Assert.assertTrue(dstStatus.getAccessTime() == 100);
  Assert.assertTrue(dstStatus.getModificationTime() == 100);
  Assert.assertTrue(dstStatus.getReplication() == 0);
}
 
Developer: naver, Project: hadoop, Lines: 34, Source: TestDistCpUtils.java

Example 7: testWriteHFile

import org.apache.hadoop.fs.FileSystem; // import the package/class this method depends on
@Test
public void testWriteHFile() throws Exception {
  Path hfPath = new Path(testUtil.getDataTestDir(),
      TestHFileInlineToRootChunkConversion.class.getSimpleName() + ".hfile");
  int maxChunkSize = 1024;
  FileSystem fs = FileSystem.get(conf);
  CacheConfig cacheConf = new CacheConfig(conf);
  conf.setInt(HFileBlockIndex.MAX_CHUNK_SIZE_KEY, maxChunkSize);
  HFileContext context = new HFileContextBuilder().withBlockSize(16).build();
  HFileWriterV2 hfw =
      (HFileWriterV2) new HFileWriterV2.WriterFactoryV2(conf, cacheConf)
          .withFileContext(context)
          .withPath(fs, hfPath).create();
  List<byte[]> keys = new ArrayList<byte[]>();
  StringBuilder sb = new StringBuilder();

  for (int i = 0; i < 4; ++i) {
    sb.append("key" + String.format("%05d", i));
    sb.append("_");
    for (int j = 0; j < 100; ++j) {
      // Note: '0' + j is int arithmetic, so this appends the numeric value
      // (48, 49, ...) rather than a single character, padding out the key.
      sb.append('0' + j);
    }
    String keyStr = sb.toString();
    sb.setLength(0);

    byte[] k = Bytes.toBytes(keyStr);
    keys.add(k);
    byte[] v = Bytes.toBytes("value" + i);
    hfw.append(CellUtil.createCell(k, v));
  }
  hfw.close();

  HFileReaderV2 reader = (HFileReaderV2) HFile.createReader(fs, hfPath, cacheConf, conf);
  // Scanner doesn't do Cells yet.  Fix.
  HFileScanner scanner = reader.getScanner(true, true);
  for (int i = 0; i < keys.size(); ++i) {
    scanner.seekTo(CellUtil.createCell(keys.get(i)));
  }
  reader.close();
}
 
Developer: fengchen8086, Project: ditb, Lines: 41, Source: TestHFileInlineToRootChunkConversion.java

Example 8: setup

import org.apache.hadoop.fs.FileSystem; // import the package/class this method depends on
@BeforeClass
public static void setup() throws Exception {
  conf = ViewFileSystemTestSetup.createConfig();
  fs1 = setupFileSystem(new URI("fs1:///"), FakeFileSystem.class);
  fs2 = setupFileSystem(new URI("fs2:///"), FakeFileSystem.class);
  viewFs = FileSystem.get(FsConstants.VIEWFS_URI, conf);
}
 
Developer: naver, Project: hadoop, Lines: 8, Source: TestViewFileSystemDelegationTokenSupport.java

Example 9: testHsftpCustomUriPortWithDefaultPorts

import org.apache.hadoop.fs.FileSystem; // import the package/class this method depends on
@Test
public void testHsftpCustomUriPortWithDefaultPorts() throws IOException {
  Configuration conf = new Configuration();
  URI uri = URI.create("hsftp://localhost:123");
  HsftpFileSystem fs = (HsftpFileSystem) FileSystem.get(uri, conf);

  assertEquals(DFSConfigKeys.DFS_NAMENODE_HTTPS_PORT_DEFAULT,
      fs.getDefaultPort());

  assertEquals(uri, fs.getUri());
  assertEquals("127.0.0.1:123", fs.getCanonicalServiceName());
}
 
Developer: naver, Project: hadoop, Lines: 13, Source: TestHftpFileSystem.java

Example 10: main

import org.apache.hadoop.fs.FileSystem; // import the package/class this method depends on
public static void main(String[] args) throws IOException {
    final String localSrc = "/tmp/log/bigdata.pdf";
    final String hdfsUri = "hdfs://master:8020/test/bigdata.pdf";
    InputStream in = new BufferedInputStream(new FileInputStream(localSrc));
    Configuration conf = new Configuration();
    FileSystem fs = FileSystem.get(URI.create(hdfsUri), conf);
    OutputStream out = fs.create(new Path(hdfsUri), new Progressable() {
        // progress() is called back only when the target file system is HDFS; the local, S3, and FTP implementations never invoke it
        @Override
        public void progress() {
            System.out.print(">");
        }
    });
    IOUtils.copyBytes(in, out, 4096, true);
}
 
Developer: MedusaLeee, Project: HadoopGuides, Lines: 16, Source: FileCopyWithProgress.java

Example 11: setupMockFileSystem

import org.apache.hadoop.fs.FileSystem; // import the package/class this method depends on
private static FileSystem setupMockFileSystem(Configuration conf, URI uri)
    throws Exception {
  String scheme = uri.getScheme();
  conf.set("fs." + scheme + ".impl", MockFileSystem.class.getName());
  FileSystem fs = FileSystem.get(uri, conf);
  ConfigUtil.addLink(conf, "/mounts/" + scheme, uri);
  return ((MockFileSystem)fs).getRawFileSystem();
}
 
Developer: nucypher, Project: hadoop-oss, Lines: 9, Source: TestViewFileSystemDelegation.java

Example 12: validateDeletedPartitionsFile

import org.apache.hadoop.fs.FileSystem; // import the package/class this method depends on
/**
 * Confirm the absence of the {@link TotalOrderPartitioner} partitions file.
 */
protected static void validateDeletedPartitionsFile(Configuration conf) throws IOException {
  if (!conf.getBoolean(IntegrationTestingUtility.IS_DISTRIBUTED_CLUSTER, false))
    return;

  FileSystem fs = FileSystem.get(conf);
  Path partitionsFile = new Path(TotalOrderPartitioner.getPartitionFile(conf));
  assertFalse("Failed to clean up partitions file.", fs.exists(partitionsFile));
}
 
Developer: fengchen8086, Project: ditb, Lines: 12, Source: IntegrationTestImportTsv.java

Example 13: createTextFile

import org.apache.hadoop.fs.FileSystem; // import the package/class this method depends on
/**
 * Create a data file that gets exported to the db.
 * @param fileNum the number of the file (for multi-file export)
 * @param numRecords how many records to write to the file.
 * @param gzip true if the file should be gzipped.
 */
protected void createTextFile(int fileNum, int numRecords, boolean gzip,
    ColumnGenerator... extraCols) throws IOException {
  int startId = fileNum * numRecords;

  String ext = ".txt";
  if (gzip) {
    ext = ext + ".gz";
  }
  Path tablePath = getTablePath();
  Path filePath = new Path(tablePath, "part" + fileNum + ext);

  Configuration conf = new Configuration();
  if (!BaseSqoopTestCase.isOnPhysicalCluster()) {
    conf.set(CommonArgs.FS_DEFAULT_NAME, CommonArgs.LOCAL_FS);
  }
  FileSystem fs = FileSystem.get(conf);
  fs.mkdirs(tablePath);
  OutputStream os = fs.create(filePath);
  if (gzip) {
    CompressionCodecFactory ccf = new CompressionCodecFactory(conf);
    CompressionCodec codec = ccf.getCodec(filePath);
    os = codec.createOutputStream(os);
  }
  BufferedWriter w = new BufferedWriter(new OutputStreamWriter(os));
  for (int i = 0; i < numRecords; i++) {
    w.write(getRecordLine(startId + i, extraCols));
  }
  w.close();
  os.close();

  if (gzip) {
    verifyCompressedFile(filePath, numRecords);
  }
}
 
Developer: aliyun, Project: aliyun-maxcompute-data-collectors, Lines: 41, Source: TestExport.java

Example 14: setUp

import org.apache.hadoop.fs.FileSystem; // import the package/class this method depends on
@Before
public void setUp() throws IOException {
  conf = TEST_UTIL.getConfiguration();

  // This test requires the most recent HFile format (i.e. v2).
  conf.setInt(HFile.FORMAT_VERSION_KEY, HFile.MAX_FORMAT_VERSION);

  fs = FileSystem.get(conf);

  cacheConf = new CacheConfig(conf);
  blockCache = cacheConf.getBlockCache();
  assertNotNull(blockCache);
}
 
Developer: fengchen8086, Project: ditb, Lines: 14, Source: TestCompoundBloomFilter.java

Example 15: Client

import org.apache.hadoop.fs.FileSystem; // import the package/class this method depends on
/**
 * Constructor: loads core-site.xml and hdfs-site.xml from HADOOP_CONF_DIR,
 * then initializes the HDFS handle and the current user's credentials.
 * @throws IOException
 */
private Client() throws IOException {
    conf.addResource(new Path(System.getenv("HADOOP_CONF_DIR") +"/core-site.xml"));
    conf.addResource(new Path(System.getenv("HADOOP_CONF_DIR") +"/hdfs-site.xml"));
    dfs = FileSystem.get(conf);
    userName = UserGroupInformation.getCurrentUser().getShortUserName();
    credentials = UserGroupInformation.getCurrentUser().getCredentials();
}
 
Developer: Intel-bigdata, Project: MXNetOnYARN, Lines: 12, Source: Client.java


Note: The org.apache.hadoop.fs.FileSystem.get examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other open-source code and documentation platforms. The snippets were selected from open-source projects contributed by many developers; copyright in the source code remains with the original authors, and any distribution or use should follow the corresponding project's license. Do not reproduce without permission.