

Java FileSystem Class Code Examples

This article collects typical usage examples of the Java class org.apache.hadoop.fs.FileSystem. If you are wondering what the FileSystem class does, how to use it, or what real-world usage looks like, the curated class code examples below may help.


The FileSystem class belongs to the org.apache.hadoop.fs package. Fifteen code examples of the FileSystem class are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better Java code examples.
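Before looking at the individual examples, here is a minimal, self-contained sketch of the typical FileSystem workflow: obtain an instance from a Configuration, write a file, read it back, and delete it. It is not taken from any of the projects below, and the path /tmp/filesystem-demo.txt is only an illustrative placeholder; the file system actually used is whatever fs.defaultFS resolves to.

import java.io.IOException;
import java.nio.charset.StandardCharsets;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class FileSystemDemo {
  public static void main(String[] args) throws IOException {
    Configuration conf = new Configuration();   // picks up core-site.xml / hdfs-site.xml from the classpath
    FileSystem fs = FileSystem.get(conf);       // resolves to the file system named by fs.defaultFS
    Path file = new Path("/tmp/filesystem-demo.txt");

    // write a small file, overwriting any previous copy
    try (FSDataOutputStream out = fs.create(file, true)) {
      out.write("hello hadoop".getBytes(StandardCharsets.UTF_8));
    }

    // read it back
    try (FSDataInputStream in = fs.open(file)) {
      byte[] buf = new byte[(int) fs.getFileStatus(file).getLen()];
      in.readFully(buf);
      System.out.println(new String(buf, StandardCharsets.UTF_8));
    }

    fs.delete(file, false);
  }
}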

Example 1: testReadReservedPath

import org.apache.hadoop.fs.FileSystem; // import the required package/class
/**
 * Regression test for HDFS-7045.
 * If a deadlock happens, the test will time out.
 * @throws Exception
 */
@Test(timeout=60000)
public void testReadReservedPath() throws Exception {
  Configuration conf = new Configuration();
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).
      numDataNodes(1).format(true).build();
  try {
    FileSystem fs = cluster.getFileSystem();
    fs.open(new Path("/.reserved/.inodes/file"));
    Assert.fail("Open a non existing file should fail.");
  } catch (FileNotFoundException e) {
    // Expected
  } finally {
    cluster.shutdown();
  }
}
 
Developer: naver, Project: hadoop, Lines of code: 21, Source file: TestRead.java

Example 2: ensureEmptyWriteDir

import org.apache.hadoop.fs.FileSystem; // import the required package/class
/** Create the directory where we'll write our test files, and
 * make sure it has no files in it.
 */
private void ensureEmptyWriteDir() throws IOException {
  FileSystem fs = FileSystem.getLocal(getConf());
  Path writeDir = getWritePath();

  fs.mkdirs(writeDir);

  FileStatus [] stats = fs.listStatus(writeDir);

  for (FileStatus stat : stats) {
    if (stat.isDir()) {
      fail("setUp(): Write directory " + writeDir
          + " contains subdirectories");
    }

    LOG.debug("setUp(): Removing " + stat.getPath());
    if (!fs.delete(stat.getPath(), false)) {
      fail("setUp(): Could not delete residual file " + stat.getPath());
    }
  }

  if (!fs.exists(writeDir)) {
    fail("setUp: Could not create " + writeDir);
  }
}
 
Developer: aliyun, Project: aliyun-maxcompute-data-collectors, Lines of code: 28, Source file: TestSplittableBufferedWriter.java

Example 3: FileOutputCommitter

import org.apache.hadoop.fs.FileSystem; // import the required package/class
/**
 * Create a file output committer
 * @param outputPath the job's output path, or null if you want the output
 * committer to act as a noop.
 * @param context the task's context
 * @throws IOException
 */
@Private
public FileOutputCommitter(Path outputPath, 
                           JobContext context) throws IOException {
  Configuration conf = context.getConfiguration();
  algorithmVersion =
      conf.getInt(FILEOUTPUTCOMMITTER_ALGORITHM_VERSION,
                  FILEOUTPUTCOMMITTER_ALGORITHM_VERSION_DEFAULT);
  LOG.info("File Output Committer Algorithm version is " + algorithmVersion);
  if (algorithmVersion != 1 && algorithmVersion != 2) {
    throw new IOException("Only 1 or 2 algorithm version is supported");
  }
  if (outputPath != null) {
    FileSystem fs = outputPath.getFileSystem(context.getConfiguration());
    this.outputPath = fs.makeQualified(outputPath);
  }
}
 
Developer: naver, Project: hadoop, Lines of code: 24, Source file: FileOutputCommitter.java
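The constructor above only accepts algorithm version 1 or 2, taken from the FILEOUTPUTCOMMITTER_ALGORITHM_VERSION configuration key. As a hedged sketch of how a job might opt into the version-2 commit algorithm (the class and job name here are placeholders, not part of the Hadoop source):

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.lib.output.FileOutputCommitter;

public class CommitterVersionSketch {
  public static Job newJobWithV2Committer(Configuration conf) throws IOException {
    // Request the version-2 commit algorithm; any value other than 1 or 2 would be
    // rejected with an IOException by the constructor shown above.
    conf.setInt(FileOutputCommitter.FILEOUTPUTCOMMITTER_ALGORITHM_VERSION, 2);
    return Job.getInstance(conf, "v2-committer-job");
  }
}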

Example 4: setUp

import org.apache.hadoop.fs.FileSystem; // import the required package/class
/**
 * Note that this method must be called after the mini hdfs cluster has
 * started or we end up with a local file system.
 */
@Override
protected void setUp() throws Exception {
  super.setUp();
  localfs =
    (conf.get("fs.defaultFS", "file:///").compareTo("file:///") == 0);

  if (fs == null) {
    this.fs = FileSystem.get(conf);
  }
  try {
    if (localfs) {
      this.testDir = getUnitTestdir(getName());
      if (fs.exists(testDir)) {
        fs.delete(testDir, true);
      }
    } else {
      this.testDir = FSUtils.getRootDir(conf);
    }
  } catch (Exception e) {
    LOG.fatal("error during setup", e);
    throw e;
  }
}
 
Developer: fengchen8086, Project: ditb, Lines of code: 28, Source file: HBaseTestCase.java

Example 5: testGetTokensForNamenodes

import org.apache.hadoop.fs.FileSystem; // import the required package/class
@SuppressWarnings("deprecation")
@Test
public void testGetTokensForNamenodes() throws IOException,
    URISyntaxException {
  Path TEST_ROOT_DIR =
      new Path(System.getProperty("test.build.data", "test/build/data"));
  // ick, but need fq path minus file:/
  String binaryTokenFile =
      FileSystem.getLocal(conf)
        .makeQualified(new Path(TEST_ROOT_DIR, "tokenFile")).toUri()
        .getPath();

  MockFileSystem fs1 = createFileSystemForServiceName("service1");
  Credentials creds = new Credentials();
  Token<?> token1 = fs1.getDelegationToken(renewer);
  creds.addToken(token1.getService(), token1);
  // wait to set, else the obtain tokens call above will fail with FNF
  conf.set(MRJobConfig.MAPREDUCE_JOB_CREDENTIALS_BINARY, binaryTokenFile);
  creds.writeTokenStorageFile(new Path(binaryTokenFile), conf);
  TokenCache.obtainTokensForNamenodesInternal(fs1, creds, conf);
  String fs_addr = fs1.getCanonicalServiceName();
  Token<?> nnt = TokenCache.getDelegationToken(creds, fs_addr);
  assertNotNull("Token for nn is null", nnt);
}
 
Developer: naver, Project: hadoop, Lines of code: 25, Source file: TestTokenCache.java

Example 6: validateFileWithChecksum

import org.apache.hadoop.fs.FileSystem; // import the required package/class
private static void validateFileWithChecksum(FileSystem fs, Path filePath, BackupFileInfo backupFileInfo) throws IOException {
  final CheckedInputStream cin = new CheckedInputStream(fs.open(filePath), new CRC32());
  final BufferedReader reader = new BufferedReader(new InputStreamReader(cin));
  final ObjectMapper objectMapper = new ObjectMapper();
  String line;
  long records = 0;
  // parse records just to make sure formatting is correct
  while ((line = reader.readLine()) != null) {
    objectMapper.readValue(line, BackupRecord.class);
    ++records;
  }
  cin.close();
  long found = cin.getChecksum().getValue();
  if (backupFileInfo.getChecksum() != found) {
    throw new IOException(format("Corrupt backup data file %s. Expected checksum %x, found %x", filePath, backupFileInfo.getChecksum(), found));
  }
  if (backupFileInfo.getRecords() != records) {
    throw new IOException(format("Corrupt backup data file %s. Expected records %x, found %x", filePath, backupFileInfo.getRecords(), records));
  }
}
 
Developer: dremio, Project: dremio-oss, Lines of code: 21, Source file: BackupRestoreUtil.java
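The validation above assumes a CRC32 checksum and a record count captured when the backup file was written. Below is a rough write-side counterpart: a sketch, not the Dremio code, where the class name and the line-per-record format are only assumptions mirroring the reader above.

import java.io.BufferedWriter;
import java.io.IOException;
import java.io.OutputStreamWriter;
import java.nio.charset.StandardCharsets;
import java.util.zip.CRC32;
import java.util.zip.CheckedOutputStream;

import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class ChecksummedWriteSketch {
  /** Write one record per line and return the CRC32 a later validation pass should expect. */
  public static long writeWithChecksum(FileSystem fs, Path file, Iterable<String> lines) throws IOException {
    CheckedOutputStream cout = new CheckedOutputStream(fs.create(file, true), new CRC32());
    try (BufferedWriter writer = new BufferedWriter(new OutputStreamWriter(cout, StandardCharsets.UTF_8))) {
      for (String line : lines) {
        writer.write(line);
        writer.newLine();
      }
    }
    // The checksum covers the raw bytes, newlines included, just like the CheckedInputStream above.
    return cout.getChecksum().getValue();
  }
}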

Example 7: getAddress

import org.apache.hadoop.fs.FileSystem; // import the required package/class
/**
 * @return address of file system
 */
public static InetSocketAddress getAddress(URI filesystemURI) {
  String authority = filesystemURI.getAuthority();
  if (authority == null) {
    throw new IllegalArgumentException(String.format(
        "Invalid URI for NameNode address (check %s): %s has no authority.",
        FileSystem.FS_DEFAULT_NAME_KEY, filesystemURI.toString()));
  }
  if (!HdfsConstants.HDFS_URI_SCHEME.equalsIgnoreCase(
      filesystemURI.getScheme())) {
    throw new IllegalArgumentException(String.format(
        "Invalid URI for NameNode address (check %s): %s is not of scheme '%s'.",
        FileSystem.FS_DEFAULT_NAME_KEY, filesystemURI.toString(),
        HdfsConstants.HDFS_URI_SCHEME));
  }
  return getAddress(authority);
}
 
Developer: naver, Project: hadoop, Lines of code: 20, Source file: NameNode.java
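A hedged usage sketch for the method above: a well-formed hdfs:// URI yields an InetSocketAddress built from its authority, while a URI with a missing authority or a non-hdfs scheme triggers the IllegalArgumentException shown. The host namenode.example.com:8020 is a placeholder.

import java.net.InetSocketAddress;
import java.net.URI;

import org.apache.hadoop.hdfs.server.namenode.NameNode;

public class NameNodeAddressSketch {
  public static void main(String[] args) {
    // Valid: scheme "hdfs" plus an authority carrying host and port.
    InetSocketAddress addr =
        NameNode.getAddress(URI.create("hdfs://namenode.example.com:8020/user/data"));
    System.out.println(addr.getHostName() + ":" + addr.getPort()); // namenode.example.com:8020

    // Invalid inputs such as file:///tmp (no authority) or ftp://host:21/ (wrong scheme)
    // would throw IllegalArgumentException if uncommented:
    // NameNode.getAddress(URI.create("file:///tmp"));
  }
}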

Example 8: apply

import org.apache.hadoop.fs.FileSystem; // import the required package/class
@Override
public PathMetadata apply(@Nonnull Path location) {
  try {
    FileSystem fs = location.getFileSystem(conf);
    FileStatus fileStatus = fs.getFileStatus(location);
    FileChecksum checksum = null;
    if (fileStatus.isFile()) {
      checksum = fs.getFileChecksum(location);
    }

    List<PathMetadata> childPathDescriptors = new ArrayList<>();
    if (fileStatus.isDirectory()) {
      FileStatus[] childStatuses = fs.listStatus(location);
      for (FileStatus childStatus : childStatuses) {
        childPathDescriptors.add(apply(childStatus.getPath()));
      }
    }

    return new PathMetadata(location, fileStatus.getModificationTime(), checksum, childPathDescriptors);

  } catch (IOException e) {
    throw new CircusTrainException("Unable to compute digest for location " + location.toString(), e);
  }
}
 
Developer: HotelsDotCom, Project: circus-train, Lines of code: 25, Source file: PathToPathMetadata.java
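Stripped of the circus-train types, the recursion above follows a standard FileSystem traversal pattern. Here is a minimal sketch using only the plain Hadoop API (ChecksumWalk is a made-up class name; getFileChecksum may legitimately return null on file systems without checksum support, such as the local file system):

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileChecksum;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class ChecksumWalk {
  /** Recursively print the checksum of every file under the given path. */
  public static void walk(FileSystem fs, Path location) throws IOException {
    FileStatus status = fs.getFileStatus(location);
    if (status.isFile()) {
      FileChecksum checksum = fs.getFileChecksum(location);
      System.out.println(location + " -> " + (checksum == null ? "n/a" : checksum));
    } else if (status.isDirectory()) {
      for (FileStatus child : fs.listStatus(location)) {
        walk(fs, child.getPath());
      }
    }
  }

  public static void main(String[] args) throws IOException {
    Configuration conf = new Configuration();
    Path root = new Path(args.length > 0 ? args[0] : "/tmp");
    walk(root.getFileSystem(conf), root);
  }
}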

Example 9: createBackup

import org.apache.hadoop.fs.FileSystem; // import the required package/class
public static BackupStats createBackup(FileSystem fs, Path backupRootDir, LocalKVStoreProvider localKVStoreProvider, HomeFileConfig homeFileStore) throws IOException, NamespaceException {
  final Date now = new Date();
  final BackupStats backupStats = new BackupStats();

  final Path backupDir = new Path(backupRootDir, format("%s%s", BACKUP_DIR_PREFIX, DATE_FORMAT.format(now)));
  fs.mkdirs(backupDir, DEFAULT_PERMISSIONS);
  backupStats.backupPath = backupDir.toUri().getPath();

  for (Map.Entry<StoreBuilderConfig, CoreKVStore<?, ?>> entry : localKVStoreProvider.getStores().entrySet()) {
    final StoreBuilderConfig storeBuilderConfig = entry.getKey();
    if (TokenUtils.TOKENS_TABLE_NAME.equals(storeBuilderConfig.getName())) {
      // Skip creating a backup of tokens table
      // TODO: In the future, if there are other tables that should not be backed up, this could be part of
      // StoreBuilderConfig interface
      continue;
    }
    final BackupFileInfo backupFileInfo = new BackupFileInfo().setKvstoreInfo(DataStoreUtils.toInfo(storeBuilderConfig));
    dumpTable(fs, backupDir, backupFileInfo, entry.getValue());
    ++backupStats.tables;
  }
  backupUploadedFiles(fs, backupDir, homeFileStore, backupStats);
  return backupStats;
}
 
Developer: dremio, Project: dremio-oss, Lines of code: 24, Source file: BackupRestoreUtil.java

Example 10: checkPermissionRetention

import org.apache.hadoop.fs.FileSystem; // import the required package/class
public void checkPermissionRetention(Configuration conf, String ourUrl,
    Path path) throws Exception {
  CredentialProvider provider = CredentialProviderFactory.getProviders(conf).get(0);
  // let's add a new credential and flush and check that permissions are still set to 777
  char[] cred = new char[32];
  for(int i =0; i < cred.length; ++i) {
    cred[i] = (char) i;
  }
  // create a new key
  try {
    provider.createCredentialEntry("key5", cred);
  } catch (Exception e) {
    e.printStackTrace();
    throw e;
  }
  provider.flush();
  // get a new instance of the provider to ensure it was saved correctly
  provider = CredentialProviderFactory.getProviders(conf).get(0);
  assertArrayEquals(cred, provider.getCredentialEntry("key5").getCredential());

  FileSystem fs = path.getFileSystem(conf);
  FileStatus s = fs.getFileStatus(path);
  assertTrue("Permissions should have been retained from the preexisting " +
  		"keystore.", s.getPermission().toString().equals("rwxrwxrwx"));
}
 
Developer: nucypher, Project: hadoop-oss, Lines of code: 26, Source file: TestCredentialProviderFactory.java
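For reference, reading a credential back outside of a test looks roughly like the sketch below. It assumes the standard hadoop.security.credential.provider.path key (exposed as CredentialProviderFactory.CREDENTIAL_PROVIDER_PATH) and a placeholder jceks:// keystore URI supplied by the caller.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.security.alias.CredentialProvider;
import org.apache.hadoop.security.alias.CredentialProviderFactory;

public class CredentialLookupSketch {
  /** Look up an alias in the first configured provider, e.g. providerUri = "jceks://file/tmp/test.jceks" (placeholder). */
  public static char[] lookup(String providerUri, String alias) throws Exception {
    Configuration conf = new Configuration();
    conf.set(CredentialProviderFactory.CREDENTIAL_PROVIDER_PATH, providerUri);
    CredentialProvider provider = CredentialProviderFactory.getProviders(conf).get(0);
    CredentialProvider.CredentialEntry entry = provider.getCredentialEntry(alias);
    return entry == null ? null : entry.getCredential();
  }
}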

Example 11: testServerDefaults

import org.apache.hadoop.fs.FileSystem; // import the required package/class
/**
 * Test that server default values can be retrieved on the client side
 */
@Test
public void testServerDefaults() throws IOException {
  Configuration conf = new HdfsConfiguration();
  conf.setLong(DFS_BLOCK_SIZE_KEY, DFS_BLOCK_SIZE_DEFAULT);
  conf.setInt(DFS_BYTES_PER_CHECKSUM_KEY, DFS_BYTES_PER_CHECKSUM_DEFAULT);
  conf.setInt(DFS_CLIENT_WRITE_PACKET_SIZE_KEY, DFS_CLIENT_WRITE_PACKET_SIZE_DEFAULT);
  conf.setInt(DFS_REPLICATION_KEY, DFS_REPLICATION_DEFAULT + 1);
  conf.setInt(IO_FILE_BUFFER_SIZE_KEY, IO_FILE_BUFFER_SIZE_DEFAULT);
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
                   .numDataNodes(DFSConfigKeys.DFS_REPLICATION_DEFAULT + 1)
                   .build();
  cluster.waitActive();
  FileSystem fs = cluster.getFileSystem();
  try {
    FsServerDefaults serverDefaults = fs.getServerDefaults();
    assertEquals(DFS_BLOCK_SIZE_DEFAULT, serverDefaults.getBlockSize());
    assertEquals(DFS_BYTES_PER_CHECKSUM_DEFAULT, serverDefaults.getBytesPerChecksum());
    assertEquals(DFS_CLIENT_WRITE_PACKET_SIZE_DEFAULT, serverDefaults.getWritePacketSize());
    assertEquals(DFS_REPLICATION_DEFAULT + 1, serverDefaults.getReplication());
    assertEquals(IO_FILE_BUFFER_SIZE_DEFAULT, serverDefaults.getFileBufferSize());
  } finally {
    fs.close();
    cluster.shutdown();
  }
}
 
Developer: naver, Project: hadoop, Lines of code: 29, Source file: TestFileCreation.java

Example 12: configurePartitioner

import org.apache.hadoop.fs.FileSystem; // import the required package/class
/**
 * Configure <code>job</code> with a TotalOrderPartitioner, partitioning against
 * <code>splitPoints</code>. Cleans up the partitions file after the job exits.
 */
static void configurePartitioner(Job job, List<ImmutableBytesWritable> splitPoints)
    throws IOException {
  Configuration conf = job.getConfiguration();
  // create the partitions file
  FileSystem fs = FileSystem.get(conf);
  String hbaseTmpFsDir =
      conf.get(HConstants.TEMPORARY_FS_DIRECTORY_KEY,
        HConstants.DEFAULT_TEMPORARY_HDFS_DIRECTORY);
  Path partitionsPath = new Path(hbaseTmpFsDir, "partitions_" + UUID.randomUUID());
  fs.makeQualified(partitionsPath);
  writePartitions(conf, partitionsPath, splitPoints);
  fs.deleteOnExit(partitionsPath);

  // configure job to use it
  job.setPartitionerClass(TotalOrderPartitioner.class);
  TotalOrderPartitioner.setPartitionFile(conf, partitionsPath);
}
 
Developer: fengchen8086, Project: ditb, Lines of code: 22, Source file: HFileOutputFormat2.java
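Outside of HBase, the same TotalOrderPartitioner wiring can be reproduced with nothing but FileSystem and SequenceFile. A hedged sketch, assuming Text keys, an already-sorted list of split points, and a placeholder /tmp location for the partition file:

import java.io.IOException;
import java.util.List;
import java.util.UUID;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.io.SequenceFile;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.lib.partition.TotalOrderPartitioner;

public class TotalOrderSetupSketch {
  /** Write split keys to a partition file and wire the job to use TotalOrderPartitioner. */
  public static void configure(Job job, List<Text> splitPoints) throws IOException {
    Configuration conf = job.getConfiguration();
    FileSystem fs = FileSystem.get(conf);
    Path partitionsPath = fs.makeQualified(new Path("/tmp", "partitions_" + UUID.randomUUID()));

    // One sorted key per partition boundary, mirroring writePartitions() in the example above.
    try (SequenceFile.Writer writer = SequenceFile.createWriter(conf,
        SequenceFile.Writer.file(partitionsPath),
        SequenceFile.Writer.keyClass(Text.class),
        SequenceFile.Writer.valueClass(NullWritable.class))) {
      for (Text splitPoint : splitPoints) {
        writer.append(splitPoint, NullWritable.get());
      }
    }
    fs.deleteOnExit(partitionsPath);

    job.setPartitionerClass(TotalOrderPartitioner.class);
    TotalOrderPartitioner.setPartitionFile(conf, partitionsPath);
    // TotalOrderPartitioner expects exactly (number of split points + 1) reducers.
    job.setNumReduceTasks(splitPoints.size() + 1);
  }
}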

Example 13: configureTestErrorOnNonExistantDir

import org.apache.hadoop.fs.FileSystem; // import the required package/class
public static List<Path> configureTestErrorOnNonExistantDir(Configuration conf,
    FileSystem localFs) throws IOException {
  Path base1 = new Path(TEST_ROOT_DIR, "input1");
  Path base2 = new Path(TEST_ROOT_DIR, "input2");
  conf.set(org.apache.hadoop.mapreduce.lib.input.FileInputFormat.INPUT_DIR,
      localFs.makeQualified(base1) + "," + localFs.makeQualified(base2));
  conf.setBoolean(
      org.apache.hadoop.mapreduce.lib.input.FileInputFormat.INPUT_DIR_RECURSIVE,
      true);
  localFs.mkdirs(base1);

  Path inFile1 = new Path(base1, "file1");
  Path inFile2 = new Path(base1, "file2");

  localFs.createNewFile(inFile1);
  localFs.createNewFile(inFile2);

  List<Path> expectedPaths = Lists.newArrayList();
  return expectedPaths;
}
 
Developer: naver, Project: hadoop, Lines of code: 21, Source file: TestFileInputFormat.java

Example 14: testLocalJobLibjarsOption

import org.apache.hadoop.fs.FileSystem; // import the required package/class
/**
 * test the local job submission options of
 * -jt local -libjars
 * @throws IOException
 */
@Test
public void testLocalJobLibjarsOption() throws IOException {
  Path jarPath = makeJar(new Path(TEST_ROOT_DIR, "test.jar"));

  Configuration conf = new Configuration();
  conf.set(FileSystem.FS_DEFAULT_NAME_KEY, "hdfs://testcluster");
  final String[] args = {
      "-jt" , "local", "-libjars", jarPath.toString(),
      "-m", "1", "-r", "1", "-mt", "1", "-rt", "1"
  };
  int res = -1;
  try {
    res = ToolRunner.run(conf, new SleepJob(), args);
  } catch (Exception e) {
    System.out.println("Job failed with " + e.getLocalizedMessage());
    e.printStackTrace(System.out);
    fail("Job failed");
  }
  assertEquals("dist job res is not 0:", 0, res);
}
 
Developer: naver, Project: hadoop, Lines of code: 26, Source file: TestLocalJobSubmission.java

Example 15: main

import org.apache.hadoop.fs.FileSystem; // import the required package/class
public static void main(String[] args) {
    String rootPath = "hdfs://nameservice1";
    Path p = new Path(rootPath + "/tmp/file.txt");
    Configuration conf = new Configuration();
    conf.addResource("core-site.xml");
    conf.addResource("hdfs-site.xml");
    conf.addResource("yarn-site.xml");
    try {
        // If Kerberos is not enabled, comment out the following two lines
        UserGroupInformation.setConfiguration(conf);
        UserGroupInformation.loginUserFromKeytab("[email protected]","E:\\星环\\hdfs.keytab");
        FileSystem fs = p.getFileSystem(conf);
        boolean b = fs.delete(p, true);
        System.out.println(b);
        fs.close();
    } catch (IOException e) {
        e.printStackTrace();
    }
}
 
Developer: Transwarp-DE, Project: Transwarp-Sample-Code, Lines of code: 20, Source file: Delete.java


Note: The org.apache.hadoop.fs.FileSystem class examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets were selected from open-source projects contributed by their respective authors, and the copyright of the source code remains with the original authors. For redistribution and use, please refer to the license of the corresponding project; do not reproduce without permission.