

Java FileSystem.close Method Code Examples

This article collects typical usage examples of the Java method org.apache.hadoop.fs.FileSystem.close. If you have been wrestling with questions such as: What exactly does FileSystem.close do? How do I call it? Where can I find real examples? Then congratulations: the curated method examples below may be exactly what you need. You can also explore further usage examples of the containing class, org.apache.hadoop.fs.FileSystem.


The following presents 15 code examples of the FileSystem.close method, sorted by popularity by default.
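
Before diving into the examples, here is a minimal, self-contained sketch of the pattern they all share: obtain a FileSystem, use it, and release its resources with close(). This sketch is illustrative only; the URI is a placeholder for your own cluster. Since FileSystem implements java.io.Closeable, try-with-resources calls close() automatically:

import java.io.IOException;
import java.net.URI;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class FileSystemCloseExample {
    public static void main(String[] args) throws IOException {
        Configuration conf = new Configuration();
        // "hdfs://localhost:9000" is a placeholder; point it at a real cluster.
        // FileSystem implements Closeable, so try-with-resources invokes
        // close() automatically, releasing sockets and client-side state.
        try (FileSystem fs = FileSystem.get(URI.create("hdfs://localhost:9000"), conf)) {
            System.out.println(fs.exists(new Path("/tmp")));
        }
    }
}

Keep in mind that FileSystem.get may return a cached instance shared across the JVM, so close() can affect other callers; see the note after Example 4.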

Example 1: testSetPermission

import org.apache.hadoop.fs.FileSystem; // import the dependent package/class
@Override
protected void testSetPermission() throws Exception {
  if (Path.WINDOWS) {
    FileSystem fs = FileSystem.get(getProxiedFSConf());
    Path path = new Path(getProxiedFSTestDir(), "foodir");
    fs.mkdirs(path);

    fs = getHttpFSFileSystem();
    FsPermission permission1 = new FsPermission(FsAction.READ_WRITE, FsAction.NONE, FsAction.NONE);
    fs.setPermission(path, permission1);
    fs.close();

    fs = FileSystem.get(getProxiedFSConf());
    FileStatus status1 = fs.getFileStatus(path);
    fs.close();
    FsPermission permission2 = status1.getPermission();
    Assert.assertEquals(permission2, permission1);

    // sticky bit not supported on Windows with local file system, so the
    // subclass skips that part of the test
  } else {
    super.testSetPermission();
  }
}
 
Developer: naver, Project: hadoop, Lines: 25, Source: TestHttpFSFileSystemLocalFileSystem.java

Example 2: testDfsClientFailover

import org.apache.hadoop.fs.FileSystem; // import the dependent package/class
/**
 * Make sure that client failover works when an active NN dies and the standby
 * takes over.
 */
@Test
public void testDfsClientFailover() throws IOException, URISyntaxException {
  FileSystem fs = HATestUtil.configureFailoverFs(cluster, conf);
  
  DFSTestUtil.createFile(fs, TEST_FILE,
      FILE_LENGTH_TO_VERIFY, (short)1, 1L);
  
  assertEquals(fs.getFileStatus(TEST_FILE).getLen(), FILE_LENGTH_TO_VERIFY);
  cluster.shutdownNameNode(0);
  cluster.transitionToActive(1);
  assertEquals(fs.getFileStatus(TEST_FILE).getLen(), FILE_LENGTH_TO_VERIFY);
  
  // Check that it functions even if the URL becomes canonicalized
  // to include a port number.
  Path withPort = new Path("hdfs://" +
      HATestUtil.getLogicalHostname(cluster) + ":" +
      NameNode.DEFAULT_PORT + "/" + TEST_FILE.toUri().getPath());
  FileSystem fs2 = withPort.getFileSystem(fs.getConf());
  assertTrue(fs2.exists(withPort));

  fs.close();
}
 
Developer: naver, Project: hadoop, Lines: 27, Source: TestDFSClientFailover.java

Example 3: execute

import org.apache.hadoop.fs.FileSystem; // import the dependent package/class
private void execute(String[] args, String namenode) {
  FsShell shell = new FsShell();
  FileSystem fs = null;
  try {
    ToolRunner.run(shell, args);
    fs = FileSystem.get(NameNode.getUri(NameNode.getAddress(namenode)),
        shell.getConf());
    assertTrue("Directory does not get created", 
               fs.isDirectory(new Path("/data")));
    fs.delete(new Path("/data"), true);
  } catch (Exception e) {
    System.err.println(e.getMessage());
    e.printStackTrace();
  } finally {
    if (fs != null) {
      try {
        fs.close();
      } catch (IOException ignored) {
      }
    }
  }
}
 
Developer: naver, Project: hadoop, Lines: 23, Source: TestDFSShellGenericOptions.java

Example 4: testAppend

import org.apache.hadoop.fs.FileSystem; // import the dependent package/class
private void testAppend() throws Exception {
  if (!isLocalFS()) {
    FileSystem fs = FileSystem.get(getProxiedFSConf());
    fs.mkdirs(getProxiedFSTestDir());
    Path path = new Path(getProxiedFSTestDir(), "foo.txt");
    OutputStream os = fs.create(path);
    os.write(1);
    os.close();
    fs.close();
    fs = getHttpFSFileSystem();
    os = fs.append(new Path(path.toUri().getPath()));
    os.write(2);
    os.close();
    fs.close();
    fs = FileSystem.get(getProxiedFSConf());
    InputStream is = fs.open(path);
    Assert.assertEquals(is.read(), 1);
    Assert.assertEquals(is.read(), 2);
    Assert.assertEquals(is.read(), -1);
    is.close();
    fs.close();
  }
}
 
Developer: naver, Project: hadoop, Lines: 24, Source: BaseTestHttpFSWith.java
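
A note on the close-then-reacquire pattern used in Examples 1 and 4 (this note is an editorial addition, not part of the compiled examples): FileSystem.get() normally returns a cached instance keyed by URI scheme, authority, and user, so calling close() closes that shared instance for every caller that holds a reference to it. Code that wants a private instance it can close safely can bypass the cache with FileSystem.newInstance(); a minimal sketch with a placeholder URI:

import java.io.IOException;
import java.net.URI;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class PrivateFileSystemSketch {
    public static void main(String[] args) throws IOException {
        Configuration conf = new Configuration();
        URI uri = URI.create("hdfs://localhost:9000"); // placeholder URI
        // newInstance() always creates a fresh FileSystem, so closing it
        // cannot break other code holding the cached instance from get().
        FileSystem fs = FileSystem.newInstance(uri, conf);
        try {
            fs.mkdirs(new Path("/tmp/private-fs-demo")); // hypothetical test path
        } finally {
            fs.close();
        }
    }
}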

Example 5: main

import org.apache.hadoop.fs.FileSystem; // import the dependent package/class
public static void main(String[] args) throws IOException {
    // Create an HDFS directory via the Java API
    String rootPath = "hdfs://nameservice1";
    Path p = new Path(rootPath + "/tmp/newDir3");

    Configuration conf = new Configuration();
    conf.addResource("core-site.xml");
    conf.addResource("hdfs-site.xml");
    conf.addResource("yarn-site.xml");
    // If Kerberos is not enabled, comment out the next two lines
    UserGroupInformation.setConfiguration(conf);
    UserGroupInformation.loginUserFromKeytab("[email protected]","E:\\星环\\hdfs.keytab");
    FileSystem fs = p.getFileSystem(conf);
    boolean b = fs.mkdirs(p);
    System.out.println(b);
    fs.close();
}
 
Developer: Transwarp-DE, Project: Transwarp-Sample-Code, Lines: 18, Source: CreateDir.java

Example 6: testPreadLocalFS

import org.apache.hadoop.fs.FileSystem; // import the dependent package/class
/**
 * Tests positional read in LocalFS.
 */
@Test
public void testPreadLocalFS() throws IOException {
  Configuration conf = new HdfsConfiguration();
  FileSystem fileSys = FileSystem.getLocal(conf);
  try {
    Path file1 = new Path("build/test/data", "preadtest.dat");
    writeFile(fileSys, file1);
    pReadFile(fileSys, file1);
    cleanupFile(fileSys, file1);
  } finally {
    fileSys.close();
  }
}
 
Developer: naver, Project: hadoop, Lines: 17, Source: TestPread.java

Example 7: testListStatus

import org.apache.hadoop.fs.FileSystem; // import the dependent package/class
private void testListStatus() throws Exception {
  FileSystem fs = FileSystem.get(getProxiedFSConf());
  Path path = new Path(getProxiedFSTestDir(), "foo.txt");
  OutputStream os = fs.create(path);
  os.write(1);
  os.close();
  FileStatus status1 = fs.getFileStatus(path);
  fs.close();

  fs = getHttpFSFileSystem();
  FileStatus status2 = fs.getFileStatus(new Path(path.toUri().getPath()));
  fs.close();

  Assert.assertEquals(status2.getPermission(), status1.getPermission());
  Assert.assertEquals(status2.getPath().toUri().getPath(), status1.getPath().toUri().getPath());
  Assert.assertEquals(status2.getReplication(), status1.getReplication());
  Assert.assertEquals(status2.getBlockSize(), status1.getBlockSize());
  Assert.assertEquals(status2.getAccessTime(), status1.getAccessTime());
  Assert.assertEquals(status2.getModificationTime(), status1.getModificationTime());
  Assert.assertEquals(status2.getOwner(), status1.getOwner());
  Assert.assertEquals(status2.getGroup(), status1.getGroup());
  Assert.assertEquals(status2.getLen(), status1.getLen());

  FileStatus[] stati = fs.listStatus(path.getParent());
  Assert.assertEquals(stati.length, 1);
  Assert.assertEquals(stati[0].getPath().getName(), path.getName());
}
 
Developer: naver, Project: hadoop, Lines: 28, Source: BaseTestHttpFSWith.java

Example 8: testModTimePersistsAfterRestart

import org.apache.hadoop.fs.FileSystem; // import the dependent package/class
/**
 * Regression test for HDFS-3864 - NN does not update internal file mtime for
 * OP_CLOSE when reading from the edit log.
 */
@Test
public void testModTimePersistsAfterRestart() throws IOException {
  final long sleepTime = 10; // 10 milliseconds
  MiniDFSCluster cluster = null;
  FileSystem fs = null;
  Configuration conf = new HdfsConfiguration();
  try {
    cluster = new MiniDFSCluster.Builder(conf).build();
    fs = cluster.getFileSystem();
    Path testPath = new Path("/test");
    
    // Open a file, and get its initial modification time.
    OutputStream out = fs.create(testPath);
    long initialModTime = fs.getFileStatus(testPath).getModificationTime();
    assertTrue(initialModTime > 0);
    
    // Wait and then close the file. Ensure that the mod time goes up.
    ThreadUtil.sleepAtLeastIgnoreInterrupts(sleepTime);
    out.close();
    long modTimeAfterClose = fs.getFileStatus(testPath).getModificationTime();
    assertTrue(modTimeAfterClose >= initialModTime + sleepTime);
    
    // Restart the NN, and make sure that the later mod time is still used.
    cluster.restartNameNode();
    long modTimeAfterRestart = fs.getFileStatus(testPath).getModificationTime();
    assertEquals(modTimeAfterClose, modTimeAfterRestart);
  } finally {
    if (fs != null) {
      fs.close();
    }
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
 
Developer: naver, Project: hadoop, Lines: 40, Source: TestModTime.java

Example 9: testTracingGlobber

import org.apache.hadoop.fs.FileSystem; // import the dependent package/class
/**
 * Test tracing the globber.  This is a regression test for HDFS-9187.
 */
@Test
public void testTracingGlobber() throws Exception {
  // Bypass the normal FileSystem object creation path by just creating an
  // instance of a subclass.
  FileSystem fs = new LocalFileSystem();
  fs.initialize(new URI("file:///"), new Configuration());
  fs.globStatus(new Path("/"));
  fs.close();
}
 
Developer: nucypher, Project: hadoop-oss, Lines: 13, Source: TestTraceUtils.java

Example 10: writeSeveralPackets

import org.apache.hadoop.fs.FileSystem; // import the dependent package/class
/**
 * 1. create files with dfs
 * 2. write MIN_N_PACKET to MAX_N_PACKET packets
 * 3. close file
 * 4. open the same file
 * 5. read the bytes and compare results
 */
private static void writeSeveralPackets(String methodName) throws IOException {
  final Random r = FiTestUtil.RANDOM.get();
  final int nPackets = FiTestUtil.nextRandomInt(MIN_N_PACKET, MAX_N_PACKET + 1);
  final int lastPacketSize = FiTestUtil.nextRandomInt(1, PACKET_SIZE + 1);
  final int size = (nPackets - 1)*PACKET_SIZE + lastPacketSize;

  FiTestUtil.LOG.info("size=" + size + ", nPackets=" + nPackets
      + ", lastPacketSize=" + lastPacketSize);

  final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf
      ).numDataNodes(REPLICATION + 2).build();
  final FileSystem dfs = cluster.getFileSystem();
  try {
    final Path p = new Path("/" + methodName + "/foo");
    final FSDataOutputStream out = createFile(dfs, p);

    final long seed = r.nextLong();
    final Random ran = new Random(seed);
    ran.nextBytes(bytes);
    out.write(bytes, 0, size);
    out.close();

    final FSDataInputStream in = dfs.open(p);
    int totalRead = 0;
    int nRead = 0;
    while ((nRead = in.read(toRead, totalRead, size - totalRead)) > 0) {
      totalRead += nRead;
    }
    Assert.assertEquals("Cannot read file.", size, totalRead);
    for (int i = 0; i < size; i++) {
      Assert.assertTrue("File content differ.", bytes[i] == toRead[i]);
    }
  }
  finally {
    dfs.close();
    cluster.shutdown();
  }
}
 
Developer: naver, Project: hadoop, Lines: 46, Source: TestFiDataTransferProtocol2.java

Example 11: testAccessContainerWithWrongVersion

import org.apache.hadoop.fs.FileSystem; // import the dependent package/class
@Test
public void testAccessContainerWithWrongVersion() throws Exception {
  AzureNativeFileSystemStore store = new AzureNativeFileSystemStore();
  MockStorageInterface mockStorage = new MockStorageInterface();
  store.setAzureStorageInteractionLayer(mockStorage);
  FileSystem fs = new NativeAzureFileSystem(store);
  try {
    Configuration conf = new Configuration();
    AzureBlobStorageTestAccount.setMockAccountKey(conf);
    HashMap<String, String> metadata = new HashMap<String, String>();
    metadata.put(AzureNativeFileSystemStore.VERSION_METADATA_KEY,
        "2090-04-05"); // It's from the future!
    mockStorage.addPreExistingContainer(
        AzureBlobStorageTestAccount.getMockContainerUri(), metadata);

    boolean passed = false;
    try {
      fs.initialize(new URI(AzureBlobStorageTestAccount.MOCK_WASB_URI), conf);
      fs.listStatus(new Path("/"));
      passed = true;
    } catch (AzureException ex) {
      assertTrue("Unexpected exception message: " + ex,
          ex.getMessage().contains("unsupported version: 2090-04-05."));
    }
    assertFalse("Should've thrown an exception because of the wrong version.",
        passed);
  } finally {
    fs.close();
  }
}
 
Developer: naver, Project: hadoop, Lines: 31, Source: TestAzureFileSystemErrorConditions.java

Example 12: testFsckSymlink

import org.apache.hadoop.fs.FileSystem; // import the dependent package/class
/** Test fsck with symlinks in the filesystem */
@Test
public void testFsckSymlink() throws Exception {
  final DFSTestUtil util = new DFSTestUtil.Builder().
      setName(getClass().getSimpleName()).setNumFiles(1).build();
  final Configuration conf = new HdfsConfiguration();
  conf.setLong(DFSConfigKeys.DFS_BLOCKREPORT_INTERVAL_MSEC_KEY, 10000L);

  MiniDFSCluster cluster = null;
  FileSystem fs = null;
  try {
    final long precision = 1L;
    conf.setLong(DFSConfigKeys.DFS_NAMENODE_ACCESSTIME_PRECISION_KEY, precision);
    conf.setLong(DFSConfigKeys.DFS_BLOCKREPORT_INTERVAL_MSEC_KEY, 10000L);
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(4).build();
    fs = cluster.getFileSystem();
    final String fileName = "/srcdat";
    util.createFiles(fs, fileName);
    final FileContext fc = FileContext.getFileContext(
        cluster.getConfiguration(0));
    final Path file = new Path(fileName);
    final Path symlink = new Path("/srcdat-symlink");
    fc.createSymlink(file, symlink, false);
    util.waitReplication(fs, fileName, (short)3);
    long aTime = fc.getFileStatus(symlink).getAccessTime();
    Thread.sleep(precision);
    setupAuditLogs();
    String outStr = runFsck(conf, 0, true, "/");
    verifyAuditLogs();
    assertEquals(aTime, fc.getFileStatus(symlink).getAccessTime());
    System.out.println(outStr);
    assertTrue(outStr.contains(NamenodeFsck.HEALTHY_STATUS));
    assertTrue(outStr.contains("Total symlinks:\t\t1"));
    util.cleanup(fs, fileName);
  } finally {
    if (fs != null) { try { fs.close(); } catch (Exception e) { /* ignore */ } }
    if (cluster != null) { cluster.shutdown(); }
  }
}
 
Developer: naver, Project: hadoop, Lines: 40, Source: TestFsck.java

Example 13: deleteFromHdfs

import org.apache.hadoop.fs.FileSystem; // import the dependent package/class
/** Delete a file from HDFS. */
public static void deleteFromHdfs(String fileName) throws IOException {
    String dst = NodeConfig.HDFS_PATH + fileName;
    Configuration conf = new Configuration();
    FileSystem fs = FileSystem.get(URI.create(dst), conf);
    // deleteOnExit() only marks the path; the actual delete runs when
    // the FileSystem is closed, i.e. in the fs.close() call below.
    fs.deleteOnExit(new Path(dst));
    fs.close();
}
 
Developer: cuiods, Project: WIFIProbe, Lines: 9, Source: HDFSTool.java
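
One caveat worth adding to Example 13 (this is an editorial note, not the original author's): deleteOnExit() does not delete the path immediately. It only marks the path, and the actual delete runs when the FileSystem is closed, which is exactly what the fs.close() call above triggers. For an immediate delete, fs.delete() is the direct call; a hedged sketch that reuses the NodeConfig helper assumed by these examples:

/** Delete a file from HDFS immediately, without waiting for close(). */
public static void deleteFromHdfsNow(String fileName) throws IOException {
    String dst = NodeConfig.HDFS_PATH + fileName;
    Configuration conf = new Configuration();
    FileSystem fs = FileSystem.get(URI.create(dst), conf);
    try {
        fs.delete(new Path(dst), false); // false: do not recurse into directories
    } finally {
        fs.close();
    }
}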

Example 14: renameFile

import org.apache.hadoop.fs.FileSystem; // import the dependent package/class
/** Rename a file on HDFS. */
public static void renameFile(String origin, String newName) throws IOException{
    Configuration conf = new Configuration();
    String str = NodeConfig.HDFS_PATH+origin;
    String dst = NodeConfig.HDFS_PATH+newName;
    FileSystem fs = FileSystem.get(URI.create(str), conf);
    Path srcPath = new Path(str);
    Path dstPath = new Path(dst);
    // Note: rename() reports failure via its boolean return value, which is ignored here.
    fs.rename(srcPath, dstPath);
    fs.close();
}
 
Developer: cuiods, Project: WIFIProbe, Lines: 12, Source: HDFSTool.java

Example 15: testRbwReplicas

import org.apache.hadoop.fs.FileSystem; // import the dependent package/class
private void testRbwReplicas(MiniDFSCluster cluster, boolean isCorrupt) 
throws IOException {
  FSDataOutputStream out = null;
  FileSystem fs = cluster.getFileSystem();
  final Path src = new Path("/test.txt");
  try {
    final int fileLen = 515;
    // create some rbw replicas on disk
    byte[] writeBuf = new byte[fileLen];
    new Random().nextBytes(writeBuf);
    out = fs.create(src);
    out.write(writeBuf);
    out.hflush();
    DataNode dn = cluster.getDataNodes().get(0);
    for (FsVolumeSpi v : dataset(dn).getVolumes()) {
      final FsVolumeImpl volume = (FsVolumeImpl)v;
      File currentDir = volume.getCurrentDir().getParentFile().getParentFile();
      File rbwDir = new File(currentDir, "rbw");
      for (File file : rbwDir.listFiles()) {
        if (isCorrupt && Block.isBlockFilename(file)) {
          new RandomAccessFile(file, "rw").setLength(fileLen-1); // corrupt
        }
      }
    }
    cluster.restartDataNodes();
    cluster.waitActive();
    dn = cluster.getDataNodes().get(0);

    // check volumeMap: one rwr replica
    String bpid = cluster.getNamesystem().getBlockPoolId();
    ReplicaMap replicas = dataset(dn).volumeMap;
    Assert.assertEquals(1, replicas.size(bpid));
    ReplicaInfo replica = replicas.replicas(bpid).iterator().next();
    Assert.assertEquals(ReplicaState.RWR, replica.getState());
    if (isCorrupt) {
      Assert.assertEquals((fileLen-1)/512*512, replica.getNumBytes());
    } else {
      Assert.assertEquals(fileLen, replica.getNumBytes());
    }
    dataset(dn).invalidate(bpid, new Block[]{replica});
  } finally {
    IOUtils.closeStream(out);
    if (fs.exists(src)) {
      fs.delete(src, false);
    }
    fs.close();
  }      
}
 
Developer: naver, Project: hadoop, Lines: 49, Source: TestDatanodeRestart.java


Note: the org.apache.hadoop.fs.FileSystem.close method examples in this article were compiled by 纯净天空 from GitHub/MSDocs and other open-source code and documentation platforms. The snippets were selected from open-source projects contributed by their authors, and copyright of the source code remains with the original authors. Consult the corresponding project's License before distributing or using the code, and do not reproduce without permission.