Java DistributedFileSystem.close Method Code Examples

This article collects typical usage examples of the Java method org.apache.hadoop.hdfs.DistributedFileSystem.close. If you are unsure what DistributedFileSystem.close does or how to call it, the curated examples below should help. You can also browse further usage examples of the enclosing class, org.apache.hadoop.hdfs.DistributedFileSystem.


Eleven code examples of the DistributedFileSystem.close method are shown below, ordered roughly by popularity. A minimal usage sketch comes first; the project examples follow.
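The sketch below shows the pattern the examples share: obtain a DistributedFileSystem, use it, and close it when done. The NameNode URI hdfs://localhost:9000 and the path /tmp/close-example are placeholders chosen for illustration, not values taken from the projects below.

import java.net.URI;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;

public class DistributedFileSystemCloseSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // Placeholder NameNode address; point this at your own cluster.
    DistributedFileSystem dfs = (DistributedFileSystem)
        FileSystem.get(URI.create("hdfs://localhost:9000"), conf);
    try {
      dfs.mkdirs(new Path("/tmp/close-example"));
    } finally {
      // Releases the connection to the NameNode and client-side resources.
      dfs.close();
    }
  }
}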

Example 1: setup

import org.apache.hadoop.hdfs.DistributedFileSystem; // import the package/class the method depends on
@Before
public void setup() throws Exception {
  Configuration conf = new Configuration();
  // Turn off IPC client caching, so that the suite can handle
  // the restart of the daemons between test cases.
  conf.setInt(
      CommonConfigurationKeysPublic.IPC_CLIENT_CONNECTION_MAXIDLETIME_KEY,
      0);

  MiniQJMHACluster miniQjmHaCluster = new MiniQJMHACluster.Builder(conf).build();
  cluster = miniQjmHaCluster.getDfsCluster();
  jCluster = miniQjmHaCluster.getJournalCluster();
  
  // make nn0 active
  cluster.transitionToActive(0);
  // do something to generate in-progress edit log data
  DistributedFileSystem dfs = (DistributedFileSystem) 
      HATestUtil.configureFailoverFs(cluster, conf);
  dfs.mkdirs(new Path("/test2"));
  dfs.close();
}
 
Developer: naver, Project: hadoop, Lines: 22, Source: TestBootstrapStandbyWithQJM.java

Example 2: mapreduce

import org.apache.hadoop.hdfs.DistributedFileSystem; // import the package/class the method depends on
@Test
public void mapreduce() {
    String inputPath = ParquetConfiguration.HDFS_URI + "//parquet/mapreduce/input";
    String outputPath = ParquetConfiguration.HDFS_URI + "//parquet/mapreduce/output" + DateFormatUtils.format(new Date(), "yyyyMMddHHmmss");
    try {
        MapReduceParquetMapReducer.main(new String[]{inputPath, outputPath});
        DistributedFileSystem distributedFileSystem = new ParquetConfiguration().distributedFileSystem();
        FileStatus[] fileStatuses = distributedFileSystem.listStatus(new Path(outputPath));
        for (FileStatus fileStatus : fileStatuses) {
            System.out.println(fileStatus);
        }
        distributedFileSystem.close();
    } catch (Exception e) {
        e.printStackTrace();
    }
}
 
Developer: mumuhadoop, Project: mumu-parquet, Lines: 17, Source: MapReduceParquetMapReducerTest.java

Example 3: close

import org.apache.hadoop.hdfs.DistributedFileSystem; // import the package/class the method depends on
/**
 * Close the resource.
 *
 * @param distributedFileSystem the file system to close
 */
public void close(DistributedFileSystem distributedFileSystem) {
    if (distributedFileSystem != null) {
        try {
            distributedFileSystem.close();
        } catch (IOException e) {
            log.error(e);
        }
    }
}
 
Developer: mumuhadoop, Project: mumu-mapreduce, Lines: 15, Source: MapReduceConfiguration.java
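Since FileSystem implements java.io.Closeable, on Java 7+ a try-with-resources block can replace a manual helper like the one above. A minimal sketch, assuming a Configuration whose fs.defaultFS already points at an HDFS cluster:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;

public class TryWithResourcesCloseSketch {
  public static void listRoot(Configuration conf) throws Exception {
    // close() runs automatically when the try block exits, even on exceptions.
    try (DistributedFileSystem dfs = (DistributedFileSystem) FileSystem.get(conf)) {
      for (FileStatus status : dfs.listStatus(new Path("/"))) {
        System.out.println(status.getPath());
      }
    }
  }
}

One caveat: FileSystem.get normally returns a cached instance shared per scheme and user, so closing it can affect other code in the same JVM; setting fs.hdfs.impl.disable.cache to true avoids the shared cache when that matters.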

Example 4: concat

import org.apache.hadoop.hdfs.DistributedFileSystem; // import the package/class the method depends on
public static void concat(String dir) throws IOException {
    String directory = NodeConfig.HDFS_PATH + dir;
    Configuration conf = new Configuration();
    DistributedFileSystem fs = (DistributedFileSystem) FileSystem.get(URI.create(directory), conf);
    FileStatus[] fileList = fs.listStatus(new Path(directory));

    if (fileList.length >= 2) {
        // Collect files that are small relative to the block size as concat candidates.
        ArrayList<Path> srcs = new ArrayList<Path>(fileList.length);
        for (FileStatus fileStatus : fileList) {
            if (fileStatus.isFile() &&
                    (fileStatus.getLen() & ~fileStatus.getBlockSize()) < fileStatus.getBlockSize() / 2) {
                srcs.add(fileStatus.getPath());
            }
        }

        if (srcs.size() >= 2) {
            Logger.println("come to here");
            // Concatenate the remaining candidates onto the first one.
            Path appended = srcs.get(0);
            Path[] sources = new Path[srcs.size() - 1];
            for (int i = 0; i < srcs.size() - 1; i++) {
                sources[i] = srcs.get(i + 1);
            }
            Logger.println(fs == null);
            Logger.println(appended == null);
            Logger.println(sources == null);
            fs.concat(appended, sources);
            Logger.println("concat to : " + appended.getName());
            Logger.println(Arrays.toString(sources));
        }

        fs.close();
    }
}
 
Developer: cuiods, Project: WIFIProbe, Lines: 39, Source: HDFSTool.java

Example 5: closeDfs

import org.apache.hadoop.hdfs.DistributedFileSystem; // import the package/class the method depends on
private void closeDfs(DistributedFileSystem dfs) {
  try {
    if (dfs != null)
      dfs.close();

  } catch (Exception ignored) {
    // nothing we can do
    ignored.printStackTrace();
  }
}
 
Developer: naver, Project: hadoop, Lines: 11, Source: TestMRCJCSocketFactory.java
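Hadoop also ships a log-and-swallow close helper with the same intent as closeDfs above. A minimal sketch, assuming the Hadoop 2.x org.apache.hadoop.io.IOUtils.cleanup(Log, Closeable...) signature (later releases deprecate it in favor of cleanupWithLogger):

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.io.IOUtils;

public class QuietCloseSketch {
  private static final Log LOG = LogFactory.getLog(QuietCloseSketch.class);

  public static void closeQuietly(DistributedFileSystem dfs) {
    // Logs and swallows any IOException from close(); null arguments are skipped.
    IOUtils.cleanup(LOG, dfs);
  }
}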

Example 6: testIsInSafemode

import org.apache.hadoop.hdfs.DistributedFileSystem; // import the package/class the method depends on
/**
 * DFS#isInSafeMode should check the ActiveNN's safemode in an HA-enabled cluster. HDFS-3507
 * 
 * @throws Exception
 */
@Test
public void testIsInSafemode() throws Exception {
  // Check for the standby nn without client failover.
  NameNode nn2 = cluster.getNameNode(1);
  assertTrue("nn2 should be in standby state", nn2.isStandbyState());

  InetSocketAddress nameNodeAddress = nn2.getNameNodeAddress();
  Configuration conf = new Configuration();
  DistributedFileSystem dfs = new DistributedFileSystem();
  try {
    dfs.initialize(
        URI.create("hdfs://" + nameNodeAddress.getHostName() + ":"
            + nameNodeAddress.getPort()), conf);
    dfs.isInSafeMode();
    fail("StandBy should throw exception for isInSafeMode");
  } catch (IOException e) {
    if (e instanceof RemoteException) {
      IOException sbException = ((RemoteException) e).unwrapRemoteException();
      assertTrue("StandBy nn should not support isInSafeMode",
          sbException instanceof StandbyException);
    } else {
      throw e;
    }
  } finally {
    if (null != dfs) {
      dfs.close();
    }
  }

  // Check with Client FailOver
  cluster.transitionToStandby(0);
  cluster.transitionToActive(1);
  cluster.getNameNodeRpc(1).setSafeMode(SafeModeAction.SAFEMODE_ENTER, false);
  DistributedFileSystem dfsWithFailOver = (DistributedFileSystem) fs;
  assertTrue("ANN should be in SafeMode", dfsWithFailOver.isInSafeMode());

  cluster.getNameNodeRpc(1).setSafeMode(SafeModeAction.SAFEMODE_LEAVE, false);
  assertFalse("ANN should be out of SafeMode", dfsWithFailOver.isInSafeMode());
}
 
Developer: naver, Project: hadoop, Lines: 45, Source: TestHASafeMode.java

Example 7: close

import org.apache.hadoop.hdfs.DistributedFileSystem; // import the package/class the method depends on
public void close(DistributedFileSystem distributedFileSystem) {
    if (distributedFileSystem != null) {
        try {
            distributedFileSystem.close();
        } catch (IOException e) {
            e.printStackTrace();
        }
    }
}
 
Developer: mumuhadoop, Project: mumu-pig, Lines: 10, Source: MumuPigConfiguration.java

Example 8: testClientMmapDisable

import org.apache.hadoop.hdfs.DistributedFileSystem; // import the package/class the method depends on
@Test
public void testClientMmapDisable() throws Exception {
  HdfsConfiguration conf = initZeroCopyTest();
  conf.setBoolean(DFS_CLIENT_MMAP_ENABLED, false);
  MiniDFSCluster cluster = null;
  final Path TEST_PATH = new Path("/a");
  final int TEST_FILE_LENGTH = 16385;
  final int RANDOM_SEED = 23453;
  final String CONTEXT = "testClientMmapDisable";
  FSDataInputStream fsIn = null;
  DistributedFileSystem fs = null;
  conf.set(DFSConfigKeys.DFS_CLIENT_CONTEXT, CONTEXT);

  try {
    // With DFS_CLIENT_MMAP_ENABLED set to false, we should not do memory
    // mapped reads.
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
    cluster.waitActive();
    fs = cluster.getFileSystem();
    DFSTestUtil.createFile(fs, TEST_PATH,
        TEST_FILE_LENGTH, (short)1, RANDOM_SEED);
    DFSTestUtil.waitReplication(fs, TEST_PATH, (short)1);
    fsIn = fs.open(TEST_PATH);
    try {
      fsIn.read(null, 1, EnumSet.of(ReadOption.SKIP_CHECKSUMS));
      Assert.fail("expected zero-copy read to fail when client mmaps " +
          "were disabled.");
    } catch (UnsupportedOperationException e) {
    }
  } finally {
    if (fsIn != null) fsIn.close();
    if (fs != null) fs.close();
    if (cluster != null) cluster.shutdown();
  }

  fsIn = null;
  fs = null;
  cluster = null;
  try {
    // Now try again with DFS_CLIENT_MMAP_CACHE_SIZE == 0.  It should work.
    conf.setBoolean(DFS_CLIENT_MMAP_ENABLED, true);
    conf.setInt(DFS_CLIENT_MMAP_CACHE_SIZE, 0);
    conf.set(DFSConfigKeys.DFS_CLIENT_CONTEXT, CONTEXT + ".1");
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
    cluster.waitActive();
    fs = cluster.getFileSystem();
    DFSTestUtil.createFile(fs, TEST_PATH,
        TEST_FILE_LENGTH, (short)1, RANDOM_SEED);
    DFSTestUtil.waitReplication(fs, TEST_PATH, (short)1);
    fsIn = fs.open(TEST_PATH);
    ByteBuffer buf = fsIn.read(null, 1, EnumSet.of(ReadOption.SKIP_CHECKSUMS));
    fsIn.releaseBuffer(buf);
    // Test EOF behavior
    IOUtils.skipFully(fsIn, TEST_FILE_LENGTH - 1);
    buf = fsIn.read(null, 1, EnumSet.of(ReadOption.SKIP_CHECKSUMS));
    Assert.assertEquals(null, buf);
  } finally {
    if (fsIn != null) fsIn.close();
    if (fs != null) fs.close();
    if (cluster != null) cluster.shutdown();
  }
}
 
Developer: naver, Project: hadoop, Lines: 63, Source: TestEnhancedByteBufferAccess.java

Example 9: testFsckMissingReplicas

import org.apache.hadoop.hdfs.DistributedFileSystem; // import the package/class the method depends on
/**
 * Tests that the # of missing block replicas and expected replicas is correct
 * @throws IOException
 */
@Test
public void testFsckMissingReplicas() throws IOException {
  // Desired replication factor
  // Set this higher than NUM_REPLICAS so it's under-replicated
  final short REPL_FACTOR = 2;
  // Number of replicas to actually start
  final short NUM_REPLICAS = 1;
  // Number of blocks to write
  final short NUM_BLOCKS = 3;
  // Set a small-ish blocksize
  final long blockSize = 512;
  
  Configuration conf = new Configuration();
  conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, blockSize);
  
  MiniDFSCluster cluster = null;
  DistributedFileSystem dfs = null;
  
  try {
    // Startup a minicluster
    cluster = 
        new MiniDFSCluster.Builder(conf).numDataNodes(NUM_REPLICAS).build();
    assertNotNull("Failed Cluster Creation", cluster);
    cluster.waitClusterUp();
    dfs = cluster.getFileSystem();
    assertNotNull("Failed to get FileSystem", dfs);
    
    // Create a file that will be intentionally under-replicated
    final String pathString = new String("/testfile");
    final Path path = new Path(pathString);
    long fileLen = blockSize * NUM_BLOCKS;
    DFSTestUtil.createFile(dfs, path, fileLen, REPL_FACTOR, 1);
    
    // Create an under-replicated file
    NameNode namenode = cluster.getNameNode();
    NetworkTopology nettop = cluster.getNamesystem().getBlockManager()
        .getDatanodeManager().getNetworkTopology();
    Map<String,String[]> pmap = new HashMap<String, String[]>();
    Writer result = new StringWriter();
    PrintWriter out = new PrintWriter(result, true);
    InetAddress remoteAddress = InetAddress.getLocalHost();
    NamenodeFsck fsck = new NamenodeFsck(conf, namenode, nettop, pmap, out, 
        NUM_REPLICAS, remoteAddress);
    
    // Run the fsck and check the Result
    final HdfsFileStatus file = 
        namenode.getRpcServer().getFileInfo(pathString);
    assertNotNull(file);
    Result res = new Result(conf);
    fsck.check(pathString, file, res);
    // Also print the output from the fsck, for ex post facto sanity checks
    System.out.println(result.toString());
    assertEquals(res.missingReplicas, 
        (NUM_BLOCKS*REPL_FACTOR) - (NUM_BLOCKS*NUM_REPLICAS));
    assertEquals(res.numExpectedReplicas, NUM_BLOCKS*REPL_FACTOR);
  } finally {
    if(dfs != null) {
      dfs.close();
    }
    if(cluster != null) {
      cluster.shutdown();
    }
  }
}
 
Developer: naver, Project: hadoop, Lines: 69, Source: TestFsck.java

Example 10: testFsckMisPlacedReplicas

import org.apache.hadoop.hdfs.DistributedFileSystem; // import the package/class the method depends on
/**
 * Tests that the # of misreplaced replicas is correct
 * @throws IOException
 */
@Test
public void testFsckMisPlacedReplicas() throws IOException {
  // Desired replication factor
  final short REPL_FACTOR = 2;
  // Number of replicas to actually start
  short NUM_DN = 2;
  // Number of blocks to write
  final short NUM_BLOCKS = 3;
  // Set a small-ish blocksize
  final long blockSize = 512;
  
  String [] racks = {"/rack1", "/rack1"};
  String [] hosts = {"host1", "host2"};
  
  Configuration conf = new Configuration();
  conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, blockSize);
  
  MiniDFSCluster cluster = null;
  DistributedFileSystem dfs = null;
  
  try {
    // Startup a minicluster
    cluster = 
        new MiniDFSCluster.Builder(conf).numDataNodes(NUM_DN).hosts(hosts)
        .racks(racks).build();
    assertNotNull("Failed Cluster Creation", cluster);
    cluster.waitClusterUp();
    dfs = cluster.getFileSystem();
    assertNotNull("Failed to get FileSystem", dfs);
    
    // Create a file that will be intentionally under-replicated
    final String pathString = new String("/testfile");
    final Path path = new Path(pathString);
    long fileLen = blockSize * NUM_BLOCKS;
    DFSTestUtil.createFile(dfs, path, fileLen, REPL_FACTOR, 1);
    
    // Create an under-replicated file
    NameNode namenode = cluster.getNameNode();
    NetworkTopology nettop = cluster.getNamesystem().getBlockManager()
        .getDatanodeManager().getNetworkTopology();
    // Add a new node on different rack, so previous blocks' replicas 
    // are considered to be misplaced
    nettop.add(DFSTestUtil.getDatanodeDescriptor("/rack2", "/host3"));
    NUM_DN++;
    
    Map<String,String[]> pmap = new HashMap<String, String[]>();
    Writer result = new StringWriter();
    PrintWriter out = new PrintWriter(result, true);
    InetAddress remoteAddress = InetAddress.getLocalHost();
    NamenodeFsck fsck = new NamenodeFsck(conf, namenode, nettop, pmap, out, 
        NUM_DN, remoteAddress);
    
    // Run the fsck and check the Result
    final HdfsFileStatus file = 
        namenode.getRpcServer().getFileInfo(pathString);
    assertNotNull(file);
    Result res = new Result(conf);
    fsck.check(pathString, file, res);
    // check misReplicatedBlock number.
    assertEquals(res.numMisReplicatedBlocks, NUM_BLOCKS);
  } finally {
    if(dfs != null) {
      dfs.close();
    }
    if(cluster != null) {
      cluster.shutdown();
    }
  }
}
 
Developer: naver, Project: hadoop, Lines: 74, Source: TestFsck.java

Example 11: testBootstrapStandbyWithActiveNN

import org.apache.hadoop.hdfs.DistributedFileSystem; // import the package/class the method depends on
/**
 * While bootstrapping, in-progress transaction entries should be skipped.
 * Bootstrap usage for BKJM: "-force", "-nonInteractive", "-skipSharedEditsCheck"
 */
@Test
public void testBootstrapStandbyWithActiveNN() throws Exception {
  // make nn0 active
  cluster.transitionToActive(0);
 
  // do ops and generate in-progress edit log data
  Configuration confNN1 = cluster.getConfiguration(1);
  DistributedFileSystem dfs = (DistributedFileSystem) HATestUtil
      .configureFailoverFs(cluster, confNN1);
  for (int i = 1; i <= 10; i++) {
    dfs.mkdirs(new Path("/test" + i));
  }
  dfs.close();

  // shutdown nn1 and delete its edit log files
  cluster.shutdownNameNode(1);
  deleteEditLogIfExists(confNN1);
  cluster.getNameNodeRpc(0).setSafeMode(SafeModeAction.SAFEMODE_ENTER, true);
  cluster.getNameNodeRpc(0).saveNamespace();
  cluster.getNameNodeRpc(0).setSafeMode(SafeModeAction.SAFEMODE_LEAVE, true);

  // check without -skipSharedEditsCheck, Bootstrap should fail for BKJM
  // immediately after saveNamespace
  int rc = BootstrapStandby.run(new String[] { "-force", "-nonInteractive" },
    confNN1);
  Assert.assertEquals("Mismatches return code", 6, rc);

  // check with -skipSharedEditsCheck
  rc = BootstrapStandby.run(new String[] { "-force", "-nonInteractive",
      "-skipSharedEditsCheck" }, confNN1);
  Assert.assertEquals("Mismatches return code", 0, rc);

  // Checkpoint as fast as we can, in a tight loop.
  confNN1.setInt(DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_PERIOD_KEY, 1);
  cluster.restartNameNode(1);
  cluster.transitionToStandby(1);
 
  NameNode nn0 = cluster.getNameNode(0);
  HATestUtil.waitForStandbyToCatchUp(nn0, cluster.getNameNode(1));
  long expectedCheckpointTxId = NameNodeAdapter.getNamesystem(nn0)
      .getFSImage().getMostRecentCheckpointTxId();
  HATestUtil.waitForCheckpoint(cluster, 1,
      ImmutableList.of((int) expectedCheckpointTxId));

  // Should have copied over the namespace
  FSImageTestUtil.assertNNHasCheckpoints(cluster, 1,
      ImmutableList.of((int) expectedCheckpointTxId));
  FSImageTestUtil.assertNNFilesMatch(cluster);
}
 
Developer: naver, Project: hadoop, Lines: 54, Source: TestBootstrapStandbyWithBKJM.java


Note: The org.apache.hadoop.hdfs.DistributedFileSystem.close examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by their respective developers; copyright remains with the original authors. Please consult each project's license before redistributing or using the code. Do not reproduce this article without permission.