

Java FSDataInputStream.close Method Code Examples

This article collects typical usage examples of the Java method org.apache.hadoop.fs.FSDataInputStream.close. If you are wondering what FSDataInputStream.close does, how to call it, or what real-world usage looks like, the curated examples below may help. You can also explore further usage examples of org.apache.hadoop.fs.FSDataInputStream, the class this method belongs to.


The following 15 code examples of FSDataInputStream.close are shown, sorted by popularity by default.
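Before the examples, here is a minimal sketch of the common open → read → close pattern around FSDataInputStream. It is not taken from any of the projects below; the readAll helper, its signature, and the small-file assumption are illustrative. Because FSDataInputStream implements Closeable, a try-with-resources block (Java 7+) gives the same guarantee as the explicit close() calls in the examples: the stream is closed even if the read throws.

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class FsCloseSketch {
  /** Reads an entire file into memory; assumes the file is small enough to fit in a byte array. */
  public static byte[] readAll(Path path, Configuration conf) throws IOException {
    FileSystem fs = path.getFileSystem(conf);
    int len = (int) fs.getFileStatus(path).getLen();
    byte[] buf = new byte[len];
    // try-with-resources guarantees FSDataInputStream.close() runs,
    // even if readFully throws an IOException.
    try (FSDataInputStream in = fs.open(path)) {
      in.readFully(0, buf); // positioned read of the whole file
    }
    return buf;
  }
}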

Example 1: testVLongRandom

import org.apache.hadoop.fs.FSDataInputStream; // import the package/class the method depends on
public void testVLongRandom() throws IOException {
  int count = 1024 * 1024;
  long data[] = new long[count];
  Random rng = new Random();
  for (int i = 0; i < data.length; ++i) {
    int shift = rng.nextInt(Long.SIZE) + 1;
    long mask = (1L << shift) - 1;
    long a = ((long) rng.nextInt()) << 32;
    long b = ((long) rng.nextInt()) & 0xffffffffL;
    data[i] = (a + b) & mask;
  }
  
  FSDataOutputStream out = fs.create(path);
  for (int i = 0; i < data.length; ++i) {
    Utils.writeVLong(out, data[i]);
  }
  out.close();

  FSDataInputStream in = fs.open(path);
  for (int i = 0; i < data.length; ++i) {
    Assert.assertEquals(Utils.readVLong(in), data[i]);
  }
  in.close();
  fs.delete(path, false);
}
 
Developer: naver, Project: hadoop, Lines: 26, Source: TestVLong.java

Example 2: commitPartitions

import org.apache.hadoop.fs.FSDataInputStream; // import the package/class the method depends on
/**
 *  Move all model output files generated by a PS to the combine directory
 * @param moveSrcPath source path
 * @param moveDestPath dest path
 * @param psId parameter server id
 * @param errorLogs error logs
 * @param matrixMeta model files meta
 */
private void commitPartitions(Path moveSrcPath, Path moveDestPath, ParameterServerId psId, Vector<String> errorLogs, ModelFilesMeta matrixMeta) {
  Path psPath = new Path(moveSrcPath, String.valueOf(psId));
  Path serverMatrixPath = new Path(psPath, matrixMeta.getMatrixName());

  Path psMetaFilePath = new Path(serverMatrixPath, ModelFilesConstent.psModelMetaFileName);

  try {
    FSDataInputStream input = fs.open(psMetaFilePath);
    PSModelFilesMeta serverMatrixMeta = new PSModelFilesMeta();
    serverMatrixMeta.read(input);
    input.close();
    fs.delete(psMetaFilePath, false);

    matrixMeta.merge(serverMatrixMeta);
    HdfsUtil.copyFilesInSameHdfs(serverMatrixPath, moveDestPath, fs);
    LOG.info("copy files of matrix " + matrixMeta.getMatrixName() + " from " + serverMatrixPath + " to " + moveDestPath + " success.");
  } catch (Throwable x) {
    errorLogs.add("copy files of matrix " + matrixMeta.getMatrixName() + " from " + serverMatrixPath + " to " + moveDestPath + " failed, error log is " + x.getMessage());
    LOG.error("copy files of matrix " + matrixMeta.getMatrixName() + " from " + serverMatrixPath + " to " + moveDestPath + " failed. ", x);
  }
}
 
Developer: Tencent, Project: angel, Lines: 30, Source: AMMatrixCommitter.java

Example 3: checkSnapshots

import org.apache.hadoop.fs.FSDataInputStream; // import the package/class the method depends on
@Override
void checkSnapshots() throws Exception {
  byte[] buffer = new byte[32];
  for (Path snapshotFile : snapshotFileLengthMap.keySet()) {        
    long currentSnapshotFileLen = fs.exists(snapshotFile) ? fs
        .getFileStatus(snapshotFile).getLen() : -1L;
    long originalSnapshotFileLen = snapshotFileLengthMap.get(snapshotFile);
    String s = null;
    if (currentSnapshotFileLen != originalSnapshotFileLen) {
      s = "FAILED: " + getClass().getSimpleName()
          + ": file="  + file + ", snapshotFile" + snapshotFile
          + "\n\n currentSnapshotFileLen = " + currentSnapshotFileLen
          +   "\noriginalSnapshotFileLen = " + originalSnapshotFileLen
          + "\n\nfile        : " + fsdir.getINode(file.toString()).toDetailString()
          + "\n\nsnapshotFile: " + fsdir.getINode(snapshotFile.toString()).toDetailString();
      SnapshotTestHelper.dumpTree(s, cluster);
    }
    assertEquals(s, originalSnapshotFileLen, currentSnapshotFileLen);
    // Read the snapshot file out of the boundary
    if (currentSnapshotFileLen != -1L
        && !(this instanceof FileAppendNotClose)) {
      FSDataInputStream input = fs.open(snapshotFile);
      int readLen = input.read(currentSnapshotFileLen, buffer, 0, 1);
      if (readLen != -1) {
        s = "FAILED: " + getClass().getSimpleName()
            + ": file="  + file + ", snapshotFile" + snapshotFile
            + "\n\n currentSnapshotFileLen = " + currentSnapshotFileLen
            +   "\n                readLen = " + readLen
            + "\n\nfile        : " + fsdir.getINode(file.toString()).toDetailString()
            + "\n\nsnapshotFile: " + fsdir.getINode(snapshotFile.toString()).toDetailString();
        SnapshotTestHelper.dumpTree(s, cluster);
      }
      assertEquals(s, -1, readLen);
      input.close();
    }
  }
}
 
Developer: naver, Project: hadoop, Lines: 38, Source: TestSnapshot.java

Example 4: loadMatrixMetaFromFile

import org.apache.hadoop.fs.FSDataInputStream; // import the package/class the method depends on
private void loadMatrixMetaFromFile(String name, String path, Configuration conf) throws IOException {
  Path meteFilePath = new Path(new Path(path, name), ModelFilesConstent.modelMetaFileName);
  ModelFilesMeta meta = new ModelFilesMeta();

  FileSystem fs  = meteFilePath.getFileSystem(conf);
  LOG.info("Load matrix meta for matrix " + name + " from " + path);

  if(!fs.exists(meteFilePath)) {
    throw new IOException("matrix meta file does not exist ");
  }

  FSDataInputStream input = fs.open(meteFilePath);
  meta.read(input);
  input.close();

  rowNum = meta.getRow();
  colNum = meta.getCol();
  maxRowNumInBlock = meta.getBlockRow();
  maxColNumInBlock = meta.getBlockCol();
  rowType = RowType.valueOf(meta.getRowType());
  Map<String, String> oldAttributes = meta.getOptions();
  if(oldAttributes != null && !oldAttributes.isEmpty()) {
    for(Map.Entry<String, String> kv : oldAttributes.entrySet()) {
      attributes.putIfAbsent(kv.getKey(), kv.getValue());
    }
  }
}
 
Developer: Tencent, Project: angel, Lines: 28, Source: MatrixContext.java

Example 5: loadPartitions

import org.apache.hadoop.fs.FSDataInputStream; // import the package/class the method depends on
private void loadPartitions(Path matrixPath, FileSystem fs,
  List<Integer> partitionIds, int startPos, int endPos,
  PSModelFilesMeta serverMatrixMeta) throws IOException {

  ServerPartition partition = null;
  FSDataInputStream input = null;
  long offset = 0;
  String currentFileName = "";
  for(int i = startPos; i < endPos; i++) {
    partition = partitionMaps.get(partitionIds.get(i));
    ModelPartitionMeta partMeta = serverMatrixMeta.getPartitionMeta(partitionIds.get(i));
    String fileName = partMeta.getFileName();
    offset = partMeta.getOffset();
    if(!fileName.equals(currentFileName)) {
      currentFileName = fileName;
      if(input != null) {
        input.close();
      }
      input = fs.open(new Path(matrixPath, currentFileName));

    }
    input.seek(offset);
    partition.load(input);
  }

  if(input != null) {
    input.close();
  }
}
 
Developer: Tencent, Project: angel, Lines: 30, Source: ServerMatrix.java

Example 6: copyMapOutput

import org.apache.hadoop.fs.FSDataInputStream; // import the package/class the method depends on
/**
 * Retrieve the map output of a single map task
 * and send it to the merger.
 */
private boolean copyMapOutput(TaskAttemptID mapTaskId) throws IOException {
  // Figure out where the map task stored its output.
  Path mapOutputFileName = localMapFiles.get(mapTaskId).getOutputFile();
  Path indexFileName = mapOutputFileName.suffix(".index");

  // Read its index to determine the location of our split
  // and its size.
  SpillRecord sr = new SpillRecord(indexFileName, job);
  IndexRecord ir = sr.getIndex(reduce);

  long compressedLength = ir.partLength;
  long decompressedLength = ir.rawLength;

  compressedLength -= CryptoUtils.cryptoPadding(job);
  decompressedLength -= CryptoUtils.cryptoPadding(job);

  // Get the location for the map output - either in-memory or on-disk
  MapOutput<K, V> mapOutput = merger.reserve(mapTaskId, decompressedLength,
      id);

  // Check if we can shuffle *now* ...
  if (mapOutput == null) {
    LOG.info("fetcher#" + id + " - MergeManager returned Status.WAIT ...");
    return false;
  }

  // Go!
  LOG.info("localfetcher#" + id + " about to shuffle output of map " + 
           mapOutput.getMapId() + " decomp: " +
           decompressedLength + " len: " + compressedLength + " to " +
           mapOutput.getDescription());

  // now read the file, seek to the appropriate section, and send it.
  FileSystem localFs = FileSystem.getLocal(job).getRaw();
  FSDataInputStream inStream = localFs.open(mapOutputFileName);

  inStream = CryptoUtils.wrapIfNecessary(job, inStream);

  try {
    inStream.seek(ir.startOffset + CryptoUtils.cryptoPadding(job));
    mapOutput.shuffle(LOCALHOST, inStream, compressedLength, decompressedLength, metrics, reporter);
  } finally {
    try {
      inStream.close();
    } catch (IOException ioe) {
      LOG.warn("IOException closing inputstream from map output: "
          + ioe.toString());
    }
  }

  scheduler.copySucceeded(mapTaskId, LOCALHOST, compressedLength, 0, 0,
      mapOutput);
  return true; // successful fetch.
}
 
Developer: naver, Project: hadoop, Lines: 59, Source: LocalFetcher.java

Example 7: getFileContentsUsingDfs

import org.apache.hadoop.fs.FSDataInputStream; // import the package/class the method depends on
private byte[] getFileContentsUsingDfs(String fileName, int len)
    throws Exception {
  final FSDataInputStream in = hdfs.open(new Path(fileName));
  final byte[] ret = new byte[len];
  in.readFully(ret);
  try {
    in.readByte();
    Assert.fail("expected end of file");
  } catch (EOFException e) {
    // expected. Unfortunately there is no associated message to check
  }
  in.close();
  return ret;
}
 
Developer: naver, Project: hadoop, Lines: 15, Source: TestRpcProgramNfs3.java

Example 8: reloadState

import org.apache.hadoop.fs.FSDataInputStream; // import the package/class the method depends on
private void reloadState(Path stateFile, Configuration conf) 
throws Exception {
  FileSystem fs = stateFile.getFileSystem(conf);
  if (fs.exists(stateFile)) {
    System.out.println("Reading state from " + stateFile.toString());
    FSDataInputStream in = fs.open(stateFile);
    
    read(in);
    in.close();
  } else {
    System.out.println("No state information found for " + stateFile);
  }
}
 
Developer: naver, Project: hadoop, Lines: 14, Source: StatePool.java

Example 9: initReader

import org.apache.hadoop.fs.FSDataInputStream; // import the package/class the method depends on
@Override
protected String initReader(FSDataInputStream stream) throws IOException {
  // We don't use the stream because we have to have the magic stream above.
  if (stream != null) {
    stream.close();
  }
  reset();
  return null;
}
 
Developer: fengchen8086, Project: ditb, Lines: 10, Source: SequenceFileLogReader.java

Example 10: getFileChecksum

import org.apache.hadoop.fs.FSDataInputStream; // import the package/class the method depends on
@Override
public String getFileChecksum(Path sourceFile)
    throws IOException {
  FileSystem fs = sourceFile.getFileSystem(this.conf);
  FSDataInputStream in = null;
  try {
    in = fs.open(sourceFile);
    return this.checksum.computeChecksum(in);
  } finally {
    if (in != null) {
      in.close();
    }
  }
}
 
Developer: naver, Project: hadoop, Lines: 15, Source: SharedCacheClientImpl.java

Example 11: testDataBlockEncryption

import org.apache.hadoop.fs.FSDataInputStream; // import the package/class the method depends on
@Test(timeout=20000)
public void testDataBlockEncryption() throws IOException {
  final int blocks = 10;
  final int[] blockSizes = new int[blocks];
  for (int i = 0; i < blocks; i++) {
    blockSizes[i] = (1024 + RNG.nextInt(1024 * 63)) / Bytes.SIZEOF_INT;
  }
  for (Compression.Algorithm compression : TestHFileBlock.COMPRESSION_ALGORITHMS) {
    Path path = new Path(TEST_UTIL.getDataTestDir(), "block_v3_" + compression + "_AES");
    LOG.info("testDataBlockEncryption: encryption=AES compression=" + compression);
    long totalSize = 0;
    HFileContext fileContext = new HFileContextBuilder()
      .withCompression(compression)
      .withEncryptionContext(cryptoContext)
      .build();
    FSDataOutputStream os = fs.create(path);
    try {
      for (int i = 0; i < blocks; i++) {
        totalSize += writeBlock(os, fileContext, blockSizes[i]);
      }
    } finally {
      os.close();
    }
    FSDataInputStream is = fs.open(path);
    try {
      HFileBlock.FSReaderImpl hbr = new HFileBlock.FSReaderImpl(is, totalSize, fileContext);
      long pos = 0;
      for (int i = 0; i < blocks; i++) {
        pos += readAndVerifyBlock(pos, fileContext, hbr, blockSizes[i]);
      }
    } finally {
      is.close();
    }
  }
}
 
Developer: fengchen8086, Project: ditb, Lines: 36, Source: TestHFileEncryption.java

Example 12: testRandomSeeks

import org.apache.hadoop.fs.FSDataInputStream; // import the package/class the method depends on
/**
 * Lifted from TestLocalFileSystem:
 * Regression test for HADOOP-9307: BufferedFSInputStream returning
 * wrong results after certain sequences of seeks and reads.
 */
@Test
public void testRandomSeeks() throws Throwable {
  int limit = getContract().getLimit(TEST_RANDOM_SEEK_COUNT,
                                     DEFAULT_RANDOM_SEEK_COUNT);
  describe("Testing " + limit + " random seeks");
  int filesize = 10 * 1024;
  byte[] buf = dataset(filesize, 0, 255);
  Path randomSeekFile = path("testrandomseeks.bin");
  createFile(getFileSystem(), randomSeekFile, false, buf);
  Random r = new Random();
  FSDataInputStream stm = getFileSystem().open(randomSeekFile);

  // Record the sequence of seeks and reads which trigger a failure.
  int[] seeks = new int[10];
  int[] reads = new int[10];
  try {
    for (int i = 0; i < limit; i++) {
      int seekOff = r.nextInt(buf.length);
      int toRead = r.nextInt(Math.min(buf.length - seekOff, 32000));

      seeks[i % seeks.length] = seekOff;
      reads[i % reads.length] = toRead;
      verifyRead(stm, buf, seekOff, toRead);
    }
  } catch (AssertionError afe) {
    StringBuilder sb = new StringBuilder();
    sb.append("Sequence of actions:\n");
    for (int j = 0; j < seeks.length; j++) {
      sb.append("seek @ ").append(seeks[j]).append("  ")
        .append("read ").append(reads[j]).append("\n");
    }
    LOG.error(sb.toString());
    throw afe;
  } finally {
    stm.close();
  }
}
 
Developer: naver, Project: hadoop, Lines: 43, Source: AbstractContractSeekTest.java

Example 13: getVersion

import org.apache.hadoop.fs.FSDataInputStream; // import the package/class the method depends on
/**
 * Verifies current version of file system
 *
 * @param fs filesystem object
 * @param rootdir root hbase directory
 * @return null if no version file exists, version string otherwise.
 * @throws IOException e
 * @throws org.apache.hadoop.hbase.exceptions.DeserializationException
 */
public static String getVersion(FileSystem fs, Path rootdir)
throws IOException, DeserializationException {
  Path versionFile = new Path(rootdir, HConstants.VERSION_FILE_NAME);
  FileStatus[] status = null;
  try {
    // hadoop 2.0 throws FNFE if directory does not exist.
    // hadoop 1.0 returns null if directory does not exist.
    status = fs.listStatus(versionFile);
  } catch (FileNotFoundException fnfe) {
    return null;
  }
  if (status == null || status.length == 0) return null;
  String version = null;
  byte [] content = new byte [(int)status[0].getLen()];
  FSDataInputStream s = fs.open(versionFile);
  try {
    IOUtils.readFully(s, content, 0, content.length);
    if (ProtobufUtil.isPBMagicPrefix(content)) {
      version = parseVersionFrom(content);
    } else {
      // Presume it pre-pb format.
      InputStream is = new ByteArrayInputStream(content);
      DataInputStream dis = new DataInputStream(is);
      try {
        version = dis.readUTF();
      } finally {
        dis.close();
      }
    }
  } catch (EOFException eof) {
    LOG.warn("Version file was empty, odd, will try to set it.");
  } finally {
    s.close();
  }
  return version;
}
 
Developer: fengchen8086, Project: ditb, Lines: 46, Source: FSUtils.java

Example 14: testRevocation

import org.apache.hadoop.fs.FSDataInputStream; // import the package/class the method depends on
/**
 * Test that when we have an uncache request, and the client refuses to release
 * the replica for a long time, we will un-mlock it.
 */
@Test(timeout=120000)
public void testRevocation() throws Exception {
  assumeTrue(NativeCodeLoader.isNativeCodeLoaded() && !Path.WINDOWS);
  BlockReaderTestUtil.enableHdfsCachingTracing();
  BlockReaderTestUtil.enableShortCircuitShmTracing();
  Configuration conf = getDefaultConf();
  // Set a really short revocation timeout.
  conf.setLong(DFSConfigKeys.DFS_DATANODE_CACHE_REVOCATION_TIMEOUT_MS, 250L);
  // Poll very often
  conf.setLong(DFSConfigKeys.DFS_DATANODE_CACHE_REVOCATION_POLLING_MS, 2L);
  MiniDFSCluster cluster = null;
  cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
  cluster.waitActive();
  DistributedFileSystem dfs = cluster.getFileSystem();

  // Create and cache a file.
  final String TEST_FILE = "/test_file2";
  DFSTestUtil.createFile(dfs, new Path(TEST_FILE),
      BLOCK_SIZE, (short)1, 0xcafe);
  dfs.addCachePool(new CachePoolInfo("pool"));
  long cacheDirectiveId =
      dfs.addCacheDirective(new CacheDirectiveInfo.Builder().
          setPool("pool").setPath(new Path(TEST_FILE)).
          setReplication((short) 1).build());
  FsDatasetSpi<?> fsd = cluster.getDataNodes().get(0).getFSDataset();
  DFSTestUtil.verifyExpectedCacheUsage(BLOCK_SIZE, 1, fsd);

  // Mmap the file.
  FSDataInputStream in = dfs.open(new Path(TEST_FILE));
  ByteBuffer buf =
      in.read(null, BLOCK_SIZE, EnumSet.noneOf(ReadOption.class));

  // Attempt to uncache file.  The file should get uncached.
  LOG.info("removing cache directive {}", cacheDirectiveId);
  dfs.removeCacheDirective(cacheDirectiveId);
  LOG.info("finished removing cache directive {}", cacheDirectiveId);
  Thread.sleep(1000);
  DFSTestUtil.verifyExpectedCacheUsage(0, 0, fsd);

  // Cleanup
  in.releaseBuffer(buf);
  in.close();
  cluster.shutdown();
}
 
Developer: naver, Project: hadoop, Lines: 49, Source: TestFsDatasetCacheRevocation.java

Example 15: testReaderV2Internals

import org.apache.hadoop.fs.FSDataInputStream; // import the package/class the method depends on
protected void testReaderV2Internals() throws IOException {
  if(includesTag) {
    TEST_UTIL.getConfiguration().setInt("hfile.format.version", 3);
  }
  for (Compression.Algorithm algo : COMPRESSION_ALGORITHMS) {
    for (boolean pread : new boolean[] { false, true }) {
        LOG.info("testReaderV2: Compression algorithm: " + algo +
                 ", pread=" + pread);
      Path path = new Path(TEST_UTIL.getDataTestDir(), "blocks_v2_"
          + algo);
      FSDataOutputStream os = fs.create(path);
      HFileContext meta = new HFileContextBuilder()
                         .withCompression(algo)
                         .withIncludesMvcc(includesMemstoreTS)
                         .withIncludesTags(includesTag)
                         .withBytesPerCheckSum(HFile.DEFAULT_BYTES_PER_CHECKSUM)
                         .build();
      HFileBlock.Writer hbw = new HFileBlock.Writer(null,
         meta);
      long totalSize = 0;
      for (int blockId = 0; blockId < 2; ++blockId) {
        DataOutputStream dos = hbw.startWriting(BlockType.DATA);
        for (int i = 0; i < 1234; ++i)
          dos.writeInt(i);
        hbw.writeHeaderAndData(os);
        totalSize += hbw.getOnDiskSizeWithHeader();
      }
      os.close();

      FSDataInputStream is = fs.open(path);
      meta = new HFileContextBuilder()
      .withHBaseCheckSum(true)
      .withIncludesMvcc(includesMemstoreTS)
      .withIncludesTags(includesTag)
      .withCompression(algo).build();
      HFileBlock.FSReader hbr = new HFileBlock.FSReaderImpl(is, totalSize, meta);
      HFileBlock b = hbr.readBlockData(0, -1, -1, pread);
      is.close();
      assertEquals(0, HFile.getChecksumFailuresCount());

      b.sanityCheck();
      assertEquals(4936, b.getUncompressedSizeWithoutHeader());
      assertEquals(algo == GZ ? 2173 : 4936,
                   b.getOnDiskSizeWithoutHeader() - b.totalChecksumBytes());
      HFileBlock expected = b;

      if (algo == GZ) {
        is = fs.open(path);
        hbr = new HFileBlock.FSReaderImpl(is, totalSize, meta);
        b = hbr.readBlockData(0, 2173 + HConstants.HFILEBLOCK_HEADER_SIZE +
                              b.totalChecksumBytes(), -1, pread);
        assertEquals(expected, b);
        int wrongCompressedSize = 2172;
        try {
          b = hbr.readBlockData(0, wrongCompressedSize
              + HConstants.HFILEBLOCK_HEADER_SIZE, -1, pread);
          fail("Exception expected");
        } catch (IOException ex) {
          String expectedPrefix = "On-disk size without header provided is "
              + wrongCompressedSize + ", but block header contains "
              + b.getOnDiskSizeWithoutHeader() + ".";
          assertTrue("Invalid exception message: '" + ex.getMessage()
              + "'.\nMessage is expected to start with: '" + expectedPrefix
              + "'", ex.getMessage().startsWith(expectedPrefix));
        }
        is.close();
      }
    }
  }
}
 
Developer: fengchen8086, Project: ditb, Lines: 71, Source: TestHFileBlock.java


Note: The org.apache.hadoop.fs.FSDataInputStream.close examples in this article were collected by 純淨天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by various developers, and the copyright of the source code belongs to the original authors. For distribution and use, please follow the license of the corresponding project; do not reproduce without permission.