

Java DFSInputStream.close Method Code Examples

This article collects typical usage examples of the Java method org.apache.hadoop.hdfs.DFSInputStream.close. If you are wondering what DFSInputStream.close does, how to call it, or what real-world usage looks like, the curated examples below should help. You can also explore further usage examples of the enclosing class, org.apache.hadoop.hdfs.DFSInputStream.


Eight code examples of the DFSInputStream.close method are shown below, sorted by popularity by default.
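Before the examples, here is a minimal, self-contained sketch of the typical open/read/close lifecycle of a DFSInputStream. This sketch is not taken from any of the projects below; the NameNode URI and file path are illustrative assumptions.

import java.io.IOException;
import java.net.URI;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.DFSClient;
import org.apache.hadoop.hdfs.DFSInputStream;

public class DfsInputStreamCloseSketch {
  public static void main(String[] args) throws IOException {
    Configuration conf = new Configuration();
    // hdfs://localhost:8020 and /tmp/example.txt are assumed values
    DFSClient client = new DFSClient(URI.create("hdfs://localhost:8020"), conf);
    DFSInputStream in = null;
    try {
      in = client.open("/tmp/example.txt");
      int firstByte = in.read(); // read a single byte, as several examples below do
      System.out.println("first byte: " + firstByte);
    } finally {
      if (in != null) {
        in.close(); // releases the sockets and buffers held by the stream
      }
      client.close();
    }
  }
}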

Example 1: readDFSPaths

import org.apache.hadoop.hdfs.DFSInputStream; // import the package/class the method depends on
/**
 * Lists, then reads the first byte of every file in this
 * hostname/process' namespace.
 * @return -1 on error, 0 on success
 */
private static int readDFSPaths() {
  if (listDFSPaths() != 0) {
    return -1;
  }

  try {
    for (Map.Entry<String, OutputStream> file : files_.entrySet()) {
      long startTime = System.nanoTime();
      DFSInputStream in = dfsClient_.open(file.getKey());
      timingOpen_.add((System.nanoTime() - startTime) / 1E9);

      in.read();
      in.close();
    }
  } catch (IOException e) {
    e.printStackTrace();
    return -1; // an I/O failure is an error per the contract above
  }

  return 0;
}
 
Developer: rhli, Project: hadoop-EAR, Lines: 27, Source: MStress_Client.java

Example 2: addDirToMaps

import org.apache.hadoop.hdfs.DFSInputStream; // import the package/class the method depends on
private void addDirToMaps(Path dir, DFSClient client) throws IOException {
  FileStatus[] children = dfs.listStatus(dir);

  if (children == null) return;

  for (FileStatus child: children) {
    if (!child.isDir()) { // get block ids for file
      Path path = child.getPath(); // paths will be unique
      fileMap.put(path, new ArrayList<Long>());

      DFSInputStream stm = client.open(child.getPath().toUri().getPath());
      LocatedBlocks blocks = stm.fetchLocatedBlocks();
      stm.close();

      for (int i = 0; i < blocks.locatedBlockCount(); i++) {
        Long blockId = blocks.get(i).getBlock().getBlockId();
        fileMap.get(path).add(blockId); // add to file block list
        blockRefMap.put(blockId, null); // mark as unreferenced
      }
    }
    else {
      // If child is a directory, recurse on it
      addDirToMaps(child.getPath(), client);
    }
  }
}
 
Developer: rhli, Project: hadoop-EAR, Lines: 27, Source: WaitingRoom.java

Example 3: cacheInitialContents

import org.apache.hadoop.hdfs.DFSInputStream; // import the package/class the method depends on
private byte[] cacheInitialContents() throws IOException {
  HdfsFileStatus status = dfsClient.getFileInfo(name);
  byte[] content = new byte[(int)status.getLen()];
  DFSInputStream in = null;
  try {
    in = dfsClient.open(name);
    IOUtils.readFully(in, content, 0, content.length);
  } finally {
    if (in != null) { // open() may have thrown before in was assigned
      in.close();
    }
  }
  return content;
}
 
Developer: naver, Project: hadoop, Lines: 13, Source: TestFsck.java
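A note on the close pattern in Example 3: DFSInputStream implements java.io.Closeable, so on Java 7 and later the explicit finally block can be replaced with try-with-resources, which closes the stream even when the read throws. A minimal sketch of the same method under that assumption:

private byte[] cacheInitialContents() throws IOException {
  HdfsFileStatus status = dfsClient.getFileInfo(name);
  byte[] content = new byte[(int) status.getLen()];
  // try-with-resources calls in.close() automatically, even on exceptions
  try (DFSInputStream in = dfsClient.open(name)) {
    IOUtils.readFully(in, content, 0, content.length);
  }
  return content;
}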

Example 4: checkSalvagedRemains

import org.apache.hadoop.hdfs.DFSInputStream; // import the package/class the method depends on
public void checkSalvagedRemains() throws IOException {
  int chainIdx = 0;
  HdfsFileStatus status = dfsClient.getFileInfo(name);
  long length = status.getLen();
  int numBlocks = (int)((length + blockSize - 1) / blockSize);
  DFSInputStream in = null;
  byte[] blockBuffer = new byte[blockSize];

  try {
    for (int blockIdx = 0; blockIdx < numBlocks; blockIdx++) {
      if (blocksToCorrupt.contains(blockIdx)) {
        if (in != null) {
          in.close();
          in = null;
        }
        continue;
      }
      if (in == null) {
        in = dfsClient.open("/lost+found" + name + "/" + chainIdx);
        chainIdx++;
      }
      int len = blockBuffer.length;
      if (blockIdx == (numBlocks - 1)) {
        // The last block might not be full-length
        len = (int)(in.getFileLength() % blockSize);
        if (len == 0) len = blockBuffer.length;
      }
      IOUtils.readFully(in, blockBuffer, 0, len);
      int startIdx = blockIdx * blockSize;
      for (int i = 0; i < len; i++) {
        if (initialContents[startIdx + i] != blockBuffer[i]) {
          throw new IOException("salvaged file " + name + " differed " +
          "from what we expected on block " + blockIdx);
        }
      }
    }
  } finally {
    IOUtils.cleanup(null, in);
  }
}
 
Developer: naver, Project: hadoop, Lines: 41, Source: TestFsck.java
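Unlike the bare in.close() of Example 3, Example 4 releases the stream with IOUtils.cleanup(null, in), which tolerates a null stream and ignores close-time failures (it logs them only when a non-null Log is passed). Roughly, the call behaves like this simplified sketch, not the actual Hadoop implementation:

// approximate behavior of org.apache.hadoop.io.IOUtils.cleanup(null, in)
if (in != null) {
  try {
    in.close();
  } catch (Throwable t) {
    // with a null Log argument, the failure is silently ignored
  }
}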

Example 5: checkSalvagedRemains

import org.apache.hadoop.hdfs.DFSInputStream; // import the package/class the method depends on
public void checkSalvagedRemains() throws IOException {
  int chainIdx = 0;
  HdfsFileStatus status = dfsClient.getFileInfo(name);
  long length = status.getLen();
  int numBlocks = (int)((length + blockSize - 1) / blockSize);
  DFSInputStream in = null;
  byte[] blockBuffer = new byte[blockSize];

  try {
    for (int blockIdx = 0; blockIdx < numBlocks; blockIdx++) {
      if (blocksToCorrupt.contains(blockIdx)) {
        if (in != null) {
          in.close();
          in = null;
        }
        continue;
      }
      if (in == null) {
        in = dfsClient.open("/lost+found" + name + "/" + chainIdx);
        chainIdx++;
      }
      int len = blockBuffer.length;
      if (blockIdx == (numBlocks - 1)) {
        // The last block might not be full-length
        len = (int)(in.getFileLength() % blockSize);
        if (len == 0) len = blockBuffer.length;
      }
      IOUtils.readFully(in, blockBuffer, 0, len);
      int startIdx = blockIdx * blockSize;
      for (int i = 0; i < len; i++) {
        if (initialContents[startIdx + i] != blockBuffer[i]) {
          throw new IOException("salvaged file " + name + " differed " +
          "from what we expected on block " + blockIdx);
        }
      }
    }
  } finally {
    IOUtils.cleanup(null, in);
  }
}
 
Developer: ict-carch, Project: hadoop-plus, Lines: 41, Source: TestFsck.java

Example 6: cacheInitialContents

import org.apache.hadoop.hdfs.DFSInputStream; // import the package/class the method depends on
private byte[] cacheInitialContents() throws IOException {
  HdfsFileStatus status = dfsClient.getFileInfo(name);
  byte[] content = new byte[(int) status.getLen()];
  DFSInputStream in = null;
  try {
    in = dfsClient.open(name);
    IOUtils.readFully(in, content, 0, content.length);
  } finally {
    if (in != null) { // open() may have thrown before in was assigned
      in.close();
    }
  }
  return content;
}
 
Developer: hopshadoop, Project: hops, Lines: 13, Source: TestFsck.java

Example 7: testClientUpdateMethodList

import org.apache.hadoop.hdfs.DFSInputStream; // import the package/class the method depends on
/**
 * Test that when the name-node's fingerprint changes, the client
 * re-fetches the name-node proxy.
 */
public void testClientUpdateMethodList() throws IOException {
  InetSocketAddress addr = cluster.getNameNode().getNameNodeDNAddress();
  DFSClient client = new DFSClient(addr, cluster.getNameNode().getConf());
  ClientProtocol oldNamenode = client.namenode;
  
  // The client's name-node proxy should stay the same as long as the
  // namenode reports the same fingerprint.
  //
  OutputStream os = client.create("/testClientUpdateMethodList.txt", true);
  os.write(66);
  os.close();
  TestCase.assertSame(oldNamenode, client.namenode);    
  int oldFingerprint = cluster.getNameNode().getClientProtocolMethodsFingerprint();
  TestCase.assertEquals(oldFingerprint, client.namenodeProtocolProxy
      .getMethodsFingerprint());
  
  // The namenode's fingerprint will differ from the client's. The client is
  // supposed to get a new proxy.
  //
  cluster.getNameNode().setClientProtocolMethodsFingerprint(666);
  os = client.create("/testClientUpdateMethodList1.txt", true);
  os.write(88);
  os.close();
  TestCase.assertNotSame(oldNamenode, client.namenode);
  // Since we didn't change the name-node's method list, the fingerprint
  // obtained from the new proxy should be the same as the previous one.
  TestCase.assertEquals(oldFingerprint, client.namenodeProtocolProxy
      .getMethodsFingerprint());
  
  // The client's name-node proxy should stay the same as long as the
  // namenode reports the same fingerprint.
  //
  ClientProtocol namenode1 = client.namenode;
  cluster.getNameNode().setClientProtocolMethodsFingerprint(oldFingerprint);
  DFSInputStream dis = client.open("/testClientUpdateMethodList.txt");
  int val = dis.read();
  TestCase.assertEquals(66, val);
  dis.close();
  TestCase.assertSame(namenode1, client.namenode);

  // The namenode's fingerprint will differ from the client's. The client is
  // supposed to get a new proxy.
  //
  cluster.getNameNode().setClientProtocolMethodsFingerprint(888);
  dis = client.open("/testClientUpdateMethodList1.txt");
  val = dis.read();
  TestCase.assertEquals(88, val);
  dis.close();
  // The fingerprint changed, so the client should have fetched a new proxy.
  TestCase.assertNotSame(namenode1, client.namenode);
}
 
Developer: rhli, Project: hadoop-EAR, Lines: 57, Source: TestDFSClientUpdateNameNodeSignature.java

Example 8: checkSalvagedRemains

import org.apache.hadoop.hdfs.DFSInputStream; // import the package/class the method depends on
public void checkSalvagedRemains() throws IOException {
  int chainIdx = 0;
  HdfsFileStatus status = dfsClient.getFileInfo(name);
  long length = status.getLen();
  int numBlocks = (int) ((length + blockSize - 1) / blockSize);
  DFSInputStream in = null;
  byte[] blockBuffer = new byte[blockSize];

  try {
    for (int blockIdx = 0; blockIdx < numBlocks; blockIdx++) {
      if (blocksToCorrupt.contains(blockIdx)) {
        if (in != null) {
          in.close();
          in = null;
        }
        continue;
      }
      if (in == null) {
        in = dfsClient.open("/lost+found" + name + "/" + chainIdx);
        chainIdx++;
      }
      int len = blockBuffer.length;
      if (blockIdx == (numBlocks - 1)) {
        // The last block might not be full-length
        len = (int) (in.getFileLength() % blockSize);
        if (len == 0) {
          len = blockBuffer.length;
        }
      }
      IOUtils.readFully(in, blockBuffer, 0, len);
      int startIdx = blockIdx * blockSize;
      for (int i = 0; i < len; i++) {
        if (initialContents[startIdx + i] != blockBuffer[i]) {
          throw new IOException("salvaged file " + name + " differed " +
              "from what we expected on block " + blockIdx);
        }
      }
    }
  } finally {
    IOUtils.cleanup(null, in);
  }
}
 
Developer: hopshadoop, Project: hops, Lines: 43, Source: TestFsck.java


Note: the org.apache.hadoop.hdfs.DFSInputStream.close method examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other open-source code and documentation platforms. The code snippets were selected from open-source projects contributed by many developers; copyright in the source code belongs to the original authors. Consult the corresponding project's License before distributing or using the code. Do not reproduce this article without permission.