

Java IOUtils.closeStream Method Code Examples

This article collects typical usage examples of the Java method org.apache.hadoop.io.IOUtils.closeStream. If you are wondering what IOUtils.closeStream does, how to call it, or where to find usage examples, the hand-picked code samples below should help. You can also explore further usage examples of the enclosing class, org.apache.hadoop.io.IOUtils.


A total of 15 code examples of the IOUtils.closeStream method are shown below, sorted by popularity by default.
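
Before diving into the examples, here is a minimal sketch of the pattern most of them follow. The sketch is illustrative only and is not taken from any of the projects below; the class and method names are invented. IOUtils.closeStream performs a null-safe close and swallows any IOException raised by close(), which is why it is usually called from a finally block, often together with IOUtils.copyBytes with the auto-close flag set to false:

import java.io.IOException;
import java.io.InputStream;

import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IOUtils;

// Illustrative sketch; not part of any project listed below.
public class CloseStreamSketch {
    /** Copies an HDFS file to stdout, closing the input stream in a finally block. */
    public static void printToStdout(FileSystem fs, Path path) throws IOException {
        InputStream in = null;
        try {
            in = fs.open(path);
            // close=false: we close the stream ourselves in the finally block
            IOUtils.copyBytes(in, System.out, 4096, false);
        } finally {
            // Null-safe close that ignores any IOException thrown by close()
            IOUtils.closeStream(in);
        }
    }
}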

Example 1: main

import org.apache.hadoop.io.IOUtils; // import the package/class this method depends on
public static void main(String[] args) throws IOException {
    Configuration conf = new Configuration();
    conf.addResource("core-site.xml");
    conf.addResource("hdfs-site.xml");
    conf.addResource("yarn-site.xml");
    // If Kerberos is not enabled, comment out the following two lines
    UserGroupInformation.setConfiguration(conf);
    UserGroupInformation.loginUserFromKeytab("[email protected]","E:\\星环\\任务\\2016年11月28日\\hdfs.keytab");
    String localFile = "E:\\星环\\yarn-site.xml";
    InputStream in = new BufferedInputStream(new FileInputStream(localFile));
    Path p = new Path( "/tmp/yarn-site.xml");
    FileSystem fs = p.getFileSystem(conf);
    OutputStream out = fs.create(p);
    IOUtils.copyBytes(in, out, conf);
    fs.close();
    IOUtils.closeStream(in);
}
 
Developer: Transwarp-DE, Project: Transwarp-Sample-Code, Lines: 18, Source: UploadFile.java

Example 2: testOfflineImageViewerHelpMessage

import org.apache.hadoop.io.IOUtils; // import the package/class this method depends on
@Test
public void testOfflineImageViewerHelpMessage() throws Throwable {
  final ByteArrayOutputStream bytes = new ByteArrayOutputStream();
  final PrintStream out = new PrintStream(bytes);
  final PrintStream oldOut = System.out;
  try {
    System.setOut(out);
    int status = OfflineImageViewerPB.run(new String[] { "-h" });
    assertTrue("Exit code returned for help option is incorrect", status == 0);
    Assert.assertFalse(
            "Invalid Command error displayed when help option is passed.", bytes
                    .toString().contains("Error parsing command-line options"));
    status =
            OfflineImageViewerPB.run(new String[] { "-h", "-i",
                    originalFsimage.getAbsolutePath(), "-o", "-", "-p",
                    "FileDistribution", "-maxSize", "512", "-step", "8" });
    Assert.assertTrue(
            "Exit code returned for help with other option is incorrect",
            status == -1);
  } finally {
    System.setOut(oldOut);
    IOUtils.closeStream(out);
  }
}
 
Developer: naver, Project: hadoop, Lines: 25, Source: TestOfflineImageViewer.java

Example 3: testRmWithNonexistentGlob

import org.apache.hadoop.io.IOUtils; // import the package/class this method depends on
@Test (timeout = 30000)
public void testRmWithNonexistentGlob() throws Exception {
  Configuration conf = new Configuration();
  FsShell shell = new FsShell();
  shell.setConf(conf);
  final ByteArrayOutputStream bytes = new ByteArrayOutputStream();
  final PrintStream err = new PrintStream(bytes);
  final PrintStream oldErr = System.err;
  System.setErr(err);
  final String results;
  try {
    int exit = shell.run(new String[]{"-rm", "nomatch*"});
    assertEquals(1, exit);
    results = bytes.toString();
    assertTrue(results.contains("rm: `nomatch*': No such file or directory"));
  } finally {
    IOUtils.closeStream(err);
    System.setErr(oldErr);
  }
}
 
Developer: nucypher, Project: hadoop-oss, Lines: 21, Source: TestFsShellReturnCode.java

Example 4: getPersistedPaxosData

import org.apache.hadoop.io.IOUtils; // import the package/class this method depends on
/**
 * Retrieve the persisted data for recovering the given segment from disk.
 */
private PersistedRecoveryPaxosData getPersistedPaxosData(long segmentTxId)
    throws IOException {
  File f = storage.getPaxosFile(segmentTxId);
  if (!f.exists()) {
    // Default instance has no fields filled in (they're optional)
    return null;
  }
  
  InputStream in = new FileInputStream(f);
  try {
    PersistedRecoveryPaxosData ret = PersistedRecoveryPaxosData.parseDelimitedFrom(in);
    Preconditions.checkState(ret != null &&
        ret.getSegmentState().getStartTxId() == segmentTxId,
        "Bad persisted data for segment %s: %s",
        segmentTxId, ret);
    return ret;
  } finally {
    IOUtils.closeStream(in);
  }
}
 
Developer: naver, Project: hadoop, Lines: 24, Source: Journal.java

Example 5: writeStreamToFile

import org.apache.hadoop.io.IOUtils; // import the package/class this method depends on
void writeStreamToFile(InputStream in, PathData target,
                       boolean lazyPersist) throws IOException {
  FSDataOutputStream out = null;
  try {
    out = create(target, lazyPersist);
    IOUtils.copyBytes(in, out, getConf(), true);
  } finally {
    IOUtils.closeStream(out); // just in case copyBytes didn't
  }
}
 
Developer: naver, Project: hadoop, Lines: 11, Source: CommandWithDestination.java

Example 6: testMultiByteCharacters

import org.apache.hadoop.io.IOUtils; // import the package/class this method depends on
/**
 * Tests use of multi-byte characters in property names and values.  This test
 * round-trips multi-byte string literals through saving and loading of config
 * and asserts that the same values were read.
 */
public void testMultiByteCharacters() throws IOException {
  String priorDefaultEncoding = System.getProperty("file.encoding");
  try {
    System.setProperty("file.encoding", "US-ASCII");
    String name = "multi_byte_\u611b_name";
    String value = "multi_byte_\u0641_value";
    out = new BufferedWriter(new OutputStreamWriter(
      new FileOutputStream(CONFIG_MULTI_BYTE), "UTF-8"));
    startConfig();
    declareProperty(name, value, value);
    endConfig();

    Configuration conf = new Configuration(false);
    conf.addResource(new Path(CONFIG_MULTI_BYTE));
    assertEquals(value, conf.get(name));
    FileOutputStream fos = new FileOutputStream(CONFIG_MULTI_BYTE_SAVED);
    try {
      conf.writeXml(fos);
    } finally {
      IOUtils.closeStream(fos);
    }

    conf = new Configuration(false);
    conf.addResource(new Path(CONFIG_MULTI_BYTE_SAVED));
    assertEquals(value, conf.get(name));
  } finally {
    System.setProperty("file.encoding", priorDefaultEncoding);
  }
}
 
Developer: naver, Project: hadoop, Lines: 35, Source: TestConfiguration.java

Example 7: close

import org.apache.hadoop.io.IOUtils; // import the package/class this method depends on
private void close() throws IOException {
  for (int i = 0; i < inReaders.length; i++) {
    IOUtils.closeStream(inReaders[i]);
    inReaders[i] = null;
  }
  if (outWriter != null) {
    outWriter.close();
    outWriter = null;
  }
}
 
Developer: naver, Project: hadoop, Lines: 11, Source: MapFile.java

Example 8: determineMaxIpcNumber

import org.apache.hadoop.io.IOUtils; // import the package/class this method depends on
/**
 * Run through the creation of a log without any faults injected,
 * and count how many RPCs are made to each node. This sets the
 * bounds for the other test cases, so they can exhaustively explore
 * the space of potential failures.
 */
private static long determineMaxIpcNumber() throws Exception {
  Configuration conf = new Configuration();
  MiniJournalCluster cluster = new MiniJournalCluster.Builder(conf).build();
  QuorumJournalManager qjm = null;
  long ret;
  try {
    qjm = createInjectableQJM(cluster);
    qjm.format(FAKE_NSINFO);
    doWorkload(cluster, qjm);
    
    SortedSet<Integer> ipcCounts = Sets.newTreeSet();
    for (AsyncLogger l : qjm.getLoggerSetForTests().getLoggersForTests()) {
      InvocationCountingChannel ch = (InvocationCountingChannel)l;
      ch.waitForAllPendingCalls();
      ipcCounts.add(ch.getRpcCount());
    }

    // All of the loggers should have sent the same number of RPCs, since there
    // were no failures.
    assertEquals(1, ipcCounts.size());
    
    ret = ipcCounts.first();
    LOG.info("Max IPC count = " + ret);
  } finally {
    IOUtils.closeStream(qjm);
    cluster.shutdown();
  }
  return ret;
}
 
Developer: naver, Project: hadoop, Lines: 36, Source: TestQJMWithFaults.java

Example 9: testAppendWithPipelineRecovery

import org.apache.hadoop.io.IOUtils; // import the package/class this method depends on
/**
 * Test to append to the file, when one of datanode in the existing pipeline
 * is down.
 */
@Test
public void testAppendWithPipelineRecovery() throws Exception {
  Configuration conf = new Configuration();
  MiniDFSCluster cluster = null;
  FSDataOutputStream out = null;
  try {
    cluster = new MiniDFSCluster.Builder(conf).manageDataDfsDirs(true)
        .manageNameDfsDirs(true).numDataNodes(4)
        .racks(new String[] { "/rack1", "/rack1", "/rack2", "/rack2" })
        .build();
    cluster.waitActive();

    DistributedFileSystem fs = cluster.getFileSystem();
    Path path = new Path("/test1");
    
    out = fs.create(path, true, BLOCK_SIZE, (short) 3, BLOCK_SIZE);
    AppendTestUtil.write(out, 0, 1024);
    out.close();

    cluster.stopDataNode(3);
    out = fs.append(path);
    AppendTestUtil.write(out, 1024, 1024);
    out.close();
    
    cluster.restartNameNode(true);
    AppendTestUtil.check(fs, path, 2048);
  } finally {
    IOUtils.closeStream(out);
    if (null != cluster) {
      cluster.shutdown();
    }
  }
}
 
Developer: naver, Project: hadoop, Lines: 38, Source: TestFileAppendRestart.java

Example 10: calcPartialBlockChecksum

import org.apache.hadoop.io.IOUtils; // import the package/class this method depends on
private MD5Hash calcPartialBlockChecksum(ExtendedBlock block,
    long requestLength, DataChecksum checksum, DataInputStream checksumIn)
    throws IOException {
  final int bytesPerCRC = checksum.getBytesPerChecksum();
  final int csize = checksum.getChecksumSize();
  final byte[] buffer = new byte[4*1024];
  MessageDigest digester = MD5Hash.getDigester();

  long remaining = requestLength / bytesPerCRC * csize;
  for (int toDigest = 0; remaining > 0; remaining -= toDigest) {
    toDigest = checksumIn.read(buffer, 0,
        (int) Math.min(remaining, buffer.length));
    if (toDigest < 0) {
      break;
    }
    digester.update(buffer, 0, toDigest);
  }
  
  int partialLength = (int) (requestLength % bytesPerCRC);
  if (partialLength > 0) {
    byte[] buf = new byte[partialLength];
    final InputStream blockIn = datanode.data.getBlockInputStream(block,
        requestLength - partialLength);
    try {
      // Get the CRC of the partialLength.
      IOUtils.readFully(blockIn, buf, 0, partialLength);
    } finally {
      IOUtils.closeStream(blockIn);
    }
    checksum.update(buf, 0, partialLength);
    byte[] partialCrc = new byte[csize];
    checksum.writeValue(partialCrc, 0, true);
    digester.update(partialCrc);
  }
  return new MD5Hash(digester.digest());
}
 
Developer: naver, Project: hadoop, Lines: 37, Source: DataXceiver.java

Example 11: testFileIdMismatch

import org.apache.hadoop.io.IOUtils; // import the package/class this method depends on
/**
 * Test complete(..) - verifies that the fileId in the request
 * matches that of the Inode.
 * This test checks that FileNotFoundException exception is thrown in case
 * the fileId does not match.
 */
@Test
public void testFileIdMismatch() throws IOException {
  Configuration conf = new HdfsConfiguration();
  MiniDFSCluster cluster =
      new MiniDFSCluster.Builder(conf).numDataNodes(3).build();
  DistributedFileSystem dfs = null;
  try {
    cluster.waitActive();
    dfs = cluster.getFileSystem();
    DFSClient client = dfs.dfs;

    final Path f = new Path("/testFileIdMismatch.txt");
    createFile(dfs, f, 3);
    long someOtherFileId = -1;
    try {
      cluster.getNameNodeRpc()
          .complete(f.toString(), client.clientName, null, someOtherFileId);
      fail();
    } catch(LeaseExpiredException e) {
      FileSystem.LOG.info("Caught Expected LeaseExpiredException: ", e);
    }
  } finally {
    IOUtils.closeStream(dfs);
    cluster.shutdown();
  }
}
 
Developer: naver, Project: hadoop, Lines: 33, Source: TestFileCreation.java

Example 12: testMultiTableImport

import org.apache.hadoop.io.IOUtils; // import the package/class this method depends on
public void testMultiTableImport() throws IOException {
  String [] argv = getArgv(null, null);
  runImport(new ImportAllTablesTool(), argv);

  Path warehousePath = new Path(this.getWarehouseDir());
  int i = 0;
  for (String tableName : this.tableNames) {
    Path tablePath = new Path(warehousePath, tableName);
    Path filePath = new Path(tablePath, "part-m-00000");

    // dequeue the expected value for this table. This
    // list has the same order as the tableNames list.
    String expectedVal = Integer.toString(i++) + ","
        + this.expectedStrings.get(0);
    this.expectedStrings.remove(0);

    BufferedReader reader = null;
    if (!isOnPhysicalCluster()) {
      reader = new BufferedReader(
          new InputStreamReader(new FileInputStream(
              new File(filePath.toString()))));
    } else {
      FileSystem dfs = FileSystem.get(getConf());
      FSDataInputStream dis = dfs.open(filePath);
      reader = new BufferedReader(new InputStreamReader(dis));
    }
    try {
      String line = reader.readLine();
      assertEquals("Table " + tableName + " expected a different string",
          expectedVal, line);
    } finally {
      IOUtils.closeStream(reader);
    }
  }
}
 
Developer: aliyun, Project: aliyun-maxcompute-data-collectors, Lines: 36, Source: TestAllTables.java

Example 13: close

import org.apache.hadoop.io.IOUtils; // import the package/class this method depends on
@Override
public void close() {
  keyManager.close();

  // close the output file
  IOUtils.closeStream(out); 
  if (fs != null) {
    try {
      fs.delete(idPath, true);
    } catch(IOException ioe) {
      LOG.warn("Failed to delete " + idPath, ioe);
    }
  }
}
 
Developer: naver, Project: hadoop, Lines: 15, Source: NameNodeConnector.java

Example 14: close

import org.apache.hadoop.io.IOUtils; // import the package/class this method depends on
/** Close the connection. */
protected synchronized void close() {
  if (!shouldCloseConnection.get()) {
    LOG.error(getName() + ": the connection is not in the closed state");
    return;
  }

  // release the resources
  // first thing to do; take the connection out of the connection list
  synchronized (connections) {
    connections.removeValue(remoteId, this);
  }

  // close the streams and therefore the socket
  synchronized(this.outLock) {
    if (this.out != null) {
      IOUtils.closeStream(out);
      this.out = null;
    }
  }
  IOUtils.closeStream(in);
  this.in = null;
  if (this.socket != null) {
    try {
      this.socket.close();
      this.socket = null;
    } catch (IOException e) {
      LOG.error("Error while closing socket", e);
    }
  }

  disposeSasl();

  // log the info
  if (LOG.isTraceEnabled()) {
    LOG.trace(getName() + ": closing ipc connection to " + server);
  }

  cleanupCalls(true);

  if (LOG.isTraceEnabled()) {
    LOG.trace(getName() + ": ipc connection to " + server + " closed");
  }
}
 
Developer: fengchen8086, Project: ditb, Lines: 45, Source: RpcClientImpl.java

Example 15: copy

import org.apache.hadoop.io.IOUtils; // import the package/class this method depends on
/**
 * Copy from src to dst, optionally deleting src and overwriting dst.
 * @param src
 * @param dst
 * @param deleteSource - delete src if true
 * @param overwrite  overwrite dst if true; throw IOException if dst exists
 *         and overwrite is false.
 *
 * @return true if copy is successful
 *
 * @throws AccessControlException If access is denied
 * @throws FileAlreadyExistsException If <code>dst</code> already exists
 * @throws FileNotFoundException If <code>src</code> does not exist
 * @throws ParentNotDirectoryException If parent of <code>dst</code> is not
 *           a directory
 * @throws UnsupportedFileSystemException If file system for 
 *         <code>src</code> or <code>dst</code> is not supported
 * @throws IOException If an I/O error occurred
 * 
 * Exceptions applicable to file systems accessed over RPC:
 * @throws RpcClientException If an exception occurred in the RPC client
 * @throws RpcServerException If an exception occurred in the RPC server
 * @throws UnexpectedServerException If server implementation throws 
 *           undeclared exception to RPC server
 * 
 * RuntimeExceptions:
 * @throws InvalidPathException If path <code>dst</code> is invalid
 */
public boolean copy(final Path src, final Path dst, boolean deleteSource,
    boolean overwrite) throws AccessControlException,
    FileAlreadyExistsException, FileNotFoundException,
    ParentNotDirectoryException, UnsupportedFileSystemException, 
    IOException {
  src.checkNotSchemeWithRelative();
  dst.checkNotSchemeWithRelative();
  Path qSrc = makeQualified(src);
  Path qDst = makeQualified(dst);
  checkDest(qSrc.getName(), qDst, overwrite);
  FileStatus fs = FileContext.this.getFileStatus(qSrc);
  if (fs.isDirectory()) {
    checkDependencies(qSrc, qDst);
    mkdir(qDst, FsPermission.getDirDefault(), true);
    FileStatus[] contents = listStatus(qSrc);
    for (FileStatus content : contents) {
      copy(makeQualified(content.getPath()), makeQualified(new Path(qDst,
          content.getPath().getName())), deleteSource, overwrite);
    }
  } else {
    InputStream in=null;
    OutputStream out = null;
    try {
      in = open(qSrc);
      EnumSet<CreateFlag> createFlag = overwrite ? EnumSet.of(
          CreateFlag.CREATE, CreateFlag.OVERWRITE) : 
            EnumSet.of(CreateFlag.CREATE);
      out = create(qDst, createFlag);
      IOUtils.copyBytes(in, out, conf, true);
    } finally {
      IOUtils.closeStream(out);
      IOUtils.closeStream(in);
    }
  }
  if (deleteSource) {
    return delete(qSrc, true);
  } else {
    return true;
  }
}
 
Developer: nucypher, Project: hadoop-oss, Lines: 69, Source: FileContext.java


Note: The org.apache.hadoop.io.IOUtils.closeStream examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by their authors; copyright of the source code remains with the original authors, and any distribution or use should follow the corresponding project's license. Please do not reproduce without permission.