

Java IOUtils Class Code Examples

This article collects typical usage examples of the Java class IOUtils from org.apache.hadoop.io. If you are wondering what the IOUtils class does, how to use it, or where to find usage examples, the curated class code examples below should help.


The IOUtils class belongs to the org.apache.hadoop.io package. A total of 15 IOUtils code examples are presented below, sorted by popularity by default.
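
Before diving in, here is a minimal, self-contained sketch of the three IOUtils idioms that recur throughout the examples on this page: copying bytes between streams, quietly closing a single stream, and cleaning up several closeables in a finally block. The HDFS paths are illustrative placeholders, not taken from any example below.

import java.io.InputStream;
import java.io.OutputStream;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IOUtils;

public class IOUtilsBasics {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    FileSystem fs = FileSystem.get(conf);
    InputStream in = null;
    OutputStream out = null;
    try {
      in = fs.open(new Path("/tmp/input.txt"));      // placeholder path
      out = fs.create(new Path("/tmp/output.txt"));  // placeholder path
      // Copy with a 4 KB buffer; 'false' leaves both streams open, so the
      // finally block below stays responsible for closing them.
      IOUtils.copyBytes(in, out, 4096, false);
    } finally {
      // closeStream closes quietly, swallowing any IOException from close().
      IOUtils.closeStream(in);
      // cleanup closes each argument in turn, logging failures when a Log is given.
      IOUtils.cleanup(null, out);
    }
  }
}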

Example 1: testFailoverOnConnectTimeout

import org.apache.hadoop.io.IOUtils; // import the required package/class
/**
 * Test that even a non-idempotent method will properly fail-over if the
 * first IPC attempt times out trying to connect. Regression test for
 * HDFS-4404. 
 */
@Test
public void testFailoverOnConnectTimeout() throws Exception {
  conf.setClass(CommonConfigurationKeysPublic.HADOOP_RPC_SOCKET_FACTORY_CLASS_DEFAULT_KEY,
      InjectingSocketFactory.class, SocketFactory.class);
  // Set up the InjectingSocketFactory to throw a ConnectTimeoutException
  // when connecting to the first NN.
  InjectingSocketFactory.portToInjectOn = cluster.getNameNodePort(0);

  FileSystem fs = HATestUtil.configureFailoverFs(cluster, conf);
  
  // Make the second NN the active one.
  cluster.shutdownNameNode(0);
  cluster.transitionToActive(1);
  
  // Call a non-idempotent method, and ensure the failover of the call proceeds
  // successfully.
  IOUtils.closeStream(fs.create(TEST_FILE));
}
 
Developer ID: naver, Project: hadoop, Lines of code: 24, Source: TestDFSClientFailover.java

Example 2: testExcludedCiphers

import org.apache.hadoop.io.IOUtils; // import the required package/class
/**
 * Test that verifies that excluded ciphers (SSL_RSA_WITH_RC4_128_SHA,
 * TLS_ECDH_ECDSA_WITH_RC4_128_SHA,TLS_ECDH_RSA_WITH_RC4_128_SHA,
 * TLS_ECDHE_ECDSA_WITH_RC4_128_SHA,TLS_ECDHE_RSA_WITH_RC4_128_SHA) are not
 * available for negotiation during SSL connection.
 */
@Test
public void testExcludedCiphers() throws Exception {
  URL url = new URL(baseUrl, "/echo?a=b&c=d");
  HttpsURLConnection conn = (HttpsURLConnection) url.openConnection();
  SSLSocketFactory sslSocketF = clientSslFactory.createSSLSocketFactory();
  PrefferedCipherSSLSocketFactory testPreferredCipherSSLSocketF
      = new PrefferedCipherSSLSocketFactory(sslSocketF,
          excludeCiphers.split(","));
  conn.setSSLSocketFactory(testPreferredCipherSSLSocketF);
  assertFalse("excludedCipher list is empty", excludeCiphers.isEmpty());
  try {
    InputStream in = conn.getInputStream();
    ByteArrayOutputStream out = new ByteArrayOutputStream();
    IOUtils.copyBytes(in, out, 1024);
    fail("No Ciphers in common, SSLHandshake must fail.");
  } catch (SSLHandshakeException ex) {
    LOG.info("No Ciphers in common, expected succesful test result.", ex);
  }
}
 
Developer ID: nucypher, Project: hadoop-oss, Lines of code: 26, Source: TestSSLHttpServer.java

Example 3: main

import org.apache.hadoop.io.IOUtils; // import the required package/class
public static void main(String[] args) throws IOException {
    Configuration conf = new Configuration();
    conf.addResource("core-site.xml");
    conf.addResource("hdfs-site.xml");
    conf.addResource("yarn-site.xml");
    // If Kerberos is not enabled, comment out the following two lines
    UserGroupInformation.setConfiguration(conf);
    UserGroupInformation.loginUserFromKeytab("[email protected]","E:\\星环\\任务\\2016年11月28日\\hdfs.keytab");
    String localFile = "E:\\星环\\yarn-site.xml";
    InputStream in = new BufferedInputStream(new FileInputStream(localFile));
    Path p = new Path( "/tmp/yarn-site.xml");
    FileSystem fs = p.getFileSystem(conf);
    OutputStream out = fs.create(p);
    IOUtils.copyBytes(in, out, conf);
    fs.close();
    IOUtils.closeStream(in);
}
 
Developer ID: Transwarp-DE, Project: Transwarp-Sample-Code, Lines of code: 18, Source: UploadFile.java
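
As a complement to the upload flow above, the sketch below streams the same HDFS file back to standard output with the identical IOUtils.copyBytes idiom. This class is an illustrative assumption, not part of the Transwarp sample project.

import java.io.IOException;
import java.io.InputStream;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IOUtils;

public class DownloadFile {
    public static void main(String[] args) throws IOException {
        Configuration conf = new Configuration();
        conf.addResource("core-site.xml");
        conf.addResource("hdfs-site.xml");
        Path p = new Path("/tmp/yarn-site.xml"); // the path written by the upload example
        FileSystem fs = p.getFileSystem(conf);
        InputStream in = null;
        try {
            in = fs.open(p);
            // 'false' keeps System.out open after the copy finishes.
            IOUtils.copyBytes(in, System.out, 4096, false);
        } finally {
            IOUtils.closeStream(in);
        }
    }
}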

Example 4: testIOExceptionInWriterConstructor

import org.apache.hadoop.io.IOUtils; // import the required package/class
/**
 * test {@link BloomMapFile.Reader} constructor with 
 * IOException
 */
public void testIOExceptionInWriterConstructor() {
  Path dirNameSpy = spy(TEST_FILE);
  BloomMapFile.Reader reader = null;
  BloomMapFile.Writer writer = null;
  try {
    writer = new BloomMapFile.Writer(conf, TEST_FILE,
        MapFile.Writer.keyClass(IntWritable.class),
        MapFile.Writer.valueClass(Text.class));
    writer.append(new IntWritable(1), new Text("123124142"));
    writer.close();

    when(dirNameSpy.getFileSystem(conf)).thenThrow(new IOException());
    reader = new BloomMapFile.Reader(dirNameSpy, conf,
        MapFile.Reader.comparator(new WritableComparator(IntWritable.class)));

    assertNull("testIOExceptionInWriterConstructor error !!!",
        reader.getBloomFilter());
  } catch (Exception ex) {
    fail("unexpect ex in testIOExceptionInWriterConstructor !!!");
  } finally {
    IOUtils.cleanup(null, writer, reader);
  }
}
 
Developer ID: naver, Project: hadoop, Lines of code: 28, Source: TestBloomMapFile.java

Example 5: checkResult

import org.apache.hadoop.io.IOUtils; // import the required package/class
private void checkResult(Path listFile, int count) throws IOException {
  if (count == 0) {
    return;
  }

  int recCount = 0;
  SequenceFile.Reader reader = new SequenceFile.Reader(config,
                                          SequenceFile.Reader.file(listFile));
  try {
    Text relPath = new Text();
    CopyListingFileStatus fileStatus = new CopyListingFileStatus();
    while (reader.next(relPath, fileStatus)) {
      if (fileStatus.isDirectory() && relPath.toString().equals("")) {
        // ignore root with empty relPath, which is an entry to be 
        // used for preserving root attributes etc.
        continue;
      }
      Assert.assertEquals(fileStatus.getPath().toUri().getPath(), map.get(relPath.toString()));
      recCount++;
    }
  } finally {
    IOUtils.closeStream(reader);
  }
  Assert.assertEquals(recCount, count);
}
 
Developer ID: naver, Project: hadoop, Lines of code: 26, Source: TestFileBasedCopyListing.java
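
For context, the listing consumed by checkResult is an ordinary (Text, CopyListingFileStatus) SequenceFile. The fragment below is a minimal sketch of writing such a file by hand; the path and the single appended entry are illustrative assumptions (in DistCp the listing is produced by CopyListing, not written manually).

// Minimal sketch, assuming an illustrative path and entry.
SequenceFile.Writer writer = SequenceFile.createWriter(config,
    SequenceFile.Writer.file(new Path("/tmp/listing.seq")),
    SequenceFile.Writer.keyClass(Text.class),
    SequenceFile.Writer.valueClass(CopyListingFileStatus.class));
try {
  writer.append(new Text("/dir/file1"), new CopyListingFileStatus());
} finally {
  IOUtils.closeStream(writer); // SequenceFile.Writer is Closeable
}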

Example 6: disableBlockPoolId

import org.apache.hadoop.io.IOUtils; // import the required package/class
/**
 * Disallow the scanner from scanning the given block pool.
 *
 * @param bpid       The block pool id.
 */
public synchronized void disableBlockPoolId(String bpid) {
  Iterator<BlockIterator> i = blockIters.iterator();
  while (i.hasNext()) {
    BlockIterator iter = i.next();
    if (iter.getBlockPoolId().equals(bpid)) {
      LOG.trace("{}: disabling scanning on block pool {}", this, bpid);
      i.remove();
      IOUtils.cleanup(null, iter);
      if (curBlockIter == iter) {
        curBlockIter = null;
      }
      notify();
      return;
    }
  }
  LOG.warn("{}: can't remove block pool {}, because it was never " +
      "added.", this, bpid);
}
 
Developer ID: naver, Project: hadoop, Lines of code: 24, Source: VolumeScanner.java

Example 7: storeToken

import org.apache.hadoop.io.IOUtils; // import the required package/class
@Override
public void storeToken(MRDelegationTokenIdentifier tokenId, Long renewDate)
    throws IOException {
  if (LOG.isDebugEnabled()) {
    LOG.debug("Storing token " + tokenId.getSequenceNumber());
  }

  ByteArrayOutputStream memStream = new ByteArrayOutputStream();
  DataOutputStream dataStream = new DataOutputStream(memStream);
  try {
    tokenId.write(dataStream);
    dataStream.writeLong(renewDate);
    dataStream.close();
    dataStream = null;
  } finally {
    IOUtils.cleanup(LOG, dataStream);
  }

  String dbKey = getTokenDatabaseKey(tokenId);
  try {
    db.put(bytes(dbKey), memStream.toByteArray());
  } catch (DBException e) {
    throw new IOException(e);
  }
}
 
Developer ID: naver, Project: hadoop, Lines of code: 26, Source: HistoryServerLeveldbStateStoreService.java
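
A hypothetical read path for the record written above would reverse the steps: wrap the stored bytes in a DataInputStream, let the identifier deserialize itself, then read the trailing renew date. The loadToken method below is an illustrative sketch; its name and signature are assumptions, not the actual HistoryServerLeveldbStateStoreService code.

// Illustrative sketch only: method name and signature are assumed.
private long loadToken(MRDelegationTokenIdentifier tokenId, byte[] data)
    throws IOException {
  DataInputStream in = new DataInputStream(new ByteArrayInputStream(data));
  try {
    tokenId.readFields(in); // restore the identifier's fields
    return in.readLong();   // the trailing long is the renew date
  } finally {
    IOUtils.cleanup(LOG, in);
  }
}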

Example 8: testCleanupOldJars

import org.apache.hadoop.io.IOUtils; // import the required package/class
@Test
public void testCleanupOldJars() throws Exception {
  String className = "TestCleanupOldJars";
  String folder = TEST_UTIL.getDataTestDir().toString();
  File jarFile = ClassLoaderTestHelper.buildJar(
    folder, className, null, ClassLoaderTestHelper.localDirPath(conf));
  File tmpJarFile = new File(jarFile.getParent(), "/tmp/" + className + ".test.jar");
  if (tmpJarFile.exists()) tmpJarFile.delete();
  assertFalse("tmp jar file should not exist", tmpJarFile.exists());
  IOUtils.copyBytes(new FileInputStream(jarFile),
    new FileOutputStream(tmpJarFile), conf, true);
  assertTrue("tmp jar file should be created", tmpJarFile.exists());
  Path path = new Path(jarFile.getAbsolutePath());
  ClassLoader parent = TestCoprocessorClassLoader.class.getClassLoader();
  CoprocessorClassLoader.parentDirLockSet.clear(); // So that clean up can be triggered
  ClassLoader classLoader = CoprocessorClassLoader.getClassLoader(path, parent, "111", conf);
  assertNotNull("Classloader should be created", classLoader);
  assertFalse("tmp jar file should be removed", tmpJarFile.exists());
}
 
Developer ID: fengchen8086, Project: ditb, Lines of code: 20, Source: TestCoprocessorClassLoader.java

Example 9: uncompressTags

import org.apache.hadoop.io.IOUtils; // import the required package/class
/**
 * Uncompress tags from the InputStream and write them to the destination array.
 * @param src Stream from which the compressed tags are read
 * @param dest Destination array where the uncompressed tags are written
 * @param offset Offset in destination where tags are to be written
 * @param length Length of all tag bytes
 * @throws IOException
 */
public void uncompressTags(InputStream src, byte[] dest, int offset, int length)
    throws IOException {
  int endOffset = offset + length;
  while (offset < endOffset) {
    byte status = (byte) src.read();
    if (status == Dictionary.NOT_IN_DICTIONARY) {
      int tagLen = StreamUtils.readRawVarint32(src);
      offset = Bytes.putAsShort(dest, offset, tagLen);
      IOUtils.readFully(src, dest, offset, tagLen);
      tagDict.addEntry(dest, offset, tagLen);
      offset += tagLen;
    } else {
      short dictIdx = StreamUtils.toShort(status, (byte) src.read());
      byte[] entry = tagDict.getEntry(dictIdx);
      if (entry == null) {
        throw new IOException("Missing dictionary entry for index " + dictIdx);
      }
      offset = Bytes.putAsShort(dest, offset, entry.length);
      System.arraycopy(entry, 0, dest, offset, entry.length);
      offset += entry.length;
    }
  }
}
 
Developer ID: fengchen8086, Project: ditb, Lines of code: 32, Source: TagCompressionContext.java
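
Note that uncompressTags reads tag bytes with IOUtils.readFully rather than a bare InputStream.read: a single read call may legally return fewer bytes than requested, while readFully loops until the requested region is filled or throws an EOFException. A minimal, self-contained demonstration of that guarantee, using an in-memory stream as a stand-in for real input:

import java.io.ByteArrayInputStream;
import java.io.InputStream;
import java.util.Arrays;

import org.apache.hadoop.io.IOUtils;

public class ReadFullyDemo {
    public static void main(String[] args) throws Exception {
        InputStream src = new ByteArrayInputStream(new byte[] {1, 2, 3, 4, 5});
        byte[] dest = new byte[5];
        // Blocks until all five bytes have been read; throws EOFException if
        // the stream ends first. A plain src.read(dest) could return early
        // with only part of the buffer filled.
        IOUtils.readFully(src, dest, 0, dest.length);
        System.out.println(Arrays.toString(dest)); // [1, 2, 3, 4, 5]
    }
}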

Example 10: testMidKeyOnCurrentApi

import org.apache.hadoop.io.IOUtils; // import the required package/class
/**
 * test {@code MapFile.Reader.midKey() } method 
 */
@Test
public void testMidKeyOnCurrentApi() throws Exception {
  // Write a mapfile of simple data: keys are
  final String TEST_PREFIX = "testMidKeyOnCurrentApi.mapfile";
  MapFile.Writer writer = null;
  MapFile.Reader reader = null;
  try {
    writer = createWriter(TEST_PREFIX, IntWritable.class, IntWritable.class);
    // 0,1,....9
    int SIZE = 10;
    for (int i = 0; i < SIZE; i++)
      writer.append(new IntWritable(i), new IntWritable(i));
    writer.close();

    reader = createReader(TEST_PREFIX, IntWritable.class);
    assertEquals(new IntWritable((SIZE - 1) / 2), reader.midKey());
  } finally {
    IOUtils.cleanup(null, writer, reader);
  }
}
 
Developer ID: naver, Project: hadoop, Lines of code: 24, Source: TestMapFile.java

Example 11: confirmCanAppend

import org.apache.hadoop.io.IOUtils; // import the required package/class
/**
 * Ensure that even if a file is in a directory with the sticky bit on,
 * another user can write to that file (assuming correct permissions).
 */
private void confirmCanAppend(Configuration conf, Path p) throws Exception {
  // Write a file to the new tmp directory as a regular user
  Path file = new Path(p, "foo");
  writeFile(hdfsAsUser1, file);
  hdfsAsUser1.setPermission(file, new FsPermission((short) 0777));

  // Log onto cluster as another user and attempt to append to file
  Path file2 = new Path(p, "foo");
  FSDataOutputStream h = null;
  try {
    h = hdfsAsUser2.append(file2);
    h.write("Some more data".getBytes());
    h.close();
    h = null;
  } finally {
    IOUtils.cleanup(null, h);
  }
}
 
Developer ID: naver, Project: hadoop, Lines of code: 23, Source: TestStickyBit.java

Example 12: testRmForceWithNonexistentGlob

import org.apache.hadoop.io.IOUtils; // import the required package/class
@Test (timeout = 30000)
public void testRmForceWithNonexistentGlob() throws Exception {
  Configuration conf = new Configuration();
  FsShell shell = new FsShell();
  shell.setConf(conf);
  final ByteArrayOutputStream bytes = new ByteArrayOutputStream();
  final PrintStream err = new PrintStream(bytes);
  final PrintStream oldErr = System.err;
  System.setErr(err);
  try {
    int exit = shell.run(new String[]{"-rm", "-f", "nomatch*"});
    assertEquals(0, exit);
    assertTrue(bytes.toString().isEmpty());
  } finally {
    IOUtils.closeStream(err);
    System.setErr(oldErr);
  }
}
 
Developer ID: nucypher, Project: hadoop-oss, Lines of code: 19, Source: TestFsShellReturnCode.java

Example 13: testOpenFileTwice

import org.apache.hadoop.io.IOUtils; // import the required package/class
@Test
public void testOpenFileTwice() throws Throwable {
  describe("verify that two opened file streams are independent");
  Path path = path("testopenfiletwice.txt");
  byte[] block = dataset(TEST_FILE_LEN, 0, 255);
  //this file now has a simple rule: offset => value
  createFile(getFileSystem(), path, false, block);
  //open first
  FSDataInputStream instream1 = getFileSystem().open(path);
  int c = instream1.read();
  assertEquals(0,c);
  FSDataInputStream instream2 = null;
  try {
    instream2 = getFileSystem().open(path);
    assertEquals("first read of instream 2", 0, instream2.read());
    assertEquals("second read of instream 1", 1, instream1.read());
    instream1.close();
    assertEquals("second read of instream 2", 1, instream2.read());
    //close instream1 again
    instream1.close();
  } finally {
    IOUtils.closeStream(instream1);
    IOUtils.closeStream(instream2);
  }
}
 
Developer ID: nucypher, Project: hadoop-oss, Lines of code: 26, Source: AbstractContractOpenTest.java

Example 14: testSaveNamespaceWithRenamedLease

import org.apache.hadoop.io.IOUtils; // import the required package/class
/**
 * Test that save namespace succeeds when a parent directory is renamed
 * while holding an open lease and the destination directory exists.
 * This is a regression test for HDFS-2827.
 */
@Test
public void testSaveNamespaceWithRenamedLease() throws Exception {
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(new Configuration())
      .numDataNodes(1).build();
  cluster.waitActive();
  DistributedFileSystem fs = (DistributedFileSystem) cluster.getFileSystem();
  OutputStream out = null;
  try {
    fs.mkdirs(new Path("/test-target"));
    out = fs.create(new Path("/test-source/foo")); // don't close
    fs.rename(new Path("/test-source/"), new Path("/test-target/"));

    fs.setSafeMode(SafeModeAction.SAFEMODE_ENTER);
    cluster.getNameNodeRpc().saveNamespace();
    fs.setSafeMode(SafeModeAction.SAFEMODE_LEAVE);
  } finally {
    IOUtils.cleanup(LOG, out, fs);
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
 
Developer ID: naver, Project: hadoop, Lines of code: 28, Source: TestSaveNamespace.java

Example 15: testExclusiveEnabledCiphers

import org.apache.hadoop.io.IOUtils; // import the required package/class
/** Test that verifies a TLS connection can still be established when the
 * server's disabled cipher suites and the client's enabled cipher suites
 * are mutually exclusive.
 */
@Test
public void testExclusiveEnabledCiphers() throws Exception {
  URL url = new URL(baseUrl, "/echo?a=b&c=d");
  HttpsURLConnection conn = (HttpsURLConnection) url.openConnection();
  SSLSocketFactory sslSocketF = clientSslFactory.createSSLSocketFactory();
  PrefferedCipherSSLSocketFactory testPreferredCipherSSLSocketF
      = new PrefferedCipherSSLSocketFactory(sslSocketF,
          exclusiveEnabledCiphers.split(","));
  conn.setSSLSocketFactory(testPreferredCipherSSLSocketF);
  assertFalse("excludedCipher list is empty",
      exclusiveEnabledCiphers.isEmpty());
  try {
    InputStream in = conn.getInputStream();
    ByteArrayOutputStream out = new ByteArrayOutputStream();
    IOUtils.copyBytes(in, out, 1024);
    assertEquals(out.toString(), "a:b\nc:d\n");
    LOG.info("Atleast one additional enabled cipher than excluded ciphers,"
        + " expected successful test result.");
  } catch (SSLHandshakeException ex) {
    fail("Atleast one additional cipher available for successful handshake."
        + " Unexpected test failure: " + ex);
  }
}
 
Developer ID: nucypher, Project: hadoop-oss, Lines of code: 27, Source: TestSSLHttpServer.java


Note: The org.apache.hadoop.io.IOUtils class examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other open-source code and documentation platforms. The code snippets were selected from open-source projects contributed by many developers; copyright of the source code belongs to the original authors. Refer to each project's License before distributing or using the code; do not repost without permission.