

Java TemporarySocketDirectory.close Method Code Examples

This article collects typical usage examples of the Java method org.apache.hadoop.net.unix.TemporarySocketDirectory.close. If you are wondering what TemporarySocketDirectory.close does or how to use it, the curated code examples below should help. You can also explore other usage examples of the enclosing class, org.apache.hadoop.net.unix.TemporarySocketDirectory.


The sections below present 15 code examples of TemporarySocketDirectory.close, sorted by popularity by default. You can upvote any example you like or find useful; your feedback helps the system recommend better Java code examples.
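Before diving into the individual examples, here is a minimal sketch of the lifecycle they all share: allocate a TemporarySocketDirectory, point the HDFS domain-socket path into it, and call close() after the mini cluster shuts down so the socket files and the directory itself are removed. This is a sketch only, not code from any of the projects below; the class name TemporarySocketDirectoryCloseSketch is made up for illustration, and the literal key "dfs.domain.socket.path" corresponds to the DFSConfigKeys.DFS_DOMAIN_SOCKET_PATH_KEY constant used in Example 1.

import java.io.File;
import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.net.unix.TemporarySocketDirectory;

public class TemporarySocketDirectoryCloseSketch {
  public static void main(String[] args) throws IOException {
    // Holds UNIX domain socket files for the lifetime of the test.
    TemporarySocketDirectory sockDir = new TemporarySocketDirectory();
    Configuration conf = new Configuration();
    // "dfs.domain.socket.path" is the key behind
    // DFSConfigKeys.DFS_DOMAIN_SOCKET_PATH_KEY used in Example 1;
    // the "_PORT" placeholder is replaced with the datanode's port.
    conf.set("dfs.domain.socket.path",
        new File(sockDir.getDir(), "dn._PORT.sock").getAbsolutePath());
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
    try {
      cluster.waitActive();
      // ... perform reads over the domain socket here ...
    } finally {
      // Shut the cluster down first so nothing still holds the sockets,
      // then close() to delete the socket files and the directory itself.
      cluster.shutdown();
      sockDir.close();
    }
  }
}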

Example 1: testSkipWithLocalBlockReader

import org.apache.hadoop.net.unix.TemporarySocketDirectory; // import the package/class this method depends on
@Test(timeout=60000)
public void testSkipWithLocalBlockReader() throws IOException {
  Assume.assumeThat(DomainSocket.getLoadingFailureReason(), equalTo(null));
  TemporarySocketDirectory sockDir = new TemporarySocketDirectory();
  DomainSocket.disableBindPathValidation();
  Configuration conf = new Configuration();
  conf.setBoolean(HdfsClientConfigKeys.Read.ShortCircuit.KEY, true);
  conf.set(DFSConfigKeys.DFS_DOMAIN_SOCKET_PATH_KEY,
      new File(sockDir.getDir(),
        "TestShortCircuitLocalRead._PORT.sock").getAbsolutePath());
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
  try {
    DFSInputStream.tcpReadsDisabledForTesting = true;
    testSkipInner(cluster);
  } finally {
    DFSInputStream.tcpReadsDisabledForTesting = false;
    cluster.shutdown();
    sockDir.close();
  }
}
 
Developer: aliyun-beta, Project: aliyun-oss-hadoop-fs, Lines: 21, Source: TestDFSInputStream.java

Example 2: testDataXceiverCleansUpSlotsOnFailure

import org.apache.hadoop.net.unix.TemporarySocketDirectory; // import the package/class this method depends on
@Test(timeout=60000)
public void testDataXceiverCleansUpSlotsOnFailure() throws Exception {
  BlockReaderTestUtil.enableShortCircuitShmTracing();
  TemporarySocketDirectory sockDir = new TemporarySocketDirectory();
  Configuration conf = createShortCircuitConf(
      "testDataXceiverCleansUpSlotsOnFailure", sockDir);
  conf.setLong(DFS_CLIENT_READ_SHORTCIRCUIT_STREAMS_CACHE_EXPIRY_MS_KEY,
      1000000000L);
  MiniDFSCluster cluster =
      new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
  cluster.waitActive();
  DistributedFileSystem fs = cluster.getFileSystem();
  final Path TEST_PATH1 = new Path("/test_file1");
  final Path TEST_PATH2 = new Path("/test_file2");
  final int TEST_FILE_LEN = 4096;
  final int SEED = 0xFADE1;
  DFSTestUtil.createFile(fs, TEST_PATH1, TEST_FILE_LEN,
      (short)1, SEED);
  DFSTestUtil.createFile(fs, TEST_PATH2, TEST_FILE_LEN,
      (short)1, SEED);

  // The first read should allocate one shared memory segment and slot.
  DFSTestUtil.readFileBuffer(fs, TEST_PATH1);

  // The second read should fail, and we should only have 1 segment and 1 slot
  // left.
  fs.getClient().getConf().brfFailureInjector =
      new TestCleanupFailureInjector();
  try {
    DFSTestUtil.readFileBuffer(fs, TEST_PATH2);
  } catch (Throwable t) {
    GenericTestUtils.assertExceptionContains("TCP reads were disabled for " +
        "testing, but we failed to do a non-TCP read.", t);
  }
  checkNumberOfSegmentsAndSlots(1, 1,
      cluster.getDataNodes().get(0).getShortCircuitRegistry());
  cluster.shutdown();
  sockDir.close();
}
 
Developer: naver, Project: hadoop, Lines: 40, Source: TestShortCircuitCache.java

Example 3: testPreReceiptVerificationDfsClientCanDoScr

import org.apache.hadoop.net.unix.TemporarySocketDirectory; // import the package/class this method depends on
@Test(timeout=60000)
public void testPreReceiptVerificationDfsClientCanDoScr() throws Exception {
  BlockReaderTestUtil.enableShortCircuitShmTracing();
  TemporarySocketDirectory sockDir = new TemporarySocketDirectory();
  Configuration conf = createShortCircuitConf(
      "testPreReceiptVerificationDfsClientCanDoScr", sockDir);
  conf.setLong(DFS_CLIENT_READ_SHORTCIRCUIT_STREAMS_CACHE_EXPIRY_MS_KEY,
      1000000000L);
  MiniDFSCluster cluster =
      new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
  cluster.waitActive();
  DistributedFileSystem fs = cluster.getFileSystem();
  fs.getClient().getConf().brfFailureInjector =
      new TestPreReceiptVerificationFailureInjector();
  final Path TEST_PATH1 = new Path("/test_file1");
  DFSTestUtil.createFile(fs, TEST_PATH1, 4096, (short)1, 0xFADE2);
  final Path TEST_PATH2 = new Path("/test_file2");
  DFSTestUtil.createFile(fs, TEST_PATH2, 4096, (short)1, 0xFADE2);
  DFSTestUtil.readFileBuffer(fs, TEST_PATH1);
  DFSTestUtil.readFileBuffer(fs, TEST_PATH2);
  ShortCircuitRegistry registry =
      cluster.getDataNodes().get(0).getShortCircuitRegistry();
  registry.visit(new ShortCircuitRegistry.Visitor() {
    @Override
    public void accept(HashMap<ShmId, RegisteredShm> segments,
                       HashMultimap<ExtendedBlockId, Slot> slots) {
      Assert.assertEquals(1, segments.size());
      Assert.assertEquals(2, slots.size());
    }
  });
  cluster.shutdown();
  sockDir.close();
}
 
Developer: naver, Project: hadoop, Lines: 34, Source: TestShortCircuitCache.java

Example 4: testFallbackFromShortCircuitToUnixDomainTraffic

import org.apache.hadoop.net.unix.TemporarySocketDirectory; // import the package/class this method depends on
/**
 * If we have a UNIX domain socket configured,
 * and we have dfs.client.domain.socket.data.traffic set to true,
 * and short-circuit access fails, we should still be able to pass
 * data traffic over the UNIX domain socket.  Test this.
 */
@Test(timeout=60000)
public void testFallbackFromShortCircuitToUnixDomainTraffic()
    throws Exception {
  DFSInputStream.tcpReadsDisabledForTesting = true;
  TemporarySocketDirectory sockDir = new TemporarySocketDirectory();

  // The server is NOT configured with short-circuit local reads;
  // the client is.  Both support UNIX domain reads.
  Configuration clientConf = createShortCircuitConf(
      "testFallbackFromShortCircuitToUnixDomainTraffic", sockDir);
  clientConf.set(DFS_CLIENT_CONTEXT,
      "testFallbackFromShortCircuitToUnixDomainTraffic_clientContext");
  clientConf.setBoolean(DFS_CLIENT_DOMAIN_SOCKET_DATA_TRAFFIC, true);
  Configuration serverConf = new Configuration(clientConf);
  serverConf.setBoolean(DFS_CLIENT_READ_SHORTCIRCUIT_KEY, false);

  MiniDFSCluster cluster =
      new MiniDFSCluster.Builder(serverConf).numDataNodes(1).build();
  cluster.waitActive();
  FileSystem dfs = FileSystem.get(cluster.getURI(0), clientConf);
  String TEST_FILE = "/test_file";
  final int TEST_FILE_LEN = 8193;
  final int SEED = 0xFADED;
  DFSTestUtil.createFile(dfs, new Path(TEST_FILE), TEST_FILE_LEN,
      (short)1, SEED);
  byte contents[] = DFSTestUtil.readFileBuffer(dfs, new Path(TEST_FILE));
  byte expected[] = DFSTestUtil.
      calculateFileContentsFromSeed(SEED, TEST_FILE_LEN);
  Assert.assertTrue(Arrays.equals(contents, expected));
  cluster.shutdown();
  sockDir.close();
}
 
Developer: naver, Project: hadoop, Lines: 39, Source: TestBlockReaderFactory.java

Example 5: testShortCircuitReadFromClientWithoutShm

import org.apache.hadoop.net.unix.TemporarySocketDirectory; // import the package/class this method depends on
/**
 * Test that a client which does not support short-circuit reads using
 * shared memory can talk with a server which supports it.
 */
@Test
public void testShortCircuitReadFromClientWithoutShm() throws Exception {
  TemporarySocketDirectory sockDir = new TemporarySocketDirectory();
  Configuration clientConf = createShortCircuitConf(
      "testShortCircuitReadWithoutShm", sockDir);
  Configuration serverConf = new Configuration(clientConf);
  DFSInputStream.tcpReadsDisabledForTesting = true;
  final MiniDFSCluster cluster =
      new MiniDFSCluster.Builder(serverConf).numDataNodes(1).build();
  cluster.waitActive();
  clientConf.setInt(
      DFS_SHORT_CIRCUIT_SHARED_MEMORY_WATCHER_INTERRUPT_CHECK_MS, 0);
  clientConf.set(DFS_CLIENT_CONTEXT,
      "testShortCircuitReadFromClientWithoutShm_clientContext");
  final DistributedFileSystem fs =
      (DistributedFileSystem)FileSystem.get(cluster.getURI(0), clientConf);
  final String TEST_FILE = "/test_file";
  final int TEST_FILE_LEN = 4000;
  final int SEED = 0xFADEC;
  DFSTestUtil.createFile(fs, new Path(TEST_FILE), TEST_FILE_LEN,
      (short)1, SEED);
  byte contents[] = DFSTestUtil.readFileBuffer(fs, new Path(TEST_FILE));
  byte expected[] = DFSTestUtil.
      calculateFileContentsFromSeed(SEED, TEST_FILE_LEN);
  Assert.assertTrue(Arrays.equals(contents, expected));
  final ShortCircuitCache cache =
      fs.dfs.getClientContext().getShortCircuitCache();
  Assert.assertEquals(null, cache.getDfsClientShmManager());
  cluster.shutdown();
  sockDir.close();
}
 
Developer: naver, Project: hadoop, Lines: 36, Source: TestBlockReaderFactory.java

Example 6: testShortCircuitCacheShutdown

import org.apache.hadoop.net.unix.TemporarySocketDirectory; // import the package/class this method depends on
/**
 * Test shutting down the ShortCircuitCache while there are things in it.
 */
@Test
public void testShortCircuitCacheShutdown() throws Exception {
  TemporarySocketDirectory sockDir = new TemporarySocketDirectory();
  Configuration conf = createShortCircuitConf(
      "testShortCircuitCacheShutdown", sockDir);
  conf.set(DFS_CLIENT_CONTEXT, "testShortCircuitCacheShutdown");
  Configuration serverConf = new Configuration(conf);
  DFSInputStream.tcpReadsDisabledForTesting = true;
  final MiniDFSCluster cluster =
      new MiniDFSCluster.Builder(serverConf).numDataNodes(1).build();
  cluster.waitActive();
  final DistributedFileSystem fs =
      (DistributedFileSystem)FileSystem.get(cluster.getURI(0), conf);
  final String TEST_FILE = "/test_file";
  final int TEST_FILE_LEN = 4000;
  final int SEED = 0xFADEC;
  DFSTestUtil.createFile(fs, new Path(TEST_FILE), TEST_FILE_LEN,
      (short)1, SEED);
  byte contents[] = DFSTestUtil.readFileBuffer(fs, new Path(TEST_FILE));
  byte expected[] = DFSTestUtil.
      calculateFileContentsFromSeed(SEED, TEST_FILE_LEN);
  Assert.assertTrue(Arrays.equals(contents, expected));
  final ShortCircuitCache cache =
      fs.dfs.getClientContext().getShortCircuitCache();
  cache.close();
  Assert.assertTrue(cache.getDfsClientShmManager().
      getDomainSocketWatcher().isClosed());
  cluster.shutdown();
  sockDir.close();
}
 
Developer: naver, Project: hadoop, Lines: 34, Source: TestBlockReaderFactory.java

Example 7: testBothOldAndNewShortCircuitConfigured

import org.apache.hadoop.net.unix.TemporarySocketDirectory; // import the package/class this method depends on
@Test
public void testBothOldAndNewShortCircuitConfigured() throws Exception {
  final short REPL_FACTOR = 1;
  final int FILE_LENGTH = 512;
  Assume.assumeTrue(null == DomainSocket.getLoadingFailureReason());
  TemporarySocketDirectory socketDir = new TemporarySocketDirectory();
  HdfsConfiguration conf = getConfiguration(socketDir);
  MiniDFSCluster cluster =
      new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
  cluster.waitActive();
  socketDir.close();
  FileSystem fs = cluster.getFileSystem();

  Path path = new Path("/foo");
  byte orig[] = new byte[FILE_LENGTH];
  for (int i = 0; i < orig.length; i++) {
    orig[i] = (byte)(i%10);
  }
  FSDataOutputStream fos = fs.create(path, (short)1);
  fos.write(orig);
  fos.close();
  DFSTestUtil.waitReplication(fs, path, REPL_FACTOR);
  FSDataInputStream fis = cluster.getFileSystem().open(path);
  byte buf[] = new byte[FILE_LENGTH];
  IOUtils.readFully(fis, buf, 0, FILE_LENGTH);
  fis.close();
  Assert.assertArrayEquals(orig, buf);
  cluster.shutdown();
}
 
Developer: naver, Project: hadoop, Lines: 30, Source: TestBlockReaderLocalLegacy.java

Example 8: testDataXceiverCleansUpSlotsOnFailure

import org.apache.hadoop.net.unix.TemporarySocketDirectory; // import the package/class this method depends on
@Test(timeout=60000)
public void testDataXceiverCleansUpSlotsOnFailure() throws Exception {
  BlockReaderTestUtil.enableShortCircuitShmTracing();
  TemporarySocketDirectory sockDir = new TemporarySocketDirectory();
  Configuration conf = createShortCircuitConf(
      "testDataXceiverCleansUpSlotsOnFailure", sockDir);
  conf.setLong(
      HdfsClientConfigKeys.Read.ShortCircuit.STREAMS_CACHE_EXPIRY_MS_KEY,
      1000000000L);
  MiniDFSCluster cluster =
      new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
  cluster.waitActive();
  DistributedFileSystem fs = cluster.getFileSystem();
  final Path TEST_PATH1 = new Path("/test_file1");
  final Path TEST_PATH2 = new Path("/test_file2");
  final int TEST_FILE_LEN = 4096;
  final int SEED = 0xFADE1;
  DFSTestUtil.createFile(fs, TEST_PATH1, TEST_FILE_LEN,
      (short)1, SEED);
  DFSTestUtil.createFile(fs, TEST_PATH2, TEST_FILE_LEN,
      (short)1, SEED);

  // The first read should allocate one shared memory segment and slot.
  DFSTestUtil.readFileBuffer(fs, TEST_PATH1);

  // The second read should fail, and we should only have 1 segment and 1 slot
  // left.
  BlockReaderFactory.setFailureInjectorForTesting(
      new TestCleanupFailureInjector());
  try {
    DFSTestUtil.readFileBuffer(fs, TEST_PATH2);
  } catch (Throwable t) {
    GenericTestUtils.assertExceptionContains("TCP reads were disabled for " +
        "testing, but we failed to do a non-TCP read.", t);
  }
  checkNumberOfSegmentsAndSlots(1, 1,
      cluster.getDataNodes().get(0).getShortCircuitRegistry());
  cluster.shutdown();
  sockDir.close();
}
 
Developer: aliyun-beta, Project: aliyun-oss-hadoop-fs, Lines: 41, Source: TestShortCircuitCache.java

Example 9: testPreReceiptVerificationDfsClientCanDoScr

import org.apache.hadoop.net.unix.TemporarySocketDirectory; // import the package/class this method depends on
@Test(timeout=60000)
public void testPreReceiptVerificationDfsClientCanDoScr() throws Exception {
  BlockReaderTestUtil.enableShortCircuitShmTracing();
  TemporarySocketDirectory sockDir = new TemporarySocketDirectory();
  Configuration conf = createShortCircuitConf(
      "testPreReceiptVerificationDfsClientCanDoScr", sockDir);
  conf.setLong(
      HdfsClientConfigKeys.Read.ShortCircuit.STREAMS_CACHE_EXPIRY_MS_KEY,
      1000000000L);
  MiniDFSCluster cluster =
      new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
  cluster.waitActive();
  DistributedFileSystem fs = cluster.getFileSystem();
  BlockReaderFactory.setFailureInjectorForTesting(
      new TestPreReceiptVerificationFailureInjector());
  final Path TEST_PATH1 = new Path("/test_file1");
  DFSTestUtil.createFile(fs, TEST_PATH1, 4096, (short)1, 0xFADE2);
  final Path TEST_PATH2 = new Path("/test_file2");
  DFSTestUtil.createFile(fs, TEST_PATH2, 4096, (short)1, 0xFADE2);
  DFSTestUtil.readFileBuffer(fs, TEST_PATH1);
  DFSTestUtil.readFileBuffer(fs, TEST_PATH2);
  ShortCircuitRegistry registry =
      cluster.getDataNodes().get(0).getShortCircuitRegistry();
  registry.visit(new ShortCircuitRegistry.Visitor() {
    @Override
    public void accept(HashMap<ShmId, RegisteredShm> segments,
                       HashMultimap<ExtendedBlockId, Slot> slots) {
      Assert.assertEquals(1, segments.size());
      Assert.assertEquals(2, slots.size());
    }
  });
  cluster.shutdown();
  sockDir.close();
}
 
Developer: aliyun-beta, Project: aliyun-oss-hadoop-fs, Lines: 35, Source: TestShortCircuitCache.java

Example 10: testFallbackFromShortCircuitToUnixDomainTraffic

import org.apache.hadoop.net.unix.TemporarySocketDirectory; // import the package/class this method depends on
/**
 * If we have a UNIX domain socket configured,
 * and we have dfs.client.domain.socket.data.traffic set to true,
 * and short-circuit access fails, we should still be able to pass
 * data traffic over the UNIX domain socket.  Test this.
 */
@Test(timeout=60000)
public void testFallbackFromShortCircuitToUnixDomainTraffic()
    throws Exception {
  DFSInputStream.tcpReadsDisabledForTesting = true;
  TemporarySocketDirectory sockDir = new TemporarySocketDirectory();

  // The server is NOT configured with short-circuit local reads;
  // the client is.  Both support UNIX domain reads.
  Configuration clientConf = createShortCircuitConf(
      "testFallbackFromShortCircuitToUnixDomainTraffic", sockDir);
  clientConf.set(DFS_CLIENT_CONTEXT,
      "testFallbackFromShortCircuitToUnixDomainTraffic_clientContext");
  clientConf.setBoolean(DFS_CLIENT_DOMAIN_SOCKET_DATA_TRAFFIC, true);
  Configuration serverConf = new Configuration(clientConf);
  serverConf.setBoolean(HdfsClientConfigKeys.Read.ShortCircuit.KEY, false);

  MiniDFSCluster cluster =
      new MiniDFSCluster.Builder(serverConf).numDataNodes(1).build();
  cluster.waitActive();
  FileSystem dfs = FileSystem.get(cluster.getURI(0), clientConf);
  String TEST_FILE = "/test_file";
  final int TEST_FILE_LEN = 8193;
  final int SEED = 0xFADED;
  DFSTestUtil.createFile(dfs, new Path(TEST_FILE), TEST_FILE_LEN,
      (short)1, SEED);
  byte contents[] = DFSTestUtil.readFileBuffer(dfs, new Path(TEST_FILE));
  byte expected[] = DFSTestUtil.
      calculateFileContentsFromSeed(SEED, TEST_FILE_LEN);
  Assert.assertTrue(Arrays.equals(contents, expected));
  cluster.shutdown();
  sockDir.close();
}
 
Developer: aliyun-beta, Project: aliyun-oss-hadoop-fs, Lines: 39, Source: TestBlockReaderFactory.java

Example 11: testCreateAndDestroySpanReceiver

import org.apache.hadoop.net.unix.TemporarySocketDirectory; // import the package/class this method depends on
@Test
public void testCreateAndDestroySpanReceiver() throws Exception {
  Configuration conf = new Configuration();
  conf.set(SpanReceiverHost.SPAN_RECEIVERS_CONF_KEY, "");
  MiniDFSCluster cluster =
      new MiniDFSCluster.Builder(conf).numDataNodes(3).build();
  cluster.waitActive();
  TemporarySocketDirectory tempDir = new TemporarySocketDirectory();
  String tracePath =
      new File(tempDir.getDir(), "tracefile").getAbsolutePath();
  try {
    TraceAdmin trace = new TraceAdmin();
    trace.setConf(conf);
    Assert.assertEquals("ret:0, [no span receivers found]" + NEWLINE,
        runTraceCommand(trace, "-list", "-host", getHostPortForNN(cluster)));
    Assert.assertEquals("ret:0, Added trace span receiver 1 with " +
        "configuration local-file-span-receiver.path = " + tracePath + NEWLINE,
        runTraceCommand(trace, "-add", "-host", getHostPortForNN(cluster),
            "-class", "org.htrace.impl.LocalFileSpanReceiver",
            "-Clocal-file-span-receiver.path=" + tracePath));
    String list =
        runTraceCommand(trace, "-list", "-host", getHostPortForNN(cluster));
    Assert.assertTrue(list.startsWith("ret:0"));
    Assert.assertTrue(list.contains("1   org.htrace.impl.LocalFileSpanReceiver"));
    Assert.assertEquals("ret:0, Removed trace span receiver 1" + NEWLINE,
        runTraceCommand(trace, "-remove", "1", "-host",
            getHostPortForNN(cluster)));
    Assert.assertEquals("ret:0, [no span receivers found]" + NEWLINE,
        runTraceCommand(trace, "-list", "-host", getHostPortForNN(cluster)));
  } finally {
    cluster.shutdown();
    tempDir.close();
  }
}
 
Developer: Nextzero, Project: hadoop-2.6.0-cdh5.4.3, Lines: 35, Source: TestTraceAdmin.java

Example 12: testDataXceiverHandlesRequestShortCircuitShmFailure

import org.apache.hadoop.net.unix.TemporarySocketDirectory; // import the package/class this method depends on
@Test(timeout=60000)
public void testDataXceiverHandlesRequestShortCircuitShmFailure()
    throws Exception {
  BlockReaderTestUtil.enableShortCircuitShmTracing();
  TemporarySocketDirectory sockDir = new TemporarySocketDirectory();
  Configuration conf = createShortCircuitConf(
      "testDataXceiverHandlesRequestShortCircuitShmFailure", sockDir);
  conf.setLong(DFS_CLIENT_READ_SHORTCIRCUIT_STREAMS_CACHE_EXPIRY_MS_KEY,
      1000000000L);
  MiniDFSCluster cluster =
      new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
  cluster.waitActive();
  DistributedFileSystem fs = cluster.getFileSystem();
  final Path TEST_PATH1 = new Path("/test_file1");
  DFSTestUtil.createFile(fs, TEST_PATH1, 4096,
      (short)1, 0xFADE1);
  LOG.info("Setting failure injector and performing a read which " +
      "should fail...");
  DataNodeFaultInjector failureInjector = Mockito.mock(DataNodeFaultInjector.class);
  Mockito.doAnswer(new Answer<Void>() {
    @Override
    public Void answer(InvocationOnMock invocation) throws Throwable {
      throw new IOException("injected error into sendShmResponse");
    }
  }).when(failureInjector).sendShortCircuitShmResponse();
  DataNodeFaultInjector prevInjector = DataNodeFaultInjector.instance;
  DataNodeFaultInjector.instance = failureInjector;

  try {
    // The first read will try to allocate a shared memory segment and slot.
    // The shared memory segment allocation will fail because of the failure
    // injector.
    DFSTestUtil.readFileBuffer(fs, TEST_PATH1);
    Assert.fail("expected readFileBuffer to fail, but it succeeded.");
  } catch (Throwable t) {
    GenericTestUtils.assertExceptionContains("TCP reads were disabled for " +
        "testing, but we failed to do a non-TCP read.", t);
  }

  checkNumberOfSegmentsAndSlots(0, 0,
      cluster.getDataNodes().get(0).getShortCircuitRegistry());

  LOG.info("Clearing failure injector and performing another read...");
  DataNodeFaultInjector.instance = prevInjector;

  fs.getClient().getClientContext().getDomainSocketFactory().clearPathMap();

  // The second read should succeed.
  DFSTestUtil.readFileBuffer(fs, TEST_PATH1);

  // We should have added a new short-circuit shared memory segment and slot.
  checkNumberOfSegmentsAndSlots(1, 1,
      cluster.getDataNodes().get(0).getShortCircuitRegistry());

  cluster.shutdown();
  sockDir.close();
}
 
Developer: naver, Project: hadoop, Lines: 58, Source: TestShortCircuitCache.java

Example 13: testMultipleWaitersOnShortCircuitCache

import org.apache.hadoop.net.unix.TemporarySocketDirectory; // import the package/class this method depends on
/**
 * Test the case where we have multiple threads waiting on the
 * ShortCircuitCache delivering a certain ShortCircuitReplica.
 *
 * In this case, there should only be one call to
 * createShortCircuitReplicaInfo.  This one replica should be shared
 * by all threads.
 */
@Test(timeout=60000)
public void testMultipleWaitersOnShortCircuitCache()
    throws Exception {
  final CountDownLatch latch = new CountDownLatch(1);
  final AtomicBoolean creationIsBlocked = new AtomicBoolean(true);
  final AtomicBoolean testFailed = new AtomicBoolean(false);
  DFSInputStream.tcpReadsDisabledForTesting = true;
  BlockReaderFactory.createShortCircuitReplicaInfoCallback =
    new ShortCircuitCache.ShortCircuitReplicaCreator() {
      @Override
      public ShortCircuitReplicaInfo createShortCircuitReplicaInfo() {
        Uninterruptibles.awaitUninterruptibly(latch);
        if (!creationIsBlocked.compareAndSet(true, false)) {
          Assert.fail("there were multiple calls to "
              + "createShortCircuitReplicaInfo.  Only one was expected.");
        }
        return null;
      }
    };
  TemporarySocketDirectory sockDir = new TemporarySocketDirectory();
  Configuration conf = createShortCircuitConf(
      "testMultipleWaitersOnShortCircuitCache", sockDir);
  MiniDFSCluster cluster =
      new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
  cluster.waitActive();
  final DistributedFileSystem dfs = cluster.getFileSystem();
  final String TEST_FILE = "/test_file";
  final int TEST_FILE_LEN = 4000;
  final int SEED = 0xFADED;
  final int NUM_THREADS = 10;
  DFSTestUtil.createFile(dfs, new Path(TEST_FILE), TEST_FILE_LEN,
      (short)1, SEED);
  Runnable readerRunnable = new Runnable() {
    @Override
    public void run() {
      try {
        byte contents[] = DFSTestUtil.readFileBuffer(dfs, new Path(TEST_FILE));
        Assert.assertFalse(creationIsBlocked.get());
        byte expected[] = DFSTestUtil.
            calculateFileContentsFromSeed(SEED, TEST_FILE_LEN);
        Assert.assertTrue(Arrays.equals(contents, expected));
      } catch (Throwable e) {
        LOG.error("readerRunnable error", e);
        testFailed.set(true);
      }
    }
  };
  Thread threads[] = new Thread[NUM_THREADS];
  for (int i = 0; i < NUM_THREADS; i++) {
    threads[i] = new Thread(readerRunnable);
    threads[i].start();
  }
  Thread.sleep(500);
  latch.countDown();
  for (int i = 0; i < NUM_THREADS; i++) {
    Uninterruptibles.joinUninterruptibly(threads[i]);
  }
  cluster.shutdown();
  sockDir.close();
  Assert.assertFalse(testFailed.get());
}
 
Developer: naver, Project: hadoop, Lines: 70, Source: TestBlockReaderFactory.java

Example 14: testShortCircuitReadFromServerWithoutShm

import org.apache.hadoop.net.unix.TemporarySocketDirectory; // import the package/class this method depends on
/**
 * Test that a client which supports short-circuit reads using
 * shared memory can fall back to not using shared memory when
 * the server doesn't support it.
 */
@Test
public void testShortCircuitReadFromServerWithoutShm() throws Exception {
  TemporarySocketDirectory sockDir = new TemporarySocketDirectory();
  Configuration clientConf = createShortCircuitConf(
      "testShortCircuitReadFromServerWithoutShm", sockDir);
  Configuration serverConf = new Configuration(clientConf);
  serverConf.setInt(
      DFS_SHORT_CIRCUIT_SHARED_MEMORY_WATCHER_INTERRUPT_CHECK_MS, 0);
  DFSInputStream.tcpReadsDisabledForTesting = true;
  final MiniDFSCluster cluster =
      new MiniDFSCluster.Builder(serverConf).numDataNodes(1).build();
  cluster.waitActive();
  clientConf.set(DFS_CLIENT_CONTEXT,
      "testShortCircuitReadFromServerWithoutShm_clientContext");
  final DistributedFileSystem fs =
      (DistributedFileSystem)FileSystem.get(cluster.getURI(0), clientConf);
  final String TEST_FILE = "/test_file";
  final int TEST_FILE_LEN = 4000;
  final int SEED = 0xFADEC;
  DFSTestUtil.createFile(fs, new Path(TEST_FILE), TEST_FILE_LEN,
      (short)1, SEED);
  byte contents[] = DFSTestUtil.readFileBuffer(fs, new Path(TEST_FILE));
  byte expected[] = DFSTestUtil.
      calculateFileContentsFromSeed(SEED, TEST_FILE_LEN);
  Assert.assertTrue(Arrays.equals(contents, expected));
  final ShortCircuitCache cache =
      fs.dfs.getClientContext().getShortCircuitCache();
  final DatanodeInfo datanode =
      new DatanodeInfo(cluster.getDataNodes().get(0).getDatanodeId());
  cache.getDfsClientShmManager().visit(new Visitor() {
    @Override
    public void visit(HashMap<DatanodeInfo, PerDatanodeVisitorInfo> info)
        throws IOException {
      Assert.assertEquals(1,  info.size());
      PerDatanodeVisitorInfo vinfo = info.get(datanode);
      Assert.assertTrue(vinfo.disabled);
      Assert.assertEquals(0, vinfo.full.size());
      Assert.assertEquals(0, vinfo.notFull.size());
    }
  });
  cluster.shutdown();
  sockDir.close();
}
 
Developer: naver, Project: hadoop, Lines: 49, Source: TestBlockReaderFactory.java

Example 15: testCreateAndDestroySpanReceiver

import org.apache.hadoop.net.unix.TemporarySocketDirectory; // import the package/class this method depends on
@Test
public void testCreateAndDestroySpanReceiver() throws Exception {
  Configuration conf = new Configuration();
  conf.set(TraceUtils.DEFAULT_HADOOP_PREFIX +
      Tracer.SPAN_RECEIVER_CLASSES_KEY, "");
  MiniDFSCluster cluster =
      new MiniDFSCluster.Builder(conf).numDataNodes(3).build();
  cluster.waitActive();
  TemporarySocketDirectory tempDir = new TemporarySocketDirectory();
  String tracePath =
      new File(tempDir.getDir(), "tracefile").getAbsolutePath();
  try {
    TraceAdmin trace = new TraceAdmin();
    trace.setConf(conf);
    Assert.assertEquals("ret:0, [no span receivers found]" + NEWLINE,
        runTraceCommand(trace, "-list", "-host", getHostPortForNN(cluster)));
    Assert.assertEquals("ret:0, Added trace span receiver 1 with " +
        "configuration hadoop.htrace.local.file.span.receiver.path = " + tracePath + NEWLINE,
        runTraceCommand(trace, "-add", "-host", getHostPortForNN(cluster),
            "-class", "org.apache.htrace.core.LocalFileSpanReceiver",
            "-Chadoop.htrace.local.file.span.receiver.path=" + tracePath));
    String list =
        runTraceCommand(trace, "-list", "-host", getHostPortForNN(cluster));
    Assert.assertTrue(list.startsWith("ret:0"));
    Assert.assertTrue(list.contains("1   org.apache.htrace.core.LocalFileSpanReceiver"));
    Assert.assertEquals("ret:0, Removed trace span receiver 1" + NEWLINE,
        runTraceCommand(trace, "-remove", "1", "-host",
            getHostPortForNN(cluster)));
    Assert.assertEquals("ret:0, [no span receivers found]" + NEWLINE,
        runTraceCommand(trace, "-list", "-host", getHostPortForNN(cluster)));
    Assert.assertEquals("ret:0, Added trace span receiver 2 with " +
        "configuration hadoop.htrace.local.file.span.receiver.path = " + tracePath + NEWLINE,
        runTraceCommand(trace, "-add", "-host", getHostPortForNN(cluster),
            "-class", "LocalFileSpanReceiver",
            "-Chadoop.htrace.local.file.span.receiver.path=" + tracePath));
    Assert.assertEquals("ret:0, Removed trace span receiver 2" + NEWLINE,
        runTraceCommand(trace, "-remove", "2", "-host",
            getHostPortForNN(cluster)));
  } finally {
    cluster.shutdown();
    tempDir.close();
  }
}
 
Developer: aliyun-beta, Project: aliyun-oss-hadoop-fs, Lines: 44, Source: TestTraceAdmin.java


Note: The org.apache.hadoop.net.unix.TemporarySocketDirectory.close method examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are selected from open-source projects contributed by their respective authors, and copyright remains with the original authors. Consult each project's license before using or redistributing the code; do not reproduce this compilation without permission.