

Java ExecutorCompletionService.take Method Code Examples

This article collects typical usage examples of the java.util.concurrent.ExecutorCompletionService.take method. If you are unsure what ExecutorCompletionService.take does, how to call it, or what real-world usage looks like, the curated examples below may help. You can also explore further usage examples of java.util.concurrent.ExecutorCompletionService itself.


A total of 12 code examples of the ExecutorCompletionService.take method are shown below, sorted by popularity by default.
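Before the project-specific examples, a minimal, self-contained sketch of the pattern they all share may be helpful: submit N tasks to an ExecutorCompletionService and call take() N times, receiving each Future in completion order. The class and task names below (CompletionServiceSketch, the squaring Callable) are illustrative only and do not come from any of the projects listed here.

import java.util.concurrent.Callable;
import java.util.concurrent.ExecutorCompletionService;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;
import java.util.concurrent.TimeUnit;

public class CompletionServiceSketch {
  public static void main(String[] args) throws Exception {
    ExecutorService exec = Executors.newFixedThreadPool(4);
    try {
      ExecutorCompletionService<Integer> ecs = new ExecutorCompletionService<>(exec);
      int tasks = 8;
      for (int i = 0; i < tasks; i++) {
        final int n = i;
        // submit a Callable; its result becomes available through take()
        ecs.submit(new Callable<Integer>() {
          @Override
          public Integer call() {
            return n * n;
          }
        });
      }
      for (int i = 0; i < tasks; i++) {
        // take() blocks until the next task finishes, in completion order
        Future<Integer> done = ecs.take();
        System.out.println("completed result: " + done.get());
      }
    } finally {
      exec.shutdown();
      exec.awaitTermination(5, TimeUnit.SECONDS);
    }
  }
}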

Example 1: testMultipleClients

import java.util.concurrent.ExecutorCompletionService; // import the required package/class
@Test
public void testMultipleClients() throws Exception {
  ExecutorService exec = Executors.newFixedThreadPool(NUM_THREADS);
  try {
    ExecutorCompletionService<Boolean> ecs =
        new ExecutorCompletionService<Boolean>(exec);
    for (int i = 0; i < NUM_THREADS; ++i)
      ecs.submit(new IdLockTestThread("client_" + i));
    for (int i = 0; i < NUM_THREADS; ++i) {
      Future<Boolean> result = ecs.take();
      assertTrue(result.get());
    }
    idLock.assertMapEmpty();
  } finally {
    exec.shutdown();
    exec.awaitTermination(5000, TimeUnit.MILLISECONDS);
  }
}
 
Developer: fengchen8086, Project: ditb, Lines: 19, Source: TestIdLock.java

Example 2: testMultipleClients

import java.util.concurrent.ExecutorCompletionService; // import the required package/class
@Test(timeout = 60000)
public void testMultipleClients() throws Exception {
  ExecutorService exec = Executors.newFixedThreadPool(NUM_THREADS);
  try {
    ExecutorCompletionService<Boolean> ecs =
        new ExecutorCompletionService<Boolean>(exec);
    for (int i = 0; i < NUM_THREADS; ++i)
      ecs.submit(new IdLockTestThread("client_" + i));
    for (int i = 0; i < NUM_THREADS; ++i) {
      Future<Boolean> result = ecs.take();
      assertTrue(result.get());
    }
    // make sure the entry pool will be cleared after GC and purge call
    int entryPoolSize = idLock.purgeAndGetEntryPoolSize();
    LOG.debug("Size of entry pool after gc and purge: " + entryPoolSize);
    assertEquals(0, entryPoolSize);
  } finally {
    exec.shutdown();
    exec.awaitTermination(5000, TimeUnit.MILLISECONDS);
  }
}
 
Developer: fengchen8086, Project: ditb, Lines: 22, Source: TestIdReadWriteLock.java

Example 3: testMultipleClients

import java.util.concurrent.ExecutorCompletionService; // import the required package/class
@Test
public void testMultipleClients() throws Exception {
  ExecutorService exec = Executors.newFixedThreadPool(NUM_THREADS);
  try {
    ExecutorCompletionService<Boolean> ecs = new ExecutorCompletionService<>(exec);
    for (int i = 0; i < NUM_THREADS; ++i)
      ecs.submit(new IdLockTestThread("client_" + i));
    for (int i = 0; i < NUM_THREADS; ++i) {
      Future<Boolean> result = ecs.take();
      assertTrue(result.get());
    }
    idLock.assertMapEmpty();
  } finally {
    exec.shutdown();
    exec.awaitTermination(5000, TimeUnit.MILLISECONDS);
  }
}
 
Developer: apache, Project: hbase, Lines: 18, Source: TestIdLock.java

Example 4: wait

import java.util.concurrent.ExecutorCompletionService; // import the required package/class
private void wait(ExecutorCompletionService<Void> submitterSvc, int max)
        throws Exception {
  int completed = 0;
  while (completed < max) {
    submitterSvc.take();
    completed++;
  }
}
 
Developer: moueimei, Project: flume-release-1.7.0, Lines: 9, Source: TestKafkaChannel.java
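
Example 4's wait helper blocks indefinitely inside take() until max tasks have completed. Where a bounded wait is preferred, CompletionService.poll(long, TimeUnit) can be substituted; the variant below is an illustrative sketch (the helper name, the 30-second timeout, and the exception handling are assumptions, not part of the original Flume test).

// Hypothetical variant of Example 4's helper: poll() with a timeout instead of a blocking take().
// Additional imports needed: java.util.concurrent.Future, java.util.concurrent.TimeUnit,
// java.util.concurrent.TimeoutException.
private void waitWithTimeout(ExecutorCompletionService<Void> submitterSvc, int max)
    throws Exception {
  int completed = 0;
  while (completed < max) {
    // poll() returns null if no task completes within the timeout window
    Future<Void> done = submitterSvc.poll(30, TimeUnit.SECONDS);
    if (done == null) {
      throw new TimeoutException("still waiting for " + (max - completed) + " tasks");
    }
    done.get(); // surfaces any exception thrown by the task
    completed++;
  }
}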

Example 5: testConcurrentReading

import java.util.concurrent.ExecutorCompletionService; // import the required package/class
@Test
public void testConcurrentReading() throws Exception {
  for (Compression.Algorithm compressAlgo : COMPRESSION_ALGORITHMS) {
    Path path =
        new Path(TEST_UTIL.getDataTestDir(), "concurrent_reading");
    Random rand = defaultRandom();
    List<Long> offsets = new ArrayList<Long>();
    List<BlockType> types = new ArrayList<BlockType>();
    writeBlocks(rand, compressAlgo, path, offsets, null, types, null);
    FSDataInputStream is = fs.open(path);
    long fileSize = fs.getFileStatus(path).getLen();
    HFileBlock.FSReader hbr = new HFileBlock.FSReaderV2(is, compressAlgo,
        fileSize);

    Executor exec = Executors.newFixedThreadPool(NUM_READER_THREADS);
    ExecutorCompletionService<Boolean> ecs =
        new ExecutorCompletionService<Boolean>(exec);

    for (int i = 0; i < NUM_READER_THREADS; ++i) {
      ecs.submit(new BlockReaderThread("reader_" + (char) ('A' + i), hbr,
          offsets, types, fileSize));
    }

    for (int i = 0; i < NUM_READER_THREADS; ++i) {
      Future<Boolean> result = ecs.take();
      assertTrue(result.get());
      if (detailedLogging) {
        LOG.info(String.valueOf(i + 1)
          + " reader threads finished successfully (algo=" + compressAlgo
          + ")");
      }
    }

    is.close();
  }
}
 
Developer: fengchen8086, Project: LCIndex-HBase-0.94.16, Lines: 37, Source: TestHFileBlock.java

Example 6: testMultipleClients

import java.util.concurrent.ExecutorCompletionService; // import the required package/class
@Test(timeout = 60000)
public void testMultipleClients() throws Exception {
  ExecutorService exec = Executors.newFixedThreadPool(NUM_THREADS);
  try {
    ExecutorCompletionService<Boolean> ecs = new ExecutorCompletionService<>(exec);
    for (int i = 0; i < NUM_THREADS; ++i)
      ecs.submit(new IdLockTestThread("client_" + i));
    for (int i = 0; i < NUM_THREADS; ++i) {
      Future<Boolean> result = ecs.take();
      assertTrue(result.get());
    }
    int entryPoolSize = idLock.purgeAndGetEntryPoolSize();
    LOG.debug("Size of entry pool after gc and purge: " + entryPoolSize);
    ReferenceType refType = idLock.getReferenceType();
    switch (refType) {
    case WEAK:
      // make sure the entry pool will be cleared after GC and purge call
      assertEquals(0, entryPoolSize);
      break;
    case SOFT:
      // make sure the entry pool won't be cleared when JVM memory is enough
      // even after GC and purge call
      assertEquals(NUM_IDS, entryPoolSize);
      break;
    default:
      break;
    }
  } finally {
    exec.shutdown();
    exec.awaitTermination(5000, TimeUnit.MILLISECONDS);
  }
}
 
Developer: apache, Project: hbase, Lines: 33, Source: TestIdReadWriteLock.java

Example 7: testConcurrentReadingInternals

import java.util.concurrent.ExecutorCompletionService; // import the required package/class
protected void testConcurrentReadingInternals() throws IOException,
    InterruptedException, ExecutionException {
  for (Compression.Algorithm compressAlgo : COMPRESSION_ALGORITHMS) {
    Path path = new Path(TEST_UTIL.getDataTestDir(), "concurrent_reading");
    Random rand = defaultRandom();
    List<Long> offsets = new ArrayList<>();
    List<BlockType> types = new ArrayList<>();
    writeBlocks(rand, compressAlgo, path, offsets, null, types, null);
    FSDataInputStream is = fs.open(path);
    long fileSize = fs.getFileStatus(path).getLen();
    HFileContext meta = new HFileContextBuilder()
                        .withHBaseCheckSum(true)
                        .withIncludesMvcc(includesMemstoreTS)
                        .withIncludesTags(includesTag)
                        .withCompression(compressAlgo)
                        .build();
    HFileBlock.FSReader hbr = new HFileBlock.FSReaderImpl(is, fileSize, meta);

    Executor exec = Executors.newFixedThreadPool(NUM_READER_THREADS);
    ExecutorCompletionService<Boolean> ecs = new ExecutorCompletionService<>(exec);

    for (int i = 0; i < NUM_READER_THREADS; ++i) {
      ecs.submit(new BlockReaderThread("reader_" + (char) ('A' + i), hbr,
          offsets, types, fileSize));
    }

    for (int i = 0; i < NUM_READER_THREADS; ++i) {
      Future<Boolean> result = ecs.take();
      assertTrue(result.get());
      if (detailedLogging) {
        LOG.info(String.valueOf(i + 1)
          + " reader threads finished successfully (algo=" + compressAlgo
          + ")");
      }
    }

    is.close();
  }
}
 
Developer: apache, Project: hbase, Lines: 40, Source: TestHFileBlock.java

Example 8: testMultipleClients

import java.util.concurrent.ExecutorCompletionService; // import the required package/class
@Test
public void testMultipleClients() throws Exception {
  ExecutorService submitter = Executors.newCachedThreadPool();
  client = RpcClientFactory.getThriftInstance(props);
  Context context = new Context();
  context.put("capacity", "1000");
  context.put("transactionCapacity", "1000");
  channel.configure(context);
  configureSource();
  context.put(ThriftSource.CONFIG_BIND, "0.0.0.0");
  context.put(ThriftSource.CONFIG_PORT, String.valueOf(port));
  Configurables.configure(source, context);
  source.start();
  ExecutorCompletionService<Void> completionService = new ExecutorCompletionService<Void>(submitter);
  for (int i = 0; i < 30; i++) {
    completionService.submit(new SubmitHelper(i), null);
  }
  // wait for all threads to be done
  for (int i = 0; i < 30; i++) {
    completionService.take();
  }

  Transaction transaction = channel.getTransaction();
  transaction.begin();
  long after = System.currentTimeMillis();
  List<Integer> events = Lists.newArrayList();
  for (int i = 0; i < 300; i++) {
    Event event = channel.take();
    Assert.assertNotNull(event);
    Assert.assertTrue(Long.valueOf(event.getHeaders().get("time")) < after);
    events.add(Integer.parseInt(new String(event.getBody())));
  }
  transaction.commit();
  transaction.close();

  Collections.sort(events);

  int index = 0;
  //30 batches of 10
  for (int i = 0; i < 30; i++) {
    for (int j = 0; j < 10; j++) {
      Assert.assertEquals(i, events.get(index++).intValue());
    }
  }
}
 
Developer: moueimei, Project: flume-release-1.7.0, Lines: 48, Source: TestThriftSource.java

Example 9: testBulkRenameAndDelete

import java.util.concurrent.ExecutorCompletionService; // import the required package/class
@Test
public void testBulkRenameAndDelete() throws Throwable {
  final Path scaleTestDir = getTestPath();
  final Path srcDir = new Path(scaleTestDir, "src");
  final Path finalDir = new Path(scaleTestDir, "final");
  final long count = getOperationCount();
  ContractTestUtils.rm(fs, scaleTestDir, true, false);

  fs.mkdirs(srcDir);
  fs.mkdirs(finalDir);

  int testBufferSize = fs.getConf()
      .getInt(ContractTestUtils.IO_CHUNK_BUFFER_SIZE,
          ContractTestUtils.DEFAULT_IO_CHUNK_BUFFER_SIZE);
  // use Executor to speed up file creation
  ExecutorService exec = Executors.newFixedThreadPool(16);
  final ExecutorCompletionService<Boolean> completionService =
      new ExecutorCompletionService<Boolean>(exec);
  try {
    final byte[] data = ContractTestUtils.dataset(testBufferSize, 'a', 'z');

    for (int i = 0; i < count; ++i) {
      final String fileName = "foo-" + i;
      completionService.submit(new Callable<Boolean>() {
        @Override
        public Boolean call() throws IOException {
          ContractTestUtils.createFile(fs, new Path(srcDir, fileName),
              false, data);
          return fs.exists(new Path(srcDir, fileName));
        }
      });
    }
    for (int i = 0; i < count; ++i) {
      final Future<Boolean> future = completionService.take();
      try {
        if (!future.get()) {
          LOG.warn("cannot create file");
        }
      } catch (ExecutionException e) {
        LOG.warn("Error while uploading file", e.getCause());
        throw e;
      }
    }
  } finally {
    exec.shutdown();
  }

  int nSrcFiles = fs.listStatus(srcDir).length;
  fs.rename(srcDir, finalDir);
  assertEquals(nSrcFiles, fs.listStatus(finalDir).length);
  ContractTestUtils.assertPathDoesNotExist(fs, "not deleted after rename",
      new Path(srcDir, "foo-" + 0));
  ContractTestUtils.assertPathDoesNotExist(fs, "not deleted after rename",
      new Path(srcDir, "foo-" + count / 2));
  ContractTestUtils.assertPathDoesNotExist(fs, "not deleted after rename",
      new Path(srcDir, "foo-" + (count - 1)));
  ContractTestUtils.assertPathExists(fs, "not renamed to dest dir",
      new Path(finalDir, "foo-" + 0));
  ContractTestUtils.assertPathExists(fs, "not renamed to dest dir",
      new Path(finalDir, "foo-" + count/2));
  ContractTestUtils.assertPathExists(fs, "not renamed to dest dir",
      new Path(finalDir, "foo-" + (count-1)));

  ContractTestUtils.assertDeleted(fs, finalDir, true, false);
}
 
Developer: naver, Project: hadoop, Lines: 66, Source: TestS3ADeleteManyFiles.java

Example 10: testConcurrentReadingInternals

import java.util.concurrent.ExecutorCompletionService; // import the required package/class
protected void testConcurrentReadingInternals() throws IOException,
    InterruptedException, ExecutionException {
  for (Compression.Algorithm compressAlgo : COMPRESSION_ALGORITHMS) {
    Path path =
        new Path(TEST_UTIL.getDataTestDir(), "concurrent_reading");
    Random rand = defaultRandom();
    List<Long> offsets = new ArrayList<Long>();
    List<BlockType> types = new ArrayList<BlockType>();
    writeBlocks(rand, compressAlgo, path, offsets, null, types, null);
    FSDataInputStream is = fs.open(path);
    long fileSize = fs.getFileStatus(path).getLen();
    HFileContext meta = new HFileContextBuilder()
                        .withHBaseCheckSum(true)
                        .withIncludesMvcc(includesMemstoreTS)
                        .withIncludesTags(includesTag)
                        .withCompression(compressAlgo)
                        .build();
    HFileBlock.FSReader hbr = new HFileBlock.FSReaderImpl(is, fileSize, meta);

    Executor exec = Executors.newFixedThreadPool(NUM_READER_THREADS);
    ExecutorCompletionService<Boolean> ecs =
        new ExecutorCompletionService<Boolean>(exec);

    for (int i = 0; i < NUM_READER_THREADS; ++i) {
      ecs.submit(new BlockReaderThread("reader_" + (char) ('A' + i), hbr,
          offsets, types, fileSize));
    }

    for (int i = 0; i < NUM_READER_THREADS; ++i) {
      Future<Boolean> result = ecs.take();
      assertTrue(result.get());
      if (detailedLogging) {
        LOG.info(String.valueOf(i + 1)
          + " reader threads finished successfully (algo=" + compressAlgo
          + ")");
      }
    }

    is.close();
  }
}
 
Developer: fengchen8086, Project: ditb, Lines: 42, Source: TestHFileBlock.java

Example 11: testConcurrentReadingInternals

import java.util.concurrent.ExecutorCompletionService; // import the required package/class
protected void testConcurrentReadingInternals() throws IOException,
    InterruptedException, ExecutionException {
  for (Compression.Algorithm compressAlgo : COMPRESSION_ALGORITHMS) {
    Path path =
        new Path(TEST_UTIL.getDataTestDir(), "concurrent_reading");
    Random rand = defaultRandom();
    List<Long> offsets = new ArrayList<Long>();
    List<BlockType> types = new ArrayList<BlockType>();
    writeBlocks(rand, compressAlgo, path, offsets, null, types, null);
    FSDataInputStream is = fs.open(path);
    long fileSize = fs.getFileStatus(path).getLen();
    HFileContext meta = new HFileContextBuilder()
                        .withHBaseCheckSum(true)
                        .withIncludesMvcc(includesMemstoreTS)
                        .withIncludesTags(includesTag)
                        .withCompression(compressAlgo)
                        .build();
    HFileBlock.FSReader hbr = new HFileBlock.FSReaderV2(is, fileSize, meta);

    Executor exec = Executors.newFixedThreadPool(NUM_READER_THREADS);
    ExecutorCompletionService<Boolean> ecs =
        new ExecutorCompletionService<Boolean>(exec);

    for (int i = 0; i < NUM_READER_THREADS; ++i) {
      ecs.submit(new BlockReaderThread("reader_" + (char) ('A' + i), hbr,
          offsets, types, fileSize));
    }

    for (int i = 0; i < NUM_READER_THREADS; ++i) {
      Future<Boolean> result = ecs.take();
      assertTrue(result.get());
      if (detailedLogging) {
        LOG.info(String.valueOf(i + 1)
          + " reader threads finished successfully (algo=" + compressAlgo
          + ")");
      }
    }

    is.close();
  }
}
 
Developer: tenggyut, Project: HIndex, Lines: 42, Source: TestHFileBlock.java

Example 12: testWritesDoubleConcurrency

import java.util.concurrent.ExecutorCompletionService; // import the required package/class
@Test
public void testWritesDoubleConcurrency() throws URISyntaxException, StorageException, IOException,
        InterruptedException {
    String blobName = BlobTestHelper.generateRandomBlobNameWithPrefix("concurrency");
    CloudBlockBlob blockBlob = this.container.getBlockBlobReference(blobName);

    // setup the blob output stream with a concurrency of 5
    BlobRequestOptions options = new BlobRequestOptions();
    options.setConcurrentRequestCount(5);
    BlobOutputStream blobOutputStream = blockBlob.openOutputStream(null, options, null);

    // set up the execution completion service
    ExecutorService threadExecutor = Executors.newFixedThreadPool(5);
    ExecutorCompletionService<Void> completion = new ExecutorCompletionService<Void>(threadExecutor);
    
    int tasks = 10;
    int writes = 10;
    int length = 512;
    
    // submit tasks to write and flush many blocks
    for (int i = 0; i < tasks; i++) {
        completion.submit(new WriteTask(blobOutputStream, length, writes, 4 /*flush period*/));
    }

    // wait for all tasks to complete
    for (int i = 0; i < tasks; i++) {
        completion.take();
    }

    // shut down the thread executor for this method
    threadExecutor.shutdown();

    // check that blocks have been uploaded (they remain uncommitted until the stream is closed)
    ArrayList<BlockEntry> blocks = blockBlob.downloadBlockList(BlockListingFilter.UNCOMMITTED, null, null, null);
    assertTrue(blocks.size() != 0);
    
    // close the stream and check that the blob is the expected length
    blobOutputStream.close();
    blockBlob.downloadAttributes();
    assertTrue(blockBlob.getProperties().getLength() == length*writes*tasks);
}
 
Developer: Azure, Project: azure-storage-android, Lines: 42, Source: BlobOutputStreamTests.java


Note: The java.util.concurrent.ExecutorCompletionService.take method examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets are drawn from open-source projects contributed by their respective developers, and the source code copyright belongs to the original authors. Refer to each project's license before distributing or using the code; do not reproduce without permission.