This page collects typical usage examples of the Stopwatch.start method from Java's com.google.common.base.Stopwatch. If you are wondering how Stopwatch.start is used in practice, or are looking for concrete examples of calling it, the hand-picked code samples below may help. You can also explore further usage examples of the enclosing class, com.google.common.base.Stopwatch.
The following shows 15 code examples of the Stopwatch.start method, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Java code examples.
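Before the individual examples, here is a minimal, self-contained sketch of the Stopwatch lifecycle that most of them follow; the wrapper class and the Thread.sleep placeholders are purely illustrative, while the Stopwatch calls themselves are standard Guava API.

import com.google.common.base.Stopwatch;
import java.util.concurrent.TimeUnit;

public class StopwatchBasics {
    public static void main(String[] args) throws InterruptedException {
        // createUnstarted() returns a stopwatch in the stopped state; start() begins timing.
        Stopwatch watch = Stopwatch.createUnstarted();
        watch.start();
        Thread.sleep(50);                      // stands in for the work being measured
        watch.stop();                          // stop() freezes the elapsed time
        System.out.println("elapsed: " + watch.elapsed(TimeUnit.MILLISECONDS) + " ms");

        // start() on a stopped stopwatch resumes timing and accumulates on top of the
        // previous elapsed value; reset() zeroes it and leaves it stopped.
        watch.start();
        Thread.sleep(50);
        watch.stop();
        System.out.println("accumulated: " + watch.elapsed(TimeUnit.MILLISECONDS) + " ms");
    }
}

The accumulate-on-restart behavior is worth keeping in mind when reading the examples below: a reused stopwatch must be reset() if each measurement should start from zero.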
Example 1: pushConfigWithConflictingVersionRetries
import com.google.common.base.Stopwatch; // import the package/class this method depends on
private synchronized boolean pushConfigWithConflictingVersionRetries(final ConfigSnapshotHolder configSnapshotHolder) throws ConfigSnapshotFailureException {
ConflictingVersionException lastException;
Stopwatch stopwatch = Stopwatch.createUnstarted();
do {
// TODO wait until all expected modules are in yangStoreService; do we even need to, with yangStoreService instead of netconfOperationService?
String idForReporting = configSnapshotHolder.toString();
SortedSet<String> expectedCapabilities = checkNotNull(configSnapshotHolder.getCapabilities(),
"Expected capabilities must not be null - %s, check %s", idForReporting,
configSnapshotHolder.getClass().getName());
// wait max time for required capabilities to appear
waitForCapabilities(expectedCapabilities, idForReporting);
try {
if(!stopwatch.isRunning()) {
stopwatch.start();
}
return pushConfig(configSnapshotHolder);
} catch (final ConflictingVersionException e) {
lastException = e;
LOG.info("Conflicting version detected, will retry after timeout");
sleep();
}
} while (stopwatch.elapsed(TimeUnit.MILLISECONDS) < conflictingVersionTimeoutMillis);
throw new IllegalStateException("Max wait for conflicting version stabilization timeout after " + stopwatch.elapsed(TimeUnit.MILLISECONDS) + " ms",
lastException);
}
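A note on the pattern above: the stopwatch is created unstarted and only started, guarded by isRunning(), after the first waitForCapabilities() call, so the conflict-retry deadline starts ticking only once and excludes that initial wait. Below is a compact, runnable sketch of the same retry-with-deadline idiom; prepare(), tryOnce() and TIMEOUT_MILLIS are illustrative stand-ins rather than part of the original code.

import com.google.common.base.Stopwatch;
import java.util.concurrent.TimeUnit;

public class RetryWithDeadline {
    private static final long TIMEOUT_MILLIS = 200;
    private static int attempts = 0;

    public static void main(String[] args) throws InterruptedException {
        Stopwatch deadline = Stopwatch.createUnstarted();
        do {
            prepare();                 // the first call happens before the deadline starts ticking
            if (!deadline.isRunning()) {
                deadline.start();      // started exactly once, on the first attempt
            }
            if (tryOnce()) {
                System.out.println("succeeded after " + deadline.elapsed(TimeUnit.MILLISECONDS) + " ms");
                return;
            }
            Thread.sleep(20);          // back off between attempts
        } while (deadline.elapsed(TimeUnit.MILLISECONDS) < TIMEOUT_MILLIS);
        throw new IllegalStateException("gave up after " + deadline.elapsed(TimeUnit.MILLISECONDS) + " ms");
    }

    private static void prepare() { /* e.g. wait for required capabilities */ }

    private static boolean tryOnce() { return ++attempts >= 3; }
}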
Example 2: getMappings
import com.google.common.base.Stopwatch; // import the package/class this method depends on
/**
* Does the work of creating the mappings for this AssignmentCreator
* @return the minor fragment id to work units mapping
*/
private ListMultimap<Integer, T> getMappings() {
Stopwatch watch = new Stopwatch();
watch.start();
maxWork = (int) Math.ceil(units.size() / ((float) incomingEndpoints.size()));
LinkedList<WorkEndpointListPair<T>> workList = getWorkList();
LinkedList<WorkEndpointListPair<T>> unassignedWorkList;
Map<DrillbitEndpoint,FragIteratorWrapper> endpointIterators = getEndpointIterators();
unassignedWorkList = assign(workList, endpointIterators, true);
assignLeftovers(unassignedWorkList, endpointIterators, true);
assignLeftovers(unassignedWorkList, endpointIterators, false);
if (unassignedWorkList.size() != 0) {
throw new DrillRuntimeException("There are still unassigned work units");
}
logger.debug("Took {} ms to assign {} work units to {} fragments", watch.elapsed(TimeUnit.MILLISECONDS), units.size(), incomingEndpoints.size());
return mappings;
}
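Several examples on this page, this one included, instantiate the stopwatch with the no-argument constructor, which comes from older Guava releases; in current Guava that constructor is no longer public, and the factory methods are used instead. A minimal equivalent of the timing scaffold above, with a summing loop standing in for the real assignment work:

import com.google.common.base.Stopwatch;
import java.util.concurrent.TimeUnit;

public class FactoryMethodTiming {
    public static void main(String[] args) {
        // Equivalent of "new Stopwatch(); watch.start();" against current Guava.
        Stopwatch watch = Stopwatch.createStarted();
        long sum = 0;
        for (int i = 0; i < 1_000_000; i++) {  // placeholder for the real work
            sum += i;
        }
        System.out.println("Took " + watch.elapsed(TimeUnit.MILLISECONDS) + " ms (sum=" + sum + ")");
    }
}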
Example 3: ignoredTimedMult
import com.google.common.base.Stopwatch; // import the package/class this method depends on
public void ignoredTimedMult() {
Stopwatch watch = Stopwatch.createUnstarted();
DenseMatrix dense = new DenseMatrix(1000, 1000);
int[][] nz = Utilities.getRowPattern(dense.numRows(),
dense.numColumns(), 100);
Utilities.rowPopulate(dense, nz);
log.info("created matrices");
Matrix sparse = new LinkedSparseMatrix(dense.numRows(),
dense.numColumns());
sparse.set(dense);
for (Matrix m : Lists.newArrayList(dense, sparse)) {
log.info("starting " + m.getClass());
Matrix t = new DenseMatrix(m);
t.transpose();
Matrix o = new DenseMatrix(dense.numRows(), dense.numColumns());
log.info("warming up " + m.getClass() + " " + o.getClass());
for (int i = 0; i < 10; i++)
m.mult(t, o);
log.info("starting " + m.getClass() + " " + o.getClass());
watch.start();
for (int i = 0; i < 100; i++)
m.mult(t, o);
watch.stop();
log.info(m.getClass() + " " + o.getClass() + " " + watch);
}
}
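Two details are worth noting in this benchmark. Logging the stopwatch object directly relies on Stopwatch.toString(), which prints a human-readable duration. Also, because the same stopwatch is restarted for the sparse matrix without a reset(), the second logged time accumulates on top of the first, which may or may not be intended; a per-iteration reset avoids the accumulation, as in this illustrative sketch where Thread.sleep stands in for the multiplications.

import com.google.common.base.Stopwatch;
import java.util.concurrent.TimeUnit;

public class PerIterationTiming {
    public static void main(String[] args) throws InterruptedException {
        Stopwatch watch = Stopwatch.createUnstarted();
        for (int iteration = 0; iteration < 2; iteration++) {
            watch.reset();             // discard the time accumulated in the previous iteration
            watch.start();
            Thread.sleep(30);          // placeholder for m.mult(t, o)
            watch.stop();
            System.out.println("iteration " + iteration + ": " + watch.elapsed(TimeUnit.MILLISECONDS) + " ms");
        }
    }
}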
Example 4: seriesTimeThen
import com.google.common.base.Stopwatch; // import the package/class this method depends on
/**
* @param uniqueKey unique key
* @param r the task
* @param then callback receiving (this run's elapsed time, accumulated total time), both in milliseconds
*/
public static void seriesTimeThen(Object uniqueKey, Runnable r, BiConsumer<Long, Long> then) {
Stopwatch total = CacheUtil.cache(TimingUtil.class, uniqueKey, () -> Stopwatch.createUnstarted());
Stopwatch temp = getShareStopwatch();
temp.reset();
temp.start();
total.start();
r.run();
temp.stop();
total.stop();
if (then != null) {
then.accept(temp.elapsed(TimeUnit.MILLISECONDS), total.elapsed(TimeUnit.MILLISECONDS));
}
}
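This helper deliberately relies on the accumulate-on-restart behavior: the cached "total" stopwatch is restarted on every call and so keeps a running sum across the whole series, while the shared "temp" stopwatch is reset first so it reports only the current run. CacheUtil.cache and getShareStopwatch are project-specific helpers; the sketch below expresses the same idea with plain fields instead.

import com.google.common.base.Stopwatch;
import java.util.concurrent.TimeUnit;
import java.util.function.BiConsumer;

public class SeriesTimer {
    private final Stopwatch total = Stopwatch.createUnstarted(); // accumulates across calls
    private final Stopwatch temp = Stopwatch.createUnstarted();  // measures only the current call

    public void timeThen(Runnable r, BiConsumer<Long, Long> then) {
        temp.reset();
        temp.start();
        total.start();   // resumes, so the total keeps growing run after run
        r.run();
        temp.stop();
        total.stop();
        then.accept(temp.elapsed(TimeUnit.MILLISECONDS), total.elapsed(TimeUnit.MILLISECONDS));
    }

    public static void main(String[] args) {
        SeriesTimer timer = new SeriesTimer();
        Runnable work = () -> { long s = 0; for (int i = 0; i < 5_000_000; i++) { s += i; } };
        for (int i = 0; i < 3; i++) {
            timer.timeThen(work, (thisRun, totalSoFar) ->
                System.out.println("this run: " + thisRun + " ms, total so far: " + totalSoFar + " ms"));
        }
    }
}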
Example 5: getExec
import com.google.common.base.Stopwatch; // import the package/class this method depends on
/**
* Create and return fragment RootExec for given FragmentRoot. RootExec has one or more RecordBatches as children
* (which may contain child RecordBatches and so on).
*
* @param context
* FragmentContext.
* @param root
* FragmentRoot.
* @return RootExec of fragment.
* @throws ExecutionSetupException
*/
public static RootExec getExec(FragmentContext context, FragmentRoot root) throws ExecutionSetupException {
Preconditions.checkNotNull(root);
Preconditions.checkNotNull(context);
if (AssertionUtil.isAssertionsEnabled()) {
root = IteratorValidatorInjector.rewritePlanWithIteratorValidator(context, root);
}
final ImplCreator creator = new ImplCreator();
Stopwatch watch = new Stopwatch();
watch.start();
try {
final RootExec rootExec = creator.getRootExec(root, context);
// skip over this for SimpleRootExec (testing)
if (rootExec instanceof BaseRootExec) {
((BaseRootExec) rootExec).setOperators(creator.getOperators());
}
logger.debug("Took {} ms to create RecordBatch tree", watch.elapsed(TimeUnit.MILLISECONDS));
if (rootExec == null) {
throw new ExecutionSetupException(
"The provided fragment did not have a root node that correctly created a RootExec value.");
}
return rootExec;
} catch(Exception e) {
context.fail(e);
for(final CloseableRecordBatch crb : creator.getOperators()) {
AutoCloseables.close(crb, logger);
}
}
return null;
}
Example 6: add
import com.google.common.base.Stopwatch; // import the package/class this method depends on
@Override
public void add(FragmentContext context, RecordBatchData batch) throws SchemaChangeException{
Stopwatch watch = new Stopwatch();
watch.start();
if (hyperBatch == null) {
hyperBatch = new ExpandableHyperContainer(batch.getContainer());
} else {
hyperBatch.addBatch(batch.getContainer());
}
doSetup(context, hyperBatch, null); // may not need to do this every time
int count = 0;
SelectionVector2 sv2 = null;
if (hasSv2) {
sv2 = batch.getSv2();
}
for (; queueSize < limit && count < batch.getRecordCount(); count++) {
heapSv4.set(queueSize, batchCount, hasSv2 ? sv2.getIndex(count) : count);
queueSize++;
siftUp();
}
for (; count < batch.getRecordCount(); count++) {
heapSv4.set(limit, batchCount, hasSv2 ? sv2.getIndex(count) : count);
if (compare(limit, 0) < 0) {
swap(limit, 0);
siftDown();
}
}
batchCount++;
if (hasSv2) {
sv2.clear();
}
logger.debug("Took {} us to add {} records", watch.elapsed(TimeUnit.MICROSECONDS), count);
}
Example 7: testHdfsStreaming
import com.google.common.base.Stopwatch; // import the package/class this method depends on
protected void testHdfsStreaming(Path filename) throws IOException {
byte[] buf = new byte[1024];
FileSystem fs = filename.getFileSystem(getConf());
// read the file from start to finish
Stopwatch fileOpenTimer = new Stopwatch();
Stopwatch streamTimer = new Stopwatch();
fileOpenTimer.start();
FSDataInputStream in = fs.open(filename);
fileOpenTimer.stop();
long totalBytes = 0;
streamTimer.start();
while (true) {
int read = in.read(buf);
if (read < 0) {
break;
}
totalBytes += read;
}
streamTimer.stop();
double throughput = (double)totalBytes / streamTimer.elapsedTime(TimeUnit.SECONDS);
System.out.println("HDFS streaming: ");
System.out.println("total time to open: " + fileOpenTimer.elapsedMillis() + " ms");
System.out.println("total time to read: " + streamTimer.elapsedMillis() + " ms");
System.out.println("total bytes: " + totalBytes + " bytes ("
+ StringUtils.humanReadableInt(totalBytes) + ")");
System.out.println("throghput : " + StringUtils.humanReadableInt((long)throughput) + "B/s");
}
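elapsedMillis() and elapsedTime(TimeUnit) in this example come from older Guava releases; newer versions provide elapsed(TimeUnit) instead. Note also that elapsedTime(TimeUnit.SECONDS) truncates, so the throughput division can hit zero when the whole read finishes in under a second; measuring in a finer unit avoids that, as in this illustrative sketch where the byte count and sleep are placeholders for the real streaming loop.

import com.google.common.base.Stopwatch;
import java.util.concurrent.TimeUnit;

public class ThroughputTiming {
    public static void main(String[] args) throws InterruptedException {
        Stopwatch streamTimer = Stopwatch.createStarted();
        long totalBytes = 64L * 1024 * 1024;   // pretend we streamed 64 MB
        Thread.sleep(250);                     // placeholder for the read loop
        streamTimer.stop();
        // Compute in nanoseconds so sub-second reads do not divide by zero.
        double seconds = streamTimer.elapsed(TimeUnit.NANOSECONDS) / 1e9;
        System.out.printf("read %d bytes in %.3f s (%.1f MB/s)%n",
            totalBytes, seconds, totalBytes / seconds / (1024 * 1024));
    }
}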
Example 8: sort
import com.google.common.base.Stopwatch; // import the package/class this method depends on
@Override
public void sort(SelectionVector4 vector4, VectorContainer container){
Stopwatch watch = new Stopwatch();
watch.start();
QuickSort qs = new QuickSort();
qs.sort(this, 0, vector4.getTotalCount());
logger.debug("Took {} us to sort {} records", watch.elapsed(TimeUnit.MICROSECONDS), vector4.getTotalCount());
}
Example 9: sort
import com.google.common.base.Stopwatch; // import the package/class this method depends on
@Override
public void sort(SelectionVector2 vector2){
QuickSort qs = new QuickSort();
Stopwatch watch = new Stopwatch();
watch.start();
if (vector2.getCount() > 0) {
qs.sort(this, 0, vector2.getCount());
}
logger.debug("Took {} us to sort {} records", watch.elapsed(TimeUnit.MICROSECONDS), vector2.getCount());
}
Example 10: buildEndpointMap
import com.google.common.base.Stopwatch; // import the package/class this method depends on
/**
* Builds a mapping of hostnames to Drillbit endpoints
*/
private static ImmutableMap<String, DrillbitEndpoint> buildEndpointMap(Collection<DrillbitEndpoint> endpoints) {
Stopwatch watch = new Stopwatch();
watch.start();
HashMap<String, DrillbitEndpoint> endpointMap = Maps.newHashMap();
for (DrillbitEndpoint d : endpoints) {
String hostName = d.getAddress();
endpointMap.put(hostName, d);
}
watch.stop();
logger.debug("Took {} ms to build endpoint map", watch.elapsed(TimeUnit.MILLISECONDS));
return ImmutableMap.copyOf(endpointMap);
}
Example 11: getParquetTableMetadata
import com.google.common.base.Stopwatch; // import the package/class this method depends on
/**
* Get the parquet metadata for the parquet files in a directory
* @param path the path of the directory
* @return the parquet metadata for the parquet files under the given path
* @throws IOException
*/
private ParquetTableMetadata_v1 getParquetTableMetadata(String path) throws IOException {
Path p = new Path(path);
FileStatus fileStatus = fs.getFileStatus(p);
Stopwatch watch = new Stopwatch();
watch.start();
List<FileStatus> fileStatuses = getFileStatuses(fileStatus);
logger.info("Took {} ms to get file statuses", watch.elapsed(TimeUnit.MILLISECONDS));
return getParquetTableMetadata(fileStatuses);
}
Example 12: testParseParquetPhysicalPlanRemote
import com.google.common.base.Stopwatch; // import the package/class this method depends on
@Test
@Ignore
public void testParseParquetPhysicalPlanRemote() throws Exception {
DrillConfig config = DrillConfig.create();
try(DrillClient client = new DrillClient(config);) {
client.connect();
ParquetResultsListener listener = new ParquetResultsListener();
Stopwatch watch = new Stopwatch();
watch.start();
client.runQuery(org.apache.drill.exec.proto.UserBitShared.QueryType.PHYSICAL, Resources.toString(Resources.getResource(fileName),Charsets.UTF_8), listener);
System.out.println(String.format("Got %d total records in %d seconds", listener.await(), watch.elapsed(TimeUnit.SECONDS)));
client.close();
}
}
Example 13: readPage
import com.google.common.base.Stopwatch; // import the package/class this method depends on
public void readPage(PageHeader pageHeader, int compressedSize, int uncompressedSize, ArrowBuf dest) throws IOException {
Stopwatch timer = Stopwatch.createUnstarted();
long timeToRead;
long start = inputStream.getPos();
if (parentColumnReader.columnChunkMetaData.getCodec() == CompressionCodecName.UNCOMPRESSED) {
timer.start();
dataReader.loadPage(dest, compressedSize);
timeToRead = timer.elapsed(TimeUnit.MICROSECONDS);
this.updateStats(pageHeader, "Page Read", start, timeToRead, compressedSize, uncompressedSize);
} else {
final ArrowBuf compressedData = allocateTemporaryBuffer(compressedSize);
try {
timer.start();
dataReader.loadPage(compressedData, compressedSize);
timeToRead = timer.elapsed(TimeUnit.MICROSECONDS);
timer.reset();
this.updateStats(pageHeader, "Page Read", start, timeToRead, compressedSize, compressedSize);
start = inputStream.getPos();
timer.start();
codecFactory.getDecompressor(parentColumnReader.columnChunkMetaData
.getCodec()).decompress(compressedData.nioBuffer(0, compressedSize), compressedSize,
dest.nioBuffer(0, uncompressedSize), uncompressedSize);
timeToRead = timer.elapsed(TimeUnit.MICROSECONDS);
this.updateStats(pageHeader, "Decompress", start, timeToRead, compressedSize, uncompressedSize);
} finally {
compressedData.release();
}
}
}
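This reader illustrates two further Stopwatch behaviors: elapsed() may be read while the stopwatch is still running, and reset() both stops the stopwatch and zeroes it, which is why the reset()/start() pair cleanly separates the page-read phase from the decompress phase. A standalone sketch of that two-phase timing, with sleeps standing in for the read and the decompression:

import com.google.common.base.Stopwatch;
import java.util.concurrent.TimeUnit;

public class TwoPhaseTiming {
    public static void main(String[] args) throws InterruptedException {
        Stopwatch timer = Stopwatch.createUnstarted();

        timer.start();
        Thread.sleep(40);                                        // phase 1: read the page
        long readMicros = timer.elapsed(TimeUnit.MICROSECONDS);  // elapsed() works on a running stopwatch
        timer.reset();                                           // stop and zero before re-timing

        timer.start();
        Thread.sleep(25);                                        // phase 2: decompress
        long decompressMicros = timer.elapsed(TimeUnit.MICROSECONDS);

        System.out.println("read: " + readMicros + " us, decompress: " + decompressMicros + " us");
    }
}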
Example 14: submitQuery
import com.google.common.base.Stopwatch; // import the package/class this method depends on
public int submitQuery(DrillClient client, String plan, String type, String format, int width) throws Exception {
PrintingResultsListener listener;
String[] queries;
QueryType queryType;
type = type.toLowerCase();
switch (type) {
case "sql":
queryType = QueryType.SQL;
queries = plan.trim().split(";");
break;
case "logical":
queryType = QueryType.LOGICAL;
queries = new String[]{ plan };
break;
case "physical":
queryType = QueryType.PHYSICAL;
queries = new String[]{ plan };
break;
default:
System.out.println("Invalid query type: " + type);
return -1;
}
Format outputFormat;
format = format.toLowerCase();
switch (format) {
case "csv":
outputFormat = Format.CSV;
break;
case "tsv":
outputFormat = Format.TSV;
break;
case "table":
outputFormat = Format.TABLE;
break;
default:
System.out.println("Invalid format type: " + format);
return -1;
}
Stopwatch watch = new Stopwatch();
for (String query : queries) {
listener = new PrintingResultsListener(client.getConfig(), outputFormat, width);
watch.start();
client.runQuery(queryType, query, listener);
int rows = listener.await();
System.out.println(String.format("%d record%s selected (%f seconds)", rows, rows > 1 ? "s" : "", (float) watch.elapsed(TimeUnit.MILLISECONDS) / (float) 1000));
if (query != queries[queries.length - 1]) {
System.out.println();
}
watch.stop();
watch.reset();
}
return 0;
}
Example 15: testPerformance
import com.google.common.base.Stopwatch; // import the package/class this method depends on
@Test
@Ignore
public void testPerformance(@Injectable final DrillbitContext bitContext,
@Injectable UserServer.UserClientConnection connection) throws Exception {
final DrillConfig c = DrillConfig.create();
final FunctionImplementationRegistry registry = new FunctionImplementationRegistry(c);
final FragmentContext context = new FragmentContext(bitContext, BitControl.PlanFragment.getDefaultInstance(), connection, registry);
// new NonStrictExpectations() {
// {
// context.getAllocator(); result = BufferAllocator.getAllocator(DrillConfig.create());
// }
// };
final String fileName = "/tmp/parquet_test_performance.parquet";
final HashMap<String, FieldInfo> fields = new HashMap<>();
final ParquetTestProperties props = new ParquetTestProperties(1, 20 * 1000 * 1000, DEFAULT_BYTES_PER_PAGE, fields);
populateFieldInfoMap(props);
//generateParquetFile(fileName, props);
final Configuration dfsConfig = new Configuration();
final List<Footer> footers = ParquetFileReader.readFooters(dfsConfig, new Path(fileName));
final Footer f = footers.iterator().next();
final List<SchemaPath> columns = Lists.newArrayList();
columns.add(new SchemaPath("_MAP.integer", ExpressionPosition.UNKNOWN));
columns.add(new SchemaPath("_MAP.bigInt", ExpressionPosition.UNKNOWN));
columns.add(new SchemaPath("_MAP.f", ExpressionPosition.UNKNOWN));
columns.add(new SchemaPath("_MAP.d", ExpressionPosition.UNKNOWN));
columns.add(new SchemaPath("_MAP.b", ExpressionPosition.UNKNOWN));
columns.add(new SchemaPath("_MAP.bin", ExpressionPosition.UNKNOWN));
columns.add(new SchemaPath("_MAP.bin2", ExpressionPosition.UNKNOWN));
int totalRowCount = 0;
final FileSystem fs = new CachedSingleFileSystem(fileName);
final BufferAllocator allocator = RootAllocatorFactory.newRoot(c);
for(int i = 0; i < 25; i++) {
final ParquetRecordReader rr = new ParquetRecordReader(context, 256000, fileName, 0, fs,
new DirectCodecFactory(dfsConfig, allocator), f.getParquetMetadata(), columns);
final TestOutputMutator mutator = new TestOutputMutator(allocator);
rr.setup(null, mutator);
final Stopwatch watch = new Stopwatch();
watch.start();
int rowCount = 0;
while ((rowCount = rr.next()) > 0) {
totalRowCount += rowCount;
}
System.out.println(String.format("Time completed: %s. ", watch.elapsed(TimeUnit.MILLISECONDS)));
rr.close();
}
allocator.close();
System.out.println(String.format("Total row count %s", totalRowCount));
}
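Unlike earlier examples that reuse a single stopwatch with reset(), this test creates a fresh stopwatch inside the loop, so every iteration starts timing from zero without any reset. With current Guava the same effect is a single Stopwatch.createStarted() per iteration, as in this small sketch where the sleep stands in for the record-reading loop.

import com.google.common.base.Stopwatch;
import java.util.concurrent.TimeUnit;

public class FreshStopwatchPerIteration {
    public static void main(String[] args) throws InterruptedException {
        for (int i = 0; i < 3; i++) {
            Stopwatch watch = Stopwatch.createStarted(); // fresh, already-running stopwatch per iteration
            Thread.sleep(20);                            // placeholder for the rr.next() loop
            System.out.println("iteration " + i + " completed in " + watch.elapsed(TimeUnit.MILLISECONDS) + " ms");
        }
    }
}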