This article collects typical usage examples of the Java method org.apache.ignite.IgniteLogger.info. If you are wondering what IgniteLogger.info does, how to use it, and where to find usage examples, the hand-picked code samples below should help. You can also read more about its enclosing class, org.apache.ignite.IgniteLogger.
Listed below are 15 code examples of IgniteLogger.info, sorted by popularity by default. Upvoting the examples you like or find useful helps the system recommend better Java code examples.
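Before the individual examples, here is a minimal sketch of the guarded logging pattern that most of them share; starting a node via Ignition.start() with the default configuration is an assumption for illustration only, not part of the examples that follow.
import org.apache.ignite.Ignite;
import org.apache.ignite.IgniteLogger;
import org.apache.ignite.Ignition;

public class IgniteLoggerInfoSketch {
    public static void main(String[] args) {
        // Start a node with the default configuration (illustrative assumption).
        try (Ignite ignite = Ignition.start()) {
            IgniteLogger log = ignite.log();

            // Guard the call so the message string is only built when INFO is enabled.
            if (log.isInfoEnabled())
                log.info("Node started: " + ignite.name());
        }
    }
}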
Example 1: testLogInitialize
import org.apache.ignite.IgniteLogger; // import the package/class the method depends on
/** */
public void testLogInitialize() {
    IgniteLogger log = new Log4JLogger().getLogger(GridLog4jNotInitializedTest.class);

    System.out.println(log.toString());

    assertTrue(log.toString().contains("Log4JLogger"));
    assertTrue(log.toString().contains("config=null"));

    if (log.isDebugEnabled())
        log.debug("This is 'debug' message.");
    else
        System.out.println("DEBUG level is not enabled.");

    if (log.isInfoEnabled())
        log.info("This is 'info' message.");
    else
        System.out.println("INFO level is not enabled.");

    log.warning("This is 'warning' message.");
    log.error("This is 'error' message.");
}
Example 2: setUp
import org.apache.ignite.IgniteLogger; // import the package/class the method depends on
/** {@inheritDoc} */
@Override public void setUp(BenchmarkConfiguration cfg) throws Exception {
    super.setUp(cfg);

    jcommander(cfg.commandLineArguments(), args, "<ignite-driver>");

    if (Ignition.state() != IgniteState.STARTED) {
        node = new IgniteNode(args.isClientOnly() && !args.isNearCache());

        node.start(cfg);
    }
    else
        // Support for mixed benchmarks mode.
        node = new IgniteNode(args.isClientOnly() && !args.isNearCache(), Ignition.ignite());

    waitForNodes();

    IgniteLogger log = ignite().log();

    if (log.isInfoEnabled())
        log.info("Benchmark arguments: " + args);
}
Example 3: ensureDirectory
import org.apache.ignite.IgniteLogger; // import the package/class the method depends on
/**
 * Checks if the given directory exists and attempts to create it if not.
 *
 * @param dir Directory to check.
 * @param msg Directory name for the messages.
 * @param log Optional logger to log a message that the directory has been resolved.
 * @throws IgniteCheckedException If the directory does not exist and could not be created, or if a file with
 *      the same name already exists.
 */
public static void ensureDirectory(Path dir, String msg, IgniteLogger log) throws IgniteCheckedException {
    if (!Files.exists(dir)) {
        try {
            Files.createDirectories(dir);
        }
        catch (IOException e) {
            throw new IgniteCheckedException("Failed to create " + msg + ": " + dir.toAbsolutePath(), e);
        }
    }
    else if (!Files.isDirectory(dir))
        throw new IgniteCheckedException("Failed to initialize " + msg +
            " (a file with the same name already exists): " + dir.toAbsolutePath());

    if (log != null && log.isInfoEnabled())
        log.info("Resolved " + msg + ": " + dir.toAbsolutePath());
}
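A minimal usage sketch for the utility above; the directory path, the msg label and the surrounding method are made-up illustration values, not part of the original example.
import java.nio.file.Paths;

import org.apache.ignite.IgniteCheckedException;
import org.apache.ignite.IgniteLogger;

// Hypothetical caller: logs "Resolved work directory: ..." at INFO level when a logger is supplied.
void initWorkDirectory(IgniteLogger log) throws IgniteCheckedException {
    ensureDirectory(Paths.get("/tmp/ignite/work"), "work directory", log);
}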
Example 4: showAscii
import org.apache.ignite.IgniteLogger; // import the package/class the method depends on
/**
 * @param mtx {@link Matrix} object to show as plain text.
 * @param log {@link IgniteLogger} instance to output the logged matrix to.
 * @param fmt Format string for matrix rows.
 */
public static void showAscii(Matrix mtx, IgniteLogger log, String fmt) {
    String cls = mtx.getClass().getSimpleName();

    int rows = mtx.rowSize();
    int cols = mtx.columnSize();

    log.info(String.format(LOCALE, "%s(%dx%d)", cls, rows, cols));

    for (int row = 0; row < rows; row++)
        log.info(rowStr(mtx, row, fmt));
}
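For reference, a one-line usage sketch; mtx is assumed to be some existing Matrix instance and the "%.4f" row format is purely illustrative.
// Logs the matrix dimensions followed by each row at INFO level.
showAscii(mtx, ignite.log(), "%.4f");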
Example 5: doLog
import org.apache.ignite.IgniteLogger; // import the package/class the method depends on
@Override public void doLog(IgniteLogger log, String longMsg, String shortMsg, Throwable e, boolean quiet) {
    if (quiet)
        U.quietAndInfo(log, longMsg);
    else {
        if (log.isInfoEnabled())
            log.info(longMsg);
    }
}
Example 6: quietAndInfo
import org.apache.ignite.IgniteLogger; // import the package/class the method depends on
/**
 * Prints out the message in quiet and info modes.
 *
 * @param log Logger.
 * @param msg Message to print.
 */
public static void quietAndInfo(IgniteLogger log, String msg) {
    if (log.isQuiet())
        U.quiet(false, msg);

    if (log.isInfoEnabled())
        log.info(msg);
}
Example 7: run
import org.apache.ignite.IgniteLogger; // import the package/class the method depends on
/** {@inheritDoc} */
@Override public void run() {
    IgniteLogger log = localNode.log();

    log.info("Start local rebalancing caches");

    for (String cacheName : localNode.cacheNames()) {
        IgniteCache<?, ?> cache = localNode.cache(cacheName);

        assertNotNull(cache);

        boolean finished;

        log.info("Start rebalancing cache: " + cacheName + ", size: " + cache.localSize());

        do {
            IgniteFuture<?> rebalance = cache.rebalance();

            log.info("Wait rebalancing cache: " + cacheName + " - " + rebalance);

            finished = (Boolean)rebalance.get();

            log.info("Rebalancing cache: " + cacheName + " - " + rebalance);

            if (finished) {
                // Sum primary and backup sizes so the logged size is a single number.
                log.info("Finished rebalancing cache: " + cacheName + ", size: " +
                    (cache.localSize(CachePeekMode.PRIMARY) + cache.localSize(CachePeekMode.BACKUP)));
            }
            else
                log.info("Rescheduled rebalancing cache: " + cacheName + ", size: " + cache.localSize());
        }
        while (!finished);
    }

    log.info("Finished local rebalancing caches");
}
Example 8: execute
import org.apache.ignite.IgniteLogger; // import the package/class the method depends on
/**
 * @param log Logger.
 */
private void execute(IgniteLogger log) {
    try {
        log.info("Started execute.");

        // Count down the shared job latch so that the main thread knows that all jobs are
        // inside the "execute" routine.
        jobLatch.countDown();

        log.info("After job latch.");

        // Wait for the main thread to allow jobs to proceed.
        latch.await();

        log.info("After latch.");

        if (awaitMasterLeaveCallback) {
            latch0.await();

            log.info("After latch0.");
        }
        else
            log.info("Latch 0 skipped.");
    }
    catch (InterruptedException e) {
        // We do not expect any interruptions here, hence this statement.
        fail("Unexpected exception: " + e);
    }
}
Example 9: onMasterLeave
import org.apache.ignite.IgniteLogger; // import the package/class the method depends on
/**
 * @param log Logger.
 * @param job Actual job.
 */
private void onMasterLeave(IgniteLogger log, Object job) {
    log.info("Callback executed: " + job);

    latch0.countDown();

    invokeLatch.countDown();
}
Example 10: testToString
import org.apache.ignite.IgniteLogger; // import the package/class the method depends on
/**
 * @throws Exception If failed.
 */
public void testToString() throws Exception {
    TestClass1 obj = new TestClass1();

    IgniteLogger log = log();

    log.info(obj.toStringManual());
    log.info(obj.toStringAutomatic());

    assertEquals(obj.toStringManual(), obj.toStringAutomatic());
}
Example 11: HadoopShuffleJob
import org.apache.ignite.IgniteLogger; // import the package/class the method depends on
/**
 * @param locReduceAddr Local reducer address.
 * @param log Logger.
 * @param job Job.
 * @param mem Memory.
 * @param totalReducerCnt Amount of reducers in the job.
 * @param locReducers Reducers that will work on the current node.
 * @param locMappersCnt Number of mappers running on the given node.
 * @param embedded Whether shuffle is running in embedded mode.
 * @throws IgniteCheckedException If an error occurs.
 */
public HadoopShuffleJob(T locReduceAddr, IgniteLogger log, HadoopJobEx job, GridUnsafeMemory mem,
    int totalReducerCnt, int[] locReducers, int locMappersCnt, boolean embedded) throws IgniteCheckedException {
    this.locReduceAddr = locReduceAddr;
    this.totalReducerCnt = totalReducerCnt;
    this.job = job;
    this.mem = mem;
    this.log = log.getLogger(HadoopShuffleJob.class);
    this.embedded = embedded;

    boolean stripeMappers0 = get(job.info(), SHUFFLE_MAPPER_STRIPED_OUTPUT, true);

    if (stripeMappers0) {
        if (!embedded) {
            if (log.isInfoEnabled())
                log.info("Striped mapper output is disabled because it cannot be used in external mode [jobId=" +
                    job.id() + ']');

            stripeMappers0 = false;
        }
    }

    stripeMappers = stripeMappers0;

    msgSize = get(job.info(), SHUFFLE_MSG_SIZE, DFLT_SHUFFLE_MSG_SIZE);
    msgGzip = get(job.info(), SHUFFLE_MSG_GZIP, DFLT_SHUFFLE_MSG_GZIP);

    locReducersCtx = new AtomicReferenceArray<>(totalReducerCnt);

    if (!F.isEmpty(locReducers)) {
        for (int rdc : locReducers) {
            HadoopTaskInfo taskInfo = new HadoopTaskInfo(HadoopTaskType.REDUCE, job.id(), rdc, 0, null);

            locReducersCtx.set(rdc, new LocalTaskContextProxy(taskInfo));
        }
    }

    needPartitioner = totalReducerCnt > 1;

    // Size of the local map is always equal to the total reducer count to allow index-based lookup.
    locMaps = new AtomicReferenceArray<>(totalReducerCnt);

    // Size of the remote map:
    // - If there are no local mappers, then we will not send anything, so set it to 0;
    // - If output is not striped, then match it to the total reducer count, the same way as for local maps;
    // - If output is striped, then multiply the previous value by the number of local mappers.
    int rmtMapsSize = locMappersCnt == 0 ? 0 : totalReducerCnt;

    if (stripeMappers)
        rmtMapsSize *= locMappersCnt;

    rmtMaps = new AtomicReferenceArray<>(rmtMapsSize);

    msgs = new HadoopShuffleMessage[rmtMapsSize];

    throttle = get(job.info(), SHUFFLE_JOB_THROTTLE, 0);
}
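To make the remote-map sizing rule above concrete, a small worked example (the counts are arbitrary illustration values):
// locMappersCnt = 0                          -> rmtMapsSize = 0 (nothing will be sent)
// locMappersCnt = 4, totalReducerCnt = 8:
//   striped output disabled                  -> rmtMapsSize = 8 (same sizing as the local map)
//   striped output enabled                   -> rmtMapsSize = 8 * 4 = 32 (one slot per reducer per local mapper)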
Example 12: setUp
import org.apache.ignite.IgniteLogger; // import the package/class the method depends on
/** {@inheritDoc} */
@Override public void setUp(BenchmarkConfiguration cfg) throws Exception {
    super.setUp(cfg);

    aff = ignite().affinity(cache().getName());

    Collection<ClusterNode> nodes = ignite().cluster().forServers().nodes();

    stripesCnt = ignite().cluster().forServers().forRandom().metrics().getTotalCpus();

    srvrCnt = nodes.size();

    IgniteLogger log = ignite().log();

    if (log.isInfoEnabled())
        log.info("Servers info [srvrsCnt=" + srvrCnt + ", stripesCnt=" + stripesCnt + ']');
}
Example 13: test
import org.apache.ignite.IgniteLogger; // import the package/class the method depends on
/** {@inheritDoc} */
@Override public boolean test(Map<Object, Object> ctx) throws Exception {
    List<Map<Integer, Integer>> putMaps = (List<Map<Integer, Integer>>)ctx.get(PUT_MAPS_KEY);

    if (putMaps == null) {
        putMaps = new ArrayList<>(PUT_MAPS_CNT);

        ctx.put(PUT_MAPS_KEY, putMaps);
    }

    Map<Integer, Integer> vals;

    if (putMaps.size() == PUT_MAPS_CNT)
        vals = putMaps.get(nextRandom(PUT_MAPS_CNT));
    else {
        vals = new TreeMap<>();

        ClusterNode node = args.collocated() ? aff.mapKeyToNode(nextRandom(args.range())) : null;

        Map<ClusterNode, Integer> stripesMap = null;

        if (args.singleStripe())
            stripesMap = U.newHashMap(srvrCnt);

        while (vals.size() < args.batch()) {
            int key = nextRandom(args.range());

            if (args.collocated() && !aff.isPrimary(node, key))
                continue;

            if (args.singleStripe()) {
                int part = aff.partition(key);

                ClusterNode node0 = node != null ? node : aff.mapPartitionToNode(part);

                Integer stripe0 = stripesMap.get(node0);

                int stripe = part % stripesCnt;

                if (stripe0 != null) {
                    if (stripe0 != stripe)
                        continue;
                }
                else
                    stripesMap.put(node0, stripe);
            }

            vals.put(key, key);
        }

        putMaps.add(vals);

        if (putMaps.size() == PUT_MAPS_CNT) {
            IgniteLogger log = ignite().log();

            if (log.isInfoEnabled())
                log.info("Put maps set generated.");
        }
    }

    IgniteCache<Integer, Object> cache = cacheForOperation();

    cache.putAll(vals);

    return true;
}
Example 14: awaitPartitionMapExchange
import org.apache.ignite.IgniteLogger; // import the package/class the method depends on
/**
 * Awaits partition map exchange.
 *
 * @param ignite Ignite.
 * @throws Exception If failed.
 */
@SuppressWarnings("BusyWait")
protected static void awaitPartitionMapExchange(Ignite ignite) throws Exception {
    IgniteLogger log = ignite.log();

    log.info("Waiting for finishing of a partition exchange on node: " + ignite);

    IgniteKernal kernal = (IgniteKernal)ignite;

    while (true) {
        boolean partitionsExchangeFinished = true;

        for (IgniteInternalCache<?, ?> cache : kernal.cachesx(null)) {
            log.info("Checking cache: " + cache.name());

            GridCacheAdapter<?, ?> c = kernal.internalCache(cache.name());

            if (!(c instanceof GridDhtCacheAdapter))
                break;

            GridDhtCacheAdapter<?, ?> dht = (GridDhtCacheAdapter<?, ?>)c;

            GridDhtPartitionFullMap partMap = dht.topology().partitionMap(true);

            for (Map.Entry<UUID, GridDhtPartitionMap> e : partMap.entrySet()) {
                log.info("Checking node: " + e.getKey());

                for (Map.Entry<Integer, GridDhtPartitionState> e1 : e.getValue().entrySet()) {
                    if (e1.getValue() != GridDhtPartitionState.OWNING) {
                        log.info("Undesired state [id=" + e1.getKey() + ", state=" + e1.getValue() + ']');

                        partitionsExchangeFinished = false;

                        break;
                    }
                }

                if (!partitionsExchangeFinished)
                    break;
            }

            if (!partitionsExchangeFinished)
                break;
        }

        if (partitionsExchangeFinished)
            return;

        Thread.sleep(100);
    }
}
Example 15: dumpStatistics
import org.apache.ignite.IgniteLogger; // import the package/class the method depends on
/** {@inheritDoc} */
@Override public void dumpStatistics(IgniteLogger log) {
    long dataPages = 0;

    final boolean dumpBucketsInfo = false;

    for (int b = 0; b < BUCKETS; b++) {
        long size = bucketsSize[b].longValue();

        if (!isReuseBucket(b))
            dataPages += size;

        if (dumpBucketsInfo) {
            Stripe[] stripes = getBucket(b);

            boolean empty = true;

            if (stripes != null) {
                for (Stripe stripe : stripes) {
                    if (!stripe.empty) {
                        empty = false;

                        break;
                    }
                }
            }

            if (log.isInfoEnabled())
                log.info("Bucket [b=" + b +
                    ", size=" + size +
                    ", stripes=" + (stripes != null ? stripes.length : 0) +
                    ", stripesEmpty=" + empty + ']');
        }
    }

    if (dataPages > 0) {
        if (log.isInfoEnabled())
            log.info("FreeList [name=" + name +
                ", buckets=" + BUCKETS +
                ", dataPages=" + dataPages +
                ", reusePages=" + bucketsSize[REUSE_BUCKET].longValue() + "]");
    }
}