This article collects typical usage examples of the Java method org.apache.ignite.internal.util.typedef.internal.U.quietAndWarn. If you are unsure what U.quietAndWarn does, how to call it, or what real-world usage looks like, the curated examples below should help. You can also explore further usage examples of its enclosing class, org.apache.ignite.internal.util.typedef.internal.U.
The following presents 11 code examples of the U.quietAndWarn method, sorted by popularity by default.
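Before the examples, here is a minimal illustrative sketch of the two call shapes that recur below. It is not taken from the Ignite sources: warnSketch is a hypothetical helper, the message texts are placeholders, and log is assumed to be an org.apache.ignite.IgniteLogger available to the caller.
import org.apache.ignite.IgniteLogger;
import org.apache.ignite.internal.util.typedef.internal.U;

/** Illustrative only: warnSketch is a hypothetical helper, the message texts are placeholders. */
static void warnSketch(IgniteLogger log) {
    // Single-message form: one warning text, as used by most of the examples below.
    U.quietAndWarn(log, "Persistence store is configured, but write-through is disabled.");

    // Long/short form, as used in examples 8 and 9 below: a detailed message plus a
    // shorter variant that appears to be intended for quiet (console) output.
    U.quietAndWarn(log,
        "Write-behind is enabled but write-through is disabled, so the persistence store " +
            "will never be updated. Consider fixing the cache configuration.",
        "Write-behind requires write-through to be enabled for the cache.");
}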
Example 1: checkClientQueueSize
import org.apache.ignite.internal.util.typedef.internal.U; // Import the package/class this method depends on
/**
 * Checks client message queue size and initiates client drop if message queue size exceeds the configured limit.
 *
 * @param ses Node communication session.
 * @param msgQueueSize Message queue size.
 */
private void checkClientQueueSize(GridNioSession ses, int msgQueueSize) {
    if (slowClientQueueLimit > 0 && msgQueueSize > slowClientQueueLimit) {
        ConnectionKey id = ses.meta(CONN_IDX_META);

        if (id != null) {
            ClusterNode node = getSpiContext().node(id.nodeId());

            if (node != null && node.isClient()) {
                String msg = "Client node outbound message queue size exceeded slowClientQueueLimit, " +
                    "the client will be dropped " +
                    "(consider changing 'slowClientQueueLimit' configuration property) " +
                    "[srvNode=" + getSpiContext().localNode().id() +
                    ", clientNode=" + node +
                    ", slowClientQueueLimit=" + slowClientQueueLimit + ']';

                U.quietAndWarn(log, msg);

                getSpiContext().failNode(id.nodeId(), msg);
            }
        }
    }
}
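The slowClientQueueLimit referenced in the warning is an ordinary TcpCommunicationSpi property. As a rough, illustrative sketch using the public configuration API (the limit value is arbitrary, not taken from this article's code):
import org.apache.ignite.configuration.IgniteConfiguration;
import org.apache.ignite.spi.communication.tcp.TcpCommunicationSpi;

/** Illustrative only: enables the slow-client check with an arbitrary limit of 1000 messages. */
static IgniteConfiguration withSlowClientQueueLimit() {
    TcpCommunicationSpi commSpi = new TcpCommunicationSpi();

    // Values of zero or below leave the check in checkClientQueueSize above disabled.
    commSpi.setSlowClientQueueLimit(1000);

    return new IgniteConfiguration().setCommunicationSpi(commSpi);
}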
Example 2: initCache
import org.apache.ignite.internal.util.typedef.internal.U; // Import the package/class this method depends on
/**
 * Init cache.
 */
@SuppressWarnings("unchecked")
void initCache() {
    cache = webSesIgnite.cache(cacheName);
    binaryCache = webSesIgnite.cache(cacheName);

    if (cache == null)
        throw new IgniteException("Cache for web sessions is not started (is it configured?): " + cacheName);

    CacheConfiguration cacheCfg = cache.getConfiguration(CacheConfiguration.class);

    if (cacheCfg.getWriteSynchronizationMode() == FULL_ASYNC)
        throw new IgniteException("Cache for web sessions cannot be in FULL_ASYNC mode: " + cacheName);

    if (!cacheCfg.isEagerTtl())
        throw new IgniteException("Cache for web sessions cannot operate with lazy TTL. " +
            "Consider setting eagerTtl to true for cache: " + cacheName);

    if (cacheCfg.getCacheMode() == LOCAL)
        U.quietAndWarn(webSesIgnite.log(), "Using LOCAL cache for web sessions caching " +
            "(this is only OK in test mode): " + cacheName);

    if (cacheCfg.getCacheMode() == PARTITIONED && cacheCfg.getAtomicityMode() != ATOMIC)
        U.quietAndWarn(webSesIgnite.log(), "Using " + cacheCfg.getAtomicityMode() + " atomicity for web sessions " +
            "caching (switch to ATOMIC mode for better performance)");

    txEnabled = cacheCfg.getAtomicityMode() == TRANSACTIONAL;
}
Example 3: getRegisteredAddresses
import org.apache.ignite.internal.util.typedef.internal.U; // Import the package/class this method depends on
/** {@inheritDoc} */
@Override public synchronized Collection<InetSocketAddress> getRegisteredAddresses() {
    if (mcastAddr != null && reqItfs != null) {
        Collection<InetSocketAddress> ret;

        if (reqItfs.size() > 1)
            ret = requestAddresses(reqItfs);
        else {
            T2<Collection<InetSocketAddress>, Boolean> res = requestAddresses(mcastAddr, F.first(reqItfs));

            ret = res.get1();

            mcastErr |= res.get2();
        }

        if (ret.isEmpty()) {
            if (mcastErr && firstReq) {
                if (super.getRegisteredAddresses().isEmpty()) {
                    InetSocketAddress addr = new InetSocketAddress("localhost", TcpDiscoverySpi.DFLT_PORT);

                    U.quietAndWarn(log, "TcpDiscoveryMulticastIpFinder failed to initialize multicast, " +
                        "will use default address: " + addr);

                    registerAddresses(Collections.singleton(addr));
                }
                else
                    U.quietAndWarn(log, "TcpDiscoveryMulticastIpFinder failed to initialize multicast, " +
                        "will use pre-configured addresses.");
            }
        }
        else
            registerAddresses(ret);

        firstReq = false;
    }

    return super.getRegisteredAddresses();
}
Example 4: onUndeploy0
import org.apache.ignite.internal.util.typedef.internal.U; // Import the package/class this method depends on
/**
 * @param ldr Loader.
 * @param cacheCtx Cache context.
 */
private void onUndeploy0(final ClassLoader ldr, final GridCacheContext<K, V> cacheCtx) {
    GridCacheAdapter<K, V> cache = cacheCtx.cache();

    Collection<KeyCacheObject> keys = new ArrayList<>();

    addEntries(ldr, keys, cache);

    if (cache.isNear())
        addEntries(ldr, keys, (((GridNearCacheAdapter)cache).dht()));

    if (log.isDebugEnabled())
        log.debug("Finished searching keys for undeploy [keysCnt=" + keys.size() + ']');

    cache.clearLocally(keys, true);

    if (cacheCtx.isNear())
        cacheCtx.near().dht().clearLocally(keys, true);

    // Examine swap for entries to undeploy.
    int swapUndeployCnt = cacheCtx.offheap().onUndeploy(ldr);

    if (cacheCtx.userCache() && (!keys.isEmpty() || swapUndeployCnt != 0)) {
        U.quietAndWarn(log, "");
        U.quietAndWarn(
            log,
            "Cleared all cache entries for undeployed class loader [cacheName=" + cacheCtx.name() +
                ", undeployCnt=" + keys.size() + ", swapUndeployCnt=" + swapUndeployCnt +
                ", clsLdr=" + ldr.getClass().getName() + ']');
        U.quietAndWarn(
            log,
            " ^-- Cache auto-undeployment happens in SHARED deployment mode " +
                "(to turn off, switch to CONTINUOUS mode)");
        U.quietAndWarn(log, "");
    }

    // Avoid class caching issues inside classloader.
    globalLdr = new CacheClassLoader();
}
Example 5: checkPhysicalRam
import org.apache.ignite.internal.util.typedef.internal.U; // Import the package/class this method depends on
/**
 * Checks whether physical RAM is not exceeded.
 */
@SuppressWarnings("ConstantConditions")
private void checkPhysicalRam() {
    long ram = ctx.discovery().localNode().attribute(ATTR_PHY_RAM);

    if (ram != -1) {
        String macs = ctx.discovery().localNode().attribute(ATTR_MACS);

        long totalHeap = 0;
        long totalOffheap = 0;

        for (ClusterNode node : ctx.discovery().allNodes()) {
            if (macs.equals(node.attribute(ATTR_MACS))) {
                long heap = node.metrics().getHeapMemoryMaximum();
                Long offheap = node.<Long>attribute(ATTR_OFFHEAP_SIZE);

                if (heap != -1)
                    totalHeap += heap;

                if (offheap != null)
                    totalOffheap += offheap;
            }
        }

        long total = totalHeap + totalOffheap;

        if (total < 0)
            total = Long.MAX_VALUE;

        // 4GB or 20% of available memory is expected to be used by OS and user applications
        long safeToUse = ram - Math.max(4L << 30, (long)(ram * 0.2));

        if (total > safeToUse) {
            U.quietAndWarn(log, "Nodes started on local machine require more than 80% of physical RAM what can " +
                "lead to significant slowdown due to swapping (please decrease JVM heap size, data region " +
                "size or checkpoint buffer size) [required=" + (total >> 20) + "MB, available=" +
                (ram >> 20) + "MB]");
        }
    }
}
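To make the threshold concrete (an illustrative calculation, not taken from the Ignite sources): on a machine with 16 GB of physical RAM, Math.max(4L << 30, (long)(ram * 0.2)) evaluates to 4 GB, so safeToUse is 12 GB and the warning fires once the combined maximum heap plus off-heap size of all local nodes exceeds 12 GB. The 80% figure in the message is exact only when 20% of RAM is at least 4 GB, i.e. on machines with 20 GB of RAM or more; on smaller machines the effective limit is RAM minus 4 GB.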
Example 6: spiStart
import org.apache.ignite.internal.util.typedef.internal.U; // Import the package/class this method depends on
/** {@inheritDoc} */
@Override public void spiStart(String igniteInstanceName) throws IgniteSpiException {
    synchronized (mux) {
        spiState = DISCONNECTED;
    }

    utilityPool = new IgniteThreadPoolExecutor("disco-pool",
        spi.ignite().name(),
        0,
        1,
        2000,
        new LinkedBlockingQueue<Runnable>());

    if (debugMode) {
        if (!log.isInfoEnabled())
            throw new IgniteSpiException("Info log level should be enabled for TCP discovery to work " +
                "in debug mode.");

        debugLogQ = new ConcurrentLinkedDeque<>();

        U.quietAndWarn(log, "TCP discovery SPI is configured in debug mode.");
    }

    // Clear addresses collections.
    fromAddrs.clear();
    noResAddrs.clear();

    msgWorker = new RingMessageWorker();
    msgWorker.start();

    if (tcpSrvr == null)
        tcpSrvr = new TcpServer();

    spi.initLocalNode(tcpSrvr.port, true);

    locNode = spi.locNode;

    // Start TCP server thread after local node is initialized.
    tcpSrvr.start();

    ring.localNode(locNode);

    if (spi.ipFinder.isShared())
        registerLocalNodeAddress();
    else {
        if (F.isEmpty(spi.ipFinder.getRegisteredAddresses()))
            throw new IgniteSpiException("Non-shared IP finder must have IP addresses specified in " +
                "TcpDiscoveryIpFinder.getRegisteredAddresses() configuration property " +
                "(specify list of IP addresses in configuration).");

        ipFinderHasLocAddr = spi.ipFinderHasLocalAddress();
    }

    if (spi.getStatisticsPrintFrequency() > 0 && log.isInfoEnabled()) {
        statsPrinter = new StatisticsPrinter();
        statsPrinter.start();
    }

    spi.stats.onJoinStarted();

    joinTopology();

    spi.stats.onJoinFinished();

    if (spi.ipFinder.isShared()) {
        ipFinderCleaner = new IpFinderCleaner();
        ipFinderCleaner.start();
    }

    spi.printStartInfo();
}
Example 7: start0
import org.apache.ignite.internal.util.typedef.internal.U; // Import the package/class this method depends on
/** {@inheritDoc} */
@Override public void start0() throws IgniteCheckedException {
    if (!cctx.kernalContext().clientNode()) {
        final PdsFolderSettings resolveFolders = cctx.kernalContext().pdsFolderResolver().resolveFolders();

        checkWalConfiguration();

        walWorkDir = initDirectory(
            dsCfg.getWalPath(),
            DataStorageConfiguration.DFLT_WAL_PATH,
            resolveFolders.folderName(),
            "write ahead log work directory"
        );

        walArchiveDir = initDirectory(
            dsCfg.getWalArchivePath(),
            DataStorageConfiguration.DFLT_WAL_ARCHIVE_PATH,
            resolveFolders.folderName(),
            "write ahead log archive directory"
        );

        serializer = new RecordSerializerFactoryImpl(cctx).createSerializer(serializerVer);

        GridCacheDatabaseSharedManager dbMgr = (GridCacheDatabaseSharedManager)cctx.database();

        metrics = dbMgr.persistentStoreMetricsImpl();

        checkOrPrepareFiles();

        IgniteBiTuple<Long, Long> tup = scanMinMaxArchiveIndices();

        lastTruncatedArchiveIdx = tup == null ? -1 : tup.get1() - 1;

        archiver = new FileArchiver(tup == null ? -1 : tup.get2());

        if (dsCfg.isWalCompactionEnabled()) {
            compressor = new FileCompressor();

            decompressor = new FileDecompressor();

            if (!cctx.kernalContext().clientNode())
                decompressor.start();
        }

        if (mode != WALMode.NONE) {
            if (log.isInfoEnabled())
                log.info("Started write-ahead log manager [mode=" + mode + ']');
        }
        else
            U.quietAndWarn(log, "Started write-ahead log manager in NONE mode, persisted data may be lost in " +
                "a case of unexpected node failure. Make sure to deactivate the cluster before shutdown.");
    }
}
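For context, here is an illustrative sketch of a configuration that would take the else branch above (native persistence enabled while the WAL is switched off). It uses the public DataStorageConfiguration API and is not taken from this article's code:
import org.apache.ignite.configuration.DataStorageConfiguration;
import org.apache.ignite.configuration.IgniteConfiguration;
import org.apache.ignite.configuration.WALMode;

/** Illustrative only: a configuration that would make start0() above log the NONE-mode warning. */
static IgniteConfiguration walDisabledConfig() {
    DataStorageConfiguration dsCfg = new DataStorageConfiguration();

    // Native persistence is enabled for the default data region...
    dsCfg.getDefaultDataRegionConfiguration().setPersistenceEnabled(true);

    // ...but the write-ahead log is switched off entirely.
    dsCfg.setWalMode(WALMode.NONE);

    return new IgniteConfiguration().setDataStorageConfiguration(dsCfg);
}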
Example 8: start0
import org.apache.ignite.internal.util.typedef.internal.U; // Import the package/class this method depends on
/** {@inheritDoc} */
@Override protected void start0() throws IgniteCheckedException {
    if (store instanceof LifecycleAware) {
        try {
            // Avoid second start() call on store in case when near cache is enabled.
            if (cctx.config().isWriteBehindEnabled()) {
                if (!cctx.isNear())
                    ((LifecycleAware)store).start();
            }
        }
        catch (Exception e) {
            throw new IgniteCheckedException("Failed to start cache store: " + e, e);
        }
    }

    CacheConfiguration cfg = cctx.config();

    if (cfgStore != null) {
        if (!cfg.isWriteThrough() && !cfg.isReadThrough()) {
            U.quietAndWarn(log,
                "Persistence store is configured, but both read-through and write-through are disabled. This " +
                    "configuration makes sense if the store implements loadCache method only. If this is the " +
                    "case, ignore this warning. Otherwise, fix the configuration for the cache: " + cfg.getName(),
                "Persistence store is configured, but both read-through and write-through are disabled " +
                    "for cache: " + cfg.getName());
        }

        if (!cfg.isWriteThrough() && cfg.isWriteBehindEnabled()) {
            U.quietAndWarn(log,
                "To enable write-behind mode for the cache store it's also required to set " +
                    "CacheConfiguration.setWriteThrough(true) property, otherwise the persistence " +
                    "store will be never updated. Consider fixing configuration for the cache: " + cfg.getName(),
                "Write-behind mode for the cache store also requires CacheConfiguration.setWriteThrough(true) " +
                    "property. Fix configuration for the cache: " + cfg.getName());
        }

        if (cctx.group().persistenceEnabled() && (cfg.isWriteThrough() || cfg.isReadThrough()))
            U.quietAndWarn(log,
                "Both Ignite native persistence and CacheStore are configured for cache '" + cfg.getName() + "'. " +
                    "This configuration does not guarantee strict consistency between CacheStore and Ignite data " +
                    "storage upon restarts. Consult documentation for more details.");
    }

    sesLsnrs = CU.startStoreSessionListeners(cctx.kernalContext(), cfg.getCacheStoreSessionListenerFactories());

    if (sesLsnrs == null) {
        sesLsnrs = cctx.shared().storeSessionListeners();

        globalSesLsnrs = true;
    }
}
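As an illustration of the second warning above, a cache configured with write-behind but without write-through would look roughly like the following hedged sketch. MyCacheStore is a hypothetical CacheStore<Integer, String> implementation introduced only for this example; nothing here comes from the article's code:
import javax.cache.configuration.FactoryBuilder;
import org.apache.ignite.configuration.CacheConfiguration;

/** Illustrative only: MyCacheStore is a hypothetical CacheStore<Integer, String> implementation. */
static CacheConfiguration<Integer, String> writeBehindWithoutWriteThrough() {
    CacheConfiguration<Integer, String> ccfg = new CacheConfiguration<>("myCache");

    ccfg.setCacheStoreFactory(FactoryBuilder.factoryOf(MyCacheStore.class));

    // Write-behind is on while write-through stays off, which is exactly the
    // combination the second warning in start0() above complains about.
    ccfg.setWriteBehindEnabled(true);
    ccfg.setWriteThrough(false);
    ccfg.setReadThrough(true);

    return ccfg;
}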
Example 9: doLog
import org.apache.ignite.internal.util.typedef.internal.U; // Import the package/class this method depends on
@Override public void doLog(IgniteLogger log, String longMsg, String shortMsg, Throwable e, boolean quiet) {
    if (quiet)
        U.quietAndWarn(log, longMsg, F.isEmpty(shortMsg) ? longMsg : shortMsg);
    else
        U.warn(log, longMsg, F.isEmpty(shortMsg) ? longMsg : shortMsg);
}
Example 10: createHadoopComponent
import org.apache.ignite.internal.util.typedef.internal.U; // Import the package/class this method depends on
/**
 * Create Hadoop component.
 *
 * @return Non-null Hadoop component: workable or no-op.
 * @throws IgniteCheckedException If the component is mandatory and cannot be initialized.
 */
private HadoopProcessorAdapter createHadoopComponent() throws IgniteCheckedException {
    boolean mandatory = cfg.getHadoopConfiguration() != null;

    if (mandatory) {
        if (cfg.isPeerClassLoadingEnabled())
            throw new IgniteCheckedException("Hadoop module cannot be used with peer class loading enabled " +
                "(set IgniteConfiguration.peerClassLoadingEnabled to \"false\").");

        HadoopProcessorAdapter res = IgniteComponentType.HADOOP.createIfInClassPath(ctx, true);

        res.validateEnvironment();

        return res;
    }
    else {
        HadoopProcessorAdapter cmp = null;

        if (!ctx.hadoopHelper().isNoOp() && cfg.isPeerClassLoadingEnabled()) {
            U.warn(log, "Hadoop module is found in classpath, but will not be started because peer class " +
                "loading is enabled (set IgniteConfiguration.peerClassLoadingEnabled to \"false\" if you want " +
                "to use Hadoop module).");
        }
        else {
            cmp = IgniteComponentType.HADOOP.createIfInClassPath(ctx, false);

            try {
                cmp.validateEnvironment();
            }
            catch (IgniteException | IgniteCheckedException e) {
                U.quietAndWarn(log, "Hadoop module will not start due to exception: " + e.getMessage());

                cmp = null;
            }
        }

        if (cmp == null)
            cmp = IgniteComponentType.HADOOP.create(ctx, true);

        return cmp;
    }
}
Example 11: resolvePageSizeFromPartitionFile
import org.apache.ignite.internal.util.typedef.internal.U; // Import the package/class this method depends on
/**
 * @param partFile Partition file.
 */
private int resolvePageSizeFromPartitionFile(Path partFile) throws IOException, IgniteCheckedException {
    try (FileIO fileIO = persistenceCfg.getFileIOFactory().create(partFile.toFile())) {
        int minimalHdr = FilePageStore.HEADER_SIZE;

        if (fileIO.size() < minimalHdr)
            throw new IgniteCheckedException("Partition file is too small: " + partFile);

        ByteBuffer hdr = ByteBuffer.allocate(minimalHdr).order(ByteOrder.LITTLE_ENDIAN);

        while (hdr.remaining() > 0)
            fileIO.read(hdr);

        hdr.rewind();

        hdr.getLong(); // Read signature.

        hdr.getInt(); // Read version.

        hdr.get(); // Read type.

        int pageSize = hdr.getInt();

        if (pageSize == 2048) {
            U.quietAndWarn(log, "You are currently using persistent store with 2K pages (DataStorageConfiguration#" +
                "pageSize). If you use SSD disk, consider migrating to 4K pages for better IO performance.");
        }

        return pageSize;
    }
}