This article collects typical usage examples of the Java class org.apache.commons.lang3.concurrent.BasicThreadFactory. If you are unsure what BasicThreadFactory is for, how to use it, or want to see it in real code, the curated examples below should help.

The BasicThreadFactory class belongs to the org.apache.commons.lang3.concurrent package. Fifteen code examples are shown below, sorted by popularity by default.
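Before the project examples, here is a minimal, self-contained sketch (written for this article, not taken from any of the projects below) of the builder pattern that all of the examples share: configure a naming pattern, daemon flag, and priority, then hand the factory to one of the java.util.concurrent executor factory methods. The class name BasicThreadFactoryDemo and the naming pattern demo-worker-%d are illustrative only.

import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.TimeUnit;
import org.apache.commons.lang3.concurrent.BasicThreadFactory;

public class BasicThreadFactoryDemo {

    public static void main( String[] args ) throws InterruptedException {
        // Threads created by this factory get sequentially numbered names
        // (demo-worker-1, demo-worker-2, ...), run as daemon threads,
        // and use normal priority.
        BasicThreadFactory factory = new BasicThreadFactory.Builder()
                .namingPattern( "demo-worker-%d" )
                .daemon( true )
                .priority( Thread.NORM_PRIORITY )
                .build();

        // Any executor that accepts a java.util.concurrent.ThreadFactory can use it.
        ExecutorService pool = Executors.newFixedThreadPool( 2, factory );
        pool.execute( () -> System.out.println( Thread.currentThread().getName() ) );

        pool.shutdown();
        pool.awaitTermination( 5, TimeUnit.SECONDS );
    }
}

The examples that follow apply the same idea to fixed pools, scheduled pools, and ThreadPoolExecutor instances in real projects.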
Example 1: TransferManager

import org.apache.commons.lang3.concurrent.BasicThreadFactory; // import the required package/class
/**
 *
 * Very transient
 *
 * @param timeOutSeconds
 * @param numberOfThreads
 * @param outputWriter
 */
public TransferManager( Application csapApp, int timeOutSeconds, BufferedWriter outputWriter ) {
    this.csapApp = csapApp;
    logger.debug( "Number of workers: {}", csapApp.lifeCycleSettings().getNumberWorkerThreads() );
    this.timeOutSeconds = timeOutSeconds;
    osCommandRunner = new OsCommandRunner( timeOutSeconds, 1, "TransferMgr" );
    this.globalWriterForResults = outputWriter;

    updateProgress( "\nExecuting distribution using : " + csapApp.lifeCycleSettings().getNumberWorkerThreads() + " threads.\n\n" );

    BasicThreadFactory schedFactory = new BasicThreadFactory.Builder()
            .namingPattern( "CsapFileTransfer-%d" )
            .daemon( true )
            .priority( Thread.NORM_PRIORITY )
            .build();

    fileTransferService = Executors.newFixedThreadPool( csapApp.lifeCycleSettings().getNumberWorkerThreads(), schedFactory );
    fileTransferComplete = new ExecutorCompletionService<String>( fileTransferService );
}
Example 2: CsapEventClient

import org.apache.commons.lang3.concurrent.BasicThreadFactory; // import the required package/class
public CsapEventClient( ) {
    BasicThreadFactory eventThreadFactory = new BasicThreadFactory.Builder()
            .namingPattern( "CsapEventPost-%d" )
            .daemon( true )
            .priority( Thread.NORM_PRIORITY + 1 )
            .build();

    eventPostQueue = new ArrayBlockingQueue<>( MAX_EVENT_BACKLOG );

    // Use a single thread to sequence and post
    // eventPostPool = Executors.newFixedThreadPool(1, schedFactory, queue);
    // really only needs to be 1 - adding the others for lt scenario
    eventPostPool = new ThreadPoolExecutor( 1, 1,
            30, TimeUnit.SECONDS,
            eventPostQueue, eventThreadFactory );

    eventPostCompletionService = new ExecutorCompletionService<String>(
            eventPostPool );
}
Example 3: scheduleCollection

import org.apache.commons.lang3.concurrent.BasicThreadFactory; // import the required package/class
protected void scheduleCollection( Runnable collector ) {
    // Thread commandThread = new Thread( this );
    // commandThread.start();
    String scheduleName = collector.getClass().getSimpleName() + "_" + collectionIntervalSeconds;

    BasicThreadFactory schedFactory = new BasicThreadFactory.Builder()
            .namingPattern( scheduleName + "-%d" )
            .daemon( true )
            .priority( Thread.NORM_PRIORITY )
            .build();

    // Single collection thread
    scheduledExecutorService = Executors
            .newScheduledThreadPool( 1, schedFactory );

    int initialSleep = 10;
    if ( this.collectionIntervalSeconds >= 60 ) {
        initialSleep += 30 + rg.nextInt( 30 );
    }

    scheduledExecutorService
            .scheduleAtFixedRate( collector, initialSleep, collectionIntervalSeconds, TimeUnit.SECONDS );

    logger.info( "Adding Job: {}", scheduleName );
}
Example 4: init

import org.apache.commons.lang3.concurrent.BasicThreadFactory; // import the required package/class
@PostConstruct
public void init() {
    scheduledExecutorService = new ScheduledThreadPoolExecutor(1,
            new BasicThreadFactory.Builder().namingPattern("SendNodeServerInfo-schedule-pool-%d").daemon(true).build());
    scheduledExecutorService.scheduleAtFixedRate(() ->
    {
        // write the load information to ZK
        if (!CollectionUtils.isEmpty(dataCenterChannelStore.getAllChannels())) {
            dataCenterChannelStore.getAllChannels().stream().forEach(e -> {
                log.info("channel id:{}, {}", e.id(), e);
            });
        }
        applicationEventPublisher.publishEvent(
                NodeServerInfoEvent.builder()
                        .name(goPushNodeServerConfig.getName())
                        .nodeServerInfo(watch())
                        .build());
        // already written to zk, so sending NodeInfoReq is not strictly necessary
        nodeSender.send(NodeInfoReq.builder().build());
    }
    , delay, delay, TimeUnit.MILLISECONDS);
}
Example 5: create

import org.apache.commons.lang3.concurrent.BasicThreadFactory; // import the required package/class
public GremlinExecutor create() {
    final BasicThreadFactory threadFactory = new BasicThreadFactory.Builder().namingPattern("gremlin-executor-default-%d").build();

    final AtomicBoolean poolCreatedByBuilder = new AtomicBoolean();
    final AtomicBoolean suppliedExecutor = new AtomicBoolean(true);
    final AtomicBoolean suppliedScheduledExecutor = new AtomicBoolean(true);

    final ExecutorService es = Optional.ofNullable(executorService).orElseGet(() -> {
        poolCreatedByBuilder.set(true);
        suppliedExecutor.set(false);
        return Executors.newScheduledThreadPool(4, threadFactory);
    });
    executorService = es;

    final ScheduledExecutorService ses = Optional.ofNullable(scheduledExecutorService).orElseGet(() -> {
        // if the pool is created by the builder and we need another just re-use it, otherwise create
        // a new one of those guys
        suppliedScheduledExecutor.set(false);
        return (poolCreatedByBuilder.get()) ?
                (ScheduledExecutorService) es : Executors.newScheduledThreadPool(4, threadFactory);
    });
    scheduledExecutorService = ses;

    return new GremlinExecutor(this, suppliedExecutor.get(), suppliedScheduledExecutor.get());
}
Example 6: initDbData

import org.apache.commons.lang3.concurrent.BasicThreadFactory; // import the required package/class
/**
 * Initialize application data
 */
private static void initDbData(final MyTvData data) {
    final TvService tvService = new TvServiceImpl();
    makeCache(tvService);

    // start the crawl task
    ExecutorService executorService = Executors
            .newSingleThreadExecutor(new BasicThreadFactory.Builder()
                    .namingPattern("Mytv_Crawl_Task_%d").build());
    executorService.execute(new Runnable() {
        @Override
        public void run() {
            runCrawlTask(data, tvService);
        }
    });
    executorService.shutdown();

    // start the daily scheduled crawl task
    logger.info("create everyday crawl task.");
    createEverydayCron(data, tvService);
}
Example 7: NonAccumulatingGarbageCollectorMetricSet

import org.apache.commons.lang3.concurrent.BasicThreadFactory; // import the required package/class
/**
 * Constructor sets up the scheduled executor service that runs a background task to
 * calculate non-accumulating gauge readings at periodic intervals.
 *
 * @param garbageCollectorMetricSet a metric set that collects counts and times of garbage collections
 * @param interval                  the time interval over which to calculate non-accumulating gauge readings
 *                                  for all the gauges in {@code garbageCollectorMetricSet}
 * @param scheduledExecutorService  scheduled executor service that runs the task to calculate
 *                                  non-accumulating gauge readings at a frequency determined by
 *                                  {@code interval}.
 */
public NonAccumulatingGarbageCollectorMetricSet(
        GarbageCollectorMetricSet garbageCollectorMetricSet, long interval,
        ScheduledExecutorService scheduledExecutorService) {
    this.garbageCollectorMetricSet = garbageCollectorMetricSet;
    this.interval = interval;
    previousValues = new HashMap<String, Long>();
    nonAccumulatingValues = new ConcurrentHashMap<String, Long>();

    if (scheduledExecutorService == null) {
        BasicThreadFactory basicThreadFactory = new BasicThreadFactory.Builder()
                .namingPattern("metrics-gc-stats-update-%d")
                .daemon(false)
                .priority(Thread.NORM_PRIORITY)
                .build();
        this.scheduledExecutorService = Executors.newSingleThreadScheduledExecutor(basicThreadFactory);
    } else {
        this.scheduledExecutorService = scheduledExecutorService;
    }

    scheduleBackgroundCollectionOfNonAccumulatingValues();
}
Author: gburton1 | Project: metrics-jvm-nonaccumulating | Lines: 31 | Source: NonAccumulatingGarbageCollectorMetricSet.java
Example 8: createConsumerThreadPool

import org.apache.commons.lang3.concurrent.BasicThreadFactory; // import the required package/class
@Override
public ExecutorService createConsumerThreadPool(int numberOfThreads, int queueCapacity) {
    BasicThreadFactory threadFactory = new BasicThreadFactory.Builder()
            .namingPattern("msb-consumer-thread-%d")
            .build();

    BlockingQueue<Runnable> queue;
    if (queueCapacity == QUEUE_SIZE_UNLIMITED) {
        queue = new LinkedBlockingQueue<>();
    } else {
        queue = new ArrayBlockingQueue<>(queueCapacity);
    }

    return new ThreadPoolExecutor(numberOfThreads, numberOfThreads,
            0L, TimeUnit.MILLISECONDS,
            queue,
            threadFactory);
}
Example 9: FrontierContainer

import org.apache.commons.lang3.concurrent.BasicThreadFactory; // import the required package/class
public FrontierContainer(Job job, Accumulators accumulators, MandrelClient client) {
    super(accumulators, job, client);
    context.setDefinition(job);

    // Init stores
    MetadataStore metadatastore = job.getDefinition().getStores().getMetadataStore().build(context);
    metadatastore.init();
    MetadataStores.add(job.getId(), metadatastore);

    // Init frontier
    frontier = job.getDefinition().getFrontier().build(context);

    // Revisitor
    BasicThreadFactory threadFactory = new BasicThreadFactory.Builder().namingPattern("frontier-" + job.getId() + "-%d").daemon(true)
            .priority(Thread.MAX_PRIORITY).build();
    executor = Executors.newFixedThreadPool(1, threadFactory);
    revisitor = new Revisitor(frontier, metadatastore);
    executor.submit(revisitor);

    current.set(ContainerStatus.INITIATED);
}
Example 10: Fetcher

import org.apache.commons.lang3.concurrent.BasicThreadFactory; // import the required package/class
public Fetcher( E environment, Class<I> inputType, Class<U> unitType ) {
    this.environment = environment;
    handlerCreator = new SourceHandlerCreator<>( inputType, unitType );
    this.inputType = inputType;
    this.unitType = unitType;
    this.optimizer = ThreadCountOptimizer.withDefaultStrategies( environment );
    this.sourceWatchdogInterval = ( new DurationParser() ).parse(
            environment.getConfiguration().getJSONObject( "fetcher" ).getString( "source-watchdog-interval" ) );

    ThreadFactory threads = new BasicThreadFactory.Builder().namingPattern( "FetchThread[initial]" ).build();
    fetchPool = new ThreadPoolExecutor(
            1, 1, // thread count is set to the real initial value on the first run()
            0L, TimeUnit.MILLISECONDS,
            new LinkedBlockingQueue<Runnable>(),
            threads
    );
}
Example 11: startQueryExpirer

import org.apache.commons.lang3.concurrent.BasicThreadFactory; // import the required package/class
private void startQueryExpirer() {
    ThreadFactory factory = new BasicThreadFactory.Builder()
            .namingPattern("QueryExpirer-%d")
            .daemon(true)
            .priority(Thread.NORM_PRIORITY)
            .build();
    queryExpirer = Executors.newSingleThreadScheduledExecutor(factory);

    long expiryRunInterval = conf.getLong(QUERY_EXPIRY_INTERVAL_MILLIS, DEFAULT_QUERY_EXPIRY_INTERVAL_MILLIS);
    queryExpirer.scheduleWithFixedDelay(new Runnable() {
        @Override
        public void run() {
            try {
                expireQueries();
            } catch (Exception e) {
                incrCounter(QUERY_EXPIRY_FAILURE_COUNTER);
                log.error("Unable to expire queries", e);
            }
        }
    }, expiryRunInterval, expiryRunInterval, TimeUnit.MILLISECONDS);

    log.info("Enabled periodic expiry of queries at {} millis interval", expiryRunInterval);
}
Example 12: ZkOffsetStorageImpl

import org.apache.commons.lang3.concurrent.BasicThreadFactory; // import the required package/class
public ZkOffsetStorageImpl(LogInfoStorage logInfoStorage,
                           AsyncCuratorFramework asyncCuratorFramework) {
    this.logInfoStorage = logInfoStorage;
    this.asyncCuratorFramework = asyncCuratorFramework;
    offsetThreadPool = Executors.newSingleThreadExecutor(
            new BasicThreadFactory.Builder().uncaughtExceptionHandler((t, e) -> logger
                    .error("Uncaught exception of thread :" + t.getClass().getName(), e))
                    .build());
}
Example 13: HostStatusManager

import org.apache.commons.lang3.concurrent.BasicThreadFactory; // import the required package/class
public HostStatusManager(
        Application csapApplication,
        int numberOfThreads,
        ArrayList<String> hostsToQuery ) {
    this.csapApp = csapApplication;
    csapApp.loadCacheFromDisk( getAlertHistory(), this.getClass().getSimpleName() );

    alertThrottleTimer = CsapSimpleCache.builder(
            csapApplication.getCsapCoreService().getAlerts().getThrottle().getFrequency(),
            CsapSimpleCache.parseTimeUnit(
                    csapApplication.getCsapCoreService().getAlerts().getThrottle().getTimeUnit(),
                    TimeUnit.HOURS ),
            HostStatusManager.class,
            "Global Alert Throttle" );

    logger.warn( "Constructed with thread count: {}, connectionTimeout: {} Host Count: {}, \n Hosts: {}, \n Alert: {}",
            numberOfThreads, this.connectionTimeoutSeconds, hostsToQuery.size(), hostsToQuery,
            csapApplication.getCsapCoreService().getAlerts() );

    BasicThreadFactory statusFactory = new BasicThreadFactory.Builder()
            .namingPattern( "CsapHostStatus-%d" )
            .daemon( true )
            .priority( Thread.NORM_PRIORITY )
            .build();

    hostStatusWorkers = Executors.newFixedThreadPool( numberOfThreads, statusFactory );
    hostStatusService = new ExecutorCompletionService<AgentStatus>( hostStatusWorkers );
    hostList = new CopyOnWriteArrayList<String>( hostsToQuery );

    initialize_refresh_worker();
    restartHostRefreshTimer( 3 );
}
Example 14: initialize_refresh_worker

import org.apache.commons.lang3.concurrent.BasicThreadFactory; // import the required package/class
private void initialize_refresh_worker() {
    BasicThreadFactory schedFactory = new BasicThreadFactory.Builder()
            .namingPattern( "CsapHostJobsScheduler-%d" )
            .daemon( true )
            .priority( Thread.NORM_PRIORITY )
            .build();

    hostStatusScheduler = Executors.newScheduledThreadPool( 1, schedFactory );
}
Example 15: init

import org.apache.commons.lang3.concurrent.BasicThreadFactory; // import the required package/class
@PostConstruct
public void init() {
    scheduledExecutorService = new ScheduledThreadPoolExecutor(1,
            new BasicThreadFactory.Builder().namingPattern("SendDataCenterInfo-schedule-pool-%d").daemon(true).build());
    scheduledExecutorService.scheduleAtFixedRate(() -> applicationEventPublisher.publishEvent(DataCenterInfoEvent.builder()
            .name(goPushDataCenterConfig.getName())
            .dataCenterInfo(watch())
            .build()), delay, delay, TimeUnit.MILLISECONDS);
}