本文整理汇总了Java中org.apache.hadoop.hbase.DaemonThreadFactory类的典型用法代码示例。如果您正苦于以下问题:Java DaemonThreadFactory类的具体用法?Java DaemonThreadFactory怎么用?Java DaemonThreadFactory使用的例子?那么, 这里精选的类代码示例或许可以为您提供帮助。
DaemonThreadFactory类属于org.apache.hadoop.hbase包,在下文中一共展示了DaemonThreadFactory类的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Java代码示例。
示例1: FlushTableSubprocedurePool
import org.apache.hadoop.hbase.DaemonThreadFactory; //导入依赖的package包/类
/**
 * Pool of daemon threads used to run flush-table subprocedure tasks on a region server.
 *
 * @param name region server name, used to label the worker threads
 * @param conf configuration supplying the flush timeout (ms) and max concurrent tasks
 * @param abortable abort hook for unrecoverable failures
 */
FlushTableSubprocedurePool(String name, Configuration conf, Abortable abortable) {
  this.abortable = abortable;
  // configure the executor service
  long keepAlive = conf.getLong(
    RegionServerFlushTableProcedureManager.FLUSH_TIMEOUT_MILLIS_KEY,
    RegionServerFlushTableProcedureManager.FLUSH_TIMEOUT_MILLIS_DEFAULT);
  int threads = conf.getInt(CONCURENT_FLUSH_TASKS_KEY, DEFAULT_CONCURRENT_FLUSH_TASKS);
  this.name = name;
  // FIX: with an unbounded LinkedBlockingQueue a ThreadPoolExecutor never creates more
  // than corePoolSize threads, so core=1 silently capped the pool at a single thread
  // regardless of the configured max. Use core == max so "threads" actually applies,
  // and let idle core threads time out after keepAlive ms so an idle pool shrinks.
  executor = new ThreadPoolExecutor(threads, threads, keepAlive, TimeUnit.MILLISECONDS,
      new LinkedBlockingQueue<Runnable>(), new DaemonThreadFactory("rs("
          + name + ")-flush-proc-pool"));
  executor.allowCoreThreadTimeOut(true);
  taskPool = new ExecutorCompletionService<Void>(executor);
}
示例2: SnapshotSubprocedurePool
import org.apache.hadoop.hbase.DaemonThreadFactory; //导入依赖的package包/类
/**
 * Pool of daemon threads used to run snapshot subprocedure tasks on a region server.
 *
 * @param name region server name, used to label the worker threads
 * @param conf configuration supplying the snapshot timeout (ms) and max concurrent tasks
 * @param abortable abort hook for unrecoverable failures
 */
SnapshotSubprocedurePool(String name, Configuration conf, Abortable abortable) {
  this.abortable = abortable;
  // configure the executor service
  long keepAlive = conf.getLong(
    RegionServerSnapshotManager.SNAPSHOT_TIMEOUT_MILLIS_KEY,
    RegionServerSnapshotManager.SNAPSHOT_TIMEOUT_MILLIS_DEFAULT);
  int threads = conf.getInt(CONCURENT_SNAPSHOT_TASKS_KEY, DEFAULT_CONCURRENT_SNAPSHOT_TASKS);
  this.name = name;
  // FIX: with an unbounded LinkedBlockingQueue a ThreadPoolExecutor never grows past
  // corePoolSize, so core=1 capped the pool at one thread and the configured max was
  // never reached. Use core == max and allow idle core threads to time out.
  executor = new ThreadPoolExecutor(threads, threads, keepAlive, TimeUnit.MILLISECONDS,
      new LinkedBlockingQueue<Runnable>(), new DaemonThreadFactory("rs("
          + name + ")-snapshot-pool"));
  executor.allowCoreThreadTimeOut(true);
  taskPool = new ExecutorCompletionService<Void>(executor);
}
示例3: start
import org.apache.hadoop.hbase.DaemonThreadFactory; //导入依赖的package包/类
@Override
public void start() {
  // Fixed pool of handlerCount daemon handler threads. The bounded queue plus
  // CallerRunsPolicy applies back-pressure: when the queue is full, the submitting
  // thread runs the task itself instead of dropping it.
  ThreadPoolExecutor pool = new ThreadPoolExecutor(
      handlerCount, handlerCount, 60L, TimeUnit.SECONDS,
      new ArrayBlockingQueue<Runnable>(maxQueueLength),
      new DaemonThreadFactory("FifoRpcScheduler.handler"),
      new ThreadPoolExecutor.CallerRunsPolicy());
  this.executor = pool;
}
示例4: SimpleSubprocedurePool
import org.apache.hadoop.hbase.DaemonThreadFactory; //导入依赖的package包/类
public SimpleSubprocedurePool(String name, Configuration conf) {
this.name = name;
executor = new ThreadPoolExecutor(1, 1, 500, TimeUnit.SECONDS,
new LinkedBlockingQueue<Runnable>(),
new DaemonThreadFactory("rs(" + name + ")-procedure-pool"));
taskPool = new ExecutorCompletionService<Void>(executor);
}
示例5: TwoConcurrentActionPolicy
import org.apache.hadoop.hbase.DaemonThreadFactory; //导入依赖的package包/类
public TwoConcurrentActionPolicy(long sleepTime, Action[] actionsOne, Action[] actionsTwo) {
super(sleepTime);
this.actionsOne = actionsOne;
this.actionsTwo = actionsTwo;
executor = Executors.newFixedThreadPool(2,
new DaemonThreadFactory("TwoConcurrentAction-"));
}
示例6: SnapshotSubprocedurePool
import org.apache.hadoop.hbase.DaemonThreadFactory; //导入依赖的package包/类
/**
 * Pool of daemon threads used to run snapshot subprocedure tasks on a region server.
 *
 * @param name region server name, used to label the worker threads
 * @param conf configuration supplying the snapshot timeout (ms) and max concurrent tasks
 */
SnapshotSubprocedurePool(String name, Configuration conf) {
  // configure the executor service
  long keepAlive = conf.getLong(
    RegionServerSnapshotManager.SNAPSHOT_TIMEOUT_MILLIS_KEY,
    RegionServerSnapshotManager.SNAPSHOT_TIMEOUT_MILLIS_DEFAULT);
  int threads = conf.getInt(CONCURENT_SNAPSHOT_TASKS_KEY, DEFAULT_CONCURRENT_SNAPSHOT_TASKS);
  this.name = name;
  // FIX: with an unbounded LinkedBlockingQueue a ThreadPoolExecutor never creates
  // more than corePoolSize threads, so core=1 capped the pool at one thread. Use
  // core == max so the configured thread count applies, and let idle core threads
  // time out after keepAlive ms.
  executor = new ThreadPoolExecutor(threads, threads, keepAlive, TimeUnit.MILLISECONDS,
      new LinkedBlockingQueue<Runnable>(), new DaemonThreadFactory("rs("
          + name + ")-snapshot-pool"));
  executor.allowCoreThreadTimeOut(true);
  taskPool = new ExecutorCompletionService<Void>(executor);
}
示例7: FlushTableSubprocedurePool
import org.apache.hadoop.hbase.DaemonThreadFactory; //导入依赖的package包/类
/**
 * Pool of daemon threads used to run flush-table subprocedure tasks on a region server.
 *
 * @param name region server name, used to label the worker threads
 * @param conf configuration supplying the flush timeout (ms) and max concurrent tasks
 */
FlushTableSubprocedurePool(String name, Configuration conf) {
  // configure the executor service
  long keepAlive = conf.getLong(
    RegionServerFlushTableProcedureManager.FLUSH_TIMEOUT_MILLIS_KEY,
    RegionServerFlushTableProcedureManager.FLUSH_TIMEOUT_MILLIS_DEFAULT);
  int threads = conf.getInt(CONCURENT_FLUSH_TASKS_KEY, DEFAULT_CONCURRENT_FLUSH_TASKS);
  this.name = name;
  // FIX: with an unbounded LinkedBlockingQueue a ThreadPoolExecutor never grows past
  // corePoolSize, so core=1 capped the pool at one thread and the configured max was
  // never used. Use core == max and allow idle core threads to time out.
  executor = new ThreadPoolExecutor(threads, threads, keepAlive, TimeUnit.MILLISECONDS,
      new LinkedBlockingQueue<Runnable>(), new DaemonThreadFactory("rs("
          + name + ")-flush-proc-pool"));
  executor.allowCoreThreadTimeOut(true);
  taskPool = new ExecutorCompletionService<Void>(executor);
}
示例8: LogRollBackupSubprocedurePool
import org.apache.hadoop.hbase.DaemonThreadFactory; //导入依赖的package包/类
/**
 * Pool of daemon threads used to run log-roll backup subprocedure tasks on a region server.
 *
 * @param name region server name, used to label the worker threads
 * @param conf configuration supplying the backup timeout (ms) and max concurrent tasks
 */
public LogRollBackupSubprocedurePool(String name, Configuration conf) {
  // configure the executor service
  long keepAlive =
      conf.getLong(LogRollRegionServerProcedureManager.BACKUP_TIMEOUT_MILLIS_KEY,
        LogRollRegionServerProcedureManager.BACKUP_TIMEOUT_MILLIS_DEFAULT);
  int threads = conf.getInt(CONCURENT_BACKUP_TASKS_KEY, DEFAULT_CONCURRENT_BACKUP_TASKS);
  this.name = name;
  // FIX 1: the timeout key is in milliseconds ("..._MILLIS_KEY") but was passed with
  // TimeUnit.SECONDS, inflating the keep-alive 1000x.
  // FIX 2: with an unbounded LinkedBlockingQueue a ThreadPoolExecutor never grows
  // past corePoolSize, so core=1 capped the pool at one thread; use core == max and
  // allow idle core threads to time out.
  executor =
      new ThreadPoolExecutor(threads, threads, keepAlive, TimeUnit.MILLISECONDS,
          new LinkedBlockingQueue<>(), new DaemonThreadFactory("rs(" + name
              + ")-backup-pool"));
  executor.allowCoreThreadTimeOut(true);
  taskPool = new ExecutorCompletionService<>(executor);
}
示例9: ZKPermissionWatcher
import org.apache.hadoop.hbase.DaemonThreadFactory; //导入依赖的package包/类
/**
 * Watches the ZooKeeper ACL node for permission updates.
 *
 * @param watcher shared ZooKeeper watcher
 * @param authManager manager that receives permission updates
 * @param conf configuration; "zookeeper.znode.acl.parent" overrides the ACL node name
 */
public ZKPermissionWatcher(ZKWatcher watcher,
    TableAuthManager authManager, Configuration conf) {
  super(watcher);
  this.authManager = authManager;
  // A single daemon thread applies updates so ZK events are handled one at a time,
  // in arrival order.
  executor = Executors.newSingleThreadExecutor(
      new DaemonThreadFactory("zk-permission-watcher"));
  String parent = conf.get("zookeeper.znode.acl.parent", ACL_NODE);
  this.aclZNode = ZNodePaths.joinZNode(watcher.znodePaths.baseZNode, parent);
}
示例10: FlushTableSubprocedurePool
import org.apache.hadoop.hbase.DaemonThreadFactory; //导入依赖的package包/类
/**
 * Pool of daemon threads used to run flush-table subprocedure tasks on a region server.
 *
 * @param name region server name, used to label the worker threads
 * @param conf configuration supplying the flush timeout (ms) and max concurrent tasks
 * @param abortable abort hook for unrecoverable failures
 */
FlushTableSubprocedurePool(String name, Configuration conf, Abortable abortable) {
  this.abortable = abortable;
  this.name = name;
  // The flush timeout doubles as the worker keep-alive, both in milliseconds.
  final long keepAliveMillis = conf.getLong(
      RegionServerFlushTableProcedureManager.FLUSH_TIMEOUT_MILLIS_KEY,
      RegionServerFlushTableProcedureManager.FLUSH_TIMEOUT_MILLIS_DEFAULT);
  final int poolSize = conf.getInt(CONCURENT_FLUSH_TASKS_KEY, DEFAULT_CONCURRENT_FLUSH_TASKS);
  // core == max so the pool can reach the configured size despite the unbounded
  // queue; idle core threads time out so the pool shrinks when work dries up.
  executor = new ThreadPoolExecutor(poolSize, poolSize, keepAliveMillis,
      TimeUnit.MILLISECONDS, new LinkedBlockingQueue<>(),
      new DaemonThreadFactory("rs(" + name + ")-flush-proc-pool"));
  executor.allowCoreThreadTimeOut(true);
  taskPool = new ExecutorCompletionService<>(executor);
}
示例11: SnapshotSubprocedurePool
import org.apache.hadoop.hbase.DaemonThreadFactory; //导入依赖的package包/类
/**
 * Pool of daemon threads used to run snapshot subprocedure tasks on a region server.
 *
 * @param name region server name, used to label the worker threads
 * @param conf configuration supplying the snapshot timeout (ms) and max concurrent tasks
 * @param abortable abort hook for unrecoverable failures
 */
SnapshotSubprocedurePool(String name, Configuration conf, Abortable abortable) {
  this.abortable = abortable;
  this.name = name;
  // The snapshot timeout doubles as the worker keep-alive, both in milliseconds.
  final long keepAliveMillis = conf.getLong(
      RegionServerSnapshotManager.SNAPSHOT_TIMEOUT_MILLIS_KEY,
      RegionServerSnapshotManager.SNAPSHOT_TIMEOUT_MILLIS_DEFAULT);
  final int poolSize =
      conf.getInt(CONCURENT_SNAPSHOT_TASKS_KEY, DEFAULT_CONCURRENT_SNAPSHOT_TASKS);
  // core == max so the pool can reach the configured size despite the unbounded
  // queue; idle core threads time out so the pool shrinks when work dries up.
  executor = new ThreadPoolExecutor(poolSize, poolSize, keepAliveMillis,
      TimeUnit.MILLISECONDS, new LinkedBlockingQueue<>(),
      new DaemonThreadFactory("rs(" + name + ")-snapshot-pool"));
  executor.allowCoreThreadTimeOut(true);
  taskPool = new ExecutorCompletionService<>(executor);
}
示例12: start
import org.apache.hadoop.hbase.DaemonThreadFactory; //导入依赖的package包/类
@Override
public void start() {
  // Fixed pool of handlerCount daemon handler threads. The bounded queue plus
  // CallerRunsPolicy applies back-pressure: when the queue is full, the submitting
  // thread runs the task itself instead of dropping it.
  ThreadPoolExecutor pool = new ThreadPoolExecutor(
      handlerCount, handlerCount, 60L, TimeUnit.SECONDS,
      new ArrayBlockingQueue<>(maxQueueLength),
      new DaemonThreadFactory("FifoRpcScheduler.handler"),
      new ThreadPoolExecutor.CallerRunsPolicy());
  this.executor = pool;
}
示例13: SimpleSubprocedurePool
import org.apache.hadoop.hbase.DaemonThreadFactory; //导入依赖的package包/类
public SimpleSubprocedurePool(String name, Configuration conf) {
this.name = name;
executor = new ThreadPoolExecutor(1, 1, 500, TimeUnit.SECONDS,
new LinkedBlockingQueue<>(),
new DaemonThreadFactory("rs(" + name + ")-procedure-pool"));
taskPool = new ExecutorCompletionService<>(executor);
}
示例14: defaultPool
import org.apache.hadoop.hbase.DaemonThreadFactory; //导入依赖的package包/类
/**
 * Builds the default thread pool for the procedure coordinator.
 * <p>
 * A {@link SynchronousQueue} hands each task directly to a thread, so the pool grows
 * on demand up to {@code opThreads}; idle threads die after {@code keepAliveTime}
 * seconds.
 *
 * @param coordName coordinator name, used to label the worker threads
 * @param keepAliveTime idle-thread keep-alive, in seconds
 * @param opThreads maximum number of worker threads
 * @param wakeFrequency accepted for interface compatibility; unused in this pool setup
 * @return a daemon-threaded executor for coordinator operations
 */
public static ThreadPoolExecutor defaultPool(String coordName, long keepAliveTime, int opThreads,
    long wakeFrequency) {
  DaemonThreadFactory factory =
      new DaemonThreadFactory("(" + coordName + ")-proc-coordinator-pool");
  return new ThreadPoolExecutor(1, opThreads, keepAliveTime, TimeUnit.SECONDS,
      new SynchronousQueue<Runnable>(), factory);
}
示例15: defaultPool
import org.apache.hadoop.hbase.DaemonThreadFactory; //导入依赖的package包/类
/**
 * Builds the default thread pool for a procedure member.
 * <p>
 * A {@link SynchronousQueue} hands each task directly to a thread, so the pool grows
 * on demand up to {@code procThreads}; idle threads die after {@code keepAlive} seconds.
 *
 * @param wakeFrequency accepted for interface compatibility; unused in this pool setup
 * @param keepAlive idle-thread keep-alive, in seconds
 * @param procThreads maximum number of worker threads
 * @param memberName member name, used to label the worker threads
 * @return a daemon-threaded executor for subprocedure work
 */
public static ThreadPoolExecutor defaultPool(long wakeFrequency, long keepAlive,
    int procThreads, String memberName) {
  DaemonThreadFactory factory =
      new DaemonThreadFactory("member: '" + memberName + "' subprocedure-pool");
  return new ThreadPoolExecutor(1, procThreads, keepAlive, TimeUnit.SECONDS,
      new SynchronousQueue<Runnable>(), factory);
}