This article collects typical usage examples of the Java class org.apache.hadoop.hbase.Stoppable. If you are wondering what Stoppable is, how it is used, and what real code that uses it looks like, the curated class examples below may help.
The Stoppable class belongs to the org.apache.hadoop.hbase package. Fifteen code examples are presented below, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Java code examples.
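For reference before diving in: Stoppable is a two-method interface — stop(String why) requests shutdown, and isStopped() reports whether shutdown has been requested. A minimal sketch of an implementation (SimpleStoppable is an illustrative name; Examples 9, 10, and 12 below inline exactly this pattern anonymously):

public class SimpleStoppable implements Stoppable {
  private volatile boolean stopped = false; // volatile: flag is read from other threads

  @Override
  public void stop(String why) {
    this.stopped = true;
  }

  @Override
  public boolean isStopped() {
    return this.stopped;
  }
}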
Example 1: SplitLogManager
import org.apache.hadoop.hbase.Stoppable; // import the required package/class
/**
 * It's OK to construct this object even when region servers are not online. It looks up
 * orphan tasks in the coordination engine, but it does not block waiting for them to finish.
 * @param server the server instance
 * @param conf the HBase configuration
 * @param stopper the stoppable in case anything is wrong
 * @param master the master services
 * @param serverName the master server name
 * @throws IOException
 */
public SplitLogManager(Server server, Configuration conf, Stoppable stopper,
    MasterServices master, ServerName serverName) throws IOException {
  this.server = server;
  this.conf = conf;
  this.stopper = stopper;
  this.choreService = new ChoreService(serverName.toString() + "_splitLogManager_");
  if (server.getCoordinatedStateManager() != null) {
    SplitLogManagerCoordination coordination =
        ((BaseCoordinatedStateManager) server.getCoordinatedStateManager())
            .getSplitLogManagerCoordination();
    Set<String> failedDeletions = Collections.synchronizedSet(new HashSet<String>());
    SplitLogManagerDetails details =
        new SplitLogManagerDetails(tasks, master, failedDeletions, serverName);
    coordination.setDetails(details);
    coordination.init();
    // Determine recovery mode
  }
  this.unassignedTimeout =
      conf.getInt("hbase.splitlog.manager.unassigned.timeout", DEFAULT_UNASSIGNED_TIMEOUT);
  this.timeoutMonitor =
      new TimeoutMonitor(conf.getInt("hbase.splitlog.manager.timeoutmonitor.period", 1000),
          stopper);
  choreService.scheduleChore(timeoutMonitor);
}
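TimeoutMonitor is a private inner chore not shown by this example. A rough sketch of how such a chore cooperates with ChoreService and a Stoppable, assuming the HBase 1.x ScheduledChore API (name, stopper, period) that the constructor above relies on:

// Sketch only: the real TimeoutMonitor resubmits timed-out split-log tasks.
class TimeoutMonitorSketch extends ScheduledChore {
  TimeoutMonitorSketch(int period, Stoppable stopper) {
    // a chore built with a stopper stops running once stopper.isStopped() is true
    super("SplitLogManager Timeout Monitor", stopper, period);
  }

  @Override
  protected void chore() {
    // periodic work goes here
  }
}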
Example 2: startOperation
import org.apache.hadoop.hbase.Stoppable; // import the required package/class
/**
 * Starts the operation if an operation with the same nonce has not already succeeded. If such
 * an operation is currently in progress, waits for it to end and checks whether it succeeded.
 * @param group Nonce group.
 * @param nonce Nonce.
 * @param stoppable Stoppable that terminates waiting (if any) when the server is stopped.
 * @return true if the operation has not already succeeded and can proceed; false otherwise.
 */
public boolean startOperation(long group, long nonce, Stoppable stoppable)
    throws InterruptedException {
  if (nonce == HConstants.NO_NONCE) return true;
  NonceKey nk = new NonceKey(group, nonce);
  OperationContext ctx = new OperationContext();
  while (true) {
    OperationContext oldResult = nonces.putIfAbsent(nk, ctx);
    if (oldResult == null) return true;
    // Collision with some operation - should be extremely rare.
    synchronized (oldResult) {
      int oldState = oldResult.getState();
      LOG.debug("Conflict detected by nonce: " + nk + ", " + oldResult);
      if (oldState != OperationContext.WAIT) {
        return oldState == OperationContext.PROCEED; // operation ended
      }
      oldResult.setHasWait();
      oldResult.wait(this.conflictWaitIterationMs); // operation is still active... wait and loop
      if (stoppable.isStopped()) {
        throw new InterruptedException("Server stopped");
      }
    }
  }
}
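A hypothetical caller sketch of the start/end protocol this method implements. Here endOperation(group, nonce, success) is assumed to be the companion method that records the outcome and wakes waiters, and applyMutation() is a placeholder for the guarded work:

if (!nonceManager.startOperation(nonceGroup, nonce, stoppable)) {
  return; // an operation with this nonce already succeeded; don't re-apply it
}
boolean success = false;
try {
  applyMutation(); // placeholder for the actual work
  success = true;
} finally {
  // assumed companion call: records the final state for future retries
  nonceManager.endOperation(nonceGroup, nonce, success);
}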
Example 3: testSavesFilesOnRequest
import org.apache.hadoop.hbase.Stoppable; // import the required package/class
@Test
public void testSavesFilesOnRequest() throws Exception {
  Stoppable stop = new StoppableImplementation();
  Configuration conf = UTIL.getConfiguration();
  Path testDir = UTIL.getDataTestDir();
  FileSystem fs = UTIL.getTestFileSystem();
  String confKey = "hbase.test.cleaner.delegates";
  conf.set(confKey, NeverDelete.class.getName());
  AllValidPaths chore = new AllValidPaths("test-file-cleaner", stop, conf, fs, testDir, confKey);
  // create the directory layout in the directory to clean
  Path parent = new Path(testDir, "parent");
  Path file = new Path(parent, "someFile");
  fs.mkdirs(parent);
  // touch a new file
  fs.create(file).close();
  assertTrue("Test file didn't get created.", fs.exists(file));
  // run the chore
  chore.chore();
  // verify nothing was deleted: the NeverDelete delegate vetoes every file
  assertTrue("File got deleted even though the delegate said to keep it", fs.exists(file));
  assertTrue("Directory got deleted even though the delegate said to keep it", fs.exists(parent));
}
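NeverDelete (and AlwaysDelete, used in the next example) are small test delegates not shown here. A plausible sketch, assuming they extend BaseHFileCleanerDelegate and decide per file via isFileDeletable(FileStatus), as in HBase's own cleaner tests (FileStatus is org.apache.hadoop.fs.FileStatus):

static class NeverDelete extends BaseHFileCleanerDelegate {
  @Override
  public boolean isFileDeletable(FileStatus fStat) {
    return false; // veto every deletion, so the chore must keep all files
  }
}

static class AlwaysDelete extends BaseHFileCleanerDelegate {
  @Override
  public boolean isFileDeletable(FileStatus fStat) {
    return true; // approve every deletion
  }
}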
Example 4: testStoppedCleanerDoesNotDeleteFiles
import org.apache.hadoop.hbase.Stoppable; // import the required package/class
@Test
public void testStoppedCleanerDoesNotDeleteFiles() throws Exception {
  Stoppable stop = new StoppableImplementation();
  Configuration conf = UTIL.getConfiguration();
  Path testDir = UTIL.getDataTestDir();
  FileSystem fs = UTIL.getTestFileSystem();
  String confKey = "hbase.test.cleaner.delegates";
  conf.set(confKey, AlwaysDelete.class.getName());
  AllValidPaths chore = new AllValidPaths("test-file-cleaner", stop, conf, fs, testDir, confKey);
  // also create a file in the top level directory
  Path topFile = new Path(testDir, "topFile");
  fs.create(topFile).close();
  assertTrue("Test file didn't get created.", fs.exists(topFile));
  // stop the chore
  stop.stop("testing stop");
  // run the chore
  chore.chore();
  // test that the file still exists
  assertTrue("File got deleted while chore was stopped", fs.exists(topFile));
}
Example 5: testStopWaiting
import org.apache.hadoop.hbase.Stoppable; // import the required package/class
@Test
public void testStopWaiting() throws Exception {
  final ServerNonceManager nm = createManager();
  nm.setConflictWaitIterationMs(1);
  Stoppable stoppingStoppable = createStoppable();
  Mockito.when(stoppingStoppable.isStopped()).thenAnswer(new Answer<Boolean>() {
    AtomicInteger answer = new AtomicInteger(3);
    @Override
    public Boolean answer(InvocationOnMock invocation) throws Throwable {
      return 0 < answer.decrementAndGet();
    }
  });
  nm.startOperation(NO_NONCE, 1, createStoppable());
  TestRunnable tr = new TestRunnable(nm, 1, null, stoppingStoppable);
  Thread t = tr.start();
  waitForThreadToBlockOrExit(t);
  // thread must eventually throw
  t.join();
  tr.propagateError();
}
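createStoppable() is a test helper not shown in this example. A minimal sketch consistent with the Mockito usage above, assuming a mock whose isStopped() defaults to false until a test overrides it:

private Stoppable createStoppable() {
  Stoppable s = Mockito.mock(Stoppable.class);
  Mockito.when(s.isStopped()).thenReturn(false); // never stopped unless a test says otherwise
  return s;
}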
Example 6: getReplicationSource
import org.apache.hadoop.hbase.Stoppable; // import the required package/class
/**
 * Factory method to create a replication source
 * @param conf the configuration to use
 * @param fs the file system to use
 * @param manager the manager to use
 * @param stopper the stopper object for this region server
 * @param replicating the status of replication on this cluster
 * @param peerId the id of the peer cluster
 * @return the created source
 * @throws IOException
 */
public ReplicationSourceInterface getReplicationSource(
    final Configuration conf,
    final FileSystem fs,
    final ReplicationSourceManager manager,
    final Stoppable stopper,
    final AtomicBoolean replicating,
    final String peerId) throws IOException {
  ReplicationSourceInterface src;
  try {
    @SuppressWarnings("rawtypes")
    Class c = Class.forName(conf.get("replication.replicationsource.implementation",
        ReplicationSource.class.getCanonicalName()));
    src = (ReplicationSourceInterface) c.newInstance();
  } catch (Exception e) {
    LOG.warn("Passed replication source implementation throws errors, " +
        "defaulting to ReplicationSource", e);
    src = new ReplicationSource();
  }
  src.init(conf, fs, manager, stopper, replicating, peerId);
  return src;
}
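Because the factory reads the class name from the replication.replicationsource.implementation key, a custom source can be plugged in purely through configuration. A sketch, where com.example.MyReplicationSource is a hypothetical class that would need to implement ReplicationSourceInterface and expose a no-arg constructor, since the factory instantiates it reflectively via newInstance():

Configuration conf = HBaseConfiguration.create();
conf.set("replication.replicationsource.implementation",
    "com.example.MyReplicationSource"); // hypothetical implementation class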
Example 7: SplitLogManager
import org.apache.hadoop.hbase.Stoppable; // import the required package/class
/**
 * Wrapper around {@link #SplitLogManager(ZooKeeperWatcher, Configuration, Stoppable,
 * MasterServices, String, TaskFinisher)} that provides a task finisher for copying
 * recovered edits to their final destination. The task finisher has to be robust
 * because it can be arbitrarily restarted or called multiple times.
 *
 * @param zkw the ZK watcher
 * @param conf the HBase configuration
 * @param stopper the stoppable in case anything is wrong
 * @param master the master services
 * @param serverName the master server name
 */
public SplitLogManager(ZooKeeperWatcher zkw, final Configuration conf,
    Stoppable stopper, MasterServices master, String serverName) {
  this(zkw, conf, stopper, master, serverName, new TaskFinisher() {
    @Override
    public Status finish(String workerName, String logfile) {
      try {
        HLogSplitter.finishSplitLogFile(logfile, conf);
      } catch (IOException e) {
        LOG.warn("Could not finish splitting of log file " + logfile, e);
        return Status.ERR;
      }
      return Status.DONE;
    }
  });
}
Example 8: SplitLogManager
import org.apache.hadoop.hbase.Stoppable; // import the required package/class
/**
 * It's OK to construct this object even when region servers are not online. It looks up
 * orphan tasks in the coordination engine, but it does not block waiting for them to finish.
 * @param server the server instance
 * @param conf the HBase configuration
 * @param stopper the stoppable in case anything is wrong
 * @param master the master services
 * @param serverName the master server name
 * @throws IOException
 */
public SplitLogManager(Server server, Configuration conf, Stoppable stopper,
    MasterServices master, ServerName serverName) throws IOException {
  this.server = server;
  this.conf = conf;
  this.stopper = stopper;
  if (server.getCoordinatedStateManager() != null) {
    SplitLogManagerCoordination coordination =
        ((BaseCoordinatedStateManager) server.getCoordinatedStateManager())
            .getSplitLogManagerCoordination();
    Set<String> failedDeletions = Collections.synchronizedSet(new HashSet<String>());
    SplitLogManagerDetails details =
        new SplitLogManagerDetails(tasks, master, failedDeletions, serverName);
    // details must be set before init(), as in Example 1
    coordination.setDetails(details);
    coordination.init();
    // Determine recovery mode
  }
  this.unassignedTimeout =
      conf.getInt("hbase.splitlog.manager.unassigned.timeout", DEFAULT_UNASSIGNED_TIMEOUT);
  this.timeoutMonitor =
      new TimeoutMonitor(conf.getInt("hbase.splitlog.manager.timeoutmonitor.period", 1000),
          stopper);
  Threads.setDaemonThreadRunning(timeoutMonitor.getThread(), serverName
      + ".splitLogManagerTimeoutMonitor");
}
Example 9: createAndStart
import org.apache.hadoop.hbase.Stoppable; // import the required package/class
static MovedRegionsCleaner createAndStart(HRegionServer rs) {
  Stoppable stoppable = new Stoppable() {
    private volatile boolean isStopped = false;
    @Override
    public void stop(String why) {
      isStopped = true;
    }
    @Override
    public boolean isStopped() {
      return isStopped;
    }
  };
  return new MovedRegionsCleaner(rs, stoppable);
}
Example 10: createAndStart
import org.apache.hadoop.hbase.Stoppable; // import the required package/class
static DelayedClosing createAndStart(HConnectionImplementation hci) {
  Stoppable stoppable = new Stoppable() {
    private volatile boolean isStopped = false;
    @Override
    public void stop(String why) {
      isStopped = true;
    }
    @Override
    public boolean isStopped() {
      return isStopped;
    }
  };
  return new DelayedClosing(hci, stoppable);
}
Example 11: getReplicationSource
import org.apache.hadoop.hbase.Stoppable; // import the required package/class
/**
 * Factory method to create a replication source
 * @param conf the configuration to use
 * @param fs the file system to use
 * @param manager the manager to use
 * @param replicationQueues the replication queues to use
 * @param replicationPeers the replication peers to use
 * @param stopper the stopper object for this region server
 * @param peerId the id of the peer cluster
 * @param clusterId the UUID of this cluster
 * @return the created source
 * @throws IOException
 */
protected ReplicationSourceInterface getReplicationSource(final Configuration conf,
    final FileSystem fs, final ReplicationSourceManager manager,
    final ReplicationQueues replicationQueues, final ReplicationPeers replicationPeers,
    final Stoppable stopper, final String peerId, final UUID clusterId) throws IOException {
  ReplicationSourceInterface src;
  try {
    @SuppressWarnings("rawtypes")
    Class c = Class.forName(conf.get("replication.replicationsource.implementation",
        ReplicationSource.class.getCanonicalName()));
    src = (ReplicationSourceInterface) c.newInstance();
  } catch (Exception e) {
    LOG.warn("Passed replication source implementation throws errors, " +
        "defaulting to ReplicationSource", e);
    src = new ReplicationSource();
  }
  src.init(conf, fs, manager, replicationQueues, replicationPeers, stopper, peerId, clusterId);
  return src;
}
Example 12: RESTServlet
import org.apache.hadoop.hbase.Stoppable; // import the required package/class
/**
 * Constructor with existing configuration
 * @param conf existing configuration
 * @param realUser the login user
 */
RESTServlet(final Configuration conf,
    final UserGroupInformation realUser) {
  this.userProvider = UserProvider.instantiate(conf);
  stoppable = new Stoppable() {
    private volatile boolean isStopped = false;
    @Override public void stop(String why) { isStopped = true; }
    @Override public boolean isStopped() { return isStopped; }
  };
  int cleanInterval = conf.getInt(CLEANUP_INTERVAL, 10 * 1000);
  int maxIdleTime = conf.getInt(MAX_IDLETIME, 10 * 60 * 1000);
  connectionCleaner = new ConnectionCleaner(cleanInterval, maxIdleTime);
  Threads.setDaemonThreadRunning(connectionCleaner.getThread());
  this.realUser = realUser;
  this.conf = conf;
}
Example 13: SplitLogManager
import org.apache.hadoop.hbase.Stoppable; // import the required package/class
/**
 * Wrapper around {@link #SplitLogManager(ZooKeeperWatcher, Configuration, Stoppable,
 * MasterServices, ServerName, boolean, TaskFinisher)} that provides a task finisher for
 * copying recovered edits to their final destination. The task finisher has to be robust
 * because it can be arbitrarily restarted or called multiple times.
 *
 * @param zkw the ZK watcher
 * @param conf the HBase configuration
 * @param stopper the stoppable in case anything is wrong
 * @param master the master services
 * @param serverName the master server name
 * @param masterRecovery an indication if the master is in recovery
 */
public SplitLogManager(ZooKeeperWatcher zkw, final Configuration conf,
    Stoppable stopper, MasterServices master, ServerName serverName, boolean masterRecovery) {
  this(zkw, conf, stopper, master, serverName, masterRecovery, new TaskFinisher() {
    @Override
    public Status finish(ServerName workerName, String logfile) {
      try {
        HLogSplitter.finishSplitLogFile(logfile, conf);
      } catch (IOException e) {
        LOG.warn("Could not finish splitting of log file " + logfile, e);
        return Status.ERR;
      }
      return Status.DONE;
    }
  });
}
Example 14: init
import org.apache.hadoop.hbase.Stoppable; // import the required package/class
/**
 * Instantiation method used by region servers
 *
 * @param conf configuration to use
 * @param fs file system to use
 * @param manager replication manager to ping to
 * @param replicationQueues the replication queues to use
 * @param replicationPeers the replication peers to use
 * @param stopper the stoppable used to stop the region server
 * @param peerClusterZnode the name of our znode
 * @param clusterId unique UUID for the cluster
 * @param replicationEndpoint the replication endpoint implementation
 * @param metrics metrics for replication source
 * @throws IOException
 */
@Override
public void init(final Configuration conf, final FileSystem fs,
    final ReplicationSourceManager manager, final ReplicationQueues replicationQueues,
    final ReplicationPeers replicationPeers, final Stoppable stopper,
    final String peerClusterZnode, final UUID clusterId, ReplicationEndpoint replicationEndpoint,
    final MetricsSource metrics) throws IOException {
  this.stopper = stopper;
  this.conf = HBaseConfiguration.create(conf);
  decorateConf();
  this.replicationQueueSizeCapacity =
      this.conf.getLong("replication.source.size.capacity", 1024 * 1024 * 64);
  this.replicationQueueNbCapacity =
      this.conf.getInt("replication.source.nb.capacity", 25000);
  this.sleepForRetries =
      this.conf.getLong("replication.source.sleepforretries", 1000); // 1 second
  this.maxRetriesMultiplier =
      this.conf.getInt("replication.source.maxretriesmultiplier", 300); // 5 minutes @ 1 sec per
  this.queueSizePerGroup = this.conf.getInt("hbase.regionserver.maxlogs", 32);
  long bandwidth = this.conf.getLong("replication.source.per.peer.node.bandwidth", 0);
  this.throttler = new ReplicationThrottler((double) bandwidth / 10.0);
  this.replicationQueues = replicationQueues;
  this.replicationPeers = replicationPeers;
  this.manager = manager;
  this.fs = fs;
  this.metrics = metrics;
  this.clusterId = clusterId;
  this.peerClusterZnode = peerClusterZnode;
  this.replicationQueueInfo = new ReplicationQueueInfo(peerClusterZnode);
  // ReplicationQueueInfo parses the peerId out of the znode for us
  this.peerId = this.replicationQueueInfo.getPeerId();
  this.logQueueWarnThreshold = this.conf.getInt("replication.source.log.queue.warn", 2);
  this.replicationEndpoint = replicationEndpoint;
}
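The keys read in init() double as the replication source's tuning knobs. A sketch that sets them programmatically, using the default values visible in the code above (adjust per workload):

Configuration conf = HBaseConfiguration.create();
conf.setLong("replication.source.size.capacity", 1024 * 1024 * 64); // bytes per shipment
conf.setInt("replication.source.nb.capacity", 25000);               // entries per shipment
conf.setLong("replication.source.sleepforretries", 1000);           // 1 s between retries
conf.setInt("replication.source.maxretriesmultiplier", 300);        // backoff cap (~5 min)
conf.setLong("replication.source.per.peer.node.bandwidth", 0);      // 0 = no throttling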
Example 15: CleanerChore
import org.apache.hadoop.hbase.Stoppable; // import the required package/class
/**
 * @param name name of the chore being run
 * @param sleepPeriod the period of time to sleep between each run
 * @param s the stopper
 * @param conf configuration to use
 * @param fs handle to the FS
 * @param oldFileDir the path to the archived files
 * @param confKey configuration key for the classes to instantiate
 */
public CleanerChore(String name, final int sleepPeriod, final Stoppable s, Configuration conf,
    FileSystem fs, Path oldFileDir, String confKey) {
  super(name, s, sleepPeriod);
  this.fs = fs;
  this.oldFileDir = oldFileDir;
  this.conf = conf;
  initCleanerChain(confKey);
}
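CleanerChore is abstract; subclasses supply the validate(Path) check that decides which paths are even offered to the delegates. The AllValidPaths chore used in Examples 3 and 4 is roughly the following sketch, modeled on HBase's own cleaner tests (the abstract validate(Path) hook is an assumption of this sketch):

private static class AllValidPaths extends CleanerChore<BaseHFileCleanerDelegate> {
  public AllValidPaths(String name, Stoppable s, Configuration conf, FileSystem fs,
      Path oldFileDir, String confKey) {
    super(name, Integer.MAX_VALUE, s, conf, fs, oldFileDir, confKey);
  }

  // all paths are valid, so every file reaches the configured delegates
  @Override
  protected boolean validate(Path file) {
    return true;
  }
}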