This page collects typical usage examples of the Java class org.apache.hadoop.hbase.wal.DefaultWALProvider. If you have been wondering what DefaultWALProvider is for, or how to use it in practice, the curated code examples below should help.
The DefaultWALProvider class belongs to the org.apache.hadoop.hbase.wal package. Fifteen code examples are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Java examples.
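Before the individual examples, the following minimal sketch (ours, not taken from the page's sources) exercises the DefaultWALProvider static helpers that recur below; the server name and WAL file name are invented for illustration.

import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.wal.DefaultWALProvider;

public class DefaultWALProviderTour {
  public static void main(String[] args) {
    // Per-server WAL directory name under the HBase root, e.g. "WALs/<serverName>".
    String dirName = DefaultWALProvider.getWALDirectoryName("host,16020,1445306600781");

    // Suffix appended to a dead server's WAL directory while its logs are split.
    Path splittingDir = new Path(dirName).suffix(DefaultWALProvider.SPLITTING_EXT);

    // Recover the ServerName from a WAL directory path (null if the path is not one).
    ServerName sn =
        DefaultWALProvider.getServerNameFromWALDirectoryName(new Path("/hbase", dirName));

    // Group key shared by all WAL files of one wal group (file name is hypothetical).
    String prefix =
        DefaultWALProvider.getWALPrefixFromWALName("host%2C16020%2C1445306600781.1445306601000");

    System.out.println(dirName + " | " + splittingDir + " | " + sn + " | " + prefix);
  }
}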
Example 1: preLogRoll
import org.apache.hadoop.hbase.wal.DefaultWALProvider; // import the required package/class
void preLogRoll(Path newLog) throws IOException {
  recordLog(newLog);
  String logName = newLog.getName();
  String logPrefix = DefaultWALProvider.getWALPrefixFromWALName(logName);
  synchronized (latestPaths) {
    Iterator<Path> iterator = latestPaths.iterator();
    while (iterator.hasNext()) {
      Path path = iterator.next();
      if (path.getName().contains(logPrefix)) {
        iterator.remove();
        break;
      }
    }
    this.latestPaths.add(newLog);
  }
}
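A usage note on preLogRoll: latestPaths holds at most one path per wal group, so the prefix match evicts the stale entry for the rolled group before the fresh log is recorded. Example 10 below consumes this set when a replication source is added for a new peer.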
Example 2: getLogDirs
import org.apache.hadoop.hbase.wal.DefaultWALProvider; // import the required package/class
@edu.umd.cs.findbugs.annotations.SuppressWarnings(value="UL_UNRELEASED_LOCK", justification=
    "We only release this lock when we set it. Updates to code that uses it should verify use " +
    "of the guard boolean.")
private List<Path> getLogDirs(final Set<ServerName> serverNames) throws IOException {
  List<Path> logDirs = new ArrayList<Path>();
  boolean needReleaseLock = false;
  if (!this.services.isInitialized()) {
    // during master initialization, we could have multiple places splitting the same wal
    this.splitLogLock.lock();
    needReleaseLock = true;
  }
  try {
    for (ServerName serverName : serverNames) {
      Path logDir = new Path(this.rootdir,
          DefaultWALProvider.getWALDirectoryName(serverName.toString()));
      Path splitDir = logDir.suffix(DefaultWALProvider.SPLITTING_EXT);
      // Rename the directory so a rogue RS doesn't create more WALs
      if (fs.exists(logDir)) {
        if (!this.fs.rename(logDir, splitDir)) {
          throw new IOException("Failed fs.rename for log split: " + logDir);
        }
        logDir = splitDir;
        LOG.debug("Renamed region directory: " + splitDir);
      } else if (!fs.exists(splitDir)) {
        LOG.info("Log dir for server " + serverName + " does not exist");
        continue;
      }
      logDirs.add(splitDir);
    }
  } finally {
    if (needReleaseLock) {
      this.splitLogLock.unlock();
    }
  }
  return logDirs;
}
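The rename to the "-splitting" suffix above is what makes an interrupted split visible after a restart. As a hedged illustration (the helper and directory-layout assumptions are ours, not from the page), a scan for leftover split directories under the WAL root could look like this:

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.wal.DefaultWALProvider;

class SplittingDirScan {
  /** Lists WAL directories still carrying the "-splitting" suffix. */
  static void printUnfinishedSplits(Configuration conf, Path rootDir) throws IOException {
    FileSystem fs = rootDir.getFileSystem(conf);
    Path walRoot = new Path(rootDir, HConstants.HREGION_LOGDIR_NAME);
    for (FileStatus st : fs.listStatus(walRoot)) {
      if (st.getPath().getName().endsWith(DefaultWALProvider.SPLITTING_EXT)) {
        System.out.println("unfinished split: " + st.getPath());
      }
    }
  }
}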
Example 3: splitLogDistributed
import org.apache.hadoop.hbase.wal.DefaultWALProvider; // import the required package/class
/**
 * The caller will block until all the log files of the given region server have been processed -
 * successfully split or an error is encountered - by an available worker region server. This
 * method must only be called after the region servers have been brought online.
 * @param logDirs List of log dirs to split
 * @throws IOException If there was an error while splitting any log file
 * @return cumulative size of the logfiles split
 */
public long splitLogDistributed(final List<Path> logDirs) throws IOException {
  if (logDirs.isEmpty()) {
    return 0;
  }
  Set<ServerName> serverNames = new HashSet<ServerName>();
  for (Path logDir : logDirs) {
    try {
      ServerName serverName = DefaultWALProvider.getServerNameFromWALDirectoryName(logDir);
      if (serverName != null) {
        serverNames.add(serverName);
      }
    } catch (IllegalArgumentException e) {
      // ignore invalid format error.
      LOG.warn("Cannot parse server name from " + logDir);
    }
  }
  return splitLogDistributed(serverNames, logDirs, null);
}
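On the caller side, the per-server WAL directory is typically derived with getWALDirectoryName and handed to splitLogDistributed. A hypothetical sketch of that wiring (the SplitLogManager handle and server name are illustrative stand-ins, not code from the page):

import java.io.IOException;
import java.util.Collections;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.master.SplitLogManager;
import org.apache.hadoop.hbase.wal.DefaultWALProvider;

class SplitCaller {
  /** Splits all WALs left behind by one dead server; returns bytes split. */
  static long splitWALsFor(SplitLogManager mgr, Path rootDir, ServerName sn)
      throws IOException {
    Path logDir = new Path(rootDir, DefaultWALProvider.getWALDirectoryName(sn.toString()));
    return mgr.splitLogDistributed(Collections.singletonList(logDir));
  }
}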
Example 4: testWALRollWriting
import org.apache.hadoop.hbase.wal.DefaultWALProvider; // import the required package/class
@Test (timeout=300000)
public void testWALRollWriting() throws Exception {
  setUpforLogRolling();
  String className = this.getClass().getName();
  StringBuilder v = new StringBuilder(className);
  while (v.length() < 1000) {
    v.append(className);
  }
  byte[] value = Bytes.toBytes(v.toString());
  HRegionServer regionServer = startAndWriteData(TableName.valueOf("TestLogRolling"), value);
  LOG.info("after writing there are "
      + DefaultWALProvider.getNumRolledLogFiles(regionServer.getWAL(null)) + " log files");
  // flush all regions
  for (Region r : regionServer.getOnlineRegionsLocalContext()) {
    r.flush(true);
  }
  admin.rollWALWriter(regionServer.getServerName());
  int count = DefaultWALProvider.getNumRolledLogFiles(regionServer.getWAL(null));
  LOG.info("after flushing all regions and rolling logs there are " +
      count + " log files");
  assertTrue(("actual count: " + count), count <= 2);
}
Example 5: setUp
import org.apache.hadoop.hbase.wal.DefaultWALProvider; // import the required package/class
@Before
public void setUp() throws Exception {
  this.conf = HBaseConfiguration.create(TEST_UTIL.getConfiguration());
  // this.cluster = TEST_UTIL.getDFSCluster();
  this.fs = TEST_UTIL.getDFSCluster().getFileSystem();
  this.hbaseRootDir = FSUtils.getRootDir(conf);
  this.dir = new Path(this.hbaseRootDir, TestWALObserver.class.getName());
  this.oldLogDir = new Path(this.hbaseRootDir,
      HConstants.HREGION_OLDLOGDIR_NAME);
  this.logDir = new Path(this.hbaseRootDir,
      DefaultWALProvider.getWALDirectoryName(currentTest.getMethodName()));
  this.logName = HConstants.HREGION_LOGDIR_NAME;
  if (TEST_UTIL.getDFSCluster().getFileSystem().exists(this.hbaseRootDir)) {
    TEST_UTIL.getDFSCluster().getFileSystem().delete(this.hbaseRootDir, true);
  }
  this.wals = new WALFactory(conf, null, currentTest.getMethodName());
}
Example 6: testLogRolling
import org.apache.hadoop.hbase.wal.DefaultWALProvider; // import the required package/class
/**
 * Tests that logs are deleted
 * @throws IOException
 * @throws org.apache.hadoop.hbase.regionserver.wal.FailedLogCloseException
 */
@Test
public void testLogRolling() throws Exception {
  this.tableName = getName();
  // TODO: Why does this write data take for ever?
  startAndWriteData();
  final WAL log = server.getWAL(null);
  LOG.info("after writing there are " + DefaultWALProvider.getNumRolledLogFiles(log) +
      " log files");
  // flush all regions
  for (Region r : server.getOnlineRegionsLocalContext()) {
    r.flush(true);
  }
  // Now roll the log
  log.rollWriter();
  int count = DefaultWALProvider.getNumRolledLogFiles(log);
  LOG.info("after flushing all regions and rolling logs there are " + count + " log files");
  assertTrue(("actual count: " + count), count <= 2);
}
Example 7: testWALRollWriting
import org.apache.hadoop.hbase.wal.DefaultWALProvider; // import the required package/class
@Test (timeout=300000)
public void testWALRollWriting() throws Exception {
  setUpforLogRolling();
  String className = this.getClass().getName();
  StringBuilder v = new StringBuilder(className);
  while (v.length() < 1000) {
    v.append(className);
  }
  byte[] value = Bytes.toBytes(v.toString());
  HRegionServer regionServer = startAndWriteData(TableName.valueOf("TestLogRolling"), value);
  LOG.info("after writing there are "
      + DefaultWALProvider.getNumRolledLogFiles(regionServer.getWAL(null)) + " log files");
  // flush all regions
  List<HRegion> regions = new ArrayList<HRegion>(regionServer
      .getOnlineRegionsLocalContext());
  for (HRegion r : regions) {
    r.flushcache();
  }
  admin.rollWALWriter(regionServer.getServerName());
  int count = DefaultWALProvider.getNumRolledLogFiles(regionServer.getWAL(null));
  LOG.info("after flushing all regions and rolling logs there are " +
      count + " log files");
  assertTrue(("actual count: " + count), count <= 2);
}
Example 8: testLogRolling
import org.apache.hadoop.hbase.wal.DefaultWALProvider; // import the required package/class
/**
 * Tests that logs are deleted
 * @throws IOException
 * @throws org.apache.hadoop.hbase.regionserver.wal.FailedLogCloseException
 */
@Test
public void testLogRolling() throws Exception {
  this.tableName = getName();
  // TODO: Why does this write data take for ever?
  startAndWriteData();
  final WAL log = server.getWAL(null);
  LOG.info("after writing there are " + DefaultWALProvider.getNumRolledLogFiles(log) +
      " log files");
  // flush all regions
  List<HRegion> regions =
      new ArrayList<HRegion>(server.getOnlineRegionsLocalContext());
  for (HRegion r : regions) {
    r.flushcache();
  }
  // Now roll the log
  log.rollWriter();
  int count = DefaultWALProvider.getNumRolledLogFiles(log);
  LOG.info("after flushing all regions and rolling logs there are " + count + " log files");
  assertTrue(("actual count: " + count), count <= 2);
}
Example 9: getLogFileSize
import org.apache.hadoop.hbase.wal.DefaultWALProvider; // import the required package/class
/**
 * @param serverName the 'unique-over-restarts' name, i.e. hostname with start code suffix
 * @param hlogName name of HLog
 */
private long getLogFileSize(String serverName, String hlogName) throws IOException {
  Path hbaseLogDir = new Path(hbaseRootDir, DefaultWALProvider.getWALDirectoryName(serverName));
  Path path = new Path(hbaseLogDir, hlogName);
  try {
    FileStatus status = fileSystem.getFileStatus(path);
    return status.getLen();
  } catch (FileNotFoundException e) {
    Path oldLogPath = new Path(hbaseOldLogDir, hlogName);
    try {
      return fileSystem.getFileStatus(oldLogPath).getLen();
    } catch (FileNotFoundException e2) {
      // TODO there is still another place to look for log files (dead region servers);
      // see openReader in ReplicationSource
      System.err.println("HLog not found at: " + path + " or " + oldLogPath);
      return -1;
    }
  }
}
Example 10: addSource
import org.apache.hadoop.hbase.wal.DefaultWALProvider; // import the required package/class
/**
 * Add sources for the given peer cluster on this region server. For the newly added peer, we only
 * need to enqueue the latest log of each wal group and do replication
 * @param id the id of the peer cluster
 * @return the source that was created
 * @throws IOException
 */
protected ReplicationSourceInterface addSource(String id) throws IOException,
    ReplicationException {
  ReplicationPeerConfig peerConfig = replicationPeers.getReplicationPeerConfig(id);
  ReplicationPeer peer = replicationPeers.getPeer(id);
  ReplicationSourceInterface src =
      getReplicationSource(this.conf, this.fs, this, this.replicationQueues,
        this.replicationPeers, server, id, this.clusterId, peerConfig, peer);
  synchronized (this.walsById) {
    this.sources.add(src);
    Map<String, SortedSet<String>> walsByGroup = new HashMap<String, SortedSet<String>>();
    this.walsById.put(id, walsByGroup);
    // Add the latest wal to that source's queue
    synchronized (latestPaths) {
      if (this.latestPaths.size() > 0) {
        for (Path logPath : latestPaths) {
          String name = logPath.getName();
          String walPrefix = DefaultWALProvider.getWALPrefixFromWALName(name);
          SortedSet<String> logs = new TreeSet<String>();
          logs.add(name);
          walsByGroup.put(walPrefix, logs);
          try {
            this.replicationQueues.addLog(id, name);
          } catch (ReplicationException e) {
            String message =
                "Cannot add log to queue when creating a new source, queueId=" + id
                    + ", filename=" + name;
            server.stop(message);
            throw e;
          }
          src.enqueueLog(logPath);
        }
      }
    }
  }
  src.startup();
  return src;
}
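The walsByGroup bookkeeping above is easier to see in isolation: WAL file names are bucketed under the prefix returned by getWALPrefixFromWALName, so each wal group keeps its own sorted set of files. The helper below is our restatement, not code from the page:

import java.util.HashMap;
import java.util.Map;
import java.util.SortedSet;
import java.util.TreeSet;
import org.apache.hadoop.hbase.wal.DefaultWALProvider;

class WalGrouping {
  /** Buckets WAL file names under their wal-group prefix, as addSource does. */
  static Map<String, SortedSet<String>> groupByPrefix(Iterable<String> walNames) {
    Map<String, SortedSet<String>> walsByGroup = new HashMap<String, SortedSet<String>>();
    for (String name : walNames) {
      String prefix = DefaultWALProvider.getWALPrefixFromWALName(name);
      SortedSet<String> logs = walsByGroup.get(prefix);
      if (logs == null) {
        logs = new TreeSet<String>();
        walsByGroup.put(prefix, logs);
      }
      logs.add(name);
    }
    return walsByGroup;
  }
}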
Example 11: enqueueLog
import org.apache.hadoop.hbase.wal.DefaultWALProvider; // import the required package/class
@Override
public void enqueueLog(Path log) {
  String logPrefix = DefaultWALProvider.getWALPrefixFromWALName(log.getName());
  PriorityBlockingQueue<Path> queue = queues.get(logPrefix);
  if (queue == null) {
    queue = new PriorityBlockingQueue<Path>(queueSizePerGroup, new LogsComparator());
    queues.put(logPrefix, queue);
    if (this.sourceRunning) {
      // new wal group observed after source startup, start a new worker thread to track it
      // notice: it's possible that a log is enqueued after this.running is set but before the
      // worker thread is launched, so it's necessary to check workerThreads before starting one
      final ReplicationSourceWorkerThread worker =
          new ReplicationSourceWorkerThread(logPrefix, queue, replicationQueueInfo, this);
      ReplicationSourceWorkerThread extant = workerThreads.putIfAbsent(logPrefix, worker);
      if (extant != null) {
        LOG.debug("Someone has beat us to start a worker thread for wal group " + logPrefix);
      } else {
        LOG.debug("Starting up worker for wal group " + logPrefix);
        worker.startup();
      }
    }
  }
  queue.put(log);
  int queueSize = logQueueSize.incrementAndGet();
  this.metrics.setSizeOfLogQueue(queueSize);
  // This will log a warning for each new log that gets created above the warn threshold
  if (queue.size() > this.logQueueWarnThreshold) {
    LOG.warn("WAL group " + logPrefix + " queue size: " + queueSize
        + " exceeds value of replication.source.log.queue.warn: " + logQueueWarnThreshold);
  }
}
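One observation on the get-then-put over queues above: it is only race-free if a single thread drives enqueueLog. Were multiple producers ever involved, a concurrent map would make the create-if-missing step atomic. A hedged Java 8 sketch of that variant (the comparator is a stand-in for LogsComparator, whose timestamp-based ordering we do not reproduce here):

import java.util.Comparator;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;
import java.util.concurrent.PriorityBlockingQueue;
import org.apache.hadoop.fs.Path;

class GroupedLogQueues {
  private final ConcurrentMap<String, PriorityBlockingQueue<Path>> queues =
      new ConcurrentHashMap<String, PriorityBlockingQueue<Path>>();
  private final int queueSizePerGroup = 1; // initial capacity only (illustrative)
  // Order WALs by name; the real code uses LogsComparator over wal timestamps.
  private final Comparator<Path> byName = new Comparator<Path>() {
    @Override public int compare(Path a, Path b) {
      return a.getName().compareTo(b.getName());
    }
  };

  void enqueue(String logPrefix, Path log) {
    // computeIfAbsent makes the create-if-missing step atomic across producers.
    PriorityBlockingQueue<Path> queue = queues.computeIfAbsent(logPrefix,
        p -> new PriorityBlockingQueue<Path>(queueSizePerGroup, byName));
    queue.put(log);
  }
}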
Example 12: reorderBlocks
import org.apache.hadoop.hbase.wal.DefaultWALProvider; // import the required package/class
public void reorderBlocks(Configuration conf, LocatedBlocks lbs, String src)
    throws IOException {
  ServerName sn = DefaultWALProvider.getServerNameFromWALDirectoryName(conf, src);
  if (sn == null) {
    // It's not a WAL
    return;
  }
  // Ok, so it's a WAL
  String hostName = sn.getHostname();
  if (LOG.isTraceEnabled()) {
    LOG.trace(src +
        " is a WAL file, so reordering blocks, last hostname will be: " + hostName);
  }
  // Just check for all blocks
  for (LocatedBlock lb : lbs.getLocatedBlocks()) {
    DatanodeInfo[] dnis = lb.getLocations();
    if (dnis != null && dnis.length > 1) {
      boolean found = false;
      for (int i = 0; i < dnis.length - 1 && !found; i++) {
        if (hostName.equals(dnis[i].getHostName())) {
          // advance the other locations by one and put this one in last place
          DatanodeInfo toLast = dnis[i];
          System.arraycopy(dnis, i + 1, dnis, i, dnis.length - i - 1);
          dnis[dnis.length - 1] = toLast;
          found = true;
        }
      }
    }
  }
}
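The System.arraycopy shuffle at the heart of reorderBlocks is clearer in isolation: it pulls the matched replica out of its slot and appends it at the end, so the WAL writer's own datanode is tried last. A minimal generic sketch (ours, not from the page):

class BlockReorder {
  /** Moves a[i] to the last slot, sliding the elements after it left by one. */
  static <T> void moveToLast(T[] a, int i) {
    T tmp = a[i];
    System.arraycopy(a, i + 1, a, i, a.length - i - 1);
    a[a.length - 1] = tmp;
  }
}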
Example 13: setupWALAndReplication
import org.apache.hadoop.hbase.wal.DefaultWALProvider; // import the required package/class
/**
 * Setup WAL log and replication if enabled.
 * Replication setup is done in here because it wants to be hooked up to WAL.
 *
 * @return A WAL instance.
 * @throws IOException
 */
private WALFactory setupWALAndReplication() throws IOException {
  // TODO Replication make assumptions here based on the default filesystem impl
  final Path oldLogDir = new Path(rootDir, HConstants.HREGION_OLDLOGDIR_NAME);
  final String logName = DefaultWALProvider.getWALDirectoryName(this.serverName.toString());
  Path logdir = new Path(rootDir, logName);
  if (LOG.isDebugEnabled()) LOG.debug("logdir=" + logdir);
  if (this.fs.exists(logdir)) {
    throw new RegionServerRunningException(
        "Region server has already created directory at " + this.serverName.toString());
  }
  // Instantiate replication manager if replication enabled. Pass it the
  // log directories.
  createNewReplicationInstance(conf, this, this.fs, logdir, oldLogDir);
  // listeners the wal factory will add to wals it creates.
  final List<WALActionsListener> listeners = new ArrayList<WALActionsListener>();
  listeners.add(new MetricsWAL());
  if (this.replicationSourceHandler != null
      && this.replicationSourceHandler.getWALActionsListener() != null) {
    // Replication handler is an implementation of WALActionsListener.
    listeners.add(this.replicationSourceHandler.getWALActionsListener());
  }
  return new WALFactory(conf, listeners, serverName.toString());
}
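Replication hooks into the WAL through the listener list built above. As a hedged sketch of the same extension point, assuming the branch-1 WALActionsListener.Base no-op adapter, a custom listener registers exactly like MetricsWAL does:

import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.regionserver.wal.WALActionsListener;
import org.apache.hadoop.hbase.wal.WALFactory;

class ListenerWiring {
  /** Builds a WALFactory whose WALs report every roll (names are illustrative). */
  static WALFactory newFactoryWithRollLogging(Configuration conf, String factoryId)
      throws IOException {
    List<WALActionsListener> listeners = new ArrayList<WALActionsListener>();
    listeners.add(new WALActionsListener.Base() {
      @Override
      public void postLogRoll(Path oldPath, Path newPath) {
        System.out.println("WAL rolled: " + oldPath + " -> " + newPath);
      }
    });
    return new WALFactory(conf, listeners, factoryId);
  }
}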
Example 14: setUp
import org.apache.hadoop.hbase.wal.DefaultWALProvider; // import the required package/class
@Before
public void setUp() throws Exception {
  this.conf = HBaseConfiguration.create(TEST_UTIL.getConfiguration());
  this.fs = TEST_UTIL.getDFSCluster().getFileSystem();
  this.hbaseRootDir = FSUtils.getRootDir(this.conf);
  this.oldLogDir = new Path(this.hbaseRootDir, HConstants.HREGION_OLDLOGDIR_NAME);
  this.logName = DefaultWALProvider.getWALDirectoryName(currentTest.getMethodName() + "-manual");
  this.logDir = new Path(this.hbaseRootDir, logName);
  if (TEST_UTIL.getDFSCluster().getFileSystem().exists(this.hbaseRootDir)) {
    TEST_UTIL.getDFSCluster().getFileSystem().delete(this.hbaseRootDir, true);
  }
  this.mode = (conf.getBoolean(HConstants.DISTRIBUTED_LOG_REPLAY_KEY, false) ?
      RecoveryMode.LOG_REPLAY : RecoveryMode.LOG_SPLITTING);
  this.wals = new WALFactory(conf, null, currentTest.getMethodName());
}
Example 15: verifyWALCount
import org.apache.hadoop.hbase.wal.DefaultWALProvider; // import the required package/class
private void verifyWALCount(WALFactory wals, WAL log, int expected) throws Exception {
  Path walPath = DefaultWALProvider.getCurrentFileName(log);
  WAL.Reader reader = wals.createReader(FS, walPath);
  int count = 0;
  WAL.Entry entry = new WAL.Entry();
  while (reader.next(entry) != null) count++;
  reader.close();
  assertEquals(expected, count);
}