This article collects typical usage examples of the Java method org.apache.hadoop.hbase.wal.DefaultWALProvider.getServerNameFromWALDirectoryName. If you are wondering what DefaultWALProvider.getServerNameFromWALDirectoryName does, how to call it, or what it looks like in practice, the curated code examples below should help. You can also read further about the enclosing class, org.apache.hadoop.hbase.wal.DefaultWALProvider.
Three code examples of DefaultWALProvider.getServerNameFromWALDirectoryName are shown below; by default they are ordered by popularity.
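Before the examples, here is a minimal, self-contained sketch of the call itself, assuming an HBase 1.x classpath; the HDFS path and class name are hypothetical, chosen only for illustration. The method takes a Path whose last component is a region server's WAL directory name in the <hostname>,<port>,<startcode> form and returns the corresponding ServerName; as in the examples below, unparseable names are handled by a null check or by catching IllegalArgumentException.

import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.wal.DefaultWALProvider;

public class WalDirServerNameDemo {
  public static void main(String[] args) {
    // Hypothetical per-regionserver WAL directory: <hostname>,<port>,<startcode>
    Path walDir = new Path("hdfs://nn.example.com/hbase/WALs/rs1.example.com,16020,1500000000000");
    try {
      // Returns the ServerName encoded in the directory name, or null when the
      // path does not look like a region server WAL directory.
      ServerName sn = DefaultWALProvider.getServerNameFromWALDirectoryName(walDir);
      System.out.println("Parsed server name: " + sn);
    } catch (IllegalArgumentException e) {
      // As in the examples below, names that cannot be parsed are simply skipped.
      System.out.println("Not a WAL directory name: " + walDir);
    }
  }
}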
Example 1: splitLogDistributed
import org.apache.hadoop.hbase.wal.DefaultWALProvider; // import the package/class the method depends on
/**
 * The caller will block until all the log files of the given region server have been processed -
 * successfully split or an error is encountered - by an available worker region server. This
 * method must only be called after the region servers have been brought online.
 * @param logDirs List of log dirs to split
 * @throws IOException If there was an error while splitting any log file
 * @return cumulative size of the logfiles split
 */
public long splitLogDistributed(final List<Path> logDirs) throws IOException {
  if (logDirs.isEmpty()) {
    return 0;
  }
  Set<ServerName> serverNames = new HashSet<ServerName>();
  for (Path logDir : logDirs) {
    try {
      ServerName serverName = DefaultWALProvider.getServerNameFromWALDirectoryName(logDir);
      if (serverName != null) {
        serverNames.add(serverName);
      }
    } catch (IllegalArgumentException e) {
      // ignore invalid format error.
      LOG.warn("Cannot parse server name from " + logDir);
    }
  }
  return splitLogDistributed(serverNames, logDirs, null);
}
Example 2: reorderBlocks
import org.apache.hadoop.hbase.wal.DefaultWALProvider; // import the package/class the method depends on
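/**
 * Reorder the HDFS block locations of a WAL file so that the replica stored on the
 * host that wrote the WAL is moved to the last position; readers recovering the WAL
 * will then try the other datanodes first.
 */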
public void reorderBlocks(Configuration conf, LocatedBlocks lbs, String src)
    throws IOException {
  ServerName sn = DefaultWALProvider.getServerNameFromWALDirectoryName(conf, src);
  if (sn == null) {
    // It's not a WAL
    return;
  }
  // OK, so it's a WAL
  String hostName = sn.getHostname();
  if (LOG.isTraceEnabled()) {
    LOG.trace(src +
        " is a WAL file, so reordering blocks, last hostname will be:" + hostName);
  }
  // Just check for all blocks
  for (LocatedBlock lb : lbs.getLocatedBlocks()) {
    DatanodeInfo[] dnis = lb.getLocations();
    if (dnis != null && dnis.length > 1) {
      boolean found = false;
      for (int i = 0; i < dnis.length - 1 && !found; i++) {
        if (hostName.equals(dnis[i].getHostName())) {
          // Advance the other locations by one and put this one in last place.
          DatanodeInfo toLast = dnis[i];
          System.arraycopy(dnis, i + 1, dnis, i, dnis.length - i - 1);
          dnis[dnis.length - 1] = toLast;
          found = true;
        }
      }
    }
  }
}
Example 3: getFailedServersFromLogFolders
import org.apache.hadoop.hbase.wal.DefaultWALProvider; // import the package/class the method depends on
/**
 * Inspect the log directory to find dead servers which need recovery work.
 * @return A set of ServerNames which aren't running but still have WAL files left in the file system
 */
Set<ServerName> getFailedServersFromLogFolders() {
  boolean retrySplitting = !conf.getBoolean("hbase.hlog.split.skip.errors",
      WALSplitter.SPLIT_SKIP_ERRORS_DEFAULT);
  Set<ServerName> serverNames = new HashSet<ServerName>();
  Path logsDirPath = new Path(this.rootdir, HConstants.HREGION_LOGDIR_NAME);
  do {
    if (master.isStopped()) {
      LOG.warn("Master stopped while trying to get failed servers.");
      break;
    }
    try {
      if (!this.fs.exists(logsDirPath)) return serverNames;
      FileStatus[] logFolders = FSUtils.listStatus(this.fs, logsDirPath, null);
      // Get online servers after getting log folders to avoid log folder deletion of newly
      // checked-in region servers. See HBASE-5916.
      Set<ServerName> onlineServers = ((HMaster) master).getServerManager().getOnlineServers()
          .keySet();
      if (logFolders == null || logFolders.length == 0) {
        LOG.debug("No log files to split, proceeding...");
        return serverNames;
      }
      for (FileStatus status : logFolders) {
        FileStatus[] curLogFiles = FSUtils.listStatus(this.fs, status.getPath(), null);
        if (curLogFiles == null || curLogFiles.length == 0) {
          // Empty log folder. No recovery needed
          continue;
        }
        final ServerName serverName = DefaultWALProvider.getServerNameFromWALDirectoryName(
            status.getPath());
        if (null == serverName) {
          LOG.warn("Log folder " + status.getPath() + " doesn't look like its name includes a " +
              "region server name; leaving in place. If you see later errors about missing " +
              "write ahead logs they may be saved in this location.");
        } else if (!onlineServers.contains(serverName)) {
          LOG.info("Log folder " + status.getPath() + " doesn't belong "
              + "to a known region server, splitting");
          serverNames.add(serverName);
        } else {
          LOG.info("Log folder " + status.getPath() + " belongs to an existing region server");
        }
      }
      retrySplitting = false;
    } catch (IOException ioe) {
      LOG.warn("Failed getting failed servers to be recovered.", ioe);
      if (!checkFileSystem()) {
        LOG.warn("Bad Filesystem, exiting");
        Runtime.getRuntime().halt(1);
      }
      try {
        if (retrySplitting) {
          Thread.sleep(conf.getInt("hbase.hlog.split.failure.retry.interval", 30 * 1000));
        }
      } catch (InterruptedException e) {
        LOG.warn("Interrupted, aborting since cannot return w/o splitting");
        Thread.currentThread().interrupt();
        retrySplitting = false;
        Runtime.getRuntime().halt(1);
      }
    }
  } while (retrySplitting);
  return serverNames;
}