This article collects typical usage examples of the Java class org.apache.hadoop.hbase.util.VersionInfo. If you are wondering what exactly the VersionInfo class is for, how to use it, or what real-world VersionInfo usage looks like, the curated class code examples below should help.
The VersionInfo class belongs to the org.apache.hadoop.hbase.util package. A total of 15 code examples of the class are shown below, sorted by popularity by default.
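Before the individual examples, here is a minimal stand-alone sketch of the most common VersionInfo calls. It uses only methods that also appear in the examples below (getVersion, getRevision, getDate, compareVersion); the class name is illustrative.
import org.apache.hadoop.hbase.util.VersionInfo;

public class VersionInfoQuickLook {
  public static void main(String[] args) {
    // Static accessors for the build metadata compiled into the HBase jars.
    System.out.println("HBase version: " + VersionInfo.getVersion());
    System.out.println("Revision:      " + VersionInfo.getRevision());
    System.out.println("Build date:    " + VersionInfo.getDate());
    // compareVersion orders dotted version strings (Comparator-style result);
    // a negative value means the first argument is the older release.
    System.out.println(VersionInfo.compareVersion("1.4.0", VersionInfo.getVersion()));
  }
}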
Example 1: getExcludedServersForSystemTable
import org.apache.hadoop.hbase.util.VersionInfo; // import the required package/class
/**
* Get a list of servers that this region cannot be assigned to.
* For system tables, we must assign them to a server with the highest version.
*/
public List<ServerName> getExcludedServersForSystemTable() {
// TODO: This should be a cached list kept by the ServerManager rather than calculated on each
// move or system region assign. The RegionServerTracker keeps list of online Servers with
// RegionServerInfo that includes Version.
List<Pair<ServerName, String>> serverList = master.getServerManager().getOnlineServersList()
.stream()
.map((s)->new Pair<>(s, master.getRegionServerVersion(s)))
.collect(Collectors.toList());
if (serverList.isEmpty()) {
return Collections.emptyList();
}
String highestVersion = Collections.max(serverList,
(o1, o2) -> VersionInfo.compareVersion(o1.getSecond(), o2.getSecond())).getSecond();
return serverList.stream()
.filter((p)->!p.getSecond().equals(highestVersion))
.map(Pair::getFirst)
.collect(Collectors.toList());
}
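The key call in Example 1 is VersionInfo.compareVersion, used as a comparator to find the highest version among the online servers. The following stand-alone sketch reproduces that selection on a few made-up version strings; the class name and the versions are illustrative only.
import java.util.Arrays;
import java.util.Collections;
import java.util.List;
import org.apache.hadoop.hbase.util.VersionInfo;

public class HighestVersionDemo {
  public static void main(String[] args) {
    // Hypothetical versions reported by three region servers.
    List<String> reported = Arrays.asList("1.4.0", "2.0.1", "1.4.13");
    // Same pattern as Example 1: compareVersion acts as the comparator.
    String highest = Collections.max(reported, VersionInfo::compareVersion);
    System.out.println("highest version: " + highest);
    // Servers not on the highest version are the ones that would be excluded.
    reported.stream()
        .filter(v -> !v.equals(highest))
        .forEach(v -> System.out.println("would be excluded: " + v));
  }
}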
Example 2: getVersionInfo
import org.apache.hadoop.hbase.util.VersionInfo; // import the required package/class
/**
* Get a protocol buffer VersionInfo
*
* @return the converted protocol buffer VersionInfo
*/
public static HBaseProtos.VersionInfo getVersionInfo() {
HBaseProtos.VersionInfo.Builder builder = HBaseProtos.VersionInfo.newBuilder();
String version = VersionInfo.getVersion();
builder.setVersion(version);
String[] components = version.split("\\.");
if (components.length > 2) {
builder.setVersionMajor(Integer.parseInt(components[0]));
builder.setVersionMinor(Integer.parseInt(components[1]));
}
builder.setUrl(VersionInfo.getUrl());
builder.setRevision(VersionInfo.getRevision());
builder.setUser(VersionInfo.getUser());
builder.setDate(VersionInfo.getDate());
builder.setSrcChecksum(VersionInfo.getSrcChecksum());
return builder.build();
}
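A hedged usage sketch for Example 2: it assumes the generated HBaseProtos.VersionInfo message exposes the usual protobuf getters matching the setters above (getVersion(), hasVersionMajor(), getVersionMajor(), ...), and ProtobufUtil stands in for whichever class hosts the helper.
// Hypothetical caller of the helper in Example 2; ProtobufUtil is assumed to
// be the class that hosts getVersionInfo().
public static void printVersionInfo() {
  HBaseProtos.VersionInfo info = ProtobufUtil.getVersionInfo();
  System.out.println("version  = " + info.getVersion());
  System.out.println("revision = " + info.getRevision());
  // Major/minor are only set when the version string split into 3+ components.
  if (info.hasVersionMajor() && info.hasVersionMinor()) {
    System.out.println("major.minor = " + info.getVersionMajor() + "." + info.getVersionMinor());
  }
}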
Example 3: getClusterStatus
import org.apache.hadoop.hbase.util.VersionInfo; // import the required package/class
/**
* @return cluster status
*/
public ClusterStatus getClusterStatus() {
// Build Set of backup masters from ZK nodes
List<String> backupMasterStrings;
try {
backupMasterStrings = ZKUtil.listChildrenNoWatch(this.zooKeeper,
this.zooKeeper.backupMasterAddressesZNode);
} catch (KeeperException e) {
LOG.warn(this.zooKeeper.prefix("Unable to list backup servers"), e);
backupMasterStrings = new ArrayList<String>(0);
}
List<ServerName> backupMasters = new ArrayList<ServerName>(
backupMasterStrings.size());
for (String s: backupMasterStrings) {
backupMasters.add(new ServerName(s));
}
return new ClusterStatus(VersionInfo.getVersion(),
this.fileSystemManager.getClusterId(),
this.serverManager.getOnlineServers(),
this.serverManager.getDeadServers(),
this.serverName,
backupMasters,
this.assignmentManager.getRegionsInTransition(),
this.getCoprocessors());
}
Example 4: main
import org.apache.hadoop.hbase.util.VersionInfo; // import the required package/class
/**
* @param args
* @throws Exception
*/
public static void main(String [] args) throws Exception {
VersionInfo.logVersion();
try {
new ThriftServer(HBaseConfiguration.create()).doMain(args);
} catch (ExitCodeException ex) {
System.exit(ex.getExitCode());
}
}
Example 5: dumpVersionInfo
import org.apache.hadoop.hbase.util.VersionInfo; // import the required package/class
protected void dumpVersionInfo(PrintWriter out) {
VersionInfo.writeTo(out);
out.println("Hadoop " + org.apache.hadoop.util.VersionInfo.getVersion());
out.println("Source code repository " + org.apache.hadoop.util.VersionInfo.getUrl()
+ " revision=" + org.apache.hadoop.util.VersionInfo.getRevision());
out.println("Compiled by " + org.apache.hadoop.util.VersionInfo.getUser() +
" on " + org.apache.hadoop.util.VersionInfo.getDate());
}
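Example 5 writes the version block to a PrintWriter. The sketch below produces similar output on the console, using only the calls shown above; the class name is illustrative.
import java.io.PrintWriter;
import org.apache.hadoop.hbase.util.VersionInfo;

public class DumpVersionToConsole {
  public static void main(String[] args) {
    PrintWriter out = new PrintWriter(System.out, true);
    // Prints the HBase version/revision/user/date block.
    VersionInfo.writeTo(out);
    // Append the Hadoop version line, as Example 5 does.
    out.println("Hadoop " + org.apache.hadoop.util.VersionInfo.getVersion());
    out.flush();
  }
}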
Example 6: chore
import org.apache.hadoop.hbase.util.VersionInfo; // import the required package/class
@Override
protected void chore() {
if (!connected) {
return;
}
List<ServerName> sns = generateDeadServersListToSend();
if (sns.isEmpty()) {
// Nothing to send. Done.
return;
}
final long curTime = EnvironmentEdgeManager.currentTime();
if (lastMessageTime > curTime - messagePeriod) {
// We already sent something less than messagePeriod ago. Done.
return;
}
// Ok, we're going to send something then.
lastMessageTime = curTime;
// We're reusing an existing protobuf message, but we don't send everything.
// This could be extended in the future, for example if we want to send stuff like the
// hbase:meta server name.
ClusterStatus cs = new ClusterStatus(VersionInfo.getVersion(),
master.getMasterFileSystem().getClusterId().toString(),
null,
sns,
master.getServerName(),
null,
null,
null,
null);
publisher.publish(cs);
}
Example 7: main
import org.apache.hadoop.hbase.util.VersionInfo; // import the required package/class
/**
* @see org.apache.hadoop.hbase.regionserver.HRegionServerCommandLine
*/
public static void main(String[] args) throws Exception {
VersionInfo.logVersion();
Configuration conf = HBaseConfiguration.create();
@SuppressWarnings("unchecked") Class<? extends HRegionServer> regionServerClass =
(Class<? extends HRegionServer>) conf
.getClass(HConstants.REGION_SERVER_IMPL, HRegionServer.class);
new HRegionServerCommandLine(regionServerClass).doMain(args);
}
Example 8: testHTableInterfaceMethods
import org.apache.hadoop.hbase.util.VersionInfo; // import the required package/class
@Test
public void testHTableInterfaceMethods() throws Exception {
Configuration conf = util.getConfiguration();
MasterCoprocessorHost cpHost = util.getMiniHBaseCluster().getMaster().getMasterCoprocessorHost();
Class<?> implClazz = DummyRegionObserver.class;
cpHost.load(implClazz, Coprocessor.PRIORITY_HIGHEST, conf);
CoprocessorEnvironment env = cpHost.findCoprocessorEnvironment(implClazz.getName());
assertEquals(Coprocessor.VERSION, env.getVersion());
assertEquals(VersionInfo.getVersion(), env.getHBaseVersion());
hTableInterface = env.getTable(TEST_TABLE);
checkHTableInterfaceMethods();
cpHost.shutdown(env);
}
Example 9: getVersionInfo
import org.apache.hadoop.hbase.util.VersionInfo; // import the required package/class
/**
* Get a protocol buffer VersionInfo
*
* @return the converted protocol buffer VersionInfo
*/
public static HBaseProtos.VersionInfo getVersionInfo() {
HBaseProtos.VersionInfo.Builder builder = HBaseProtos.VersionInfo.newBuilder();
builder.setVersion(VersionInfo.getVersion());
builder.setUrl(VersionInfo.getUrl());
builder.setRevision(VersionInfo.getRevision());
builder.setUser(VersionInfo.getUser());
builder.setDate(VersionInfo.getDate());
builder.setSrcChecksum(VersionInfo.getSrcChecksum());
return builder.build();
}
Example 10: checkDefaultsVersion
import org.apache.hadoop.hbase.util.VersionInfo; // import the required package/class
private static void checkDefaultsVersion(Configuration conf) {
if (conf.getBoolean("hbase.defaults.for.version.skip", Boolean.FALSE)) return;
String defaultsVersion = conf.get("hbase.defaults.for.version");
String thisVersion = VersionInfo.getVersion();
if (!thisVersion.equals(defaultsVersion)) {
throw new RuntimeException(
"hbase-default.xml file seems to be for an older version of HBase (" +
defaultsVersion + "), this version is " + thisVersion);
}
}
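The check in Example 10 can be short-circuited with the hbase.defaults.for.version.skip flag, which is read on its first line. The sketch below duplicates the method so it compiles on its own; the class name is illustrative and the copied body is only for demonstration.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.util.VersionInfo;

public class SkipDefaultsVersionCheckDemo {
  public static void main(String[] args) {
    // A bare Configuration has no hbase-default.xml loaded, so the version
    // strings would not match; the skip flag avoids the RuntimeException.
    Configuration conf = new Configuration();
    conf.setBoolean("hbase.defaults.for.version.skip", true);
    checkDefaultsVersion(conf);
    System.out.println("defaults-version check skipped");
  }

  // Same logic as Example 10, copied here so the demo is self-contained.
  private static void checkDefaultsVersion(Configuration conf) {
    if (conf.getBoolean("hbase.defaults.for.version.skip", Boolean.FALSE)) return;
    String defaultsVersion = conf.get("hbase.defaults.for.version");
    String thisVersion = VersionInfo.getVersion();
    if (!thisVersion.equals(defaultsVersion)) {
      throw new RuntimeException(
          "hbase-default.xml file seems to be for an older version of HBase ("
          + defaultsVersion + "), this version is " + thisVersion);
    }
  }
}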
Example 11: dumpVersionInfo
import org.apache.hadoop.hbase.util.VersionInfo; // import the required package/class
protected void dumpVersionInfo(PrintWriter out) {
VersionInfo.writeTo(out);
out.println("Hadoop " + org.apache.hadoop.util.VersionInfo.getVersion());
out.println("Subversion " + org.apache.hadoop.util.VersionInfo.getUrl() + " -r " +
org.apache.hadoop.util.VersionInfo.getRevision());
out.println("Compiled by " + org.apache.hadoop.util.VersionInfo.getUser() +
" on " + org.apache.hadoop.util.VersionInfo.getDate());
}
Example 12: checkDefaultsVersion
import org.apache.hadoop.hbase.util.VersionInfo; // import the required package/class
private static void checkDefaultsVersion(Configuration conf) {
if (conf.getBoolean("hbase.defaults.for.version.skip", Boolean.FALSE)) return;
String defaultsVersion = conf.get("hbase.defaults.for.version");
String thisVersion = VersionInfo.getVersion();
// if (!thisVersion.equals(defaultsVersion)) {
// throw new RuntimeException(
// "hbase-default.xml file seems to be for and old version of HBase (" +
// defaultsVersion + "), this version is " + thisVersion);
// }
}
Example 13: main
import org.apache.hadoop.hbase.util.VersionInfo; // import the required package/class
/**
* @see org.apache.hadoop.hbase.regionserver.HRegionServerCommandLine
*/
public static void main(String[] args) throws Exception {
VersionInfo.logVersion();
Configuration conf = HBaseConfiguration.create();
@SuppressWarnings("unchecked")
Class<? extends HRegionServer> regionServerClass =
(Class<? extends HRegionServer>) conf.getClass(HConstants.REGION_SERVER_IMPL,
HRegionServer.class);
new HRegionServerCommandLine(regionServerClass).doMain(args);
}
Example 14: testHTableInterfaceMethods
import org.apache.hadoop.hbase.util.VersionInfo; // import the required package/class
@Test
public void testHTableInterfaceMethods() throws Exception {
Configuration conf = util.getConfiguration();
MasterCoprocessorHost cpHost = util.getMiniHBaseCluster().getMaster().getCoprocessorHost();
Class<?> implClazz = DummyRegionObserver.class;
cpHost.load(implClazz, Coprocessor.PRIORITY_HIGHEST, conf);
CoprocessorEnvironment env = cpHost.findCoprocessorEnvironment(implClazz.getName());
assertEquals(Coprocessor.VERSION, env.getVersion());
assertEquals(VersionInfo.getVersion(), env.getHBaseVersion());
hTableInterface = env.getTable(TEST_TABLE);
checkHTableInterfaceMethods();
cpHost.shutdown(env);
}
Example 15: main
import org.apache.hadoop.hbase.util.VersionInfo; // import the required package/class
/**
* @see org.apache.hadoop.hbase.master.HMasterCommandLine
*/
public static void main(String[] args) {
VersionInfo.logVersion();
//System.out.println("########################################HMaster started!");
new HMasterCommandLine(HMaster.class).doMain(args);
//System.out.println("HMaster stoped!");
}