This article collects typical usage examples of the Java method org.apache.hadoop.hbase.ServerName.valueOf. If you are wondering what ServerName.valueOf does, how to call it, or what it looks like in real code, the hand-picked examples below may help. You can also explore further usage examples of the enclosing class, org.apache.hadoop.hbase.ServerName.
The following 15 code examples of the ServerName.valueOf method are shown, sorted by popularity by default.
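Before diving into the examples, here is a minimal, self-contained sketch of the three valueOf overloads the snippets below rely on. It is not taken from the examples; the host name, port, and startcode are made-up values for illustration.

import org.apache.hadoop.hbase.ServerName;

public class ServerNameValueOfSketch {
  public static void main(String[] args) {
    // Overload 1: explicit hostname, port and startcode.
    ServerName a = ServerName.valueOf("host187.example.com", 60020, 1289493121758L);
    // Overload 2: a single comma-separated "host,port,startcode" string.
    ServerName b = ServerName.valueOf("host187.example.com,60020,1289493121758");
    // Overload 3: a "host:port" string plus a separate startcode.
    ServerName c = ServerName.valueOf("host187.example.com:60020", 1289493121758L);
    // All three describe the same server.
    System.out.println(a.equals(b) && b.equals(c)); // true
  }
}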
Example 1: setupMocksForNormalizer
import org.apache.hadoop.hbase.ServerName; // import the package/class this method depends on
protected void setupMocksForNormalizer(Map<byte[], Integer> regionSizes,
List<HRegionInfo> hris) {
masterServices = Mockito.mock(MasterServices.class, RETURNS_DEEP_STUBS);
// for simplicity all regions are assumed to be on one server; doesn't matter to us
ServerName sn = ServerName.valueOf("localhost", -1, 1L);
when(masterServices.getAssignmentManager().getRegionStates().
getRegionsOfTable(any(TableName.class))).thenReturn(hris);
when(masterServices.getAssignmentManager().getRegionStates().
getRegionServerOfRegion(any(HRegionInfo.class))).thenReturn(sn);
for (Map.Entry<byte[], Integer> region : regionSizes.entrySet()) {
RegionLoad regionLoad = Mockito.mock(RegionLoad.class);
when(regionLoad.getName()).thenReturn(region.getKey());
when(regionLoad.getStorefileSizeMB()).thenReturn(region.getValue());
when(masterServices.getServerManager().getLoad(sn).
getRegionsLoad().get(region.getKey())).thenReturn(regionLoad);
}
normalizer.setMasterServices(masterServices);
}
Example 2: nodeDeleted
import org.apache.hadoop.hbase.ServerName; // import the package/class this method depends on
@Override
public void nodeDeleted(final String path) {
if(path.startsWith(watcher.drainingZNode)) {
final ServerName sn = ServerName.valueOf(ZKUtil.getNodeName(path));
LOG.info("Draining RS node deleted, removing from list [" +
sn + "]");
remove(sn);
}
}
Example 3: makeServerNames
import org.apache.hadoop.hbase.ServerName; // import the package/class this method depends on
/**
 * @param count the number of server names to create
 * @return <code>count</code> ServerName instances
 */
private static ServerName [] makeServerNames(final int count) {
ServerName [] sns = new ServerName[count];
for (int i = 0; i < count; i++) {
sns[i] = ServerName.valueOf("" + i + ".example.org", 16010, i);
}
return sns;
}
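As a hypothetical usage note (the call site below is not from the source), a test can build a small cluster view from these generated names:

ServerName[] sns = makeServerNames(3);
for (ServerName sn : sns) {
  // Prints "0.example.org,16010,0", "1.example.org,16010,1", "2.example.org,16010,2"
  System.out.println(sn.getServerName());
}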
Example 4: testNeedsBalanceForColocatedReplicas
import org.apache.hadoop.hbase.ServerName; // import the package/class this method depends on
@Test
public void testNeedsBalanceForColocatedReplicas() {
// check the case where there are two hosts on one rack and
// both replicas of a region are hosted on the same server
List<HRegionInfo> regions = randomRegions(1);
ServerName s1 = ServerName.valueOf("host1", 1000, 11111);
ServerName s2 = ServerName.valueOf("host11", 1000, 11111);
Map<ServerName, List<HRegionInfo>> map = new HashMap<ServerName, List<HRegionInfo>>();
map.put(s1, regions);
regions.add(RegionReplicaUtil.getRegionInfoForReplica(regions.get(0), 1));
// after the step above, s1 holds two replicas of the same region
regions = randomRegions(1);
map.put(s2, regions);
assertTrue(loadBalancer.needsBalance(new Cluster(map, null, null, null)));
// check the case where the cluster has two racks, but the two hosts
// holding the replicas are on the same rack
map.clear();
regions = randomRegions(1);
List<HRegionInfo> regionsOnS2 = new ArrayList<HRegionInfo>(1);
regionsOnS2.add(RegionReplicaUtil.getRegionInfoForReplica(regions.get(0), 1));
map.put(s1, regions);
map.put(s2, regionsOnS2);
// add another server so that the cluster has some host on another rack
map.put(ServerName.valueOf("host2", 1000, 11111), randomRegions(1));
assertTrue(loadBalancer.needsBalance(new Cluster(map, null, null,
new ForTestRackManagerOne())));
}
Example 5: toServerName
import org.apache.hadoop.hbase.ServerName; // import the package/class this method depends on
/**
* Convert a protocol buffer ServerName to a ServerName
*
* @param proto the protocol buffer ServerName to convert
* @return the converted ServerName
*/
public static ServerName toServerName(final HBaseProtos.ServerName proto) {
if (proto == null) return null;
String hostName = proto.getHostName();
long startCode = -1;
int port = -1;
if (proto.hasPort()) {
port = proto.getPort();
}
if (proto.hasStartCode()) {
startCode = proto.getStartCode();
}
return ServerName.valueOf(hostName, port, startCode);
}
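As a usage sketch (with made-up values), the protobuf message is typically constructed with its generated builder and then converted back via the method above:

// The setters mirror the getters used in toServerName above.
HBaseProtos.ServerName proto = HBaseProtos.ServerName.newBuilder()
    .setHostName("host187.example.com")
    .setPort(60020)
    .setStartCode(1289493121758L)
    .build();
// Round-trips to "host187.example.com,60020,1289493121758".
ServerName sn = toServerName(proto);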
Example 6: testDeadWorker
import org.apache.hadoop.hbase.ServerName; // import the package/class this method depends on
@Test (timeout=180000)
public void testDeadWorker() throws Exception {
LOG.info("testDeadWorker");
conf.setLong("hbase.splitlog.max.resubmit", 0);
slm = new SplitLogManager(ds, conf, stopper, master, DUMMY_MASTER);
TaskBatch batch = new TaskBatch();
String tasknode = submitTaskAndWait(batch, "foo/1");
int version = ZKUtil.checkExists(zkw, tasknode);
final ServerName worker1 = ServerName.valueOf("worker1,1,1");
SplitLogTask slt = new SplitLogTask.Owned(worker1, this.mode);
ZKUtil.setData(zkw, tasknode, slt.toByteArray());
if (tot_mgr_heartbeat.get() == 0) waitForCounter(tot_mgr_heartbeat, 0, 1, to/2);
slm.handleDeadWorker(worker1);
if (tot_mgr_resubmit.get() == 0) waitForCounter(tot_mgr_resubmit, 0, 1, to+to/2);
if (tot_mgr_resubmit_dead_server_task.get() == 0) {
waitForCounter(tot_mgr_resubmit_dead_server_task, 0, 1, to + to/2);
}
int version1 = ZKUtil.checkExists(zkw, tasknode);
assertTrue(version1 > version);
byte[] taskstate = ZKUtil.getData(zkw, tasknode);
slt = SplitLogTask.parseFrom(taskstate);
assertTrue(slt.isUnassigned(DUMMY_MASTER));
return;
}
Example 7: testRestartMaster
import org.apache.hadoop.hbase.ServerName; // import the package/class this method depends on
@Test public void testRestartMaster() throws IOException, KeeperException {
ZooKeeperWatcher zk = new ZooKeeperWatcher(TEST_UTIL.getConfiguration(),
"testActiveMasterManagerFromZK", null, true);
try {
ZKUtil.deleteNode(zk, zk.getMasterAddressZNode());
ZKUtil.deleteNode(zk, zk.clusterStateZNode);
} catch(KeeperException.NoNodeException nne) {}
// Create the master node with a dummy address
ServerName master = ServerName.valueOf("localhost", 1, System.currentTimeMillis());
// Should not have a master yet
DummyMaster dummyMaster = new DummyMaster(zk,master);
ClusterStatusTracker clusterStatusTracker =
dummyMaster.getClusterStatusTracker();
ActiveMasterManager activeMasterManager =
dummyMaster.getActiveMasterManager();
assertFalse(activeMasterManager.clusterHasActiveMaster.get());
// First test becoming the active master uninterrupted
MonitoredTask status = Mockito.mock(MonitoredTask.class);
clusterStatusTracker.setClusterUp();
activeMasterManager.blockUntilBecomingActiveMaster(100, status);
assertTrue(activeMasterManager.clusterHasActiveMaster.get());
assertMaster(zk, master);
// Now pretend master restart
DummyMaster secondDummyMaster = new DummyMaster(zk,master);
ActiveMasterManager secondActiveMasterManager =
secondDummyMaster.getActiveMasterManager();
assertFalse(secondActiveMasterManager.clusterHasActiveMaster.get());
activeMasterManager.blockUntilBecomingActiveMaster(100, status);
assertTrue(activeMasterManager.clusterHasActiveMaster.get());
assertMaster(zk, master);
}
Example 8: testShortCircuitConnection
import org.apache.hadoop.hbase.ServerName; // import the package/class this method depends on
@Test
@SuppressWarnings("deprecation")
public void testShortCircuitConnection() throws IOException, InterruptedException {
String tnAsString = "testShortCircuitConnection";
TableName tn = TableName.valueOf(tnAsString);
HTableDescriptor htd = UTIL.createTableDescriptor(tnAsString);
HColumnDescriptor hcd = new HColumnDescriptor(Bytes.toBytes("cf"));
htd.addFamily(hcd);
UTIL.createTable(htd, null);
HRegionServer regionServer = UTIL.getRSForFirstRegionInTable(tn);
ClusterConnection connection = regionServer.getConnection();
HTableInterface tableIf = connection.getTable(tn);
assertTrue(tableIf instanceof HTable);
HTable table = (HTable) tableIf;
assertTrue(table.getConnection() == connection);
AdminService.BlockingInterface admin = connection.getAdmin(regionServer.getServerName());
ClientService.BlockingInterface client = connection.getClient(regionServer.getServerName());
assertTrue(admin instanceof RSRpcServices);
assertTrue(client instanceof RSRpcServices);
ServerName anotherSn = ServerName.valueOf(regionServer.getServerName().getHostAndPort(),
EnvironmentEdgeManager.currentTime());
admin = connection.getAdmin(anotherSn);
client = connection.getClient(anotherSn);
assertFalse(admin instanceof RSRpcServices);
assertFalse(client instanceof RSRpcServices);
assertTrue(connection.getAdmin().getConnection() == connection);
}
Example 9: TablestoreRegionLocator
import org.apache.hadoop.hbase.ServerName; // import the package/class this method depends on
public TablestoreRegionLocator(TablestoreConnection connection, TableName tableName) {
this.tableName = tableName;
this.tablestoreAdaptor = OTSAdapter.getInstance(connection.getTablestoreConf());
this.serverName = ServerName.valueOf(connection.getTablestoreConf().getOTSEndpoint(), 0, 0);
this.regionsFetchTimeMillis = 0;
}
Example 10: getServerName
import org.apache.hadoop.hbase.ServerName; // import the package/class this method depends on
@Override
public ServerName getServerName() {
return ServerName.valueOf(hostname, 1234, 1L);
}
Example 11: testMultipleTasks
import org.apache.hadoop.hbase.ServerName; // import the package/class this method depends on
@Test(timeout=60000)
public void testMultipleTasks() throws Exception {
LOG.info("testMultipleTasks");
SplitLogCounters.resetCounters();
final ServerName SRV = ServerName.valueOf("tmt_svr,1,1");
final String PATH1 = ZKSplitLog.getEncodedNodeName(zkw, "tmt_task");
RegionServerServices mockedRS = getRegionServer(SRV);
SplitLogWorker slw =
new SplitLogWorker(ds, TEST_UTIL.getConfiguration(), mockedRS, neverEndingTask);
slw.start();
try {
Thread.yield(); // let the worker start
Thread.sleep(100);
waitForCounter(SplitLogCounters.tot_wkr_task_grabing, 0, 1, WAIT_TIME);
SplitLogTask unassignedManager =
new SplitLogTask.Unassigned(MANAGER, this.mode);
zkw.getRecoverableZooKeeper().create(PATH1, unassignedManager.toByteArray(),
Ids.OPEN_ACL_UNSAFE, CreateMode.PERSISTENT);
waitForCounter(SplitLogCounters.tot_wkr_task_acquired, 0, 1, WAIT_TIME);
// now the worker is busy doing the above task
// create another task
final String PATH2 = ZKSplitLog.getEncodedNodeName(zkw, "tmt_task_2");
zkw.getRecoverableZooKeeper().create(PATH2, unassignedManager.toByteArray(),
Ids.OPEN_ACL_UNSAFE, CreateMode.PERSISTENT);
// preempt the first task, have it owned by another worker
final ServerName anotherWorker = ServerName.valueOf("another-worker,1,1");
SplitLogTask slt = new SplitLogTask.Owned(anotherWorker, this.mode);
ZKUtil.setData(zkw, PATH1, slt.toByteArray());
waitForCounter(SplitLogCounters.tot_wkr_preempt_task, 0, 1, WAIT_TIME);
waitForCounter(SplitLogCounters.tot_wkr_task_acquired, 1, 2, WAIT_TIME);
assertEquals(2, slw.getTaskReadySeq());
byte [] bytes = ZKUtil.getData(zkw, PATH2);
slt = SplitLogTask.parseFrom(bytes);
assertTrue(slt.isOwned(SRV));
} finally {
stopSplitLogWorker(slw);
}
}
Example 12: move
import org.apache.hadoop.hbase.ServerName; // import the package/class this method depends on
void move(final byte[] encodedRegionName,
final byte[] destServerName) throws HBaseIOException {
RegionState regionState = assignmentManager.getRegionStates().
getRegionState(Bytes.toString(encodedRegionName));
if (regionState == null) {
throw new UnknownRegionException(Bytes.toStringBinary(encodedRegionName));
}
HRegionInfo hri = regionState.getRegion();
ServerName dest;
if (destServerName == null || destServerName.length == 0) {
LOG.info("Passed destination servername is null/empty so " +
"choosing a server at random");
final List<ServerName> destServers = this.serverManager.createDestinationServersList(
regionState.getServerName());
dest = balancer.randomAssignment(hri, destServers);
if (dest == null) {
LOG.debug("Unable to determine a plan to assign " + hri);
return;
}
} else {
dest = ServerName.valueOf(Bytes.toString(destServerName));
if (dest.equals(serverName) && balancer instanceof BaseLoadBalancer
&& !((BaseLoadBalancer)balancer).shouldBeOnMaster(hri)) {
// To avoid unnecessary region moving later by balancer. Don't put user
// regions on master. Regions on master could be put on other region
// server intentionally by test however.
LOG.debug("Skipping move of region " + hri.getRegionNameAsString()
+ " to avoid unnecessary region moving later by load balancer,"
+ " because it should not be on master");
return;
}
}
if (dest.equals(regionState.getServerName())) {
LOG.debug("Skipping move of region " + hri.getRegionNameAsString()
+ " because region already assigned to the same server " + dest + ".");
return;
}
// Now we can do the move
RegionPlan rp = new RegionPlan(hri, regionState.getServerName(), dest);
try {
checkInitialized();
if (this.cpHost != null) {
if (this.cpHost.preMove(hri, rp.getSource(), rp.getDestination())) {
return;
}
}
// warmup the region on the destination before initiating the move. this call
// is synchronous and takes some time. doing it before the source region gets
// closed
serverManager.sendRegionWarmup(rp.getDestination(), hri);
LOG.info(getClientIdAuditPrefix() + " move " + rp + ", running balancer");
this.assignmentManager.balance(rp);
if (this.cpHost != null) {
this.cpHost.postMove(hri, rp.getSource(), rp.getDestination());
}
} catch (IOException ioe) {
if (ioe instanceof HBaseIOException) {
throw (HBaseIOException)ioe;
}
throw new HBaseIOException(ioe);
}
}
Example 13: getServerName
import org.apache.hadoop.hbase.ServerName; // import the package/class this method depends on
public ServerName getServerName(){
return ServerName.valueOf(hostname, port, startCode);
}
Example 14: testCacheSeqNums
import org.apache.hadoop.hbase.ServerName; // import the package/class this method depends on
/**
* Test that stale cache updates don't override newer cached values.
*/
@Test
public void testCacheSeqNums() throws Exception{
HTable table = TEST_UTIL.createMultiRegionTable(TABLE_NAME2, FAM_NAM);
Put put = new Put(ROW);
put.add(FAM_NAM, ROW, ROW);
table.put(put);
ConnectionManager.HConnectionImplementation conn =
(ConnectionManager.HConnectionImplementation)table.getConnection();
HRegionLocation location = conn.getCachedLocation(TABLE_NAME2, ROW).getRegionLocation();
assertNotNull(location);
ServerName anySource = ServerName.valueOf(location.getHostname(), location.getPort() - 1, 0L);
// Same server as already in cache reporting - overwrites any value despite seqNum.
int nextPort = location.getPort() + 1;
conn.updateCachedLocation(location.getRegionInfo(), location.getServerName(),
ServerName.valueOf("127.0.0.1", nextPort, 0), location.getSeqNum() - 1);
location = conn.getCachedLocation(TABLE_NAME2, ROW).getRegionLocation();
Assert.assertEquals(nextPort, location.getPort());
// No source specified - same.
nextPort = location.getPort() + 1;
conn.updateCachedLocation(location.getRegionInfo(), location.getServerName(),
ServerName.valueOf("127.0.0.1", nextPort, 0), location.getSeqNum() - 1);
location = conn.getCachedLocation(TABLE_NAME2, ROW).getRegionLocation();
Assert.assertEquals(nextPort, location.getPort());
// Higher seqNum - overwrites lower seqNum.
nextPort = location.getPort() + 1;
conn.updateCachedLocation(location.getRegionInfo(), anySource,
ServerName.valueOf("127.0.0.1", nextPort, 0), location.getSeqNum() + 1);
location = conn.getCachedLocation(TABLE_NAME2, ROW).getRegionLocation();
Assert.assertEquals(nextPort, location.getPort());
// Lower seqNum - does not overwrite higher seqNum.
nextPort = location.getPort() + 1;
conn.updateCachedLocation(location.getRegionInfo(), anySource,
ServerName.valueOf("127.0.0.1", nextPort, 0), location.getSeqNum() - 1);
location = conn.getCachedLocation(TABLE_NAME2, ROW).getRegionLocation();
Assert.assertEquals(nextPort - 1, location.getPort());
table.close();
}
Example 15: rollHLogWriter
import org.apache.hadoop.hbase.ServerName; // import the package/class this method depends on
/**
* Roll the log writer. I.e. when using a file system based write ahead log,
* start writing log messages to a new file.
*
* Note that when talking to a version 1.0+ HBase deployment, the rolling is asynchronous.
* This method will return as soon as the roll is requested and the return value will
* always be null. Additionally, the named region server may schedule store flushes at the
* request of the wal handling the roll request.
*
* When talking to a 0.98 or older HBase deployment, the rolling is synchronous and the
* return value may be either null or a list of encoded region names.
*
* @param serverName
* The servername of the regionserver. A server name is made of host,
* port and startcode. This is mandatory. Here is an example:
* <code> host187.example.com,60020,1289493121758</code>
* @return a set of {@link HRegionInfo#getEncodedName()} that would allow the wal to
* clean up some underlying files. null if there's nothing to flush.
* @throws IOException if a remote or network exception occurs
* @throws FailedLogCloseException
* @deprecated use {@link #rollWALWriter(ServerName)}
*/
@Deprecated
public synchronized byte[][] rollHLogWriter(String serverName)
throws IOException, FailedLogCloseException {
ServerName sn = ServerName.valueOf(serverName);
final RollWALWriterResponse response = rollWALWriterImpl(sn);
int regionCount = response.getRegionToFlushCount();
if (0 == regionCount) {
return null;
}
byte[][] regionsToFlush = new byte[regionCount][];
for (int i = 0; i < regionCount; i++) {
ByteString region = response.getRegionToFlush(i);
regionsToFlush[i] = region.toByteArray();
}
return regionsToFlush;
}
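A brief usage sketch for the method above: admin stands for an HBaseAdmin instance (hypothetical here), and the server name string must be in the comma-separated form that ServerName.valueOf accepts.

String serverName = "host187.example.com,60020,1289493121758";
// On 1.0+ clusters the roll is asynchronous and this returns null.
byte[][] regionsToFlush = admin.rollHLogWriter(serverName);
if (regionsToFlush != null) {
  for (byte[] encodedRegionName : regionsToFlush) {
    System.out.println(Bytes.toString(encodedRegionName));
  }
}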