本文整理汇总了Java中org.apache.hadoop.hbase.HBaseIOException类的典型用法代码示例。如果您正苦于以下问题:Java HBaseIOException类的具体用法?Java HBaseIOException怎么用?Java HBaseIOException使用的例子?那么, 这里精选的类代码示例或许可以为您提供帮助。
HBaseIOException类属于org.apache.hadoop.hbase包,在下文中一共展示了HBaseIOException类的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Java代码示例。
示例1: processResponse
import org.apache.hadoop.hbase.HBaseIOException; //导入依赖的package包/类
/**
 * Flush the response for one call to its socket without blocking.
 * The caller must already hold
 * {@link org.apache.hadoop.hbase.ipc.RpcServer.Connection#responseWriteLock}.
 *
 * @param call the call whose response is being written
 * @return true if the entire response was written, false if the socket is
 *         full and we must come back later
 * @throws IOException if writing to the channel fails
 */
private boolean processResponse(final Call call) throws IOException {
  boolean success = false;
  try {
    // Push out as many bytes as the channel accepts right now.
    final long written = channelWrite(call.connection.channel, call.response);
    if (written < 0) {
      throw new HBaseIOException("Error writing on the socket " +
          "for the call:" + call.toShortString());
    }
    success = true;
  } finally {
    // Any failure above (including the throw) means the connection is toast.
    if (!success) {
      LOG.debug(getName() + call.toShortString() + ": output error -- closing");
      closeConnection(call.connection);
    }
  }
  if (call.response.hasRemaining()) {
    return false; // Socket can't take more, we will have to come back.
  }
  call.done();
  return true;
}
示例2: testTruncatePerms
import org.apache.hadoop.hbase.HBaseIOException; //导入依赖的package包/类
@Test (timeout=180000)
public void testTruncatePerms() throws Exception {
  try {
    // Snapshot the ACLs before truncation so we can compare afterwards.
    final String tableName = TEST_TABLE.getNameAsString();
    List<UserPermission> permsBefore =
        AccessControlClient.getUserPermissions(systemUserConnection, tableName);
    assertTrue(permsBefore != null);
    assertTrue(permsBefore.size() > 1);
    // Truncate requires the table to be disabled first.
    TEST_UTIL.getHBaseAdmin().disableTable(TEST_TABLE);
    TEST_UTIL.truncateTable(TEST_TABLE);
    TEST_UTIL.waitTableAvailable(TEST_TABLE);
    // Permissions must survive the truncate unchanged in count.
    List<UserPermission> permsAfter =
        AccessControlClient.getUserPermissions(systemUserConnection, tableName);
    assertTrue(permsAfter != null);
    assertEquals(permsBefore.size(), permsAfter.size());
  } catch (Throwable e) {
    throw new HBaseIOException(e);
  }
}
示例3: testNoNormalizationIfTooFewRegions
import org.apache.hadoop.hbase.HBaseIOException; //导入依赖的package包/类
@Test
public void testNoNormalizationIfTooFewRegions() throws HBaseIOException {
  // With only two regions the normalizer should decline to produce a plan.
  TableName table = TableName.valueOf("testSplitOfSmallRegion");
  List<HRegionInfo> regions = new ArrayList<>();
  Map<byte[], Integer> sizes = new HashMap<>();
  String[] bounds = { "aaa", "bbb", "ccc" };
  int[] sizeMb = { 10, 15 };
  for (int i = 0; i < sizeMb.length; i++) {
    HRegionInfo region =
        new HRegionInfo(table, Bytes.toBytes(bounds[i]), Bytes.toBytes(bounds[i + 1]));
    regions.add(region);
    sizes.put(region.getRegionName(), sizeMb[i]);
  }
  setupMocksForNormalizer(sizes, regions);
  List<NormalizationPlan> plans = normalizer.computePlanForTable(table);
  assertTrue(plans == null);
}
示例4: testNoNormalizationOnNormalizedCluster
import org.apache.hadoop.hbase.HBaseIOException; //导入依赖的package包/类
@Test
public void testNoNormalizationOnNormalizedCluster() throws HBaseIOException {
  // Four regions with roughly even sizes: cluster is already normalized,
  // so no plan is expected.
  TableName table = TableName.valueOf("testSplitOfSmallRegion");
  List<HRegionInfo> regions = new ArrayList<>();
  Map<byte[], Integer> sizes = new HashMap<>();
  String[] bounds = { "aaa", "bbb", "ccc", "ddd", "eee" };
  int[] sizeMb = { 10, 15, 8, 10 };
  for (int i = 0; i < sizeMb.length; i++) {
    HRegionInfo region =
        new HRegionInfo(table, Bytes.toBytes(bounds[i]), Bytes.toBytes(bounds[i + 1]));
    regions.add(region);
    sizes.put(region.getRegionName(), sizeMb[i]);
  }
  setupMocksForNormalizer(sizes, regions);
  List<NormalizationPlan> plans = normalizer.computePlanForTable(table);
  assertTrue(plans == null);
}
示例5: prepare
import org.apache.hadoop.hbase.HBaseIOException; //导入依赖的package包/类
/**
 * Two responsibilities:
 * - if the call is already completed (by another replica), stop the retries;
 * - point {@code location} at the right region for this replica id.
 */
@Override
public void prepare(final boolean reload) throws IOException {
  // Another replica already answered; nothing left to do here.
  if (controller.isCanceled()) {
    return;
  }
  if (Thread.interrupted()) {
    throw new InterruptedIOException();
  }
  if (location == null || reload) {
    final RegionLocations locations =
        getRegionLocations(false, id, cConnection, tableName, get.getRow());
    location = (id < locations.size()) ? locations.getRegionLocation(id) : null;
  }
  if (location == null || location.getServerName() == null) {
    // With this exception, there will be a retry. The location can be null for a replica
    // when the table is created or after a split.
    throw new HBaseIOException("There is no location for replica id #" + id);
  }
  setStub(cConnection.getClient(location.getServerName()));
}
示例6: prepare
import org.apache.hadoop.hbase.HBaseIOException; //导入依赖的package包/类
@Override
public void prepare(boolean reload) throws IOException {
  if (Thread.interrupted()) {
    throw new InterruptedIOException();
  }
  // Refresh the location when asked to reload, or when we never had one.
  if (location == null || reload) {
    location = getLocation(!reload);
  }
  if (location == null) {
    // With this exception, there will be a retry.
    throw new HBaseIOException(getExceptionMessage());
  }
  setStub(connection.getAdmin(location.getServerName()));
}
示例7: processResponse
import org.apache.hadoop.hbase.HBaseIOException; //导入依赖的package包/类
/**
 * Flush the response for one call to its socket without blocking.
 * The caller must already hold
 * {@link org.apache.hadoop.hbase.ipc.RpcServer.Connection#responseWriteLock}.
 *
 * @param call the call whose response is being written
 * @return true if the entire response was written, false if the socket is
 *         full and we must come back later
 * @throws IOException if writing to the channel fails
 */
private boolean processResponse(final Call call) throws IOException {
  boolean success = false;
  try {
    // Push out as many bytes as the channel accepts right now.
    final long written = channelWrite(call.connection.channel, call.response);
    if (written < 0) {
      throw new HBaseIOException("Error writing on the socket " +
          "for the call:" + call.toShortString());
    }
    success = true;
  } finally {
    // Any failure above (including the throw) means the connection is toast.
    if (!success) {
      LOG.debug(getName() + call.toShortString() + ": output error -- closing");
      closeConnection(call.connection);
    }
  }
  if (call.response.hasRemaining()) {
    return false; // Socket can't take more, we will have to come back.
  }
  call.connection.decRpcCount(); // Say that we're done with this call.
  return true;
}
示例8: moveRegion
import org.apache.hadoop.hbase.HBaseIOException; //导入依赖的package包/类
/**
 * RPC handler that moves an encoded region to the requested destination
 * server (or a balancer-chosen one when no destination is given).
 */
@Override
public MoveRegionResponse moveRegion(RpcController controller, MoveRegionRequest req)
    throws ServiceException {
  final byte[] encodedRegionName = req.getRegion().getValue().toByteArray();
  final RegionSpecifierType type = req.getRegion().getType();
  // A null destination lets the master pick a server for us.
  byte[] destServerName = null;
  if (req.hasDestServerName()) {
    destServerName =
        Bytes.toBytes(ProtobufUtil.toServerName(req.getDestServerName()).getServerName());
  }
  if (type != RegionSpecifierType.ENCODED_REGION_NAME) {
    // Tolerated, but flagged: callers should pass the encoded name.
    LOG.warn("moveRegion specifier type: expected: " + RegionSpecifierType.ENCODED_REGION_NAME
        + " actual: " + type);
  }
  try {
    move(encodedRegionName, destServerName);
  } catch (HBaseIOException ioe) {
    throw new ServiceException(ioe);
  }
  return MoveRegionResponse.newBuilder().build();
}
示例9: move
import org.apache.hadoop.hbase.HBaseIOException; //导入依赖的package包/类
/**
 * Move the region <code>r</code> to <code>dest</code>.
 * @param encodedRegionName The encoded region name; i.e. the hash that makes
 * up the region name suffix: e.g. if regionname is
 * <code>TestTable,0094429456,1289497600452.527db22f95c8a9e0116f0cc13c680396.</code>,
 * then the encoded region name is: <code>527db22f95c8a9e0116f0cc13c680396</code>.
 * @param destServerName The servername of the destination regionserver. If
 * passed the empty byte array we'll assign to a random server. A server name
 * is made of host, port and startcode. Here is an example:
 * <code> host187.example.com,60020,1289493121758</code>
 * @throws UnknownRegionException Thrown if we can't find a region named
 * <code>encodedRegionName</code>
 * @throws ZooKeeperConnectionException
 * @throws MasterNotRunningException
 */
public void move(final byte [] encodedRegionName, final byte [] destServerName)
throws HBaseIOException, MasterNotRunningException, ZooKeeperConnectionException {
// Keep-alive master stub; released in the finally block below.
MasterKeepAliveConnection stub = connection.getKeepAliveMasterService();
try {
MoveRegionRequest request =
RequestConverter.buildMoveRegionRequest(encodedRegionName, destServerName);
stub.moveRegion(null,request);
} catch (ServiceException se) {
// Unwrap the remote exception; rethrow only if it is an HBaseIOException
// (the type callers of this method are declared to handle).
IOException ioe = ProtobufUtil.getRemoteException(se);
if (ioe instanceof HBaseIOException) {
throw (HBaseIOException)ioe;
}
// NOTE(review): any other remote failure is logged and swallowed here —
// the move silently becomes a no-op. Presumably intentional best-effort
// behavior; confirm before relying on this method to always move.
LOG.error("Unexpected exception: " + se + " from calling HMaster.moveRegion");
} catch (DeserializationException de) {
// Bad destination server name is also non-fatal: logged and ignored.
LOG.error("Could not parse destination server name: " + de);
} finally {
stub.close();
}
}
示例10: initialize
import org.apache.hadoop.hbase.HBaseIOException; //导入依赖的package包/类
/**
 * Set up the delegate balancer and register every table that carries an
 * index specification so its index regions can be co-located later.
 */
@Override
public void initialize() throws HBaseIOException {
  final Class<? extends LoadBalancer> delegateClass = conf.getClass(
      Constants.INDEX_BALANCER_DELEGATOR_CLASS, StochasticLoadBalancer.class,
      LoadBalancer.class);
  this.delegator = ReflectionUtils.newInstance(delegateClass, conf);
  this.delegator.setClusterStatus(clusterStatus);
  this.delegator.setMasterServices(this.master);
  try {
    // Scan all table descriptors and remember the indexed ones.
    Map<String, HTableDescriptor> descriptors = this.master.getTableDescriptors().getAll();
    for (Entry<String, HTableDescriptor> entry : descriptors.entrySet()) {
      HTableDescriptor descriptor = entry.getValue();
      if (descriptor.getValue(Constants.INDEX_SPEC_KEY) != null) {
        addIndexedTable(descriptor.getTableName());
      }
    }
  } catch (IOException e) {
    throw new HBaseIOException(e);
  }
}
示例11: roundRobinAssignment
import org.apache.hadoop.hbase.HBaseIOException; //导入依赖的package包/类
/**
 * Round-robin assign user regions via the delegate balancer, then place the
 * corresponding index regions so they land with their user regions.
 */
@Override
public Map<ServerName, List<HRegionInfo>> roundRobinAssignment(List<HRegionInfo> regions,
    List<ServerName> servers) throws HBaseIOException {
  final List<HRegionInfo> userRegions = new ArrayList<HRegionInfo>(1);
  final List<HRegionInfo> indexRegions = new ArrayList<HRegionInfo>(1);
  for (HRegionInfo region : regions) {
    seperateUserAndIndexRegion(region, userRegions, indexRegions);
  }
  Map<ServerName, List<HRegionInfo>> plan = null;
  if (!userRegions.isEmpty()) {
    plan = this.delegator.roundRobinAssignment(userRegions, servers);
    if (plan == null) {
      if (LOG.isDebugEnabled()) {
        LOG.debug("No region plan for user regions.");
      }
      return null;
    }
    // Record where user regions went so index regions can follow them.
    synchronized (this.colocationInfo) {
      savePlan(plan);
    }
  }
  return prepareIndexRegionsPlan(indexRegions, plan, servers);
}
示例12: retainAssignment
import org.apache.hadoop.hbase.HBaseIOException; //导入依赖的package包/类
/**
 * Retain existing assignments for user regions via the delegate balancer,
 * then place the index regions alongside their user regions.
 */
@Override
public Map<ServerName, List<HRegionInfo>> retainAssignment(Map<HRegionInfo, ServerName> regions,
    List<ServerName> servers) throws HBaseIOException {
  final Map<HRegionInfo, ServerName> userRegionsMap =
      new ConcurrentHashMap<HRegionInfo, ServerName>(1);
  final List<HRegionInfo> indexRegions = new ArrayList<HRegionInfo>(1);
  for (Entry<HRegionInfo, ServerName> entry : regions.entrySet()) {
    seperateUserAndIndexRegion(entry, userRegionsMap, indexRegions, servers);
  }
  Map<ServerName, List<HRegionInfo>> plan = null;
  if (!userRegionsMap.isEmpty()) {
    plan = this.delegator.retainAssignment(userRegionsMap, servers);
    if (plan == null) {
      if (LOG.isDebugEnabled()) {
        LOG.debug("Empty region plan for user regions.");
      }
      return null;
    }
    // Record where user regions went so index regions can follow them.
    synchronized (this.colocationInfo) {
      savePlan(plan);
    }
  }
  return prepareIndexRegionsPlan(indexRegions, plan, servers);
}
示例13: generateFavoredNodes
import org.apache.hadoop.hbase.HBaseIOException; //导入依赖的package包/类
/**
 * Pick a random primary for the region, then derive its secondary and
 * tertiary favored nodes; all three are returned with NON_STARTCODE so the
 * list is stable across server restarts.
 */
public List<ServerName> generateFavoredNodes(RegionInfo hri) throws IOException {
  final ServerName primary = servers.get(random.nextInt(servers.size()));
  final List<ServerName> favored = new ArrayList<>(FAVORED_NODES_NUM);
  favored.add(ServerName.valueOf(primary.getHostAndPort(), ServerName.NON_STARTCODE));
  Map<RegionInfo, ServerName> primaryRSMap = new HashMap<>(1);
  primaryRSMap.put(hri, primary);
  ServerName[] secondaryTertiary = placeSecondaryAndTertiaryRS(primaryRSMap).get(hri);
  if (secondaryTertiary == null || secondaryTertiary.length != 2) {
    throw new HBaseIOException("Unable to generate secondary and tertiary favored nodes.");
  }
  for (ServerName sn : secondaryTertiary) {
    favored.add(ServerName.valueOf(sn.getHostAndPort(), ServerName.NON_STARTCODE));
  }
  return favored;
}
示例14: generateFavoredNodesRoundRobin
import org.apache.hadoop.hbase.HBaseIOException; //导入依赖的package包/类
/**
 * Generate favored nodes for a batch of regions, spreading the primary
 * favored node round-robin over the servers in {@code assignmentMap}.
 * Returns null when there are no regions to place.
 */
public Map<RegionInfo, List<ServerName>> generateFavoredNodesRoundRobin(
    Map<ServerName, List<RegionInfo>> assignmentMap, List<RegionInfo> regions)
    throws IOException {
  if (regions.isEmpty()) {
    return null;
  }
  if (!canPlaceFavoredNodes()) {
    throw new HBaseIOException("Not enough nodes to generate favored nodes");
  }
  // Lets try to have an equal distribution for primary favored node
  Map<RegionInfo, ServerName> primaryRSMap = new HashMap<>();
  placePrimaryRSAsRoundRobin(assignmentMap, primaryRSMap, regions);
  return generateFavoredNodes(primaryRSMap);
}
示例15: createRoundRobinAssignProcedures
import org.apache.hadoop.hbase.HBaseIOException; //导入依赖的package包/类
/**
 * Create round-robin assigns. Use on table creation to distribute out regions across cluster.
 * @return AssignProcedures made out of the passed in <code>hris</code> and a call
 *         to the balancer to populate the assigns with targets chosen using round-robin (default
 *         balancer scheme). If at assign-time, the target chosen is no longer up, thats fine,
 *         the AssignProcedure will ask the balancer for a new target, and so on.
 */
public AssignProcedure[] createRoundRobinAssignProcedures(final List<RegionInfo> hris) {
  if (hris.isEmpty()) {
    return null;
  }
  try {
    final List<ServerName> destinations =
        this.master.getServerManager().createDestinationServersList(null);
    // Ask the balancer to assign our regions. Pass the regions en masse. The balancer can do
    // a better job if it has all the assignments in the one lump.
    final Map<ServerName, List<RegionInfo>> assignments =
        getBalancer().roundRobinAssignment(hris, destinations);
    return createAssignProcedures(assignments, hris.size());
  } catch (HBaseIOException hioe) {
    LOG.warn("Failed roundRobinAssignment", hioe);
  }
  // Balancer failed: fall back to the simpler, target-less assign.
  return createAssignProcedures(hris);
}