

Java ProtobufUtil.getRemoteException Method Code Examples

This article collects typical usage examples of the Java method org.apache.hadoop.hbase.protobuf.ProtobufUtil.getRemoteException. If you are wondering how to call ProtobufUtil.getRemoteException, what it is used for, or what real-world invocations look like, the curated code samples below should help. You can also browse further usage examples of the enclosing class, org.apache.hadoop.hbase.protobuf.ProtobufUtil.


The following section presents 15 code examples of the ProtobufUtil.getRemoteException method, sorted by popularity.
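All 15 examples share the same pattern: a protobuf-generated blocking stub declares only com.google.protobuf.ServiceException, and ProtobufUtil.getRemoteException(se) unwraps it into the IOException that the remote server actually raised, so the caller can rethrow or inspect it. The minimal sketch below shows that pattern for orientation; it assumes the HBase 1.x client API used throughout these examples (an AdminService.BlockingInterface stub and AdminProtos.GetServerInfoRequest), and the getServerInfo RPC is chosen purely for illustration.

import java.io.IOException;

import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.AdminService;
import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetServerInfoRequest;
import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetServerInfoResponse;

import com.google.protobuf.ServiceException;

public class GetRemoteExceptionSketch {
  /**
   * Calls a protobuf blocking stub and converts any ServiceException into the
   * underlying remote IOException via ProtobufUtil.getRemoteException.
   */
  public static GetServerInfoResponse getServerInfo(AdminService.BlockingInterface admin)
      throws IOException {
    try {
      // The generated stub wraps every server-side failure in a ServiceException ...
      return admin.getServerInfo(null, GetServerInfoRequest.getDefaultInstance());
    } catch (ServiceException se) {
      // ... so unwrap it back into the IOException the caller expects.
      throw ProtobufUtil.getRemoteException(se);
    }
  }
}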

Example 1: sendRegionOpen

import org.apache.hadoop.hbase.protobuf.ProtobufUtil; // import the package/class this method depends on
/**
 * Sends an OPEN RPC to the specified server to open the specified region.
 * <p>
 * Open should not fail but can if server just crashed.
 * <p>
 * @param server server to open a region
 * @param region region to open
 * @param versionOfOfflineNode the version that must be present in the offline znode
 * when the RS tries to change the region state from OFFLINE to another state.
 * @param favoredNodes the favored nodes for this region
 */
public RegionOpeningState sendRegionOpen(final ServerName server,
    HRegionInfo region, int versionOfOfflineNode, List<ServerName> favoredNodes)
throws IOException {
  AdminService.BlockingInterface admin = getRsAdmin(server);
  if (admin == null) {
    LOG.warn("Attempting to send OPEN RPC to server " + server.toString() +
      " failed because no RPC connection found to this server");
    return RegionOpeningState.FAILED_OPENING;
  }
  OpenRegionRequest request = RequestConverter.buildOpenRegionRequest(server, 
    region, versionOfOfflineNode, favoredNodes, 
    (RecoveryMode.LOG_REPLAY == this.services.getMasterFileSystem().getLogRecoveryMode()));
  try {
    OpenRegionResponse response = admin.openRegion(null, request);
    return ResponseConverter.getRegionOpeningState(response);
  } catch (ServiceException se) {
    throw ProtobufUtil.getRemoteException(se);
  }
}
 
Developer: fengchen8086, Project: ditb, Lines: 31, Source: ServerManager.java

Example 2: replayToServer

import org.apache.hadoop.hbase.protobuf.ProtobufUtil; // import the package/class this method depends on
private void replayToServer(HRegionInfo regionInfo, List<Entry> entries)
    throws IOException, ServiceException {
  if (entries.isEmpty()) return;

  Entry[] entriesArray = new Entry[entries.size()];
  entriesArray = entries.toArray(entriesArray);
  AdminService.BlockingInterface remoteSvr = conn.getAdmin(getLocation().getServerName());

  Pair<AdminProtos.ReplicateWALEntryRequest, CellScanner> p =
      ReplicationProtbufUtil.buildReplicateWALEntryRequest(entriesArray);
  PayloadCarryingRpcController controller = rpcControllerFactory.newController(p.getSecond());
  try {
    remoteSvr.replay(controller, p.getFirst());
  } catch (ServiceException se) {
    throw ProtobufUtil.getRemoteException(se);
  }
}
 
Developer: fengchen8086, Project: ditb, Lines: 18, Source: WALEditsReplaySink.java

Example 3: checkAndDelete

import org.apache.hadoop.hbase.protobuf.ProtobufUtil; // import the package/class this method depends on
/**
 * {@inheritDoc}
 */
@Override
public boolean checkAndDelete(final byte [] row,
    final byte [] family, final byte [] qualifier, final byte [] value,
    final Delete delete)
throws IOException {
  RegionServerCallable<Boolean> callable =
    new RegionServerCallable<Boolean>(connection, getName(), row) {
      @Override
      public Boolean call(int callTimeout) throws IOException {
        PayloadCarryingRpcController controller = rpcControllerFactory.newController();
        controller.setPriority(tableName);
        controller.setCallTimeout(callTimeout);
        try {
          MutateRequest request = RequestConverter.buildMutateRequest(
            getLocation().getRegionInfo().getRegionName(), row, family, qualifier,
              new BinaryComparator(value), CompareType.EQUAL, delete);
          MutateResponse response = getStub().mutate(controller, request);
          return Boolean.valueOf(response.getProcessed());
        } catch (ServiceException se) {
          throw ProtobufUtil.getRemoteException(se);
        }
      }
    };
  return rpcCallerFactory.<Boolean> newCaller().callWithRetries(callable, this.operationTimeout);
}
 
Developer: fengchen8086, Project: ditb, Lines: 29, Source: HTable.java

Example 4: expectSnapshotDoneException

import org.apache.hadoop.hbase.protobuf.ProtobufUtil; // import the package/class this method depends on
/**
 * Expect the snapshot to throw an error when checking if the snapshot is
 * complete
 *
 * @param master master to check
 * @param snapshot the {@link SnapshotDescription} request to pass to the master
 * @param clazz expected exception from the master
 */
public static void expectSnapshotDoneException(HMaster master,
    IsSnapshotDoneRequest snapshot,
    Class<? extends HBaseSnapshotException> clazz) {
  try {
    master.getMasterRpcServices().isSnapshotDone(null, snapshot);
    Assert.fail("didn't fail to lookup a snapshot");
  } catch (ServiceException se) {
    try {
      throw ProtobufUtil.getRemoteException(se);
    } catch (HBaseSnapshotException e) {
      assertEquals("Threw wrong snapshot exception!", clazz, e.getClass());
    } catch (Throwable t) {
      Assert.fail("Threw an unexpected exception:" + t);
    }
  }
}
 
Developer: fengchen8086, Project: ditb, Lines: 25, Source: SnapshotTestingUtils.java

Example 5: getHTableDescriptor

import org.apache.hadoop.hbase.protobuf.ProtobufUtil; // import the package/class this method depends on
/**
 * Connects to the master to get the table descriptor.
 * @param tableName table name
 * @throws IOException if the connection to master fails or if the table
 *  is not found.
 * @deprecated Use {@link Admin#getTableDescriptor(TableName)} instead
 */
@Deprecated
@Override
public HTableDescriptor getHTableDescriptor(final TableName tableName)
throws IOException {
  if (tableName == null) return null;
  MasterKeepAliveConnection master = getKeepAliveMasterService();
  GetTableDescriptorsResponse htds;
  try {
    GetTableDescriptorsRequest req =
      RequestConverter.buildGetTableDescriptorsRequest(tableName);
    htds = master.getTableDescriptors(null, req);
  } catch (ServiceException se) {
    throw ProtobufUtil.getRemoteException(se);
  } finally {
    master.close();
  }
  if (!htds.getTableSchemaList().isEmpty()) {
    return HTableDescriptor.convert(htds.getTableSchemaList().get(0));
  }
  throw new TableNotFoundException(tableName.getNameAsString());
}
 
Developer: fengchen8086, Project: ditb, Lines: 29, Source: ConnectionManager.java

Example 6: getHTableDescriptorsByTableName

import org.apache.hadoop.hbase.protobuf.ProtobufUtil; // import the package/class this method depends on
/**
 * @deprecated Use {@link Admin#getTableDescriptorsByTableName(List)} instead
 */
@Deprecated
@Override
public HTableDescriptor[] getHTableDescriptorsByTableName(
    List<TableName> tableNames) throws IOException {
  if (tableNames == null || tableNames.isEmpty()) return new HTableDescriptor[0];
  MasterKeepAliveConnection master = getKeepAliveMasterService();
  try {
    GetTableDescriptorsRequest req =
      RequestConverter.buildGetTableDescriptorsRequest(tableNames);
    return ProtobufUtil.getHTableDescriptorArray(master.getTableDescriptors(null, req));
  } catch (ServiceException se) {
    throw ProtobufUtil.getRemoteException(se);
  } finally {
    master.close();
  }
}
 
Developer: fengchen8086, Project: ditb, Lines: 20, Source: ConnectionManager.java

Example 7: stopRegionServer

import org.apache.hadoop.hbase.protobuf.ProtobufUtil; // import the package/class this method depends on
/**
 * Stop the designated regionserver
 * @param hostnamePort Hostname and port delimited by a <code>:</code> as in
 * <code>example.org:1234</code>
 * @throws IOException if a remote or network exception occurs
 */
@Override
public synchronized void stopRegionServer(final String hostnamePort)
throws IOException {
  String hostname = Addressing.parseHostname(hostnamePort);
  int port = Addressing.parsePort(hostnamePort);
  AdminService.BlockingInterface admin =
    this.connection.getAdmin(ServerName.valueOf(hostname, port, 0));
  StopServerRequest request = RequestConverter.buildStopServerRequest(
    "Called by admin client " + this.connection.toString());
  PayloadCarryingRpcController controller = rpcControllerFactory.newController();

  controller.setPriority(HConstants.HIGH_QOS);
  try {
    // TODO: this does not do retries, it should. Set priority and timeout in controller
    admin.stopServer(controller, request);
  } catch (ServiceException se) {
    throw ProtobufUtil.getRemoteException(se);
  }
}
 
Developer: fengchen8086, Project: ditb, Lines: 26, Source: HBaseAdmin.java

Example 8: openScanner

import org.apache.hadoop.hbase.protobuf.ProtobufUtil; // import the package/class this method depends on
protected long openScanner() throws IOException {
  incRPCcallsMetrics();
  ScanRequest request =
    RequestConverter.buildScanRequest(
      getLocation().getRegionInfo().getRegionName(),
      this.scan, 0, false);
  try {
    ScanResponse response = getStub().scan(null, request);
    long id = response.getScannerId();
    if (logScannerActivity) {
      LOG.info("Open scanner=" + id + " for scan=" + scan.toString()
        + " on region " + getLocation().toString());
    }
    return id;
  } catch (ServiceException se) {
    throw ProtobufUtil.getRemoteException(se);
  }
}
 
Developer: fengchen8086, Project: ditb, Lines: 19, Source: ScannerCallable.java

Example 9: close

import org.apache.hadoop.hbase.protobuf.ProtobufUtil; // import the package/class this method depends on
private void close() {
  if (this.scannerId == -1L) {
    return;
  }
  try {
    incRPCcallsMetrics();
    ScanRequest request =
        RequestConverter.buildScanRequest(this.scannerId, 0, true, this.scanMetrics != null);
    try {
      getStub().scan(null, request);
    } catch (ServiceException se) {
      throw ProtobufUtil.getRemoteException(se);
    }
  } catch (IOException e) {
    LOG.warn("Ignore, probably already closed", e);
  }
  this.scannerId = -1L;
}
 
Developer: fengchen8086, Project: ditb, Lines: 19, Source: ScannerCallable.java

Example 10: checkAndPut

import org.apache.hadoop.hbase.protobuf.ProtobufUtil; // import the package/class this method depends on
/**
 * {@inheritDoc}
 */
@Override
public boolean checkAndPut(final byte [] row, final byte [] family,
    final byte [] qualifier, final CompareOp compareOp, final byte [] value,
    final Put put)
throws IOException {
  RegionServerCallable<Boolean> callable =
    new RegionServerCallable<Boolean>(connection, getName(), row) {
      @Override
      public Boolean call(int callTimeout) throws IOException {
        PayloadCarryingRpcController controller = new PayloadCarryingRpcController();
        controller.setPriority(tableName);
        controller.setCallTimeout(callTimeout);
        try {
          CompareType compareType = CompareType.valueOf(compareOp.name());
          MutateRequest request = RequestConverter.buildMutateRequest(
            getLocation().getRegionInfo().getRegionName(), row, family, qualifier,
              new BinaryComparator(value), compareType, put);
          MutateResponse response = getStub().mutate(controller, request);
          return Boolean.valueOf(response.getProcessed());
        } catch (ServiceException se) {
          throw ProtobufUtil.getRemoteException(se);
        }
      }
    };
  return rpcCallerFactory.<Boolean> newCaller().callWithRetries(callable, this.operationTimeout);
}
 
Developer: fengchen8086, Project: ditb, Lines: 30, Source: HTable.java

Example 11: reportForDuty

import org.apache.hadoop.hbase.protobuf.ProtobufUtil; // import the package/class this method depends on
private RegionServerStartupResponse reportForDuty() throws IOException {
  ServerName masterServerName = createRegionServerStatusStub();
  if (masterServerName == null) return null;
  RegionServerStartupResponse result = null;
  try {
    rpcServices.requestCount.set(0);
    LOG.info(
        "reportForDuty to master=" + masterServerName + " with port=" + rpcServices.isa.getPort()
            + ", startcode=" + this.startcode);
    long now = EnvironmentEdgeManager.currentTime();
    int port = rpcServices.isa.getPort();
    RegionServerStartupRequest.Builder request = RegionServerStartupRequest.newBuilder();
    if (shouldUseThisHostnameInstead()) {
      request.setUseThisHostnameInstead(useThisHostnameInstead);
    }
    request.setPort(port);
    request.setServerStartCode(this.startcode);
    request.setServerCurrentTime(now);
    result = this.rssStub.regionServerStartup(null, request.build());
  } catch (ServiceException se) {
    IOException ioe = ProtobufUtil.getRemoteException(se);
    if (ioe instanceof ClockOutOfSyncException) {
      LOG.fatal("Master rejected startup because clock is out of sync", ioe);
      // Re-throw IOE will cause RS to abort
      throw ioe;
    } else if (ioe instanceof ServerNotRunningYetException) {
      LOG.debug("Master is not running yet");
    } else {
      LOG.warn("error telling master we are up", se);
    }
    rssStub = null;
  }
  return result;
}
 
Developer: fengchen8086, Project: ditb, Lines: 35, Source: HRegionServer.java

Example 12: call

import org.apache.hadoop.hbase.protobuf.ProtobufUtil; // import the package/class this method depends on
@Override
public ReplicateWALEntryResponse call(int callTimeout) throws IOException {
  try {
    replayToServer(this.regionInfo, this.entries);
  } catch (ServiceException se) {
    throw ProtobufUtil.getRemoteException(se);
  }
  return null;
}
 
Developer: fengchen8086, Project: ditb, Lines: 10, Source: WALEditsReplaySink.java

Example 13: testRPCException

import org.apache.hadoop.hbase.protobuf.ProtobufUtil; // import the package/class this method depends on
@Test
public void testRPCException() throws IOException, InterruptedException, KeeperException {
  ServerName sm = master.getServerName();
  boolean fakeZNodeDelete = false;
  for (int i = 0; i < 20; i++) {
    try {
      BlockingRpcChannel channel = rpcClient.createBlockingRpcChannel(sm, User.getCurrent(), 0);
      MasterProtos.MasterService.BlockingInterface stub =
          MasterProtos.MasterService.newBlockingStub(channel);
      assertTrue(stub.isMasterRunning(null, IsMasterRunningRequest.getDefaultInstance())
          .getIsMasterRunning());
      return;
    } catch (ServiceException ex) {
      IOException ie = ProtobufUtil.getRemoteException(ex);
      // No SocketTimeoutException here. RpcServer is already started after the construction of
      // HMaster.
      assertTrue(ie.getMessage().startsWith(
        "org.apache.hadoop.hbase.ipc.ServerNotRunningYetException: Server is not running yet"));
      LOG.info("Expected exception: ", ie);
      if (!fakeZNodeDelete) {
        testUtil.getZooKeeperWatcher().getRecoverableZooKeeper()
            .delete(testUtil.getZooKeeperWatcher().getMasterAddressZNode(), -1);
        fakeZNodeDelete = true;
      }
    }
    Thread.sleep(1000);
  }
}
 
Developer: fengchen8086, Project: ditb, Lines: 29, Source: TestHMasterRPCException.java

Example 14: closeRegionWithEncodedRegionName

import org.apache.hadoop.hbase.protobuf.ProtobufUtil; // import the package/class this method depends on
/**
 * For expert-admins. Runs close on the regionserver. Closes a region based on
 * the encoded region name. The region server name is mandatory. If the
 * servername is provided then based on the online regions in the specified
 * regionserver the specified region will be closed. The master will not be
 * informed of the close. Note that the regionname is the encoded regionname.
 *
 * @param encodedRegionName
 *          The encoded region name; i.e. the hash that makes up the region
 *          name suffix: e.g. if regionname is
 *          <code>TestTable,0094429456,1289497600452.527db22f95c8a9e0116f0cc13c680396.</code>
 *          , then the encoded region name is:
 *          <code>527db22f95c8a9e0116f0cc13c680396</code>.
 * @param serverName
 *          The servername of the regionserver. A server name is made of host,
 *          port and startcode. This is mandatory. Here is an example:
 *          <code> host187.example.com,60020,1289493121758</code>
 * @return true if the region was closed, false if not.
 * @throws IOException
 *           if a remote or network exception occurs
 */
@Override
public boolean closeRegionWithEncodedRegionName(final String encodedRegionName,
    final String serverName) throws IOException {
  if (null == serverName || ("").equals(serverName.trim())) {
    throw new IllegalArgumentException(
        "The servername cannot be null or empty.");
  }
  ServerName sn = ServerName.valueOf(serverName);
  AdminService.BlockingInterface admin = this.connection.getAdmin(sn);
  // Close the region without updating zk state.
  CloseRegionRequest request =
    RequestConverter.buildCloseRegionRequest(sn, encodedRegionName, false);
  try {
    PayloadCarryingRpcController controller = rpcControllerFactory.newController();

    // TODO: this does not do retries, it should. Set priority and timeout in controller
    CloseRegionResponse response = admin.closeRegion(controller, request);
    boolean isRegionClosed = response.getClosed();
    if (false == isRegionClosed) {
      LOG.error("Not able to close the region " + encodedRegionName + ".");
    }
    return isRegionClosed;
  } catch (ServiceException se) {
    throw ProtobufUtil.getRemoteException(se);
  }
}
 
Developer: fengchen8086, Project: ditb, Lines: 48, Source: HBaseAdmin.java

Example 15: call

import org.apache.hadoop.hbase.protobuf.ProtobufUtil; // import the package/class this method depends on
@Override
public Result[] call(int timeout) throws IOException {
  if (this.closed) return null;
  if (Thread.interrupted()) {
    throw new InterruptedIOException();
  }
  ScanRequest request = RequestConverter.buildScanRequest(getLocation()
      .getRegionInfo().getRegionName(), getScan(), getCaching(), true);
  ScanResponse response = null;
  controller = controllerFactory.newController();
  try {
    controller.setPriority(getTableName());
    controller.setCallTimeout(timeout);
    response = getStub().scan(controller, request);
    Result[] results = ResponseConverter.getResults(controller.cellScanner(),
        response);
    if (response.hasMoreResultsInRegion()) {
      setHasMoreResultsContext(true);
      setServerHasMoreResults(response.getMoreResultsInRegion());
    } else {
      setHasMoreResultsContext(false);
    }
    // We need to update result metrics since we are overriding call()
    updateResultsMetrics(results);
    return results;
  } catch (ServiceException se) {
    throw ProtobufUtil.getRemoteException(se);
  }
}
 
Developer: fengchen8086, Project: ditb, Lines: 30, Source: ClientSmallScanner.java


Note: The org.apache.hadoop.hbase.protobuf.ProtobufUtil.getRemoteException examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from community-contributed open-source projects, and copyright of the source code remains with the original authors; consult the corresponding project's license before redistributing or reusing the code, and do not reproduce this article without permission.