

Java ReplicationProtbufUtil Class Code Examples

This article collects typical usage examples of the Java class org.apache.hadoop.hbase.protobuf.ReplicationProtbufUtil. If you are wondering what the ReplicationProtbufUtil class is for, how to use it, or what real-world usage looks like, the curated code examples below should help.


The ReplicationProtbufUtil class belongs to the org.apache.hadoop.hbase.protobuf package. Eight code examples of the class are shown below, sorted by popularity by default.
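Before the individual examples, the two call patterns they share can be summarized in a short sketch. This is a minimal, illustrative sketch only, assuming an HBase 1.x-era classpath where the class still lives in org.apache.hadoop.hbase.protobuf; the wrapper class and method names (ReplicationProtbufUtilSketch, ship, replay) are hypothetical, and the exact WAL entry type (WAL.Entry here, HLog.Entry on older branches) depends on the HBase version you build against.

import java.io.IOException;
import java.util.List;

import org.apache.hadoop.hbase.CellScanner;
import org.apache.hadoop.hbase.ipc.PayloadCarryingRpcController;
import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
import org.apache.hadoop.hbase.protobuf.ReplicationProtbufUtil;
import org.apache.hadoop.hbase.protobuf.generated.AdminProtos;
import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.AdminService;
import org.apache.hadoop.hbase.util.Pair;
import org.apache.hadoop.hbase.wal.WAL;

import com.google.protobuf.ServiceException;

// Hypothetical wrapper class; the two methods mirror the patterns in Examples 1-8 below.
public class ReplicationProtbufUtilSketch {

  // Pattern A (Examples 1, 7, 8): ship a batch of WAL entries to a sink
  // region server in a single replicateWALEntry RPC.
  static void ship(AdminService.BlockingInterface sink, List<WAL.Entry> entries)
      throws IOException {
    ReplicationProtbufUtil.replicateWALEntry(sink,
        entries.toArray(new WAL.Entry[entries.size()]));
  }

  // Pattern B (Examples 2-6): build the ReplicateWALEntryRequest plus its
  // CellScanner payload explicitly, attach the payload to an RPC controller,
  // and call replay() on the remote admin stub yourself.
  static void replay(AdminService.BlockingInterface admin, WAL.Entry[] entries)
      throws IOException {
    Pair<AdminProtos.ReplicateWALEntryRequest, CellScanner> p =
        ReplicationProtbufUtil.buildReplicateWALEntryRequest(entries);
    PayloadCarryingRpcController controller = new PayloadCarryingRpcController(p.getSecond());
    try {
      admin.replay(controller, p.getFirst());
    } catch (ServiceException se) {
      throw ProtobufUtil.getRemoteException(se);
    }
  }
}

The first pattern lets ReplicationProtbufUtil drive the RPC internally; the second is what the replay and region-replica examples use when they need to set the cell payload, call timeout, or priority on the controller themselves.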

Example 1: call

import org.apache.hadoop.hbase.protobuf.ReplicationProtbufUtil; // import the required package/class
@Override
public Integer call() throws IOException {
  SinkPeer sinkPeer = null;
  try {
    sinkPeer = replicationSinkMgr.getReplicationSink();
    BlockingInterface rrs = sinkPeer.getRegionServer();
    ReplicationProtbufUtil.replicateWALEntry(rrs,
        entries.toArray(new Entry[entries.size()]));
    replicationSinkMgr.reportSinkSuccess(sinkPeer);
    return ordinal;

  } catch (IOException ioe) {
    if (sinkPeer != null) {
      replicationSinkMgr.reportBadSink(sinkPeer);
    }
    throw ioe;
  }
}
 
Developer ID: fengchen8086, Project: ditb, Lines of code: 19, Source: HBaseInterClusterReplicationEndpoint.java

Example 2: replayToServer

import org.apache.hadoop.hbase.protobuf.ReplicationProtbufUtil; // import the required package/class
private void replayToServer(HRegionInfo regionInfo, List<Entry> entries)
    throws IOException, ServiceException {
  if (entries.isEmpty()) return;

  Entry[] entriesArray = new Entry[entries.size()];
  entriesArray = entries.toArray(entriesArray);
  AdminService.BlockingInterface remoteSvr = conn.getAdmin(getLocation().getServerName());

  Pair<AdminProtos.ReplicateWALEntryRequest, CellScanner> p =
      ReplicationProtbufUtil.buildReplicateWALEntryRequest(entriesArray);
  PayloadCarryingRpcController controller = rpcControllerFactory.newController(p.getSecond());
  try {
    remoteSvr.replay(controller, p.getFirst());
  } catch (ServiceException se) {
    throw ProtobufUtil.getRemoteException(se);
  }
}
 
Developer ID: fengchen8086, Project: ditb, Lines of code: 18, Source: WALEditsReplaySink.java

Example 3: replayToServer

import org.apache.hadoop.hbase.protobuf.ReplicationProtbufUtil; // import the required package/class
private void replayToServer(HRegionInfo regionInfo, List<HLog.Entry> entries)
    throws IOException, ServiceException {
  if (entries.isEmpty()) return;

  HLog.Entry[] entriesArray = new HLog.Entry[entries.size()];
  entriesArray = entries.toArray(entriesArray);
  AdminService.BlockingInterface remoteSvr = conn.getAdmin(getLocation().getServerName());

  Pair<AdminProtos.ReplicateWALEntryRequest, CellScanner> p =
      ReplicationProtbufUtil.buildReplicateWALEntryRequest(entriesArray);
  try {
    PayloadCarryingRpcController controller = new PayloadCarryingRpcController(p.getSecond());
    remoteSvr.replay(controller, p.getFirst());
  } catch (ServiceException se) {
    throw ProtobufUtil.getRemoteException(se);
  }
}
 
Developer ID: tenggyut, Project: HIndex, Lines of code: 18, Source: WALEditsReplaySink.java

Example 4: replayToServer

import org.apache.hadoop.hbase.protobuf.ReplicationProtbufUtil; // import the required package/class
private void replayToServer(HRegionInfo regionInfo, List<HLog.Entry> entries)
    throws IOException, ServiceException {
  if (entries.isEmpty()) return;

  HLog.Entry[] entriesArray = new HLog.Entry[entries.size()];
  entriesArray = entries.toArray(entriesArray);
  AdminService.BlockingInterface remoteSvr = conn.getAdmin(getLocation().getServerName());

  Pair<AdminProtos.ReplicateWALEntryRequest, CellScanner> p =
      ReplicationProtbufUtil.buildReplicateWALEntryRequest(entriesArray);
  PayloadCarryingRpcController controller = rpcControllerFactory.newController(p.getSecond());
  try {
    remoteSvr.replay(controller, p.getFirst());
  } catch (ServiceException se) {
    throw ProtobufUtil.getRemoteException(se);
  }
}
 
Developer ID: shenli-uiuc, Project: PyroDB, Lines of code: 18, Source: WALEditsReplaySink.java

Example 5: call

import org.apache.hadoop.hbase.protobuf.ReplicationProtbufUtil; // import the required package/class
@Override
public ReplicateWALEntryResponse call(HBaseRpcController controller) throws Exception {
  // Check whether we should still replay this entry. If the regions are changed, or the
  // entry is not coming from the primary region, filter it out because we do not need it.
  // Regions can change because of (1) region split (2) region merge (3) table recreated
  boolean skip = false;
  if (!Bytes.equals(location.getRegionInfo().getEncodedNameAsBytes(),
      initialEncodedRegionName)) {
    skip = true;
  }
  if (!this.entries.isEmpty() && !skip) {
    Entry[] entriesArray = new Entry[this.entries.size()];
    entriesArray = this.entries.toArray(entriesArray);

    // set the region name for the target region replica
    Pair<AdminProtos.ReplicateWALEntryRequest, CellScanner> p =
        ReplicationProtbufUtil.buildReplicateWALEntryRequest(entriesArray, location
            .getRegionInfo().getEncodedNameAsBytes(), null, null, null);
    controller.setCellScanner(p.getSecond());
    return stub.replay(controller, p.getFirst());
  }

  if (skip) {
    if (LOG.isTraceEnabled()) {
      LOG.trace("Skipping " + entries.size() + " entries in table " + tableName
        + " because located region " + location.getRegionInfo().getEncodedName()
        + " is different than the original region "
        + Bytes.toStringBinary(initialEncodedRegionName) + " from WALEdit");
      for (Entry entry : entries) {
        LOG.trace("Skipping : " + entry);
      }
    }
    skippedEntries.addAndGet(entries.size());
  }
  return ReplicateWALEntryResponse.newBuilder().build();
}
 
Developer ID: apache, Project: hbase, Lines of code: 37, Source: RegionReplicaReplicationEndpoint.java

Example 6: replayToServer

import org.apache.hadoop.hbase.protobuf.ReplicationProtbufUtil; // import the required package/class
private ReplicateWALEntryResponse replayToServer(List<Entry> entries, int timeout)
    throws IOException {
  // check whether we should still replay this entry. If the regions are changed, or the
  // entry is not coming from the primary region, filter it out because we do not need it.
  // Regions can change because of (1) region split (2) region merge (3) table recreated
  boolean skip = false;

  if (!Bytes.equals(location.getRegionInfo().getEncodedNameAsBytes(),
    initialEncodedRegionName)) {
    skip = true;
  }
  if (!entries.isEmpty() && !skip) {
    Entry[] entriesArray = new Entry[entries.size()];
    entriesArray = entries.toArray(entriesArray);

    // set the region name for the target region replica
    Pair<AdminProtos.ReplicateWALEntryRequest, CellScanner> p =
        ReplicationProtbufUtil.buildReplicateWALEntryRequest(
          entriesArray, location.getRegionInfo().getEncodedNameAsBytes());
    try {
      PayloadCarryingRpcController controller = rpcControllerFactory.newController(p.getSecond());
      controller.setCallTimeout(timeout);
      controller.setPriority(tableName);
      return stub.replay(controller, p.getFirst());
    } catch (ServiceException se) {
      throw ProtobufUtil.getRemoteException(se);
    }
  }

  if (skip) {
    if (LOG.isTraceEnabled()) {
      LOG.trace("Skipping " + entries.size() + " entries in table " + tableName
        + " because located region " + location.getRegionInfo().getEncodedName()
        + " is different than the original region "
        + Bytes.toStringBinary(initialEncodedRegionName) + " from WALEdit");
      for (Entry entry : entries) {
        LOG.trace("Skipping : " + entry);
      }
    }
    skippedEntries.addAndGet(entries.size());
  }
  return ReplicateWALEntryResponse.newBuilder().build();
}
 
Developer ID: fengchen8086, Project: ditb, Lines of code: 44, Source: RegionReplicaReplicationEndpoint.java

Example 7: replicate

import org.apache.hadoop.hbase.protobuf.ReplicationProtbufUtil; // import the required package/class
/**
 * Do the shipping logic
 */
@Override
public boolean replicate(ReplicateContext replicateContext) {
  List<Entry> entries = replicateContext.getEntries();
  int sleepMultiplier = 1;
  while (this.isRunning()) {
    if (!peersSelected) {
      connectToPeers();
      peersSelected = true;
    }

    if (!isPeerEnabled()) {
      if (sleepForRetries("Replication is disabled", sleepMultiplier)) {
        sleepMultiplier++;
      }
      continue;
    }
    SinkPeer sinkPeer = null;
    try {
      sinkPeer = replicationSinkMgr.getReplicationSink();
      BlockingInterface rrs = sinkPeer.getRegionServer();
      if (LOG.isTraceEnabled()) {
        LOG.trace("Replicating " + entries.size() +
            " entries of total size " + replicateContext.getSize());
      }
      ReplicationProtbufUtil.replicateWALEntry(rrs,
          entries.toArray(new Entry[entries.size()]));

      // update metrics
      this.metrics.setAgeOfLastShippedOp(entries.get(entries.size()-1).getKey().getWriteTime());
      return true;

    } catch (IOException ioe) {
      // Didn't ship anything, but must still age the last time we did
      this.metrics.refreshAgeOfLastShippedOp();
      if (ioe instanceof RemoteException) {
        ioe = ((RemoteException) ioe).unwrapRemoteException();
        LOG.warn("Can't replicate because of an error on the remote cluster: ", ioe);
        if (ioe instanceof TableNotFoundException) {
          if (sleepForRetries("A table is missing in the peer cluster. "
              + "Replication cannot proceed without losing data.", sleepMultiplier)) {
            sleepMultiplier++;
          }
        }
      } else {
        if (ioe instanceof SocketTimeoutException) {
          // This exception means we waited for more than 60s and nothing
          // happened, the cluster is alive and calling it right away
          // even for a test just makes things worse.
          sleepForRetries("Encountered a SocketTimeoutException. Since the " +
            "call to the remote cluster timed out, which is usually " +
            "caused by a machine failure or a massive slowdown",
            this.socketTimeoutMultiplier);
        } else if (ioe instanceof ConnectException) {
          LOG.warn("Peer is unavailable, rechecking all sinks: ", ioe);
          replicationSinkMgr.chooseSinks();
        } else {
          LOG.warn("Can't replicate because of a local or network error: ", ioe);
        }
      }

      if (sinkPeer != null) {
        replicationSinkMgr.reportBadSink(sinkPeer);
      }
      if (sleepForRetries("Since we are unable to replicate", sleepMultiplier)) {
        sleepMultiplier++;
      }
    }
  }
  return false; // in case we exited before replicating
}
 
Developer ID: grokcoder, Project: pbase, Lines of code: 74, Source: HBaseInterClusterReplicationEndpoint.java

Example 8: replicateWALEntry

import org.apache.hadoop.hbase.protobuf.ReplicationProtbufUtil; // import the required package/class
private void replicateWALEntry(WAL.Entry[] entries) throws IOException {
    ReplicationProtbufUtil.replicateWALEntry(sepConsumer, entries);
}
 
Developer ID: NGDATA, Project: hbase-indexer, Lines of code: 4, Source: SepConsumerTest.java


Note: The org.apache.hadoop.hbase.protobuf.ReplicationProtbufUtil class examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets were selected from open-source projects contributed by their original authors; copyright remains with those authors, and any use or redistribution must follow the corresponding project's license. Do not reproduce this article without permission.