本文整理汇总了Java中org.apache.hadoop.hbase.protobuf.ProtobufUtil.prependPBMagic方法的典型用法代码示例。如果您正苦于以下问题:Java ProtobufUtil.prependPBMagic方法的具体用法?Java ProtobufUtil.prependPBMagic怎么用?Java ProtobufUtil.prependPBMagic使用的例子?那么, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类org.apache.hadoop.hbase.protobuf.ProtobufUtil
的用法示例。
在下文中一共展示了ProtobufUtil.prependPBMagic方法的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Java代码示例。
示例1: regionSequenceIdsToByteArray
import org.apache.hadoop.hbase.protobuf.ProtobufUtil; //导入方法依赖的package包/类
/**
 * Serializes a region's last-flushed sequence id together with its per-store
 * (column family) sequence ids into a pb-magic-prefixed byte array.
 *
 * @param regionLastFlushedSequenceId the flushed sequence id of a region which is the min of its
 * store max seq ids
 * @param storeSequenceIds column family to sequence Id map; may be null
 * @return Serialized protobuf of <code>RegionSequenceIds</code> with pb magic prefix prepended
 * suitable for use to filter wal edits in distributedLogReplay mode
 */
public static byte[] regionSequenceIdsToByteArray(final Long regionLastFlushedSequenceId,
    final Map<byte[], Long> storeSequenceIds) {
  ClusterStatusProtos.RegionStoreSequenceIds.Builder idsBuilder =
      ClusterStatusProtos.RegionStoreSequenceIds.newBuilder();
  if (storeSequenceIds != null) {
    // One StoreSequenceId sub-message per column family; reuse the builder
    // across iterations, clearing it after each build.
    ClusterStatusProtos.StoreSequenceId.Builder storeIdBuilder =
        ClusterStatusProtos.StoreSequenceId.newBuilder();
    for (Map.Entry<byte[], Long> entry : storeSequenceIds.entrySet()) {
      storeIdBuilder.setFamilyName(ByteStringer.wrap(entry.getKey()));
      storeIdBuilder.setSequenceId(entry.getValue());
      idsBuilder.addStoreSequenceId(storeIdBuilder.build());
      storeIdBuilder.clear();
    }
  }
  idsBuilder.setLastFlushedSequenceId(regionLastFlushedSequenceId);
  return ProtobufUtil.prependPBMagic(idsBuilder.build().toByteArray());
}
示例2: setMetaLocation
import org.apache.hadoop.hbase.protobuf.ProtobufUtil; //导入方法依赖的package包/类
/**
 * Sets the location of <code>hbase:meta</code> in ZooKeeper to the
 * specified server address.
 * @param zookeeper zookeeper watcher to write through
 * @param serverName server now hosting hbase:meta
 * @param replicaId which meta replica the location is for
 * @param state region state to record alongside the location
 * @throws KeeperException on an unexpected zookeeper failure
 */
public static void setMetaLocation(ZooKeeperWatcher zookeeper,
    ServerName serverName, int replicaId, RegionState.State state) throws KeeperException {
  LOG.info("Setting hbase:meta region location in ZooKeeper as " + serverName);
  // Serialize a MetaRegionServer pb; its magic-prefixed bytes are the znode content.
  MetaRegionServer pb = MetaRegionServer.newBuilder()
      .setServer(ProtobufUtil.toServerName(serverName))
      .setRpcVersion(HConstants.RPC_CURRENT_VERSION)
      .setState(state.convert())
      .build();
  byte[] data = ProtobufUtil.prependPBMagic(pb.toByteArray());
  String znode = zookeeper.getZNodeForReplica(replicaId);
  try {
    // Common case: znode already exists, just overwrite its content.
    ZKUtil.setData(zookeeper, znode, data);
  } catch(KeeperException.NoNodeException nne) {
    if (replicaId == HRegionInfo.DEFAULT_REPLICA_ID) {
      LOG.debug("META region location doesn't exist, create it");
    } else {
      LOG.debug("META region location doesn't exist for replicaId " + replicaId +
          ", create it");
    }
    // First write for this replica: create the znode instead.
    ZKUtil.createAndWatch(zookeeper, znode, data);
  }
}
示例3: sendAbortToMembers
import org.apache.hadoop.hbase.protobuf.ProtobufUtil; //导入方法依赖的package包/类
/**
 * This is the abort message being sent by the coordinator to member
 *
 * TODO this code isn't actually used but can be used to issue a cancellation from the
 * coordinator.
 */
@Override
public final void sendAbortToMembers(Procedure proc, ForeignException ee) {
  final String procName = proc.getName();
  LOG.debug("Aborting procedure '" + procName + "' in zk");
  final String procAbortNode = zkProc.getAbortZNode(procName);
  try {
    LOG.debug("Creating abort znode:" + procAbortNode);
    // Attribute the error to the coordinator when the exception carries no source.
    final String source = (ee.getSource() == null) ? coordName : ee.getSource();
    final byte[] errorInfo =
        ProtobufUtil.prependPBMagic(ForeignException.serialize(source, ee));
    // first create the znode for the procedure
    ZKUtil.createAndFailSilent(zkProc.getWatcher(), procAbortNode, errorInfo);
    LOG.debug("Finished creating abort node:" + procAbortNode);
  } catch (KeeperException e) {
    // possible that we get this error for the procedure if we already reset the zk state, but in
    // that case we should still get an error for that procedure anyways
    zkProc.logZKTree(zkProc.baseZNode);
    coordinator.rpcConnectionFailure("Failed to post zk node:" + procAbortNode
        + " to abort procedure '" + procName + "'", new IOException(e));
  }
}
示例4: checkAndMigrateTableStatesToPB
import org.apache.hadoop.hbase.protobuf.ProtobufUtil; //导入方法依赖的package包/类
/**
 * Migrates every table-state znode from its legacy plain-text value to the
 * pb-magic-prefixed protobuf form, deleting state znodes for catalog tables
 * that no longer exist.
 */
private void checkAndMigrateTableStatesToPB(ZooKeeperWatcher zkw) throws KeeperException,
    InterruptedException {
  List<String> tables = ZKUtil.listChildrenNoWatch(zkw, zkw.tableZNode);
  if (tables == null) {
    LOG.info("No table present to migrate table state to PB. returning..");
    return;
  }
  for (String table : tables) {
    String znode = ZKUtil.joinZNode(zkw.tableZNode, table);
    // Delete -ROOT- table state znode since its no longer present in 0.95.0
    // onwards.
    boolean obsoleteCatalogTable = "-ROOT-".equals(table) || ".META.".equals(table);
    if (obsoleteCatalogTable) {
      ZKUtil.deleteNode(zkw, znode);
      continue;
    }
    byte[] data = ZKUtil.getData(zkw, znode);
    if (ProtobufUtil.isPBMagicPrefix(data)) {
      // Already in protobuf form; nothing to migrate.
      continue;
    }
    // Legacy value is the plain enum name; wrap it in a Table pb message.
    ZooKeeperProtos.Table.Builder builder = ZooKeeperProtos.Table.newBuilder();
    builder.setState(ZooKeeperProtos.Table.State.valueOf(Bytes.toString(data)));
    ZKUtil.setData(zkw, znode, ProtobufUtil.prependPBMagic(builder.build().toByteArray()));
  }
}
示例5: getUserAuthsDataToWriteToZooKeeper
import org.apache.hadoop.hbase.protobuf.ProtobufUtil; //导入方法依赖的package包/类
/**
 * Creates the user auth data to be written to zookeeper.
 * @param userAuths map from user name to the auth labels (as ordinals) granted to that user
 * @return Bytes form of user auths details to be written to zookeeper.
 */
public static byte[] getUserAuthsDataToWriteToZooKeeper(Map<String, List<Integer>> userAuths) {
  MultiUserAuthorizations.Builder multiBuilder = MultiUserAuthorizations.newBuilder();
  for (Map.Entry<String, List<Integer>> userEntry : userAuths.entrySet()) {
    // One UserAuthorizations sub-message per user.
    UserAuthorizations.Builder perUserBuilder = UserAuthorizations.newBuilder()
        .setUser(ByteStringer.wrap(Bytes.toBytes(userEntry.getKey())));
    for (Integer labelOrdinal : userEntry.getValue()) {
      perUserBuilder.addAuth(labelOrdinal);
    }
    multiBuilder.addUserAuths(perUserBuilder.build());
  }
  return ProtobufUtil.prependPBMagic(multiBuilder.build().toByteArray());
}
示例6: toByteArray
import org.apache.hadoop.hbase.protobuf.ProtobufUtil; //导入方法依赖的package包/类
/**
 * @return Content of the clusterup znode as a serialized pb with the pb
 * magic as prefix.
 */
static byte [] toByteArray() {
  // The payload is only the human-readable timestamp of when the cluster came up.
  ZooKeeperProtos.ClusterUp.Builder builder = ZooKeeperProtos.ClusterUp.newBuilder()
      .setStartDate(new java.util.Date().toString());
  return ProtobufUtil.prependPBMagic(builder.build().toByteArray());
}
示例7: toByteArray
import org.apache.hadoop.hbase.protobuf.ProtobufUtil; //导入方法依赖的package包/类
/**
 * @return This instance serialized into a byte array
 * @see #parseFrom(byte[])
 */
public byte [] toByteArray() {
  // Wrap the originating server, task state and recovery mode into a
  // SplitLogTask pb and prefix its serialized bytes with pb magic.
  ZooKeeperProtos.SplitLogTask.Builder builder = ZooKeeperProtos.SplitLogTask.newBuilder()
      .setServerName(ProtobufUtil.toServerName(this.originServer))
      .setState(this.state)
      .setMode(this.mode);
  return ProtobufUtil.prependPBMagic(builder.build().toByteArray());
}
示例8: createMyEphemeralNode
import org.apache.hadoop.hbase.protobuf.ProtobufUtil; //导入方法依赖的package包/类
/**
 * Registers this region server in ZooKeeper by creating its ephemeral znode,
 * whose content is a pb-magic-prefixed RegionServerInfo message.
 */
private void createMyEphemeralNode() throws KeeperException, IOException {
  RegionServerInfo.Builder rsInfo = RegionServerInfo.newBuilder()
      // -1 advertises that no info server is running.
      .setInfoPort(infoServer != null ? infoServer.getPort() : -1)
      .setVersionInfo(ProtobufUtil.getVersionInfo());
  byte[] data = ProtobufUtil.prependPBMagic(rsInfo.build().toByteArray());
  ZKUtil.createEphemeralNodeAndWatch(this.zooKeeper, getMyEphemeralNodePath(), data);
}
示例9: toByteArray
import org.apache.hadoop.hbase.protobuf.ProtobufUtil; //导入方法依赖的package包/类
/**
 * @param sn must not be null
 * @param infoPort info server port to advertise
 * @return Content of the master znode as a serialized pb with the pb
 * magic as prefix.
 */
static byte[] toByteArray(final ServerName sn, int infoPort) {
  // Build the nested pb ServerName first, then the enclosing Master message.
  HBaseProtos.ServerName pbServerName = HBaseProtos.ServerName.newBuilder()
      .setHostName(sn.getHostname())
      .setPort(sn.getPort())
      .setStartCode(sn.getStartcode())
      .build();
  ZooKeeperProtos.Master master = ZooKeeperProtos.Master.newBuilder()
      .setMaster(pbServerName)
      .setRpcVersion(HConstants.RPC_CURRENT_VERSION)
      .setInfoPort(infoPort)
      .build();
  return ProtobufUtil.prependPBMagic(master.toByteArray());
}
示例10: positionToByteArray
import org.apache.hadoop.hbase.protobuf.ProtobufUtil; //导入方法依赖的package包/类
/**
 * @param position wal offset to record
 * @return Serialized protobuf of <code>position</code> with pb magic prefix prepended suitable
 * for use as content of an wal position in a replication queue.
 */
public static byte[] positionToByteArray(final long position) {
  ZooKeeperProtos.ReplicationHLogPosition pb =
      ZooKeeperProtos.ReplicationHLogPosition.newBuilder().setPosition(position).build();
  return ProtobufUtil.prependPBMagic(pb.toByteArray());
}
示例11: toByteArray
import org.apache.hadoop.hbase.protobuf.ProtobufUtil; //导入方法依赖的package包/类
/**
 * Serializes the balancer on/off flag as a pb-magic-prefixed
 * LoadBalancerState message.
 */
private byte [] toByteArray(boolean isBalancerOn) {
  return ProtobufUtil.prependPBMagic(
      LoadBalancerProtos.LoadBalancerState.newBuilder()
          .setBalancerOn(isBalancerOn).build().toByteArray());
}
示例12: toByteArray
import org.apache.hadoop.hbase.protobuf.ProtobufUtil; //导入方法依赖的package包/类
/**
 * Serializes the normalizer on/off flag as a pb-magic-prefixed
 * RegionNormalizerState message.
 */
private byte [] toByteArray(boolean isNormalizerOn) {
  return ProtobufUtil.prependPBMagic(
      RegionNormalizerProtos.RegionNormalizerState.newBuilder()
          .setNormalizerOn(isNormalizerOn).build().toByteArray());
}
示例13: toByteArray
import org.apache.hadoop.hbase.protobuf.ProtobufUtil; //导入方法依赖的package包/类
/**
 * @return The clusterid serialized using pb w/ pb magic prefix
 */
public byte [] toByteArray() {
  // convert() produces the pb message for this instance; prefix its bytes.
  byte [] pbBytes = convert().toByteArray();
  return ProtobufUtil.prependPBMagic(pbBytes);
}
示例14: toVersionByteArray
import org.apache.hadoop.hbase.protobuf.ProtobufUtil; //导入方法依赖的package包/类
/**
 * Create the content to write into the ${HBASE_ROOTDIR}/hbase.version file.
 * @param version Version to persist
 * @return Serialized protobuf with <code>version</code> content and a bit of pb magic for a prefix.
 */
static byte [] toVersionByteArray(final String version) {
  FSProtos.HBaseVersionFileContent pb =
      FSProtos.HBaseVersionFileContent.newBuilder().setVersion(version).build();
  return ProtobufUtil.prependPBMagic(pb.toByteArray());
}
示例15: toByteArray
import org.apache.hadoop.hbase.protobuf.ProtobufUtil; //导入方法依赖的package包/类
/**
 * @return This instance serialized as protobuf w/ a magic pb prefix.
 * @see #parseFrom(byte[])
 */
public byte [] toByteArray() {
  // Serialize via convert() and prepend the pb magic marker in one expression.
  return ProtobufUtil.prependPBMagic(convert().toByteArray());
}