This article collects typical usage examples of the Java method org.apache.hadoop.hbase.protobuf.ProtobufUtil.lengthOfPBMagic. If you are wondering what exactly ProtobufUtil.lengthOfPBMagic does and how to call it, the curated code examples below may help. You can also read more about the enclosing class, org.apache.hadoop.hbase.protobuf.ProtobufUtil.
Fifteen code examples of the ProtobufUtil.lengthOfPBMagic method are shown below, sorted by popularity by default.
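All of the examples share the same pattern: the serialized znode or file content begins with HBase's protobuf "magic" prefix, ProtobufUtil.lengthOfPBMagic() returns the length of that prefix, and the actual protobuf message is merged from the bytes that follow it. The following is a minimal sketch of that pattern, not code from any of the examples below: the wrapper class and method name are illustrative, the ClusterId message is borrowed from Example 7, and the import locations assume an HBase 1.x classpath.

import java.io.IOException;

import org.apache.hadoop.hbase.exceptions.DeserializationException;
import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
import org.apache.hadoop.hbase.protobuf.generated.ClusterIdProtos;

public class PBMagicParseSketch {

  /**
   * Illustrative helper: parse a pb-serialized ClusterId that carries the pb magic prefix.
   * Mirrors the parse pattern used by the examples below.
   */
  static ClusterIdProtos.ClusterId parseClusterId(final byte[] bytes)
      throws DeserializationException {
    if (!ProtobufUtil.isPBMagicPrefix(bytes)) {
      throw new DeserializationException("Missing pb magic prefix");
    }
    // Skip the magic prefix; the protobuf payload starts right after it.
    int pblen = ProtobufUtil.lengthOfPBMagic();
    ClusterIdProtos.ClusterId.Builder builder = ClusterIdProtos.ClusterId.newBuilder();
    try {
      // Merge only the bytes that follow the magic prefix.
      ProtobufUtil.mergeFrom(builder, bytes, pblen, bytes.length - pblen);
    } catch (IOException e) {
      throw new DeserializationException(e);
    }
    return builder.build();
  }
}

The magic prefix is what lets readers distinguish pb-serialized content from legacy data, which is why several of the examples below fall back to an older Writable or plain-string format when isPBMagicPrefix returns false.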
Example 1: parseStateFrom

import org.apache.hadoop.hbase.protobuf.ProtobufUtil; // import the package/class the method depends on
/**
 * @param bytes Content of a state znode.
 * @return State parsed from the passed bytes.
 * @throws DeserializationException
 */
private static ZooKeeperProtos.ReplicationState.State parseStateFrom(final byte[] bytes)
    throws DeserializationException {
  ProtobufUtil.expectPBMagicPrefix(bytes);
  int pblen = ProtobufUtil.lengthOfPBMagic();
  ZooKeeperProtos.ReplicationState.Builder builder =
      ZooKeeperProtos.ReplicationState.newBuilder();
  ZooKeeperProtos.ReplicationState state;
  try {
    ProtobufUtil.mergeFrom(builder, bytes, pblen, bytes.length - pblen);
    state = builder.build();
    return state.getState();
  } catch (IOException e) {
    throw new DeserializationException(e);
  }
}
Example 2: parsePeerFrom

import org.apache.hadoop.hbase.protobuf.ProtobufUtil; // import the package/class the method depends on
/**
 * @param bytes Content of a peer znode.
 * @return ClusterKey parsed from the passed bytes.
 * @throws DeserializationException
 */
private static ReplicationPeerConfig parsePeerFrom(final byte[] bytes)
    throws DeserializationException {
  if (ProtobufUtil.isPBMagicPrefix(bytes)) {
    int pblen = ProtobufUtil.lengthOfPBMagic();
    ZooKeeperProtos.ReplicationPeer.Builder builder =
        ZooKeeperProtos.ReplicationPeer.newBuilder();
    ZooKeeperProtos.ReplicationPeer peer;
    try {
      ProtobufUtil.mergeFrom(builder, bytes, pblen, bytes.length - pblen);
      peer = builder.build();
    } catch (IOException e) {
      throw new DeserializationException(e);
    }
    return convert(peer);
  } else {
    if (bytes.length > 0) {
      return new ReplicationPeerConfig().setClusterKey(Bytes.toString(bytes));
    }
    return new ReplicationPeerConfig().setClusterKey("");
  }
}
Example 3: parseFrom

import org.apache.hadoop.hbase.protobuf.ProtobufUtil; // import the package/class the method depends on
/**
 * @param bytes A pb serialized {@link HTableDescriptor} instance with pb magic prefix
 * @return An instance of {@link HTableDescriptor} made from <code>bytes</code>
 * @throws DeserializationException
 * @throws IOException
 * @see #toByteArray()
 */
public static HTableDescriptor parseFrom(final byte [] bytes)
    throws DeserializationException, IOException {
  if (!ProtobufUtil.isPBMagicPrefix(bytes)) {
    return (HTableDescriptor)Writables.getWritable(bytes, new HTableDescriptor());
  }
  int pblen = ProtobufUtil.lengthOfPBMagic();
  TableSchema.Builder builder = TableSchema.newBuilder();
  TableSchema ts;
  try {
    ProtobufUtil.mergeFrom(builder, bytes, pblen, bytes.length - pblen);
    ts = builder.build();
  } catch (IOException e) {
    throw new DeserializationException(e);
  }
  return convert(ts);
}
Example 4: appendPeerState

import org.apache.hadoop.hbase.protobuf.ProtobufUtil; // import the package/class the method depends on
private static void appendPeerState(ZooKeeperWatcher zkw, String znodeToProcess,
    StringBuilder sb) throws KeeperException, InvalidProtocolBufferException {
  String peerState = zkw.getConfiguration().get("zookeeper.znode.replication.peers.state",
      "peer-state");
  int pblen = ProtobufUtil.lengthOfPBMagic();
  for (String child : ZKUtil.listChildrenNoWatch(zkw, znodeToProcess)) {
    if (!child.equals(peerState)) continue;
    String peerStateZnode = ZKUtil.joinZNode(znodeToProcess, child);
    sb.append("\n").append(peerStateZnode).append(": ");
    byte[] peerStateData;
    try {
      peerStateData = ZKUtil.getData(zkw, peerStateZnode);
      ZooKeeperProtos.ReplicationState.Builder builder =
          ZooKeeperProtos.ReplicationState.newBuilder();
      ProtobufUtil.mergeFrom(builder, peerStateData, pblen, peerStateData.length - pblen);
      sb.append(builder.getState().name());
    } catch (IOException ipbe) {
      LOG.warn("Got Exception while parsing peer: " + znodeToProcess, ipbe);
    } catch (InterruptedException e) {
      zkw.interruptedException(e);
      return;
    }
  }
}
Example 5: parseWALPositionFrom

import org.apache.hadoop.hbase.protobuf.ProtobufUtil; // import the package/class the method depends on
/**
 * @param bytes - Content of a WAL position znode.
 * @return long - The current WAL position.
 * @throws DeserializationException
 */
public static long parseWALPositionFrom(final byte[] bytes) throws DeserializationException {
  if (bytes == null) {
    throw new DeserializationException("Unable to parse null WAL position.");
  }
  if (ProtobufUtil.isPBMagicPrefix(bytes)) {
    int pblen = ProtobufUtil.lengthOfPBMagic();
    ZooKeeperProtos.ReplicationHLogPosition.Builder builder =
        ZooKeeperProtos.ReplicationHLogPosition.newBuilder();
    ZooKeeperProtos.ReplicationHLogPosition position;
    try {
      ProtobufUtil.mergeFrom(builder, bytes, pblen, bytes.length - pblen);
      position = builder.build();
    } catch (IOException e) {
      throw new DeserializationException(e);
    }
    return position.getPosition();
  } else {
    if (bytes.length > 0) {
      return Bytes.toLong(bytes);
    }
    return 0;
  }
}
Example 6: parseRegionStoreSequenceIds

import org.apache.hadoop.hbase.protobuf.ProtobufUtil; // import the package/class the method depends on
/**
 * @param bytes Content of serialized data of RegionStoreSequenceIds
 * @return a RegionStoreSequenceIds object
 * @throws DeserializationException
 */
public static RegionStoreSequenceIds parseRegionStoreSequenceIds(final byte[] bytes)
    throws DeserializationException {
  if (bytes == null || !ProtobufUtil.isPBMagicPrefix(bytes)) {
    throw new DeserializationException("Unable to parse RegionStoreSequenceIds.");
  }
  RegionStoreSequenceIds.Builder regionSequenceIdsBuilder =
      ClusterStatusProtos.RegionStoreSequenceIds.newBuilder();
  int pblen = ProtobufUtil.lengthOfPBMagic();
  RegionStoreSequenceIds storeIds = null;
  try {
    ProtobufUtil.mergeFrom(regionSequenceIdsBuilder, bytes, pblen, bytes.length - pblen);
    storeIds = regionSequenceIdsBuilder.build();
  } catch (IOException e) {
    throw new DeserializationException(e);
  }
  return storeIds;
}
Example 7: parseFrom

import org.apache.hadoop.hbase.protobuf.ProtobufUtil; // import the package/class the method depends on
/**
 * @param bytes A pb serialized {@link ClusterId} instance with pb magic prefix
 * @return An instance of {@link ClusterId} made from <code>bytes</code>
 * @throws DeserializationException
 * @see #toByteArray()
 */
public static ClusterId parseFrom(final byte [] bytes) throws DeserializationException {
  if (ProtobufUtil.isPBMagicPrefix(bytes)) {
    int pblen = ProtobufUtil.lengthOfPBMagic();
    ClusterIdProtos.ClusterId.Builder builder = ClusterIdProtos.ClusterId.newBuilder();
    ClusterIdProtos.ClusterId cid = null;
    try {
      ProtobufUtil.mergeFrom(builder, bytes, pblen, bytes.length - pblen);
      cid = builder.build();
    } catch (IOException e) {
      throw new DeserializationException(e);
    }
    return convert(cid);
  } else {
    // Presume it was written out this way, the old way.
    return new ClusterId(Bytes.toString(bytes));
  }
}
Example 8: readLabelsFromZKData

import org.apache.hadoop.hbase.protobuf.ProtobufUtil; // import the package/class the method depends on
/**
 * Reads back from the zookeeper. The data read here is of the form written by
 * writeToZooKeeper(Map<byte[], Integer> entries).
 *
 * @param data
 * @return Labels and their ordinal details
 * @throws DeserializationException
 */
public static List<VisibilityLabel> readLabelsFromZKData(byte[] data)
    throws DeserializationException {
  if (ProtobufUtil.isPBMagicPrefix(data)) {
    int pblen = ProtobufUtil.lengthOfPBMagic();
    try {
      VisibilityLabelsRequest.Builder builder = VisibilityLabelsRequest.newBuilder();
      ProtobufUtil.mergeFrom(builder, data, pblen, data.length - pblen);
      return builder.getVisLabelList();
    } catch (IOException e) {
      throw new DeserializationException(e);
    }
  }
  return null;
}
Example 9: readUserAuthsFromZKData

import org.apache.hadoop.hbase.protobuf.ProtobufUtil; // import the package/class the method depends on
/**
 * Reads back User auth data written to zookeeper.
 * @param data
 * @return User auth details
 * @throws DeserializationException
 */
public static MultiUserAuthorizations readUserAuthsFromZKData(byte[] data)
    throws DeserializationException {
  if (ProtobufUtil.isPBMagicPrefix(data)) {
    int pblen = ProtobufUtil.lengthOfPBMagic();
    try {
      MultiUserAuthorizations.Builder builder = MultiUserAuthorizations.newBuilder();
      ProtobufUtil.mergeFrom(builder, data, pblen, data.length - pblen);
      return builder.build();
    } catch (IOException e) {
      throw new DeserializationException(e);
    }
  }
  return null;
}
Example 10: parseFrom

import org.apache.hadoop.hbase.protobuf.ProtobufUtil; // import the package/class the method depends on
private LoadBalancerProtos.LoadBalancerState parseFrom(byte [] pbBytes)
    throws DeserializationException {
  ProtobufUtil.expectPBMagicPrefix(pbBytes);
  LoadBalancerProtos.LoadBalancerState.Builder builder =
      LoadBalancerProtos.LoadBalancerState.newBuilder();
  try {
    int magicLen = ProtobufUtil.lengthOfPBMagic();
    ProtobufUtil.mergeFrom(builder, pbBytes, magicLen, pbBytes.length - magicLen);
  } catch (IOException e) {
    throw new DeserializationException(e);
  }
  return builder.build();
}
Example 11: parseFrom

import org.apache.hadoop.hbase.protobuf.ProtobufUtil; // import the package/class the method depends on
private RegionNormalizerProtos.RegionNormalizerState parseFrom(byte [] pbBytes)
    throws DeserializationException {
  ProtobufUtil.expectPBMagicPrefix(pbBytes);
  RegionNormalizerProtos.RegionNormalizerState.Builder builder =
      RegionNormalizerProtos.RegionNormalizerState.newBuilder();
  try {
    int magicLen = ProtobufUtil.lengthOfPBMagic();
    ProtobufUtil.mergeFrom(builder, pbBytes, magicLen, pbBytes.length - magicLen);
  } catch (IOException e) {
    throw new DeserializationException(e);
  }
  return builder.build();
}
Example 12: parseFrom

import org.apache.hadoop.hbase.protobuf.ProtobufUtil; // import the package/class the method depends on
/**
 * @param data Serialized data to parse.
 * @return An SplitLogTaskState instance made of the passed <code>data</code>
 * @throws DeserializationException
 * @see #toByteArray()
 */
public static SplitLogTask parseFrom(final byte [] data) throws DeserializationException {
  ProtobufUtil.expectPBMagicPrefix(data);
  try {
    int prefixLen = ProtobufUtil.lengthOfPBMagic();
    ZooKeeperProtos.SplitLogTask.Builder builder = ZooKeeperProtos.SplitLogTask.newBuilder();
    ProtobufUtil.mergeFrom(builder, data, prefixLen, data.length - prefixLen);
    return new SplitLogTask(builder.build());
  } catch (IOException e) {
    throw new DeserializationException(Bytes.toStringBinary(data, 0, 64), e);
  }
}
Example 13: fromBytes

import org.apache.hadoop.hbase.protobuf.ProtobufUtil; // import the package/class the method depends on
/** Public for hbck */
public static ZooKeeperProtos.TableLock fromBytes(byte[] bytes) {
  int pblen = ProtobufUtil.lengthOfPBMagic();
  if (bytes == null || bytes.length < pblen) {
    return null;
  }
  try {
    ZooKeeperProtos.TableLock.Builder builder = ZooKeeperProtos.TableLock.newBuilder();
    ProtobufUtil.mergeFrom(builder, bytes, pblen, bytes.length - pblen);
    return builder.build();
  } catch (IOException ex) {
    LOG.warn("Exception in deserialization", ex);
  }
  return null;
}
Example 14: parseVersionFrom

import org.apache.hadoop.hbase.protobuf.ProtobufUtil; // import the package/class the method depends on
/**
 * Parse the content of the ${HBASE_ROOTDIR}/hbase.version file.
 * @param bytes The byte content of the hbase.version file.
 * @return The version found in the file as a String.
 * @throws DeserializationException
 */
static String parseVersionFrom(final byte [] bytes)
    throws DeserializationException {
  ProtobufUtil.expectPBMagicPrefix(bytes);
  int pblen = ProtobufUtil.lengthOfPBMagic();
  FSProtos.HBaseVersionFileContent.Builder builder =
      FSProtos.HBaseVersionFileContent.newBuilder();
  try {
    ProtobufUtil.mergeFrom(builder, bytes, pblen, bytes.length - pblen);
    return builder.getVersion();
  } catch (IOException e) {
    // Convert
    throw new DeserializationException(e);
  }
}
Example 15: checkTableState

import org.apache.hadoop.hbase.protobuf.ProtobufUtil; // import the package/class the method depends on
private void checkTableState(byte[] data, State expectedState)
    throws InvalidProtocolBufferException {
  ZooKeeperProtos.Table.Builder builder = ZooKeeperProtos.Table.newBuilder();
  int magicLen = ProtobufUtil.lengthOfPBMagic();
  ZooKeeperProtos.Table t = builder.mergeFrom(data, magicLen, data.length - magicLen).build();
  assertTrue(t.getState() == expectedState);
}