This page collects typical usage examples of the Java class org.apache.hadoop.hdfs.protocol.proto.HdfsProtos. If you are unsure what HdfsProtos does or how to use it, the curated examples below should help.
The HdfsProtos class belongs to the org.apache.hadoop.hdfs.protocol.proto package. 15 code examples are shown below, ordered by popularity by default.
Example 1: setFileEncryptionInfo
import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos; // import the required package/class
/**
 * Set the FileEncryptionInfo for an INode.
 */
void setFileEncryptionInfo(String src, FileEncryptionInfo info)
    throws IOException {
  // Make the PB for the xattr
  final HdfsProtos.PerFileEncryptionInfoProto proto =
      PBHelper.convertPerFileEncInfo(info);
  final byte[] protoBytes = proto.toByteArray();
  final XAttr fileEncryptionAttr =
      XAttrHelper.buildXAttr(CRYPTO_XATTR_FILE_ENCRYPTION_INFO, protoBytes);
  final List<XAttr> xAttrs = Lists.newArrayListWithCapacity(1);
  xAttrs.add(fileEncryptionAttr);
  writeLock();
  try {
    FSDirXAttrOp.unprotectedSetXAttrs(this, src, xAttrs,
        EnumSet.of(XAttrSetFlag.CREATE));
  } finally {
    writeUnlock();
  }
}
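The xattr value written here is nothing more than the serialized protobuf. A minimal round-trip sketch of that encoding (the key/IV bytes and key version name are placeholder values, and the ByteString class is assumed to be the unshaded com.google.protobuf one used by older Hadoop releases):

// assumed imports: HdfsProtos, com.google.protobuf.ByteString
HdfsProtos.PerFileEncryptionInfoProto proto =
    HdfsProtos.PerFileEncryptionInfoProto.newBuilder()
        .setKey(ByteString.copyFrom(new byte[16]))  // encrypted data-encryption key (placeholder)
        .setIv(ByteString.copyFrom(new byte[16]))   // initialization vector (placeholder)
        .setEzKeyVersionName("myKey@0")             // placeholder EZ key version name
        .build();
byte[] protoBytes = proto.toByteArray();            // what gets stored as the xattr value
HdfsProtos.PerFileEncryptionInfoProto parsed =      // readers of the xattr parse it back
    HdfsProtos.PerFileEncryptionInfoProto.parseFrom(protoBytes);  // throws InvalidProtocolBufferException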
Example 2: convert
import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos; // import the required package/class
public static ContentSummary convert(ContentSummaryProto cs) {
  if (cs == null) return null;
  ContentSummary.Builder builder = new ContentSummary.Builder();
  builder.length(cs.getLength()).
      fileCount(cs.getFileCount()).
      directoryCount(cs.getDirectoryCount()).
      quota(cs.getQuota()).
      spaceConsumed(cs.getSpaceConsumed()).
      spaceQuota(cs.getSpaceQuota());
  if (cs.hasTypeQuotaInfos()) {
    for (HdfsProtos.StorageTypeQuotaInfoProto info :
        cs.getTypeQuotaInfos().getTypeQuotaInfoList()) {
      StorageType type = PBHelper.convertStorageType(info.getType());
      builder.typeConsumed(type, info.getConsumed());
      builder.typeQuota(type, info.getQuota());
    }
  }
  return builder.build();
}
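A hedged usage sketch for this converter: the six scalar fields of ContentSummaryProto are required in the .proto definition, so a hand-built proto must set each one. The numbers below are made up; -1 is the HDFS convention for "no quota":

HdfsProtos.ContentSummaryProto cs = HdfsProtos.ContentSummaryProto.newBuilder()
    .setLength(4096L)           // total file bytes
    .setFileCount(3L)
    .setDirectoryCount(1L)
    .setQuota(-1L)              // no namespace quota
    .setSpaceConsumed(12288L)   // bytes x replication factor
    .setSpaceQuota(-1L)         // no space quota
    .build();
ContentSummary summary = convert(cs);        // the helper shown above
System.out.println(summary.getFileCount()); // prints 3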
Example 3: convert
import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos; // import the required package/class
public static CipherOption convert(HdfsProtos.CipherOptionProto proto) {
  if (proto != null) {
    CipherSuite suite = null;
    if (proto.getSuite() != null) {
      suite = convert(proto.getSuite());
    }
    byte[] inKey = null;
    if (proto.getInKey() != null) {
      inKey = proto.getInKey().toByteArray();
    }
    byte[] inIv = null;
    if (proto.getInIv() != null) {
      inIv = proto.getInIv().toByteArray();
    }
    byte[] outKey = null;
    if (proto.getOutKey() != null) {
      outKey = proto.getOutKey().toByteArray();
    }
    byte[] outIv = null;
    if (proto.getOutIv() != null) {
      outIv = proto.getOutIv().toByteArray();
    }
    return new CipherOption(suite, inKey, inIv, outKey, outIv);
  }
  return null;
}
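Going the other way, a CipherOptionProto can be built by hand and fed to this converter. A sketch with placeholder 16-byte key/IV values, leaving the out-direction fields unset:

HdfsProtos.CipherOptionProto optionProto = HdfsProtos.CipherOptionProto.newBuilder()
    .setSuite(HdfsProtos.CipherSuiteProto.AES_CTR_NOPADDING)
    .setInKey(ByteString.copyFrom(new byte[16]))  // placeholder key
    .setInIv(ByteString.copyFrom(new byte[16]))   // placeholder IV
    .build();
CipherOption option = convert(optionProto);       // the helper shown above

Note that for unset optional bytes fields the generated protobuf getters return ByteString.EMPTY rather than null, so the null checks in the converter are defensive rather than load-bearing.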
Example 4: addEncryptionZone
import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos; // import the required package/class
private void addEncryptionZone(INodeWithAdditionalFields inode,
    XAttrFeature xaf) {
  if (xaf == null) {
    return;
  }
  XAttr xattr = xaf.getXAttr(CRYPTO_XATTR_ENCRYPTION_ZONE);
  if (xattr == null) {
    return;
  }
  try {
    final HdfsProtos.ZoneEncryptionInfoProto ezProto =
        HdfsProtos.ZoneEncryptionInfoProto.parseFrom(
            xattr.getValue());
    ezManager.unprotectedAddEncryptionZone(inode.getId(),
        PBHelperClient.convert(ezProto.getSuite()),
        PBHelperClient.convert(ezProto.getCryptoProtocolVersion()),
        ezProto.getKeyName());
  } catch (InvalidProtocolBufferException e) {
    NameNode.LOG.warn("Error parsing protocol buffer of " +
        "EZ XAttr " + xattr.getName() + " dir:" + inode.getFullPathName());
  }
}
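The xattr value parsed here is a serialized ZoneEncryptionInfoProto. A sketch of producing one; the enum constants below are taken from hdfs.proto as an assumption, and the key name is a placeholder:

HdfsProtos.ZoneEncryptionInfoProto ezProto =
    HdfsProtos.ZoneEncryptionInfoProto.newBuilder()
        .setSuite(HdfsProtos.CipherSuiteProto.AES_CTR_NOPADDING)
        .setCryptoProtocolVersion(
            HdfsProtos.CryptoProtocolVersionProto.ENCRYPTION_ZONES)
        .setKeyName("ezKey")                    // placeholder key name
        .build();
byte[] xattrValue = ezProto.toByteArray();      // what xattr.getValue() returns above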
Example 5: setFileEncryptionInfo
import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos; // import the required package/class
/**
 * Set the FileEncryptionInfo for an INode.
 *
 * @param fsd fsdirectory
 * @param src the path of a directory which will be the root of the
 *            encryption zone.
 * @param info file encryption information
 * @throws IOException
 */
static void setFileEncryptionInfo(final FSDirectory fsd, final String src,
    final FileEncryptionInfo info) throws IOException {
  // Make the PB for the xattr
  final HdfsProtos.PerFileEncryptionInfoProto proto =
      PBHelperClient.convertPerFileEncInfo(info);
  final byte[] protoBytes = proto.toByteArray();
  final XAttr fileEncryptionAttr =
      XAttrHelper.buildXAttr(CRYPTO_XATTR_FILE_ENCRYPTION_INFO, protoBytes);
  final List<XAttr> xAttrs = Lists.newArrayListWithCapacity(1);
  xAttrs.add(fileEncryptionAttr);
  fsd.writeLock();
  try {
    FSDirXAttrOp.unprotectedSetXAttrs(fsd, src, xAttrs,
        EnumSet.of(XAttrSetFlag.CREATE));
  } finally {
    fsd.writeUnlock();
  }
}
Example 6: setFileEncryptionInfo
import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos; // import the required package/class
/**
 * Set the FileEncryptionInfo for an INode.
 */
void setFileEncryptionInfo(String src, FileEncryptionInfo info)
    throws IOException {
  // Make the PB for the xattr
  final HdfsProtos.PerFileEncryptionInfoProto proto =
      PBHelper.convertPerFileEncInfo(info);
  final byte[] protoBytes = proto.toByteArray();
  final XAttr fileEncryptionAttr =
      XAttrHelper.buildXAttr(CRYPTO_XATTR_FILE_ENCRYPTION_INFO, protoBytes);
  final List<XAttr> xAttrs = Lists.newArrayListWithCapacity(1);
  xAttrs.add(fileEncryptionAttr);
  writeLock();
  try {
    unprotectedSetXAttrs(src, xAttrs, EnumSet.of(XAttrSetFlag.CREATE));
  } finally {
    writeUnlock();
  }
}
Example 7: convert
import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos; // import the required package/class
public static LocatedBlockProto convert(LocatedBlock b) {
  if (b == null) {
    return null;
  }
  Builder builder = LocatedBlockProto.newBuilder();
  DatanodeInfo[] locs = b.getLocations();
  for (int i = 0; i < locs.length; i++) {
    builder.addLocs(i, PBHelper.convert(locs[i]));
    // For compatibility with newer clients
    builder.addStorageIDs("HopsFS_Hack_Storage_ID");
    builder.addStorageTypes(HdfsProtos.StorageTypeProto.DISK);
    builder.addIsCached(false);
  }
  builder = builder.setB(PBHelper.convert(b.getBlock()))
      .setBlockToken(PBHelper.convert(b.getBlockToken()))
      .setCorrupt(b.isCorrupt()).setOffset(b.getStartOffset());
  if (b.isPhantomBlock() && b.isDataSet()) {
    builder.setData(ByteString.copyFrom(b.getData()));
  }
  return builder.build();
}
Example 8: convertPerFileEncInfo
import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos; // import the required package/class
public static HdfsProtos.PerFileEncryptionInfoProto convertPerFileEncInfo(
    FileEncryptionInfo info) {
  if (info == null) {
    return null;
  }
  return HdfsProtos.PerFileEncryptionInfoProto.newBuilder()
      .setKey(getByteString(info.getEncryptedDataEncryptionKey()))
      .setIv(getByteString(info.getIV()))
      .setEzKeyVersionName(info.getEzKeyVersionName())
      .build();
}
Example 9: addToInodeMap
import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos; // import the required package/class
/**
 * This method is always called with writeLock of FSDirectory held.
 */
public final void addToInodeMap(INode inode) {
  if (inode instanceof INodeWithAdditionalFields) {
    inodeMap.put(inode);
    if (!inode.isSymlink()) {
      final XAttrFeature xaf = inode.getXAttrFeature();
      if (xaf != null) {
        final List<XAttr> xattrs = xaf.getXAttrs();
        for (XAttr xattr : xattrs) {
          final String xaName = XAttrHelper.getPrefixName(xattr);
          if (CRYPTO_XATTR_ENCRYPTION_ZONE.equals(xaName)) {
            try {
              final HdfsProtos.ZoneEncryptionInfoProto ezProto =
                  HdfsProtos.ZoneEncryptionInfoProto.parseFrom(
                      xattr.getValue());
              ezManager.unprotectedAddEncryptionZone(inode.getId(),
                  PBHelper.convert(ezProto.getSuite()),
                  PBHelper.convert(ezProto.getCryptoProtocolVersion()),
                  ezProto.getKeyName());
            } catch (InvalidProtocolBufferException e) {
              NameNode.LOG.warn("Error parsing protocol buffer of " +
                  "EZ XAttr " + xattr.getName());
            }
          }
        }
      }
    }
  }
}
Example 10: convertPerFileEncInfo
import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos; // import the required package/class
public static HdfsProtos.PerFileEncryptionInfoProto convertPerFileEncInfo(
    FileEncryptionInfo info) {
  if (info == null) {
    return null;
  }
  return HdfsProtos.PerFileEncryptionInfoProto.newBuilder()
      .setKey(getByteString(info.getEncryptedDataEncryptionKey()))
      .setIv(getByteString(info.getIV()))
      .setEzKeyVersionName(info.getEzKeyVersionName())
      .build();
}
Example 11: getFileSize
import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos; // import the required package/class
static long getFileSize(FsImageProto.INodeSection.INodeFile f) {
  long size = 0;
  for (HdfsProtos.BlockProto p : f.getBlocksList()) {
    size += p.getNumBytes();
  }
  return size;
}
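A quick sanity check of the summation. blockId and genStamp are required fields of BlockProto, so they must be set even though getFileSize only reads numBytes; all values below are arbitrary:

FsImageProto.INodeSection.INodeFile f =
    FsImageProto.INodeSection.INodeFile.newBuilder()
        .addBlocks(HdfsProtos.BlockProto.newBuilder()
            .setBlockId(1L).setGenStamp(1001L).setNumBytes(134217728L))  // 128 MiB
        .addBlocks(HdfsProtos.BlockProto.newBuilder()
            .setBlockId(2L).setGenStamp(1002L).setNumBytes(1048576L))    // 1 MiB
        .build();
System.out.println(getFileSize(f));  // 135266304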
Example 12: testChecksumTypeProto
import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos; // import the required package/class
@Test
public void testChecksumTypeProto() {
  assertEquals(DataChecksum.Type.NULL,
      PBHelper.convert(HdfsProtos.ChecksumTypeProto.CHECKSUM_NULL));
  assertEquals(DataChecksum.Type.CRC32,
      PBHelper.convert(HdfsProtos.ChecksumTypeProto.CHECKSUM_CRC32));
  assertEquals(DataChecksum.Type.CRC32C,
      PBHelper.convert(HdfsProtos.ChecksumTypeProto.CHECKSUM_CRC32C));
  assertEquals(PBHelper.convert(DataChecksum.Type.NULL),
      HdfsProtos.ChecksumTypeProto.CHECKSUM_NULL);
  assertEquals(PBHelper.convert(DataChecksum.Type.CRC32),
      HdfsProtos.ChecksumTypeProto.CHECKSUM_CRC32);
  assertEquals(PBHelper.convert(DataChecksum.Type.CRC32C),
      HdfsProtos.ChecksumTypeProto.CHECKSUM_CRC32C);
}
Example 13: convertCipherOptions
import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos; // import the required package/class
public static List<HdfsProtos.CipherOptionProto> convertCipherOptions(
    List<CipherOption> options) {
  if (options != null) {
    List<HdfsProtos.CipherOptionProto> protos =
        Lists.newArrayListWithCapacity(options.size());
    for (CipherOption option : options) {
      protos.add(convert(option));
    }
    return protos;
  }
  return null;
}
Example 14: convertCipherOptionProtos
import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos; // import the required package/class
public static List<CipherOption> convertCipherOptionProtos(
    List<HdfsProtos.CipherOptionProto> protos) {
  if (protos != null) {
    List<CipherOption> options =
        Lists.newArrayListWithCapacity(protos.size());
    for (HdfsProtos.CipherOptionProto proto : protos) {
      options.add(convert(proto));
    }
    return options;
  }
  return null;
}
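These two list converters are inverses of each other. A round-trip sketch, assuming the single-element convert(CipherOption) overload (not shown on this page) lives in the same helper class as the convert(CipherOptionProto) from example 3:

List<CipherOption> options =
    Lists.newArrayList(new CipherOption(CipherSuite.AES_CTR_NOPADDING));
List<HdfsProtos.CipherOptionProto> protos = convertCipherOptions(options);
List<CipherOption> back = convertCipherOptionProtos(protos);
// back.get(0).getCipherSuite() == CipherSuite.AES_CTR_NOPADDING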
Example 15: convertECSchema
import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos; // import the required package/class
public static ECSchema convertECSchema(HdfsProtos.ECSchemaProto schema) {
  List<HdfsProtos.ECSchemaOptionEntryProto> optionsList =
      schema.getOptionsList();
  Map<String, String> options = new HashMap<>(optionsList.size());
  for (HdfsProtos.ECSchemaOptionEntryProto option : optionsList) {
    options.put(option.getKey(), option.getValue());
  }
  return new ECSchema(schema.getCodecName(), schema.getDataUnits(),
      schema.getParityUnits(), options);
}
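A usage sketch with a hand-built proto describing a Reed-Solomon 6+3 layout; the codec name and the option entry are illustrative values, not taken from this page:

HdfsProtos.ECSchemaProto schemaProto = HdfsProtos.ECSchemaProto.newBuilder()
    .setCodecName("rs")        // illustrative codec name
    .setDataUnits(6)
    .setParityUnits(3)
    .addOptions(HdfsProtos.ECSchemaOptionEntryProto.newBuilder()
        .setKey("someOption").setValue("someValue"))  // illustrative option entry
    .build();
ECSchema schema = convertECSchema(schemaProto);  // the helper shown above
System.out.println(schema.getNumDataUnits() + "+" + schema.getNumParityUnits());  // 6+3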