This article collects typical usage examples of the Java class org.apache.hadoop.hdfs.security.token.block.ExportedBlockKeys. If you are unsure what ExportedBlockKeys is for or how to use it, the curated class examples below may help.
ExportedBlockKeys belongs to the org.apache.hadoop.hdfs.security.token.block package. Fifteen code examples of the class are shown below, sorted by popularity by default.
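Before the individual examples, here is a minimal sketch of the two roles ExportedBlockKeys plays below: as an empty placeholder handed to a DatanodeRegistration, and as the live key set fetched from the NameNode and fed to a BlockTokenSecretManager. The method name, the namenode proxy parameter, and the block pool id are illustrative assumptions, not taken from any single example:

import java.io.IOException;
import org.apache.hadoop.hdfs.security.token.block.BlockTokenSecretManager;
import org.apache.hadoop.hdfs.security.token.block.ExportedBlockKeys;
import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocol;

// Hypothetical helper; "namenode" is an assumed NamenodeProtocol proxy.
void demoExportedBlockKeys(NamenodeProtocol namenode) throws IOException {
  // Role 1: an empty placeholder key set, as passed to DatanodeRegistration
  // in the register() examples below when no real keys are required.
  ExportedBlockKeys dummyKeys = new ExportedBlockKeys();

  // Role 2: the live key set exported by the NameNode.
  ExportedBlockKeys keys = namenode.getBlockKeys();
  if (keys.isBlockTokenEnabled()) {
    // Seed a local (non-master) secret manager with the NameNode's keys,
    // mirroring the KeyManager example below; the block pool id and the
    // null encryption algorithm here are placeholders.
    BlockTokenSecretManager btsm = new BlockTokenSecretManager(
        keys.getKeyUpdateInterval(), keys.getTokenLifetime(),
        "BP-placeholder", null);
    btsm.addKeys(keys);
  }
}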
Example 1: getBlockKeys
import org.apache.hadoop.hdfs.security.token.block.ExportedBlockKeys; // import the required package/class
@Override
public GetBlockKeysResponseProto getBlockKeys(RpcController unused,
    GetBlockKeysRequestProto request) throws ServiceException {
  ExportedBlockKeys keys;
  try {
    keys = impl.getBlockKeys();
  } catch (IOException e) {
    throw new ServiceException(e);
  }
  GetBlockKeysResponseProto.Builder builder =
      GetBlockKeysResponseProto.newBuilder();
  if (keys != null) {
    builder.setKeys(PBHelper.convert(keys));
  }
  return builder.build();
}
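For context, the client side of this RPC reverses the conversion. Below is a sketch modeled on Hadoop's NamenodeProtocolTranslatorPB, where the rpcProxy field and the NULL_CONTROLLER / VOID_GET_BLOCK_KEYS_REQUEST constants are assumed to come from that class:

@Override // client-side translator: protobuf response back to ExportedBlockKeys
public ExportedBlockKeys getBlockKeys() throws IOException {
  try {
    GetBlockKeysResponseProto rsp = rpcProxy.getBlockKeys(NULL_CONTROLLER,
        VOID_GET_BLOCK_KEYS_REQUEST);
    // A missing keys field maps back to null, matching the null check
    // on the server side above.
    return rsp.hasKeys() ? PBHelper.convert(rsp.getKeys()) : null;
  } catch (ServiceException e) {
    throw ProtobufHelper.getRemoteException(e);
  }
}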
Example 2: register
import org.apache.hadoop.hdfs.security.token.block.ExportedBlockKeys; // import the required package/class
void register() throws IOException {
  // get versions from the namenode
  nsInfo = nameNodeProto.versionRequest();
  dnRegistration = new DatanodeRegistration(
      new DatanodeID(DNS.getDefaultIP("default"),
          DNS.getDefaultHost("default", "default"),
          DataNode.generateUuid(), getNodePort(dnIdx),
          DFSConfigKeys.DFS_DATANODE_HTTP_DEFAULT_PORT,
          DFSConfigKeys.DFS_DATANODE_HTTPS_DEFAULT_PORT,
          DFSConfigKeys.DFS_DATANODE_IPC_DEFAULT_PORT),
      new DataStorage(nsInfo),
      new ExportedBlockKeys(), VersionInfo.getVersion());
  // register datanode
  dnRegistration = nameNodeProto.registerDatanode(dnRegistration);
  // first block reports
  storage = new DatanodeStorage(DatanodeStorage.generateUuid());
  final StorageBlockReport[] reports = {
      new StorageBlockReport(storage, BlockListAsLongs.EMPTY)
  };
  nameNodeProto.blockReport(dnRegistration,
      nameNode.getNamesystem().getBlockPoolId(), reports,
      new BlockReportContext(1, 0, System.nanoTime()));
}
Example 3: register
import org.apache.hadoop.hdfs.security.token.block.ExportedBlockKeys; // import the required package/class
void register() throws IOException {
  // get versions from the namenode
  nsInfo = nameNodeProto.versionRequest();
  dnRegistration = new DatanodeRegistration(
      new DatanodeID(DNS.getDefaultIP("default"),
          DNS.getDefaultHost("default", "default"),
          DataNode.generateUuid(), getNodePort(dnIdx),
          DFSConfigKeys.DFS_DATANODE_HTTP_DEFAULT_PORT,
          DFSConfigKeys.DFS_DATANODE_HTTPS_DEFAULT_PORT,
          DFSConfigKeys.DFS_DATANODE_IPC_DEFAULT_PORT),
      new DataStorage(nsInfo),
      new ExportedBlockKeys(), VersionInfo.getVersion());
  // register datanode
  dnRegistration = dataNodeProto.registerDatanode(dnRegistration);
  dnRegistration.setNamespaceInfo(nsInfo);
  // first block reports
  storage = new DatanodeStorage(DatanodeStorage.generateUuid());
  final StorageBlockReport[] reports = {
      new StorageBlockReport(storage, BlockListAsLongs.EMPTY)
  };
  dataNodeProto.blockReport(dnRegistration, bpid, reports,
      new BlockReportContext(1, 0, System.nanoTime(), 0L));
}
Example 4: createBPRegistration
import org.apache.hadoop.hdfs.security.token.block.ExportedBlockKeys; // import the required package/class
/**
 * Create a DatanodeRegistration for a specific block pool.
 * @param nsInfo the namespace info from the first part of the NN handshake
 */
DatanodeRegistration createBPRegistration(NamespaceInfo nsInfo) {
  StorageInfo storageInfo = storage.getBPStorage(nsInfo.getBlockPoolID());
  if (storageInfo == null) {
    // it's null in the case of SimulatedDataSet
    storageInfo = new StorageInfo(
        DataNodeLayoutVersion.CURRENT_LAYOUT_VERSION,
        nsInfo.getNamespaceID(), nsInfo.clusterID, nsInfo.getCTime(),
        NodeType.DATA_NODE);
  }
  DatanodeID dnId = new DatanodeID(
      streamingAddr.getAddress().getHostAddress(), hostName,
      storage.getDatanodeUuid(), getXferPort(), getInfoPort(),
      infoSecurePort, getIpcPort());
  return new DatanodeRegistration(dnId, storageInfo,
      new ExportedBlockKeys(), VersionInfo.getVersion());
}
Example 5: register
import org.apache.hadoop.hdfs.security.token.block.ExportedBlockKeys; // import the required package/class
void register() throws IOException {
  // get versions from the namenode
  nsInfo = nameNodeProto.versionRequest();
  dnRegistration = new DatanodeRegistration(
      new DatanodeID(DNS.getDefaultIP("default"),
          DNS.getDefaultHost("default", "default"),
          DataNode.generateUuid(), getNodePort(dnIdx),
          DFSConfigKeys.DFS_DATANODE_HTTP_DEFAULT_PORT,
          DFSConfigKeys.DFS_DATANODE_HTTPS_DEFAULT_PORT,
          DFSConfigKeys.DFS_DATANODE_IPC_DEFAULT_PORT),
      new DataStorage(nsInfo),
      new ExportedBlockKeys(), VersionInfo.getVersion());
  // register datanode
  dnRegistration = nameNodeProto.registerDatanode(dnRegistration);
  // first block reports
  storage = new DatanodeStorage(DatanodeStorage.generateUuid());
  final StorageBlockReport[] reports = {
      new StorageBlockReport(storage,
          new BlockListAsLongs(null, null).getBlockListAsLongs())
  };
  nameNodeProto.blockReport(dnRegistration,
      nameNode.getNamesystem().getBlockPoolId(), reports,
      new BlockReportContext(1, 0, System.nanoTime()));
}
Example 6: transferBlocks
import org.apache.hadoop.hdfs.security.token.block.ExportedBlockKeys; // import the required package/class
/**
 * Transfer blocks to another data-node.
 * Just report on behalf of the other data-node
 * that the blocks have been received.
 */
private int transferBlocks(Block blocks[],
    DatanodeInfo xferTargets[][],
    String targetStorageIDs[][]) throws IOException {
  for (int i = 0; i < blocks.length; i++) {
    DatanodeInfo blockTargets[] = xferTargets[i];
    for (int t = 0; t < blockTargets.length; t++) {
      DatanodeInfo dnInfo = blockTargets[t];
      String targetStorageID = targetStorageIDs[i][t];
      DatanodeRegistration receivedDNReg;
      receivedDNReg = new DatanodeRegistration(dnInfo,
          new DataStorage(nsInfo),
          new ExportedBlockKeys(), VersionInfo.getVersion());
      ReceivedDeletedBlockInfo[] rdBlocks = {
          new ReceivedDeletedBlockInfo(
              blocks[i], ReceivedDeletedBlockInfo.BlockStatus.RECEIVED_BLOCK,
              null) };
      StorageReceivedDeletedBlocks[] report = { new StorageReceivedDeletedBlocks(
          targetStorageID, rdBlocks) };
      nameNodeProto.blockReceivedAndDeleted(receivedDNReg, nameNode
          .getNamesystem().getBlockPoolId(), report);
    }
  }
  return blocks.length;
}
Example 7: register
import org.apache.hadoop.hdfs.security.token.block.ExportedBlockKeys; // import the required package/class
void register() throws IOException {
  // get versions from the namenode
  nsInfo = nameNodeProto.versionRequest();
  dnRegistration = new DatanodeRegistration(
      new DatanodeID(DNS.getDefaultIP("default"),
          DNS.getDefaultHost("default", "default"),
          "", getNodePort(dnIdx),
          DFSConfigKeys.DFS_DATANODE_HTTP_DEFAULT_PORT,
          DFSConfigKeys.DFS_DATANODE_IPC_DEFAULT_PORT),
      new DataStorage(nsInfo, ""),
      new ExportedBlockKeys(), VersionInfo.getVersion());
  DataNode.setNewStorageID(dnRegistration);
  // register datanode
  dnRegistration = nameNodeProto.registerDatanode(dnRegistration);
  // first block reports
  storage = new DatanodeStorage(dnRegistration.getStorageID());
  final StorageBlockReport[] reports = {
      new StorageBlockReport(storage,
          new BlockListAsLongs(null, null).getBlockListAsLongs())
  };
  nameNodeProto.blockReport(dnRegistration,
      nameNode.getNamesystem().getBlockPoolId(), reports);
}
Example 8: transferBlocks
import org.apache.hadoop.hdfs.security.token.block.ExportedBlockKeys; // import the required package/class
/**
 * Transfer blocks to another data-node.
 * Just report on behalf of the other data-node
 * that the blocks have been received.
 */
private int transferBlocks(Block blocks[],
    DatanodeInfo xferTargets[][]) throws IOException {
  for (int i = 0; i < blocks.length; i++) {
    DatanodeInfo blockTargets[] = xferTargets[i];
    for (int t = 0; t < blockTargets.length; t++) {
      DatanodeInfo dnInfo = blockTargets[t];
      DatanodeRegistration receivedDNReg;
      receivedDNReg = new DatanodeRegistration(dnInfo,
          new DataStorage(nsInfo, dnInfo.getStorageID()),
          new ExportedBlockKeys(), VersionInfo.getVersion());
      ReceivedDeletedBlockInfo[] rdBlocks = {
          new ReceivedDeletedBlockInfo(
              blocks[i], ReceivedDeletedBlockInfo.BlockStatus.RECEIVED_BLOCK,
              null) };
      StorageReceivedDeletedBlocks[] report = { new StorageReceivedDeletedBlocks(
          receivedDNReg.getStorageID(), rdBlocks) };
      nameNodeProto.blockReceivedAndDeleted(receivedDNReg, nameNode
          .getNamesystem().getBlockPoolId(), report);
    }
  }
  return blocks.length;
}
Example 9: register
import org.apache.hadoop.hdfs.security.token.block.ExportedBlockKeys; // import the required package/class
void register() throws IOException {
  // get versions from the namenode
  nsInfo = nameNodeProto.versionRequest();
  dnRegistration = new DatanodeRegistration(
      new DatanodeID(DNS.getDefaultIP("default"),
          DNS.getDefaultHost("default", "default"),
          DataNode.generateUuid(), getNodePort(dnIdx),
          DFSConfigKeys.DFS_DATANODE_HTTP_DEFAULT_PORT,
          DFSConfigKeys.DFS_DATANODE_HTTPS_DEFAULT_PORT,
          DFSConfigKeys.DFS_DATANODE_IPC_DEFAULT_PORT),
      new DataStorage(nsInfo),
      new ExportedBlockKeys(), VersionInfo.getVersion());
  // register datanode
  dnRegistration = nameNodeProto.registerDatanode(dnRegistration);
  // first block reports
  storage = new DatanodeStorage(DatanodeStorage.generateUuid());
  final StorageBlockReport[] reports = {
      new StorageBlockReport(storage,
          new BlockListAsLongs(null, null).getBlockListAsLongs())
  };
  nameNodeProto.blockReport(dnRegistration,
      nameNode.getNamesystem().getBlockPoolId(), reports);
}
Example 10: getBlockKeys
import org.apache.hadoop.hdfs.security.token.block.ExportedBlockKeys; // import the required package/class
@Override
public GetBlockKeysResponseProto getBlockKeys(RpcController unused,
    GetBlockKeysRequestProto request) throws ServiceException {
  ExportedBlockKeys keys;
  try {
    keys = impl.getBlockKeys();
  } catch (IOException e) {
    throw new ServiceException(e);
  }
  GetBlockKeysResponseProto.Builder builder =
      GetBlockKeysResponseProto.newBuilder();
  if (keys != null) {
    builder.setKeys(PBHelper.convert(keys));
  }
  return builder.build();
}
Example 11: register
import org.apache.hadoop.hdfs.security.token.block.ExportedBlockKeys; // import the required package/class
void register() throws IOException {
  // get versions from the namenode
  nsInfo = nameNodeProto.versionRequest();
  dnRegistration = new DatanodeRegistration(
      new DatanodeID(DNS.getDefaultIP("default"),
          DNS.getDefaultHost("default", "default"), "", getNodePort(dnIdx),
          DFSConfigKeys.DFS_DATANODE_HTTP_DEFAULT_PORT,
          DFSConfigKeys.DFS_DATANODE_IPC_DEFAULT_PORT),
      new DataStorage(nsInfo, ""), new ExportedBlockKeys(),
      VersionInfo.getVersion());
  DataNode.setNewStorageID(dnRegistration);
  // register datanode
  dnRegistration = nameNodeProto.registerDatanode(dnRegistration);
  // first block reports
  storage = new DatanodeStorage(dnRegistration.getStorageID());
  final StorageBlockReport[] reports = { new StorageBlockReport(storage,
      BlockReport.builder(NUM_BUCKETS).build()) };
  nameNodeProto.blockReport(dnRegistration,
      nameNode.getNamesystem().getBlockPoolId(), reports);
}
Example 12: transferBlocks
import org.apache.hadoop.hdfs.security.token.block.ExportedBlockKeys; // import the required package/class
/**
 * Transfer blocks to another data-node.
 * Just report on behalf of the other data-node
 * that the blocks have been received.
 */
private int transferBlocks(Block blocks[], DatanodeInfo xferTargets[][])
    throws IOException {
  for (int i = 0; i < blocks.length; i++) {
    DatanodeInfo blockTargets[] = xferTargets[i];
    for (DatanodeInfo dnInfo : blockTargets) {
      DatanodeRegistration receivedDNReg;
      receivedDNReg = new DatanodeRegistration(dnInfo,
          new DataStorage(nsInfo, dnInfo.getStorageID()),
          new ExportedBlockKeys(), VersionInfo.getVersion());
      ReceivedDeletedBlockInfo[] rdBlocks =
          { new ReceivedDeletedBlockInfo(blocks[i],
              ReceivedDeletedBlockInfo.BlockStatus.RECEIVED, null) };
      StorageReceivedDeletedBlocks[] report =
          { new StorageReceivedDeletedBlocks(receivedDNReg.getStorageID(),
              rdBlocks) };
      nameNodeProto.blockReceivedAndDeleted(receivedDNReg,
          nameNode.getNamesystem().getBlockPoolId(), report);
    }
  }
  return blocks.length;
}
Example 13: register
import org.apache.hadoop.hdfs.security.token.block.ExportedBlockKeys; // import the required package/class
void register() throws IOException {
  // get versions from the namenode
  nsInfo = nameNodeProto.versionRequest();
  dnRegistration = new DatanodeRegistration(
      new DatanodeID(DNS.getDefaultIP("default"),
          DNS.getDefaultHost("default", "default"),
          "", getNodePort(dnIdx),
          DFSConfigKeys.DFS_DATANODE_HTTP_DEFAULT_PORT,
          DFSConfigKeys.DFS_DATANODE_HTTPS_DEFAULT_PORT,
          DFSConfigKeys.DFS_DATANODE_IPC_DEFAULT_PORT),
      new DataStorage(nsInfo, ""),
      new ExportedBlockKeys(), VersionInfo.getVersion());
  DataNode.setNewStorageID(dnRegistration);
  // register datanode
  dnRegistration = nameNodeProto.registerDatanode(dnRegistration);
  // first block reports
  storage = new DatanodeStorage(dnRegistration.getStorageID());
  final StorageBlockReport[] reports = {
      new StorageBlockReport(storage,
          new BlockListAsLongs(null, null).getBlockListAsLongs())
  };
  nameNodeProto.blockReport(dnRegistration,
      nameNode.getNamesystem().getBlockPoolId(), reports);
}
Example 14: KeyManager
import org.apache.hadoop.hdfs.security.token.block.ExportedBlockKeys; // import the required package/class
public KeyManager(String blockpoolID, NamenodeProtocol namenode,
    boolean encryptDataTransfer, Configuration conf) throws IOException {
  this.namenode = namenode;
  this.encryptDataTransfer = encryptDataTransfer;
  final ExportedBlockKeys keys = namenode.getBlockKeys();
  this.isBlockTokenEnabled = keys.isBlockTokenEnabled();
  if (isBlockTokenEnabled) {
    long updateInterval = keys.getKeyUpdateInterval();
    long tokenLifetime = keys.getTokenLifetime();
    LOG.info("Block token params received from NN: update interval="
        + StringUtils.formatTime(updateInterval)
        + ", token lifetime=" + StringUtils.formatTime(tokenLifetime));
    String encryptionAlgorithm = conf.get(
        DFSConfigKeys.DFS_DATA_ENCRYPTION_ALGORITHM_KEY);
    this.blockTokenSecretManager = new BlockTokenSecretManager(
        updateInterval, tokenLifetime, blockpoolID, encryptionAlgorithm);
    this.blockTokenSecretManager.addKeys(keys);
    // sync block keys with NN more frequently than NN updates its block keys
    this.blockKeyUpdater = new BlockKeyUpdater(updateInterval / 4);
    this.shouldRun = true;
  } else {
    this.blockTokenSecretManager = null;
    this.blockKeyUpdater = null;
  }
}
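As a follow-up, a hedged sketch of how a balancer-style client might wire this up; the blockPoolId, namenode proxy, conf, and block variables are assumed, and the single-argument getAccessToken(ExtendedBlock) shown here matches older KeyManager versions (newer ones take additional storage-type arguments):

// Construct a KeyManager and request a block access token for one block.
KeyManager keyManager = new KeyManager(blockPoolId, namenode,
    conf.getBoolean(DFSConfigKeys.DFS_ENCRYPT_DATA_TRANSFER_KEY,
        DFSConfigKeys.DFS_ENCRYPT_DATA_TRANSFER_DEFAULT), conf);
Token<BlockTokenIdentifier> accessToken = keyManager.getAccessToken(block);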
Example 15: DatanodeRegistration
import org.apache.hadoop.hdfs.security.token.block.ExportedBlockKeys; // import the required package/class
public DatanodeRegistration(DatanodeID dn, StorageInfo info,
    ExportedBlockKeys keys, String softwareVersion) {
  super(dn);
  this.storageInfo = info;
  this.exportedKeys = keys;
  this.softwareVersion = softwareVersion;
}
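For completeness, a minimal sketch of calling this constructor with placeholder keys, in the same spirit as the register() examples above; every literal below is a stand-in value, not taken from any example:

// Build a registration with dummy (empty) block keys.
DatanodeID dnId = new DatanodeID("127.0.0.1", "localhost",
    "placeholder-uuid", 50010, 50075, 50475, 50020);
StorageInfo info = new StorageInfo(
    DataNodeLayoutVersion.CURRENT_LAYOUT_VERSION, 0, "placeholder-cluster",
    0L, NodeType.DATA_NODE);
DatanodeRegistration reg = new DatanodeRegistration(dnId, info,
    new ExportedBlockKeys(), VersionInfo.getVersion());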