本文整理汇总了Java中org.apache.hadoop.hdfs.security.token.block.BlockKey类的典型用法代码示例。如果您正苦于以下问题:Java BlockKey类的具体用法?Java BlockKey怎么用?Java BlockKey使用的例子?那么恭喜您, 这里精选的类代码示例或许可以为您提供帮助。
BlockKey类属于org.apache.hadoop.hdfs.security.token.block包,在下文中一共展示了BlockKey类的12个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Java代码示例。
示例1: generateKeys
import org.apache.hadoop.hdfs.security.token.block.BlockKey; //导入依赖的package包/类
// Rolls this secret manager's block keys: bumps the serial number twice and
// installs a fresh currentKey and nextKey with estimated expiry dates.
// Only the master instance generates keys; non-masters return immediately.
// NOTE(review): serialNo, keyUpdateInterval, tokenLifetime, generateSecret()
// and addBlockKeys() are defined in the enclosing class (not visible here).
private void generateKeys() throws IOException {
if (!isMaster) {
return;
}
/*
 * Need to set estimated expiry dates for currentKey and nextKey so that if
 * NN crashes, DN can still expire those keys. NN will stop using the newly
 * generated currentKey after the first keyUpdateInterval, however it may
 * still be used by DN and Balancer to generate new tokens before they get a
 * chance to sync their keys with NN. Since we require keyUpdInterval to be
 * long enough so that all live DN's and Balancer will sync their keys with
 * NN at least once during the period, the estimated expiry date for
 * currentKey is set to now() + 2 * keyUpdateInterval + tokenLifetime.
 * Similarly, the estimated expiry date for nextKey is one keyUpdateInterval
 * more.
 */
setSerialNo(serialNo + 1);
// currentKey: estimated expiry = now + 2 * keyUpdateInterval + tokenLifetime
// (rationale in the comment block above).
currentKey = new BlockKey(serialNo,
Time.now() + 2 * keyUpdateInterval + tokenLifetime, generateSecret());
currentKey.setKeyType(BlockKey.KeyType.CurrKey);
setSerialNo(serialNo + 1);
// nextKey: one keyUpdateInterval later than currentKey's estimated expiry.
nextKey = new BlockKey(serialNo,
Time.now() + 3 * keyUpdateInterval + tokenLifetime, generateSecret());
nextKey.setKeyType(BlockKey.KeyType.NextKey);
// addBlockKeys(): presumably persists/publishes the new keys -- confirm
// against the enclosing class.
addBlockKeys();
}
示例2: createPassword
import org.apache.hadoop.hdfs.security.token.block.BlockKey; //导入依赖的package包/类
/**
 * Computes the password for the given block token identifier using the
 * current block key, stamping the identifier with an expiry date and the
 * key id first.
 *
 * @param identifier the token identifier to stamp and sign
 * @return the password bytes derived from the identifier and current key
 * @throws IllegalStateException if the current key is unavailable
 */
@Override
protected byte[] createPassword(BlockTokenIdentifier identifier) {
  BlockKey key;
  try {
    key = getBlockKeyByType(BlockKey.KeyType.CurrKey);
  } catch (IOException ex) {
    // Fix: chain the cause so the underlying lookup failure's stack trace
    // is preserved (previously only ex.getMessage() survived).
    throw new IllegalStateException(
        "currentKey hasn't been initialized. [" + ex.getMessage() + "]", ex);
  }
  if (key == null) {
    throw new IllegalStateException("currentKey hasn't been initialized.");
  }
  identifier.setExpiryDate(Time.now() + tokenLifetime);
  identifier.setKeyId(key.getKeyId());
  if (LOG.isDebugEnabled()) {
    LOG.debug("Generating block token for " + identifier.toString());
  }
  return createPassword(identifier.getBytes(), key.getKey());
}
示例3: retrievePassword
import org.apache.hadoop.hdfs.security.token.block.BlockKey; //导入依赖的package包/类
/**
 * Re-computes the password for the given block token identifier so it can
 * be compared against the password presented by the client.
 *
 * @param identifier the token identifier to validate
 * @return the password bytes derived from the identifier and its block key
 * @throws InvalidToken if the token has expired or its block key is unknown
 */
@Override
public byte[] retrievePassword(BlockTokenIdentifier identifier)
    throws InvalidToken {
  if (isExpired(identifier.getExpiryDate())) {
    throw new InvalidToken(
        "Block token with " + identifier.toString() + " is expired.");
  }
  BlockKey key = null;
  try {
    key = getBlockKeyById(identifier.getKeyId());
  } catch (IOException ignored) {
    // Intentional: a failed key lookup is treated exactly like a missing
    // key -- key stays null and the InvalidToken below is thrown.
  }
  if (key == null) {
    throw new InvalidToken(
        "Can't re-compute password for " + identifier.toString() +
        ", since the required block key (keyID=" + identifier.getKeyId() +
        ") doesn't exist.");
  }
  return createPassword(identifier.getBytes(), key.getKey());
}
示例4: deserializeBlockKey
import org.apache.hadoop.hdfs.security.token.block.BlockKey; //导入依赖的package包/类
/**
 * Decodes a {@code BlockKey} from the raw bytes held by the given variable
 * and tags it with the key type implied by the variable's type.
 *
 * @param var storage variable whose value is the serialized key
 * @return the deserialized, type-tagged block key
 * @throws IOException if the key bytes cannot be read
 */
private static BlockKey deserializeBlockKey(ByteArrayVariable var)
    throws IOException {
  byte[] raw = (byte[]) var.getValue();
  DataInputStream in = new DataInputStream(new ByteArrayInputStream(raw));
  BlockKey blockKey = new BlockKey();
  blockKey.readFields(in);
  // Map the storage variable's type onto the key type; any other variable
  // type leaves the key untagged, matching the original behavior.
  switch (var.getType()) {
    case BTCurrKey:
      blockKey.setKeyType(BlockKey.KeyType.CurrKey);
      break;
    case BTNextKey:
      blockKey.setKeyType(BlockKey.KeyType.NextKey);
      break;
    case BTSimpleKey:
      blockKey.setKeyType(BlockKey.KeyType.SimpleKey);
      break;
  }
  return blockKey;
}
示例5: convert
import org.apache.hadoop.hdfs.security.token.block.BlockKey; //导入依赖的package包/类
/**
 * Converts a {@code BlockKey} into its protobuf form. A key with no encoded
 * material is serialized with an empty byte string.
 *
 * @param key the block key to convert
 * @return the protobuf representation of the key
 */
public static BlockKeyProto convert(BlockKey key) {
  byte[] material = key.getEncodedKey();
  if (material == null) {
    material = DFSUtil.EMPTY_BYTES;
  }
  BlockKeyProto.Builder builder = BlockKeyProto.newBuilder();
  builder.setKeyId(key.getKeyId());
  builder.setKeyBytes(ByteString.copyFrom(material));
  builder.setExpiryDate(key.getExpiryDate());
  return builder.build();
}
示例6: convertBlockKeys
import org.apache.hadoop.hdfs.security.token.block.BlockKey; //导入依赖的package包/类
/**
 * Converts a list of {@code BlockKeyProto} messages into an array of
 * {@code BlockKey}s, preserving order.
 *
 * @param list the protobuf keys to convert
 * @return the converted keys, in list order
 */
public static BlockKey[] convertBlockKeys(List<BlockKeyProto> list) {
  BlockKey[] keys = new BlockKey[list.size()];
  int idx = 0;
  for (BlockKeyProto proto : list) {
    keys[idx] = convert(proto);
    idx++;
  }
  return keys;
}
示例7: testConvertBlockKey
import org.apache.hadoop.hdfs.security.token.block.BlockKey; //导入依赖的package包/类
/** Verifies that a BlockKey survives a protobuf round trip unchanged. */
@Test
public void testConvertBlockKey() {
  BlockKey original = getBlockKey(1);
  BlockKeyProto proto = PBHelper.convert(original);
  BlockKey roundTripped = PBHelper.convert(proto);
  compare(original, roundTripped);
}
示例8: testConvertExportedBlockKeys
import org.apache.hadoop.hdfs.security.token.block.BlockKey; //导入依赖的package包/类
/** Verifies that ExportedBlockKeys survives a protobuf round trip unchanged. */
@Test
public void testConvertExportedBlockKeys() {
  BlockKey[] extraKeys = { getBlockKey(2), getBlockKey(3) };
  ExportedBlockKeys original =
      new ExportedBlockKeys(true, 9, 10, getBlockKey(1), extraKeys);
  ExportedBlockKeysProto proto = PBHelper.convert(original);
  ExportedBlockKeys roundTripped = PBHelper.convert(proto);
  compare(original, roundTripped);
}
示例9: compare
import org.apache.hadoop.hdfs.security.token.block.BlockKey; //导入依赖的package包/类
/**
 * Asserts that two {@code ExportedBlockKeys} instances carry the same key
 * set, current key, key update interval, and token lifetime.
 */
void compare(ExportedBlockKeys expKeys, ExportedBlockKeys expKeys1) {
  BlockKey[] expected = expKeys.getAllKeys();
  BlockKey[] actual = expKeys1.getAllKeys();
  assertEquals(expected.length, actual.length);
  for (int idx = 0; idx < expected.length; idx++) {
    compare(expected[idx], actual[idx]);
  }
  compare(expKeys.getCurrentKey(), expKeys1.getCurrentKey());
  assertEquals(expKeys.getKeyUpdateInterval(),
      expKeys1.getKeyUpdateInterval());
  assertEquals(expKeys.getTokenLifetime(), expKeys1.getTokenLifetime());
}
示例10: testConvertDatanodeRegistration
import org.apache.hadoop.hdfs.security.token.block.BlockKey; //导入依赖的package包/类
/**
 * Verifies that a DatanodeRegistration (storage info, exported keys,
 * software version) survives a protobuf round trip unchanged.
 */
@Test
public void testConvertDatanodeRegistration() {
  DatanodeID dnId = DFSTestUtil.getLocalDatanodeID();
  BlockKey[] extraKeys = { getBlockKey(2), getBlockKey(3) };
  ExportedBlockKeys exported =
      new ExportedBlockKeys(true, 9, 10, getBlockKey(1), extraKeys);
  DatanodeRegistration original = new DatanodeRegistration(dnId,
      new StorageInfo(NodeType.DATA_NODE), exported, "3.0.0");
  DatanodeRegistrationProto proto = PBHelper.convert(original);
  DatanodeRegistration roundTripped = PBHelper.convert(proto);
  compare(original.getStorageInfo(), roundTripped.getStorageInfo());
  compare(original.getExportedKeys(), roundTripped.getExportedKeys());
  compare(original, roundTripped);
  assertEquals(original.getSoftwareVersion(),
      roundTripped.getSoftwareVersion());
}
示例11: convert
import org.apache.hadoop.hdfs.security.token.block.BlockKey; //导入依赖的package包/类
/**
 * Converts a {@code BlockKey} into its protobuf form. A key with no encoded
 * material is serialized with an empty byte string.
 *
 * @param key the block key to convert
 * @return the protobuf representation of the key
 */
public static BlockKeyProto convert(BlockKey key) {
  byte[] material = key.getEncodedKey();
  if (material == null) {
    material = DFSUtilClient.EMPTY_BYTES;
  }
  BlockKeyProto.Builder builder = BlockKeyProto.newBuilder();
  builder.setKeyId(key.getKeyId());
  builder.setKeyBytes(PBHelperClient.getByteString(material));
  builder.setExpiryDate(key.getExpiryDate());
  return builder.build();
}
示例12: testConvertDatanodeRegistration
import org.apache.hadoop.hdfs.security.token.block.BlockKey; //导入依赖的package包/类
/**
 * Verifies that a DatanodeRegistration (storage info, exported keys,
 * software version) survives a protobuf round trip unchanged.
 */
@Test
public void testConvertDatanodeRegistration() {
  DatanodeID dnId = DFSTestUtil.getLocalDatanodeID();
  BlockKey[] extraKeys = { getBlockKey(2), getBlockKey(3) };
  ExportedBlockKeys exported =
      new ExportedBlockKeys(true, 9, 10, getBlockKey(1), extraKeys);
  DatanodeRegistration original = new DatanodeRegistration(dnId,
      new StorageInfo(), exported, "3.0.0");
  DatanodeRegistrationProto proto = PBHelper.convert(original);
  DatanodeRegistration roundTripped = PBHelper.convert(proto);
  compare(original.getStorageInfo(), roundTripped.getStorageInfo());
  compare(original.getExportedKeys(), roundTripped.getExportedKeys());
  compare(original, roundTripped);
  assertEquals(original.getSoftwareVersion(),
      roundTripped.getSoftwareVersion());
}