This article collects typical usage examples of the Java class org.apache.hadoop.hdfs.security.token.block.BlockTokenSecretManager. If you are wondering what BlockTokenSecretManager is for and how to use it, the curated code examples below should help.
BlockTokenSecretManager belongs to the package org.apache.hadoop.hdfs.security.token.block. Fifteen code examples of the class are shown below, ordered by popularity by default.
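Before the examples, a quick hedged sketch of the core pattern they all share: the manager mints a Token<BlockTokenIdentifier> for a block with a set of access modes, and checkAccess later verifies that a presented token covers the requested mode. The names btsm and blk are stand-ins here, and exact signatures vary slightly across Hadoop releases:
import java.io.IOException;
import java.util.EnumSet;
import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier;
import org.apache.hadoop.hdfs.security.token.block.BlockTokenSecretManager;
import org.apache.hadoop.security.token.Token;

/** Hedged sketch (assumes an initialized manager): mint, then verify, a token. */
static void mintAndVerify(BlockTokenSecretManager btsm, ExtendedBlock blk)
    throws IOException {
  Token<BlockTokenIdentifier> token = btsm.generateToken(blk,
      EnumSet.of(BlockTokenSecretManager.AccessMode.READ));
  // throws SecretManager.InvalidToken if the token does not cover the mode
  btsm.checkAccess(token, null, blk, BlockTokenSecretManager.AccessMode.READ);
}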
Example 1: transferBlock
import org.apache.hadoop.hdfs.security.token.block.BlockTokenSecretManager; // import the package/class used by this example
@Override
public void transferBlock(final ExtendedBlock blk,
    final Token<BlockTokenIdentifier> blockToken,
    final String clientName,
    final DatanodeInfo[] targets,
    final StorageType[] targetStorageTypes) throws IOException {
  checkAccess(socketOut, true, blk, blockToken,
      Op.TRANSFER_BLOCK, BlockTokenSecretManager.AccessMode.COPY);
  previousOpClientName = clientName;
  updateCurrentThreadName(Op.TRANSFER_BLOCK + " " + blk);
  final DataOutputStream out = new DataOutputStream(getOutputStream());
  try {
    datanode.transferReplicaForPipelineRecovery(blk, targets,
        targetStorageTypes, clientName);
    writeResponse(Status.SUCCESS, null, out);
  } catch (IOException ioe) {
    LOG.info("transferBlock " + blk + " received exception " + ioe);
    incrDatanodeNetworkErrors();
    throw ioe;
  } finally {
    IOUtils.closeStream(out);
  }
}
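For the COPY check above to pass, the caller must present a token minted with COPY access. A minimal sketch of the issuing side, assuming a running MiniDFSCluster named cluster (the helper name mintCopyToken is hypothetical):
import java.io.IOException;
import java.util.EnumSet;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier;
import org.apache.hadoop.hdfs.security.token.block.BlockTokenSecretManager;
import org.apache.hadoop.security.token.Token;

/** Hypothetical test helper: mint a block token granting COPY access. */
static Token<BlockTokenIdentifier> mintCopyToken(MiniDFSCluster cluster,
    ExtendedBlock blk) throws IOException {
  BlockTokenSecretManager btsm = cluster.getNamesystem(0).getBlockManager()
      .getBlockTokenSecretManager();
  return btsm.generateToken(blk,
      EnumSet.of(BlockTokenSecretManager.AccessMode.COPY));
}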
Example 2: getBlockLocalPathInfo
import org.apache.hadoop.hdfs.security.token.block.BlockTokenSecretManager; // import the package/class used by this example
@Override
public BlockLocalPathInfo getBlockLocalPathInfo(ExtendedBlock block,
    Token<BlockTokenIdentifier> token) throws IOException {
  checkBlockLocalPathAccess();
  checkBlockToken(block, token, BlockTokenSecretManager.AccessMode.READ);
  Preconditions.checkNotNull(data, "Storage not yet initialized");
  BlockLocalPathInfo info = data.getBlockLocalPathInfo(block);
  if (LOG.isDebugEnabled()) {
    if (info != null) {
      if (LOG.isTraceEnabled()) {
        LOG.trace("getBlockLocalPathInfo successful block=" + block
            + " blockfile " + info.getBlockPath() + " metafile "
            + info.getMetaPath());
      }
    } else {
      if (LOG.isTraceEnabled()) {
        LOG.trace("getBlockLocalPathInfo for block=" + block
            + " returning null");
      }
    }
  }
  metrics.incrBlocksGetLocalPathInfo();
  return info;
}
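On the other end of this RPC sits a ClientDatanodeProtocol proxy; the READ-mode block token normally arrives with the LocatedBlock returned by the NameNode. A hedged caller-side sketch (the helper name is hypothetical):
import java.io.IOException;
import org.apache.hadoop.hdfs.protocol.BlockLocalPathInfo;
import org.apache.hadoop.hdfs.protocol.ClientDatanodeProtocol;
import org.apache.hadoop.hdfs.protocol.LocatedBlock;

/** Hypothetical caller: resolve the local block/meta file paths of a replica,
 *  reusing the READ-mode token the NameNode attached to the LocatedBlock. */
static BlockLocalPathInfo localPathsFor(ClientDatanodeProtocol datanode,
    LocatedBlock lb) throws IOException {
  return datanode.getBlockLocalPathInfo(lb.getBlock(), lb.getBlockToken());
}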
Example 3: getHdfsBlocksMetadata
import org.apache.hadoop.hdfs.security.token.block.BlockTokenSecretManager; // import the package/class used by this example
@Override
public HdfsBlocksMetadata getHdfsBlocksMetadata(
    String bpId, long[] blockIds,
    List<Token<BlockTokenIdentifier>> tokens) throws IOException,
    UnsupportedOperationException {
  if (!getHdfsBlockLocationsEnabled) {
    throw new UnsupportedOperationException("Datanode#getHdfsBlocksMetadata "
        + " is not enabled in datanode config");
  }
  if (blockIds.length != tokens.size()) {
    throw new IOException("Differing number of blocks and tokens");
  }
  // Check access for each block
  for (int i = 0; i < blockIds.length; i++) {
    checkBlockToken(new ExtendedBlock(bpId, blockIds[i]),
        tokens.get(i), BlockTokenSecretManager.AccessMode.READ);
  }
  DataNodeFaultInjector.get().getHdfsBlocksMetadata();
  return data.getHdfsBlocksMetadata(bpId, blockIds);
}
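The length check above means callers must keep blockIds and tokens strictly parallel. A small hypothetical helper that derives both from NameNode-supplied LocatedBlocks:
import java.util.List;
import org.apache.hadoop.hdfs.protocol.LocatedBlock;
import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier;
import org.apache.hadoop.security.token.Token;

/** Hypothetical helper: derive the parallel blockIds/tokens arguments that
 *  getHdfsBlocksMetadata expects from NameNode-supplied LocatedBlocks. */
static long[] blockIdsOf(List<LocatedBlock> located,
    List<Token<BlockTokenIdentifier>> tokensOut) {
  long[] ids = new long[located.size()];
  for (int i = 0; i < located.size(); i++) {
    ids[i] = located.get(i).getBlock().getBlockId();
    tokensOut.add(located.get(i).getBlockToken()); // keep lists parallel
  }
  return ids;
}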
Example 4: checkReadAccess
import org.apache.hadoop.hdfs.security.token.block.BlockTokenSecretManager; // import the package/class used by this example
private void checkReadAccess(final ExtendedBlock block) throws IOException {
  if (isBlockTokenEnabled) {
    Set<TokenIdentifier> tokenIds = UserGroupInformation.getCurrentUser()
        .getTokenIdentifiers();
    if (tokenIds.size() != 1) {
      throw new IOException("Can't continue since none or more than one "
          + "BlockTokenIdentifier is found.");
    }
    for (TokenIdentifier tokenId : tokenIds) {
      BlockTokenIdentifier id = (BlockTokenIdentifier) tokenId;
      if (LOG.isDebugEnabled()) {
        LOG.debug("Got: " + id.toString());
      }
      blockPoolTokenSecretManager.checkAccess(id, null, block,
          BlockTokenSecretManager.AccessMode.READ);
    }
  }
}
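This check reads token identifiers off the current UserGroupInformation rather than taking a token parameter, so the caller must attach the block token to its UGI before issuing the RPC. A hedged sketch (the helper name is hypothetical):
import java.io.IOException;
import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.security.token.Token;

/** Hypothetical caller-side step: attach the block token to the current UGI
 *  so the server-side getTokenIdentifiers() lookup above can find it. */
static void attachReadToken(Token<BlockTokenIdentifier> readToken)
    throws IOException {
  UserGroupInformation.getCurrentUser().addToken(readToken);
}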
Example 5: ensureSerialNumbersNeverOverlap
import org.apache.hadoop.hdfs.security.token.block.BlockTokenSecretManager; // import the package/class used by this example
@Test
public void ensureSerialNumbersNeverOverlap() {
  BlockTokenSecretManager btsm1 = cluster.getNamesystem(0).getBlockManager()
      .getBlockTokenSecretManager();
  BlockTokenSecretManager btsm2 = cluster.getNamesystem(1).getBlockManager()
      .getBlockTokenSecretManager();
  BlockTokenSecretManager btsm3 = cluster.getNamesystem(2).getBlockManager()
      .getBlockTokenSecretManager();
  setAndCheckSerialNumber(0, btsm1, btsm2, btsm3);
  setAndCheckSerialNumber(Integer.MAX_VALUE, btsm1, btsm2, btsm3);
  setAndCheckSerialNumber(Integer.MIN_VALUE, btsm1, btsm2, btsm3);
  setAndCheckSerialNumber(Integer.MAX_VALUE / 2, btsm1, btsm2, btsm3);
  setAndCheckSerialNumber(Integer.MIN_VALUE / 2, btsm1, btsm2, btsm3);
  setAndCheckSerialNumber(Integer.MAX_VALUE / 3, btsm1, btsm2, btsm3);
  setAndCheckSerialNumber(Integer.MIN_VALUE / 3, btsm1, btsm2, btsm3);
}
Example 6: setAndCheckSerialNumber
import org.apache.hadoop.hdfs.security.token.block.BlockTokenSecretManager; // import the package/class used by this example
private void setAndCheckSerialNumber(int serialNumber,
    BlockTokenSecretManager... btsms) {
  for (BlockTokenSecretManager btsm : btsms) {
    btsm.setSerialNo(serialNumber);
  }
  for (int i = 0; i < btsms.length; i++) {
    for (int j = 0; j < btsms.length; j++) {
      if (j == i) {
        continue;
      }
      int first = btsms[i].getSerialNoForTesting();
      int second = btsms[j].getSerialNoForTesting();
      assertFalse("Overlap found for set serial number (" + serialNumber
          + ") is " + i + ": " + first + " == " + j + ": " + second,
          first == second);
    }
  }
}
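Serial numbers identify the block keys that tokens are signed with; the NameNode-side manager periodically rolls keys and exports them for DataNodes to import. A hedged sketch of that exchange, assuming nnBtsm runs in NameNode mode and dnBtsm in DataNode mode (method names are taken from the upstream class, but modifiers and checked exceptions vary by release):
import java.io.IOException;
import org.apache.hadoop.hdfs.security.token.block.BlockTokenSecretManager;
import org.apache.hadoop.hdfs.security.token.block.ExportedBlockKeys;

/** Hedged sketch: roll keys on the NameNode side and import them on a
 *  DataNode-side manager, the way heartbeats propagate them in practice. */
static void propagateKeys(BlockTokenSecretManager nnBtsm,
    BlockTokenSecretManager dnBtsm) throws IOException {
  nnBtsm.updateKeys();                          // roll to a fresh key set
  ExportedBlockKeys keys = nnBtsm.exportKeys(); // snapshot current keys
  dnBtsm.addKeys(keys);                         // DataNode side imports them
}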
Example 7: transferBlock
import org.apache.hadoop.hdfs.security.token.block.BlockTokenSecretManager; // import the package/class used by this example
@Override
public void transferBlock(final ExtendedBlock blk,
    final Token<BlockTokenIdentifier> blockToken,
    final String clientName,
    final DatanodeInfo[] targets) throws IOException {
  checkAccess(socketOut, true, blk, blockToken,
      Op.TRANSFER_BLOCK, BlockTokenSecretManager.AccessMode.COPY);
  previousOpClientName = clientName;
  updateCurrentThreadName(Op.TRANSFER_BLOCK + " " + blk);
  final DataOutputStream out = new DataOutputStream(getOutputStream());
  try {
    datanode.transferReplicaForPipelineRecovery(blk, targets, clientName);
    writeResponse(Status.SUCCESS, null, out);
  } finally {
    IOUtils.closeStream(out);
  }
}
Example 8: getBlockLocalPathInfo
import org.apache.hadoop.hdfs.security.token.block.BlockTokenSecretManager; // import the package/class used by this example
@Override
public BlockLocalPathInfo getBlockLocalPathInfo(ExtendedBlock block,
    Token<BlockTokenIdentifier> token) throws IOException {
  checkBlockLocalPathAccess();
  checkBlockToken(block, token, BlockTokenSecretManager.AccessMode.READ);
  BlockLocalPathInfo info = data.getBlockLocalPathInfo(block);
  if (LOG.isDebugEnabled()) {
    if (info != null) {
      if (LOG.isTraceEnabled()) {
        LOG.trace("getBlockLocalPathInfo successful block=" + block
            + " blockfile " + info.getBlockPath() + " metafile "
            + info.getMetaPath());
      }
    } else {
      if (LOG.isTraceEnabled()) {
        LOG.trace("getBlockLocalPathInfo for block=" + block
            + " returning null");
      }
    }
  }
  metrics.incrBlocksGetLocalPathInfo();
  return info;
}
Example 9: getHdfsBlocksMetadata
import org.apache.hadoop.hdfs.security.token.block.BlockTokenSecretManager; // import the package/class used by this example
@Override
public HdfsBlocksMetadata getHdfsBlocksMetadata(List<ExtendedBlock> blocks,
    List<Token<BlockTokenIdentifier>> tokens) throws IOException,
    UnsupportedOperationException {
  if (!getHdfsBlockLocationsEnabled) {
    throw new UnsupportedOperationException("Datanode#getHdfsBlocksMetadata "
        + " is not enabled in datanode config");
  }
  if (blocks.size() != tokens.size()) {
    throw new IOException("Differing number of blocks and tokens");
  }
  // Check access for each block
  for (int i = 0; i < blocks.size(); i++) {
    checkBlockToken(blocks.get(i), tokens.get(i),
        BlockTokenSecretManager.AccessMode.READ);
  }
  return data.getHdfsBlocksMetadata(blocks);
}
Example 10: checkWriteAccess
import org.apache.hadoop.hdfs.security.token.block.BlockTokenSecretManager; // import the package/class used by this example
private void checkWriteAccess(final ExtendedBlock block) throws IOException {
  if (isBlockTokenEnabled) {
    Set<TokenIdentifier> tokenIds = UserGroupInformation.getCurrentUser()
        .getTokenIdentifiers();
    if (tokenIds.size() != 1) {
      throw new IOException("Can't continue since none or more than one "
          + "BlockTokenIdentifier is found.");
    }
    for (TokenIdentifier tokenId : tokenIds) {
      BlockTokenIdentifier id = (BlockTokenIdentifier) tokenId;
      if (LOG.isDebugEnabled()) {
        LOG.debug("Got: " + id.toString());
      }
      blockPoolTokenSecretManager.checkAccess(id, null, block,
          BlockTokenSecretManager.AccessMode.READ);
    }
  }
}
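Note that, matching the upstream source this snippet appears to be drawn from, checkWriteAccess actually verifies AccessMode.READ despite its name. A token minted with both modes satisfies this check as well as genuine WRITE checks elsewhere; a hedged sketch (the helper name is hypothetical):
import java.io.IOException;
import java.util.EnumSet;
import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier;
import org.apache.hadoop.hdfs.security.token.block.BlockTokenSecretManager;
import org.apache.hadoop.security.token.Token;

/** Hedged sketch: mint a token carrying both READ and WRITE access. */
static Token<BlockTokenIdentifier> mintRecoveryToken(
    BlockTokenSecretManager btsm, ExtendedBlock blk) throws IOException {
  return btsm.generateToken(blk, EnumSet.of(
      BlockTokenSecretManager.AccessMode.READ,
      BlockTokenSecretManager.AccessMode.WRITE));
}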
Example 11: testWrite
import org.apache.hadoop.hdfs.security.token.block.BlockTokenSecretManager; // import the package/class used by this example
private void testWrite(ExtendedBlock block, BlockConstructionStage stage,
    long newGS, String description, Boolean eofExcepted) throws IOException {
  sendBuf.reset();
  recvBuf.reset();
  sender.writeBlock(block, BlockTokenSecretManager.DUMMY_TOKEN, "cl",
      new DatanodeInfo[1], null, stage,
      0, block.getNumBytes(), block.getNumBytes(), newGS,
      DEFAULT_CHECKSUM);
  if (eofExcepted) {
    sendResponse(Status.ERROR, null, null, recvOut);
    sendRecvData(description, true);
  } else if (stage == BlockConstructionStage.PIPELINE_CLOSE_RECOVERY) {
    // ok, finally write a block with 0 len
    sendResponse(Status.SUCCESS, "", null, recvOut);
    sendRecvData(description, false);
  } else {
    writeZeroLengthPacket(block, description);
  }
}
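BlockTokenSecretManager.DUMMY_TOKEN stands in for a real block token when token authentication is disabled, which is what lets this wire-level test call writeBlock directly. A hypothetical invocation of the helper across two pipeline stages (the pool id, block id, and generation stamps are made up; a real test takes them from its MiniDFSCluster setup):
import java.io.IOException;
import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
import org.apache.hadoop.hdfs.protocol.datatransfer.BlockConstructionStage;
import org.junit.Test;

@Test
public void sketchWriteStages() throws IOException {
  // hypothetical pool id, block id, and generation stamps
  ExtendedBlock blk = new ExtendedBlock("BP-example", 1000L);
  testWrite(blk, BlockConstructionStage.PIPELINE_SETUP_CREATE, 0L,
      "create new block", false);
  testWrite(blk, BlockConstructionStage.PIPELINE_CLOSE_RECOVERY, 2L,
      "recover closed block", false);
}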
Example 12: replaceBlock
import org.apache.hadoop.hdfs.security.token.block.BlockTokenSecretManager; // import the package/class used by this example
private boolean replaceBlock(ExtendedBlock block, DatanodeInfo source,
    DatanodeInfo sourceProxy, DatanodeInfo destination) throws IOException {
  Socket sock = new Socket();
  sock.connect(NetUtils.createSocketAddr(destination.getXferAddr()),
      HdfsServerConstants.READ_TIMEOUT);
  sock.setKeepAlive(true);
  // sendRequest
  DataOutputStream out = new DataOutputStream(sock.getOutputStream());
  new Sender(out).replaceBlock(block, BlockTokenSecretManager.DUMMY_TOKEN,
      source.getStorageID(), sourceProxy);
  out.flush();
  // receiveResponse
  DataInputStream reply = new DataInputStream(sock.getInputStream());
  BlockOpResponseProto proto = BlockOpResponseProto.parseDelimitedFrom(reply);
  return proto.getStatus() == Status.SUCCESS;
}
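The helper returns true only when the DataNode reports SUCCESS. A hypothetical usage sketch that moves a replica of a located block onto a new target (the arguments are assumed to come from a running MiniDFSCluster):
import java.io.IOException;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.protocol.LocatedBlock;
import static org.junit.Assert.assertTrue;

/** Hypothetical usage: copy lb's replica from its first location (acting as
 *  its own proxy) onto newTarget, then assert the DataNode reported SUCCESS. */
private void sketchReplace(LocatedBlock lb, DatanodeInfo newTarget)
    throws IOException {
  DatanodeInfo[] locs = lb.getLocations();
  assertTrue("replaceBlock should succeed",
      replaceBlock(lb.getBlock(), locs[0], locs[0], newTarget));
}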
Example 13: transferBlock
import org.apache.hadoop.hdfs.security.token.block.BlockTokenSecretManager; // import the package/class used by this example
@Override
public void transferBlock(final ExtendedBlock blk,
    final Token<BlockTokenIdentifier> blockToken,
    final String clientName,
    final DatanodeInfo[] targets,
    final StorageType[] targetStorageTypes) throws IOException {
  checkAccess(socketOut, true, blk, blockToken,
      Op.TRANSFER_BLOCK, BlockTokenSecretManager.AccessMode.COPY);
  previousOpClientName = clientName;
  updateCurrentThreadName(Op.TRANSFER_BLOCK + " " + blk);
  final DataOutputStream out = new DataOutputStream(getOutputStream());
  try {
    datanode.transferReplicaForPipelineRecovery(blk, targets,
        targetStorageTypes, clientName);
    writeResponse(Status.SUCCESS, null, out);
  } finally {
    IOUtils.closeStream(out);
  }
}
Example 14: replaceBlock
import org.apache.hadoop.hdfs.security.token.block.BlockTokenSecretManager; // import the package/class used by this example
private boolean replaceBlock(ExtendedBlock block, DatanodeInfo source,
    DatanodeInfo sourceProxy, DatanodeInfo destination) throws IOException {
  Socket sock = new Socket();
  sock.connect(NetUtils.createSocketAddr(destination.getXferAddr()),
      HdfsServerConstants.READ_TIMEOUT);
  sock.setKeepAlive(true);
  // sendRequest
  DataOutputStream out = new DataOutputStream(sock.getOutputStream());
  new Sender(out).replaceBlock(block, StorageType.DEFAULT,
      BlockTokenSecretManager.DUMMY_TOKEN,
      source.getDatanodeUuid(), sourceProxy);
  out.flush();
  // receiveResponse: drain any IN_PROGRESS keep-alives before the final status
  DataInputStream reply = new DataInputStream(sock.getInputStream());
  BlockOpResponseProto proto = BlockOpResponseProto.parseDelimitedFrom(reply);
  while (proto.getStatus() == Status.IN_PROGRESS) {
    proto = BlockOpResponseProto.parseDelimitedFrom(reply);
  }
  return proto.getStatus() == Status.SUCCESS;
}
Example 15: transferBlock
import org.apache.hadoop.hdfs.security.token.block.BlockTokenSecretManager; // import the package/class used by this example
@Override
public void transferBlock(final ExtendedBlock blk,
    final Token<BlockTokenIdentifier> blockToken, final String clientName,
    final DatanodeInfo[] targets) throws IOException {
  checkAccess(null, true, blk, blockToken, Op.TRANSFER_BLOCK,
      BlockTokenSecretManager.AccessMode.COPY);
  previousOpClientName = clientName;
  updateCurrentThreadName(Op.TRANSFER_BLOCK + " " + blk);
  final DataOutputStream out = new DataOutputStream(getOutputStream());
  try {
    datanode.transferReplicaForPipelineRecovery(blk, targets, clientName);
    writeResponse(Status.SUCCESS, null, out);
  } finally {
    IOUtils.closeStream(out);
  }
}