This article collects typical usage examples of the Java class org.apache.hadoop.security.proto.SecurityProtos.TokenProto. If you are wondering what exactly TokenProto is for, how to use it, or what working examples look like, the curated class code examples here should help.
TokenProto belongs to the org.apache.hadoop.security.proto.SecurityProtos package. Fifteen code examples of the class are shown below, sorted by popularity by default.
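As a quick orientation, here is a minimal standalone sketch of the builder/accessor API the examples below rely on. The field names are taken from the examples themselves; parseFrom is the standard deserializer protobuf generates for every message, and the kind/service string values are illustrative placeholders.

import com.google.protobuf.ByteString;
import com.google.protobuf.InvalidProtocolBufferException;
import org.apache.hadoop.security.proto.SecurityProtos.TokenProto;

// Minimal round trip: build a TokenProto, serialize it, and parse it back.
static TokenProto roundTrip() throws InvalidProtocolBufferException {
  TokenProto proto = TokenProto.newBuilder()
      .setIdentifier(ByteString.copyFrom("id".getBytes()))   // placeholder bytes
      .setPassword(ByteString.copyFrom("pw".getBytes()))     // placeholder bytes
      .setKind("HDFS_DELEGATION_TOKEN")                      // placeholder kind
      .setService("namenode:8020")                           // placeholder service
      .build();
  byte[] wire = proto.toByteArray();        // serialize to bytes
  return TokenProto.parseFrom(wire);        // parse back from bytes
}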
Example 1: recoverJobShuffleInfo
import org.apache.hadoop.security.proto.SecurityProtos.TokenProto; // import the required package/class
private void recoverJobShuffleInfo(String jobIdStr, byte[] data)
    throws IOException {
  JobID jobId;
  try {
    jobId = JobID.forName(jobIdStr);
  } catch (IllegalArgumentException e) {
    throw new IOException("Bad job ID " + jobIdStr + " in state store", e);
  }
  JobShuffleInfoProto proto = JobShuffleInfoProto.parseFrom(data);
  String user = proto.getUser();
  TokenProto tokenProto = proto.getJobToken();
  // Rebuild the job token from the persisted protobuf fields.
  Token<JobTokenIdentifier> jobToken = new Token<JobTokenIdentifier>(
      tokenProto.getIdentifier().toByteArray(),
      tokenProto.getPassword().toByteArray(),
      new Text(tokenProto.getKind()), new Text(tokenProto.getService()));
  addJobToken(jobId, user, jobToken);
}
Example 2: recordJobShuffleInfo
import org.apache.hadoop.security.proto.SecurityProtos.TokenProto; // import the required package/class
private void recordJobShuffleInfo(JobID jobId, String user,
    Token<JobTokenIdentifier> jobToken) throws IOException {
  if (stateDb != null) {
    // Persist the job token as a TokenProto inside the JobShuffleInfoProto record.
    TokenProto tokenProto = TokenProto.newBuilder()
        .setIdentifier(ByteString.copyFrom(jobToken.getIdentifier()))
        .setPassword(ByteString.copyFrom(jobToken.getPassword()))
        .setKind(jobToken.getKind().toString())
        .setService(jobToken.getService().toString())
        .build();
    JobShuffleInfoProto proto = JobShuffleInfoProto.newBuilder()
        .setUser(user).setJobToken(tokenProto).build();
    try {
      stateDb.put(bytes(jobId.toString()), proto.toByteArray());
    } catch (DBException e) {
      throw new IOException("Error storing " + jobId, e);
    }
  }
  addJobToken(jobId, user, jobToken);
}
Example 3: recordJobShuffleInfo
import org.apache.hadoop.security.proto.SecurityProtos.TokenProto; // import the required package/class
private void recordJobShuffleInfo(JobID jobId, String user,
    Token<JobTokenIdentifier> jobToken, String userFolder) throws IOException {
  if (stateDb != null) {
    TokenProto tokenProto = TokenProto.newBuilder()
        .setIdentifier(ByteString.copyFrom(jobToken.getIdentifier()))
        .setPassword(ByteString.copyFrom(jobToken.getPassword()))
        .setKind(jobToken.getKind().toString())
        .setService(jobToken.getService().toString())
        .build();
    // Same as Example 2, but the record additionally persists the user folder.
    JobShuffleInfoProto proto = JobShuffleInfoProto.newBuilder()
        .setUser(user).setJobToken(tokenProto).setUserFolder(userFolder).build();
    try {
      stateDb.put(bytes(jobId.toString()), proto.toByteArray());
    } catch (DBException e) {
      throw new IOException("Error storing " + jobId, e);
    }
  }
  addJobToken(jobId, user, jobToken, userFolder);
}
Example 4: convert
import org.apache.hadoop.security.proto.SecurityProtos.TokenProto; // import the required package/class
public static TokenProto convert(Token<?> tok) {
  return TokenProto.newBuilder().
      setIdentifier(ByteString.copyFrom(tok.getIdentifier())).
      setPassword(ByteString.copyFrom(tok.getPassword())).
      setKind(tok.getKind().toString()).
      setService(tok.getService().toString()).build();
}
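Pairing this helper with its inverse, which Example 6 below exercises, a round trip looks roughly like the following sketch; the literal byte arrays and names are placeholders, not values from the original source.

// Hypothetical round trip through the protobuf representation.
Token<BlockTokenIdentifier> original = new Token<BlockTokenIdentifier>(
    "id".getBytes(), "pw".getBytes(), new Text("kind"), new Text("service"));
TokenProto wire = PBHelper.convert(original);                  // Token -> TokenProto
Token<BlockTokenIdentifier> restored = PBHelper.convert(wire); // TokenProto -> Token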
Example 5: getHdfsBlocksMetadata
import org.apache.hadoop.security.proto.SecurityProtos.TokenProto; // import the required package/class
@Override
public HdfsBlocksMetadata getHdfsBlocksMetadata(String blockPoolId,
    long[] blockIds,
    List<Token<BlockTokenIdentifier>> tokens) throws IOException {
  List<TokenProto> tokensProtos =
      new ArrayList<TokenProto>(tokens.size());
  for (Token<BlockTokenIdentifier> t : tokens) {
    tokensProtos.add(PBHelper.convert(t));
  }
  // Build the request
  GetHdfsBlockLocationsRequestProto request =
      GetHdfsBlockLocationsRequestProto.newBuilder()
          .setBlockPoolId(blockPoolId)
          .addAllBlockIds(Longs.asList(blockIds))
          .addAllTokens(tokensProtos)
          .build();
  // Send the RPC
  GetHdfsBlockLocationsResponseProto response;
  try {
    response = rpcProxy.getHdfsBlockLocations(NULL_CONTROLLER, request);
  } catch (ServiceException e) {
    throw ProtobufHelper.getRemoteException(e);
  }
  // List of volumes in the response
  List<ByteString> volumeIdsByteStrings = response.getVolumeIdsList();
  List<byte[]> volumeIds = new ArrayList<byte[]>(volumeIdsByteStrings.size());
  for (ByteString bs : volumeIdsByteStrings) {
    volumeIds.add(bs.toByteArray());
  }
  // Array of indexes into the list of volumes, one per block
  List<Integer> volumeIndexes = response.getVolumeIndexesList();
  // Parsed HdfsVolumeId values, one per block
  return new HdfsBlocksMetadata(blockPoolId, blockIds,
      volumeIds, volumeIndexes);
}
Example 6: testConvertBlockToken
import org.apache.hadoop.security.proto.SecurityProtos.TokenProto; // import the required package/class
@Test
public void testConvertBlockToken() {
  Token<BlockTokenIdentifier> token = new Token<BlockTokenIdentifier>(
      "identifier".getBytes(), "password".getBytes(), new Text("kind"),
      new Text("service"));
  TokenProto tokenProto = PBHelper.convert(token);
  Token<BlockTokenIdentifier> token2 = PBHelper.convert(tokenProto);
  compare(token, token2);
}
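The compare(...) helper is not part of the snippet above. A plausible sketch of it, assuming JUnit assertions (static imports from org.junit.Assert) and a field-by-field comparison:

// Hypothetical helper: assert that two tokens carry identical fields.
private static void compare(Token<BlockTokenIdentifier> expected,
    Token<BlockTokenIdentifier> actual) {
  assertArrayEquals(expected.getIdentifier(), actual.getIdentifier());
  assertArrayEquals(expected.getPassword(), actual.getPassword());
  assertEquals(expected.getKind(), actual.getKind());
  assertEquals(expected.getService(), actual.getService());
}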
Example 7: getContainersToIncrease
import org.apache.hadoop.security.proto.SecurityProtos.TokenProto; // import the required package/class
@Override
public List<Token> getContainersToIncrease() {
  if (containersToIncrease != null) {
    return containersToIncrease;
  }
  IncreaseContainersResourceRequestProtoOrBuilder p =
      viaProto ? proto : builder;
  List<TokenProto> list = p.getIncreaseContainersList();
  containersToIncrease = new ArrayList<>();
  for (TokenProto c : list) {
    containersToIncrease.add(convertFromProtoFormat(c));
  }
  return containersToIncrease;
}
Developer: aliyun-beta, Project: aliyun-oss-hadoop-fs, Lines of code: 15, Source: IncreaseContainersResourceRequestPBImpl.java
Example 8: addIncreaseContainersToProto
import org.apache.hadoop.security.proto.SecurityProtos.TokenProto; // import the required package/class
private void addIncreaseContainersToProto() {
  maybeInitBuilder();
  builder.clearIncreaseContainers();
  if (this.containersToIncrease == null) {
    return;
  }
  // Lazily convert each Token to its proto form as the builder iterates,
  // avoiding an intermediate List<TokenProto>.
  Iterable<TokenProto> iterable = new Iterable<TokenProto>() {
    @Override
    public Iterator<TokenProto> iterator() {
      return new Iterator<TokenProto>() {
        Iterator<Token> iter = containersToIncrease.iterator();
        @Override
        public boolean hasNext() {
          return iter.hasNext();
        }
        @Override
        public TokenProto next() {
          return convertToProtoFormat(iter.next());
        }
        @Override
        public void remove() {
          throw new UnsupportedOperationException();
        }
      };
    }
  };
  builder.addAllIncreaseContainers(iterable);
}
Developer: aliyun-beta, Project: aliyun-oss-hadoop-fs, Lines of code: 32, Source: IncreaseContainersResourceRequestPBImpl.java
Example 9: convert
import org.apache.hadoop.security.proto.SecurityProtos.TokenProto; // import the required package/class
public static TokenProto convert(Token<?> tok) {
  return TokenProto.newBuilder().
      setIdentifier(getByteString(tok.getIdentifier())).
      setPassword(getByteString(tok.getPassword())).
      setKind(tok.getKind().toString()).
      setService(tok.getService().toString()).build();
}
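The getByteString helper used here is not reproduced in the snippet. Judging by how it is called, it is most likely a thin wrapper over protobuf's ByteString.copyFrom, roughly:

// Presumed implementation of the helper: wrap a byte[] as a ByteString.
public static ByteString getByteString(byte[] bytes) {
  return ByteString.copyFrom(bytes);
}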
Example 10: testConvertBlockToken
import org.apache.hadoop.security.proto.SecurityProtos.TokenProto; // import the required package/class
@Test
public void testConvertBlockToken() {
  Token<BlockTokenIdentifier> token = new Token<BlockTokenIdentifier>(
      "identifier".getBytes(), "password".getBytes(), new Text("kind"),
      new Text("service"));
  TokenProto tokenProto = PBHelperClient.convert(token);
  Token<BlockTokenIdentifier> token2 = PBHelperClient.convert(tokenProto);
  compare(token, token2);
}
Example 11: convertDelegationToken
import org.apache.hadoop.security.proto.SecurityProtos.TokenProto; // import the required package/class
public static Token<DelegationTokenIdentifier> convertDelegationToken(
    TokenProto blockToken) {
  return new Token<>(
      blockToken.getIdentifier().toByteArray(),
      blockToken.getPassword().toByteArray(), new Text(blockToken.getKind()),
      new Text(blockToken.getService()));
}
Example 12: getHdfsBlocksMetadata
import org.apache.hadoop.security.proto.SecurityProtos.TokenProto; // import the required package/class
@Override
public HdfsBlocksMetadata getHdfsBlocksMetadata(List<ExtendedBlock> blocks,
    List<Token<BlockTokenIdentifier>> tokens) throws IOException {
  // Convert to proto objects
  List<ExtendedBlockProto> blocksProtos =
      new ArrayList<>(blocks.size());
  List<TokenProto> tokensProtos = new ArrayList<>(tokens.size());
  for (ExtendedBlock b : blocks) {
    blocksProtos.add(PBHelper.convert(b));
  }
  for (Token<BlockTokenIdentifier> t : tokens) {
    tokensProtos.add(PBHelper.convert(t));
  }
  // Build the request
  GetHdfsBlockLocationsRequestProto request =
      GetHdfsBlockLocationsRequestProto.newBuilder()
          .addAllBlocks(blocksProtos).addAllTokens(tokensProtos).build();
  // Send the RPC
  GetHdfsBlockLocationsResponseProto response;
  try {
    response = rpcProxy.getHdfsBlockLocations(NULL_CONTROLLER, request);
  } catch (ServiceException e) {
    throw ProtobufHelper.getRemoteException(e);
  }
  // List of volumes in the response
  List<ByteString> volumeIdsByteStrings = response.getVolumeIdsList();
  List<byte[]> volumeIds = new ArrayList<>(volumeIdsByteStrings.size());
  for (ByteString bs : volumeIdsByteStrings) {
    volumeIds.add(bs.toByteArray());
  }
  // Array of indexes into the list of volumes, one per block
  List<Integer> volumeIndexes = response.getVolumeIndexesList();
  // Parsed HdfsVolumeId values, one per block
  return new HdfsBlocksMetadata(blocks.toArray(new ExtendedBlock[]{}),
      volumeIds, volumeIndexes);
}
Example 13: testConvertBlockToken
import org.apache.hadoop.security.proto.SecurityProtos.TokenProto; // import the required package/class
@Test
public void testConvertBlockToken() {
  Token<BlockTokenIdentifier> token =
      new Token<>("identifier".getBytes(),
          "password".getBytes(), new Text("kind"), new Text("service"));
  TokenProto tokenProto = PBHelper.convert(token);
  Token<BlockTokenIdentifier> token2 = PBHelper.convert(tokenProto);
  compare(token, token2);
}
Example 14: getHdfsBlocksMetadata
import org.apache.hadoop.security.proto.SecurityProtos.TokenProto; // import the required package/class
@Override
public HdfsBlocksMetadata getHdfsBlocksMetadata(List<ExtendedBlock> blocks,
    List<Token<BlockTokenIdentifier>> tokens) throws IOException {
  // Convert to proto objects
  List<ExtendedBlockProto> blocksProtos =
      new ArrayList<ExtendedBlockProto>(blocks.size());
  List<TokenProto> tokensProtos =
      new ArrayList<TokenProto>(tokens.size());
  for (ExtendedBlock b : blocks) {
    blocksProtos.add(PBHelper.convert(b));
  }
  for (Token<BlockTokenIdentifier> t : tokens) {
    tokensProtos.add(PBHelper.convert(t));
  }
  // Build the request
  GetHdfsBlockLocationsRequestProto request =
      GetHdfsBlockLocationsRequestProto.newBuilder()
          .addAllBlocks(blocksProtos)
          .addAllTokens(tokensProtos)
          .build();
  // Send the RPC
  GetHdfsBlockLocationsResponseProto response;
  try {
    response = rpcProxy.getHdfsBlockLocations(NULL_CONTROLLER, request);
  } catch (ServiceException e) {
    throw ProtobufHelper.getRemoteException(e);
  }
  // List of volumes in the response
  List<ByteString> volumeIdsByteStrings = response.getVolumeIdsList();
  List<byte[]> volumeIds = new ArrayList<byte[]>(volumeIdsByteStrings.size());
  for (ByteString bs : volumeIdsByteStrings) {
    volumeIds.add(bs.toByteArray());
  }
  // Array of indexes into the list of volumes, one per block
  List<Integer> volumeIndexes = response.getVolumeIndexesList();
  // Parsed HdfsVolumeId values, one per block
  return new HdfsBlocksMetadata(blocks.toArray(new ExtendedBlock[] {}),
      volumeIds, volumeIndexes);
}
Example 15: convertFromProtoFormat
import org.apache.hadoop.security.proto.SecurityProtos.TokenProto; // import the required package/class
private TokenPBImpl convertFromProtoFormat(TokenProto p) {
  return new TokenPBImpl(p);
}
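Example 8 calls the mirror-image helper convertToProtoFormat, which none of the snippets show. In the usual YARN PBImpl pattern it would simply unwrap the PBImpl to its underlying proto, along these lines:

// Presumed counterpart to convertFromProtoFormat: extract the proto held
// inside the PBImpl wrapper.
private TokenProto convertToProtoFormat(Token t) {
  return ((TokenPBImpl) t).getProto();
}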