This article collects typical usage examples of the Java class org.apache.hadoop.hdfs.protocol.BlockStoragePolicy. If you are wondering what the BlockStoragePolicy class is for, how to use it, or what real code that uses it looks like, the curated examples below should help.
The BlockStoragePolicy class belongs to the org.apache.hadoop.hdfs.protocol package. Fifteen code examples of the class are shown below, sorted by popularity by default.
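Before the numbered examples, here is a minimal client-side sketch (not drawn from the examples below) of how the class is typically obtained and queried. It assumes a reachable HDFS cluster addressed by the default Configuration and a Hadoop 2.6+ release (org.apache.hadoop.fs.StorageType); the class name ListStoragePolicies and the replication factor of 3 are illustrative assumptions.
import java.util.List;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.StorageType;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.protocol.BlockStoragePolicy;

public class ListStoragePolicies {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // DistributedFileSystem is the HDFS implementation of FileSystem;
    // the cast assumes fs.defaultFS points at an HDFS cluster.
    DistributedFileSystem dfs = (DistributedFileSystem) FileSystem.get(conf);
    try {
      // Ask the NameNode for every configured policy (HOT, WARM, COLD, ...),
      // as Example 6 below does inside the storagepolicies admin command.
      for (BlockStoragePolicy policy : dfs.getStoragePolicies()) {
        // chooseStorageTypes(replication) expands the policy into one
        // StorageType per expected replica of a block.
        List<StorageType> types = policy.chooseStorageTypes((short) 3);
        System.out.println(policy.getName() + " -> " + types);
      }
    } finally {
      dfs.close();
    }
  }
}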
Example 1: verifyQuotaForTruncate
import org.apache.hadoop.hdfs.protocol.BlockStoragePolicy; // import the required package/class
private void verifyQuotaForTruncate(INodesInPath iip, INodeFile file,
long newLength, QuotaCounts delta) throws QuotaExceededException {
if (!getFSNamesystem().isImageLoaded() || shouldSkipQuotaChecks()) {
// Do not check quota if edit log is still being processed
return;
}
final long diff = file.computeQuotaDeltaForTruncate(newLength);
final short repl = file.getBlockReplication();
delta.addStorageSpace(diff * repl);
final BlockStoragePolicy policy = getBlockStoragePolicySuite()
.getPolicy(file.getStoragePolicyID());
List<StorageType> types = policy.chooseStorageTypes(repl);
for (StorageType t : types) {
if (t.supportTypeQuota()) {
delta.addTypeSpace(t, diff);
}
}
if (diff > 0) {
readLock();
try {
verifyQuota(iip, iip.length() - 1, delta, null);
} finally {
readUnlock();
}
}
}
Example 2: computeQuotaDeltaForUCBlock
import org.apache.hadoop.hdfs.protocol.BlockStoragePolicy; // import the required package/class
/** Compute quota change for converting a complete block to a UC block */
private QuotaCounts computeQuotaDeltaForUCBlock(INodeFile file) {
final QuotaCounts delta = new QuotaCounts.Builder().build();
final BlockInfoContiguous lastBlock = file.getLastBlock();
if (lastBlock != null) {
final long diff = file.getPreferredBlockSize() - lastBlock.getNumBytes();
final short repl = file.getBlockReplication();
delta.addStorageSpace(diff * repl);
final BlockStoragePolicy policy = dir.getBlockStoragePolicySuite()
.getPolicy(file.getStoragePolicyID());
List<StorageType> types = policy.chooseStorageTypes(repl);
for (StorageType t : types) {
if (t.supportTypeQuota()) {
delta.addTypeSpace(t, diff);
}
}
}
return delta;
}
Example 3: chooseTarget4NewBlock
import org.apache.hadoop.hdfs.protocol.BlockStoragePolicy; // import the required package/class
/**
* Choose target datanodes for creating a new block.
*
* @throws IOException
* if the number of targets < minimum replication.
* @see BlockPlacementPolicy#chooseTarget(String, int, Node,
* Set, long, List, BlockStoragePolicy)
*/
public DatanodeStorageInfo[] chooseTarget4NewBlock(final String src,
final int numOfReplicas, final Node client,
final Set<Node> excludedNodes,
final long blocksize,
final List<String> favoredNodes,
final byte storagePolicyID) throws IOException {
List<DatanodeDescriptor> favoredDatanodeDescriptors =
getDatanodeDescriptors(favoredNodes);
final BlockStoragePolicy storagePolicy = storagePolicySuite.getPolicy(storagePolicyID);
final DatanodeStorageInfo[] targets = blockplacement.chooseTarget(src,
numOfReplicas, client, excludedNodes, blocksize,
favoredDatanodeDescriptors, storagePolicy);
if (targets.length < minReplication) {
throw new IOException("File " + src + " could only be replicated to "
+ targets.length + " nodes instead of minReplication (="
+ minReplication + "). There are "
+ getDatanodeManager().getNetworkTopology().getNumOfLeaves()
+ " datanode(s) running and "
+ (excludedNodes == null? "no": excludedNodes.size())
+ " node(s) are excluded in this operation.");
}
return targets;
}
Example 4: convert
import org.apache.hadoop.hdfs.protocol.BlockStoragePolicy; // import the required package/class
public static BlockStoragePolicyProto convert(BlockStoragePolicy policy) {
BlockStoragePolicyProto.Builder builder = BlockStoragePolicyProto
.newBuilder().setPolicyId(policy.getId()).setName(policy.getName());
// creation storage types
StorageTypesProto creationProto = convert(policy.getStorageTypes());
Preconditions.checkArgument(creationProto != null);
builder.setCreationPolicy(creationProto);
// creation fallback
StorageTypesProto creationFallbackProto = convert(
policy.getCreationFallbacks());
if (creationFallbackProto != null) {
builder.setCreationFallbackPolicy(creationFallbackProto);
}
// replication fallback
StorageTypesProto replicationFallbackProto = convert(
policy.getReplicationFallbacks());
if (replicationFallbackProto != null) {
builder.setReplicationFallbackPolicy(replicationFallbackProto);
}
return builder.build();
}
Example 5: getStoragePolicies
import org.apache.hadoop.hdfs.protocol.BlockStoragePolicy; // import the required package/class
@Override
public GetStoragePoliciesResponseProto getStoragePolicies(
RpcController controller, GetStoragePoliciesRequestProto request)
throws ServiceException {
try {
BlockStoragePolicy[] policies = server.getStoragePolicies();
GetStoragePoliciesResponseProto.Builder builder =
GetStoragePoliciesResponseProto.newBuilder();
if (policies == null) {
return builder.build();
}
for (BlockStoragePolicy policy : policies) {
builder.addPolicies(PBHelper.convert(policy));
}
return builder.build();
} catch (IOException e) {
throw new ServiceException(e);
}
}
Example 6: run
import org.apache.hadoop.hdfs.protocol.BlockStoragePolicy; // import the required package/class
@Override
public int run(Configuration conf, List<String> args) throws IOException {
final DistributedFileSystem dfs = AdminHelper.getDFS(conf);
try {
BlockStoragePolicy[] policies = dfs.getStoragePolicies();
System.out.println("Block Storage Policies:");
for (BlockStoragePolicy policy : policies) {
if (policy != null) {
System.out.println("\t" + policy);
}
}
} catch (IOException e) {
System.err.println(AdminHelper.prettifyException(e));
return 2;
}
return 0;
}
Example 7: verifyFile
import org.apache.hadoop.hdfs.protocol.BlockStoragePolicy; // import the required package/class
private void verifyFile(final Path parent, final HdfsFileStatus status,
final Byte expectedPolicyId) throws Exception {
HdfsLocatedFileStatus fileStatus = (HdfsLocatedFileStatus) status;
byte policyId = fileStatus.getStoragePolicy();
BlockStoragePolicy policy = policies.getPolicy(policyId);
if (expectedPolicyId != null) {
Assert.assertEquals((byte)expectedPolicyId, policy.getId());
}
final List<StorageType> types = policy.chooseStorageTypes(
status.getReplication());
for(LocatedBlock lb : fileStatus.getBlockLocations().getLocatedBlocks()) {
final Mover.StorageTypeDiff diff = new Mover.StorageTypeDiff(types,
lb.getStorageTypes());
Assert.assertTrue(fileStatus.getFullName(parent.toString())
+ " with policy " + policy + " has non-empty overlap: " + diff
+ ", the corresponding block is " + lb.getBlock().getLocalBlock(),
diff.removeOverlap(true));
}
}
Example 8: chooseTarget
import org.apache.hadoop.hdfs.protocol.BlockStoragePolicy; // import the required package/class
@Override
public DatanodeStorageInfo[] chooseTarget(String srcPath,
int numOfReplicas,
Node writer,
List<DatanodeStorageInfo> chosenNodes,
boolean returnChosenNodes,
Set<Node> excludedNodes,
long blocksize,
final BlockStoragePolicy storagePolicy) {
DatanodeStorageInfo[] results = super.chooseTarget(srcPath,
numOfReplicas, writer, chosenNodes, returnChosenNodes, excludedNodes,
blocksize, storagePolicy);
try {
Thread.sleep(3000);
} catch (InterruptedException e) {}
return results;
}
Example 9: testMultipleHots
import org.apache.hadoop.hdfs.protocol.BlockStoragePolicy; // import the required package/class
@Test
public void testMultipleHots() {
BlockStoragePolicySuite bsps = BlockStoragePolicySuite.createDefaultSuite();
StoragePolicySummary sts = new StoragePolicySummary(bsps.getAllPolicies());
BlockStoragePolicy hot = bsps.getPolicy("HOT");
sts.add(new StorageType[]{StorageType.DISK},hot);
sts.add(new StorageType[]{StorageType.DISK,StorageType.DISK},hot);
sts.add(new StorageType[]{StorageType.DISK,
StorageType.DISK,StorageType.DISK},hot);
sts.add(new StorageType[]{StorageType.DISK,
StorageType.DISK,StorageType.DISK,StorageType.DISK},hot);
Map<String, Long> actualOutput = convertToStringMap(sts);
Assert.assertEquals(4,actualOutput.size());
Map<String, Long> expectedOutput = new HashMap<>();
expectedOutput.put("HOT|DISK:1(HOT)", 1l);
expectedOutput.put("HOT|DISK:2(HOT)", 1l);
expectedOutput.put("HOT|DISK:3(HOT)", 1l);
expectedOutput.put("HOT|DISK:4(HOT)", 1l);
Assert.assertEquals(expectedOutput,actualOutput);
}
Example 10: testMultipleHotsWithDifferentCounts
import org.apache.hadoop.hdfs.protocol.BlockStoragePolicy; // import the required package/class
@Test
public void testMultipleHotsWithDifferentCounts() {
BlockStoragePolicySuite bsps = BlockStoragePolicySuite.createDefaultSuite();
StoragePolicySummary sts = new StoragePolicySummary(bsps.getAllPolicies());
BlockStoragePolicy hot = bsps.getPolicy("HOT");
sts.add(new StorageType[]{StorageType.DISK},hot);
sts.add(new StorageType[]{StorageType.DISK,StorageType.DISK},hot);
sts.add(new StorageType[]{StorageType.DISK,StorageType.DISK},hot);
sts.add(new StorageType[]{StorageType.DISK,
StorageType.DISK,StorageType.DISK},hot);
sts.add(new StorageType[]{StorageType.DISK,
StorageType.DISK,StorageType.DISK},hot);
sts.add(new StorageType[]{StorageType.DISK,
StorageType.DISK,StorageType.DISK,StorageType.DISK},hot);
Map<String, Long> actualOutput = convertToStringMap(sts);
Assert.assertEquals(4,actualOutput.size());
Map<String, Long> expectedOutput = new HashMap<>();
expectedOutput.put("HOT|DISK:1(HOT)", 1l);
expectedOutput.put("HOT|DISK:2(HOT)", 2l);
expectedOutput.put("HOT|DISK:3(HOT)", 2l);
expectedOutput.put("HOT|DISK:4(HOT)", 1l);
Assert.assertEquals(expectedOutput,actualOutput);
}
Example 11: computeQuotaDeltaForUCBlock
import org.apache.hadoop.hdfs.protocol.BlockStoragePolicy; // import the required package/class
/** Compute quota change for converting a complete block to a UC block. */
private static QuotaCounts computeQuotaDeltaForUCBlock(FSNamesystem fsn,
INodeFile file) {
final QuotaCounts delta = new QuotaCounts.Builder().build();
final BlockInfo lastBlock = file.getLastBlock();
if (lastBlock != null) {
final long diff = file.getPreferredBlockSize() - lastBlock.getNumBytes();
final short repl = lastBlock.getReplication();
delta.addStorageSpace(diff * repl);
final BlockStoragePolicy policy = fsn.getFSDirectory()
.getBlockStoragePolicySuite().getPolicy(file.getStoragePolicyID());
List<StorageType> types = policy.chooseStorageTypes(repl);
for (StorageType t : types) {
if (t.supportTypeQuota()) {
delta.addTypeSpace(t, diff);
}
}
}
return delta;
}
Example 12: computeContentSummary
import org.apache.hadoop.hdfs.protocol.BlockStoragePolicy; // import the required package/class
@Override
public final ContentSummaryComputationContext computeContentSummary(
int snapshotId, final ContentSummaryComputationContext summary) {
final ContentCounts counts = summary.getCounts();
counts.addContent(Content.FILE, 1);
final long fileLen = computeFileSize(snapshotId);
counts.addContent(Content.LENGTH, fileLen);
counts.addContent(Content.DISKSPACE, storagespaceConsumed(null)
.getStorageSpace());
if (getStoragePolicyID() != BLOCK_STORAGE_POLICY_ID_UNSPECIFIED){
BlockStoragePolicy bsp = summary.getBlockStoragePolicySuite().
getPolicy(getStoragePolicyID());
List<StorageType> storageTypes = bsp.chooseStorageTypes(getFileReplication());
for (StorageType t : storageTypes) {
if (!t.supportTypeQuota()) {
continue;
}
counts.addTypeSpace(t, fileLen);
}
}
return summary;
}
Example 13: getStoragePolicy
import org.apache.hadoop.hdfs.protocol.BlockStoragePolicy; // import the required package/class
static BlockStoragePolicy getStoragePolicy(FSDirectory fsd, BlockManager bm,
String path) throws IOException {
FSPermissionChecker pc = fsd.getPermissionChecker();
byte[][] pathComponents = FSDirectory
.getPathComponentsForReservedPath(path);
fsd.readLock();
try {
path = fsd.resolvePath(pc, path, pathComponents);
final INodesInPath iip = fsd.getINodesInPath(path, false);
if (fsd.isPermissionEnabled()) {
fsd.checkPathAccess(pc, iip, FsAction.READ);
}
INode inode = iip.getLastINode();
if (inode == null) {
throw new FileNotFoundException("File/Directory does not exist: "
+ iip.getPath());
}
return bm.getStoragePolicy(inode.getStoragePolicyID());
} finally {
fsd.readUnlock();
}
}
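Example 13 resolves a path's policy on the NameNode side. As a hedged, illustrative client-side counterpart, the sketch below assigns a named policy to a directory and then locates that policy in the cluster-wide list (the same getStoragePolicies() call used in Example 6). The path /archive, the policy name "COLD", and the class name are assumptions, and DistributedFileSystem#setStoragePolicy(Path, String) is assumed to be available (Hadoop 2.6 or later).
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.protocol.BlockStoragePolicy;

public class AssignColdPolicy {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    DistributedFileSystem dfs = (DistributedFileSystem) FileSystem.get(conf);
    Path dir = new Path("/archive");   // illustrative path
    // The name must match one of the policies configured on the NameNode.
    dfs.setStoragePolicy(dir, "COLD");
    // Print the full definition of the policy that was just assigned by
    // scanning the list of all configured policies.
    for (BlockStoragePolicy policy : dfs.getStoragePolicies()) {
      if ("COLD".equals(policy.getName())) {
        System.out.println("Assigned policy: " + policy);
      }
    }
    dfs.close();
  }
}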
Example 14: cleanFile
import org.apache.hadoop.hdfs.protocol.BlockStoragePolicy; // import the required package/class
public void cleanFile(INode.ReclaimContext reclaimContext,
final INodeFile file, final int snapshotId, int priorSnapshotId,
byte storagePolicyId) {
if (snapshotId == Snapshot.CURRENT_STATE_ID) {
// delete the current file while the file has snapshot feature
if (!isCurrentFileDeleted()) {
file.recordModification(priorSnapshotId);
deleteCurrentFile();
}
final BlockStoragePolicy policy = reclaimContext.storagePolicySuite()
.getPolicy(storagePolicyId);
QuotaCounts old = file.storagespaceConsumed(policy);
collectBlocksAndClear(reclaimContext, file);
QuotaCounts current = file.storagespaceConsumed(policy);
reclaimContext.quotaDelta().add(old.subtract(current));
} else { // delete the snapshot
priorSnapshotId = getDiffs().updatePrior(snapshotId, priorSnapshotId);
diffs.deleteSnapshotDiff(reclaimContext, snapshotId, priorSnapshotId,
file);
}
}
Example 15: getStoragePolicies
import org.apache.hadoop.hdfs.protocol.BlockStoragePolicy; // import the required package/class
@Override
public GetStoragePoliciesResponseProto getStoragePolicies(
RpcController controller, GetStoragePoliciesRequestProto request)
throws ServiceException {
try {
BlockStoragePolicy[] policies = server.getStoragePolicies();
GetStoragePoliciesResponseProto.Builder builder =
GetStoragePoliciesResponseProto.newBuilder();
if (policies == null) {
return builder.build();
}
for (BlockStoragePolicy policy : policies) {
builder.addPolicies(PBHelperClient.convert(policy));
}
return builder.build();
} catch (IOException e) {
throw new ServiceException(e);
}
}
Developer: aliyun-beta, Project: aliyun-oss-hadoop-fs, Lines of code: 20, Source file: ClientNamenodeProtocolServerSideTranslatorPB.java