当前位置: 首页>>代码示例>>Java>>正文


Java BlocksMapUpdateInfo.addDeleteBlock方法代码示例

本文整理汇总了Java中org.apache.hadoop.hdfs.server.namenode.INode.BlocksMapUpdateInfo.addDeleteBlock方法的典型用法代码示例。如果您正苦于以下问题:Java BlocksMapUpdateInfo.addDeleteBlock方法的具体用法?Java BlocksMapUpdateInfo.addDeleteBlock怎么用?Java BlocksMapUpdateInfo.addDeleteBlock使用的例子?那么恭喜您, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在org.apache.hadoop.hdfs.server.namenode.INode.BlocksMapUpdateInfo的用法示例。


在下文中一共展示了BlocksMapUpdateInfo.addDeleteBlock方法的8个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Java代码示例。

示例1: cleanZeroSizeBlock

import org.apache.hadoop.hdfs.server.namenode.INode.BlocksMapUpdateInfo; //导入方法依赖的package包/类
/**
 * When deleting a file in the current fs directory, and the file is contained
 * in a snapshot, we should delete the last block if it's under construction
 * and its size is 0.
 */
/**
 * Cleans up the trailing block of a file that is being deleted from the
 * current directory while still referenced by a snapshot: when the last
 * block is under construction and holds zero bytes, queue it for deletion
 * and drop it from the file's block list.
 */
void cleanZeroSizeBlock(final INodeFile f,
    final BlocksMapUpdateInfo collectedBlocks) {
  final BlockInfoContiguous[] blocks = f.getBlocks();
  if (blocks == null || blocks.length == 0) {
    return;
  }
  final BlockInfoContiguous tail = blocks[blocks.length - 1];
  if (!(tail instanceof BlockInfoContiguousUnderConstruction)) {
    return;
  }
  final BlockInfoContiguousUnderConstruction ucTail =
      (BlockInfoContiguousUnderConstruction) tail;
  if (ucTail.getNumBytes() == 0) {
    // A zero-length block carries no data, so its UC state is irrelevant.
    collectedBlocks.addDeleteBlock(ucTail);
    f.removeLastBlock(ucTail);
  }
}
 
开发者ID:naver,项目名称:hadoop,代码行数:20,代码来源:FileUnderConstructionFeature.java

示例2: destroyAndCollectSnapshotBlocks

import org.apache.hadoop.hdfs.server.namenode.INode.BlocksMapUpdateInfo; //导入方法依赖的package包/类
/**
 * Queues every block recorded by this diff for deletion and releases the
 * reference to them. A no-op when this diff holds no blocks or when no
 * collector was supplied.
 */
public void destroyAndCollectSnapshotBlocks(
    BlocksMapUpdateInfo collectedBlocks) {
  if (blocks != null && collectedBlocks != null) {
    for (BlockInfoContiguous block : blocks) {
      collectedBlocks.addDeleteBlock(block);
    }
    blocks = null;
  }
}
 
开发者ID:naver,项目名称:hadoop,代码行数:9,代码来源:FileDiff.java

示例3: cleanZeroSizeBlock

import org.apache.hadoop.hdfs.server.namenode.INode.BlocksMapUpdateInfo; //导入方法依赖的package包/类
/**
 * When deleting a file in the current fs directory, and the file is contained
 * in a snapshot, we should delete the last block if it's under construction
 * and its size is 0.
 */
/**
 * Cleans up the trailing block of a file that is being deleted from the
 * current directory while still referenced by a snapshot: when the last
 * block is not yet complete and holds zero bytes, queue it for deletion
 * and drop it from the file's block list.
 */
void cleanZeroSizeBlock(final INodeFile f,
    final BlocksMapUpdateInfo collectedBlocks) {
  final BlockInfo[] blocks = f.getBlocks();
  if (blocks == null || blocks.length == 0) {
    return;
  }
  final BlockInfo tail = blocks[blocks.length - 1];
  // Only an incomplete (under construction) trailing block is eligible.
  if (!tail.isComplete() && tail.getNumBytes() == 0) {
    // A zero-length block carries no data, so its UC state is irrelevant.
    collectedBlocks.addDeleteBlock(tail);
    f.removeLastBlock(tail);
  }
}
 
开发者ID:aliyun-beta,项目名称:aliyun-oss-hadoop-fs,代码行数:19,代码来源:FileUnderConstructionFeature.java

示例4: destroyAndCollectSnapshotBlocks

import org.apache.hadoop.hdfs.server.namenode.INode.BlocksMapUpdateInfo; //导入方法依赖的package包/类
/**
 * Queues every block recorded by this diff for deletion and releases the
 * reference to them. A no-op when this diff holds no blocks or when no
 * collector was supplied.
 */
public void destroyAndCollectSnapshotBlocks(
    BlocksMapUpdateInfo collectedBlocks) {
  if (blocks != null && collectedBlocks != null) {
    for (BlockInfo block : blocks) {
      collectedBlocks.addDeleteBlock(block);
    }
    blocks = null;
  }
}
 
开发者ID:aliyun-beta,项目名称:aliyun-oss-hadoop-fs,代码行数:11,代码来源:FileDiff.java

示例5: cleanZeroSizeBlock

import org.apache.hadoop.hdfs.server.namenode.INode.BlocksMapUpdateInfo; //导入方法依赖的package包/类
/**
 * When deleting a file in the current fs directory, and the file is contained
 * in a snapshot, we should delete the last block if it's under construction
 * and its size is 0.
 */
/**
 * Cleans up the trailing block of a file that is being deleted from the
 * current directory while still referenced by a snapshot: when the last
 * block is under construction and holds zero bytes, queue it for deletion
 * and drop it from the file's block list.
 */
void cleanZeroSizeBlock(final INodeFile f,
    final BlocksMapUpdateInfo collectedBlocks) {
  final BlockInfo[] blocks = f.getBlocks();
  if (blocks == null || blocks.length == 0) {
    return;
  }
  final BlockInfo tail = blocks[blocks.length - 1];
  if (!(tail instanceof BlockInfoUnderConstruction)) {
    return;
  }
  final BlockInfoUnderConstruction ucTail = (BlockInfoUnderConstruction) tail;
  if (ucTail.getNumBytes() == 0) {
    // A zero-length block carries no data, so its UC state is irrelevant.
    collectedBlocks.addDeleteBlock(ucTail);
    f.removeLastBlock(ucTail);
  }
}
 
开发者ID:Nextzero,项目名称:hadoop-2.6.0-cdh5.4.3,代码行数:20,代码来源:FileUnderConstructionFeature.java

示例6: collectBlocksBeyondMax

import org.apache.hadoop.hdfs.server.namenode.INode.BlocksMapUpdateInfo; //导入方法依赖的package包/类
/**
 * Truncates {@code file}'s block list so that the kept blocks span at
 * least {@code max} bytes, and adds every block past that point to
 * {@code collectedBlocks} (when non-null) for later deletion.
 */
private void collectBlocksBeyondMax(final INodeFile file, final long max,
    final BlocksMapUpdateInfo collectedBlocks) {
  final BlockInfo[] oldBlocks = file.getBlocks();
  if (oldBlocks == null) {
    return;
  }
  // Count the smallest prefix of blocks whose total size reaches max.
  int keep = 0;
  long covered = 0;
  while (keep < oldBlocks.length && covered < max) {
    covered += oldBlocks[keep].getNumBytes();
    keep++;
  }
  if (keep >= oldBlocks.length) {
    // Every block is within max; nothing to truncate or collect.
    return;
  }
  // Shrink the file's block array to just the kept prefix.
  final BlockInfo[] trimmed;
  if (keep == 0) {
    trimmed = BlockInfo.EMPTY_ARRAY;
  } else {
    trimmed = new BlockInfo[keep];
    System.arraycopy(oldBlocks, 0, trimmed, 0, keep);
  }
  file.setBlocks(trimmed);
  // Queue the blocks beyond max for deletion.
  if (collectedBlocks != null) {
    for (int i = keep; i < oldBlocks.length; i++) {
      collectedBlocks.addDeleteBlock(oldBlocks[i]);
    }
  }
}
 
开发者ID:Nextzero,项目名称:hadoop-2.6.0-cdh5.4.3,代码行数:34,代码来源:FileWithSnapshotFeature.java

示例7: collectBlocksBeyondMax

import org.apache.hadoop.hdfs.server.namenode.INode.BlocksMapUpdateInfo; //导入方法依赖的package包/类
/**
 * Truncates {@code file}'s block list so that the kept blocks span at
 * least {@code max} bytes, and adds every block past that point to
 * {@code collectedBlocks} (when non-null) for later deletion.
 */
private static void collectBlocksBeyondMax(final FileWithSnapshot file,
    final long max, final BlocksMapUpdateInfo collectedBlocks) {
  final BlockInfo[] oldBlocks = file.asINodeFile().getBlocks();
  if (oldBlocks == null) {
    return;
  }
  // Count the smallest prefix of blocks whose total size reaches max.
  int keep = 0;
  long covered = 0;
  while (keep < oldBlocks.length && covered < max) {
    covered += oldBlocks[keep].getNumBytes();
    keep++;
  }
  if (keep >= oldBlocks.length) {
    // Every block is within max; nothing to truncate or collect.
    return;
  }
  // Shrink the file's block array to just the kept prefix.
  final BlockInfo[] trimmed;
  if (keep == 0) {
    trimmed = null;
  } else {
    trimmed = new BlockInfo[keep];
    System.arraycopy(oldBlocks, 0, trimmed, 0, keep);
  }
  file.asINodeFile().setBlocks(trimmed);
  // Queue the blocks beyond max for deletion.
  if (collectedBlocks != null) {
    for (int i = keep; i < oldBlocks.length; i++) {
      collectedBlocks.addDeleteBlock(oldBlocks[i]);
    }
  }
}
 
开发者ID:ict-carch,项目名称:hadoop-plus,代码行数:34,代码来源:FileWithSnapshot.java

示例8: collectBlocksBeyondMax

import org.apache.hadoop.hdfs.server.namenode.INode.BlocksMapUpdateInfo; //导入方法依赖的package包/类
/**
 * Truncates {@code file}'s block list so that the kept blocks span at
 * least {@code max} bytes, and adds every block past that point to
 * {@code collectedBlocks} (when non-null) for later deletion.
 */
private void collectBlocksBeyondMax(final INodeFile file, final long max,
    final BlocksMapUpdateInfo collectedBlocks) {
  final BlockInfo[] oldBlocks = file.getBlocks();
  if (oldBlocks == null) {
    return;
  }
  // Count the smallest prefix of blocks whose total size reaches max.
  int keep = 0;
  long covered = 0;
  while (keep < oldBlocks.length && covered < max) {
    covered += oldBlocks[keep].getNumBytes();
    keep++;
  }
  if (keep >= oldBlocks.length) {
    // Every block is within max; nothing to truncate or collect.
    return;
  }
  // Shrink the file's block array to just the kept prefix.
  final BlockInfo[] trimmed;
  if (keep == 0) {
    trimmed = null;
  } else {
    trimmed = new BlockInfo[keep];
    System.arraycopy(oldBlocks, 0, trimmed, 0, keep);
  }
  file.setBlocks(trimmed);
  // Queue the blocks beyond max for deletion.
  if (collectedBlocks != null) {
    for (int i = keep; i < oldBlocks.length; i++) {
      collectedBlocks.addDeleteBlock(oldBlocks[i]);
    }
  }
}
 
开发者ID:Seagate,项目名称:hadoop-on-lustre2,代码行数:34,代码来源:FileWithSnapshotFeature.java


注:本文中的org.apache.hadoop.hdfs.server.namenode.INode.BlocksMapUpdateInfo.addDeleteBlock方法示例由纯净天空整理自Github/MSDocs等开源代码及文档管理平台,相关代码片段筛选自各路编程大神贡献的开源项目,源码版权归原作者所有,传播和使用请参考对应项目的License;未经允许,请勿转载。