Java WithCount Class Code Examples

This article collects typical usage examples of the Java class org.apache.hadoop.hdfs.server.namenode.INodeReference.WithCount. If you are wondering what the WithCount class does, how to use it, or what it looks like in practice, the selected code examples below should help.


The WithCount class belongs to the org.apache.hadoop.hdfs.server.namenode.INodeReference package (it is a nested class of INodeReference). Ten code examples of the WithCount class are shown below, sorted by popularity by default.
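All of the examples below revolve around the same reference-counting pattern: after a rename, the original inode is wrapped in a shared WithCount, and one or more named references (WithName on the snapshot side, DstReference at the rename destination) point to that wrapper. Before reading the real HDFS code, here is a minimal, self-contained sketch of that idea. The Node and NamedRef classes are hypothetical stand-ins invented for illustration, not the real HDFS API; only the overall shape mirrors the WithCount getReferenceCount()/removeReference() calls seen in Examples 5-7.

import java.util.ArrayList;
import java.util.List;

// Simplified, hypothetical analogue of the WithCount pattern (not the HDFS classes).
public class RefCountSketch {

  /** Stands in for the shared inode being referenced. */
  static class Node {
    final String name;
    Node(String name) { this.name = name; }
  }

  /** Simplified WithCount: wraps the shared node and tracks its references. */
  static class WithCount {
    final Node referred;
    private final List<NamedRef> refs = new ArrayList<>();
    WithCount(Node referred) { this.referred = referred; }
    void addReference(NamedRef ref) { refs.add(ref); }
    void removeReference(NamedRef ref) { refs.remove(ref); }
    int getReferenceCount() { return refs.size(); }
  }

  /** Simplified WithName/DstReference: a named pointer to the shared WithCount. */
  static class NamedRef {
    final String name;
    final WithCount target;
    NamedRef(String name, WithCount target) {
      this.name = name;
      this.target = target;
      target.addReference(this);
    }
  }

  public static void main(String[] args) {
    WithCount withCount = new WithCount(new Node("foo"));

    // A rename leaves one reference in the snapshot and one at the new path;
    // both share the same WithCount.
    NamedRef inSnapshot = new NamedRef("foo@s0", withCount);
    NamedRef atNewPath = new NamedRef("bar", withCount);
    System.out.println(withCount.getReferenceCount()); // 2

    // Deleting the renamed file drops one reference.
    withCount.removeReference(atNewPath);
    System.out.println(withCount.getReferenceCount()); // 1
    System.out.println(inSnapshot.name + " still refers to " + withCount.referred.name);
  }
}

Running the sketch prints 2 and then 1, mirroring the reference counts asserted in Example 7 (testRenameFromSDir2NonSDir) below.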

Example 1: loadINodeReference

import org.apache.hadoop.hdfs.server.namenode.INodeReference.WithCount; // import the required package/class
private INodeReference loadINodeReference(
    INodeReferenceSection.INodeReference r) throws IOException {
  long referredId = r.getReferredId();
  INode referred = fsDir.getInode(referredId);
  WithCount withCount = (WithCount) referred.getParentReference();
  if (withCount == null) {
    withCount = new INodeReference.WithCount(null, referred);
  }
  final INodeReference ref;
  if (r.hasDstSnapshotId()) { // DstReference
    ref = new INodeReference.DstReference(null, withCount,
        r.getDstSnapshotId());
  } else {
    ref = new INodeReference.WithName(null, withCount, r.getName()
        .toByteArray(), r.getLastSnapshotId());
  }
  return ref;
}
 
Developer: naver, Project: hadoop, Lines: 19, Source: FSImageFormatPBSnapshot.java

Example 2: replaceChild4ReferenceWithName

import org.apache.hadoop.hdfs.server.namenode.INodeReference.WithCount; // import the required package/class
INodeReference.WithName replaceChild4ReferenceWithName(INode oldChild,
    int latestSnapshotId) {
  Preconditions.checkArgument(latestSnapshotId != Snapshot.CURRENT_STATE_ID);
  if (oldChild instanceof INodeReference.WithName) {
    return (INodeReference.WithName)oldChild;
  }

  final INodeReference.WithCount withCount;
  if (oldChild.isReference()) {
    Preconditions.checkState(oldChild instanceof INodeReference.DstReference);
    withCount = (INodeReference.WithCount) oldChild.asReference()
        .getReferredINode();
  } else {
    withCount = new INodeReference.WithCount(null, oldChild);
  }
  final INodeReference.WithName ref = new INodeReference.WithName(this,
      withCount, oldChild.getLocalNameBytes(), latestSnapshotId);
  replaceChild(oldChild, ref, null);
  return ref;
}
 
Developer: naver, Project: hadoop, Lines: 21, Source: INodeDirectory.java

Example 3: replaceChild4ReferenceWithName

import org.apache.hadoop.hdfs.server.namenode.INodeReference.WithCount; // import the required package/class
INodeReference.WithName replaceChild4ReferenceWithName(INode oldChild,
    Snapshot latest) {
  Preconditions.checkArgument(latest != null);
  if (oldChild instanceof INodeReference.WithName) {
    return (INodeReference.WithName)oldChild;
  }

  final INodeReference.WithCount withCount;
  if (oldChild.isReference()) {
    Preconditions.checkState(oldChild instanceof INodeReference.DstReference);
    withCount = (INodeReference.WithCount) oldChild.asReference()
        .getReferredINode();
  } else {
    withCount = new INodeReference.WithCount(null, oldChild);
  }
  final INodeReference.WithName ref = new INodeReference.WithName(this,
      withCount, oldChild.getLocalNameBytes(), latest.getId());
  replaceChild(oldChild, ref, null);
  return ref;
}
 
Developer: ict-carch, Project: hadoop-plus, Lines: 21, Source: INodeDirectory.java

Example 4: findRenameTargetPath

import org.apache.hadoop.hdfs.server.namenode.INodeReference.WithCount; // import the required package/class
/**
 * We just found a deleted WithName node as the source of a rename operation.
 * However, we should include it in our snapshot diff report as rename only
 * if the rename target is also under the same snapshottable directory.
 */
private byte[][] findRenameTargetPath(final INodeDirectory snapshotRoot,
    INodeReference.WithName wn, final int snapshotId) {
  INode inode = wn.getReferredINode();
  final LinkedList<byte[]> ancestors = Lists.newLinkedList();
  while (inode != null) {
    if (inode == snapshotRoot) {
      return ancestors.toArray(new byte[ancestors.size()][]);
    }
    if (inode instanceof INodeReference.WithCount) {
      inode = ((WithCount) inode).getParentRef(snapshotId);
    } else {
      INode parent = inode.getParentReference() != null ? inode
          .getParentReference() : inode.getParent();
      if (parent != null && parent instanceof INodeDirectory) {
        int sid = parent.asDirectory().searchChild(inode);
        if (sid < snapshotId) {
          return null;
        }
      }
      if (!(parent instanceof WithCount)) {
        ancestors.addFirst(inode.getLocalNameBytes());
      }
      inode = parent;
    }
  }
  return null;
}
 
Developer: naver, Project: hadoop, Lines: 33, Source: DirectorySnapshottableFeature.java

Example 5: replaceChild

import org.apache.hadoop.hdfs.server.namenode.INodeReference.WithCount; // import the required package/class
/**
 * Replace the given child with a new child. Note that we no longer need to
 * replace a normal INodeDirectory or INodeFile with an
 * INodeDirectoryWithSnapshot or INodeFileUnderConstruction. The only case
 * for child replacement is for reference nodes.
 */
public void replaceChild(INode oldChild, final INode newChild,
    final INodeMap inodeMap) {
  Preconditions.checkNotNull(children);
  final int i = searchChildren(newChild.getLocalNameBytes());
  Preconditions.checkState(i >= 0);
  Preconditions.checkState(oldChild == children.get(i)
      || oldChild == children.get(i).asReference().getReferredINode()
          .asReference().getReferredINode());
  oldChild = children.get(i);
  
  if (oldChild.isReference() && newChild.isReference()) {
    // both are reference nodes, e.g., DstReference -> WithName
    final INodeReference.WithCount withCount = 
        (WithCount) oldChild.asReference().getReferredINode();
    withCount.removeReference(oldChild.asReference());
  }
  children.set(i, newChild);
  
  // replace the instance in the created list of the diff list
  DirectoryWithSnapshotFeature sf = this.getDirectoryWithSnapshotFeature();
  if (sf != null) {
    sf.getDiffs().replaceChild(ListType.CREATED, oldChild, newChild);
  }
  
  // update the inodeMap
  if (inodeMap != null) {
    inodeMap.put(newChild);
  }    
}
 
Developer: naver, Project: hadoop, Lines: 36, Source: INodeDirectory.java

Example 6: replaceChild

import org.apache.hadoop.hdfs.server.namenode.INodeReference.WithCount; // import the required package/class
/**
 * Replace the given child with a new child. Note that we no longer need to
 * replace a normal INodeDirectory or INodeFile with an
 * INodeDirectoryWithSnapshot or INodeFileUnderConstruction. The only cases
 * for child replacement are for {@link INodeDirectorySnapshottable} and
 * reference nodes.
 */
public void replaceChild(INode oldChild, final INode newChild,
    final INodeMap inodeMap) {
  Preconditions.checkNotNull(children);
  final int i = searchChildren(newChild.getLocalNameBytes());
  Preconditions.checkState(i >= 0);
  Preconditions.checkState(oldChild == children.get(i)
      || oldChild == children.get(i).asReference().getReferredINode()
          .asReference().getReferredINode());
  oldChild = children.get(i);
  
  if (oldChild.isReference() && newChild.isReference()) {
    // both are reference nodes, e.g., DstReference -> WithName
    final INodeReference.WithCount withCount = 
        (WithCount) oldChild.asReference().getReferredINode();
    withCount.removeReference(oldChild.asReference());
  }
  children.set(i, newChild);
  
  // replace the instance in the created list of the diff list
  DirectoryWithSnapshotFeature sf = this.getDirectoryWithSnapshotFeature();
  if (sf != null) {
    sf.getDiffs().replaceChild(ListType.CREATED, oldChild, newChild);
  }
  
  // update the inodeMap
  if (inodeMap != null) {
    inodeMap.put(newChild);
  }    
}
 
Developer: Seagate, Project: hadoop-on-lustre2, Lines: 37, Source: INodeDirectory.java

Example 7: testRenameFromSDir2NonSDir

import org.apache.hadoop.hdfs.server.namenode.INodeReference.WithCount; // import the required package/class
@Test (timeout=300000)
public void testRenameFromSDir2NonSDir() throws Exception {
  final String dirStr = "/testRenameWithSnapshot";
  final String abcStr = dirStr + "/abc";
  final Path abc = new Path(abcStr);
  hdfs.mkdirs(abc, new FsPermission((short)0777));
  hdfs.allowSnapshot(abc);

  final Path foo = new Path(abc, "foo");
  DFSTestUtil.createFile(hdfs, foo, BLOCKSIZE, REPL, SEED);
  hdfs.createSnapshot(abc, "s0");
  
  try {
    hdfs.rename(abc, new Path(dirStr, "tmp"));
    fail("Expect exception since " + abc
        + " is snapshottable and already has snapshots");
  } catch (IOException e) {
    GenericTestUtils.assertExceptionContains(abcStr
        + " is snapshottable and already has snapshots", e);
  }

  final String xyzStr = dirStr + "/xyz";
  final Path xyz = new Path(xyzStr);
  hdfs.mkdirs(xyz, new FsPermission((short)0777));
  final Path bar = new Path(xyz, "bar");
  hdfs.rename(foo, bar);
  
  final INode fooRef = fsdir.getINode(
      SnapshotTestHelper.getSnapshotPath(abc, "s0", "foo").toString());
  Assert.assertTrue(fooRef.isReference());
  Assert.assertTrue(fooRef.asReference() instanceof INodeReference.WithName);

  final INodeReference.WithCount withCount
      = (INodeReference.WithCount)fooRef.asReference().getReferredINode();
  Assert.assertEquals(2, withCount.getReferenceCount());

  final INode barRef = fsdir.getINode(bar.toString());
  Assert.assertTrue(barRef.isReference());

  Assert.assertSame(withCount, barRef.asReference().getReferredINode());
  
  hdfs.delete(bar, false);
  Assert.assertEquals(1, withCount.getReferenceCount());
}
 
Developer: naver, Project: hadoop, Lines: 45, Source: TestRenameWithSnapshots.java

Example 8: testRenameUndo_4

import org.apache.hadoop.hdfs.server.namenode.INodeReference.WithCount; // import the required package/class
/**
 * Test undo where the dst node being overwritten is a reference node
 */
@Test
public void testRenameUndo_4() throws Exception {
  final Path sdir1 = new Path("/dir1");
  final Path sdir2 = new Path("/dir2");
  final Path sdir3 = new Path("/dir3");
  hdfs.mkdirs(sdir1);
  hdfs.mkdirs(sdir2);
  hdfs.mkdirs(sdir3);
  
  final Path foo = new Path(sdir1, "foo");
  final Path bar = new Path(foo, "bar");
  DFSTestUtil.createFile(hdfs, bar, BLOCKSIZE, REPL, SEED);
  
  final Path foo2 = new Path(sdir2, "foo2");
  hdfs.mkdirs(foo2);
  
  SnapshotTestHelper.createSnapshot(hdfs, sdir1, "s1");
  SnapshotTestHelper.createSnapshot(hdfs, sdir2, "s2");
  
  // rename foo2 to foo3, so that foo3 will be a reference node
  final Path foo3 = new Path(sdir3, "foo3");
  hdfs.rename(foo2, foo3);
  
  INode foo3Node = fsdir.getINode4Write(foo3.toString());
  assertTrue(foo3Node.isReference());
  
  INodeDirectory dir3 = fsdir.getINode4Write(sdir3.toString()).asDirectory();
  INodeDirectory mockDir3 = spy(dir3);
  // fail the rename but succeed in undo
  doReturn(false).when(mockDir3).addChild((INode) Mockito.isNull(),
      anyBoolean(), Mockito.anyInt());
  Mockito.when(mockDir3.addChild((INode) Mockito.isNotNull(), anyBoolean(), 
      Mockito.anyInt())).thenReturn(false).thenCallRealMethod();
  INodeDirectory root = fsdir.getINode4Write("/").asDirectory();
  root.replaceChild(dir3, mockDir3, fsdir.getINodeMap());
  foo3Node.setParent(mockDir3);
  
  try {
    hdfs.rename(foo, foo3, Rename.OVERWRITE);
    fail("the rename from " + foo + " to " + foo3 + " should fail");
  } catch (IOException e) {
    GenericTestUtils.assertExceptionContains("rename from " + foo + " to "
        + foo3 + " failed.", e);
  }
  
  // make sure the undo is correct
  final INode foo3Node_undo = fsdir.getINode4Write(foo3.toString());
  assertSame(foo3Node, foo3Node_undo);
  INodeReference.WithCount foo3_wc = (WithCount) foo3Node.asReference()
      .getReferredINode();
  assertEquals(2, foo3_wc.getReferenceCount());
  assertSame(foo3Node, foo3_wc.getParentReference());
}
 
Developer: naver, Project: hadoop, Lines: 57, Source: TestRenameWithSnapshots.java

Example 9: testRenameDirAndDeleteSnapshot_3

import org.apache.hadoop.hdfs.server.namenode.INodeReference.WithCount; // import the required package/class
/**
 * After the following operations:
 * Rename a dir -> create a snapshot s on dst tree -> delete the renamed dir
 * -> delete snapshot s on dst tree
 * 
 * Make sure we destroy everything created after the rename under the renamed
 * dir.
 */
@Test
public void testRenameDirAndDeleteSnapshot_3() throws Exception {
  final Path sdir1 = new Path("/dir1");
  final Path sdir2 = new Path("/dir2");
  final Path foo = new Path(sdir1, "foo");
  final Path bar = new Path(foo, "bar");
  DFSTestUtil.createFile(hdfs, bar, BLOCKSIZE, REPL, SEED);
  hdfs.mkdirs(sdir2);
  
  SnapshotTestHelper.createSnapshot(hdfs, sdir1, "s1");
  SnapshotTestHelper.createSnapshot(hdfs, sdir2, "s2");
  
  final Path foo2 = new Path(sdir2, "foo");
  hdfs.rename(foo, foo2);
  
  // create two new files under foo2
  final Path bar2 = new Path(foo2, "bar2");
  DFSTestUtil.createFile(hdfs, bar2, BLOCKSIZE, REPL, SEED);
  final Path bar3 = new Path(foo2, "bar3");
  DFSTestUtil.createFile(hdfs, bar3, BLOCKSIZE, REPL, SEED);
  
  // create a new snapshot on sdir2
  hdfs.createSnapshot(sdir2, "s3");
  
  // delete foo2
  hdfs.delete(foo2, true);
  // delete s3
  hdfs.deleteSnapshot(sdir2, "s3");
  
  // check
  final INodeDirectory dir1Node = fsdir.getINode4Write(sdir1.toString())
      .asDirectory();
  QuotaCounts q1 = dir1Node.getDirectoryWithQuotaFeature().getSpaceConsumed();
  assertEquals(3, q1.getNameSpace());
  final INodeDirectory dir2Node = fsdir.getINode4Write(sdir2.toString())
      .asDirectory();
  QuotaCounts q2 = dir2Node.getDirectoryWithQuotaFeature().getSpaceConsumed();
  assertEquals(1, q2.getNameSpace());
  
  final Path foo_s1 = SnapshotTestHelper.getSnapshotPath(sdir1, "s1",
      foo.getName());
  INode fooRef = fsdir.getINode(foo_s1.toString());
  assertTrue(fooRef instanceof INodeReference.WithName);
  INodeReference.WithCount wc = 
      (WithCount) fooRef.asReference().getReferredINode();
  assertEquals(1, wc.getReferenceCount());
  INodeDirectory fooNode = wc.getReferredINode().asDirectory();
  ReadOnlyList<INode> children = fooNode
      .getChildrenList(Snapshot.CURRENT_STATE_ID);
  assertEquals(1, children.size());
  assertEquals(bar.getName(), children.get(0).getLocalName());
  List<DirectoryDiff> diffList = fooNode.getDiffs().asList();
  assertEquals(1, diffList.size());
  Snapshot s1 = dir1Node.getSnapshot(DFSUtil.string2Bytes("s1"));
  assertEquals(s1.getId(), diffList.get(0).getSnapshotId());
  ChildrenDiff diff = diffList.get(0).getChildrenDiff();
  assertEquals(0, diff.getList(ListType.CREATED).size());
  assertEquals(0, diff.getList(ListType.DELETED).size());
  
  restartClusterAndCheckImage(true);
}
 
Developer: naver, Project: hadoop, Lines: 70, Source: TestRenameWithSnapshots.java

Example 10: testRenameDirAndDeleteSnapshot_4

import org.apache.hadoop.hdfs.server.namenode.INodeReference.WithCount; // import the required package/class
/**
 * After the following operations:
 * Rename a dir -> create a snapshot s on dst tree -> rename the renamed dir
 * again -> delete snapshot s on dst tree
 * 
 * Make sure we only delete the snapshot s under the renamed dir.
 */
@Test
public void testRenameDirAndDeleteSnapshot_4() throws Exception {
  final Path sdir1 = new Path("/dir1");
  final Path sdir2 = new Path("/dir2");
  final Path foo = new Path(sdir1, "foo");
  final Path bar = new Path(foo, "bar");
  DFSTestUtil.createFile(hdfs, bar, BLOCKSIZE, REPL, SEED);
  hdfs.mkdirs(sdir2);
  
  SnapshotTestHelper.createSnapshot(hdfs, sdir1, "s1");
  SnapshotTestHelper.createSnapshot(hdfs, sdir2, "s2");
  
  final Path foo2 = new Path(sdir2, "foo");
  hdfs.rename(foo, foo2);
  
  // create two new files under foo2
  final Path bar2 = new Path(foo2, "bar2");
  DFSTestUtil.createFile(hdfs, bar2, BLOCKSIZE, REPL, SEED);
  final Path bar3 = new Path(foo2, "bar3");
  DFSTestUtil.createFile(hdfs, bar3, BLOCKSIZE, REPL, SEED);
  
  // create a new snapshot on sdir2
  hdfs.createSnapshot(sdir2, "s3");
  
  // rename foo2 again
  hdfs.rename(foo2, foo);
  // delete snapshot s3
  hdfs.deleteSnapshot(sdir2, "s3");
  
  // check
  final INodeDirectory dir1Node = fsdir.getINode4Write(sdir1.toString())
      .asDirectory();
  // sdir1 + s1 + foo_s1 (foo) + foo (foo + s1 + bar~bar3)
  QuotaCounts q1 = dir1Node.getDirectoryWithQuotaFeature().getSpaceConsumed();
  assertEquals(7, q1.getNameSpace());
  final INodeDirectory dir2Node = fsdir.getINode4Write(sdir2.toString())
      .asDirectory();
  QuotaCounts q2 = dir2Node.getDirectoryWithQuotaFeature().getSpaceConsumed();
  assertEquals(1, q2.getNameSpace());
  
  final Path foo_s1 = SnapshotTestHelper.getSnapshotPath(sdir1, "s1",
      foo.getName());
  final INode fooRef = fsdir.getINode(foo_s1.toString());
  assertTrue(fooRef instanceof INodeReference.WithName);
  INodeReference.WithCount wc = 
      (WithCount) fooRef.asReference().getReferredINode();
  assertEquals(2, wc.getReferenceCount());
  INodeDirectory fooNode = wc.getReferredINode().asDirectory();
  ReadOnlyList<INode> children = fooNode
      .getChildrenList(Snapshot.CURRENT_STATE_ID);
  assertEquals(3, children.size());
  assertEquals(bar.getName(), children.get(0).getLocalName());
  assertEquals(bar2.getName(), children.get(1).getLocalName());
  assertEquals(bar3.getName(), children.get(2).getLocalName());
  List<DirectoryDiff> diffList = fooNode.getDiffs().asList();
  assertEquals(1, diffList.size());
  Snapshot s1 = dir1Node.getSnapshot(DFSUtil.string2Bytes("s1"));
  assertEquals(s1.getId(), diffList.get(0).getSnapshotId());
  ChildrenDiff diff = diffList.get(0).getChildrenDiff();
  // bar2 and bar3 in the created list
  assertEquals(2, diff.getList(ListType.CREATED).size());
  assertEquals(0, diff.getList(ListType.DELETED).size());
  
  final INode fooRef2 = fsdir.getINode4Write(foo.toString());
  assertTrue(fooRef2 instanceof INodeReference.DstReference);
  INodeReference.WithCount wc2 = 
      (WithCount) fooRef2.asReference().getReferredINode();
  assertSame(wc, wc2);
  assertSame(fooRef2, wc.getParentReference());
  
  restartClusterAndCheckImage(true);
}
 
Developer: naver, Project: hadoop, Lines: 80, Source: TestRenameWithSnapshots.java


Note: The org.apache.hadoop.hdfs.server.namenode.INodeReference.WithCount class examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by their original authors; copyright remains with those authors, and any distribution or use should follow the corresponding project's license. Do not republish without permission.