Example source code for the Java class org.apache.hadoop.hdfs.server.namenode.INodeMap
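The snippets below are collected from Apache Hadoop and several derived projects (aliyun-oss-hadoop-fs, big-c, hadoop-2.6.0-cdh5.4.3, FlexMap, hadoop-plus, hadoop-TCP, hardfs, hadoop-on-lustre2). They show how the NameNode-internal INodeMap class is used: the fsimage saver iterates it to serialize snapshot diffs, the snapshot-aware INode classes thread it through recordModification, addChild, removeChild and replaceChild, and the rename tests pass it to mocked directory nodes when exercising the rename-undo paths.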

Project: hadoop    File: FSImageFormatPBSnapshot.java
/**
 * save all the snapshot diff to fsimage
 */
public void serializeSnapshotDiffSection(OutputStream out)
    throws IOException {
  INodeMap inodesMap = fsn.getFSDirectory().getINodeMap();
  final List<INodeReference> refList = parent.getSaverContext()
      .getRefList();
  int i = 0;
  Iterator<INodeWithAdditionalFields> iter = inodesMap.getMapIterator();
  while (iter.hasNext()) {
    INodeWithAdditionalFields inode = iter.next();
    if (inode.isFile()) {
      serializeFileDiffList(inode.asFile(), out);
    } else if (inode.isDirectory()) {
      serializeDirDiffList(inode.asDirectory(), refList, out);
    }
    ++i;
    if (i % FSImageFormatProtobuf.Saver.CHECK_CANCEL_INTERVAL == 0) {
      context.checkCancelled();
    }
  }
  parent.commitSection(headers,
      FSImageFormatProtobuf.SectionName.SNAPSHOT_DIFF);
}
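For orientation: INodeMap is the NameNode's id-to-inode lookup table (a GSet keyed by INode.getId()), which is why the saver above can walk every file and directory through getMapIterator(). The fragment below is a minimal sketch of that usage pattern, assuming NameNode-internal access; INodeMap and INodeWithAdditionalFields are not public API, and someInodeId is a hypothetical inode id used only for illustration.

// Sketch only: these are NameNode-internal classes, shown to illustrate
// the pattern used by serializeSnapshotDiffSection above.
INodeMap inodeMap = fsn.getFSDirectory().getINodeMap();

// Point lookup by inode id (the map is keyed by INode.getId()).
INode byId = inodeMap.get(someInodeId);   // someInodeId is hypothetical

// Full scan over all inodes, as in the saver above.
Iterator<INodeWithAdditionalFields> iter = inodeMap.getMapIterator();
while (iter.hasNext()) {
  INodeWithAdditionalFields inode = iter.next();
  if (inode.isFile()) {
    // file-specific handling, e.g. inode.asFile()
  } else if (inode.isDirectory()) {
    // directory-specific handling, e.g. inode.asDirectory()
  }
}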
Project: aliyun-oss-hadoop-fs    File: FSImageFormatPBSnapshot.java
/**
 * save all the snapshot diff to fsimage
 */
public void serializeSnapshotDiffSection(OutputStream out)
    throws IOException {
  INodeMap inodesMap = fsn.getFSDirectory().getINodeMap();
  final List<INodeReference> refList = parent.getSaverContext()
      .getRefList();
  int i = 0;
  Iterator<INodeWithAdditionalFields> iter = inodesMap.getMapIterator();
  while (iter.hasNext()) {
    INodeWithAdditionalFields inode = iter.next();
    if (inode.isFile()) {
      serializeFileDiffList(inode.asFile(), out);
    } else if (inode.isDirectory()) {
      serializeDirDiffList(inode.asDirectory(), refList, out);
    }
    ++i;
    if (i % FSImageFormatProtobuf.Saver.CHECK_CANCEL_INTERVAL == 0) {
      context.checkCancelled();
    }
  }
  parent.commitSection(headers,
      FSImageFormatProtobuf.SectionName.SNAPSHOT_DIFF);
}
Project: big-c    File: FSImageFormatPBSnapshot.java
/**
 * save all the snapshot diff to fsimage
 */
public void serializeSnapshotDiffSection(OutputStream out)
    throws IOException {
  INodeMap inodesMap = fsn.getFSDirectory().getINodeMap();
  final List<INodeReference> refList = parent.getSaverContext()
      .getRefList();
  int i = 0;
  Iterator<INodeWithAdditionalFields> iter = inodesMap.getMapIterator();
  while (iter.hasNext()) {
    INodeWithAdditionalFields inode = iter.next();
    if (inode.isFile()) {
      serializeFileDiffList(inode.asFile(), out);
    } else if (inode.isDirectory()) {
      serializeDirDiffList(inode.asDirectory(), refList, out);
    }
    ++i;
    if (i % FSImageFormatProtobuf.Saver.CHECK_CANCEL_INTERVAL == 0) {
      context.checkCancelled();
    }
  }
  parent.commitSection(headers,
      FSImageFormatProtobuf.SectionName.SNAPSHOT_DIFF);
}
Project: hadoop-2.6.0-cdh5.4.3    File: FSImageFormatPBSnapshot.java
/**
 * save all the snapshot diff to fsimage
 */
public void serializeSnapshotDiffSection(OutputStream out)
    throws IOException {
  INodeMap inodesMap = fsn.getFSDirectory().getINodeMap();
  final List<INodeReference> refList = parent.getSaverContext()
      .getRefList();
  int i = 0;
  Iterator<INodeWithAdditionalFields> iter = inodesMap.getMapIterator();
  while (iter.hasNext()) {
    INodeWithAdditionalFields inode = iter.next();
    if (inode.isFile()) {
      serializeFileDiffList(inode.asFile(), out);
    } else if (inode.isDirectory()) {
      serializeDirDiffList(inode.asDirectory(), refList, out);
    }
    ++i;
    if (i % FSImageFormatProtobuf.Saver.CHECK_CANCEL_INTERVAL == 0) {
      context.checkCancelled();
    }
  }
  parent.commitSection(headers,
      FSImageFormatProtobuf.SectionName.SNAPSHOT_DIFF);
}
Project: hadoop-plus    File: INodeDirectoryWithSnapshot.java
@Override
public INode saveChild2Snapshot(final INode child, final Snapshot latest,
    final INode snapshotCopy, final INodeMap inodeMap)
    throws QuotaExceededException {
  Preconditions.checkArgument(!child.isDirectory(),
      "child is a directory, child=%s", child);
  if (latest == null) {
    return child;
  }

  final DirectoryDiff diff = diffs.checkAndAddLatestSnapshotDiff(latest, this);
  if (diff.getChild(child.getLocalNameBytes(), false, this) != null) {
    // it was already saved in the latest snapshot earlier.  
    return child;
  }

  diff.diff.modify(snapshotCopy, child);
  return child;
}
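In this hadoop-plus variant, saveChild2Snapshot records a pre-modification copy of a non-directory child in the latest snapshot's DirectoryDiff; if there is no latest snapshot, or the child has already been saved for it, the method simply returns the child unchanged.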
Project: FlexMap    File: FSImageFormatPBSnapshot.java
/**
 * save all the snapshot diff to fsimage
 */
public void serializeSnapshotDiffSection(OutputStream out)
    throws IOException {
  INodeMap inodesMap = fsn.getFSDirectory().getINodeMap();
  final List<INodeReference> refList = parent.getSaverContext()
      .getRefList();
  int i = 0;
  Iterator<INodeWithAdditionalFields> iter = inodesMap.getMapIterator();
  while (iter.hasNext()) {
    INodeWithAdditionalFields inode = iter.next();
    if (inode.isFile()) {
      serializeFileDiffList(inode.asFile(), out);
    } else if (inode.isDirectory()) {
      serializeDirDiffList(inode.asDirectory(), refList, out);
    }
    ++i;
    if (i % FSImageFormatProtobuf.Saver.CHECK_CANCEL_INTERVAL == 0) {
      context.checkCancelled();
    }
  }
  parent.commitSection(headers,
      FSImageFormatProtobuf.SectionName.SNAPSHOT_DIFF);
}
Project: hadoop-TCP    File: INodeDirectoryWithSnapshot.java
@Override
public INode saveChild2Snapshot(final INode child, final Snapshot latest,
    final INode snapshotCopy, final INodeMap inodeMap)
    throws QuotaExceededException {
  Preconditions.checkArgument(!child.isDirectory(),
      "child is a directory, child=%s", child);
  if (latest == null) {
    return child;
  }

  final DirectoryDiff diff = diffs.checkAndAddLatestSnapshotDiff(latest, this);
  if (diff.getChild(child.getLocalNameBytes(), false, this) != null) {
    // it was already saved in the latest snapshot earlier.  
    return child;
  }

  diff.diff.modify(snapshotCopy, child);
  return child;
}
Project: hardfs    File: INodeDirectoryWithSnapshot.java
@Override
public INode saveChild2Snapshot(final INode child, final Snapshot latest,
    final INode snapshotCopy, final INodeMap inodeMap)
    throws QuotaExceededException {
  Preconditions.checkArgument(!child.isDirectory(),
      "child is a directory, child=%s", child);
  if (latest == null) {
    return child;
  }

  final DirectoryDiff diff = diffs.checkAndAddLatestSnapshotDiff(latest, this);
  if (diff.getChild(child.getLocalNameBytes(), false, this) != null) {
    // it was already saved in the latest snapshot earlier.  
    return child;
  }

  diff.diff.modify(snapshotCopy, child);
  return child;
}
Project: hadoop-on-lustre2    File: FSImageFormatPBSnapshot.java
/**
 * save all the snapshot diff to fsimage
 */
public void serializeSnapshotDiffSection(OutputStream out)
    throws IOException {
  INodeMap inodesMap = fsn.getFSDirectory().getINodeMap();
  final List<INodeReference> refList = parent.getSaverContext()
      .getRefList();
  int i = 0;
  Iterator<INodeWithAdditionalFields> iter = inodesMap.getMapIterator();
  while (iter.hasNext()) {
    INodeWithAdditionalFields inode = iter.next();
    if (inode.isFile()) {
      serializeFileDiffList(inode.asFile(), out);
    } else if (inode.isDirectory()) {
      serializeDirDiffList(inode.asDirectory(), refList, out);
    }
    ++i;
    if (i % FSImageFormatProtobuf.Saver.CHECK_CANCEL_INTERVAL == 0) {
      context.checkCancelled();
    }
  }
  parent.commitSection(headers,
      FSImageFormatProtobuf.SectionName.SNAPSHOT_DIFF);
}
Project: hadoop-plus    File: INodeDirectorySnapshottable.java
/**
 * Replace itself with {@link INodeDirectoryWithSnapshot} or
 * {@link INodeDirectory} depending on the latest snapshot.
 */
INodeDirectory replaceSelf(final Snapshot latest, final INodeMap inodeMap)
    throws QuotaExceededException {
  if (latest == null) {
    Preconditions.checkState(getLastSnapshot() == null,
        "latest == null but getLastSnapshot() != null, this=%s", this);
    return replaceSelf4INodeDirectory(inodeMap);
  } else {
    return replaceSelf4INodeDirectoryWithSnapshot(inodeMap)
        .recordModification(latest, null);
  }
}
Project: hadoop-plus    File: INodeFileWithSnapshot.java
@Override
public INodeFileWithSnapshot recordModification(final Snapshot latest,
    final INodeMap inodeMap) throws QuotaExceededException {
  if (isInLatestSnapshot(latest) && !shouldRecordInSrcSnapshot(latest)) {
    diffs.saveSelf2Snapshot(latest, this, null);
  }
  return this;
}
Project: hadoop-plus    File: INodeDirectoryWithSnapshot.java
@Override
public INodeDirectoryWithSnapshot recordModification(final Snapshot latest,
    final INodeMap inodeMap) throws QuotaExceededException {
  if (isInLatestSnapshot(latest) && !shouldRecordInSrcSnapshot(latest)) {
    return saveSelf2Snapshot(latest, null);
  }
  return this;
}
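Both recordModification overrides above follow the same rule: the node's current state is saved into the latest snapshot's diff only if the node is in the latest snapshot and the change will not already be captured in the rename source's snapshot (shouldRecordInSrcSnapshot); otherwise the call is a no-op.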
Project: hadoop-plus    File: INodeDirectoryWithSnapshot.java
@Override
public boolean addChild(INode inode, boolean setModTime, Snapshot latest,
    final INodeMap inodeMap) throws QuotaExceededException {
  ChildrenDiff diff = null;
  Integer undoInfo = null;
  if (isInLatestSnapshot(latest)) {
    diff = diffs.checkAndAddLatestSnapshotDiff(latest, this).diff;
    undoInfo = diff.create(inode);
  }
  final boolean added = super.addChild(inode, setModTime, null, inodeMap);
  if (!added && undoInfo != null) {
    diff.undoCreate(inode, undoInfo);
  }
  return added; 
}
Project: hadoop-plus    File: INodeDirectoryWithSnapshot.java
@Override
public boolean removeChild(INode child, Snapshot latest,
    final INodeMap inodeMap) throws QuotaExceededException {
  ChildrenDiff diff = null;
  UndoInfo<INode> undoInfo = null;
  // For a directory that is not a renamed node, if isInLatestSnapshot returns
  // false, the directory is not in the latest snapshot, thus we do not need
  // to record the removed child in any snapshot.
  // For a directory that was moved/renamed, note that if the directory is in
  // any of the previous snapshots, we will create a reference node for the 
  // directory while rename, and isInLatestSnapshot will return true in that
  // scenario (if all previous snapshots have been deleted, isInLatestSnapshot
  // still returns false). Thus if isInLatestSnapshot returns false, the 
  // directory node cannot be in any snapshot (not in current tree, nor in 
  // previous src tree). Thus we do not need to record the removed child in 
  // any snapshot.
  if (isInLatestSnapshot(latest)) {
    diff = diffs.checkAndAddLatestSnapshotDiff(latest, this).diff;
    undoInfo = diff.delete(child);
  }
  final boolean removed = removeChild(child);
  if (undoInfo != null) {
    if (!removed) {
      //remove failed, undo
      diff.undoDelete(child, undoInfo);
    }
  }
  return removed;
}
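addChild and removeChild share an undo pattern: the change is first recorded in the latest snapshot's ChildrenDiff (diff.create / diff.delete), and the returned undo token is used to roll that record back if the underlying child-list update fails. The rename-undo tests further below rely on exactly this rollback path.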
Project: hadoop-plus    File: INodeFileUnderConstructionWithSnapshot.java
@Override
public INodeFileUnderConstructionWithSnapshot recordModification(
    final Snapshot latest, final INodeMap inodeMap)
    throws QuotaExceededException {
  if (isInLatestSnapshot(latest) && !shouldRecordInSrcSnapshot(latest)) {
    diffs.saveSelf2Snapshot(latest, this, null);
  }
  return this;
}
Project: hadoop-TCP    File: INodeDirectorySnapshottable.java
/**
 * Replace itself with {@link INodeDirectoryWithSnapshot} or
 * {@link INodeDirectory} depending on the latest snapshot.
 */
INodeDirectory replaceSelf(final Snapshot latest, final INodeMap inodeMap)
    throws QuotaExceededException {
  if (latest == null) {
    Preconditions.checkState(getLastSnapshot() == null,
        "latest == null but getLastSnapshot() != null, this=%s", this);
    return replaceSelf4INodeDirectory(inodeMap);
  } else {
    return replaceSelf4INodeDirectoryWithSnapshot(inodeMap)
        .recordModification(latest, null);
  }
}
Project: hadoop-TCP    File: INodeFileWithSnapshot.java
@Override
public INodeFileWithSnapshot recordModification(final Snapshot latest,
    final INodeMap inodeMap) throws QuotaExceededException {
  if (isInLatestSnapshot(latest) && !shouldRecordInSrcSnapshot(latest)) {
    diffs.saveSelf2Snapshot(latest, this, null);
  }
  return this;
}
Project: hadoop-TCP    File: INodeDirectoryWithSnapshot.java
@Override
public INodeDirectoryWithSnapshot recordModification(final Snapshot latest,
    final INodeMap inodeMap) throws QuotaExceededException {
  if (isInLatestSnapshot(latest) && !shouldRecordInSrcSnapshot(latest)) {
    return saveSelf2Snapshot(latest, null);
  }
  return this;
}
Project: hadoop-TCP    File: INodeDirectoryWithSnapshot.java
@Override
public boolean addChild(INode inode, boolean setModTime, Snapshot latest,
    final INodeMap inodeMap) throws QuotaExceededException {
  ChildrenDiff diff = null;
  Integer undoInfo = null;
  if (isInLatestSnapshot(latest)) {
    diff = diffs.checkAndAddLatestSnapshotDiff(latest, this).diff;
    undoInfo = diff.create(inode);
  }
  final boolean added = super.addChild(inode, setModTime, null, inodeMap);
  if (!added && undoInfo != null) {
    diff.undoCreate(inode, undoInfo);
  }
  return added; 
}
Project: hadoop-TCP    File: INodeDirectoryWithSnapshot.java
@Override
public boolean removeChild(INode child, Snapshot latest,
    final INodeMap inodeMap) throws QuotaExceededException {
  ChildrenDiff diff = null;
  UndoInfo<INode> undoInfo = null;
  // For a directory that is not a renamed node, if isInLatestSnapshot returns
  // false, the directory is not in the latest snapshot, thus we do not need
  // to record the removed child in any snapshot.
  // For a directory that was moved/renamed, note that if the directory is in
  // any of the previous snapshots, we will create a reference node for the 
  // directory while rename, and isInLatestSnapshot will return true in that
  // scenario (if all previous snapshots have been deleted, isInLatestSnapshot
  // still returns false). Thus if isInLatestSnapshot returns false, the 
  // directory node cannot be in any snapshot (not in current tree, nor in 
  // previous src tree). Thus we do not need to record the removed child in 
  // any snapshot.
  if (isInLatestSnapshot(latest)) {
    diff = diffs.checkAndAddLatestSnapshotDiff(latest, this).diff;
    undoInfo = diff.delete(child);
  }
  final boolean removed = removeChild(child);
  if (undoInfo != null) {
    if (!removed) {
      //remove failed, undo
      diff.undoDelete(child, undoInfo);
    }
  }
  return removed;
}
Project: hadoop-TCP    File: INodeFileUnderConstructionWithSnapshot.java
@Override
public INodeFileUnderConstructionWithSnapshot recordModification(
    final Snapshot latest, final INodeMap inodeMap)
    throws QuotaExceededException {
  if (isInLatestSnapshot(latest) && !shouldRecordInSrcSnapshot(latest)) {
    diffs.saveSelf2Snapshot(latest, this, null);
  }
  return this;
}
Project: hardfs    File: INodeDirectorySnapshottable.java
/**
 * Replace itself with {@link INodeDirectoryWithSnapshot} or
 * {@link INodeDirectory} depending on the latest snapshot.
 */
INodeDirectory replaceSelf(final Snapshot latest, final INodeMap inodeMap)
    throws QuotaExceededException {
  if (latest == null) {
    Preconditions.checkState(getLastSnapshot() == null,
        "latest == null but getLastSnapshot() != null, this=%s", this);
    return replaceSelf4INodeDirectory(inodeMap);
  } else {
    return replaceSelf4INodeDirectoryWithSnapshot(inodeMap)
        .recordModification(latest, null);
  }
}
Project: hardfs    File: INodeFileWithSnapshot.java
@Override
public INodeFileWithSnapshot recordModification(final Snapshot latest,
    final INodeMap inodeMap) throws QuotaExceededException {
  if (isInLatestSnapshot(latest) && !shouldRecordInSrcSnapshot(latest)) {
    diffs.saveSelf2Snapshot(latest, this, null);
  }
  return this;
}
Project: hardfs    File: INodeDirectoryWithSnapshot.java
@Override
public INodeDirectoryWithSnapshot recordModification(final Snapshot latest,
    final INodeMap inodeMap) throws QuotaExceededException {
  if (isInLatestSnapshot(latest) && !shouldRecordInSrcSnapshot(latest)) {
    return saveSelf2Snapshot(latest, null);
  }
  return this;
}
Project: hardfs    File: INodeDirectoryWithSnapshot.java
@Override
public boolean addChild(INode inode, boolean setModTime, Snapshot latest,
    final INodeMap inodeMap) throws QuotaExceededException {
  ChildrenDiff diff = null;
  Integer undoInfo = null;
  if (isInLatestSnapshot(latest)) {
    diff = diffs.checkAndAddLatestSnapshotDiff(latest, this).diff;
    undoInfo = diff.create(inode);
  }
  final boolean added = super.addChild(inode, setModTime, null, inodeMap);
  if (!added && undoInfo != null) {
    diff.undoCreate(inode, undoInfo);
  }
  return added; 
}
Project: hardfs    File: INodeDirectoryWithSnapshot.java
@Override
public boolean removeChild(INode child, Snapshot latest,
    final INodeMap inodeMap) throws QuotaExceededException {
  ChildrenDiff diff = null;
  UndoInfo<INode> undoInfo = null;
  // For a directory that is not a renamed node, if isInLatestSnapshot returns
  // false, the directory is not in the latest snapshot, thus we do not need
  // to record the removed child in any snapshot.
  // For a directory that was moved/renamed, note that if the directory is in
  // any of the previous snapshots, we will create a reference node for the 
  // directory while rename, and isInLatestSnapshot will return true in that
  // scenario (if all previous snapshots have been deleted, isInLatestSnapshot
  // still returns false). Thus if isInLatestSnapshot returns false, the 
  // directory node cannot be in any snapshot (not in current tree, nor in 
  // previous src tree). Thus we do not need to record the removed child in 
  // any snapshot.
  if (isInLatestSnapshot(latest)) {
    diff = diffs.checkAndAddLatestSnapshotDiff(latest, this).diff;
    undoInfo = diff.delete(child);
  }
  final boolean removed = removeChild(child);
  if (undoInfo != null) {
    if (!removed) {
      //remove failed, undo
      diff.undoDelete(child, undoInfo);
    }
  }
  return removed;
}
Project: hardfs    File: INodeFileUnderConstructionWithSnapshot.java
@Override
public INodeFileUnderConstructionWithSnapshot recordModification(
    final Snapshot latest, final INodeMap inodeMap)
    throws QuotaExceededException {
  if (isInLatestSnapshot(latest) && !shouldRecordInSrcSnapshot(latest)) {
    diffs.saveSelf2Snapshot(latest, this, null);
  }
  return this;
}
Project: hadoop-on-lustre2    File: INodeDirectorySnapshottable.java
/**
 * Replace itself with {@link INodeDirectoryWithSnapshot} or
 * {@link INodeDirectory} depending on the latest snapshot.
 */
INodeDirectory replaceSelf(final int latestSnapshotId, final INodeMap inodeMap)
    throws QuotaExceededException {
  if (latestSnapshotId == Snapshot.CURRENT_STATE_ID) {
    Preconditions.checkState(getDirectoryWithSnapshotFeature()
        .getLastSnapshotId() == Snapshot.CURRENT_STATE_ID, "this=%s", this);
  }
  INodeDirectory dir = replaceSelf4INodeDirectory(inodeMap);
  if (latestSnapshotId != Snapshot.CURRENT_STATE_ID) {
    dir.recordModification(latestSnapshotId);
  }
  return dir;
}
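Unlike the earlier forks, the hadoop-on-lustre2 code identifies snapshots by integer id (Snapshot.CURRENT_STATE_ID marks the current state) and keeps per-directory snapshot data in a DirectoryWithSnapshotFeature, so replaceSelf takes a snapshot id instead of a Snapshot object, always produces a plain INodeDirectory, and records the modification on it when a snapshot exists.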
Project: hadoop-plus    File: INodeDirectoryWithSnapshot.java
@Override
public void replaceChild(final INode oldChild, final INode newChild,
    final INodeMap inodeMap) {
  super.replaceChild(oldChild, newChild, inodeMap);
  diffs.replaceChild(ListType.CREATED, oldChild, newChild);
}
Project: hadoop-plus    File: TestRenameWithSnapshots.java
/**
 * Test the undo section of rename. Before the rename, we create the renamed 
 * file/dir before taking the snapshot.
 */
@Test
public void testRenameUndo_1() throws Exception {
  final Path sdir1 = new Path("/dir1");
  final Path sdir2 = new Path("/dir2");
  hdfs.mkdirs(sdir1);
  hdfs.mkdirs(sdir2);
  final Path foo = new Path(sdir1, "foo");
  final Path bar = new Path(foo, "bar");
  DFSTestUtil.createFile(hdfs, bar, BLOCKSIZE, REPL, SEED);
  final Path dir2file = new Path(sdir2, "file");
  DFSTestUtil.createFile(hdfs, dir2file, BLOCKSIZE, REPL, SEED);

  SnapshotTestHelper.createSnapshot(hdfs, sdir1, "s1");

  INodeDirectory dir2 = fsdir.getINode4Write(sdir2.toString()).asDirectory();
  INodeDirectory mockDir2 = spy(dir2);
  doReturn(false).when(mockDir2).addChild((INode) anyObject(), anyBoolean(),
          (Snapshot) anyObject(), (INodeMap) anyObject());
  INodeDirectory root = fsdir.getINode4Write("/").asDirectory();
  root.replaceChild(dir2, mockDir2, fsdir.getINodeMap());

  final Path newfoo = new Path(sdir2, "foo");
  boolean result = hdfs.rename(foo, newfoo);
  assertFalse(result);

  // check the current internal details
  INodeDirectorySnapshottable dir1Node = (INodeDirectorySnapshottable) fsdir
      .getINode4Write(sdir1.toString());
  ReadOnlyList<INode> dir1Children = dir1Node.getChildrenList(null);
  assertEquals(1, dir1Children.size());
  assertEquals(foo.getName(), dir1Children.get(0).getLocalName());
  List<DirectoryDiff> dir1Diffs = dir1Node.getDiffs().asList();
  assertEquals(1, dir1Diffs.size());
  assertEquals("s1", dir1Diffs.get(0).snapshot.getRoot().getLocalName());

  // after the undo of rename, both the created and deleted list of sdir1
  // should be empty
  ChildrenDiff childrenDiff = dir1Diffs.get(0).getChildrenDiff();
  assertEquals(0, childrenDiff.getList(ListType.DELETED).size());
  assertEquals(0, childrenDiff.getList(ListType.CREATED).size());

  INode fooNode = fsdir.getINode4Write(foo.toString());
  assertTrue(fooNode instanceof INodeDirectoryWithSnapshot);
  List<DirectoryDiff> fooDiffs = ((INodeDirectoryWithSnapshot) fooNode)
      .getDiffs().asList();
  assertEquals(1, fooDiffs.size());
  assertEquals("s1", fooDiffs.get(0).snapshot.getRoot().getLocalName());

  final Path foo_s1 = SnapshotTestHelper.getSnapshotPath(sdir1, "s1", "foo");
  INode fooNode_s1 = fsdir.getINode(foo_s1.toString());
  assertTrue(fooNode_s1 == fooNode);

  // check sdir2
  assertFalse(hdfs.exists(newfoo));
  INodeDirectory dir2Node = fsdir.getINode4Write(sdir2.toString())
      .asDirectory();
  assertFalse(dir2Node instanceof INodeDirectoryWithSnapshot);
  ReadOnlyList<INode> dir2Children = dir2Node.getChildrenList(null);
  assertEquals(1, dir2Children.size());
  assertEquals(dir2file.getName(), dir2Children.get(0).getLocalName());
}
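The rename-undo tests use a common recipe: a Mockito spy of the destination directory is swapped into the tree via replaceChild, its addChild(INode, boolean, Snapshot, INodeMap) is stubbed to return false so the rename fails partway through, and the assertions then check that the snapshot diff lists and the directory children are restored by the undo logic.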
Project: hadoop-plus    File: TestRenameWithSnapshots.java
/**
 * Test the undo section of rename. Before the rename, we create the renamed 
 * file/dir after taking the snapshot.
 */
@Test
public void testRenameUndo_2() throws Exception {
  final Path sdir1 = new Path("/dir1");
  final Path sdir2 = new Path("/dir2");
  hdfs.mkdirs(sdir1);
  hdfs.mkdirs(sdir2);
  final Path dir2file = new Path(sdir2, "file");
  DFSTestUtil.createFile(hdfs, dir2file, BLOCKSIZE, REPL, SEED);

  SnapshotTestHelper.createSnapshot(hdfs, sdir1, "s1");

  // create foo after taking snapshot
  final Path foo = new Path(sdir1, "foo");
  final Path bar = new Path(foo, "bar");
  DFSTestUtil.createFile(hdfs, bar, BLOCKSIZE, REPL, SEED);

  INodeDirectory dir2 = fsdir.getINode4Write(sdir2.toString()).asDirectory();
  INodeDirectory mockDir2 = spy(dir2);
  doReturn(false).when(mockDir2).addChild((INode) anyObject(), anyBoolean(),
          (Snapshot) anyObject(), (INodeMap) anyObject());
  INodeDirectory root = fsdir.getINode4Write("/").asDirectory();
  root.replaceChild(dir2, mockDir2, fsdir.getINodeMap());

  final Path newfoo = new Path(sdir2, "foo");
  boolean result = hdfs.rename(foo, newfoo);
  assertFalse(result);

  // check the current internal details
  INodeDirectorySnapshottable dir1Node = (INodeDirectorySnapshottable) fsdir
      .getINode4Write(sdir1.toString());
  ReadOnlyList<INode> dir1Children = dir1Node.getChildrenList(null);
  assertEquals(1, dir1Children.size());
  assertEquals(foo.getName(), dir1Children.get(0).getLocalName());
  List<DirectoryDiff> dir1Diffs = dir1Node.getDiffs().asList();
  assertEquals(1, dir1Diffs.size());
  assertEquals("s1", dir1Diffs.get(0).snapshot.getRoot().getLocalName());

  // after the undo of rename, the created list of sdir1 should contain 
  // 1 element
  ChildrenDiff childrenDiff = dir1Diffs.get(0).getChildrenDiff();
  assertEquals(0, childrenDiff.getList(ListType.DELETED).size());
  assertEquals(1, childrenDiff.getList(ListType.CREATED).size());

  INode fooNode = fsdir.getINode4Write(foo.toString());
  assertTrue(fooNode instanceof INodeDirectory);
  assertTrue(childrenDiff.getList(ListType.CREATED).get(0) == fooNode);

  final Path foo_s1 = SnapshotTestHelper.getSnapshotPath(sdir1, "s1", "foo");
  assertFalse(hdfs.exists(foo_s1));

  // check sdir2
  assertFalse(hdfs.exists(newfoo));
  INodeDirectory dir2Node = fsdir.getINode4Write(sdir2.toString())
      .asDirectory();
  assertFalse(dir2Node instanceof INodeDirectoryWithSnapshot);
  ReadOnlyList<INode> dir2Children = dir2Node.getChildrenList(null);
  assertEquals(1, dir2Children.size());
  assertEquals(dir2file.getName(), dir2Children.get(0).getLocalName());
}
Project: hadoop-plus    File: TestRenameWithSnapshots.java
/**
 * Test undo where dst node being overwritten is a reference node
 */
@Test
public void testRenameUndo_4() throws Exception {
  final Path sdir1 = new Path("/dir1");
  final Path sdir2 = new Path("/dir2");
  final Path sdir3 = new Path("/dir3");
  hdfs.mkdirs(sdir1);
  hdfs.mkdirs(sdir2);
  hdfs.mkdirs(sdir3);

  final Path foo = new Path(sdir1, "foo");
  final Path bar = new Path(foo, "bar");
  DFSTestUtil.createFile(hdfs, bar, BLOCKSIZE, REPL, SEED);

  final Path foo2 = new Path(sdir2, "foo2");
  hdfs.mkdirs(foo2);

  SnapshotTestHelper.createSnapshot(hdfs, sdir1, "s1");
  SnapshotTestHelper.createSnapshot(hdfs, sdir2, "s2");

  // rename foo2 to foo3, so that foo3 will be a reference node
  final Path foo3 = new Path(sdir3, "foo3");
  hdfs.rename(foo2, foo3);

  INode foo3Node = fsdir.getINode4Write(foo3.toString());
  assertTrue(foo3Node.isReference());

  INodeDirectory dir3 = fsdir.getINode4Write(sdir3.toString()).asDirectory();
  INodeDirectory mockDir3 = spy(dir3);
  // fail the rename but succeed in undo
  doReturn(false).when(mockDir3).addChild((INode) Mockito.isNull(),
      anyBoolean(), (Snapshot) anyObject(), (INodeMap) anyObject());
  Mockito.when(mockDir3.addChild((INode) Mockito.isNotNull(), 
      anyBoolean(), (Snapshot) anyObject(), 
      (INodeMap) anyObject())).thenReturn(false).thenCallRealMethod();
  INodeDirectory root = fsdir.getINode4Write("/").asDirectory();
  root.replaceChild(dir3, mockDir3, fsdir.getINodeMap());
  foo3Node.setParent(mockDir3);

  try {
    hdfs.rename(foo, foo3, Rename.OVERWRITE);
    fail("the rename from " + foo + " to " + foo3 + " should fail");
  } catch (IOException e) {
    GenericTestUtils.assertExceptionContains("rename from " + foo + " to "
        + foo3 + " failed.", e);
  }

  // make sure the undo is correct
  final INode foo3Node_undo = fsdir.getINode4Write(foo3.toString());
  assertSame(foo3Node, foo3Node_undo);
  INodeReference.WithCount foo3_wc = (WithCount) foo3Node.asReference()
      .getReferredINode();
  assertEquals(2, foo3_wc.getReferenceCount());
  assertSame(foo3Node, foo3_wc.getParentReference());
}
Project: hadoop-TCP    File: INodeDirectoryWithSnapshot.java
@Override
public void replaceChild(final INode oldChild, final INode newChild,
    final INodeMap inodeMap) {
  super.replaceChild(oldChild, newChild, inodeMap);
  diffs.replaceChild(ListType.CREATED, oldChild, newChild);
}
Project: hadoop-TCP    File: TestRenameWithSnapshots.java
/**
 * Test the undo section of rename. Before the rename, we create the renamed 
 * file/dir before taking the snapshot.
 */
@Test
public void testRenameUndo_1() throws Exception {
  final Path sdir1 = new Path("/dir1");
  final Path sdir2 = new Path("/dir2");
  hdfs.mkdirs(sdir1);
  hdfs.mkdirs(sdir2);
  final Path foo = new Path(sdir1, "foo");
  final Path bar = new Path(foo, "bar");
  DFSTestUtil.createFile(hdfs, bar, BLOCKSIZE, REPL, SEED);
  final Path dir2file = new Path(sdir2, "file");
  DFSTestUtil.createFile(hdfs, dir2file, BLOCKSIZE, REPL, SEED);

  SnapshotTestHelper.createSnapshot(hdfs, sdir1, "s1");

  INodeDirectory dir2 = fsdir.getINode4Write(sdir2.toString()).asDirectory();
  INodeDirectory mockDir2 = spy(dir2);
  doReturn(false).when(mockDir2).addChild((INode) anyObject(), anyBoolean(),
          (Snapshot) anyObject(), (INodeMap) anyObject());
  INodeDirectory root = fsdir.getINode4Write("/").asDirectory();
  root.replaceChild(dir2, mockDir2, fsdir.getINodeMap());

  final Path newfoo = new Path(sdir2, "foo");
  boolean result = hdfs.rename(foo, newfoo);
  assertFalse(result);

  // check the current internal details
  INodeDirectorySnapshottable dir1Node = (INodeDirectorySnapshottable) fsdir
      .getINode4Write(sdir1.toString());
  ReadOnlyList<INode> dir1Children = dir1Node.getChildrenList(null);
  assertEquals(1, dir1Children.size());
  assertEquals(foo.getName(), dir1Children.get(0).getLocalName());
  List<DirectoryDiff> dir1Diffs = dir1Node.getDiffs().asList();
  assertEquals(1, dir1Diffs.size());
  assertEquals("s1", dir1Diffs.get(0).snapshot.getRoot().getLocalName());

  // after the undo of rename, both the created and deleted list of sdir1
  // should be empty
  ChildrenDiff childrenDiff = dir1Diffs.get(0).getChildrenDiff();
  assertEquals(0, childrenDiff.getList(ListType.DELETED).size());
  assertEquals(0, childrenDiff.getList(ListType.CREATED).size());

  INode fooNode = fsdir.getINode4Write(foo.toString());
  assertTrue(fooNode instanceof INodeDirectoryWithSnapshot);
  List<DirectoryDiff> fooDiffs = ((INodeDirectoryWithSnapshot) fooNode)
      .getDiffs().asList();
  assertEquals(1, fooDiffs.size());
  assertEquals("s1", fooDiffs.get(0).snapshot.getRoot().getLocalName());

  final Path foo_s1 = SnapshotTestHelper.getSnapshotPath(sdir1, "s1", "foo");
  INode fooNode_s1 = fsdir.getINode(foo_s1.toString());
  assertTrue(fooNode_s1 == fooNode);

  // check sdir2
  assertFalse(hdfs.exists(newfoo));
  INodeDirectory dir2Node = fsdir.getINode4Write(sdir2.toString())
      .asDirectory();
  assertFalse(dir2Node instanceof INodeDirectoryWithSnapshot);
  ReadOnlyList<INode> dir2Children = dir2Node.getChildrenList(null);
  assertEquals(1, dir2Children.size());
  assertEquals(dir2file.getName(), dir2Children.get(0).getLocalName());
}
Project: hadoop-TCP    File: TestRenameWithSnapshots.java
/**
 * Test the undo section of rename. Before the rename, we create the renamed 
 * file/dir after taking the snapshot.
 */
@Test
public void testRenameUndo_2() throws Exception {
  final Path sdir1 = new Path("/dir1");
  final Path sdir2 = new Path("/dir2");
  hdfs.mkdirs(sdir1);
  hdfs.mkdirs(sdir2);
  final Path dir2file = new Path(sdir2, "file");
  DFSTestUtil.createFile(hdfs, dir2file, BLOCKSIZE, REPL, SEED);

  SnapshotTestHelper.createSnapshot(hdfs, sdir1, "s1");

  // create foo after taking snapshot
  final Path foo = new Path(sdir1, "foo");
  final Path bar = new Path(foo, "bar");
  DFSTestUtil.createFile(hdfs, bar, BLOCKSIZE, REPL, SEED);

  INodeDirectory dir2 = fsdir.getINode4Write(sdir2.toString()).asDirectory();
  INodeDirectory mockDir2 = spy(dir2);
  doReturn(false).when(mockDir2).addChild((INode) anyObject(), anyBoolean(),
          (Snapshot) anyObject(), (INodeMap) anyObject());
  INodeDirectory root = fsdir.getINode4Write("/").asDirectory();
  root.replaceChild(dir2, mockDir2, fsdir.getINodeMap());

  final Path newfoo = new Path(sdir2, "foo");
  boolean result = hdfs.rename(foo, newfoo);
  assertFalse(result);

  // check the current internal details
  INodeDirectorySnapshottable dir1Node = (INodeDirectorySnapshottable) fsdir
      .getINode4Write(sdir1.toString());
  ReadOnlyList<INode> dir1Children = dir1Node.getChildrenList(null);
  assertEquals(1, dir1Children.size());
  assertEquals(foo.getName(), dir1Children.get(0).getLocalName());
  List<DirectoryDiff> dir1Diffs = dir1Node.getDiffs().asList();
  assertEquals(1, dir1Diffs.size());
  assertEquals("s1", dir1Diffs.get(0).snapshot.getRoot().getLocalName());

  // after the undo of rename, the created list of sdir1 should contain 
  // 1 element
  ChildrenDiff childrenDiff = dir1Diffs.get(0).getChildrenDiff();
  assertEquals(0, childrenDiff.getList(ListType.DELETED).size());
  assertEquals(1, childrenDiff.getList(ListType.CREATED).size());

  INode fooNode = fsdir.getINode4Write(foo.toString());
  assertTrue(fooNode instanceof INodeDirectory);
  assertTrue(childrenDiff.getList(ListType.CREATED).get(0) == fooNode);

  final Path foo_s1 = SnapshotTestHelper.getSnapshotPath(sdir1, "s1", "foo");
  assertFalse(hdfs.exists(foo_s1));

  // check sdir2
  assertFalse(hdfs.exists(newfoo));
  INodeDirectory dir2Node = fsdir.getINode4Write(sdir2.toString())
      .asDirectory();
  assertFalse(dir2Node instanceof INodeDirectoryWithSnapshot);
  ReadOnlyList<INode> dir2Children = dir2Node.getChildrenList(null);
  assertEquals(1, dir2Children.size());
  assertEquals(dir2file.getName(), dir2Children.get(0).getLocalName());
}
Project: hadoop-TCP    File: TestRenameWithSnapshots.java
/**
 * Test undo where dst node being overwritten is a reference node
 */
@Test
public void testRenameUndo_4() throws Exception {
  final Path sdir1 = new Path("/dir1");
  final Path sdir2 = new Path("/dir2");
  final Path sdir3 = new Path("/dir3");
  hdfs.mkdirs(sdir1);
  hdfs.mkdirs(sdir2);
  hdfs.mkdirs(sdir3);

  final Path foo = new Path(sdir1, "foo");
  final Path bar = new Path(foo, "bar");
  DFSTestUtil.createFile(hdfs, bar, BLOCKSIZE, REPL, SEED);

  final Path foo2 = new Path(sdir2, "foo2");
  hdfs.mkdirs(foo2);

  SnapshotTestHelper.createSnapshot(hdfs, sdir1, "s1");
  SnapshotTestHelper.createSnapshot(hdfs, sdir2, "s2");

  // rename foo2 to foo3, so that foo3 will be a reference node
  final Path foo3 = new Path(sdir3, "foo3");
  hdfs.rename(foo2, foo3);

  INode foo3Node = fsdir.getINode4Write(foo3.toString());
  assertTrue(foo3Node.isReference());

  INodeDirectory dir3 = fsdir.getINode4Write(sdir3.toString()).asDirectory();
  INodeDirectory mockDir3 = spy(dir3);
  // fail the rename but succeed in undo
  doReturn(false).when(mockDir3).addChild((INode) Mockito.isNull(),
      anyBoolean(), (Snapshot) anyObject(), (INodeMap) anyObject());
  Mockito.when(mockDir3.addChild((INode) Mockito.isNotNull(), 
      anyBoolean(), (Snapshot) anyObject(), 
      (INodeMap) anyObject())).thenReturn(false).thenCallRealMethod();
  INodeDirectory root = fsdir.getINode4Write("/").asDirectory();
  root.replaceChild(dir3, mockDir3, fsdir.getINodeMap());
  foo3Node.setParent(mockDir3);

  try {
    hdfs.rename(foo, foo3, Rename.OVERWRITE);
    fail("the rename from " + foo + " to " + foo3 + " should fail");
  } catch (IOException e) {
    GenericTestUtils.assertExceptionContains("rename from " + foo + " to "
        + foo3 + " failed.", e);
  }

  // make sure the undo is correct
  final INode foo3Node_undo = fsdir.getINode4Write(foo3.toString());
  assertSame(foo3Node, foo3Node_undo);
  INodeReference.WithCount foo3_wc = (WithCount) foo3Node.asReference()
      .getReferredINode();
  assertEquals(2, foo3_wc.getReferenceCount());
  assertSame(foo3Node, foo3_wc.getParentReference());
}
Project: hardfs    File: INodeDirectoryWithSnapshot.java
@Override
public void replaceChild(final INode oldChild, final INode newChild,
    final INodeMap inodeMap) {
  super.replaceChild(oldChild, newChild, inodeMap);
  diffs.replaceChild(ListType.CREATED, oldChild, newChild);
}
Project: hardfs    File: TestRenameWithSnapshots.java
/**
 * Test the undo section of rename. Before the rename, we create the renamed 
 * file/dir before taking the snapshot.
 */
@Test
public void testRenameUndo_1() throws Exception {
  final Path sdir1 = new Path("/dir1");
  final Path sdir2 = new Path("/dir2");
  hdfs.mkdirs(sdir1);
  hdfs.mkdirs(sdir2);
  final Path foo = new Path(sdir1, "foo");
  final Path bar = new Path(foo, "bar");
  DFSTestUtil.createFile(hdfs, bar, BLOCKSIZE, REPL, SEED);
  final Path dir2file = new Path(sdir2, "file");
  DFSTestUtil.createFile(hdfs, dir2file, BLOCKSIZE, REPL, SEED);

  SnapshotTestHelper.createSnapshot(hdfs, sdir1, "s1");

  INodeDirectory dir2 = fsdir.getINode4Write(sdir2.toString()).asDirectory();
  INodeDirectory mockDir2 = spy(dir2);
  doReturn(false).when(mockDir2).addChild((INode) anyObject(), anyBoolean(),
          (Snapshot) anyObject(), (INodeMap) anyObject());
  INodeDirectory root = fsdir.getINode4Write("/").asDirectory();
  root.replaceChild(dir2, mockDir2, fsdir.getINodeMap());

  final Path newfoo = new Path(sdir2, "foo");
  boolean result = hdfs.rename(foo, newfoo);
  assertFalse(result);

  // check the current internal details
  INodeDirectorySnapshottable dir1Node = (INodeDirectorySnapshottable) fsdir
      .getINode4Write(sdir1.toString());
  ReadOnlyList<INode> dir1Children = dir1Node.getChildrenList(null);
  assertEquals(1, dir1Children.size());
  assertEquals(foo.getName(), dir1Children.get(0).getLocalName());
  List<DirectoryDiff> dir1Diffs = dir1Node.getDiffs().asList();
  assertEquals(1, dir1Diffs.size());
  assertEquals("s1", dir1Diffs.get(0).snapshot.getRoot().getLocalName());

  // after the undo of rename, both the created and deleted list of sdir1
  // should be empty
  ChildrenDiff childrenDiff = dir1Diffs.get(0).getChildrenDiff();
  assertEquals(0, childrenDiff.getList(ListType.DELETED).size());
  assertEquals(0, childrenDiff.getList(ListType.CREATED).size());

  INode fooNode = fsdir.getINode4Write(foo.toString());
  assertTrue(fooNode instanceof INodeDirectoryWithSnapshot);
  List<DirectoryDiff> fooDiffs = ((INodeDirectoryWithSnapshot) fooNode)
      .getDiffs().asList();
  assertEquals(1, fooDiffs.size());
  assertEquals("s1", fooDiffs.get(0).snapshot.getRoot().getLocalName());

  final Path foo_s1 = SnapshotTestHelper.getSnapshotPath(sdir1, "s1", "foo");
  INode fooNode_s1 = fsdir.getINode(foo_s1.toString());
  assertTrue(fooNode_s1 == fooNode);

  // check sdir2
  assertFalse(hdfs.exists(newfoo));
  INodeDirectory dir2Node = fsdir.getINode4Write(sdir2.toString())
      .asDirectory();
  assertFalse(dir2Node instanceof INodeDirectoryWithSnapshot);
  ReadOnlyList<INode> dir2Children = dir2Node.getChildrenList(null);
  assertEquals(1, dir2Children.size());
  assertEquals(dir2file.getName(), dir2Children.get(0).getLocalName());
}
Project: hardfs    File: TestRenameWithSnapshots.java
/**
 * Test the undo section of rename. Before the rename, we create the renamed 
 * file/dir after taking the snapshot.
 */
@Test
public void testRenameUndo_2() throws Exception {
  final Path sdir1 = new Path("/dir1");
  final Path sdir2 = new Path("/dir2");
  hdfs.mkdirs(sdir1);
  hdfs.mkdirs(sdir2);
  final Path dir2file = new Path(sdir2, "file");
  DFSTestUtil.createFile(hdfs, dir2file, BLOCKSIZE, REPL, SEED);

  SnapshotTestHelper.createSnapshot(hdfs, sdir1, "s1");

  // create foo after taking snapshot
  final Path foo = new Path(sdir1, "foo");
  final Path bar = new Path(foo, "bar");
  DFSTestUtil.createFile(hdfs, bar, BLOCKSIZE, REPL, SEED);

  INodeDirectory dir2 = fsdir.getINode4Write(sdir2.toString()).asDirectory();
  INodeDirectory mockDir2 = spy(dir2);
  doReturn(false).when(mockDir2).addChild((INode) anyObject(), anyBoolean(),
          (Snapshot) anyObject(), (INodeMap) anyObject());
  INodeDirectory root = fsdir.getINode4Write("/").asDirectory();
  root.replaceChild(dir2, mockDir2, fsdir.getINodeMap());

  final Path newfoo = new Path(sdir2, "foo");
  boolean result = hdfs.rename(foo, newfoo);
  assertFalse(result);

  // check the current internal details
  INodeDirectorySnapshottable dir1Node = (INodeDirectorySnapshottable) fsdir
      .getINode4Write(sdir1.toString());
  ReadOnlyList<INode> dir1Children = dir1Node.getChildrenList(null);
  assertEquals(1, dir1Children.size());
  assertEquals(foo.getName(), dir1Children.get(0).getLocalName());
  List<DirectoryDiff> dir1Diffs = dir1Node.getDiffs().asList();
  assertEquals(1, dir1Diffs.size());
  assertEquals("s1", dir1Diffs.get(0).snapshot.getRoot().getLocalName());

  // after the undo of rename, the created list of sdir1 should contain 
  // 1 element
  ChildrenDiff childrenDiff = dir1Diffs.get(0).getChildrenDiff();
  assertEquals(0, childrenDiff.getList(ListType.DELETED).size());
  assertEquals(1, childrenDiff.getList(ListType.CREATED).size());

  INode fooNode = fsdir.getINode4Write(foo.toString());
  assertTrue(fooNode instanceof INodeDirectory);
  assertTrue(childrenDiff.getList(ListType.CREATED).get(0) == fooNode);

  final Path foo_s1 = SnapshotTestHelper.getSnapshotPath(sdir1, "s1", "foo");
  assertFalse(hdfs.exists(foo_s1));

  // check sdir2
  assertFalse(hdfs.exists(newfoo));
  INodeDirectory dir2Node = fsdir.getINode4Write(sdir2.toString())
      .asDirectory();
  assertFalse(dir2Node instanceof INodeDirectoryWithSnapshot);
  ReadOnlyList<INode> dir2Children = dir2Node.getChildrenList(null);
  assertEquals(1, dir2Children.size());
  assertEquals(dir2file.getName(), dir2Children.get(0).getLocalName());
}
Project: hardfs    File: TestRenameWithSnapshots.java
/**
 * Test undo where dst node being overwritten is a reference node
 */
@Test
public void testRenameUndo_4() throws Exception {
  final Path sdir1 = new Path("/dir1");
  final Path sdir2 = new Path("/dir2");
  final Path sdir3 = new Path("/dir3");
  hdfs.mkdirs(sdir1);
  hdfs.mkdirs(sdir2);
  hdfs.mkdirs(sdir3);

  final Path foo = new Path(sdir1, "foo");
  final Path bar = new Path(foo, "bar");
  DFSTestUtil.createFile(hdfs, bar, BLOCKSIZE, REPL, SEED);

  final Path foo2 = new Path(sdir2, "foo2");
  hdfs.mkdirs(foo2);

  SnapshotTestHelper.createSnapshot(hdfs, sdir1, "s1");
  SnapshotTestHelper.createSnapshot(hdfs, sdir2, "s2");

  // rename foo2 to foo3, so that foo3 will be a reference node
  final Path foo3 = new Path(sdir3, "foo3");
  hdfs.rename(foo2, foo3);

  INode foo3Node = fsdir.getINode4Write(foo3.toString());
  assertTrue(foo3Node.isReference());

  INodeDirectory dir3 = fsdir.getINode4Write(sdir3.toString()).asDirectory();
  INodeDirectory mockDir3 = spy(dir3);
  // fail the rename but succeed in undo
  doReturn(false).when(mockDir3).addChild((INode) Mockito.isNull(),
      anyBoolean(), (Snapshot) anyObject(), (INodeMap) anyObject());
  Mockito.when(mockDir3.addChild((INode) Mockito.isNotNull(), 
      anyBoolean(), (Snapshot) anyObject(), 
      (INodeMap) anyObject())).thenReturn(false).thenCallRealMethod();
  INodeDirectory root = fsdir.getINode4Write("/").asDirectory();
  root.replaceChild(dir3, mockDir3, fsdir.getINodeMap());
  foo3Node.setParent(mockDir3);

  try {
    hdfs.rename(foo, foo3, Rename.OVERWRITE);
    fail("the rename from " + foo + " to " + foo3 + " should fail");
  } catch (IOException e) {
    GenericTestUtils.assertExceptionContains("rename from " + foo + " to "
        + foo3 + " failed.", e);
  }

  // make sure the undo is correct
  final INode foo3Node_undo = fsdir.getINode4Write(foo3.toString());
  assertSame(foo3Node, foo3Node_undo);
  INodeReference.WithCount foo3_wc = (WithCount) foo3Node.asReference()
      .getReferredINode();
  assertEquals(2, foo3_wc.getReferenceCount());
  assertSame(foo3Node, foo3_wc.getParentReference());
}