Java 类 org.apache.hadoop.hdfs.server.namenode.INodeReference.WithCount 实例源码 (example source snippets)

项目:hadoop    文件:FSImageFormatPBSnapshot.java   
private INodeReference loadINodeReference(
    INodeReferenceSection.INodeReference r) throws IOException {
  // Resolve the referred inode from the already-loaded inode map.
  final INode referred = fsDir.getInode(r.getReferredId());
  // Reuse the shared WithCount wrapper if the referred inode already has one
  // installed as its parent reference; otherwise create a fresh wrapper.
  WithCount withCount = (WithCount) referred.getParentReference();
  if (withCount == null) {
    withCount = new INodeReference.WithCount(null, referred);
  }
  // A serialized dst-snapshot id marks a DstReference; otherwise the record
  // describes a WithName reference kept in the rename-source tree.
  if (r.hasDstSnapshotId()) {
    return new INodeReference.DstReference(null, withCount,
        r.getDstSnapshotId());
  }
  return new INodeReference.WithName(null, withCount,
      r.getName().toByteArray(), r.getLastSnapshotId());
}
项目:hadoop    文件:INodeDirectory.java   
INodeReference.WithName replaceChild4ReferenceWithName(INode oldChild,
    int latestSnapshotId) {
  // A concrete snapshot id is required; the "current state" id is invalid.
  Preconditions.checkArgument(latestSnapshotId != Snapshot.CURRENT_STATE_ID);
  // Already wrapped in a WithName: nothing to do.
  if (oldChild instanceof INodeReference.WithName) {
    return (INodeReference.WithName) oldChild;
  }

  // Locate (or create) the shared WithCount node tracking all references
  // to this child.
  final INodeReference.WithCount count;
  if (oldChild.isReference()) {
    // The only other reference kind a stored child can be is DstReference.
    Preconditions.checkState(oldChild instanceof INodeReference.DstReference);
    count = (INodeReference.WithCount)
        oldChild.asReference().getReferredINode();
  } else {
    count = new INodeReference.WithCount(null, oldChild);
  }
  // Wrap the child in a WithName bound to the latest snapshot and swap it
  // into this directory's children list.
  final INodeReference.WithName newChild = new INodeReference.WithName(
      this, count, oldChild.getLocalNameBytes(), latestSnapshotId);
  replaceChild(oldChild, newChild, null);
  return newChild;
}
项目:aliyun-oss-hadoop-fs    文件:FSImageFormatPBSnapshot.java   
private INodeReference loadINodeReference(
    INodeReferenceSection.INodeReference r) throws IOException {
  // Resolve the referred inode from the already-loaded inode map.
  final INode referred = fsDir.getInode(r.getReferredId());
  // Reuse the shared WithCount wrapper if the referred inode already has one
  // installed as its parent reference; otherwise create a fresh wrapper.
  WithCount withCount = (WithCount) referred.getParentReference();
  if (withCount == null) {
    withCount = new INodeReference.WithCount(null, referred);
  }
  // A serialized dst-snapshot id marks a DstReference; otherwise the record
  // describes a WithName reference kept in the rename-source tree.
  if (r.hasDstSnapshotId()) {
    return new INodeReference.DstReference(null, withCount,
        r.getDstSnapshotId());
  }
  return new INodeReference.WithName(null, withCount,
      r.getName().toByteArray(), r.getLastSnapshotId());
}
项目:aliyun-oss-hadoop-fs    文件:INodeDirectory.java   
INodeReference.WithName replaceChild4ReferenceWithName(INode oldChild,
    int latestSnapshotId) {
  // A concrete snapshot id is required; the "current state" id is invalid.
  Preconditions.checkArgument(latestSnapshotId != Snapshot.CURRENT_STATE_ID);
  // Already wrapped in a WithName: nothing to do.
  if (oldChild instanceof INodeReference.WithName) {
    return (INodeReference.WithName) oldChild;
  }

  // Locate (or create) the shared WithCount node tracking all references
  // to this child.
  final INodeReference.WithCount count;
  if (oldChild.isReference()) {
    // The only other reference kind a stored child can be is DstReference.
    Preconditions.checkState(oldChild instanceof INodeReference.DstReference);
    count = (INodeReference.WithCount)
        oldChild.asReference().getReferredINode();
  } else {
    count = new INodeReference.WithCount(null, oldChild);
  }
  // Wrap the child in a WithName bound to the latest snapshot and swap it
  // into this directory's children list.
  final INodeReference.WithName newChild = new INodeReference.WithName(
      this, count, oldChild.getLocalNameBytes(), latestSnapshotId);
  replaceChild(oldChild, newChild, null);
  return newChild;
}
项目:big-c    文件:FSImageFormatPBSnapshot.java   
private INodeReference loadINodeReference(
    INodeReferenceSection.INodeReference r) throws IOException {
  // Resolve the referred inode from the already-loaded inode map.
  final INode referred = fsDir.getInode(r.getReferredId());
  // Reuse the shared WithCount wrapper if the referred inode already has one
  // installed as its parent reference; otherwise create a fresh wrapper.
  WithCount withCount = (WithCount) referred.getParentReference();
  if (withCount == null) {
    withCount = new INodeReference.WithCount(null, referred);
  }
  // A serialized dst-snapshot id marks a DstReference; otherwise the record
  // describes a WithName reference kept in the rename-source tree.
  if (r.hasDstSnapshotId()) {
    return new INodeReference.DstReference(null, withCount,
        r.getDstSnapshotId());
  }
  return new INodeReference.WithName(null, withCount,
      r.getName().toByteArray(), r.getLastSnapshotId());
}
项目:big-c    文件:INodeDirectory.java   
INodeReference.WithName replaceChild4ReferenceWithName(INode oldChild,
    int latestSnapshotId) {
  // A concrete snapshot id is required; the "current state" id is invalid.
  Preconditions.checkArgument(latestSnapshotId != Snapshot.CURRENT_STATE_ID);
  // Already wrapped in a WithName: nothing to do.
  if (oldChild instanceof INodeReference.WithName) {
    return (INodeReference.WithName) oldChild;
  }

  // Locate (or create) the shared WithCount node tracking all references
  // to this child.
  final INodeReference.WithCount count;
  if (oldChild.isReference()) {
    // The only other reference kind a stored child can be is DstReference.
    Preconditions.checkState(oldChild instanceof INodeReference.DstReference);
    count = (INodeReference.WithCount)
        oldChild.asReference().getReferredINode();
  } else {
    count = new INodeReference.WithCount(null, oldChild);
  }
  // Wrap the child in a WithName bound to the latest snapshot and swap it
  // into this directory's children list.
  final INodeReference.WithName newChild = new INodeReference.WithName(
      this, count, oldChild.getLocalNameBytes(), latestSnapshotId);
  replaceChild(oldChild, newChild, null);
  return newChild;
}
项目:hadoop-2.6.0-cdh5.4.3    文件:FSImageFormatPBSnapshot.java   
private INodeReference loadINodeReference(
    INodeReferenceSection.INodeReference r) throws IOException {
  // Resolve the referred inode from the already-loaded inode map.
  final INode referred = fsDir.getInode(r.getReferredId());
  // Reuse the shared WithCount wrapper if the referred inode already has one
  // installed as its parent reference; otherwise create a fresh wrapper.
  WithCount withCount = (WithCount) referred.getParentReference();
  if (withCount == null) {
    withCount = new INodeReference.WithCount(null, referred);
  }
  // A serialized dst-snapshot id marks a DstReference; otherwise the record
  // describes a WithName reference kept in the rename-source tree.
  if (r.hasDstSnapshotId()) {
    return new INodeReference.DstReference(null, withCount,
        r.getDstSnapshotId());
  }
  return new INodeReference.WithName(null, withCount,
      r.getName().toByteArray(), r.getLastSnapshotId());
}
项目:hadoop-2.6.0-cdh5.4.3    文件:INodeDirectory.java   
INodeReference.WithName replaceChild4ReferenceWithName(INode oldChild,
    int latestSnapshotId) {
  // A concrete snapshot id is required; the "current state" id is invalid.
  Preconditions.checkArgument(latestSnapshotId != Snapshot.CURRENT_STATE_ID);
  // Already wrapped in a WithName: nothing to do.
  if (oldChild instanceof INodeReference.WithName) {
    return (INodeReference.WithName) oldChild;
  }

  // Locate (or create) the shared WithCount node tracking all references
  // to this child.
  final INodeReference.WithCount count;
  if (oldChild.isReference()) {
    // The only other reference kind a stored child can be is DstReference.
    Preconditions.checkState(oldChild instanceof INodeReference.DstReference);
    count = (INodeReference.WithCount)
        oldChild.asReference().getReferredINode();
  } else {
    count = new INodeReference.WithCount(null, oldChild);
  }
  // Wrap the child in a WithName bound to the latest snapshot and swap it
  // into this directory's children list.
  final INodeReference.WithName newChild = new INodeReference.WithName(
      this, count, oldChild.getLocalNameBytes(), latestSnapshotId);
  replaceChild(oldChild, newChild, null);
  return newChild;
}
项目:hadoop-plus    文件:INodeDirectory.java   
INodeReference.WithName replaceChild4ReferenceWithName(INode oldChild,
    Snapshot latest) {
  // This older variant takes the Snapshot object itself; it must exist.
  Preconditions.checkArgument(latest != null);
  // Already wrapped in a WithName: nothing to do.
  if (oldChild instanceof INodeReference.WithName) {
    return (INodeReference.WithName) oldChild;
  }

  // Locate (or create) the shared WithCount node tracking all references
  // to this child.
  final INodeReference.WithCount count;
  if (oldChild.isReference()) {
    // The only other reference kind a stored child can be is DstReference.
    Preconditions.checkState(oldChild instanceof INodeReference.DstReference);
    count = (INodeReference.WithCount)
        oldChild.asReference().getReferredINode();
  } else {
    count = new INodeReference.WithCount(null, oldChild);
  }
  // Wrap the child in a WithName bound to the latest snapshot and swap it
  // into this directory's children list.
  final INodeReference.WithName newChild = new INodeReference.WithName(
      this, count, oldChild.getLocalNameBytes(), latest.getId());
  replaceChild(oldChild, newChild, null);
  return newChild;
}
项目:FlexMap    文件:FSImageFormatPBSnapshot.java   
private INodeReference loadINodeReference(
    INodeReferenceSection.INodeReference r) throws IOException {
  // Resolve the referred inode from the already-loaded inode map.
  final INode referred = fsDir.getInode(r.getReferredId());
  // Reuse the shared WithCount wrapper if the referred inode already has one
  // installed as its parent reference; otherwise create a fresh wrapper.
  WithCount withCount = (WithCount) referred.getParentReference();
  if (withCount == null) {
    withCount = new INodeReference.WithCount(null, referred);
  }
  // A serialized dst-snapshot id marks a DstReference; otherwise the record
  // describes a WithName reference kept in the rename-source tree.
  if (r.hasDstSnapshotId()) {
    return new INodeReference.DstReference(null, withCount,
        r.getDstSnapshotId());
  }
  return new INodeReference.WithName(null, withCount,
      r.getName().toByteArray(), r.getLastSnapshotId());
}
项目:FlexMap    文件:INodeDirectory.java   
INodeReference.WithName replaceChild4ReferenceWithName(INode oldChild,
    int latestSnapshotId) {
  // A concrete snapshot id is required; the "current state" id is invalid.
  Preconditions.checkArgument(latestSnapshotId != Snapshot.CURRENT_STATE_ID);
  // Already wrapped in a WithName: nothing to do.
  if (oldChild instanceof INodeReference.WithName) {
    return (INodeReference.WithName) oldChild;
  }

  // Locate (or create) the shared WithCount node tracking all references
  // to this child.
  final INodeReference.WithCount count;
  if (oldChild.isReference()) {
    // The only other reference kind a stored child can be is DstReference.
    Preconditions.checkState(oldChild instanceof INodeReference.DstReference);
    count = (INodeReference.WithCount)
        oldChild.asReference().getReferredINode();
  } else {
    count = new INodeReference.WithCount(null, oldChild);
  }
  // Wrap the child in a WithName bound to the latest snapshot and swap it
  // into this directory's children list.
  final INodeReference.WithName newChild = new INodeReference.WithName(
      this, count, oldChild.getLocalNameBytes(), latestSnapshotId);
  replaceChild(oldChild, newChild, null);
  return newChild;
}
项目:hadoop-TCP    文件:INodeDirectory.java   
INodeReference.WithName replaceChild4ReferenceWithName(INode oldChild,
    Snapshot latest) {
  // This older variant takes the Snapshot object itself; it must exist.
  Preconditions.checkArgument(latest != null);
  // Already wrapped in a WithName: nothing to do.
  if (oldChild instanceof INodeReference.WithName) {
    return (INodeReference.WithName) oldChild;
  }

  // Locate (or create) the shared WithCount node tracking all references
  // to this child.
  final INodeReference.WithCount count;
  if (oldChild.isReference()) {
    // The only other reference kind a stored child can be is DstReference.
    Preconditions.checkState(oldChild instanceof INodeReference.DstReference);
    count = (INodeReference.WithCount)
        oldChild.asReference().getReferredINode();
  } else {
    count = new INodeReference.WithCount(null, oldChild);
  }
  // Wrap the child in a WithName bound to the latest snapshot and swap it
  // into this directory's children list.
  final INodeReference.WithName newChild = new INodeReference.WithName(
      this, count, oldChild.getLocalNameBytes(), latest.getId());
  replaceChild(oldChild, newChild, null);
  return newChild;
}
项目:hardfs    文件:INodeDirectory.java   
INodeReference.WithName replaceChild4ReferenceWithName(INode oldChild,
    Snapshot latest) {
  // This older variant takes the Snapshot object itself; it must exist.
  Preconditions.checkArgument(latest != null);
  // Already wrapped in a WithName: nothing to do.
  if (oldChild instanceof INodeReference.WithName) {
    return (INodeReference.WithName) oldChild;
  }

  // Locate (or create) the shared WithCount node tracking all references
  // to this child.
  final INodeReference.WithCount count;
  if (oldChild.isReference()) {
    // The only other reference kind a stored child can be is DstReference.
    Preconditions.checkState(oldChild instanceof INodeReference.DstReference);
    count = (INodeReference.WithCount)
        oldChild.asReference().getReferredINode();
  } else {
    count = new INodeReference.WithCount(null, oldChild);
  }
  // Wrap the child in a WithName bound to the latest snapshot and swap it
  // into this directory's children list.
  final INodeReference.WithName newChild = new INodeReference.WithName(
      this, count, oldChild.getLocalNameBytes(), latest.getId());
  replaceChild(oldChild, newChild, null);
  return newChild;
}
项目:hadoop-on-lustre2    文件:FSImageFormatPBSnapshot.java   
private INodeReference loadINodeReference(
    INodeReferenceSection.INodeReference r) throws IOException {
  // Resolve the referred inode from the already-loaded inode map.
  final INode referred = fsDir.getInode(r.getReferredId());
  // Reuse the shared WithCount wrapper if the referred inode already has one
  // installed as its parent reference; otherwise create a fresh wrapper.
  WithCount withCount = (WithCount) referred.getParentReference();
  if (withCount == null) {
    withCount = new INodeReference.WithCount(null, referred);
  }
  // A serialized dst-snapshot id marks a DstReference; otherwise the record
  // describes a WithName reference kept in the rename-source tree.
  if (r.hasDstSnapshotId()) {
    return new INodeReference.DstReference(null, withCount,
        r.getDstSnapshotId());
  }
  return new INodeReference.WithName(null, withCount,
      r.getName().toByteArray(), r.getLastSnapshotId());
}
项目:hadoop-on-lustre2    文件:INodeDirectory.java   
INodeReference.WithName replaceChild4ReferenceWithName(INode oldChild,
    int latestSnapshotId) {
  // A concrete snapshot id is required; the "current state" id is invalid.
  Preconditions.checkArgument(latestSnapshotId != Snapshot.CURRENT_STATE_ID);
  // Already wrapped in a WithName: nothing to do.
  if (oldChild instanceof INodeReference.WithName) {
    return (INodeReference.WithName) oldChild;
  }

  // Locate (or create) the shared WithCount node tracking all references
  // to this child.
  final INodeReference.WithCount count;
  if (oldChild.isReference()) {
    // The only other reference kind a stored child can be is DstReference.
    Preconditions.checkState(oldChild instanceof INodeReference.DstReference);
    count = (INodeReference.WithCount)
        oldChild.asReference().getReferredINode();
  } else {
    count = new INodeReference.WithCount(null, oldChild);
  }
  // Wrap the child in a WithName bound to the latest snapshot and swap it
  // into this directory's children list.
  final INodeReference.WithName newChild = new INodeReference.WithName(
      this, count, oldChild.getLocalNameBytes(), latestSnapshotId);
  replaceChild(oldChild, newChild, null);
  return newChild;
}
项目:hadoop    文件:DirectorySnapshottableFeature.java   
/**
 * We just found a deleted WithName node as the source of a rename operation.
 * However, we should include it in our snapshot diff report as rename only
 * if the rename target is also under the same snapshottable directory.
 *
 * @param snapshotRoot root of the snapshottable directory
 * @param wn the deleted WithName node found on the rename-source path
 * @param snapshotId id of the snapshot the diff is computed against
 * @return path components from snapshotRoot down to the rename target, or
 *         null if the target is not under snapshotRoot
 */
private byte[][] findRenameTargetPath(final INodeDirectory snapshotRoot,
    INodeReference.WithName wn, final int snapshotId) {
  INode inode = wn.getReferredINode();
  final LinkedList<byte[]> ancestors = Lists.newLinkedList();
  // Walk up from the referred inode towards the root, collecting names.
  while (inode != null) {
    if (inode == snapshotRoot) {
      // Reached the snapshottable root: the rename target is inside it.
      return ancestors.toArray(new byte[ancestors.size()][]);
    }
    if (inode instanceof INodeReference.WithCount) {
      // Follow the reference visible in the given snapshot.
      inode = ((WithCount) inode).getParentRef(snapshotId);
    } else {
      INode parent = inode.getParentReference() != null ? inode
          .getParentReference() : inode.getParent();
      // instanceof is null-safe, so no separate null check is needed.
      if (parent instanceof INodeDirectory) {
        int sid = parent.asDirectory().searchChild(inode);
        // NOTE(review): sid appears to be the snapshot id under which the
        // child is recorded; a value below snapshotId stops the walk —
        // confirm against INodeDirectory#searchChild.
        if (sid < snapshotId) {
          return null;
        }
      }
      if (!(parent instanceof WithCount)) {
        // WithCount nodes carry no name of their own; skip them in the path.
        ancestors.addFirst(inode.getLocalNameBytes());
      }
      inode = parent;
    }
  }
  return null;
}
项目:hadoop    文件:INodeDirectory.java   
/** 
 * Replace the given child with a new child. Note that we no longer need to
 * replace an normal INodeDirectory or INodeFile into an
 * INodeDirectoryWithSnapshot or INodeFileUnderConstruction. The only cases
 * for child replacement is for reference nodes.
 *
 * @param oldChild the child to replace; may be passed either as the node
 *        stored in {@code children} or as the inode it doubly refers to
 * @param newChild the replacement; must have the same local name so the
 *        name-based search below finds the same slot
 * @param inodeMap if non-null, updated to map to {@code newChild}
 */
public void replaceChild(INode oldChild, final INode newChild,
    final INodeMap inodeMap) {
  Preconditions.checkNotNull(children);
  // Locate the slot by name; old and new child share the same local name.
  final int i = searchChildren(newChild.getLocalNameBytes());
  Preconditions.checkState(i >= 0);
  // Accept either the stored node or the inode reached through two levels
  // of reference indirection (e.g. WithName -> WithCount -> inode).
  Preconditions.checkState(oldChild == children.get(i)
      || oldChild == children.get(i).asReference().getReferredINode()
          .asReference().getReferredINode());
  // Normalize to the node actually stored in the children list.
  oldChild = children.get(i);

  if (oldChild.isReference() && newChild.isReference()) {
    // both are reference nodes, e.g., DstReference -> WithName;
    // drop the old reference from the shared WithCount bookkeeping.
    final INodeReference.WithCount withCount = 
        (WithCount) oldChild.asReference().getReferredINode();
    withCount.removeReference(oldChild.asReference());
  }
  children.set(i, newChild);

  // replace the instance in the created list of the diff list
  DirectoryWithSnapshotFeature sf = this.getDirectoryWithSnapshotFeature();
  if (sf != null) {
    sf.getDiffs().replaceChild(ListType.CREATED, oldChild, newChild);
  }

  // update the inodeMap
  if (inodeMap != null) {
    inodeMap.put(newChild);
  }    
}
项目:aliyun-oss-hadoop-fs    文件:DirectorySnapshottableFeature.java   
/**
 * We just found a deleted WithName node as the source of a rename operation.
 * However, we should include it in our snapshot diff report as rename only
 * if the rename target is also under the same snapshottable directory.
 *
 * @param snapshotRoot root of the snapshottable directory
 * @param wn the deleted WithName node found on the rename-source path
 * @param snapshotId id of the snapshot the diff is computed against
 * @return path components from snapshotRoot down to the rename target, or
 *         null if the target is not under snapshotRoot
 */
private byte[][] findRenameTargetPath(final INodeDirectory snapshotRoot,
    INodeReference.WithName wn, final int snapshotId) {
  INode inode = wn.getReferredINode();
  final LinkedList<byte[]> ancestors = Lists.newLinkedList();
  // Walk up from the referred inode towards the root, collecting names.
  while (inode != null) {
    if (inode == snapshotRoot) {
      // Reached the snapshottable root: the rename target is inside it.
      return ancestors.toArray(new byte[ancestors.size()][]);
    }
    if (inode instanceof INodeReference.WithCount) {
      // Follow the reference visible in the given snapshot.
      inode = ((WithCount) inode).getParentRef(snapshotId);
    } else {
      INode parent = inode.getParentReference() != null ? inode
          .getParentReference() : inode.getParent();
      // instanceof is null-safe, so no separate null check is needed.
      if (parent instanceof INodeDirectory) {
        int sid = parent.asDirectory().searchChild(inode);
        // NOTE(review): sid appears to be the snapshot id under which the
        // child is recorded; a value below snapshotId stops the walk —
        // confirm against INodeDirectory#searchChild.
        if (sid < snapshotId) {
          return null;
        }
      }
      if (!(parent instanceof WithCount)) {
        // WithCount nodes carry no name of their own; skip them in the path.
        ancestors.addFirst(inode.getLocalNameBytes());
      }
      inode = parent;
    }
  }
  return null;
}
项目:aliyun-oss-hadoop-fs    文件:INodeDirectory.java   
/** 
 * Replace the given child with a new child. Note that we no longer need to
 * replace an normal INodeDirectory or INodeFile into an
 * INodeDirectoryWithSnapshot or INodeFileUnderConstruction. The only cases
 * for child replacement is for reference nodes.
 *
 * @param oldChild the child to replace; may be passed either as the node
 *        stored in {@code children} or as the inode it doubly refers to
 * @param newChild the replacement; must have the same local name so the
 *        name-based search below finds the same slot
 * @param inodeMap if non-null, updated to map to {@code newChild}
 */
public void replaceChild(INode oldChild, final INode newChild,
    final INodeMap inodeMap) {
  Preconditions.checkNotNull(children);
  // Locate the slot by name; old and new child share the same local name.
  final int i = searchChildren(newChild.getLocalNameBytes());
  Preconditions.checkState(i >= 0);
  // Accept either the stored node or the inode reached through two levels
  // of reference indirection (e.g. WithName -> WithCount -> inode).
  Preconditions.checkState(oldChild == children.get(i)
      || oldChild == children.get(i).asReference().getReferredINode()
          .asReference().getReferredINode());
  // Normalize to the node actually stored in the children list.
  oldChild = children.get(i);

  if (oldChild.isReference() && newChild.isReference()) {
    // both are reference nodes, e.g., DstReference -> WithName;
    // drop the old reference from the shared WithCount bookkeeping.
    final INodeReference.WithCount withCount = 
        (WithCount) oldChild.asReference().getReferredINode();
    withCount.removeReference(oldChild.asReference());
  }
  children.set(i, newChild);

  // replace the instance in the created list of the diff list
  DirectoryWithSnapshotFeature sf = this.getDirectoryWithSnapshotFeature();
  if (sf != null) {
    sf.getDiffs().replaceChild(ListType.CREATED, oldChild, newChild);
  }

  // update the inodeMap
  if (inodeMap != null) {
    inodeMap.put(newChild);
  }    
}
项目:big-c    文件:DirectorySnapshottableFeature.java   
/**
 * We just found a deleted WithName node as the source of a rename operation.
 * However, we should include it in our snapshot diff report as rename only
 * if the rename target is also under the same snapshottable directory.
 *
 * @param snapshotRoot root of the snapshottable directory
 * @param wn the deleted WithName node found on the rename-source path
 * @param snapshotId id of the snapshot the diff is computed against
 * @return path components from snapshotRoot down to the rename target, or
 *         null if the target is not under snapshotRoot
 */
private byte[][] findRenameTargetPath(final INodeDirectory snapshotRoot,
    INodeReference.WithName wn, final int snapshotId) {
  INode inode = wn.getReferredINode();
  final LinkedList<byte[]> ancestors = Lists.newLinkedList();
  // Walk up from the referred inode towards the root, collecting names.
  while (inode != null) {
    if (inode == snapshotRoot) {
      // Reached the snapshottable root: the rename target is inside it.
      return ancestors.toArray(new byte[ancestors.size()][]);
    }
    if (inode instanceof INodeReference.WithCount) {
      // Follow the reference visible in the given snapshot.
      inode = ((WithCount) inode).getParentRef(snapshotId);
    } else {
      INode parent = inode.getParentReference() != null ? inode
          .getParentReference() : inode.getParent();
      // instanceof is null-safe, so no separate null check is needed.
      if (parent instanceof INodeDirectory) {
        int sid = parent.asDirectory().searchChild(inode);
        // NOTE(review): sid appears to be the snapshot id under which the
        // child is recorded; a value below snapshotId stops the walk —
        // confirm against INodeDirectory#searchChild.
        if (sid < snapshotId) {
          return null;
        }
      }
      if (!(parent instanceof WithCount)) {
        // WithCount nodes carry no name of their own; skip them in the path.
        ancestors.addFirst(inode.getLocalNameBytes());
      }
      inode = parent;
    }
  }
  return null;
}
项目:big-c    文件:INodeDirectory.java   
/** 
 * Replace the given child with a new child. Note that we no longer need to
 * replace an normal INodeDirectory or INodeFile into an
 * INodeDirectoryWithSnapshot or INodeFileUnderConstruction. The only cases
 * for child replacement is for reference nodes.
 *
 * @param oldChild the child to replace; may be passed either as the node
 *        stored in {@code children} or as the inode it doubly refers to
 * @param newChild the replacement; must have the same local name so the
 *        name-based search below finds the same slot
 * @param inodeMap if non-null, updated to map to {@code newChild}
 */
public void replaceChild(INode oldChild, final INode newChild,
    final INodeMap inodeMap) {
  Preconditions.checkNotNull(children);
  // Locate the slot by name; old and new child share the same local name.
  final int i = searchChildren(newChild.getLocalNameBytes());
  Preconditions.checkState(i >= 0);
  // Accept either the stored node or the inode reached through two levels
  // of reference indirection (e.g. WithName -> WithCount -> inode).
  Preconditions.checkState(oldChild == children.get(i)
      || oldChild == children.get(i).asReference().getReferredINode()
          .asReference().getReferredINode());
  // Normalize to the node actually stored in the children list.
  oldChild = children.get(i);

  if (oldChild.isReference() && newChild.isReference()) {
    // both are reference nodes, e.g., DstReference -> WithName;
    // drop the old reference from the shared WithCount bookkeeping.
    final INodeReference.WithCount withCount = 
        (WithCount) oldChild.asReference().getReferredINode();
    withCount.removeReference(oldChild.asReference());
  }
  children.set(i, newChild);

  // replace the instance in the created list of the diff list
  DirectoryWithSnapshotFeature sf = this.getDirectoryWithSnapshotFeature();
  if (sf != null) {
    sf.getDiffs().replaceChild(ListType.CREATED, oldChild, newChild);
  }

  // update the inodeMap
  if (inodeMap != null) {
    inodeMap.put(newChild);
  }    
}
项目:hadoop-2.6.0-cdh5.4.3    文件:DirectorySnapshottableFeature.java   
/**
 * We just found a deleted WithName node as the source of a rename operation.
 * However, we should include it in our snapshot diff report as rename only
 * if the rename target is also under the same snapshottable directory.
 *
 * @param snapshotRoot root of the snapshottable directory
 * @param wn the deleted WithName node found on the rename-source path
 * @param snapshotId id of the snapshot the diff is computed against
 * @return path components from snapshotRoot down to the rename target, or
 *         null if the target is not under snapshotRoot
 */
private byte[][] findRenameTargetPath(final INodeDirectory snapshotRoot,
    INodeReference.WithName wn, final int snapshotId) {
  INode inode = wn.getReferredINode();
  final LinkedList<byte[]> ancestors = Lists.newLinkedList();
  // Walk up from the referred inode towards the root, collecting names.
  while (inode != null) {
    if (inode == snapshotRoot) {
      // Reached the snapshottable root: the rename target is inside it.
      return ancestors.toArray(new byte[ancestors.size()][]);
    }
    if (inode instanceof INodeReference.WithCount) {
      // Follow the reference visible in the given snapshot.
      inode = ((WithCount) inode).getParentRef(snapshotId);
    } else {
      INode parent = inode.getParentReference() != null ? inode
          .getParentReference() : inode.getParent();
      // instanceof is null-safe, so no separate null check is needed.
      if (parent instanceof INodeDirectory) {
        int sid = parent.asDirectory().searchChild(inode);
        // NOTE(review): sid appears to be the snapshot id under which the
        // child is recorded; a value below snapshotId stops the walk —
        // confirm against INodeDirectory#searchChild.
        if (sid < snapshotId) {
          return null;
        }
      }
      if (!(parent instanceof WithCount)) {
        // WithCount nodes carry no name of their own; skip them in the path.
        ancestors.addFirst(inode.getLocalNameBytes());
      }
      inode = parent;
    }
  }
  return null;
}
项目:hadoop-2.6.0-cdh5.4.3    文件:INodeDirectory.java   
/** 
 * Replace the given child with a new child. Note that we no longer need to
 * replace an normal INodeDirectory or INodeFile into an
 * INodeDirectoryWithSnapshot or INodeFileUnderConstruction. The only cases
 * for child replacement is for reference nodes.
 *
 * @param oldChild the child to replace; may be passed either as the node
 *        stored in {@code children} or as the inode it doubly refers to
 * @param newChild the replacement; must have the same local name so the
 *        name-based search below finds the same slot
 * @param inodeMap if non-null, updated to map to {@code newChild}
 */
public void replaceChild(INode oldChild, final INode newChild,
    final INodeMap inodeMap) {
  Preconditions.checkNotNull(children);
  // Locate the slot by name; old and new child share the same local name.
  final int i = searchChildren(newChild.getLocalNameBytes());
  Preconditions.checkState(i >= 0);
  // Accept either the stored node or the inode reached through two levels
  // of reference indirection (e.g. WithName -> WithCount -> inode).
  Preconditions.checkState(oldChild == children.get(i)
      || oldChild == children.get(i).asReference().getReferredINode()
          .asReference().getReferredINode());
  // Normalize to the node actually stored in the children list.
  oldChild = children.get(i);

  if (oldChild.isReference() && newChild.isReference()) {
    // both are reference nodes, e.g., DstReference -> WithName;
    // drop the old reference from the shared WithCount bookkeeping.
    final INodeReference.WithCount withCount = 
        (WithCount) oldChild.asReference().getReferredINode();
    withCount.removeReference(oldChild.asReference());
  }
  children.set(i, newChild);

  // replace the instance in the created list of the diff list
  DirectoryWithSnapshotFeature sf = this.getDirectoryWithSnapshotFeature();
  if (sf != null) {
    sf.getDiffs().replaceChild(ListType.CREATED, oldChild, newChild);
  }

  // update the inodeMap
  if (inodeMap != null) {
    inodeMap.put(newChild);
  }    
}
项目:FlexMap    文件:DirectorySnapshottableFeature.java   
/**
 * We just found a deleted WithName node as the source of a rename operation.
 * However, we should include it in our snapshot diff report as rename only
 * if the rename target is also under the same snapshottable directory.
 *
 * @param snapshotRoot root of the snapshottable directory
 * @param wn the deleted WithName node found on the rename-source path
 * @param snapshotId id of the snapshot the diff is computed against
 * @return path components from snapshotRoot down to the rename target, or
 *         null if the target is not under snapshotRoot
 */
private byte[][] findRenameTargetPath(final INodeDirectory snapshotRoot,
    INodeReference.WithName wn, final int snapshotId) {
  INode inode = wn.getReferredINode();
  final LinkedList<byte[]> ancestors = Lists.newLinkedList();
  // Walk up from the referred inode towards the root, collecting names.
  while (inode != null) {
    if (inode == snapshotRoot) {
      // Reached the snapshottable root: the rename target is inside it.
      return ancestors.toArray(new byte[ancestors.size()][]);
    }
    if (inode instanceof INodeReference.WithCount) {
      // Follow the reference visible in the given snapshot.
      inode = ((WithCount) inode).getParentRef(snapshotId);
    } else {
      INode parent = inode.getParentReference() != null ? inode
          .getParentReference() : inode.getParent();
      // instanceof is null-safe, so no separate null check is needed.
      if (parent instanceof INodeDirectory) {
        int sid = parent.asDirectory().searchChild(inode);
        // NOTE(review): sid appears to be the snapshot id under which the
        // child is recorded; a value below snapshotId stops the walk —
        // confirm against INodeDirectory#searchChild.
        if (sid < snapshotId) {
          return null;
        }
      }
      if (!(parent instanceof WithCount)) {
        // WithCount nodes carry no name of their own; skip them in the path.
        ancestors.addFirst(inode.getLocalNameBytes());
      }
      inode = parent;
    }
  }
  return null;
}
项目:FlexMap    文件:INodeDirectory.java   
/** 
 * Replace the given child with a new child. Note that we no longer need to
 * replace an normal INodeDirectory or INodeFile into an
 * INodeDirectoryWithSnapshot or INodeFileUnderConstruction. The only cases
 * for child replacement is for reference nodes.
 *
 * @param oldChild the child to replace; may be passed either as the node
 *        stored in {@code children} or as the inode it doubly refers to
 * @param newChild the replacement; must have the same local name so the
 *        name-based search below finds the same slot
 * @param inodeMap if non-null, updated to map to {@code newChild}
 */
public void replaceChild(INode oldChild, final INode newChild,
    final INodeMap inodeMap) {
  Preconditions.checkNotNull(children);
  // Locate the slot by name; old and new child share the same local name.
  final int i = searchChildren(newChild.getLocalNameBytes());
  Preconditions.checkState(i >= 0);
  // Accept either the stored node or the inode reached through two levels
  // of reference indirection (e.g. WithName -> WithCount -> inode).
  Preconditions.checkState(oldChild == children.get(i)
      || oldChild == children.get(i).asReference().getReferredINode()
          .asReference().getReferredINode());
  // Normalize to the node actually stored in the children list.
  oldChild = children.get(i);

  if (oldChild.isReference() && newChild.isReference()) {
    // both are reference nodes, e.g., DstReference -> WithName;
    // drop the old reference from the shared WithCount bookkeeping.
    final INodeReference.WithCount withCount = 
        (WithCount) oldChild.asReference().getReferredINode();
    withCount.removeReference(oldChild.asReference());
  }
  children.set(i, newChild);

  // replace the instance in the created list of the diff list
  DirectoryWithSnapshotFeature sf = this.getDirectoryWithSnapshotFeature();
  if (sf != null) {
    sf.getDiffs().replaceChild(ListType.CREATED, oldChild, newChild);
  }

  // update the inodeMap
  if (inodeMap != null) {
    inodeMap.put(newChild);
  }    
}
项目:hadoop-on-lustre2    文件:INodeDirectory.java   
/** 
 * Replace the given child with a new child. Note that we no longer need to
 * replace an normal INodeDirectory or INodeFile into an
 * INodeDirectoryWithSnapshot or INodeFileUnderConstruction. The only cases
 * for child replacement is for {@link INodeDirectorySnapshottable} and 
 * reference nodes.
 *
 * @param oldChild the child to replace; may be passed either as the node
 *        stored in {@code children} or as the inode it doubly refers to
 * @param newChild the replacement; must have the same local name so the
 *        name-based search below finds the same slot
 * @param inodeMap if non-null, updated to map to {@code newChild}
 */
public void replaceChild(INode oldChild, final INode newChild,
    final INodeMap inodeMap) {
  Preconditions.checkNotNull(children);
  // Locate the slot by name; old and new child share the same local name.
  final int i = searchChildren(newChild.getLocalNameBytes());
  Preconditions.checkState(i >= 0);
  // Accept either the stored node or the inode reached through two levels
  // of reference indirection (e.g. WithName -> WithCount -> inode).
  Preconditions.checkState(oldChild == children.get(i)
      || oldChild == children.get(i).asReference().getReferredINode()
          .asReference().getReferredINode());
  // Normalize to the node actually stored in the children list.
  oldChild = children.get(i);

  if (oldChild.isReference() && newChild.isReference()) {
    // both are reference nodes, e.g., DstReference -> WithName;
    // drop the old reference from the shared WithCount bookkeeping.
    final INodeReference.WithCount withCount = 
        (WithCount) oldChild.asReference().getReferredINode();
    withCount.removeReference(oldChild.asReference());
  }
  children.set(i, newChild);

  // replace the instance in the created list of the diff list
  DirectoryWithSnapshotFeature sf = this.getDirectoryWithSnapshotFeature();
  if (sf != null) {
    sf.getDiffs().replaceChild(ListType.CREATED, oldChild, newChild);
  }

  // update the inodeMap
  if (inodeMap != null) {
    inodeMap.put(newChild);
  }    
}
项目:hadoop    文件:TestRenameWithSnapshots.java   
@Test (timeout=300000)
public void testRenameFromSDir2NonSDir() throws Exception {
  // Scenario: renaming a snapshottable directory with live snapshots must
  // fail, while renaming a file out of it leaves reference nodes behind.
  final String dirStr = "/testRenameWithSnapshot";
  final String abcStr = dirStr + "/abc";
  final Path abc = new Path(abcStr);
  hdfs.mkdirs(abc, new FsPermission((short)0777));
  hdfs.allowSnapshot(abc);

  final Path foo = new Path(abc, "foo");
  DFSTestUtil.createFile(hdfs, foo, BLOCKSIZE, REPL, SEED);
  hdfs.createSnapshot(abc, "s0");

  try {
    // Moving the snapshottable root itself must be rejected.
    hdfs.rename(abc, new Path(dirStr, "tmp"));
    fail("Expect exception since " + abc
        + " is snapshottable and already has snapshots");
  } catch (IOException e) {
    GenericTestUtils.assertExceptionContains(abcStr
        + " is snapshottable and already has snapshots", e);
  }

  final String xyzStr = dirStr + "/xyz";
  final Path xyz = new Path(xyzStr);
  hdfs.mkdirs(xyz, new FsPermission((short)0777));
  final Path bar = new Path(xyz, "bar");
  // Rename foo out of the snapshottable dir to a non-snapshottable one.
  hdfs.rename(foo, bar);

  // The snapshot copy of foo should now be a WithName reference node.
  final INode fooRef = fsdir.getINode(
      SnapshotTestHelper.getSnapshotPath(abc, "s0", "foo").toString());
  Assert.assertTrue(fooRef.isReference());
  Assert.assertTrue(fooRef.asReference() instanceof INodeReference.WithName);

  // Two references share one WithCount: the snapshot copy and the dst.
  final INodeReference.WithCount withCount
      = (INodeReference.WithCount)fooRef.asReference().getReferredINode();
  Assert.assertEquals(2, withCount.getReferenceCount());

  final INode barRef = fsdir.getINode(bar.toString());
  Assert.assertTrue(barRef.isReference());

  // Both references must point at the same WithCount instance.
  Assert.assertSame(withCount, barRef.asReference().getReferredINode());

  // Deleting the rename target drops one reference.
  hdfs.delete(bar, false);
  Assert.assertEquals(1, withCount.getReferenceCount());
}
项目:hadoop    文件:TestRenameWithSnapshots.java   
/**
 * Test undo where dst node being overwritten is a reference node
 */
@Test
public void testRenameUndo_4() throws Exception {
  final Path sdir1 = new Path("/dir1");
  final Path sdir2 = new Path("/dir2");
  final Path sdir3 = new Path("/dir3");
  hdfs.mkdirs(sdir1);
  hdfs.mkdirs(sdir2);
  hdfs.mkdirs(sdir3);

  final Path foo = new Path(sdir1, "foo");
  final Path bar = new Path(foo, "bar");
  DFSTestUtil.createFile(hdfs, bar, BLOCKSIZE, REPL, SEED);

  final Path foo2 = new Path(sdir2, "foo2");
  hdfs.mkdirs(foo2);

  SnapshotTestHelper.createSnapshot(hdfs, sdir1, "s1");
  SnapshotTestHelper.createSnapshot(hdfs, sdir2, "s2");

  // rename foo2 to foo3, so that foo3 will be a reference node
  final Path foo3 = new Path(sdir3, "foo3");
  hdfs.rename(foo2, foo3);

  INode foo3Node = fsdir.getINode4Write(foo3.toString());
  assertTrue(foo3Node.isReference());

  INodeDirectory dir3 = fsdir.getINode4Write(sdir3.toString()).asDirectory();
  INodeDirectory mockDir3 = spy(dir3);
  // fail the rename but succeed in undo: the mocked addChild returns false
  // on the first call (forcing the rename to fail partway) and then
  // delegates to the real method so the undo path can succeed.
  doReturn(false).when(mockDir3).addChild((INode) Mockito.isNull(),
      anyBoolean(), Mockito.anyInt());
  Mockito.when(mockDir3.addChild((INode) Mockito.isNotNull(), anyBoolean(), 
      Mockito.anyInt())).thenReturn(false).thenCallRealMethod();
  INodeDirectory root = fsdir.getINode4Write("/").asDirectory();
  // Install the spy in place of the real dir3 so the rename hits it.
  root.replaceChild(dir3, mockDir3, fsdir.getINodeMap());
  foo3Node.setParent(mockDir3);

  try {
    // Overwrite-rename onto the reference node foo3; expected to fail.
    hdfs.rename(foo, foo3, Rename.OVERWRITE);
    fail("the rename from " + foo + " to " + foo3 + " should fail");
  } catch (IOException e) {
    GenericTestUtils.assertExceptionContains("rename from " + foo + " to "
        + foo3 + " failed.", e);
  }

  // make sure the undo is correct: the original reference node is back in
  // place and its WithCount bookkeeping is restored.
  final INode foo3Node_undo = fsdir.getINode4Write(foo3.toString());
  assertSame(foo3Node, foo3Node_undo);
  INodeReference.WithCount foo3_wc = (WithCount) foo3Node.asReference()
      .getReferredINode();
  assertEquals(2, foo3_wc.getReferenceCount());
  assertSame(foo3Node, foo3_wc.getParentReference());
}
项目:hadoop    文件:TestRenameWithSnapshots.java   
/**
 * After the following operations:
 * Rename a dir -> create a snapshot s on dst tree -> delete the renamed dir
 * -> delete snapshot s on dst tree
 * 
 * Make sure we destroy everything created after the rename under the renamed
 * dir.
 */
@Test
public void testRenameDirAndDeleteSnapshot_3() throws Exception {
  final Path sdir1 = new Path("/dir1");
  final Path sdir2 = new Path("/dir2");
  final Path foo = new Path(sdir1, "foo");
  final Path bar = new Path(foo, "bar");
  DFSTestUtil.createFile(hdfs, bar, BLOCKSIZE, REPL, SEED);
  hdfs.mkdirs(sdir2);

  SnapshotTestHelper.createSnapshot(hdfs, sdir1, "s1");
  SnapshotTestHelper.createSnapshot(hdfs, sdir2, "s2");

  final Path foo2 = new Path(sdir2, "foo");
  hdfs.rename(foo, foo2);

  // create two new files under foo2
  final Path bar2 = new Path(foo2, "bar2");
  DFSTestUtil.createFile(hdfs, bar2, BLOCKSIZE, REPL, SEED);
  final Path bar3 = new Path(foo2, "bar3");
  DFSTestUtil.createFile(hdfs, bar3, BLOCKSIZE, REPL, SEED);

  // create a new snapshot on sdir2
  hdfs.createSnapshot(sdir2, "s3");

  // delete foo2
  hdfs.delete(foo2, true);
  // delete s3
  hdfs.deleteSnapshot(sdir2, "s3");

  // check namespace quota usage after bar2/bar3 were destroyed with s3
  final INodeDirectory dir1Node = fsdir.getINode4Write(sdir1.toString())
      .asDirectory();
  QuotaCounts q1 = dir1Node.getDirectoryWithQuotaFeature().getSpaceConsumed();
  // dir1 + foo (snapshot copy) + bar
  assertEquals(3, q1.getNameSpace());
  final INodeDirectory dir2Node = fsdir.getINode4Write(sdir2.toString())
      .asDirectory();
  QuotaCounts q2 = dir2Node.getDirectoryWithQuotaFeature().getSpaceConsumed();
  // only dir2 itself remains
  assertEquals(1, q2.getNameSpace());

  // the snapshot copy of foo must survive as a WithName reference with a
  // single remaining reference.
  final Path foo_s1 = SnapshotTestHelper.getSnapshotPath(sdir1, "s1",
      foo.getName());
  INode fooRef = fsdir.getINode(foo_s1.toString());
  assertTrue(fooRef instanceof INodeReference.WithName);
  INodeReference.WithCount wc = 
      (WithCount) fooRef.asReference().getReferredINode();
  assertEquals(1, wc.getReferenceCount());
  INodeDirectory fooNode = wc.getReferredINode().asDirectory();
  // bar (created before the rename) is still a child; bar2/bar3 are gone.
  ReadOnlyList<INode> children = fooNode
      .getChildrenList(Snapshot.CURRENT_STATE_ID);
  assertEquals(1, children.size());
  assertEquals(bar.getName(), children.get(0).getLocalName());
  // exactly one remaining directory diff, bound to snapshot s1, with empty
  // created/deleted lists.
  List<DirectoryDiff> diffList = fooNode.getDiffs().asList();
  assertEquals(1, diffList.size());
  Snapshot s1 = dir1Node.getSnapshot(DFSUtil.string2Bytes("s1"));
  assertEquals(s1.getId(), diffList.get(0).getSnapshotId());
  ChildrenDiff diff = diffList.get(0).getChildrenDiff();
  assertEquals(0, diff.getList(ListType.CREATED).size());
  assertEquals(0, diff.getList(ListType.DELETED).size());

  // restart the cluster and verify the fsimage round-trips this state.
  restartClusterAndCheckImage(true);
}
项目:hadoop    文件:TestRenameWithSnapshots.java   
/**
 * After the following operations:
 * Rename a dir -> create a snapshot s on dst tree -> rename the renamed dir
 * again -> delete snapshot s on dst tree
 * 
 * Make sure we only delete the snapshot s under the renamed dir.
 */
@Test
public void testRenameDirAndDeleteSnapshot_4() throws Exception {
  final Path sdir1 = new Path("/dir1");
  final Path sdir2 = new Path("/dir2");
  final Path foo = new Path(sdir1, "foo");
  final Path bar = new Path(foo, "bar");
  DFSTestUtil.createFile(hdfs, bar, BLOCKSIZE, REPL, SEED);
  hdfs.mkdirs(sdir2);

  // snapshots on both trees so the renames below create reference nodes
  // instead of plain moves
  SnapshotTestHelper.createSnapshot(hdfs, sdir1, "s1");
  SnapshotTestHelper.createSnapshot(hdfs, sdir2, "s2");

  final Path foo2 = new Path(sdir2, "foo");
  hdfs.rename(foo, foo2);

  // create two new files under foo2
  final Path bar2 = new Path(foo2, "bar2");
  DFSTestUtil.createFile(hdfs, bar2, BLOCKSIZE, REPL, SEED);
  final Path bar3 = new Path(foo2, "bar3");
  DFSTestUtil.createFile(hdfs, bar3, BLOCKSIZE, REPL, SEED);

  // create a new snapshot on sdir2
  hdfs.createSnapshot(sdir2, "s3");

  // rename foo2 again
  hdfs.rename(foo2, foo);
  // delete snapshot s3: only foo's record under s3 should go away; the
  // directory itself (now back under sdir1) must survive with bar2/bar3
  hdfs.deleteSnapshot(sdir2, "s3");

  // check
  final INodeDirectory dir1Node = fsdir.getINode4Write(sdir1.toString())
      .asDirectory();
  // sdir1 + s1 + foo_s1 (foo) + foo (foo + s1 + bar~bar3)
  QuotaCounts q1 = dir1Node.getDirectoryWithQuotaFeature().getSpaceConsumed();
  assertEquals(7, q1.getNameSpace());
  final INodeDirectory dir2Node = fsdir.getINode4Write(sdir2.toString())
      .asDirectory();
  // sdir2 itself only: foo moved away and s3 has been deleted
  QuotaCounts q2 = dir2Node.getDirectoryWithQuotaFeature().getSpaceConsumed();
  assertEquals(1, q2.getNameSpace());

  // the copy recorded in s1 must be a WithName reference to foo's inode
  final Path foo_s1 = SnapshotTestHelper.getSnapshotPath(sdir1, "s1",
      foo.getName());
  final INode fooRef = fsdir.getINode(foo_s1.toString());
  assertTrue(fooRef instanceof INodeReference.WithName);
  INodeReference.WithCount wc =
      (WithCount) fooRef.asReference().getReferredINode();
  // two live references: the s1 WithName and the current DstReference
  assertEquals(2, wc.getReferenceCount());
  INodeDirectory fooNode = wc.getReferredINode().asDirectory();
  ReadOnlyList<INode> children = fooNode
      .getChildrenList(Snapshot.CURRENT_STATE_ID);
  // bar2/bar3 (created after the first rename) must still be present
  assertEquals(3, children.size());
  assertEquals(bar.getName(), children.get(0).getLocalName());
  assertEquals(bar2.getName(), children.get(1).getLocalName());
  assertEquals(bar3.getName(), children.get(2).getLocalName());
  List<DirectoryDiff> diffList = fooNode.getDiffs().asList();
  // only the s1 diff remains after s3 is deleted
  assertEquals(1, diffList.size());
  Snapshot s1 = dir1Node.getSnapshot(DFSUtil.string2Bytes("s1"));
  assertEquals(s1.getId(), diffList.get(0).getSnapshotId());
  ChildrenDiff diff = diffList.get(0).getChildrenDiff();
  // bar2 and bar3 in the created list
  assertEquals(2, diff.getList(ListType.CREATED).size());
  assertEquals(0, diff.getList(ListType.DELETED).size());

  // the current foo is the DstReference sharing the same WithCount node
  final INode fooRef2 = fsdir.getINode4Write(foo.toString());
  assertTrue(fooRef2 instanceof INodeReference.DstReference);
  INodeReference.WithCount wc2 =
      (WithCount) fooRef2.asReference().getReferredINode();
  assertSame(wc, wc2);
  assertSame(fooRef2, wc.getParentReference());

  // verify the state survives saving/loading the fsimage
  restartClusterAndCheckImage(true);
}
Project: aliyun-oss-hadoop-fs    File: TestRenameWithSnapshots.java
/**
 * Renaming a snapshottable directory that already has snapshots must fail;
 * renaming a file out of it must leave a WithName reference in the snapshot
 * that shares one WithCount node with the live copy.
 */
@Test (timeout=300000)
public void testRenameFromSDir2NonSDir() throws Exception {
  final String dirStr = "/testRenameWithSnapshot";
  final String abcStr = dirStr + "/abc";
  final Path abc = new Path(abcStr);
  hdfs.mkdirs(abc, new FsPermission((short)0777));
  hdfs.allowSnapshot(abc);

  final Path foo = new Path(abc, "foo");
  DFSTestUtil.createFile(hdfs, foo, BLOCKSIZE, REPL, SEED);
  hdfs.createSnapshot(abc, "s0");

  try {
    // renaming a snapshottable dir that has snapshots must be rejected
    hdfs.rename(abc, new Path(dirStr, "tmp"));
    fail("Expect exception since " + abc
        + " is snapshottable and already has snapshots");
  } catch (IOException e) {
    GenericTestUtils.assertExceptionContains(abcStr
        + " is snapshottable and already has snapshots", e);
  }

  final String xyzStr = dirStr + "/xyz";
  final Path xyz = new Path(xyzStr);
  hdfs.mkdirs(xyz, new FsPermission((short)0777));
  final Path bar = new Path(xyz, "bar");
  // move foo out of the snapshottable dir into a non-snapshottable one
  hdfs.rename(foo, bar);

  // the copy preserved in snapshot s0 must now be a WithName reference
  final INode fooRef = fsdir.getINode(
      SnapshotTestHelper.getSnapshotPath(abc, "s0", "foo").toString());
  Assert.assertTrue(fooRef.isReference());
  Assert.assertTrue(fooRef.asReference() instanceof INodeReference.WithName);

  // two references to the same inode: the snapshot copy and the live file
  final INodeReference.WithCount withCount
      = (INodeReference.WithCount)fooRef.asReference().getReferredINode();
  Assert.assertEquals(2, withCount.getReferenceCount());

  final INode barRef = fsdir.getINode(bar.toString());
  Assert.assertTrue(barRef.isReference());

  // both references share the same WithCount node
  Assert.assertSame(withCount, barRef.asReference().getReferredINode());

  // deleting the live copy leaves only the snapshot reference
  hdfs.delete(bar, false);
  Assert.assertEquals(1, withCount.getReferenceCount());
}
Project: aliyun-oss-hadoop-fs    File: TestRenameWithSnapshots.java
/**
 * Test undo where dst node being overwritten is a reference node
 */
@Test
public void testRenameUndo_4() throws Exception {
  final Path sdir1 = new Path("/dir1");
  final Path sdir2 = new Path("/dir2");
  final Path sdir3 = new Path("/dir3");
  hdfs.mkdirs(sdir1);
  hdfs.mkdirs(sdir2);
  hdfs.mkdirs(sdir3);

  final Path foo = new Path(sdir1, "foo");
  final Path bar = new Path(foo, "bar");
  DFSTestUtil.createFile(hdfs, bar, BLOCKSIZE, REPL, SEED);

  final Path foo2 = new Path(sdir2, "foo2");
  hdfs.mkdirs(foo2);

  SnapshotTestHelper.createSnapshot(hdfs, sdir1, "s1");
  SnapshotTestHelper.createSnapshot(hdfs, sdir2, "s2");

  // rename foo2 to foo3, so that foo3 will be a reference node
  final Path foo3 = new Path(sdir3, "foo3");
  hdfs.rename(foo2, foo3);

  INode foo3Node = fsdir.getINode4Write(foo3.toString());
  assertTrue(foo3Node.isReference());

  INodeDirectory dir3 = fsdir.getINode4Write(sdir3.toString()).asDirectory();
  INodeDirectory mockDir3 = spy(dir3);
  // fail the rename but succeed in undo:
  // the first non-null addChild call returns false (forces rename failure);
  // the second call -- the undo re-insertion -- falls through to the real
  // method via thenCallRealMethod()
  doReturn(false).when(mockDir3).addChild((INode) Mockito.isNull(),
      anyBoolean(), Mockito.anyInt());
  Mockito.when(mockDir3.addChild((INode) Mockito.isNotNull(), anyBoolean(),
      Mockito.anyInt())).thenReturn(false).thenCallRealMethod();
  INodeDirectory root = fsdir.getINode4Write("/").asDirectory();
  // splice the spy into the tree so the rename path goes through it
  root.replaceChild(dir3, mockDir3, fsdir.getINodeMap());
  foo3Node.setParent(mockDir3);

  try {
    hdfs.rename(foo, foo3, Rename.OVERWRITE);
    fail("the rename from " + foo + " to " + foo3 + " should fail");
  } catch (IOException e) {
    GenericTestUtils.assertExceptionContains("rename from " + foo + " to "
        + foo3 + " failed.", e);
  }

  // make sure the undo is correct: the overwritten dst reference must be
  // fully restored -- same node, the WithCount still counts both
  // references, and foo3 is its parent reference again
  final INode foo3Node_undo = fsdir.getINode4Write(foo3.toString());
  assertSame(foo3Node, foo3Node_undo);
  INodeReference.WithCount foo3_wc = (WithCount) foo3Node.asReference()
      .getReferredINode();
  assertEquals(2, foo3_wc.getReferenceCount());
  assertSame(foo3Node, foo3_wc.getParentReference());
}
Project: aliyun-oss-hadoop-fs    File: TestRenameWithSnapshots.java
/**
 * After the following operations:
 * Rename a dir -> create a snapshot s on dst tree -> delete the renamed dir
 * -> delete snapshot s on dst tree
 * 
 * Make sure we destroy everything created after the rename under the renamed
 * dir.
 */
@Test
public void testRenameDirAndDeleteSnapshot_3() throws Exception {
  final Path sdir1 = new Path("/dir1");
  final Path sdir2 = new Path("/dir2");
  final Path foo = new Path(sdir1, "foo");
  final Path bar = new Path(foo, "bar");
  DFSTestUtil.createFile(hdfs, bar, BLOCKSIZE, REPL, SEED);
  hdfs.mkdirs(sdir2);

  // snapshots on both trees so the rename below creates reference nodes
  SnapshotTestHelper.createSnapshot(hdfs, sdir1, "s1");
  SnapshotTestHelper.createSnapshot(hdfs, sdir2, "s2");

  final Path foo2 = new Path(sdir2, "foo");
  hdfs.rename(foo, foo2);

  // create two new files under foo2
  final Path bar2 = new Path(foo2, "bar2");
  DFSTestUtil.createFile(hdfs, bar2, BLOCKSIZE, REPL, SEED);
  final Path bar3 = new Path(foo2, "bar3");
  DFSTestUtil.createFile(hdfs, bar3, BLOCKSIZE, REPL, SEED);

  // create a new snapshot on sdir2
  hdfs.createSnapshot(sdir2, "s3");

  // delete foo2
  hdfs.delete(foo2, true);
  // delete s3: bar2/bar3 were created after the rename and are recorded
  // only in s3, so they should now be destroyed entirely
  hdfs.deleteSnapshot(sdir2, "s3");

  // check
  final INodeDirectory dir1Node = fsdir.getINode4Write(sdir1.toString())
      .asDirectory();
  // 3 = sdir1 + foo + bar (the pre-rename content kept alive by s1)
  QuotaCounts q1 = dir1Node.getDirectoryWithQuotaFeature().getSpaceConsumed();
  assertEquals(3, q1.getNameSpace());
  final INodeDirectory dir2Node = fsdir.getINode4Write(sdir2.toString())
      .asDirectory();
  // sdir2 itself only: everything under it has been removed
  QuotaCounts q2 = dir2Node.getDirectoryWithQuotaFeature().getSpaceConsumed();
  assertEquals(1, q2.getNameSpace());

  // only the s1 WithName reference to foo should remain
  final Path foo_s1 = SnapshotTestHelper.getSnapshotPath(sdir1, "s1",
      foo.getName());
  INode fooRef = fsdir.getINode(foo_s1.toString());
  assertTrue(fooRef instanceof INodeReference.WithName);
  INodeReference.WithCount wc =
      (WithCount) fooRef.asReference().getReferredINode();
  assertEquals(1, wc.getReferenceCount());
  INodeDirectory fooNode = wc.getReferredINode().asDirectory();
  ReadOnlyList<INode> children = fooNode
      .getChildrenList(Snapshot.CURRENT_STATE_ID);
  // bar2 and bar3 are gone; only the original bar survives
  assertEquals(1, children.size());
  assertEquals(bar.getName(), children.get(0).getLocalName());
  List<DirectoryDiff> diffList = fooNode.getDiffs().asList();
  assertEquals(1, diffList.size());
  Snapshot s1 = dir1Node.getSnapshot(DFSUtil.string2Bytes("s1"));
  assertEquals(s1.getId(), diffList.get(0).getSnapshotId());
  // the remaining s1 diff records no child changes
  ChildrenDiff diff = diffList.get(0).getChildrenDiff();
  assertEquals(0, diff.getList(ListType.CREATED).size());
  assertEquals(0, diff.getList(ListType.DELETED).size());

  // verify the state survives saving/loading the fsimage
  restartClusterAndCheckImage(true);
}
Project: aliyun-oss-hadoop-fs    File: TestRenameWithSnapshots.java
/**
 * After the following operations:
 * Rename a dir -> create a snapshot s on dst tree -> rename the renamed dir
 * again -> delete snapshot s on dst tree
 * 
 * Make sure we only delete the snapshot s under the renamed dir.
 */
@Test
public void testRenameDirAndDeleteSnapshot_4() throws Exception {
  final Path sdir1 = new Path("/dir1");
  final Path sdir2 = new Path("/dir2");
  final Path foo = new Path(sdir1, "foo");
  final Path bar = new Path(foo, "bar");
  DFSTestUtil.createFile(hdfs, bar, BLOCKSIZE, REPL, SEED);
  hdfs.mkdirs(sdir2);

  // snapshots on both trees so the renames below create reference nodes
  // instead of plain moves
  SnapshotTestHelper.createSnapshot(hdfs, sdir1, "s1");
  SnapshotTestHelper.createSnapshot(hdfs, sdir2, "s2");

  final Path foo2 = new Path(sdir2, "foo");
  hdfs.rename(foo, foo2);

  // create two new files under foo2
  final Path bar2 = new Path(foo2, "bar2");
  DFSTestUtil.createFile(hdfs, bar2, BLOCKSIZE, REPL, SEED);
  final Path bar3 = new Path(foo2, "bar3");
  DFSTestUtil.createFile(hdfs, bar3, BLOCKSIZE, REPL, SEED);

  // create a new snapshot on sdir2
  hdfs.createSnapshot(sdir2, "s3");

  // rename foo2 again
  hdfs.rename(foo2, foo);
  // delete snapshot s3: only foo's record under s3 should go away; the
  // directory itself (now back under sdir1) must survive with bar2/bar3
  hdfs.deleteSnapshot(sdir2, "s3");

  // check
  final INodeDirectory dir1Node = fsdir.getINode4Write(sdir1.toString())
      .asDirectory();
  // sdir1 + s1 + foo_s1 (foo) + foo (foo + s1 + bar~bar3)
  QuotaCounts q1 = dir1Node.getDirectoryWithQuotaFeature().getSpaceConsumed();
  assertEquals(7, q1.getNameSpace());
  final INodeDirectory dir2Node = fsdir.getINode4Write(sdir2.toString())
      .asDirectory();
  // sdir2 itself only: foo moved away and s3 has been deleted
  QuotaCounts q2 = dir2Node.getDirectoryWithQuotaFeature().getSpaceConsumed();
  assertEquals(1, q2.getNameSpace());

  // the copy recorded in s1 must be a WithName reference to foo's inode
  final Path foo_s1 = SnapshotTestHelper.getSnapshotPath(sdir1, "s1",
      foo.getName());
  final INode fooRef = fsdir.getINode(foo_s1.toString());
  assertTrue(fooRef instanceof INodeReference.WithName);
  INodeReference.WithCount wc =
      (WithCount) fooRef.asReference().getReferredINode();
  // two live references: the s1 WithName and the current DstReference
  assertEquals(2, wc.getReferenceCount());
  INodeDirectory fooNode = wc.getReferredINode().asDirectory();
  ReadOnlyList<INode> children = fooNode
      .getChildrenList(Snapshot.CURRENT_STATE_ID);
  // bar2/bar3 (created after the first rename) must still be present
  assertEquals(3, children.size());
  assertEquals(bar.getName(), children.get(0).getLocalName());
  assertEquals(bar2.getName(), children.get(1).getLocalName());
  assertEquals(bar3.getName(), children.get(2).getLocalName());
  List<DirectoryDiff> diffList = fooNode.getDiffs().asList();
  // only the s1 diff remains after s3 is deleted
  assertEquals(1, diffList.size());
  Snapshot s1 = dir1Node.getSnapshot(DFSUtil.string2Bytes("s1"));
  assertEquals(s1.getId(), diffList.get(0).getSnapshotId());
  ChildrenDiff diff = diffList.get(0).getChildrenDiff();
  // bar2 and bar3 in the created list
  assertEquals(2, diff.getList(ListType.CREATED).size());
  assertEquals(0, diff.getList(ListType.DELETED).size());

  // the current foo is the DstReference sharing the same WithCount node
  final INode fooRef2 = fsdir.getINode4Write(foo.toString());
  assertTrue(fooRef2 instanceof INodeReference.DstReference);
  INodeReference.WithCount wc2 =
      (WithCount) fooRef2.asReference().getReferredINode();
  assertSame(wc, wc2);
  assertSame(fooRef2, wc.getParentReference());

  // verify the state survives saving/loading the fsimage
  restartClusterAndCheckImage(true);
}
Project: big-c    File: TestRenameWithSnapshots.java
/**
 * Renaming a snapshottable directory that already has snapshots must fail;
 * renaming a file out of it must leave a WithName reference in the snapshot
 * that shares one WithCount node with the live copy.
 */
@Test (timeout=300000)
public void testRenameFromSDir2NonSDir() throws Exception {
  final String dirStr = "/testRenameWithSnapshot";
  final String abcStr = dirStr + "/abc";
  final Path abc = new Path(abcStr);
  hdfs.mkdirs(abc, new FsPermission((short)0777));
  hdfs.allowSnapshot(abc);

  final Path foo = new Path(abc, "foo");
  DFSTestUtil.createFile(hdfs, foo, BLOCKSIZE, REPL, SEED);
  hdfs.createSnapshot(abc, "s0");

  try {
    // renaming a snapshottable dir that has snapshots must be rejected
    hdfs.rename(abc, new Path(dirStr, "tmp"));
    fail("Expect exception since " + abc
        + " is snapshottable and already has snapshots");
  } catch (IOException e) {
    GenericTestUtils.assertExceptionContains(abcStr
        + " is snapshottable and already has snapshots", e);
  }

  final String xyzStr = dirStr + "/xyz";
  final Path xyz = new Path(xyzStr);
  hdfs.mkdirs(xyz, new FsPermission((short)0777));
  final Path bar = new Path(xyz, "bar");
  // move foo out of the snapshottable dir into a non-snapshottable one
  hdfs.rename(foo, bar);

  // the copy preserved in snapshot s0 must now be a WithName reference
  final INode fooRef = fsdir.getINode(
      SnapshotTestHelper.getSnapshotPath(abc, "s0", "foo").toString());
  Assert.assertTrue(fooRef.isReference());
  Assert.assertTrue(fooRef.asReference() instanceof INodeReference.WithName);

  // two references to the same inode: the snapshot copy and the live file
  final INodeReference.WithCount withCount
      = (INodeReference.WithCount)fooRef.asReference().getReferredINode();
  Assert.assertEquals(2, withCount.getReferenceCount());

  final INode barRef = fsdir.getINode(bar.toString());
  Assert.assertTrue(barRef.isReference());

  // both references share the same WithCount node
  Assert.assertSame(withCount, barRef.asReference().getReferredINode());

  // deleting the live copy leaves only the snapshot reference
  hdfs.delete(bar, false);
  Assert.assertEquals(1, withCount.getReferenceCount());
}
Project: big-c    File: TestRenameWithSnapshots.java
/**
 * Test undo where dst node being overwritten is a reference node
 */
@Test
public void testRenameUndo_4() throws Exception {
  final Path sdir1 = new Path("/dir1");
  final Path sdir2 = new Path("/dir2");
  final Path sdir3 = new Path("/dir3");
  hdfs.mkdirs(sdir1);
  hdfs.mkdirs(sdir2);
  hdfs.mkdirs(sdir3);

  final Path foo = new Path(sdir1, "foo");
  final Path bar = new Path(foo, "bar");
  DFSTestUtil.createFile(hdfs, bar, BLOCKSIZE, REPL, SEED);

  final Path foo2 = new Path(sdir2, "foo2");
  hdfs.mkdirs(foo2);

  SnapshotTestHelper.createSnapshot(hdfs, sdir1, "s1");
  SnapshotTestHelper.createSnapshot(hdfs, sdir2, "s2");

  // rename foo2 to foo3, so that foo3 will be a reference node
  final Path foo3 = new Path(sdir3, "foo3");
  hdfs.rename(foo2, foo3);

  INode foo3Node = fsdir.getINode4Write(foo3.toString());
  assertTrue(foo3Node.isReference());

  INodeDirectory dir3 = fsdir.getINode4Write(sdir3.toString()).asDirectory();
  INodeDirectory mockDir3 = spy(dir3);
  // fail the rename but succeed in undo:
  // the first non-null addChild call returns false (forces rename failure);
  // the second call -- the undo re-insertion -- falls through to the real
  // method via thenCallRealMethod()
  doReturn(false).when(mockDir3).addChild((INode) Mockito.isNull(),
      anyBoolean(), Mockito.anyInt());
  Mockito.when(mockDir3.addChild((INode) Mockito.isNotNull(), anyBoolean(),
      Mockito.anyInt())).thenReturn(false).thenCallRealMethod();
  INodeDirectory root = fsdir.getINode4Write("/").asDirectory();
  // splice the spy into the tree so the rename path goes through it
  root.replaceChild(dir3, mockDir3, fsdir.getINodeMap());
  foo3Node.setParent(mockDir3);

  try {
    hdfs.rename(foo, foo3, Rename.OVERWRITE);
    fail("the rename from " + foo + " to " + foo3 + " should fail");
  } catch (IOException e) {
    GenericTestUtils.assertExceptionContains("rename from " + foo + " to "
        + foo3 + " failed.", e);
  }

  // make sure the undo is correct: the overwritten dst reference must be
  // fully restored -- same node, the WithCount still counts both
  // references, and foo3 is its parent reference again
  final INode foo3Node_undo = fsdir.getINode4Write(foo3.toString());
  assertSame(foo3Node, foo3Node_undo);
  INodeReference.WithCount foo3_wc = (WithCount) foo3Node.asReference()
      .getReferredINode();
  assertEquals(2, foo3_wc.getReferenceCount());
  assertSame(foo3Node, foo3_wc.getParentReference());
}
Project: big-c    File: TestRenameWithSnapshots.java
/**
 * After the following operations:
 * Rename a dir -> create a snapshot s on dst tree -> delete the renamed dir
 * -> delete snapshot s on dst tree
 * 
 * Make sure we destroy everything created after the rename under the renamed
 * dir.
 */
@Test
public void testRenameDirAndDeleteSnapshot_3() throws Exception {
  final Path sdir1 = new Path("/dir1");
  final Path sdir2 = new Path("/dir2");
  final Path foo = new Path(sdir1, "foo");
  final Path bar = new Path(foo, "bar");
  DFSTestUtil.createFile(hdfs, bar, BLOCKSIZE, REPL, SEED);
  hdfs.mkdirs(sdir2);

  // snapshots on both trees so the rename below creates reference nodes
  SnapshotTestHelper.createSnapshot(hdfs, sdir1, "s1");
  SnapshotTestHelper.createSnapshot(hdfs, sdir2, "s2");

  final Path foo2 = new Path(sdir2, "foo");
  hdfs.rename(foo, foo2);

  // create two new files under foo2
  final Path bar2 = new Path(foo2, "bar2");
  DFSTestUtil.createFile(hdfs, bar2, BLOCKSIZE, REPL, SEED);
  final Path bar3 = new Path(foo2, "bar3");
  DFSTestUtil.createFile(hdfs, bar3, BLOCKSIZE, REPL, SEED);

  // create a new snapshot on sdir2
  hdfs.createSnapshot(sdir2, "s3");

  // delete foo2
  hdfs.delete(foo2, true);
  // delete s3: bar2/bar3 were created after the rename and are recorded
  // only in s3, so they should now be destroyed entirely
  hdfs.deleteSnapshot(sdir2, "s3");

  // check
  final INodeDirectory dir1Node = fsdir.getINode4Write(sdir1.toString())
      .asDirectory();
  // 3 = sdir1 + foo + bar (the pre-rename content kept alive by s1)
  QuotaCounts q1 = dir1Node.getDirectoryWithQuotaFeature().getSpaceConsumed();
  assertEquals(3, q1.getNameSpace());
  final INodeDirectory dir2Node = fsdir.getINode4Write(sdir2.toString())
      .asDirectory();
  // sdir2 itself only: everything under it has been removed
  QuotaCounts q2 = dir2Node.getDirectoryWithQuotaFeature().getSpaceConsumed();
  assertEquals(1, q2.getNameSpace());

  // only the s1 WithName reference to foo should remain
  final Path foo_s1 = SnapshotTestHelper.getSnapshotPath(sdir1, "s1",
      foo.getName());
  INode fooRef = fsdir.getINode(foo_s1.toString());
  assertTrue(fooRef instanceof INodeReference.WithName);
  INodeReference.WithCount wc =
      (WithCount) fooRef.asReference().getReferredINode();
  assertEquals(1, wc.getReferenceCount());
  INodeDirectory fooNode = wc.getReferredINode().asDirectory();
  ReadOnlyList<INode> children = fooNode
      .getChildrenList(Snapshot.CURRENT_STATE_ID);
  // bar2 and bar3 are gone; only the original bar survives
  assertEquals(1, children.size());
  assertEquals(bar.getName(), children.get(0).getLocalName());
  List<DirectoryDiff> diffList = fooNode.getDiffs().asList();
  assertEquals(1, diffList.size());
  Snapshot s1 = dir1Node.getSnapshot(DFSUtil.string2Bytes("s1"));
  assertEquals(s1.getId(), diffList.get(0).getSnapshotId());
  // the remaining s1 diff records no child changes
  ChildrenDiff diff = diffList.get(0).getChildrenDiff();
  assertEquals(0, diff.getList(ListType.CREATED).size());
  assertEquals(0, diff.getList(ListType.DELETED).size());

  // verify the state survives saving/loading the fsimage
  restartClusterAndCheckImage(true);
}
Project: big-c    File: TestRenameWithSnapshots.java
/**
 * After the following operations:
 * Rename a dir -> create a snapshot s on dst tree -> rename the renamed dir
 * again -> delete snapshot s on dst tree
 * 
 * Make sure we only delete the snapshot s under the renamed dir.
 */
@Test
public void testRenameDirAndDeleteSnapshot_4() throws Exception {
  final Path sdir1 = new Path("/dir1");
  final Path sdir2 = new Path("/dir2");
  final Path foo = new Path(sdir1, "foo");
  final Path bar = new Path(foo, "bar");
  DFSTestUtil.createFile(hdfs, bar, BLOCKSIZE, REPL, SEED);
  hdfs.mkdirs(sdir2);

  // snapshots on both trees so the renames below create reference nodes
  // instead of plain moves
  SnapshotTestHelper.createSnapshot(hdfs, sdir1, "s1");
  SnapshotTestHelper.createSnapshot(hdfs, sdir2, "s2");

  final Path foo2 = new Path(sdir2, "foo");
  hdfs.rename(foo, foo2);

  // create two new files under foo2
  final Path bar2 = new Path(foo2, "bar2");
  DFSTestUtil.createFile(hdfs, bar2, BLOCKSIZE, REPL, SEED);
  final Path bar3 = new Path(foo2, "bar3");
  DFSTestUtil.createFile(hdfs, bar3, BLOCKSIZE, REPL, SEED);

  // create a new snapshot on sdir2
  hdfs.createSnapshot(sdir2, "s3");

  // rename foo2 again
  hdfs.rename(foo2, foo);
  // delete snapshot s3: only foo's record under s3 should go away; the
  // directory itself (now back under sdir1) must survive with bar2/bar3
  hdfs.deleteSnapshot(sdir2, "s3");

  // check
  final INodeDirectory dir1Node = fsdir.getINode4Write(sdir1.toString())
      .asDirectory();
  // sdir1 + s1 + foo_s1 (foo) + foo (foo + s1 + bar~bar3)
  QuotaCounts q1 = dir1Node.getDirectoryWithQuotaFeature().getSpaceConsumed();
  assertEquals(7, q1.getNameSpace());
  final INodeDirectory dir2Node = fsdir.getINode4Write(sdir2.toString())
      .asDirectory();
  // sdir2 itself only: foo moved away and s3 has been deleted
  QuotaCounts q2 = dir2Node.getDirectoryWithQuotaFeature().getSpaceConsumed();
  assertEquals(1, q2.getNameSpace());

  // the copy recorded in s1 must be a WithName reference to foo's inode
  final Path foo_s1 = SnapshotTestHelper.getSnapshotPath(sdir1, "s1",
      foo.getName());
  final INode fooRef = fsdir.getINode(foo_s1.toString());
  assertTrue(fooRef instanceof INodeReference.WithName);
  INodeReference.WithCount wc =
      (WithCount) fooRef.asReference().getReferredINode();
  // two live references: the s1 WithName and the current DstReference
  assertEquals(2, wc.getReferenceCount());
  INodeDirectory fooNode = wc.getReferredINode().asDirectory();
  ReadOnlyList<INode> children = fooNode
      .getChildrenList(Snapshot.CURRENT_STATE_ID);
  // bar2/bar3 (created after the first rename) must still be present
  assertEquals(3, children.size());
  assertEquals(bar.getName(), children.get(0).getLocalName());
  assertEquals(bar2.getName(), children.get(1).getLocalName());
  assertEquals(bar3.getName(), children.get(2).getLocalName());
  List<DirectoryDiff> diffList = fooNode.getDiffs().asList();
  // only the s1 diff remains after s3 is deleted
  assertEquals(1, diffList.size());
  Snapshot s1 = dir1Node.getSnapshot(DFSUtil.string2Bytes("s1"));
  assertEquals(s1.getId(), diffList.get(0).getSnapshotId());
  ChildrenDiff diff = diffList.get(0).getChildrenDiff();
  // bar2 and bar3 in the created list
  assertEquals(2, diff.getList(ListType.CREATED).size());
  assertEquals(0, diff.getList(ListType.DELETED).size());

  // the current foo is the DstReference sharing the same WithCount node
  final INode fooRef2 = fsdir.getINode4Write(foo.toString());
  assertTrue(fooRef2 instanceof INodeReference.DstReference);
  INodeReference.WithCount wc2 =
      (WithCount) fooRef2.asReference().getReferredINode();
  assertSame(wc, wc2);
  assertSame(fooRef2, wc.getParentReference());

  // verify the state survives saving/loading the fsimage
  restartClusterAndCheckImage(true);
}
Project: hadoop-2.6.0-cdh5.4.3    File: TestRenameWithSnapshots.java
/**
 * Renaming a snapshottable directory that already has snapshots must fail;
 * renaming a file out of it must leave a WithName reference in the snapshot
 * that shares one WithCount node with the live copy.
 */
@Test (timeout=300000)
public void testRenameFromSDir2NonSDir() throws Exception {
  final String dirStr = "/testRenameWithSnapshot";
  final String abcStr = dirStr + "/abc";
  final Path abc = new Path(abcStr);
  hdfs.mkdirs(abc, new FsPermission((short)0777));
  hdfs.allowSnapshot(abc);

  final Path foo = new Path(abc, "foo");
  DFSTestUtil.createFile(hdfs, foo, BLOCKSIZE, REPL, SEED);
  hdfs.createSnapshot(abc, "s0");

  try {
    // renaming a snapshottable dir that has snapshots must be rejected
    hdfs.rename(abc, new Path(dirStr, "tmp"));
    fail("Expect exception since " + abc
        + " is snapshottable and already has snapshots");
  } catch (IOException e) {
    GenericTestUtils.assertExceptionContains(abcStr
        + " is snapshottable and already has snapshots", e);
  }

  final String xyzStr = dirStr + "/xyz";
  final Path xyz = new Path(xyzStr);
  hdfs.mkdirs(xyz, new FsPermission((short)0777));
  final Path bar = new Path(xyz, "bar");
  // move foo out of the snapshottable dir into a non-snapshottable one
  hdfs.rename(foo, bar);

  // the copy preserved in snapshot s0 must now be a WithName reference
  final INode fooRef = fsdir.getINode(
      SnapshotTestHelper.getSnapshotPath(abc, "s0", "foo").toString());
  Assert.assertTrue(fooRef.isReference());
  Assert.assertTrue(fooRef.asReference() instanceof INodeReference.WithName);

  // two references to the same inode: the snapshot copy and the live file
  final INodeReference.WithCount withCount
      = (INodeReference.WithCount)fooRef.asReference().getReferredINode();
  Assert.assertEquals(2, withCount.getReferenceCount());

  final INode barRef = fsdir.getINode(bar.toString());
  Assert.assertTrue(barRef.isReference());

  // both references share the same WithCount node
  Assert.assertSame(withCount, barRef.asReference().getReferredINode());

  // deleting the live copy leaves only the snapshot reference
  hdfs.delete(bar, false);
  Assert.assertEquals(1, withCount.getReferenceCount());
}
Project: hadoop-2.6.0-cdh5.4.3    File: TestRenameWithSnapshots.java
/**
 * Test undo where dst node being overwritten is a reference node
 */
@Test
public void testRenameUndo_4() throws Exception {
  final Path sdir1 = new Path("/dir1");
  final Path sdir2 = new Path("/dir2");
  final Path sdir3 = new Path("/dir3");
  hdfs.mkdirs(sdir1);
  hdfs.mkdirs(sdir2);
  hdfs.mkdirs(sdir3);

  final Path foo = new Path(sdir1, "foo");
  final Path bar = new Path(foo, "bar");
  DFSTestUtil.createFile(hdfs, bar, BLOCKSIZE, REPL, SEED);

  final Path foo2 = new Path(sdir2, "foo2");
  hdfs.mkdirs(foo2);

  SnapshotTestHelper.createSnapshot(hdfs, sdir1, "s1");
  SnapshotTestHelper.createSnapshot(hdfs, sdir2, "s2");

  // rename foo2 to foo3, so that foo3 will be a reference node
  final Path foo3 = new Path(sdir3, "foo3");
  hdfs.rename(foo2, foo3);

  INode foo3Node = fsdir.getINode4Write(foo3.toString());
  assertTrue(foo3Node.isReference());

  INodeDirectory dir3 = fsdir.getINode4Write(sdir3.toString()).asDirectory();
  INodeDirectory mockDir3 = spy(dir3);
  // fail the rename but succeed in undo:
  // the first non-null addChild call returns false (forces rename failure);
  // the second call -- the undo re-insertion -- falls through to the real
  // method via thenCallRealMethod()
  doReturn(false).when(mockDir3).addChild((INode) Mockito.isNull(),
      anyBoolean(), Mockito.anyInt());
  Mockito.when(mockDir3.addChild((INode) Mockito.isNotNull(), anyBoolean(),
      Mockito.anyInt())).thenReturn(false).thenCallRealMethod();
  INodeDirectory root = fsdir.getINode4Write("/").asDirectory();
  // splice the spy into the tree so the rename path goes through it
  root.replaceChild(dir3, mockDir3, fsdir.getINodeMap());
  foo3Node.setParent(mockDir3);

  try {
    hdfs.rename(foo, foo3, Rename.OVERWRITE);
    fail("the rename from " + foo + " to " + foo3 + " should fail");
  } catch (IOException e) {
    GenericTestUtils.assertExceptionContains("rename from " + foo + " to "
        + foo3 + " failed.", e);
  }

  // make sure the undo is correct: the overwritten dst reference must be
  // fully restored -- same node, the WithCount still counts both
  // references, and foo3 is its parent reference again
  final INode foo3Node_undo = fsdir.getINode4Write(foo3.toString());
  assertSame(foo3Node, foo3Node_undo);
  INodeReference.WithCount foo3_wc = (WithCount) foo3Node.asReference()
      .getReferredINode();
  assertEquals(2, foo3_wc.getReferenceCount());
  assertSame(foo3Node, foo3_wc.getParentReference());
}