Example source code for the Java class org.apache.hadoop.hdfs.server.namenode.snapshot.INodeDirectorySnapshottable

Project: hadoop-plus    File: FSImageSerialization.java
/**
 * Serialize an {@link INodeDirectory}
 * @param node The node to write
 * @param out The {@link DataOutput} where the fields are written 
 */
public static void writeINodeDirectory(INodeDirectory node, DataOutput out)
    throws IOException {
  writeLocalName(node, out);
  out.writeLong(node.getId());
  out.writeShort(0);  // replication
  out.writeLong(node.getModificationTime());
  out.writeLong(0);   // access time
  out.writeLong(0);   // preferred block size
  out.writeInt(-1);   // # of blocks

  out.writeLong(node.getNsQuota());
  out.writeLong(node.getDsQuota());
  if (node instanceof INodeDirectorySnapshottable) {
    out.writeBoolean(true);
  } else {
    out.writeBoolean(false);
    out.writeBoolean(node instanceof INodeDirectoryWithSnapshot);
  }

  writePermissionStatus(node, out);
}
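The two trailing booleans encode three directory states on the wire: snapshottable, with-snapshot, or plain. A minimal reader sketch for just these flags (illustrative only; the class, enum, and method names here are invented, and the real deserialization lives in FSImageFormat):

import java.io.DataInput;
import java.io.IOException;

public class SnapshotFlags {
  enum State { SNAPSHOTTABLE, WITH_SNAPSHOT, PLAIN_DIRECTORY }

  /** Mirrors the flag encoding produced by writeINodeDirectory above. */
  static State read(DataInput in) throws IOException {
    if (in.readBoolean()) {
      return State.SNAPSHOTTABLE;          // a single "true" flag
    }
    // a second flag is written only when the first one is false
    return in.readBoolean() ? State.WITH_SNAPSHOT : State.PLAIN_DIRECTORY;
  }
}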
Project: hadoop-plus    File: FSDirectory.java
/**
 * Check if the given INode (or one of its descendants) is snapshottable and
 * already has snapshots.
 * 
 * @param target The given INode
 * @param snapshottableDirs The list of directories that are snapshottable 
 *                          but do not have snapshots yet
 */
private static void checkSnapshot(INode target,
    List<INodeDirectorySnapshottable> snapshottableDirs) throws IOException {
  if (target.isDirectory()) {
    INodeDirectory targetDir = target.asDirectory();
    if (targetDir.isSnapshottable()) {
      INodeDirectorySnapshottable ssTargetDir = 
          (INodeDirectorySnapshottable) targetDir;
      if (ssTargetDir.getNumSnapshots() > 0) {
        throw new IOException("The directory " + ssTargetDir.getFullPathName()
            + " cannot be deleted since " + ssTargetDir.getFullPathName()
            + " is snapshottable and already has snapshots");
      } else {
        if (snapshottableDirs != null) {
          snapshottableDirs.add(ssTargetDir);
        }
      }
    } 
    for (INode child : targetDir.getChildrenList(null)) {
      checkSnapshot(child, snapshottableDirs);
    }
  }
}
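A condensed sketch of the calling pattern, taken from the FSDirectory.delete() snippet that appears later in this listing: the caller passes in an empty list, lets checkSnapshot() veto the operation if any snapshots exist, and afterwards unregisters the collected snapshot-free directories.

List<INodeDirectorySnapshottable> snapshottableDirs =
    new ArrayList<INodeDirectorySnapshottable>();
checkSnapshot(targetNode, snapshottableDirs);  // throws if snapshots exist
// ... delete the subtree, then clean up the SnapshotManager:
namesystem.removeSnapshottableDirs(snapshottableDirs);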
Project: hadoop-plus    File: FSDirectory.java
/**
 * Get a listing of all the snapshots of a snapshottable directory
 */
private DirectoryListing getSnapshotsListing(String src, byte[] startAfter)
    throws UnresolvedLinkException, IOException {
  Preconditions.checkState(hasReadLock());
  Preconditions.checkArgument(
      src.endsWith(HdfsConstants.SEPARATOR_DOT_SNAPSHOT_DIR), 
      "%s does not end with %s", src, HdfsConstants.SEPARATOR_DOT_SNAPSHOT_DIR);

  final String dirPath = normalizePath(src.substring(0,
      src.length() - HdfsConstants.DOT_SNAPSHOT_DIR.length()));

  final INode node = this.getINode(dirPath);
  final INodeDirectorySnapshottable dirNode = INodeDirectorySnapshottable
      .valueOf(node, dirPath);
  final ReadOnlyList<Snapshot> snapshots = dirNode.getSnapshotList();
  int skipSize = ReadOnlyList.Util.binarySearch(snapshots, startAfter);
  skipSize = skipSize < 0 ? -skipSize - 1 : skipSize + 1;
  int numOfListing = Math.min(snapshots.size() - skipSize, this.lsLimit);
  final HdfsFileStatus listing[] = new HdfsFileStatus[numOfListing];
  for (int i = 0; i < numOfListing; i++) {
    Root sRoot = snapshots.get(i + skipSize).getRoot();
    listing[i] = createFileStatus(sRoot.getLocalNameBytes(), sRoot, null);
  }
  return new DirectoryListing(
      listing, snapshots.size() - skipSize - numOfListing);
}
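The skipSize arithmetic relies on the standard binary-search contract (a non-negative index on a hit, -(insertionPoint) - 1 on a miss), which ReadOnlyList.Util.binarySearch shares with java.util.Arrays. A runnable demonstration using plain strings in place of the snapshot name bytes (simplified; the real code compares byte[] names):

import java.util.Arrays;

public class SkipSizeDemo {
  public static void main(String[] args) {
    String[] snapshots = { "s1", "s3", "s5" };
    for (String startAfter : new String[] { "s3", "s2" }) {
      int idx = Arrays.binarySearch(snapshots, startAfter);
      int skipSize = idx < 0 ? -idx - 1 : idx + 1;
      // "s3" hits index 1, so skipSize = 2 and the listing resumes at s5;
      // "s2" misses with idx = -2, so skipSize = 1 and it resumes at s3.
      System.out.println(startAfter + " -> skipSize=" + skipSize);
    }
  }
}

Either way, every snapshot ordered at or before startAfter is skipped, which is exactly what a paginated listing needs.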
Project: hadoop-plus    File: FSDirectory.java
/**
 * Currently we only support "ls /xxx/.snapshot" which will return all the
 * snapshots of a directory. The FSCommand Ls will first call getFileInfo to
 * make sure the file/directory exists (before the real getListing call).
 * Since we do not have a real INode for ".snapshot", we return an empty
 * non-null HdfsFileStatus here.
 */
private HdfsFileStatus getFileInfo4DotSnapshot(String src)
    throws UnresolvedLinkException {
  Preconditions.checkArgument(
      src.endsWith(HdfsConstants.SEPARATOR_DOT_SNAPSHOT_DIR), 
      "%s does not end with %s", src, HdfsConstants.SEPARATOR_DOT_SNAPSHOT_DIR);

  final String dirPath = normalizePath(src.substring(0,
      src.length() - HdfsConstants.DOT_SNAPSHOT_DIR.length()));

  final INode node = this.getINode(dirPath);
  if (node != null
      && node.isDirectory()
      && node.asDirectory() instanceof INodeDirectorySnapshottable) {
    return new HdfsFileStatus(0, true, 0, 0, 0, 0, null, null, null, null,
        HdfsFileStatus.EMPTY_NAME, -1L, 0);
  }
  return null;
}
Project: hadoop-TCP    File: FSImageSerialization.java
/**
 * Serialize an {@link INodeDirectory}
 * @param node The node to write
 * @param out The {@link DataOutput} where the fields are written 
 */
public static void writeINodeDirectory(INodeDirectory node, DataOutput out)
    throws IOException {
  writeLocalName(node, out);
  out.writeLong(node.getId());
  out.writeShort(0);  // replication
  out.writeLong(node.getModificationTime());
  out.writeLong(0);   // access time
  out.writeLong(0);   // preferred block size
  out.writeInt(-1);   // # of blocks

  out.writeLong(node.getNsQuota());
  out.writeLong(node.getDsQuota());
  if (node instanceof INodeDirectorySnapshottable) {
    out.writeBoolean(true);
  } else {
    out.writeBoolean(false);
    out.writeBoolean(node instanceof INodeDirectoryWithSnapshot);
  }

  writePermissionStatus(node, out);
}
Project: hadoop-TCP    File: FSDirectory.java
/**
 * Check if the given INode (or one of its descendants) is snapshottable and
 * already has snapshots.
 * 
 * @param target The given INode
 * @param snapshottableDirs The list of directories that are snapshottable 
 *                          but do not have snapshots yet
 */
private static void checkSnapshot(INode target,
    List<INodeDirectorySnapshottable> snapshottableDirs) throws IOException {
  if (target.isDirectory()) {
    INodeDirectory targetDir = target.asDirectory();
    if (targetDir.isSnapshottable()) {
      INodeDirectorySnapshottable ssTargetDir = 
          (INodeDirectorySnapshottable) targetDir;
      if (ssTargetDir.getNumSnapshots() > 0) {
        throw new IOException("The directory " + ssTargetDir.getFullPathName()
            + " cannot be deleted since " + ssTargetDir.getFullPathName()
            + " is snapshottable and already has snapshots");
      } else {
        if (snapshottableDirs != null) {
          snapshottableDirs.add(ssTargetDir);
        }
      }
    } 
    for (INode child : targetDir.getChildrenList(null)) {
      checkSnapshot(child, snapshottableDirs);
    }
  }
}
Project: hadoop-TCP    File: FSDirectory.java
/**
 * Get a listing of all the snapshots of a snapshottable directory
 */
private DirectoryListing getSnapshotsListing(String src, byte[] startAfter)
    throws UnresolvedLinkException, IOException {
  Preconditions.checkState(hasReadLock());
  Preconditions.checkArgument(
      src.endsWith(HdfsConstants.SEPARATOR_DOT_SNAPSHOT_DIR), 
      "%s does not end with %s", src, HdfsConstants.SEPARATOR_DOT_SNAPSHOT_DIR);

  final String dirPath = normalizePath(src.substring(0,
      src.length() - HdfsConstants.DOT_SNAPSHOT_DIR.length()));

  final INode node = this.getINode(dirPath);
  final INodeDirectorySnapshottable dirNode = INodeDirectorySnapshottable
      .valueOf(node, dirPath);
  final ReadOnlyList<Snapshot> snapshots = dirNode.getSnapshotList();
  int skipSize = ReadOnlyList.Util.binarySearch(snapshots, startAfter);
  skipSize = skipSize < 0 ? -skipSize - 1 : skipSize + 1;
  int numOfListing = Math.min(snapshots.size() - skipSize, this.lsLimit);
  final HdfsFileStatus listing[] = new HdfsFileStatus[numOfListing];
  for (int i = 0; i < numOfListing; i++) {
    Root sRoot = snapshots.get(i + skipSize).getRoot();
    listing[i] = createFileStatus(sRoot.getLocalNameBytes(), sRoot, null);
  }
  return new DirectoryListing(
      listing, snapshots.size() - skipSize - numOfListing);
}
Project: hadoop-TCP    File: FSDirectory.java
/**
 * Currently we only support "ls /xxx/.snapshot" which will return all the
 * snapshots of a directory. The FSCommand Ls will first call getFileInfo to
 * make sure the file/directory exists (before the real getListing call).
 * Since we do not have a real INode for ".snapshot", we return an empty
 * non-null HdfsFileStatus here.
 */
private HdfsFileStatus getFileInfo4DotSnapshot(String src)
    throws UnresolvedLinkException {
  Preconditions.checkArgument(
      src.endsWith(HdfsConstants.SEPARATOR_DOT_SNAPSHOT_DIR), 
      "%s does not end with %s", src, HdfsConstants.SEPARATOR_DOT_SNAPSHOT_DIR);

  final String dirPath = normalizePath(src.substring(0,
      src.length() - HdfsConstants.DOT_SNAPSHOT_DIR.length()));

  final INode node = this.getINode(dirPath);
  if (node != null
      && node.isDirectory()
      && node.asDirectory() instanceof INodeDirectorySnapshottable) {
    return new HdfsFileStatus(0, true, 0, 0, 0, 0, null, null, null, null,
        HdfsFileStatus.EMPTY_NAME, -1L, 0);
  }
  return null;
}
Project: hardfs    File: FSImageSerialization.java
/**
 * Serialize an {@link INodeDirectory}
 * @param node The node to write
 * @param out The {@link DataOutput} where the fields are written 
 */
public static void writeINodeDirectory(INodeDirectory node, DataOutput out)
    throws IOException {
  writeLocalName(node, out);
  out.writeLong(node.getId());
  out.writeShort(0);  // replication
  out.writeLong(node.getModificationTime());
  out.writeLong(0);   // access time
  out.writeLong(0);   // preferred block size
  out.writeInt(-1);   // # of blocks

  out.writeLong(node.getNsQuota());
  out.writeLong(node.getDsQuota());
  if (node instanceof INodeDirectorySnapshottable) {
    out.writeBoolean(true);
  } else {
    out.writeBoolean(false);
    out.writeBoolean(node instanceof INodeDirectoryWithSnapshot);
  }

  writePermissionStatus(node, out);
}
Project: hardfs    File: FSDirectory.java
/**
 * Check if the given INode (or one of its descendants) is snapshottable and
 * already has snapshots.
 * 
 * @param target The given INode
 * @param snapshottableDirs The list of directories that are snapshottable 
 *                          but do not have snapshots yet
 */
private static void checkSnapshot(INode target,
    List<INodeDirectorySnapshottable> snapshottableDirs) throws IOException {
  if (target.isDirectory()) {
    INodeDirectory targetDir = target.asDirectory();
    if (targetDir.isSnapshottable()) {
      INodeDirectorySnapshottable ssTargetDir = 
          (INodeDirectorySnapshottable) targetDir;
      if (ssTargetDir.getNumSnapshots() > 0) {
        throw new IOException("The directory " + ssTargetDir.getFullPathName()
            + " cannot be deleted since " + ssTargetDir.getFullPathName()
            + " is snapshottable and already has snapshots");
      } else {
        if (snapshottableDirs != null) {
          snapshottableDirs.add(ssTargetDir);
        }
      }
    } 
    for (INode child : targetDir.getChildrenList(null)) {
      checkSnapshot(child, snapshottableDirs);
    }
  }
}
Project: hardfs    File: FSDirectory.java
/**
 * Get a listing of all the snapshots of a snapshottable directory
 */
private DirectoryListing getSnapshotsListing(String src, byte[] startAfter)
    throws UnresolvedLinkException, IOException {
  Preconditions.checkState(hasReadLock());
  Preconditions.checkArgument(
      src.endsWith(HdfsConstants.SEPARATOR_DOT_SNAPSHOT_DIR), 
      "%s does not end with %s", src, HdfsConstants.SEPARATOR_DOT_SNAPSHOT_DIR);

  final String dirPath = normalizePath(src.substring(0,
      src.length() - HdfsConstants.DOT_SNAPSHOT_DIR.length()));

  final INode node = this.getINode(dirPath);
  final INodeDirectorySnapshottable dirNode = INodeDirectorySnapshottable
      .valueOf(node, dirPath);
  final ReadOnlyList<Snapshot> snapshots = dirNode.getSnapshotList();
  int skipSize = ReadOnlyList.Util.binarySearch(snapshots, startAfter);
  skipSize = skipSize < 0 ? -skipSize - 1 : skipSize + 1;
  int numOfListing = Math.min(snapshots.size() - skipSize, this.lsLimit);
  final HdfsFileStatus listing[] = new HdfsFileStatus[numOfListing];
  for (int i = 0; i < numOfListing; i++) {
    Root sRoot = snapshots.get(i + skipSize).getRoot();
    listing[i] = createFileStatus(sRoot.getLocalNameBytes(), sRoot, null);
  }
  return new DirectoryListing(
      listing, snapshots.size() - skipSize - numOfListing);
}
Project: hardfs    File: FSDirectory.java
/**
 * Currently we only support "ls /xxx/.snapshot" which will return all the
 * snapshots of a directory. The FSCommand Ls will first call getFileInfo to
 * make sure the file/directory exists (before the real getListing call).
 * Since we do not have a real INode for ".snapshot", we return an empty
 * non-null HdfsFileStatus here.
 */
private HdfsFileStatus getFileInfo4DotSnapshot(String src)
    throws UnresolvedLinkException {
  Preconditions.checkArgument(
      src.endsWith(HdfsConstants.SEPARATOR_DOT_SNAPSHOT_DIR), 
      "%s does not end with %s", src, HdfsConstants.SEPARATOR_DOT_SNAPSHOT_DIR);

  final String dirPath = normalizePath(src.substring(0,
      src.length() - HdfsConstants.DOT_SNAPSHOT_DIR.length()));

  final INode node = this.getINode(dirPath);
  if (node != null
      && node.isDirectory()
      && node.asDirectory() instanceof INodeDirectorySnapshottable) {
    return new HdfsFileStatus(0, true, 0, 0, 0, 0, null, null, null, null,
        HdfsFileStatus.EMPTY_NAME, -1L, 0);
  }
  return null;
}
Project: hadoop-on-lustre2    File: FSDirectory.java
/**
 * Check if the given INode (or one of its descendants) is snapshottable and
 * already has snapshots.
 * 
 * @param target The given INode
 * @param snapshottableDirs The list of directories that are snapshottable 
 *                          but do not have snapshots yet
 */
private static void checkSnapshot(INode target,
    List<INodeDirectorySnapshottable> snapshottableDirs) throws IOException {
  if (target.isDirectory()) {
    INodeDirectory targetDir = target.asDirectory();
    if (targetDir.isSnapshottable()) {
      INodeDirectorySnapshottable ssTargetDir = 
          (INodeDirectorySnapshottable) targetDir;
      if (ssTargetDir.getNumSnapshots() > 0) {
        throw new IOException("The directory " + ssTargetDir.getFullPathName()
            + " cannot be deleted since " + ssTargetDir.getFullPathName()
            + " is snapshottable and already has snapshots");
      } else {
        if (snapshottableDirs != null) {
          snapshottableDirs.add(ssTargetDir);
        }
      }
    } 
    for (INode child : targetDir.getChildrenList(Snapshot.CURRENT_STATE_ID)) {
      checkSnapshot(child, snapshottableDirs);
    }
  }
}
Project: hadoop-on-lustre2    File: FSDirectory.java
private INode getINode4DotSnapshot(String src) throws UnresolvedLinkException {
  Preconditions.checkArgument(
      src.endsWith(HdfsConstants.SEPARATOR_DOT_SNAPSHOT_DIR), 
      "%s does not end with %s", src, HdfsConstants.SEPARATOR_DOT_SNAPSHOT_DIR);

  final String dirPath = normalizePath(src.substring(0,
      src.length() - HdfsConstants.DOT_SNAPSHOT_DIR.length()));

  final INode node = this.getINode(dirPath);
  if (node != null
      && node.isDirectory()
      && node.asDirectory() instanceof INodeDirectorySnapshottable) {
    return node;
  }
  return null;
}
Project: hadoop-plus    File: FSDirectory.java
private static INodeDirectoryWithQuota createRoot(FSNamesystem namesystem) {
  final INodeDirectoryWithQuota r = new INodeDirectoryWithQuota(
      INodeId.ROOT_INODE_ID,
      INodeDirectory.ROOT_NAME,
      namesystem.createFsOwnerPermissions(new FsPermission((short) 0755)));
  final INodeDirectorySnapshottable s = new INodeDirectorySnapshottable(r);
  s.setSnapshotQuota(0);
  return s;
}
Project: hadoop-plus    File: INodeDirectory.java
/** Replace itself with an {@link INodeDirectorySnapshottable}. */
public INodeDirectorySnapshottable replaceSelf4INodeDirectorySnapshottable(
    Snapshot latest, final INodeMap inodeMap) throws QuotaExceededException {
  Preconditions.checkState(!(this instanceof INodeDirectorySnapshottable),
      "this is already an INodeDirectorySnapshottable, this=%s", this);
  final INodeDirectorySnapshottable s = new INodeDirectorySnapshottable(this);
  replaceSelf(s, inodeMap).saveSelf2Snapshot(latest, this);
  return s;
}
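This replacement is what "hdfs dfsadmin -allowSnapshot" ultimately triggers on the NameNode: the plain INodeDirectory is upgraded in place. The exact server-side call chain is not part of this listing, but from the client API the trigger is simply (path illustrative):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;

public class AllowSnapshotDemo {
  public static void main(String[] args) throws Exception {
    Path dir = new Path("/data");
    DistributedFileSystem dfs =
        (DistributedFileSystem) dir.getFileSystem(new Configuration());
    // On the NameNode this ends up invoking
    // replaceSelf4INodeDirectorySnapshottable for /data.
    dfs.allowSnapshot(dir);
  }
}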
Project: hadoop-plus    File: TestSnapshotPathINodes.java
static Snapshot getSnapshot(INodesInPath inodesInPath, String name) {
  if (name == null) {
    return null;
  }
  final int i = inodesInPath.getSnapshotRootIndex() - 1;
  final INode inode = inodesInPath.getINodes()[i];
  return ((INodeDirectorySnapshottable)inode).getSnapshot(
      DFSUtil.string2Bytes(name)); 
}
Project: hadoop-TCP    File: FSDirectory.java
private static INodeDirectoryWithQuota createRoot(FSNamesystem namesystem) {
  final INodeDirectoryWithQuota r = new INodeDirectoryWithQuota(
      INodeId.ROOT_INODE_ID,
      INodeDirectory.ROOT_NAME,
      namesystem.createFsOwnerPermissions(new FsPermission((short) 0755)));
  final INodeDirectorySnapshottable s = new INodeDirectorySnapshottable(r);
  s.setSnapshotQuota(0);
  return s;
}
Project: hadoop-TCP    File: INodeDirectory.java
/** Replace itself with an {@link INodeDirectorySnapshottable}. */
public INodeDirectorySnapshottable replaceSelf4INodeDirectorySnapshottable(
    Snapshot latest, final INodeMap inodeMap) throws QuotaExceededException {
  Preconditions.checkState(!(this instanceof INodeDirectorySnapshottable),
      "this is already an INodeDirectorySnapshottable, this=%s", this);
  final INodeDirectorySnapshottable s = new INodeDirectorySnapshottable(this);
  replaceSelf(s, inodeMap).saveSelf2Snapshot(latest, this);
  return s;
}
Project: hadoop-TCP    File: TestSnapshotPathINodes.java
static Snapshot getSnapshot(INodesInPath inodesInPath, String name) {
  if (name == null) {
    return null;
  }
  final int i = inodesInPath.getSnapshotRootIndex() - 1;
  final INode inode = inodesInPath.getINodes()[i];
  return ((INodeDirectorySnapshottable)inode).getSnapshot(
      DFSUtil.string2Bytes(name)); 
}
Project: hardfs    File: FSDirectory.java
private static INodeDirectoryWithQuota createRoot(FSNamesystem namesystem) {
  final INodeDirectoryWithQuota r = new INodeDirectoryWithQuota(
      INodeId.ROOT_INODE_ID,
      INodeDirectory.ROOT_NAME,
      namesystem.createFsOwnerPermissions(new FsPermission((short) 0755)));
  final INodeDirectorySnapshottable s = new INodeDirectorySnapshottable(r);
  s.setSnapshotQuota(0);
  return s;
}
Project: hardfs    File: INodeDirectory.java
/** Replace itself with an {@link INodeDirectorySnapshottable}. */
public INodeDirectorySnapshottable replaceSelf4INodeDirectorySnapshottable(
    Snapshot latest, final INodeMap inodeMap) throws QuotaExceededException {
  Preconditions.checkState(!(this instanceof INodeDirectorySnapshottable),
      "this is already an INodeDirectorySnapshottable, this=%s", this);
  final INodeDirectorySnapshottable s = new INodeDirectorySnapshottable(this);
  replaceSelf(s, inodeMap).saveSelf2Snapshot(latest, this);
  return s;
}
Project: hardfs    File: TestSnapshotPathINodes.java
static Snapshot getSnapshot(INodesInPath inodesInPath, String name) {
  if (name == null) {
    return null;
  }
  final int i = inodesInPath.getSnapshotRootIndex() - 1;
  final INode inode = inodesInPath.getINodes()[i];
  return ((INodeDirectorySnapshottable)inode).getSnapshot(
      DFSUtil.string2Bytes(name)); 
}
Project: hadoop-on-lustre2    File: FSDirectory.java
private static INodeDirectorySnapshottable createRoot(FSNamesystem namesystem) {
  final INodeDirectory r = new INodeDirectory(
      INodeId.ROOT_INODE_ID,
      INodeDirectory.ROOT_NAME,
      namesystem.createFsOwnerPermissions(new FsPermission((short) 0755)),
      0L);
  r.addDirectoryWithQuotaFeature(
      DirectoryWithQuotaFeature.DEFAULT_NAMESPACE_QUOTA,
      DirectoryWithQuotaFeature.DEFAULT_DISKSPACE_QUOTA);
  final INodeDirectorySnapshottable s = new INodeDirectorySnapshottable(r);
  s.setSnapshotQuota(0);
  return s;
}
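Note the design shift relative to the hadoop-plus, hadoop-TCP, and hardfs variants above: the root is no longer an INodeDirectoryWithQuota subclass; quota state is attached to a plain INodeDirectory as a feature. A sketch of what that means for callers, assuming the feature-era INodeDirectory API (this helper is invented for illustration):

// Pre-feature code would test: dir instanceof INodeDirectoryWithQuota.
// With the feature-based design, callers probe for the feature instead.
static boolean hasQuotaSet(INodeDirectory dir) {
  DirectoryWithQuotaFeature q = dir.getDirectoryWithQuotaFeature();
  return q != null;
}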
Project: hadoop-on-lustre2    File: FSDirectory.java
/**
 * Delete the target directory and collect the blocks under it
 * 
 * @param src Path of a directory to delete
 * @param collectedBlocks Blocks under the deleted directory
 * @param removedINodes INodes that should be removed from {@link #inodeMap}
 * @param logRetryCache Whether to record RPC IDs in editlog to support retry
 *                      cache rebuilding.
 * @return true on successful deletion; else false
 */
boolean delete(String src, BlocksMapUpdateInfo collectedBlocks,
    List<INode> removedINodes, boolean logRetryCache) throws IOException {
  if (NameNode.stateChangeLog.isDebugEnabled()) {
    NameNode.stateChangeLog.debug("DIR* FSDirectory.delete: " + src);
  }
  waitForReady();
  long now = now();
  final long filesRemoved;
  writeLock();
  try {
    final INodesInPath inodesInPath = rootDir.getINodesInPath4Write(
        normalizePath(src), false);
    if (!deleteAllowed(inodesInPath, src)) {
      filesRemoved = -1;
    } else {
      List<INodeDirectorySnapshottable> snapshottableDirs = 
          new ArrayList<INodeDirectorySnapshottable>();
      checkSnapshot(inodesInPath.getLastINode(), snapshottableDirs);
      filesRemoved = unprotectedDelete(inodesInPath, collectedBlocks,
          removedINodes, now);
      namesystem.removeSnapshottableDirs(snapshottableDirs);
    }
  } finally {
    writeUnlock();
  }
  if (filesRemoved < 0) {
    return false;
  }
  fsImage.getEditLog().logDelete(src, now, logRetryCache);
  incrDeletedFileCount(filesRemoved);
  // Blocks/INodes will be handled later by the caller of this method
  getFSNamesystem().removePathAndBlocks(src, null, null);
  return true;
}
Project: hadoop-on-lustre2    File: FSDirectory.java
/**
 * Get a listing of all the snapshots of a snapshottable directory
 */
private DirectoryListing getSnapshotsListing(String src, byte[] startAfter)
    throws UnresolvedLinkException, IOException {
  Preconditions.checkState(hasReadLock());
  Preconditions.checkArgument(
      src.endsWith(HdfsConstants.SEPARATOR_DOT_SNAPSHOT_DIR), 
      "%s does not end with %s", src, HdfsConstants.SEPARATOR_DOT_SNAPSHOT_DIR);

  final String dirPath = normalizePath(src.substring(0,
      src.length() - HdfsConstants.DOT_SNAPSHOT_DIR.length()));

  final INode node = this.getINode(dirPath);
  final INodeDirectorySnapshottable dirNode = INodeDirectorySnapshottable
      .valueOf(node, dirPath);
  final ReadOnlyList<Snapshot> snapshots = dirNode.getSnapshotList();
  int skipSize = ReadOnlyList.Util.binarySearch(snapshots, startAfter);
  skipSize = skipSize < 0 ? -skipSize - 1 : skipSize + 1;
  int numOfListing = Math.min(snapshots.size() - skipSize, this.lsLimit);
  final HdfsFileStatus listing[] = new HdfsFileStatus[numOfListing];
  for (int i = 0; i < numOfListing; i++) {
    Root sRoot = snapshots.get(i + skipSize).getRoot();
    listing[i] = createFileStatus(sRoot.getLocalNameBytes(), sRoot,
        Snapshot.CURRENT_STATE_ID);
  }
  return new DirectoryListing(
      listing, snapshots.size() - skipSize - numOfListing);
}
Project: hadoop-on-lustre2    File: INodeDirectory.java
/** Replace itself with an {@link INodeDirectorySnapshottable}. */
public INodeDirectorySnapshottable replaceSelf4INodeDirectorySnapshottable(
    int latestSnapshotId, final INodeMap inodeMap)
    throws QuotaExceededException {
  Preconditions.checkState(!(this instanceof INodeDirectorySnapshottable),
      "this is already an INodeDirectorySnapshottable, this=%s", this);
  final INodeDirectorySnapshottable s = new INodeDirectorySnapshottable(this);
  replaceSelf(s, inodeMap).getDirectoryWithSnapshotFeature().getDiffs()
      .saveSelf2Snapshot(latestSnapshotId, s, this);
  return s;
}
Project: hadoop-on-lustre2    File: TestSnapshotPathINodes.java
static Snapshot getSnapshot(INodesInPath inodesInPath, String name) {
  if (name == null) {
    return null;
  }
  final int i = inodesInPath.getSnapshotRootIndex() - 1;
  final INode inode = inodesInPath.getINodes()[i];
  return ((INodeDirectorySnapshottable)inode).getSnapshot(
      DFSUtil.string2Bytes(name)); 
}
Project: hadoop-plus    File: FSDirectory.java
/**
 * Delete the target directory and collect the blocks under it
 * 
 * @param src Path of a directory to delete
 * @param collectedBlocks Blocks under the deleted directory
 * @param removedINodes INodes that should be removed from {@link #inodeMap}
 * @param logRetryCache Whether to record RPC IDs in editlog to support retry
 *                      cache rebuilding.
 * @return true on successful deletion; else false
 */
boolean delete(String src, BlocksMapUpdateInfo collectedBlocks,
    List<INode> removedINodes, boolean logRetryCache) throws IOException {
  if (NameNode.stateChangeLog.isDebugEnabled()) {
    NameNode.stateChangeLog.debug("DIR* FSDirectory.delete: " + src);
  }
  waitForReady();
  long now = now();
  final long filesRemoved;
  writeLock();
  try {
    final INodesInPath inodesInPath = rootDir.getINodesInPath4Write(
        normalizePath(src), false);
    if (!deleteAllowed(inodesInPath, src)) {
      filesRemoved = -1;
    } else {
      // Before removing the node, first check if the targetNode is for a
      // snapshottable dir with snapshots, or its descendants have
      // snapshottable dir with snapshots
      final INode targetNode = inodesInPath.getLastINode();
      List<INodeDirectorySnapshottable> snapshottableDirs = 
          new ArrayList<INodeDirectorySnapshottable>();
      checkSnapshot(targetNode, snapshottableDirs);
      filesRemoved = unprotectedDelete(inodesInPath, collectedBlocks,
          removedINodes, now);
      if (snapshottableDirs.size() > 0) {
        // There are some snapshottable directories without snapshots to be
        // deleted. Need to update the SnapshotManager.
        namesystem.removeSnapshottableDirs(snapshottableDirs);
      }
    }
  } finally {
    writeUnlock();
  }
  if (filesRemoved < 0) {
    return false;
  }
  fsImage.getEditLog().logDelete(src, now, logRetryCache);
  incrDeletedFileCount(filesRemoved);
  // Blocks/INodes will be handled later by the caller of this method
  getFSNamesystem().removePathAndBlocks(src, null, null);
  return true;
}
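End to end, the checkSnapshot() veto inside delete() surfaces to clients as an IOException. A hedged client-side sketch (paths and snapshot names are illustrative):

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;

public class DeleteWithSnapshotsDemo {
  public static void main(String[] args) throws Exception {
    Path dir = new Path("/data");
    DistributedFileSystem dfs =
        (DistributedFileSystem) dir.getFileSystem(new Configuration());
    dfs.allowSnapshot(dir);
    dfs.createSnapshot(dir, "s1");
    try {
      dfs.delete(dir, true);       // recursive delete of a dir with snapshots
    } catch (IOException e) {
      // rejected by checkSnapshot(): the directory still has snapshots
      System.out.println("delete rejected: " + e.getMessage());
    }
    dfs.deleteSnapshot(dir, "s1"); // after this, the delete would succeed
  }
}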
Project: hadoop-plus    File: FSNamesystem.java
/**
 * Remove a list of INodeDirectorySnapshottable from the SnapshotManager
 * @param toRemove the list of INodeDirectorySnapshottable to be removed
 */
void removeSnapshottableDirs(List<INodeDirectorySnapshottable> toRemove) {
  if (snapshotManager != null) {
    snapshotManager.removeSnapshottable(toRemove);
  }
}
Project: hadoop-plus    File: FSImageFormat.java
/**
 * Load a directory when snapshots are supported.
 * @param in The {@link DataInput} instance to read.
 * @param counter Counter to increment for namenode startup progress
 */
private void loadDirectoryWithSnapshot(DataInput in, Counter counter)
    throws IOException {
  // Step 1. Identify the parent INode
  long inodeId = in.readLong();
  final INodeDirectory parent = this.namesystem.dir.getInode(inodeId)
      .asDirectory();

  // Check if the whole subtree has been saved (for reference nodes)
  boolean toLoadSubtree = referenceMap.toProcessSubtree(parent.getId());
  if (!toLoadSubtree) {
    return;
  }

  // Step 2. Load snapshots if parent is snapshottable
  int numSnapshots = in.readInt();
  if (numSnapshots >= 0) {
    final INodeDirectorySnapshottable snapshottableParent
        = INodeDirectorySnapshottable.valueOf(parent, parent.getLocalName());
    // load snapshots and snapshotQuota
    SnapshotFSImageFormat.loadSnapshotList(snapshottableParent,
        numSnapshots, in, this);
    if (snapshottableParent.getSnapshotQuota() > 0) {
      // add the directory to the snapshottable directory list in 
      // SnapshotManager. Note that we only add root when its snapshot quota
      // is positive.
      this.namesystem.getSnapshotManager().addSnapshottable(
          snapshottableParent);
    }
  }

  // Step 3. Load children nodes under parent
  loadChildren(parent, in, counter);

  // Step 4. load Directory Diff List
  SnapshotFSImageFormat.loadDirectoryDiffList(parent, in, this);

  // Recursively load sub-directories, including snapshot copies of deleted
  // directories
  int numSubTree = in.readInt();
  for (int i = 0; i < numSubTree; i++) {
    loadDirectoryWithSnapshot(in, counter);
  }
}
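Read off from the method above, the per-directory record it consumes has the following shape (a reconstruction from the reader; the matching writer is not shown in this listing):

// long  inodeId         // Step 1: locates the already-loaded parent directory
// int   numSnapshots    // Step 2: >= 0 only for snapshottable directories
// ...   snapshot list   //         present when numSnapshots >= 0
// ...   children        // Step 3: consumed by loadChildren()
// ...   directory diffs // Step 4: consumed by loadDirectoryDiffList()
// int   numSubTree      // count of nested directory records that follow
// ...   numSubTree recursive records with this same layout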
Project: hadoop-plus    File: TestFSImageWithSnapshot.java
/**
 * Test when there is a snapshot taken on the root
 */
@Test
public void testSnapshotOnRoot() throws Exception {
  final Path root = new Path("/");
  hdfs.allowSnapshot(root);
  hdfs.createSnapshot(root, "s1");

  cluster.shutdown();
  cluster = new MiniDFSCluster.Builder(conf).format(false)
      .numDataNodes(REPLICATION).build();
  cluster.waitActive();
  fsn = cluster.getNamesystem();
  hdfs = cluster.getFileSystem();

  // save namespace and restart cluster
  hdfs.setSafeMode(SafeModeAction.SAFEMODE_ENTER);
  hdfs.saveNamespace();
  hdfs.setSafeMode(SafeModeAction.SAFEMODE_LEAVE);
  cluster.shutdown();
  cluster = new MiniDFSCluster.Builder(conf).format(false)
      .numDataNodes(REPLICATION).build();
  cluster.waitActive();
  fsn = cluster.getNamesystem();
  hdfs = cluster.getFileSystem();

  INodeDirectorySnapshottable rootNode = 
      (INodeDirectorySnapshottable) fsn.dir.getINode4Write(root.toString());
  assertTrue("The children list of root should be empty", 
      rootNode.getChildrenList(null).isEmpty());
  // one snapshot on root: s1
  List<DirectoryDiff> diffList = rootNode.getDiffs().asList();
  assertEquals(1, diffList.size());
  assertEquals("s1", diffList.get(0).getSnapshot().getRoot().getLocalName());

  // check SnapshotManager's snapshottable directory list
  assertEquals(1, fsn.getSnapshotManager().getNumSnapshottableDirs());
  SnapshottableDirectoryStatus[] sdirs = fsn.getSnapshotManager()
      .getSnapshottableDirListing(null);
  assertEquals(root, sdirs[0].getFullPath());

  // save namespace and restart cluster
  hdfs.setSafeMode(SafeModeAction.SAFEMODE_ENTER);
  hdfs.saveNamespace();
  hdfs.setSafeMode(SafeModeAction.SAFEMODE_LEAVE);
  cluster.shutdown();
  cluster = new MiniDFSCluster.Builder(conf).format(false)
      .numDataNodes(REPLICATION).build();
  cluster.waitActive();
  fsn = cluster.getNamesystem();
  hdfs = cluster.getFileSystem();
}
Project: hadoop-TCP    File: FSDirectory.java
/**
 * Delete the target directory and collect the blocks under it
 * 
 * @param src Path of a directory to delete
 * @param collectedBlocks Blocks under the deleted directory
 * @param removedINodes INodes that should be removed from {@link #inodeMap}
 * @param logRetryCache Whether to record RPC IDs in editlog to support retry
 *                      cache rebuilding.
 * @return true on successful deletion; else false
 */
boolean delete(String src, BlocksMapUpdateInfo collectedBlocks,
    List<INode> removedINodes, boolean logRetryCache) throws IOException {
  if (NameNode.stateChangeLog.isDebugEnabled()) {
    NameNode.stateChangeLog.debug("DIR* FSDirectory.delete: " + src);
  }
  waitForReady();
  long now = now();
  final long filesRemoved;
  writeLock();
  try {
    final INodesInPath inodesInPath = rootDir.getINodesInPath4Write(
        normalizePath(src), false);
    if (!deleteAllowed(inodesInPath, src)) {
      filesRemoved = -1;
    } else {
      // Before removing the node, first check if the targetNode is for a
      // snapshottable dir with snapshots, or its descendants have
      // snapshottable dir with snapshots
      final INode targetNode = inodesInPath.getLastINode();
      List<INodeDirectorySnapshottable> snapshottableDirs = 
          new ArrayList<INodeDirectorySnapshottable>();
      checkSnapshot(targetNode, snapshottableDirs);
      filesRemoved = unprotectedDelete(inodesInPath, collectedBlocks,
          removedINodes, now);
      if (snapshottableDirs.size() > 0) {
        // There are some snapshottable directories without snapshots to be
        // deleted. Need to update the SnapshotManager.
        namesystem.removeSnapshottableDirs(snapshottableDirs);
      }
    }
  } finally {
    writeUnlock();
  }
  if (filesRemoved < 0) {
    return false;
  }
  fsImage.getEditLog().logDelete(src, now, logRetryCache);
  incrDeletedFileCount(filesRemoved);
  // Blocks/INodes will be handled later by the caller of this method
  getFSNamesystem().removePathAndBlocks(src, null, null);
  return true;
}
Project: hadoop-TCP    File: FSNamesystem.java
/**
 * Remove a list of INodeDirectorySnapshottable from the SnapshotManager
 * @param toRemove the list of INodeDirectorySnapshottable to be removed
 */
void removeSnapshottableDirs(List<INodeDirectorySnapshottable> toRemove) {
  if (snapshotManager != null) {
    snapshotManager.removeSnapshottable(toRemove);
  }
}
Project: hadoop-TCP    File: FSImageFormat.java
/**
 * Load a directory when snapshots are supported.
 * @param in The {@link DataInput} instance to read.
 * @param counter Counter to increment for namenode startup progress
 */
private void loadDirectoryWithSnapshot(DataInput in, Counter counter)
    throws IOException {
  // Step 1. Identify the parent INode
  long inodeId = in.readLong();
  final INodeDirectory parent = this.namesystem.dir.getInode(inodeId)
      .asDirectory();

  // Check if the whole subtree has been saved (for reference nodes)
  boolean toLoadSubtree = referenceMap.toProcessSubtree(parent.getId());
  if (!toLoadSubtree) {
    return;
  }

  // Step 2. Load snapshots if parent is snapshottable
  int numSnapshots = in.readInt();
  if (numSnapshots >= 0) {
    final INodeDirectorySnapshottable snapshottableParent
        = INodeDirectorySnapshottable.valueOf(parent, parent.getLocalName());
    // load snapshots and snapshotQuota
    SnapshotFSImageFormat.loadSnapshotList(snapshottableParent,
        numSnapshots, in, this);
    if (snapshottableParent.getSnapshotQuota() > 0) {
      // add the directory to the snapshottable directory list in 
      // SnapshotManager. Note that we only add root when its snapshot quota
      // is positive.
      this.namesystem.getSnapshotManager().addSnapshottable(
          snapshottableParent);
    }
  }

  // Step 3. Load children nodes under parent
  loadChildren(parent, in, counter);

  // Step 4. load Directory Diff List
  SnapshotFSImageFormat.loadDirectoryDiffList(parent, in, this);

  // Recursively load sub-directories, including snapshot copies of deleted
  // directories
  int numSubTree = in.readInt();
  for (int i = 0; i < numSubTree; i++) {
    loadDirectoryWithSnapshot(in, counter);
  }
}
Project: hadoop-TCP    File: TestFSImageWithSnapshot.java
/**
 * Test when there is a snapshot taken on the root
 */
@Test
public void testSnapshotOnRoot() throws Exception {
  final Path root = new Path("/");
  hdfs.allowSnapshot(root);
  hdfs.createSnapshot(root, "s1");

  cluster.shutdown();
  cluster = new MiniDFSCluster.Builder(conf).format(false)
      .numDataNodes(REPLICATION).build();
  cluster.waitActive();
  fsn = cluster.getNamesystem();
  hdfs = cluster.getFileSystem();

  // save namespace and restart cluster
  hdfs.setSafeMode(SafeModeAction.SAFEMODE_ENTER);
  hdfs.saveNamespace();
  hdfs.setSafeMode(SafeModeAction.SAFEMODE_LEAVE);
  cluster.shutdown();
  cluster = new MiniDFSCluster.Builder(conf).format(false)
      .numDataNodes(REPLICATION).build();
  cluster.waitActive();
  fsn = cluster.getNamesystem();
  hdfs = cluster.getFileSystem();

  INodeDirectorySnapshottable rootNode = 
      (INodeDirectorySnapshottable) fsn.dir.getINode4Write(root.toString());
  assertTrue("The children list of root should be empty", 
      rootNode.getChildrenList(null).isEmpty());
  // one snapshot on root: s1
  List<DirectoryDiff> diffList = rootNode.getDiffs().asList();
  assertEquals(1, diffList.size());
  assertEquals("s1", diffList.get(0).getSnapshot().getRoot().getLocalName());

  // check SnapshotManager's snapshottable directory list
  assertEquals(1, fsn.getSnapshotManager().getNumSnapshottableDirs());
  SnapshottableDirectoryStatus[] sdirs = fsn.getSnapshotManager()
      .getSnapshottableDirListing(null);
  assertEquals(root, sdirs[0].getFullPath());

  // save namespace and restart cluster
  hdfs.setSafeMode(SafeModeAction.SAFEMODE_ENTER);
  hdfs.saveNamespace();
  hdfs.setSafeMode(SafeModeAction.SAFEMODE_LEAVE);
  cluster.shutdown();
  cluster = new MiniDFSCluster.Builder(conf).format(false)
      .numDataNodes(REPLICATION).build();
  cluster.waitActive();
  fsn = cluster.getNamesystem();
  hdfs = cluster.getFileSystem();
}
Project: hardfs    File: FSDirectory.java
/**
 * Delete the target directory and collect the blocks under it
 * 
 * @param src Path of a directory to delete
 * @param collectedBlocks Blocks under the deleted directory
 * @param removedINodes INodes that should be removed from {@link #inodeMap}
 * @param logRetryCache Whether to record RPC IDs in editlog to support retry
 *                      cache rebuilding.
 * @return true on successful deletion; else false
 */
boolean delete(String src, BlocksMapUpdateInfo collectedBlocks,
    List<INode> removedINodes, boolean logRetryCache) throws IOException {
  if (NameNode.stateChangeLog.isDebugEnabled()) {
    NameNode.stateChangeLog.debug("DIR* FSDirectory.delete: " + src);
  }
  waitForReady();
  long now = now();
  final long filesRemoved;
  writeLock();
  try {
    final INodesInPath inodesInPath = rootDir.getINodesInPath4Write(
        normalizePath(src), false);
    if (!deleteAllowed(inodesInPath, src)) {
      filesRemoved = -1;
    } else {
      // Before removing the node, first check if the targetNode is for a
      // snapshottable dir with snapshots, or its descendants have
      // snapshottable dir with snapshots
      final INode targetNode = inodesInPath.getLastINode();
      List<INodeDirectorySnapshottable> snapshottableDirs = 
          new ArrayList<INodeDirectorySnapshottable>();
      checkSnapshot(targetNode, snapshottableDirs);
      filesRemoved = unprotectedDelete(inodesInPath, collectedBlocks,
          removedINodes, now);
      if (snapshottableDirs.size() > 0) {
        // There are some snapshottable directories without snapshots to be
        // deleted. Need to update the SnapshotManager.
        namesystem.removeSnapshottableDirs(snapshottableDirs);
      }
    }
  } finally {
    writeUnlock();
  }
  if (filesRemoved < 0) {
    return false;
  }
  fsImage.getEditLog().logDelete(src, now, logRetryCache);
  incrDeletedFileCount(filesRemoved);
  // Blocks/INodes will be handled later by the caller of this method
  getFSNamesystem().removePathAndBlocks(src, null, null);
  return true;
}
Project: hardfs    File: FSNamesystem.java
/**
 * Remove a list of INodeDirectorySnapshottable from the SnapshotManager
 * @param toRemove the list of INodeDirectorySnapshottable to be removed
 */
void removeSnapshottableDirs(List<INodeDirectorySnapshottable> toRemove) {
  if (snapshotManager != null) {
    snapshotManager.removeSnapshottable(toRemove);
  }
}
Project: hardfs    File: FSImageFormat.java
/**
 * Load a directory when snapshots are supported.
 * @param in The {@link DataInput} instance to read.
 * @param counter Counter to increment for namenode startup progress
 */
private void loadDirectoryWithSnapshot(DataInput in, Counter counter)
    throws IOException {
  // Step 1. Identify the parent INode
  long inodeId = in.readLong();
  final INodeDirectory parent = this.namesystem.dir.getInode(inodeId)
      .asDirectory();

  // Check if the whole subtree has been saved (for reference nodes)
  boolean toLoadSubtree = referenceMap.toProcessSubtree(parent.getId());
  if (!toLoadSubtree) {
    return;
  }

  // Step 2. Load snapshots if parent is snapshottable
  int numSnapshots = in.readInt();
  if (numSnapshots >= 0) {
    final INodeDirectorySnapshottable snapshottableParent
        = INodeDirectorySnapshottable.valueOf(parent, parent.getLocalName());
    // load snapshots and snapshotQuota
    SnapshotFSImageFormat.loadSnapshotList(snapshottableParent,
        numSnapshots, in, this);
    if (snapshottableParent.getSnapshotQuota() > 0) {
      // add the directory to the snapshottable directory list in 
      // SnapshotManager. Note that we only add root when its snapshot quota
      // is positive.
      this.namesystem.getSnapshotManager().addSnapshottable(
          snapshottableParent);
    }
  }

  // Step 3. Load children nodes under parent
  loadChildren(parent, in, counter);

  // Step 4. load Directory Diff List
  SnapshotFSImageFormat.loadDirectoryDiffList(parent, in, this);

  // Recursively load sub-directories, including snapshot copies of deleted
  // directories
  int numSubTree = in.readInt();
  for (int i = 0; i < numSubTree; i++) {
    loadDirectoryWithSnapshot(in, counter);
  }
}
Project: hardfs    File: TestFSImageWithSnapshot.java
/**
 * Test when there is a snapshot taken on the root
 */
@Test
public void testSnapshotOnRoot() throws Exception {
  final Path root = new Path("/");
  hdfs.allowSnapshot(root);
  hdfs.createSnapshot(root, "s1");

  cluster.shutdown();
  cluster = new MiniDFSCluster.Builder(conf).format(false)
      .numDataNodes(REPLICATION).build();
  cluster.waitActive();
  fsn = cluster.getNamesystem();
  hdfs = cluster.getFileSystem();

  // save namespace and restart cluster
  hdfs.setSafeMode(SafeModeAction.SAFEMODE_ENTER);
  hdfs.saveNamespace();
  hdfs.setSafeMode(SafeModeAction.SAFEMODE_LEAVE);
  cluster.shutdown();
  cluster = new MiniDFSCluster.Builder(conf).format(false)
      .numDataNodes(REPLICATION).build();
  cluster.waitActive();
  fsn = cluster.getNamesystem();
  hdfs = cluster.getFileSystem();

  INodeDirectorySnapshottable rootNode = 
      (INodeDirectorySnapshottable) fsn.dir.getINode4Write(root.toString());
  assertTrue("The children list of root should be empty", 
      rootNode.getChildrenList(null).isEmpty());
  // one snapshot on root: s1
  List<DirectoryDiff> diffList = rootNode.getDiffs().asList();
  assertEquals(1, diffList.size());
  assertEquals("s1", diffList.get(0).getSnapshot().getRoot().getLocalName());

  // check SnapshotManager's snapshottable directory list
  assertEquals(1, fsn.getSnapshotManager().getNumSnapshottableDirs());
  SnapshottableDirectoryStatus[] sdirs = fsn.getSnapshotManager()
      .getSnapshottableDirListing(null);
  assertEquals(root, sdirs[0].getFullPath());

  // save namespace and restart cluster
  hdfs.setSafeMode(SafeModeAction.SAFEMODE_ENTER);
  hdfs.saveNamespace();
  hdfs.setSafeMode(SafeModeAction.SAFEMODE_LEAVE);
  cluster.shutdown();
  cluster = new MiniDFSCluster.Builder(conf).format(false)
      .numDataNodes(REPLICATION).build();
  cluster.waitActive();
  fsn = cluster.getNamesystem();
  hdfs = cluster.getFileSystem();
}