Java class org.apache.hadoop.hdfs.util.ChunkedArrayList: example source code
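ChunkedArrayList is a List implementation in the HDFS utility package that stores its elements in a chain of bounded chunks rather than in one contiguous array, so very large collections (such as the INodes and blocks gathered during a recursive delete) can grow without repeated full-array copies. The snippets below show how the NameNode code uses it. As a minimal standalone sketch, assuming hadoop-hdfs is on the classpath and using only the List operations these snippets rely on (add, iterate, size, clear):

// Minimal usage sketch, not taken from the projects listed below.
// Only add/iteration/size/clear are exercised.
import java.util.List;
import org.apache.hadoop.hdfs.util.ChunkedArrayList;

public class ChunkedArrayListSketch {
  public static void main(String[] args) {
    List<Long> ids = new ChunkedArrayList<Long>();
    for (long i = 0; i < 100_000; i++) {
      ids.add(i);              // appends land in bounded chunks, no large array copies
    }
    long sum = 0;
    for (long id : ids) {      // sequential iteration is the intended access pattern
      sum += id;
    }
    System.out.println(ids.size() + " elements, sum=" + sum);
    ids.clear();               // drop all chunks once the batch has been processed
  }
}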

Project: FlexMap    File: FSNamesystem.java
/**
 * Remove a file/directory from the namespace.
 * <p>
 * For large directories, deletion is incremental: the blocks under
 * the directory are collected and deleted a small number at a time while
 * holding the {@link FSNamesystem} lock.
 * <p>
 * For a small directory or file, the deletion is done in one shot.
 * 
 * @see ClientProtocol#delete(String, boolean) for description of exceptions
 */
private boolean deleteInternal(String src, boolean recursive,
    boolean enforcePermission, boolean logRetryCache)
    throws AccessControlException, SafeModeException, UnresolvedLinkException,
           IOException {
  BlocksMapUpdateInfo collectedBlocks = new BlocksMapUpdateInfo();
  List<INode> removedINodes = new ChunkedArrayList<INode>();
  FSPermissionChecker pc = getPermissionChecker();
  checkOperation(OperationCategory.WRITE);
  byte[][] pathComponents = FSDirectory.getPathComponentsForReservedPath(src);
  boolean ret = false;

  waitForLoadingFSImage();
  writeLock();
  try {
    checkOperation(OperationCategory.WRITE);
    checkNameNodeSafeMode("Cannot delete " + src);
    src = resolvePath(src, pathComponents);
    if (!recursive && dir.isNonEmptyDirectory(src)) {
      throw new PathIsNotEmptyDirectoryException(src + " is non empty");
    }
    if (enforcePermission && isPermissionEnabled) {
      checkPermission(pc, src, false, null, FsAction.WRITE, null,
          FsAction.ALL, true, false);
    }

    long mtime = now();
    // Unlink the target directory from directory tree
    long filesRemoved = dir.delete(src, collectedBlocks, removedINodes,
            mtime);
    if (filesRemoved < 0) {
      return false;
    }
    getEditLog().logDelete(src, mtime, logRetryCache);
    incrDeletedFileCount(filesRemoved);
    // Blocks/INodes will be handled later
    removePathAndBlocks(src, null, removedINodes, true);
    ret = true;
  } finally {
    writeUnlock();
  }
  getEditLog().logSync(); 
  removeBlocks(collectedBlocks); // Incremental deletion of blocks
  collectedBlocks.clear();

  if (NameNode.stateChangeLog.isDebugEnabled()) {
    NameNode.stateChangeLog.debug("DIR* Namesystem.delete: "
      + src +" is removed");
  }
  return ret;
}
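The javadoc above describes the incremental pattern: blocks are first collected into the ChunkedArrayList-backed BlocksMapUpdateInfo while the namesystem lock is held, and removeBlocks(collectedBlocks) then deletes them a small batch at a time. The following self-contained sketch illustrates that batching idea with a ReentrantLock standing in for the namesystem lock; the class name, method name, and batch size are illustrative assumptions, not the real FSNamesystem.removeBlocks implementation.

// Hypothetical sketch of batched cleanup over a ChunkedArrayList; names are not HDFS internals.
import java.util.List;
import java.util.concurrent.locks.ReentrantLock;
import org.apache.hadoop.hdfs.util.ChunkedArrayList;

public class IncrementalDeleteSketch {
  private static final int BATCH = 1000;                   // assumed batch size
  private final ReentrantLock lock = new ReentrantLock();  // stands in for the namesystem lock

  void removeInBatches(List<Long> collectedBlockIds) {
    int inBatch = 0;
    lock.lock();
    try {
      for (long id : collectedBlockIds) {
        // the real code would invalidate the block here; this sketch only paces the loop
        if (++inBatch >= BATCH) {
          lock.unlock();                                   // let other operations run between batches
          inBatch = 0;
          lock.lock();
        }
      }
    } finally {
      lock.unlock();
    }
  }

  public static void main(String[] args) {
    List<Long> ids = new ChunkedArrayList<Long>();
    for (long i = 0; i < 5_000; i++) {
      ids.add(i);
    }
    new IncrementalDeleteSketch().removeInBatches(ids);
    System.out.println("processed " + ids.size() + " collected blocks in batches of " + BATCH);
  }
}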
Project: FlexMap    File: FSNamesystem.java
/**
 * Delete a snapshot of a snapshottable directory
 * @param snapshotRoot The snapshottable directory
 * @param snapshotName The name of the to-be-deleted snapshot
 * @throws SafeModeException
 * @throws IOException
 */
void deleteSnapshot(String snapshotRoot, String snapshotName)
    throws SafeModeException, IOException {
  checkOperation(OperationCategory.WRITE);
  final FSPermissionChecker pc = getPermissionChecker();

  CacheEntry cacheEntry = RetryCache.waitForCompletion(retryCache);
  if (cacheEntry != null && cacheEntry.isSuccess()) {
    return; // Return previous response
  }
  boolean success = false;
  BlocksMapUpdateInfo collectedBlocks = new BlocksMapUpdateInfo();
  writeLock();
  try {
    checkOperation(OperationCategory.WRITE);
    checkNameNodeSafeMode("Cannot delete snapshot for " + snapshotRoot);
    if (isPermissionEnabled) {
      checkOwner(pc, snapshotRoot);
    }

    List<INode> removedINodes = new ChunkedArrayList<INode>();
    dir.writeLock();
    try {
      snapshotManager.deleteSnapshot(snapshotRoot, snapshotName,
          collectedBlocks, removedINodes);
      dir.removeFromInodeMap(removedINodes);
    } finally {
      dir.writeUnlock();
    }
    removedINodes.clear();
    getEditLog().logDeleteSnapshot(snapshotRoot, snapshotName,
        cacheEntry != null);
    success = true;
  } finally {
    writeUnlock();
    RetryCache.setState(cacheEntry, success);
  }
  getEditLog().logSync();

  removeBlocks(collectedBlocks);
  collectedBlocks.clear();

  if (auditLog.isInfoEnabled() && isExternalInvocation()) {
    String rootPath = Snapshot.getSnapshotPath(snapshotRoot, snapshotName);
    logAuditEvent(true, "deleteSnapshot", rootPath, null, null);
  }
}
Project: FlexMap    File: INode.java
public BlocksMapUpdateInfo() {
  toDeleteList = new ChunkedArrayList<Block>();
}
Project: hadoop-on-lustre2    File: FSNamesystem.java
/**
 * Remove a file/directory from the namespace.
 * <p>
 * For large directories, deletion is incremental: the blocks under
 * the directory are collected and deleted a small number at a time while
 * holding the {@link FSNamesystem} lock.
 * <p>
 * For a small directory or file, the deletion is done in one shot.
 * 
 * @see ClientProtocol#delete(String, boolean) for description of exceptions
 */
private boolean deleteInternal(String src, boolean recursive,
    boolean enforcePermission, boolean logRetryCache)
    throws AccessControlException, SafeModeException, UnresolvedLinkException,
           IOException {
  BlocksMapUpdateInfo collectedBlocks = new BlocksMapUpdateInfo();
  List<INode> removedINodes = new ChunkedArrayList<INode>();
  FSPermissionChecker pc = getPermissionChecker();
  checkOperation(OperationCategory.WRITE);
  byte[][] pathComponents = FSDirectory.getPathComponentsForReservedPath(src);
  boolean ret = false;
  writeLock();
  try {
    checkOperation(OperationCategory.WRITE);
    checkNameNodeSafeMode("Cannot delete " + src);
    src = FSDirectory.resolvePath(src, pathComponents, dir);
    if (!recursive && dir.isNonEmptyDirectory(src)) {
      throw new IOException(src + " is non empty");
    }
    if (enforcePermission && isPermissionEnabled) {
      checkPermission(pc, src, false, null, FsAction.WRITE, null,
          FsAction.ALL, false);
    }
    // Unlink the target directory from directory tree
    if (!dir.delete(src, collectedBlocks, removedINodes, logRetryCache)) {
      return false;
    }
    ret = true;
  } finally {
    writeUnlock();
  }
  getEditLog().logSync(); 
  removeBlocks(collectedBlocks); // Incremental deletion of blocks
  collectedBlocks.clear();
  dir.writeLock();
  try {
    dir.removeFromInodeMap(removedINodes);
  } finally {
    dir.writeUnlock();
  }
  removedINodes.clear();
  if (NameNode.stateChangeLog.isDebugEnabled()) {
    NameNode.stateChangeLog.debug("DIR* Namesystem.delete: "
      + src +" is removed");
  }
  return ret;
}
Project: hadoop-on-lustre2    File: FSNamesystem.java
/**
 * Delete a snapshot of a snapshottable directory
 * @param snapshotRoot The snapshottable directory
 * @param snapshotName The name of the to-be-deleted snapshot
 * @throws SafeModeException
 * @throws IOException
 */
void deleteSnapshot(String snapshotRoot, String snapshotName)
    throws SafeModeException, IOException {
  checkOperation(OperationCategory.WRITE);
  final FSPermissionChecker pc = getPermissionChecker();

  CacheEntry cacheEntry = RetryCache.waitForCompletion(retryCache);
  if (cacheEntry != null && cacheEntry.isSuccess()) {
    return; // Return previous response
  }
  boolean success = false;
  BlocksMapUpdateInfo collectedBlocks = new BlocksMapUpdateInfo();
  writeLock();
  try {
    checkOperation(OperationCategory.WRITE);
    checkNameNodeSafeMode("Cannot delete snapshot for " + snapshotRoot);
    if (isPermissionEnabled) {
      checkOwner(pc, snapshotRoot);
    }

    List<INode> removedINodes = new ChunkedArrayList<INode>();
    dir.writeLock();
    try {
      snapshotManager.deleteSnapshot(snapshotRoot, snapshotName,
          collectedBlocks, removedINodes);
      dir.removeFromInodeMap(removedINodes);
    } finally {
      dir.writeUnlock();
    }
    removedINodes.clear();
    getEditLog().logDeleteSnapshot(snapshotRoot, snapshotName,
        cacheEntry != null);
    success = true;
  } finally {
    writeUnlock();
    RetryCache.setState(cacheEntry, success);
  }
  getEditLog().logSync();

  removeBlocks(collectedBlocks);
  collectedBlocks.clear();

  if (auditLog.isInfoEnabled() && isExternalInvocation()) {
    String rootPath = Snapshot.getSnapshotPath(snapshotRoot, snapshotName);
    logAuditEvent(true, "deleteSnapshot", rootPath, null, null);
  }
}
Project: hadoop-on-lustre2    File: INode.java
public BlocksMapUpdateInfo() {
  toDeleteList = new ChunkedArrayList<Block>();
}