Example usages of the Java class org.apache.hadoop.hdfs.server.namenode.INode.BlocksMapUpdateInfo

Project: aliyun-oss-hadoop-fs    File: FSDirDeleteOp.java
/**
 * Remove a file/directory from the namespace.
 * <p>
 * For large directories, deletion is incremental. The blocks under
 * the directory are collected and deleted a small number at a time while
 * holding the {@link FSNamesystem} lock.
 * <p>
 * For a small directory or file, the deletion is done in one shot.
 *
 * @param fsn namespace
 * @param src path name to be deleted
 * @param recursive boolean true to apply to all sub-directories recursively
 * @param logRetryCache whether to record RPC ids in editlog for retry cache
 *          rebuilding
 * @return blocks collected from the deleted path
 * @throws IOException
 */
static BlocksMapUpdateInfo delete(
    FSNamesystem fsn, String src, boolean recursive, boolean logRetryCache)
    throws IOException {
  FSDirectory fsd = fsn.getFSDirectory();
  FSPermissionChecker pc = fsd.getPermissionChecker();
  byte[][] pathComponents = FSDirectory.getPathComponentsForReservedPath(src);

  src = fsd.resolvePath(pc, src, pathComponents);
  final INodesInPath iip = fsd.getINodesInPath4Write(src, false);
  if (!recursive && fsd.isNonEmptyDirectory(iip)) {
    throw new PathIsNotEmptyDirectoryException(src + " is non empty");
  }
  if (fsd.isPermissionEnabled()) {
    fsd.checkPermission(pc, iip, false, null, FsAction.WRITE, null,
                        FsAction.ALL, true);
  }
  if (recursive && fsd.isNonEmptyDirectory(iip)) {
    checkProtectedDescendants(fsd, fsd.normalizePath(src));
  }

  return deleteInternal(fsn, src, iip, logRetryCache);
}
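The returned BlocksMapUpdateInfo is essentially a queue of blocks to delete: the operation records blocks while holding the namesystem lock, and the caller drains the list afterwards. A minimal, self-contained sketch of that collect-then-drain pattern (BlockCollector is a simplified stand-in, not the real HDFS class):

import java.util.ArrayList;
import java.util.List;

// Simplified stand-in for BlocksMapUpdateInfo: it only accumulates block
// IDs during the delete and hands them back for removal afterwards.
class BlockCollector {
  private final List<Long> toDelete = new ArrayList<>();

  void addDeleteBlock(long blockId) {
    toDelete.add(blockId);
  }

  List<Long> getToDeleteList() {
    return toDelete;
  }
}

public class CollectThenDeleteDemo {
  public static void main(String[] args) {
    BlockCollector collected = new BlockCollector();
    // Phase 1: while "holding the lock", only record what to delete.
    for (long blockId = 1; blockId <= 5; blockId++) {
      collected.addDeleteBlock(blockId);
    }
    // Phase 2: after "releasing the lock", actually remove the blocks.
    for (long blockId : collected.getToDeleteList()) {
      System.out.println("removing block " + blockId);
    }
  }
}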
Project: hadoop    File: FSDirDeleteOp.java
/**
 * Delete the target directory and collect the blocks under it
 *
 * @param iip the INodesInPath instance containing all the INodes for the path
 * @param collectedBlocks Blocks under the deleted directory
 * @param removedINodes INodes that should be removed from inodeMap
 * @return the number of files that have been removed
 */
static long delete(
    FSDirectory fsd, INodesInPath iip, BlocksMapUpdateInfo collectedBlocks,
    List<INode> removedINodes, long mtime) throws IOException {
  if (NameNode.stateChangeLog.isDebugEnabled()) {
    NameNode.stateChangeLog.debug("DIR* FSDirectory.delete: " + iip.getPath());
  }
  final long filesRemoved;
  fsd.writeLock();
  try {
    if (!deleteAllowed(iip, iip.getPath())) {
      filesRemoved = -1;
    } else {
      List<INodeDirectory> snapshottableDirs = new ArrayList<>();
      FSDirSnapshotOp.checkSnapshot(iip.getLastINode(), snapshottableDirs);
      filesRemoved = unprotectedDelete(fsd, iip, collectedBlocks,
                                       removedINodes, mtime);
      fsd.getFSNamesystem().removeSnapshottableDirs(snapshottableDirs);
    }
  } finally {
    fsd.writeUnlock();
  }
  return filesRemoved;
}
Project: hadoop    File: FSDirDeleteOp.java
/**
 * Remove a file/directory from the namespace.
 * <p>
 * For large directories, deletion is incremental. The blocks under
 * the directory are collected and deleted a small number at a time while
 * holding the {@link FSNamesystem} lock.
 * <p>
 * For a small directory or file, the deletion is done in one shot.
 *
 */
static BlocksMapUpdateInfo delete(
    FSNamesystem fsn, String src, boolean recursive, boolean logRetryCache)
    throws IOException {
  FSDirectory fsd = fsn.getFSDirectory();
  FSPermissionChecker pc = fsd.getPermissionChecker();
  byte[][] pathComponents = FSDirectory.getPathComponentsForReservedPath(src);

  src = fsd.resolvePath(pc, src, pathComponents);
  final INodesInPath iip = fsd.getINodesInPath4Write(src, false);
  if (!recursive && fsd.isNonEmptyDirectory(iip)) {
    throw new PathIsNotEmptyDirectoryException(src + " is non empty");
  }
  if (fsd.isPermissionEnabled()) {
    fsd.checkPermission(pc, iip, false, null, FsAction.WRITE, null,
                        FsAction.ALL, true);
  }

  return deleteInternal(fsn, src, iip, logRetryCache);
}
Project: hadoop    File: FSDirectory.java
/**
 * Truncate has the following properties:
 * 1.) Any block deletions occur now.
 * 2.) INode length is truncated now - new clients can only read up to
 * the truncated length.
 * 3.) INode will be set to UC and lastBlock set to UNDER_RECOVERY.
 * 4.) NN will trigger DN truncation recovery and waits for DNs to report.
 * 5.) File is considered UNDER_RECOVERY until truncation recovery completes.
 * 6.) Soft and hard Lease expiration require truncation recovery to complete.
 *
 * @return true if on the block boundary, or false if recovery is needed
 */
boolean unprotectedTruncate(INodesInPath iip, long newLength,
                            BlocksMapUpdateInfo collectedBlocks,
                            long mtime, QuotaCounts delta) throws IOException {
  assert hasWriteLock();
  INodeFile file = iip.getLastINode().asFile();
  int latestSnapshot = iip.getLatestSnapshotId();
  file.recordModification(latestSnapshot, true);

  verifyQuotaForTruncate(iip, file, newLength, delta);

  long remainingLength =
      file.collectBlocksBeyondMax(newLength, collectedBlocks);
  file.excludeSnapshotBlocks(latestSnapshot, collectedBlocks);
  file.setModificationTime(mtime);
  // return whether on a block boundary
  return (remainingLength - newLength) == 0;
}
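The return value hinges on collectBlocksBeyondMax, which discards whole blocks past newLength and reports how many bytes the surviving blocks still cover. A worked check with made-up numbers:

public class TruncateBoundaryDemo {
  public static void main(String[] args) {
    // Hypothetical numbers: a file of three 128 MB blocks, truncated to 256 MB.
    long blockSize = 128L * 1024 * 1024;
    long newLength = 2 * blockSize;        // truncation target: 256 MB
    // collectBlocksBeyondMax drops the third block entirely and returns
    // the length still covered by the blocks that remain:
    long remainingLength = 2 * blockSize;  // two full blocks survive
    boolean onBoundary = (remainingLength - newLength) == 0;
    System.out.println("on block boundary, no recovery: " + onBoundary);
    // Had newLength been 200 MB, the second block would be kept but too
    // long; remainingLength would be 256 MB and recovery would be needed.
  }
}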
Project: hadoop    File: FSDirRenameOp.java
boolean cleanDst(BlockStoragePolicySuite bsps, BlocksMapUpdateInfo collectedBlocks)
    throws QuotaExceededException {
  Preconditions.checkState(oldDstChild != null);
  List<INode> removedINodes = new ChunkedArrayList<>();
  final boolean filesDeleted;
  if (!oldDstChild.isInLatestSnapshot(dstIIP.getLatestSnapshotId())) {
    oldDstChild.destroyAndCollectBlocks(bsps, collectedBlocks, removedINodes);
    filesDeleted = true;
  } else {
    filesDeleted = oldDstChild.cleanSubtree(bsps, Snapshot.CURRENT_STATE_ID,
        dstIIP.getLatestSnapshotId(), collectedBlocks, removedINodes)
        .getNameSpace() >= 0;
  }
  fsd.getFSNamesystem().removeLeasesAndINodes(src, removedINodes, false);
  return filesDeleted;
}
Project: hadoop    File: FileWithSnapshotFeature.java
public QuotaCounts cleanFile(final BlockStoragePolicySuite bsps,
    final INodeFile file, final int snapshotId,
    int priorSnapshotId, final BlocksMapUpdateInfo collectedBlocks,
    final List<INode> removedINodes) {
  if (snapshotId == Snapshot.CURRENT_STATE_ID) {
    // delete the current file while the file has snapshot feature
    if (!isCurrentFileDeleted()) {
      file.recordModification(priorSnapshotId);
      deleteCurrentFile();
    }
    collectBlocksAndClear(bsps, file, collectedBlocks, removedINodes);
    return new QuotaCounts.Builder().build();
  } else { // delete the snapshot
    priorSnapshotId = getDiffs().updatePrior(snapshotId, priorSnapshotId);
    return diffs.deleteSnapshotDiff(bsps, snapshotId, priorSnapshotId, file,
        collectedBlocks, removedINodes);
  }
}
Project: hadoop    File: FileWithSnapshotFeature.java
/**
 * If some blocks at the end of the block list no longer belong to
 * any inode, collect them and update the block list.
 */
public void collectBlocksAndClear(final BlockStoragePolicySuite bsps, final INodeFile file,
    final BlocksMapUpdateInfo info, final List<INode> removedINodes) {
  // check if everything is deleted.
  if (isCurrentFileDeleted() && getDiffs().asList().isEmpty()) {
    file.destroyAndCollectBlocks(bsps, info, removedINodes);
    return;
  }
  // find max file size.
  final long max;
  FileDiff diff = getDiffs().getLast();
  if (isCurrentFileDeleted()) {
    max = diff == null? 0: diff.getFileSize();
  } else { 
    max = file.computeFileSize();
  }

  // Collect blocks that should be deleted
  FileDiff last = diffs.getLast();
  BlockInfoContiguous[] snapshotBlocks = last == null ? null : last.getBlocks();
  if (snapshotBlocks == null) {
    file.collectBlocksBeyondMax(max, info);
  } else {
    file.collectBlocksBeyondSnapshot(snapshotBlocks, info);
  }
}
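collectBlocksBeyondMax itself walks the block list and collects every trailing block that lies wholly beyond the retained size. A simplified stand-alone version of that trimming logic (toy Block type, not the HDFS BlockInfoContiguous):

import java.util.ArrayList;
import java.util.List;

public class TrimTrailingBlocks {
  // Toy stand-in for a block: just its length in bytes.
  record Block(long numBytes) {}

  /** Move every trailing block that lies wholly beyond max into collected. */
  static List<Block> collectBeyondMax(List<Block> blocks, long max,
      List<Block> collected) {
    long covered = 0;
    int keep = 0;
    // Walk forward until the kept blocks cover at least max bytes.
    while (keep < blocks.size() && covered < max) {
      covered += blocks.get(keep).numBytes();
      keep++;
    }
    // Everything after that point no longer belongs to any file state.
    collected.addAll(blocks.subList(keep, blocks.size()));
    return blocks.subList(0, keep);
  }

  public static void main(String[] args) {
    List<Block> blocks =
        List.of(new Block(128), new Block(128), new Block(128));
    List<Block> collected = new ArrayList<>();
    List<Block> kept = collectBeyondMax(blocks, 200, collected);
    // kept=2 (the partially used second block survives), collected=1
    System.out.println("kept=" + kept.size()
        + " collected=" + collected.size());
  }
}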
Project: hadoop    File: DirectoryWithSnapshotFeature.java
/** clear the created list */
private QuotaCounts destroyCreatedList(
    final BlockStoragePolicySuite bsps,
    final INodeDirectory currentINode,
    final BlocksMapUpdateInfo collectedBlocks,
    final List<INode> removedINodes) {
  QuotaCounts counts = new QuotaCounts.Builder().build();
  final List<INode> createdList = getList(ListType.CREATED);
  for (INode c : createdList) {
    c.computeQuotaUsage(bsps, counts, true);
    c.destroyAndCollectBlocks(bsps, collectedBlocks, removedINodes);
    // c should be contained in the children list, remove it
    currentINode.removeChild(c);
  }
  createdList.clear();
  return counts;
}
Project: hadoop    File: DirectoryWithSnapshotFeature.java
@Override
QuotaCounts combinePosteriorAndCollectBlocks(
    final BlockStoragePolicySuite bsps,
    final INodeDirectory currentDir, final DirectoryDiff posterior,
    final BlocksMapUpdateInfo collectedBlocks,
    final List<INode> removedINodes) {
  final QuotaCounts counts = new QuotaCounts.Builder().build();
  diff.combinePosterior(posterior.diff, new Diff.Processor<INode>() {
    /** Collect blocks for deleted files. */
    @Override
    public void process(INode inode) {
      if (inode != null) {
        inode.computeQuotaUsage(bsps, counts, false);
        inode.destroyAndCollectBlocks(bsps, collectedBlocks, removedINodes);
      }
    }
  });
  return counts;
}
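The Diff.Processor here is a plain callback: combining a diff with its posterior exposes the inodes that survive in neither resulting state, and each one is destroyed while the enclosing method accumulates quota usage. A loose, generic sketch of the same callback-plus-accumulator shape (the list semantics are illustrative, not the real Diff API):

import java.util.List;
import java.util.function.Consumer;

public class CombineWithCallback {
  /** Report every name created in one diff and deleted again in the next. */
  static void combine(List<String> createdEarlier, List<String> deletedLater,
      Consumer<String> onDead) {
    for (String name : deletedLater) {
      // A file created in the earlier diff and deleted in the posterior
      // diff never survives the combined diff: hand it to the callback.
      if (createdEarlier.contains(name)) {
        onDead.accept(name);
      }
    }
  }

  public static void main(String[] args) {
    int[] destroyed = {0};  // accumulator, playing the role of QuotaCounts
    combine(List.of("a", "b"), List.of("b", "c"),
        name -> {
          destroyed[0]++;
          System.out.println("destroy " + name);
        });
    System.out.println("destroyed " + destroyed[0] + " inode(s)");
  }
}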
Project: aliyun-oss-hadoop-fs    File: FSDirTruncateOp.java
/**
 * Truncate has the following properties:
 * 1.) Any block deletions occur now.
 * 2.) INode length is truncated now - new clients can only read up to
 *     the truncated length.
 * 3.) INode will be set to UC and lastBlock set to UNDER_RECOVERY.
 * 4.) NN will trigger DN truncation recovery and waits for DNs to report.
 * 5.) File is considered UNDER_RECOVERY until truncation recovery
 *     completes.
 * 6.) Soft and hard Lease expiration require truncation recovery to
 *     complete.
 *
 * @return true if on the block boundary, or false if recovery is needed
 */
private static boolean unprotectedTruncate(FSNamesystem fsn,
    INodesInPath iip, long newLength, BlocksMapUpdateInfo collectedBlocks,
    long mtime, QuotaCounts delta) throws IOException {
  assert fsn.hasWriteLock();

  INodeFile file = iip.getLastINode().asFile();
  int latestSnapshot = iip.getLatestSnapshotId();
  file.recordModification(latestSnapshot, true);

  verifyQuotaForTruncate(fsn, iip, file, newLength, delta);

  long remainingLength =
      file.collectBlocksBeyondMax(newLength, collectedBlocks);
  file.excludeSnapshotBlocks(latestSnapshot, collectedBlocks);
  file.setModificationTime(mtime);
  // return whether on a block boundary
  return (remainingLength - newLength) == 0;
}
Project: aliyun-oss-hadoop-fs    File: FSDirRenameOp.java
boolean cleanDst(BlockStoragePolicySuite bsps, BlocksMapUpdateInfo collectedBlocks)
    throws QuotaExceededException {
  Preconditions.checkState(oldDstChild != null);
  List<INode> removedINodes = new ChunkedArrayList<>();
  List<Long> removedUCFiles = new ChunkedArrayList<>();
  INode.ReclaimContext context = new INode.ReclaimContext(
      bsps, collectedBlocks, removedINodes, removedUCFiles);
  final boolean filesDeleted;
  if (!oldDstChild.isInLatestSnapshot(dstIIP.getLatestSnapshotId())) {
    oldDstChild.destroyAndCollectBlocks(context);
    filesDeleted = true;
  } else {
    oldDstChild.cleanSubtree(context, Snapshot.CURRENT_STATE_ID,
        dstIIP.getLatestSnapshotId());
    filesDeleted = context.quotaDelta().getNsDelta() >= 0;
  }
  fsd.updateReplicationFactor(context.collectedBlocks()
                                  .toUpdateReplicationInfo());

  fsd.getFSNamesystem().removeLeasesAndINodes(
      removedUCFiles, removedINodes, false);
  return filesDeleted;
}
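Compared with the older cleanDst variants above, this branch bundles the block and inode collectors into a single INode.ReclaimContext parameter object. A sketch of that refactoring in miniature (ReclaimState and its fields are illustrative names, not the real class):

import java.util.ArrayList;
import java.util.List;

// Instead of passing collectedBlocks/removedINodes/removedUCFiles
// separately, newer code hands one context object around.
class ReclaimState {
  final List<Long> collectedBlocks = new ArrayList<>();
  final List<String> removedINodes = new ArrayList<>();
  final List<Long> removedUCFiles = new ArrayList<>();
}

public class ParameterObjectDemo {
  static void destroy(String inodePath, ReclaimState ctx) {
    ctx.removedINodes.add(inodePath);
    ctx.collectedBlocks.add((long) inodePath.hashCode()); // pretend block id
  }

  public static void main(String[] args) {
    ReclaimState ctx = new ReclaimState();
    destroy("/dst/old-file", ctx);
    System.out.println(ctx.removedINodes + ", "
        + ctx.collectedBlocks.size() + " block(s) to reclaim");
  }
}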
Project: aliyun-oss-hadoop-fs    File: FSNamesystem.java
/**
 * Remove the indicated file from the namespace.
 *
 * @see ClientProtocol#delete(String, boolean) for a detailed description,
 * including the exceptions thrown
 */
boolean delete(String src, boolean recursive, boolean logRetryCache)
    throws IOException {
  BlocksMapUpdateInfo toRemovedBlocks = null;
  writeLock();
  boolean ret = false;
  try {
    checkOperation(OperationCategory.WRITE);
    checkNameNodeSafeMode("Cannot delete " + src);
    toRemovedBlocks = FSDirDeleteOp.delete(
        this, src, recursive, logRetryCache);
    ret = toRemovedBlocks != null;
  } catch (AccessControlException e) {
    logAuditEvent(false, "delete", src);
    throw e;
  } finally {
    writeUnlock();
  }
  getEditLog().logSync();
  if (toRemovedBlocks != null) {
    removeBlocks(toRemovedBlocks); // Incremental deletion of blocks
  }
  logAuditEvent(true, "delete", src);
  return ret;
}
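From a client's point of view, this code path is reached through the ordinary FileSystem API. A minimal usage sketch against a default-configured cluster (the path is made up):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class DeleteExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    try (FileSystem fs = FileSystem.get(conf)) {
      // recursive = true mirrors the 'recursive' flag that eventually
      // reaches FSDirDeleteOp.delete on the NameNode.
      boolean deleted = fs.delete(new Path("/tmp/example-dir"), true);
      System.out.println("deleted: " + deleted);
    }
  }
}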
Project: big-c    File: FSDirDeleteOp.java
/**
 * Delete the target directory and collect the blocks under it
 *
 * @param iip the INodesInPath instance containing all the INodes for the path
 * @param collectedBlocks Blocks under the deleted directory
 * @param removedINodes INodes that should be removed from inodeMap
 * @return the number of files that have been removed
 */
static long delete(
    FSDirectory fsd, INodesInPath iip, BlocksMapUpdateInfo collectedBlocks,
    List<INode> removedINodes, long mtime) throws IOException {
  if (NameNode.stateChangeLog.isDebugEnabled()) {
    NameNode.stateChangeLog.debug("DIR* FSDirectory.delete: " + iip.getPath());
  }
  final long filesRemoved;
  fsd.writeLock();
  try {
    if (!deleteAllowed(iip, iip.getPath())) {
      filesRemoved = -1;
    } else {
      List<INodeDirectory> snapshottableDirs = new ArrayList<>();
      FSDirSnapshotOp.checkSnapshot(iip.getLastINode(), snapshottableDirs);
      filesRemoved = unprotectedDelete(fsd, iip, collectedBlocks,
                                       removedINodes, mtime);
      fsd.getFSNamesystem().removeSnapshottableDirs(snapshottableDirs);
    }
  } finally {
    fsd.writeUnlock();
  }
  return filesRemoved;
}
Project: big-c    File: FSDirDeleteOp.java
/**
 * Remove a file/directory from the namespace.
 * <p>
 * For large directories, deletion is incremental. The blocks under
 * the directory are collected and deleted a small number at a time while
 * holding the {@link FSNamesystem} lock.
 * <p>
 * For a small directory or file, the deletion is done in one shot.
 *
 */
static BlocksMapUpdateInfo delete(
    FSNamesystem fsn, String src, boolean recursive, boolean logRetryCache)
    throws IOException {
  FSDirectory fsd = fsn.getFSDirectory();
  FSPermissionChecker pc = fsd.getPermissionChecker();
  byte[][] pathComponents = FSDirectory.getPathComponentsForReservedPath(src);

  src = fsd.resolvePath(pc, src, pathComponents);
  final INodesInPath iip = fsd.getINodesInPath4Write(src, false);
  if (!recursive && fsd.isNonEmptyDirectory(iip)) {
    throw new PathIsNotEmptyDirectoryException(src + " is non empty");
  }
  if (fsd.isPermissionEnabled()) {
    fsd.checkPermission(pc, iip, false, null, FsAction.WRITE, null,
                        FsAction.ALL, true);
  }

  return deleteInternal(fsn, src, iip, logRetryCache);
}
Project: big-c    File: FSDirectory.java
/**
 * Truncate has the following properties:
 * 1.) Any block deletions occur now.
 * 2.) INode length is truncated now - new clients can only read up to
 * the truncated length.
 * 3.) INode will be set to UC and lastBlock set to UNDER_RECOVERY.
 * 4.) NN will trigger DN truncation recovery and waits for DNs to report.
 * 5.) File is considered UNDER_RECOVERY until truncation recovery completes.
 * 6.) Soft and hard Lease expiration require truncation recovery to complete.
 *
 * @return true if on the block boundary, or false if recovery is needed
 */
boolean unprotectedTruncate(INodesInPath iip, long newLength,
                            BlocksMapUpdateInfo collectedBlocks,
                            long mtime, QuotaCounts delta) throws IOException {
  assert hasWriteLock();
  INodeFile file = iip.getLastINode().asFile();
  int latestSnapshot = iip.getLatestSnapshotId();
  file.recordModification(latestSnapshot, true);

  verifyQuotaForTruncate(iip, file, newLength, delta);

  long remainingLength =
      file.collectBlocksBeyondMax(newLength, collectedBlocks);
  file.excludeSnapshotBlocks(latestSnapshot, collectedBlocks);
  file.setModificationTime(mtime);
  // return whether on a block boundary
  return (remainingLength - newLength) == 0;
}
Project: big-c    File: FSDirRenameOp.java
boolean cleanDst(BlockStoragePolicySuite bsps, BlocksMapUpdateInfo collectedBlocks)
    throws QuotaExceededException {
  Preconditions.checkState(oldDstChild != null);
  List<INode> removedINodes = new ChunkedArrayList<>();
  final boolean filesDeleted;
  if (!oldDstChild.isInLatestSnapshot(dstIIP.getLatestSnapshotId())) {
    oldDstChild.destroyAndCollectBlocks(bsps, collectedBlocks, removedINodes);
    filesDeleted = true;
  } else {
    filesDeleted = oldDstChild.cleanSubtree(bsps, Snapshot.CURRENT_STATE_ID,
        dstIIP.getLatestSnapshotId(), collectedBlocks, removedINodes)
        .getNameSpace() >= 0;
  }
  fsd.getFSNamesystem().removeLeasesAndINodes(src, removedINodes, false);
  return filesDeleted;
}
Project: big-c    File: FileWithSnapshotFeature.java
public QuotaCounts cleanFile(final BlockStoragePolicySuite bsps,
    final INodeFile file, final int snapshotId,
    int priorSnapshotId, final BlocksMapUpdateInfo collectedBlocks,
    final List<INode> removedINodes) {
  if (snapshotId == Snapshot.CURRENT_STATE_ID) {
    // delete the current file while the file has snapshot feature
    if (!isCurrentFileDeleted()) {
      file.recordModification(priorSnapshotId);
      deleteCurrentFile();
    }
    collectBlocksAndClear(bsps, file, collectedBlocks, removedINodes);
    return new QuotaCounts.Builder().build();
  } else { // delete the snapshot
    priorSnapshotId = getDiffs().updatePrior(snapshotId, priorSnapshotId);
    return diffs.deleteSnapshotDiff(bsps, snapshotId, priorSnapshotId, file,
        collectedBlocks, removedINodes);
  }
}
Project: big-c    File: FileWithSnapshotFeature.java
/**
 * If some blocks at the end of the block list no longer belong to
 * any inode, collect them and update the block list.
 */
public void collectBlocksAndClear(final BlockStoragePolicySuite bsps, final INodeFile file,
    final BlocksMapUpdateInfo info, final List<INode> removedINodes) {
  // check if everything is deleted.
  if (isCurrentFileDeleted() && getDiffs().asList().isEmpty()) {
    file.destroyAndCollectBlocks(bsps, info, removedINodes);
    return;
  }
  // find max file size.
  final long max;
  FileDiff diff = getDiffs().getLast();
  if (isCurrentFileDeleted()) {
    max = diff == null? 0: diff.getFileSize();
  } else { 
    max = file.computeFileSize();
  }

  // Collect blocks that should be deleted
  FileDiff last = diffs.getLast();
  BlockInfoContiguous[] snapshotBlocks = last == null ? null : last.getBlocks();
  if (snapshotBlocks == null) {
    file.collectBlocksBeyondMax(max, info);
  } else {
    file.collectBlocksBeyondSnapshot(snapshotBlocks, info);
  }
}
Project: big-c    File: DirectoryWithSnapshotFeature.java
/** clear the created list */
private QuotaCounts destroyCreatedList(
    final BlockStoragePolicySuite bsps,
    final INodeDirectory currentINode,
    final BlocksMapUpdateInfo collectedBlocks,
    final List<INode> removedINodes) {
  QuotaCounts counts = new QuotaCounts.Builder().build();
  final List<INode> createdList = getList(ListType.CREATED);
  for (INode c : createdList) {
    c.computeQuotaUsage(bsps, counts, true);
    c.destroyAndCollectBlocks(bsps, collectedBlocks, removedINodes);
    // c should be contained in the children list, remove it
    currentINode.removeChild(c);
  }
  createdList.clear();
  return counts;
}
Project: big-c    File: DirectoryWithSnapshotFeature.java
@Override
QuotaCounts combinePosteriorAndCollectBlocks(
    final BlockStoragePolicySuite bsps,
    final INodeDirectory currentDir, final DirectoryDiff posterior,
    final BlocksMapUpdateInfo collectedBlocks,
    final List<INode> removedINodes) {
  final QuotaCounts counts = new QuotaCounts.Builder().build();
  diff.combinePosterior(posterior.diff, new Diff.Processor<INode>() {
    /** Collect blocks for deleted files. */
    @Override
    public void process(INode inode) {
      if (inode != null) {
        inode.computeQuotaUsage(bsps, counts, false);
        inode.destroyAndCollectBlocks(bsps, collectedBlocks, removedINodes);
      }
    }
  });
  return counts;
}
Project: hadoop-2.6.0-cdh5.4.3    File: FSDirectory.java
/**
 * @see #unprotectedRenameTo(String, String, long, Options.Rename...)
 */
void renameTo(String src, String dst, long mtime,
    BlocksMapUpdateInfo collectedBlocks, Options.Rename... options)
    throws FileAlreadyExistsException, FileNotFoundException,
    ParentNotDirectoryException, QuotaExceededException,
    UnresolvedLinkException, IOException {
  if (NameNode.stateChangeLog.isDebugEnabled()) {
    NameNode.stateChangeLog.debug("DIR* FSDirectory.renameTo: " + src
        + " to " + dst);
  }
  writeLock();
  try {
    if (unprotectedRenameTo(src, dst, mtime, collectedBlocks, options)) {
      namesystem.incrDeletedFileCount(1);
    }
  } finally {
    writeUnlock();
  }
}
Project: hadoop-2.6.0-cdh5.4.3    File: FSDirectory.java
/**
 * Delete the target directory and collect the blocks under it
 * 
 * @param src Path of a directory to delete
 * @param collectedBlocks Blocks under the deleted directory
 * @param removedINodes INodes that should be removed from {@link #inodeMap}
 * @return the number of files that have been removed
 */
long delete(String src, BlocksMapUpdateInfo collectedBlocks,
            List<INode> removedINodes, long mtime) throws IOException {
  if (NameNode.stateChangeLog.isDebugEnabled()) {
    NameNode.stateChangeLog.debug("DIR* FSDirectory.delete: " + src);
  }
  final long filesRemoved;
  writeLock();
  try {
    final INodesInPath inodesInPath = getINodesInPath4Write(
        normalizePath(src), false);
    if (!deleteAllowed(inodesInPath, src)) {
      filesRemoved = -1;
    } else {
      List<INodeDirectory> snapshottableDirs = new ArrayList<INodeDirectory>();
      checkSnapshot(inodesInPath.getLastINode(), snapshottableDirs);
      filesRemoved = unprotectedDelete(inodesInPath, collectedBlocks,
          removedINodes, mtime);
      namesystem.removeSnapshottableDirs(snapshottableDirs);
    }
  } finally {
    writeUnlock();
  }
  return filesRemoved;
}
Project: hadoop-2.6.0-cdh5.4.3    File: FSNamesystem.java
private void renameToInternal(FSPermissionChecker pc, String src, 
    String dst, boolean logRetryCache, BlocksMapUpdateInfo collectedBlocks, 
    Options.Rename... options) throws IOException {
  assert hasWriteLock();
  if (isPermissionEnabled) {
    // Rename does not operate on link targets
    // Do not resolveLink when checking permissions of src and dst
    // Check write access to parent of src
    checkPermission(pc, src, false, null, FsAction.WRITE, null, null, false,
        false);
    // Check write access to ancestor of dst
    checkPermission(pc, dst, false, FsAction.WRITE, null, null, null, false,
        false);
  }

  waitForLoadingFSImage();
  long mtime = now();
  dir.renameTo(src, dst, mtime, collectedBlocks, options);
  getEditLog().logRename(src, dst, mtime, logRetryCache, options);
}
Project: hadoop-2.6.0-cdh5.4.3    File: FSNamesystem.java
/**
 * Remove leases, inodes and blocks related to a given path
 * @param src The given path
 * @param blocks The list of blocks to be deleted from the blocksMap
 * @param removedINodes The list of inodes to be removed from the
 *                      inodeMap
 * @param acquireINodeMapLock Whether to acquire the lock for inode removal
 */
void removePathAndBlocks(String src, BlocksMapUpdateInfo blocks,
    List<INode> removedINodes, final boolean acquireINodeMapLock) {
  assert hasWriteLock();
  leaseManager.removeLeaseWithPrefixPath(src);
  // remove inodes from inodesMap
  if (removedINodes != null) {
    if (acquireINodeMapLock) {
      dir.writeLock();
    }
    try {
      dir.removeFromInodeMap(removedINodes);
    } finally {
      if (acquireINodeMapLock) {
        dir.writeUnlock();
      }
    }
    removedINodes.clear();
  }
  if (blocks == null) {
    return;
  }

  removeBlocksAndUpdateSafemodeTotal(blocks);
}
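The acquireINodeMapLock flag lets callers that already hold the directory lock skip reacquiring it. The same conditional-locking shape, reduced to plain java.util.concurrent locks (a generic sketch, not the FSDirectory API):

import java.util.concurrent.locks.ReentrantReadWriteLock;

public class ConditionalLockDemo {
  private final ReentrantReadWriteLock lock = new ReentrantReadWriteLock();

  /** Run work under the write lock only if the caller does not hold it yet. */
  void update(Runnable work, boolean acquireLock) {
    if (acquireLock) {
      lock.writeLock().lock();
    }
    try {
      work.run();
    } finally {
      if (acquireLock) {
        lock.writeLock().unlock();
      }
    }
  }

  public static void main(String[] args) {
    ConditionalLockDemo demo = new ConditionalLockDemo();
    demo.update(() -> System.out.println("removing inodes"), true);
  }
}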
Project: hadoop-2.6.0-cdh5.4.3    File: FileWithSnapshotFeature.java
public Quota.Counts cleanFile(final INodeFile file, final int snapshotId,
    int priorSnapshotId, final BlocksMapUpdateInfo collectedBlocks,
    final List<INode> removedINodes) {
  if (snapshotId == Snapshot.CURRENT_STATE_ID) {
    // delete the current file while the file has snapshot feature
    if (!isCurrentFileDeleted()) {
      file.recordModification(priorSnapshotId);
      deleteCurrentFile();
    }
    collectBlocksAndClear(file, collectedBlocks, removedINodes);
    return Quota.Counts.newInstance();
  } else { // delete the snapshot
    priorSnapshotId = getDiffs().updatePrior(snapshotId, priorSnapshotId);
    return diffs.deleteSnapshotDiff(snapshotId, priorSnapshotId, file,
        collectedBlocks, removedINodes);
  }
}
Project: hadoop-2.6.0-cdh5.4.3    File: FileWithSnapshotFeature.java
public Quota.Counts updateQuotaAndCollectBlocks(INodeFile file,
    FileDiff removed, BlocksMapUpdateInfo collectedBlocks,
    final List<INode> removedINodes) {
  long oldDiskspace = file.diskspaceConsumed();
  if (removed.snapshotINode != null) {
    short replication = removed.snapshotINode.getFileReplication();
    short currentRepl = file.getBlockReplication();
    if (currentRepl == 0) {
      oldDiskspace = file.computeFileSize(true, true) * replication;
    } else if (replication > currentRepl) {  
      oldDiskspace = oldDiskspace / file.getBlockReplication() * replication;
    }
  }

  collectBlocksAndClear(file, collectedBlocks, removedINodes);

  long dsDelta = oldDiskspace - file.diskspaceConsumed();
  return Quota.Counts.newInstance(0, dsDelta);
}
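The replication arithmetic above rescales the old disk usage when the snapshot recorded a higher replication factor than the file currently has. A worked example with made-up numbers:

public class QuotaDeltaDemo {
  public static void main(String[] args) {
    // Made-up numbers: 3 blocks x 128 bytes, currently replicated 2x.
    long fileSize = 3 * 128;
    short currentRepl = 2;
    short snapshotRepl = 3;      // the snapshot pinned a higher factor
    long oldDiskspace = fileSize * currentRepl;               // 768
    if (snapshotRepl > currentRepl) {
      // Same rescaling as above: divide out the current factor,
      // multiply by the snapshot's factor.
      oldDiskspace = oldDiskspace / currentRepl * snapshotRepl;  // 1152
    }
    long newDiskspace = fileSize * currentRepl;               // after cleanup
    System.out.println("dsDelta = " + (oldDiskspace - newDiskspace));  // 384
  }
}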
Project: hadoop-2.6.0-cdh5.4.3    File: FileWithSnapshotFeature.java
/**
 * If some blocks at the end of the block list no longer belong to
 * any inode, collect them and update the block list.
 */
private void collectBlocksAndClear(final INodeFile file,
    final BlocksMapUpdateInfo info, final List<INode> removedINodes) {
  // check if everything is deleted.
  if (isCurrentFileDeleted() && getDiffs().asList().isEmpty()) {
    file.destroyAndCollectBlocks(info, removedINodes);
    return;
  }
  // find max file size.
  final long max;
  if (isCurrentFileDeleted()) {
    final FileDiff last = getDiffs().getLast();
    max = last == null? 0: last.getFileSize();
  } else { 
    max = file.computeFileSize();
  }

  collectBlocksBeyondMax(file, max, info);
}
Project: hadoop-2.6.0-cdh5.4.3    File: DirectoryWithSnapshotFeature.java
@Override
Quota.Counts combinePosteriorAndCollectBlocks(
    final INodeDirectory currentDir, final DirectoryDiff posterior,
    final BlocksMapUpdateInfo collectedBlocks,
    final List<INode> removedINodes) {
  final Quota.Counts counts = Quota.Counts.newInstance();
  diff.combinePosterior(posterior.diff, new Diff.Processor<INode>() {
    /** Collect blocks for deleted files. */
    @Override
    public void process(INode inode) {
      if (inode != null) {
        inode.computeQuotaUsage(counts, false);
        inode.destroyAndCollectBlocks(collectedBlocks, removedINodes);
      }
    }
  });
  return counts;
}
Project: hadoop-plus    File: FSNamesystem.java
/**
 * From the given list, incrementally remove the blocks from blockManager.
 * The write lock is dropped and reacquired every BLOCK_DELETION_INCREMENT
 * blocks so that other waiters on the lock can get in. See HDFS-2938.
 * 
 * @param blocks
 *          An instance of {@link BlocksMapUpdateInfo} which contains a list
 *          of blocks that need to be removed from blocksMap
 */
void removeBlocks(BlocksMapUpdateInfo blocks) {
  int start = 0;
  int end = 0;
  List<Block> toDeleteList = blocks.getToDeleteList();
  while (start < toDeleteList.size()) {
    end = BLOCK_DELETION_INCREMENT + start;
    end = end > toDeleteList.size() ? toDeleteList.size() : end;
    writeLock();
    try {
      for (int i = start; i < end; i++) {
        blockManager.removeBlock(toDeleteList.get(i));
      }
    } finally {
      writeUnlock();
    }
    start = end;
  }
}
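This loop is the chunked-deletion idiom from HDFS-2938: take the write lock, remove a fixed-size slice, release the lock, repeat. A self-contained sketch of the idiom (the chunk size of 1000 is an assumption, not the actual HDFS constant):

import java.util.List;
import java.util.concurrent.locks.ReentrantReadWriteLock;
import java.util.stream.Collectors;
import java.util.stream.LongStream;

public class ChunkedRemoval {
  private static final int DELETION_INCREMENT = 1000;  // assumed chunk size
  private final ReentrantReadWriteLock lock = new ReentrantReadWriteLock();

  void removeAll(List<Long> blockIds) {
    int start = 0;
    int removed = 0;
    while (start < blockIds.size()) {
      int end = Math.min(start + DELETION_INCREMENT, blockIds.size());
      // Reacquire the lock for every chunk so other threads can get in
      // between chunks.
      lock.writeLock().lock();
      try {
        for (long id : blockIds.subList(start, end)) {
          removed++;  // stand-in for blockManager.removeBlock(id)
        }
      } finally {
        lock.writeLock().unlock();
      }
      start = end;
    }
    System.out.println("removed " + removed + " blocks in chunks");
  }

  public static void main(String[] args) {
    List<Long> ids = LongStream.range(0, 2500).boxed()
        .collect(Collectors.toList());
    new ChunkedRemoval().removeAll(ids);
  }
}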
Project: hadoop-plus    File: FileWithSnapshot.java
private static Quota.Counts updateQuotaAndCollectBlocks(
    INodeFile currentINode, FileDiff removed,
    BlocksMapUpdateInfo collectedBlocks, final List<INode> removedINodes) {
  FileWithSnapshot sFile = (FileWithSnapshot) currentINode;
  long oldDiskspace = currentINode.diskspaceConsumed();
  if (removed.snapshotINode != null) {
    short replication = removed.snapshotINode.getFileReplication();
    short currentRepl = currentINode.getBlockReplication();
    if (currentRepl == 0) {
      oldDiskspace = currentINode.computeFileSize(true, true) * replication;
    } else if (replication > currentRepl) {  
      oldDiskspace = oldDiskspace / currentINode.getBlockReplication()
          * replication;
    }
  }

  Util.collectBlocksAndClear(sFile, collectedBlocks, removedINodes);

  long dsDelta = oldDiskspace - currentINode.diskspaceConsumed();
  return Quota.Counts.newInstance(0, dsDelta);
}
Project: hadoop-plus    File: FileWithSnapshot.java
/**
 * If some blocks at the end of the block list no longer belong to
 * any inode, collect them and update the block list.
 */
static void collectBlocksAndClear(final FileWithSnapshot file,
    final BlocksMapUpdateInfo info, final List<INode> removedINodes) {
  // check if everything is deleted.
  if (file.isCurrentFileDeleted()
      && file.getDiffs().asList().isEmpty()) {
    file.asINodeFile().destroyAndCollectBlocks(info, removedINodes);
    return;
  }

  // find max file size.
  final long max;
  if (file.isCurrentFileDeleted()) {
    final FileDiff last = file.getDiffs().getLast();
    max = last == null? 0: last.fileSize;
  } else { 
    max = file.asINodeFile().computeFileSize();
  }

  collectBlocksBeyondMax(file, max, info);
}
Project: FlexMap    File: FSDirectory.java
/**
 * @see #unprotectedRenameTo(String, String, long, Options.Rename...)
 */
void renameTo(String src, String dst, long mtime,
    BlocksMapUpdateInfo collectedBlocks, Options.Rename... options)
    throws FileAlreadyExistsException, FileNotFoundException,
    ParentNotDirectoryException, QuotaExceededException,
    UnresolvedLinkException, IOException {
  if (NameNode.stateChangeLog.isDebugEnabled()) {
    NameNode.stateChangeLog.debug("DIR* FSDirectory.renameTo: " + src
        + " to " + dst);
  }
  writeLock();
  try {
    if (unprotectedRenameTo(src, dst, mtime, collectedBlocks, options)) {
      namesystem.incrDeletedFileCount(1);
    }
  } finally {
    writeUnlock();
  }
}
Project: FlexMap    File: FSDirectory.java
/**
 * Delete the target directory and collect the blocks under it
 * 
 * @param src Path of a directory to delete
 * @param collectedBlocks Blocks under the deleted directory
 * @param removedINodes INodes that should be removed from {@link #inodeMap}
 * @return the number of files that have been removed
 */
long delete(String src, BlocksMapUpdateInfo collectedBlocks,
            List<INode> removedINodes, long mtime) throws IOException {
  if (NameNode.stateChangeLog.isDebugEnabled()) {
    NameNode.stateChangeLog.debug("DIR* FSDirectory.delete: " + src);
  }
  final long filesRemoved;
  writeLock();
  try {
    final INodesInPath inodesInPath = getINodesInPath4Write(
        normalizePath(src), false);
    if (!deleteAllowed(inodesInPath, src)) {
      filesRemoved = -1;
    } else {
      List<INodeDirectory> snapshottableDirs = new ArrayList<INodeDirectory>();
      checkSnapshot(inodesInPath.getLastINode(), snapshottableDirs);
      filesRemoved = unprotectedDelete(inodesInPath, collectedBlocks,
          removedINodes, mtime);
      namesystem.removeSnapshottableDirs(snapshottableDirs);
    }
  } finally {
    writeUnlock();
  }
  return filesRemoved;
}
Project: FlexMap    File: FSNamesystem.java
private void renameToInternal(FSPermissionChecker pc, String src, 
    String dst, boolean logRetryCache, BlocksMapUpdateInfo collectedBlocks, 
    Options.Rename... options) throws IOException {
  assert hasWriteLock();
  if (isPermissionEnabled) {
    // Rename does not operate on link targets
    // Do not resolveLink when checking permissions of src and dst
    // Check write access to parent of src
    checkPermission(pc, src, false, null, FsAction.WRITE, null, null, false,
        false);
    // Check write access to ancestor of dst
    checkPermission(pc, dst, false, FsAction.WRITE, null, null, null, false,
        false);
  }

  waitForLoadingFSImage();
  long mtime = now();
  dir.renameTo(src, dst, mtime, collectedBlocks, options);
  getEditLog().logRename(src, dst, mtime, logRetryCache, options);
}
Project: FlexMap    File: FSNamesystem.java
/**
 * Remove leases, inodes and blocks related to a given path
 * @param src The given path
 * @param blocks The list of blocks to be deleted from the blocksMap
 * @param removedINodes The list of inodes to be removed from the
 *                      inodeMap
 * @param acquireINodeMapLock Whether to acquire the lock for inode removal
 */
void removePathAndBlocks(String src, BlocksMapUpdateInfo blocks,
    List<INode> removedINodes, final boolean acquireINodeMapLock) {
  assert hasWriteLock();
  leaseManager.removeLeaseWithPrefixPath(src);
  // remove inodes from inodesMap
  if (removedINodes != null) {
    if (acquireINodeMapLock) {
      dir.writeLock();
    }
    try {
      dir.removeFromInodeMap(removedINodes);
    } finally {
      if (acquireINodeMapLock) {
        dir.writeUnlock();
      }
    }
    removedINodes.clear();
  }
  if (blocks == null) {
    return;
  }

  removeBlocksAndUpdateSafemodeTotal(blocks);
}
Project: FlexMap    File: FileWithSnapshotFeature.java
public Quota.Counts cleanFile(final INodeFile file, final int snapshotId,
    int priorSnapshotId, final BlocksMapUpdateInfo collectedBlocks,
    final List<INode> removedINodes, final boolean countDiffChange)
    throws QuotaExceededException {
  if (snapshotId == Snapshot.CURRENT_STATE_ID) {
    // delete the current file while the file has snapshot feature
    if (!isCurrentFileDeleted()) {
      file.recordModification(priorSnapshotId);
      deleteCurrentFile();
    }
    collectBlocksAndClear(file, collectedBlocks, removedINodes);
    return Quota.Counts.newInstance();
  } else { // delete the snapshot
    priorSnapshotId = getDiffs().updatePrior(snapshotId, priorSnapshotId);
    return diffs.deleteSnapshotDiff(snapshotId, priorSnapshotId, file,
        collectedBlocks, removedINodes, countDiffChange);
  }
}
Project: FlexMap    File: FileWithSnapshotFeature.java
public Quota.Counts updateQuotaAndCollectBlocks(INodeFile file,
    FileDiff removed, BlocksMapUpdateInfo collectedBlocks,
    final List<INode> removedINodes) {
  long oldDiskspace = file.diskspaceConsumed();
  if (removed.snapshotINode != null) {
    short replication = removed.snapshotINode.getFileReplication();
    short currentRepl = file.getBlockReplication();
    if (currentRepl == 0) {
      oldDiskspace = file.computeFileSize(true, true) * replication;
    } else if (replication > currentRepl) {  
      oldDiskspace = oldDiskspace / file.getBlockReplication() * replication;
    }
  }

  collectBlocksAndClear(file, collectedBlocks, removedINodes);

  long dsDelta = oldDiskspace - file.diskspaceConsumed();
  return Quota.Counts.newInstance(0, dsDelta);
}
Project: FlexMap    File: FileWithSnapshotFeature.java
/**
 * If some blocks at the end of the block list no longer belong to
 * any inode, collect them and update the block list.
 */
private void collectBlocksAndClear(final INodeFile file,
    final BlocksMapUpdateInfo info, final List<INode> removedINodes) {
  // check if everything is deleted.
  if (isCurrentFileDeleted() && getDiffs().asList().isEmpty()) {
    file.destroyAndCollectBlocks(info, removedINodes);
    return;
  }
  // find max file size.
  final long max;
  if (isCurrentFileDeleted()) {
    final FileDiff last = getDiffs().getLast();
    max = last == null? 0: last.getFileSize();
  } else { 
    max = file.computeFileSize();
  }

  collectBlocksBeyondMax(file, max, info);
}
Project: FlexMap    File: DirectoryWithSnapshotFeature.java
@Override
Quota.Counts combinePosteriorAndCollectBlocks(
    final INodeDirectory currentDir, final DirectoryDiff posterior,
    final BlocksMapUpdateInfo collectedBlocks,
    final List<INode> removedINodes) {
  final Quota.Counts counts = Quota.Counts.newInstance();
  diff.combinePosterior(posterior.diff, new Diff.Processor<INode>() {
    /** Collect blocks for deleted files. */
    @Override
    public void process(INode inode) {
      if (inode != null) {
        inode.computeQuotaUsage(counts, false);
        inode.destroyAndCollectBlocks(collectedBlocks, removedINodes);
      }
    }
  });
  return counts;
}
Project: hadoop-TCP    File: FSNamesystem.java
/**
 * From the given list, incrementally remove the blocks from blockManager.
 * The write lock is dropped and reacquired every BLOCK_DELETION_INCREMENT
 * blocks so that other waiters on the lock can get in. See HDFS-2938.
 * 
 * @param blocks
 *          An instance of {@link BlocksMapUpdateInfo} which contains a list
 *          of blocks that need to be removed from blocksMap
 */
void removeBlocks(BlocksMapUpdateInfo blocks) {
  int start = 0;
  int end = 0;
  List<Block> toDeleteList = blocks.getToDeleteList();
  while (start < toDeleteList.size()) {
    end = BLOCK_DELETION_INCREMENT + start;
    end = end > toDeleteList.size() ? toDeleteList.size() : end;
    writeLock();
    try {
      for (int i = start; i < end; i++) {
        blockManager.removeBlock(toDeleteList.get(i));
      }
    } finally {
      writeUnlock();
    }
    start = end;
  }
}