Java 类 org.apache.hadoop.hdfs.server.namenode.QuotaCounts 实例源码

项目:hadoop    文件:FileWithSnapshotFeature.java   
/**
 * Clean the file for either a current-file deletion or a snapshot deletion,
 * returning the quota counts released by the cleaning.
 */
public QuotaCounts cleanFile(final BlockStoragePolicySuite bsps,
    final INodeFile file, final int snapshotId,
    int priorSnapshotId, final BlocksMapUpdateInfo collectedBlocks,
    final List<INode> removedINodes) {
  if (snapshotId != Snapshot.CURRENT_STATE_ID) {
    // Deleting a particular snapshot: merge its diff into the prior one.
    priorSnapshotId = getDiffs().updatePrior(snapshotId, priorSnapshotId);
    return diffs.deleteSnapshotDiff(bsps, snapshotId, priorSnapshotId, file,
        collectedBlocks, removedINodes);
  }
  // Deleting the current file while it still has the snapshot feature:
  // record the modification against the prior snapshot, then mark deleted.
  if (!isCurrentFileDeleted()) {
    file.recordModification(priorSnapshotId);
    deleteCurrentFile();
  }
  collectBlocksAndClear(bsps, file, collectedBlocks, removedINodes);
  return new QuotaCounts.Builder().build();
}
项目:hadoop    文件:DirectoryWithSnapshotFeature.java   
/** Destroy every inode in the created list, then empty the list. */
private QuotaCounts destroyCreatedList(
    final BlockStoragePolicySuite bsps,
    final INodeDirectory currentINode,
    final BlocksMapUpdateInfo collectedBlocks,
    final List<INode> removedINodes) {
  final QuotaCounts freed = new QuotaCounts.Builder().build();
  final List<INode> created = getList(ListType.CREATED);
  for (INode inode : created) {
    inode.computeQuotaUsage(bsps, freed, true);
    inode.destroyAndCollectBlocks(bsps, collectedBlocks, removedINodes);
    // The inode is also in the directory's children list; detach it there.
    currentINode.removeChild(inode);
  }
  created.clear();
  return freed;
}
项目:hadoop    文件:DirectoryWithSnapshotFeature.java   
/**
 * Combine {@code posterior} into this directory diff, collecting blocks of
 * inodes destroyed by the combination and accumulating the quota released.
 */
@Override
QuotaCounts combinePosteriorAndCollectBlocks(
    final BlockStoragePolicySuite bsps,
    final INodeDirectory currentDir, final DirectoryDiff posterior,
    final BlocksMapUpdateInfo collectedBlocks,
    final List<INode> removedINodes) {
  // Quota released by inodes destroyed while merging the two diffs.
  final QuotaCounts counts = new QuotaCounts.Builder().build();
  diff.combinePosterior(posterior.diff, new Diff.Processor<INode>() {
    /** Collect blocks for deleted files. */
    @Override
    public void process(INode inode) {
      if (inode != null) {
        inode.computeQuotaUsage(bsps, counts, false);
        inode.destroyAndCollectBlocks(bsps, collectedBlocks, removedINodes);
      }
    }
  });
  return counts;
}
项目:hadoop    文件:TestSnapshotDeletion.java   
/**
 * Verify that the cached quota usage of {@code dirPath} matches the expected
 * namespace/storagespace values, and that a fresh recursive computation
 * agrees with them.
 */
private void checkQuotaUsageComputation(final Path dirPath,
    final long expectedNs, final long expectedDs) throws IOException {
  INodeDirectory dirNode = getDir(fsdir, dirPath);
  assertTrue(dirNode.isQuotaSet());
  // Dump the tree once: dumpTreeRecursively walks the entire subtree, so
  // recomputing it for every assertion message is wasteful.
  final String tree = dirNode.dumpTreeRecursively().toString();
  QuotaCounts q = dirNode.getDirectoryWithQuotaFeature().getSpaceConsumed();
  assertEquals(tree, expectedNs, q.getNameSpace());
  assertEquals(tree, expectedDs, q.getStorageSpace());
  QuotaCounts counts = new QuotaCounts.Builder().build();
  dirNode.computeQuotaUsage(fsdir.getBlockStoragePolicySuite(), counts, false);
  assertEquals(tree, expectedNs, counts.getNameSpace());
  assertEquals(tree, expectedDs, counts.getStorageSpace());
}
项目:aliyun-oss-hadoop-fs    文件:FileWithSnapshotFeature.java   
/**
 * Clean the file for either a current-file deletion or a snapshot deletion,
 * recording any released space in the reclaim context's quota delta.
 */
public void cleanFile(INode.ReclaimContext reclaimContext,
    final INodeFile file, final int snapshotId, int priorSnapshotId,
    byte storagePolicyId) {
  if (snapshotId != Snapshot.CURRENT_STATE_ID) {
    // Deleting a particular snapshot: merge its diff into the prior one.
    priorSnapshotId = getDiffs().updatePrior(snapshotId, priorSnapshotId);
    diffs.deleteSnapshotDiff(reclaimContext, snapshotId, priorSnapshotId,
        file);
    return;
  }
  // Deleting the current file while it still has the snapshot feature:
  // record the modification against the prior snapshot, then mark deleted.
  if (!isCurrentFileDeleted()) {
    file.recordModification(priorSnapshotId);
    deleteCurrentFile();
  }
  final BlockStoragePolicy policy = reclaimContext.storagePolicySuite()
      .getPolicy(storagePolicyId);
  // Quota delta = space consumed before reclaiming blocks minus after.
  final QuotaCounts before = file.storagespaceConsumed(policy);
  collectBlocksAndClear(reclaimContext, file);
  final QuotaCounts after = file.storagespaceConsumed(policy);
  reclaimContext.quotaDelta().add(before.subtract(after));
}
项目:big-c    文件:FileWithSnapshotFeature.java   
/**
 * Clean the file for either a current-file deletion or a snapshot deletion,
 * returning the quota counts released by the cleaning.
 */
public QuotaCounts cleanFile(final BlockStoragePolicySuite bsps,
    final INodeFile file, final int snapshotId,
    int priorSnapshotId, final BlocksMapUpdateInfo collectedBlocks,
    final List<INode> removedINodes) {
  if (snapshotId != Snapshot.CURRENT_STATE_ID) {
    // Deleting a particular snapshot: merge its diff into the prior one.
    priorSnapshotId = getDiffs().updatePrior(snapshotId, priorSnapshotId);
    return diffs.deleteSnapshotDiff(bsps, snapshotId, priorSnapshotId, file,
        collectedBlocks, removedINodes);
  }
  // Deleting the current file while it still has the snapshot feature:
  // record the modification against the prior snapshot, then mark deleted.
  if (!isCurrentFileDeleted()) {
    file.recordModification(priorSnapshotId);
    deleteCurrentFile();
  }
  collectBlocksAndClear(bsps, file, collectedBlocks, removedINodes);
  return new QuotaCounts.Builder().build();
}
项目:big-c    文件:DirectoryWithSnapshotFeature.java   
/** Destroy every inode in the created list, then empty the list. */
private QuotaCounts destroyCreatedList(
    final BlockStoragePolicySuite bsps,
    final INodeDirectory currentINode,
    final BlocksMapUpdateInfo collectedBlocks,
    final List<INode> removedINodes) {
  final QuotaCounts freed = new QuotaCounts.Builder().build();
  final List<INode> created = getList(ListType.CREATED);
  for (INode inode : created) {
    inode.computeQuotaUsage(bsps, freed, true);
    inode.destroyAndCollectBlocks(bsps, collectedBlocks, removedINodes);
    // The inode is also in the directory's children list; detach it there.
    currentINode.removeChild(inode);
  }
  created.clear();
  return freed;
}
项目:big-c    文件:DirectoryWithSnapshotFeature.java   
/**
 * Combine {@code posterior} into this directory diff, collecting blocks of
 * inodes destroyed by the combination and accumulating the quota released.
 */
@Override
QuotaCounts combinePosteriorAndCollectBlocks(
    final BlockStoragePolicySuite bsps,
    final INodeDirectory currentDir, final DirectoryDiff posterior,
    final BlocksMapUpdateInfo collectedBlocks,
    final List<INode> removedINodes) {
  // Quota released by inodes destroyed while merging the two diffs.
  final QuotaCounts counts = new QuotaCounts.Builder().build();
  diff.combinePosterior(posterior.diff, new Diff.Processor<INode>() {
    /** Collect blocks for deleted files. */
    @Override
    public void process(INode inode) {
      if (inode != null) {
        inode.computeQuotaUsage(bsps, counts, false);
        inode.destroyAndCollectBlocks(bsps, collectedBlocks, removedINodes);
      }
    }
  });
  return counts;
}
项目:big-c    文件:TestSnapshotDeletion.java   
/**
 * Verify that the cached quota usage of {@code dirPath} matches the expected
 * namespace/storagespace values, and that a fresh recursive computation
 * agrees with them.
 */
private void checkQuotaUsageComputation(final Path dirPath,
    final long expectedNs, final long expectedDs) throws IOException {
  INodeDirectory dirNode = getDir(fsdir, dirPath);
  assertTrue(dirNode.isQuotaSet());
  // Dump the tree once: dumpTreeRecursively walks the entire subtree, so
  // recomputing it for every assertion message is wasteful.
  final String tree = dirNode.dumpTreeRecursively().toString();
  QuotaCounts q = dirNode.getDirectoryWithQuotaFeature().getSpaceConsumed();
  assertEquals(tree, expectedNs, q.getNameSpace());
  assertEquals(tree, expectedDs, q.getStorageSpace());
  QuotaCounts counts = new QuotaCounts.Builder().build();
  dirNode.computeQuotaUsage(fsdir.getBlockStoragePolicySuite(), counts, false);
  assertEquals(tree, expectedNs, counts.getNameSpace());
  assertEquals(tree, expectedDs, counts.getStorageSpace());
}
项目:hadoop    文件:FileDiff.java   
/**
 * Merge {@code posterior} into this file diff by delegating to the file's
 * snapshot feature, which updates quota and collects blocks for deletion.
 */
@Override
QuotaCounts combinePosteriorAndCollectBlocks(
    BlockStoragePolicySuite bsps, INodeFile currentINode,
    FileDiff posterior, BlocksMapUpdateInfo collectedBlocks,
    final List<INode> removedINodes) {
  FileWithSnapshotFeature sf = currentINode.getFileWithSnapshotFeature();
  assert sf != null : "FileWithSnapshotFeature is null";
  return sf.updateQuotaAndCollectBlocks(
      bsps, currentINode, posterior, collectedBlocks, removedINodes);
}
项目:hadoop    文件:FileDiff.java   
/**
 * Destroy this file diff, delegating quota updates and block collection to
 * the file's snapshot feature.
 */
@Override
QuotaCounts destroyDiffAndCollectBlocks(BlockStoragePolicySuite bsps, INodeFile currentINode,
    BlocksMapUpdateInfo collectedBlocks, final List<INode> removedINodes) {
  final FileWithSnapshotFeature feature =
      currentINode.getFileWithSnapshotFeature();
  return feature.updateQuotaAndCollectBlocks(bsps, currentINode, this,
      collectedBlocks, removedINodes);
}
项目:hadoop    文件:AbstractINodeDiffList.java   
/**
 * Delete a snapshot. The synchronization of the diff list will be done
 * outside. If the diff to remove is not the first one in the diff list, we
 * need to combine the diff with its previous one.
 *
 * @param bsps The block storage policy suite used for quota computation
 * @param snapshot The id of the snapshot to be deleted
 * @param prior The id of the snapshot taken before the to-be-deleted snapshot
 * @param currentINode The inode whose diff list is being modified
 * @param collectedBlocks Used to collect information for blocksMap update
 * @param removedINodes Collects inodes to be removed from the inode map
 * @return quota counts released by deleting the diff (delta in namespace)
 */
public final QuotaCounts deleteSnapshotDiff(BlockStoragePolicySuite bsps,
    final int snapshot,
    final int prior, final N currentINode,
    final BlocksMapUpdateInfo collectedBlocks,
    final List<INode> removedINodes) {
  int snapshotIndex = Collections.binarySearch(diffs, snapshot);

  QuotaCounts counts = new QuotaCounts.Builder().build();
  D removed = null;
  if (snapshotIndex == 0) {
    // The diff to delete is the oldest one in the list.
    if (prior != Snapshot.NO_SNAPSHOT_ID) { // there is still snapshot before
      // set the snapshot to latestBefore
      diffs.get(snapshotIndex).setSnapshotId(prior);
    } else { // there is no snapshot before
      removed = diffs.remove(0);
      counts.add(removed.destroyDiffAndCollectBlocks(bsps, currentINode,
          collectedBlocks, removedINodes));
    }
  } else if (snapshotIndex > 0) {
    final AbstractINodeDiff<N, A, D> previous = diffs.get(snapshotIndex - 1);
    if (previous.getSnapshotId() != prior) {
      // The previous diff is not the prior snapshot's diff: simply re-label
      // this diff with the prior snapshot id.
      diffs.get(snapshotIndex).setSnapshotId(prior);
    } else {
      // combine the to-be-removed diff with its previous diff
      removed = diffs.remove(snapshotIndex);
      if (previous.snapshotINode == null) {
        previous.snapshotINode = removed.snapshotINode;
      }

      counts.add(previous.combinePosteriorAndCollectBlocks(
          bsps, currentINode, removed, collectedBlocks, removedINodes));
      previous.setPosterior(removed.getPosterior());
      removed.setPosterior(null);
    }
  }
  // snapshotIndex < 0: the snapshot has no diff in this list; nothing to do.
  return counts;
}
项目:hadoop    文件:DirectorySnapshottableFeature.java   
/**
 * Remove the snapshot with the given name from {@link #snapshotsByNames},
 * and delete all the corresponding DirectoryDiff.
 *
 * @param bsps The block storage policy suite used for quota computation
 * @param snapshotRoot The directory where we take snapshots
 * @param snapshotName The name of the snapshot to be removed
 * @param collectedBlocks Used to collect information to update blocksMap
 * @param removedINodes Collects inodes removed while cleaning the subtree
 * @return The removed snapshot.
 * @throws SnapshotException if no snapshot with the given name exists
 */
public Snapshot removeSnapshot(BlockStoragePolicySuite bsps, INodeDirectory snapshotRoot,
    String snapshotName, BlocksMapUpdateInfo collectedBlocks,
    final List<INode> removedINodes) throws SnapshotException {
  final int i = searchSnapshot(DFSUtil.string2Bytes(snapshotName));
  if (i < 0) {
    throw new SnapshotException("Cannot delete snapshot " + snapshotName
        + " from path " + snapshotRoot.getFullPathName()
        + ": the snapshot does not exist.");
  } else {
    final Snapshot snapshot = snapshotsByNames.get(i);
    int prior = Snapshot.findLatestSnapshot(snapshotRoot, snapshot.getId());
    try {
      // Clean the subtree for the snapshot being removed; counts holds the
      // quota the deleted snapshot was consuming.
      QuotaCounts counts = snapshotRoot.cleanSubtree(bsps, snapshot.getId(),
          prior, collectedBlocks, removedINodes);
      INodeDirectory parent = snapshotRoot.getParent();
      if (parent != null) {
        // there will not be any WithName node corresponding to the deleted
        // snapshot, thus only update the quota usage in the current tree
        parent.addSpaceConsumed(counts.negation(), true);
      }
    } catch(QuotaExceededException e) {
      INode.LOG.error("BUG: removeSnapshot increases namespace usage.", e);
    }
    // remove from snapshotsByNames after successfully cleaning the subtree
    snapshotsByNames.remove(i);
    return snapshot;
  }
}
项目:hadoop    文件:DirectoryWithSnapshotFeature.java   
/** Destroy every inode in the deleted list, then empty the list. */
private QuotaCounts destroyDeletedList(
    final BlockStoragePolicySuite bsps,
    final BlocksMapUpdateInfo collectedBlocks,
    final List<INode> removedINodes) {
  final QuotaCounts released = new QuotaCounts.Builder().build();
  final List<INode> deleted = getList(ListType.DELETED);
  for (INode inode : deleted) {
    inode.computeQuotaUsage(bsps, released, false);
    inode.destroyAndCollectBlocks(bsps, collectedBlocks, removedINodes);
  }
  deleted.clear();
  return released;
}
项目:hadoop    文件:DirectoryWithSnapshotFeature.java   
/** Destroy this directory diff, which has already been removed. */
@Override
QuotaCounts destroyDiffAndCollectBlocks(
    BlockStoragePolicySuite bsps, INodeDirectory currentINode,
    BlocksMapUpdateInfo collectedBlocks, final List<INode> removedINodes) {
  // Reclaim everything still referenced from this diff's deleted list.
  final QuotaCounts released = new QuotaCounts.Builder().build();
  released.add(diff.destroyDeletedList(bsps, collectedBlocks, removedINodes));
  // Release the ACL feature held by the snapshot copy, if it has one.
  final INodeDirectoryAttributes snapshotCopy = getSnapshotINode();
  if (snapshotCopy != null && snapshotCopy.getAclFeature() != null) {
    AclStorage.removeAclFeature(snapshotCopy.getAclFeature());
  }
  return released;
}
项目:hadoop    文件:DirectoryWithSnapshotFeature.java   
/**
 * Accumulate into {@code counts} the quota usage of children that only
 * exist in snapshot diffs (i.e. in the DELETED lists).
 */
public QuotaCounts computeQuotaUsage4CurrentDirectory(
    BlockStoragePolicySuite bsps, byte storagePolicyId,
    QuotaCounts counts) {
  for (DirectoryDiff directoryDiff : diffs) {
    final List<INode> deletedChildren =
        directoryDiff.getChildrenDiff().getList(ListType.DELETED);
    for (INode deletedChild : deletedChildren) {
      // Storage policy id used for this child's quota computation.
      final byte policyId =
          deletedChild.getStoragePolicyIDForQuota(storagePolicyId);
      deletedChild.computeQuotaUsage(bsps, policyId, counts, false,
          Snapshot.CURRENT_STATE_ID);
    }
  }
  return counts;
}
项目:hadoop    文件:TestRenameWithSnapshots.java   
/**
 * Test renaming into a dst tree whose quota is exceeded afterwards: the
 * rename must still succeed because quota verification counts the renamed
 * source as a single namespace entry.
 */
@Test
public void testRenameExceedQuota() throws Exception {
  final Path root = new Path("/test");
  final Path src = new Path(root, "dir1");
  final Path dst = new Path(root, "dir2");
  final Path dstSub = new Path(dst, "subdir");
  final Path dstSubFile = new Path(dstSub, "subfile");
  hdfs.mkdirs(src);
  DFSTestUtil.createFile(hdfs, dstSubFile, BLOCKSIZE, REPL, SEED);

  final Path srcFile = new Path(src, "foo");
  DFSTestUtil.createFile(hdfs, srcFile, BLOCKSIZE, REPL, SEED);

  SnapshotTestHelper.createSnapshot(hdfs, src, "s1");
  SnapshotTestHelper.createSnapshot(hdfs, dst, "s2");

  // An ns quota of 5 on dir2 leaves exactly one free entry (dir2, subdir,
  // subfile and snapshot s2 are already counted).
  hdfs.setQuota(dst, 5, Long.MAX_VALUE - 1);

  // Rename /test/dir1/foo over /test/dir2/subdir/subfile.
  // FSDirectory#verifyQuota4Rename passes because foo counts as only 1 NS
  // entry, yet the real usage of dir2 grows to 7 (dir2, s2 in dir2, subdir,
  // s2 in subdir, the overwritten subfile kept in the deleted list, the new
  // subfile, and s1 in the new subfile).
  hdfs.rename(srcFile, dstSubFile, Rename.OVERWRITE);

  // Validate the resulting quota usage of dir2.
  INode dstNode = fsdir.getINode4Write(dst.toString());
  assertTrue(dstNode.asDirectory().isSnapshottable());
  QuotaCounts usage = dstNode.computeQuotaUsage(
      fsdir.getBlockStoragePolicySuite());
  assertEquals(4, usage.getNameSpace());
  assertEquals(BLOCKSIZE * REPL * 2, usage.getStorageSpace());
}
项目:aliyun-oss-hadoop-fs    文件:DirectoryWithSnapshotFeature.java   
/**
 * Compute the quota usage of children that only exist in snapshot diffs
 * (i.e. in the DELETED lists) and return the accumulated counts.
 */
public QuotaCounts computeQuotaUsage4CurrentDirectory(
    BlockStoragePolicySuite bsps, byte storagePolicyId) {
  final QuotaCounts usage = new QuotaCounts.Builder().build();
  for (DirectoryDiff directoryDiff : diffs) {
    for (INode deletedChild
        : directoryDiff.getChildrenDiff().getList(ListType.DELETED)) {
      // Storage policy id used for this child's quota computation.
      final byte policyId =
          deletedChild.getStoragePolicyIDForQuota(storagePolicyId);
      usage.add(deletedChild.computeQuotaUsage(bsps, policyId, false,
          Snapshot.CURRENT_STATE_ID));
    }
  }
  return usage;
}
项目:aliyun-oss-hadoop-fs    文件:TestSnapshotDeletion.java   
/**
 * Verify that the cached quota usage of {@code dirPath} matches the expected
 * namespace/storagespace values, and that a fresh recursive computation
 * agrees with them.
 */
private void checkQuotaUsageComputation(final Path dirPath,
    final long expectedNs, final long expectedDs) throws IOException {
  INodeDirectory dirNode = getDir(fsdir, dirPath);
  assertTrue(dirNode.isQuotaSet());
  // Dump the tree once: dumpTreeRecursively walks the entire subtree, so
  // recomputing it for every assertion message is wasteful.
  final String tree = dirNode.dumpTreeRecursively().toString();
  QuotaCounts q = dirNode.getDirectoryWithQuotaFeature().getSpaceConsumed();
  assertEquals(tree, expectedNs, q.getNameSpace());
  assertEquals(tree, expectedDs, q.getStorageSpace());
  QuotaCounts counts = dirNode.computeQuotaUsage(fsdir.getBlockStoragePolicySuite(), false);
  assertEquals(tree, expectedNs, counts.getNameSpace());
  assertEquals(tree, expectedDs, counts.getStorageSpace());
}
项目:aliyun-oss-hadoop-fs    文件:TestRenameWithSnapshots.java   
/**
 * Test renaming into a dst tree whose quota is exceeded afterwards: the
 * rename must still succeed because quota verification counts the renamed
 * source as a single namespace entry.
 */
@Test
public void testRenameExceedQuota() throws Exception {
  final Path root = new Path("/test");
  final Path src = new Path(root, "dir1");
  final Path dst = new Path(root, "dir2");
  final Path dstSub = new Path(dst, "subdir");
  final Path dstSubFile = new Path(dstSub, "subfile");
  hdfs.mkdirs(src);
  DFSTestUtil.createFile(hdfs, dstSubFile, BLOCKSIZE, REPL, SEED);

  final Path srcFile = new Path(src, "foo");
  DFSTestUtil.createFile(hdfs, srcFile, BLOCKSIZE, REPL, SEED);

  SnapshotTestHelper.createSnapshot(hdfs, src, "s1");
  SnapshotTestHelper.createSnapshot(hdfs, dst, "s2");

  // An ns quota of 5 on dir2 leaves exactly one free entry (dir2, subdir,
  // subfile and snapshot s2 are already counted).
  hdfs.setQuota(dst, 5, Long.MAX_VALUE - 1);

  // Rename /test/dir1/foo over /test/dir2/subdir/subfile.
  // FSDirectory#verifyQuota4Rename passes because foo counts as only 1 NS
  // entry, yet the real usage of dir2 grows to 7 (dir2, s2 in dir2, subdir,
  // s2 in subdir, the overwritten subfile kept in the deleted list, the new
  // subfile, and s1 in the new subfile).
  hdfs.rename(srcFile, dstSubFile, Rename.OVERWRITE);

  // Validate the resulting quota usage of dir2.
  INode dstNode = fsdir.getINode4Write(dst.toString());
  assertTrue(dstNode.asDirectory().isSnapshottable());
  QuotaCounts usage = dstNode.computeQuotaUsage(
      fsdir.getBlockStoragePolicySuite());
  assertEquals(4, usage.getNameSpace());
  assertEquals(BLOCKSIZE * REPL * 2, usage.getStorageSpace());
}
项目:big-c    文件:FileDiff.java   
/**
 * Merge {@code posterior} into this file diff by delegating to the file's
 * snapshot feature, which updates quota and collects blocks for deletion.
 */
@Override
QuotaCounts combinePosteriorAndCollectBlocks(
    BlockStoragePolicySuite bsps, INodeFile currentINode,
    FileDiff posterior, BlocksMapUpdateInfo collectedBlocks,
    final List<INode> removedINodes) {
  FileWithSnapshotFeature sf = currentINode.getFileWithSnapshotFeature();
  assert sf != null : "FileWithSnapshotFeature is null";
  return sf.updateQuotaAndCollectBlocks(
      bsps, currentINode, posterior, collectedBlocks, removedINodes);
}
项目:big-c    文件:FileDiff.java   
/**
 * Destroy this file diff, delegating quota updates and block collection to
 * the file's snapshot feature.
 */
@Override
QuotaCounts destroyDiffAndCollectBlocks(BlockStoragePolicySuite bsps, INodeFile currentINode,
    BlocksMapUpdateInfo collectedBlocks, final List<INode> removedINodes) {
  final FileWithSnapshotFeature feature =
      currentINode.getFileWithSnapshotFeature();
  return feature.updateQuotaAndCollectBlocks(bsps, currentINode, this,
      collectedBlocks, removedINodes);
}
项目:big-c    文件:AbstractINodeDiffList.java   
/**
 * Delete a snapshot. The synchronization of the diff list will be done
 * outside. If the diff to remove is not the first one in the diff list, we
 * need to combine the diff with its previous one.
 *
 * @param bsps The block storage policy suite used for quota computation
 * @param snapshot The id of the snapshot to be deleted
 * @param prior The id of the snapshot taken before the to-be-deleted snapshot
 * @param currentINode The inode whose diff list is being modified
 * @param collectedBlocks Used to collect information for blocksMap update
 * @param removedINodes Collects inodes to be removed from the inode map
 * @return quota counts released by deleting the diff (delta in namespace)
 */
public final QuotaCounts deleteSnapshotDiff(BlockStoragePolicySuite bsps,
    final int snapshot,
    final int prior, final N currentINode,
    final BlocksMapUpdateInfo collectedBlocks,
    final List<INode> removedINodes) {
  int snapshotIndex = Collections.binarySearch(diffs, snapshot);

  QuotaCounts counts = new QuotaCounts.Builder().build();
  D removed = null;
  if (snapshotIndex == 0) {
    // The diff to delete is the oldest one in the list.
    if (prior != Snapshot.NO_SNAPSHOT_ID) { // there is still snapshot before
      // set the snapshot to latestBefore
      diffs.get(snapshotIndex).setSnapshotId(prior);
    } else { // there is no snapshot before
      removed = diffs.remove(0);
      counts.add(removed.destroyDiffAndCollectBlocks(bsps, currentINode,
          collectedBlocks, removedINodes));
    }
  } else if (snapshotIndex > 0) {
    final AbstractINodeDiff<N, A, D> previous = diffs.get(snapshotIndex - 1);
    if (previous.getSnapshotId() != prior) {
      // The previous diff is not the prior snapshot's diff: simply re-label
      // this diff with the prior snapshot id.
      diffs.get(snapshotIndex).setSnapshotId(prior);
    } else {
      // combine the to-be-removed diff with its previous diff
      removed = diffs.remove(snapshotIndex);
      if (previous.snapshotINode == null) {
        previous.snapshotINode = removed.snapshotINode;
      }

      counts.add(previous.combinePosteriorAndCollectBlocks(
          bsps, currentINode, removed, collectedBlocks, removedINodes));
      previous.setPosterior(removed.getPosterior());
      removed.setPosterior(null);
    }
  }
  // snapshotIndex < 0: the snapshot has no diff in this list; nothing to do.
  return counts;
}
项目:big-c    文件:DirectorySnapshottableFeature.java   
/**
 * Remove the snapshot with the given name from {@link #snapshotsByNames},
 * and delete all the corresponding DirectoryDiff.
 *
 * @param bsps The block storage policy suite used for quota computation
 * @param snapshotRoot The directory where we take snapshots
 * @param snapshotName The name of the snapshot to be removed
 * @param collectedBlocks Used to collect information to update blocksMap
 * @param removedINodes Collects inodes removed while cleaning the subtree
 * @return The removed snapshot.
 * @throws SnapshotException if no snapshot with the given name exists
 */
public Snapshot removeSnapshot(BlockStoragePolicySuite bsps, INodeDirectory snapshotRoot,
    String snapshotName, BlocksMapUpdateInfo collectedBlocks,
    final List<INode> removedINodes) throws SnapshotException {
  final int i = searchSnapshot(DFSUtil.string2Bytes(snapshotName));
  if (i < 0) {
    throw new SnapshotException("Cannot delete snapshot " + snapshotName
        + " from path " + snapshotRoot.getFullPathName()
        + ": the snapshot does not exist.");
  } else {
    final Snapshot snapshot = snapshotsByNames.get(i);
    int prior = Snapshot.findLatestSnapshot(snapshotRoot, snapshot.getId());
    try {
      // Clean the subtree for the snapshot being removed; counts holds the
      // quota the deleted snapshot was consuming.
      QuotaCounts counts = snapshotRoot.cleanSubtree(bsps, snapshot.getId(),
          prior, collectedBlocks, removedINodes);
      INodeDirectory parent = snapshotRoot.getParent();
      if (parent != null) {
        // there will not be any WithName node corresponding to the deleted
        // snapshot, thus only update the quota usage in the current tree
        parent.addSpaceConsumed(counts.negation(), true);
      }
    } catch(QuotaExceededException e) {
      INode.LOG.error("BUG: removeSnapshot increases namespace usage.", e);
    }
    // remove from snapshotsByNames after successfully cleaning the subtree
    snapshotsByNames.remove(i);
    return snapshot;
  }
}
项目:big-c    文件:DirectoryWithSnapshotFeature.java   
/** Destroy every inode in the deleted list, then empty the list. */
private QuotaCounts destroyDeletedList(
    final BlockStoragePolicySuite bsps,
    final BlocksMapUpdateInfo collectedBlocks,
    final List<INode> removedINodes) {
  final QuotaCounts released = new QuotaCounts.Builder().build();
  final List<INode> deleted = getList(ListType.DELETED);
  for (INode inode : deleted) {
    inode.computeQuotaUsage(bsps, released, false);
    inode.destroyAndCollectBlocks(bsps, collectedBlocks, removedINodes);
  }
  deleted.clear();
  return released;
}
项目:big-c    文件:DirectoryWithSnapshotFeature.java   
/** Destroy this directory diff, which has already been removed. */
@Override
QuotaCounts destroyDiffAndCollectBlocks(
    BlockStoragePolicySuite bsps, INodeDirectory currentINode,
    BlocksMapUpdateInfo collectedBlocks, final List<INode> removedINodes) {
  // Reclaim everything still referenced from this diff's deleted list.
  final QuotaCounts released = new QuotaCounts.Builder().build();
  released.add(diff.destroyDeletedList(bsps, collectedBlocks, removedINodes));
  // Release the ACL feature held by the snapshot copy, if it has one.
  final INodeDirectoryAttributes snapshotCopy = getSnapshotINode();
  if (snapshotCopy != null && snapshotCopy.getAclFeature() != null) {
    AclStorage.removeAclFeature(snapshotCopy.getAclFeature());
  }
  return released;
}
项目:big-c    文件:DirectoryWithSnapshotFeature.java   
/**
 * Accumulate into {@code counts} the quota usage of children that only
 * exist in snapshot diffs (i.e. in the DELETED lists).
 */
public QuotaCounts computeQuotaUsage4CurrentDirectory(
    BlockStoragePolicySuite bsps, byte storagePolicyId,
    QuotaCounts counts) {
  for (DirectoryDiff directoryDiff : diffs) {
    final List<INode> deletedChildren =
        directoryDiff.getChildrenDiff().getList(ListType.DELETED);
    for (INode deletedChild : deletedChildren) {
      // Storage policy id used for this child's quota computation.
      final byte policyId =
          deletedChild.getStoragePolicyIDForQuota(storagePolicyId);
      deletedChild.computeQuotaUsage(bsps, policyId, counts, false,
          Snapshot.CURRENT_STATE_ID);
    }
  }
  return counts;
}
项目:big-c    文件:TestRenameWithSnapshots.java   
/**
 * Test renaming into a dst tree whose quota is exceeded afterwards: the
 * rename must still succeed because quota verification counts the renamed
 * source as a single namespace entry.
 */
@Test
public void testRenameExceedQuota() throws Exception {
  final Path root = new Path("/test");
  final Path src = new Path(root, "dir1");
  final Path dst = new Path(root, "dir2");
  final Path dstSub = new Path(dst, "subdir");
  final Path dstSubFile = new Path(dstSub, "subfile");
  hdfs.mkdirs(src);
  DFSTestUtil.createFile(hdfs, dstSubFile, BLOCKSIZE, REPL, SEED);

  final Path srcFile = new Path(src, "foo");
  DFSTestUtil.createFile(hdfs, srcFile, BLOCKSIZE, REPL, SEED);

  SnapshotTestHelper.createSnapshot(hdfs, src, "s1");
  SnapshotTestHelper.createSnapshot(hdfs, dst, "s2");

  // An ns quota of 5 on dir2 leaves exactly one free entry (dir2, subdir,
  // subfile and snapshot s2 are already counted).
  hdfs.setQuota(dst, 5, Long.MAX_VALUE - 1);

  // Rename /test/dir1/foo over /test/dir2/subdir/subfile.
  // FSDirectory#verifyQuota4Rename passes because foo counts as only 1 NS
  // entry, yet the real usage of dir2 grows to 7 (dir2, s2 in dir2, subdir,
  // s2 in subdir, the overwritten subfile kept in the deleted list, the new
  // subfile, and s1 in the new subfile).
  hdfs.rename(srcFile, dstSubFile, Rename.OVERWRITE);

  // Validate the resulting quota usage of dir2.
  INode dstNode = fsdir.getINode4Write(dst.toString());
  assertTrue(dstNode.asDirectory().isSnapshottable());
  QuotaCounts usage = dstNode.computeQuotaUsage(
      fsdir.getBlockStoragePolicySuite());
  assertEquals(4, usage.getNameSpace());
  assertEquals(BLOCKSIZE * REPL * 2, usage.getStorageSpace());
}
项目:hadoop    文件:AbstractINodeDiff.java   
/**
 * Combine the posterior diff into this diff and collect blocks for deletion.
 *
 * @param bsps block storage policy suite used for quota computation
 * @param currentINode the inode the diffs belong to
 * @param posterior the later diff to be merged into this one
 * @param collectedBlocks used to collect block info for blocksMap update
 * @param removedINodes collects inodes to be removed from the inode map
 * @return quota counts released by the combination
 */
abstract QuotaCounts combinePosteriorAndCollectBlocks(
    final BlockStoragePolicySuite bsps, final N currentINode,
    final D posterior, final BlocksMapUpdateInfo collectedBlocks,
    final List<INode> removedINodes);
项目:hadoop    文件:DirectoryWithSnapshotFeature.java   
/**
 * Clean an inode while we move it from the deleted list of post to the
 * deleted list of prior.
 * @param bsps The block storage policy suite.
 * @param inode The inode to clean.
 * @param post The post snapshot.
 * @param prior The id of the prior snapshot.
 * @param collectedBlocks Used to collect blocks for later deletion.
 * @param removedINodes Collects inodes to be removed from the inode map.
 * @return Quota usage update.
 */
private static QuotaCounts cleanDeletedINode(
    final BlockStoragePolicySuite bsps, INode inode,
    final int post, final int prior,
    final BlocksMapUpdateInfo collectedBlocks,
    final List<INode> removedINodes) {
  QuotaCounts counts = new QuotaCounts.Builder().build();
  // Iterative BFS over the subtree rooted at inode (avoids deep recursion).
  Deque<INode> queue = new ArrayDeque<INode>();
  queue.addLast(inode);
  while (!queue.isEmpty()) {
    INode topNode = queue.pollFirst();
    if (topNode instanceof INodeReference.WithName) {
      INodeReference.WithName wn = (INodeReference.WithName) topNode;
      if (wn.getLastSnapshotId() >= post) {
        INodeReference.WithCount wc =
            (INodeReference.WithCount) wn.getReferredINode();
        if (wc.getLastWithName() == wn && wc.getParentReference() == null) {
          // this wn is the last wn inside of the wc, also the dstRef node has
          // been deleted. In this case, we should treat the referred file/dir
          // as normal case
          queue.add(wc.getReferredINode());
        } else {
          wn.cleanSubtree(bsps, post, prior, collectedBlocks, removedINodes);
        }
      }
      // For DstReference node, since the node is not in the created list of
      // prior, we should treat it as regular file/dir
    } else if (topNode.isFile() && topNode.asFile().isWithSnapshot()) {
      // File with snapshot feature: delete the diff between prior and post.
      INodeFile file = topNode.asFile();
      counts.add(file.getDiffs().deleteSnapshotDiff(bsps, post, prior, file,
          collectedBlocks, removedINodes));
    } else if (topNode.isDirectory()) {
      INodeDirectory dir = topNode.asDirectory();
      ChildrenDiff priorChildrenDiff = null;
      DirectoryWithSnapshotFeature sf = dir.getDirectoryWithSnapshotFeature();
      if (sf != null) {
        // delete files/dirs created after prior. Note that these
        // files/dirs, along with inode, were deleted right after post.
        DirectoryDiff priorDiff = sf.getDiffs().getDiffById(prior);
        if (priorDiff != null && priorDiff.getSnapshotId() == prior) {
          priorChildrenDiff = priorDiff.getChildrenDiff();
          counts.add(priorChildrenDiff.destroyCreatedList(bsps, dir,
              collectedBlocks, removedINodes));
        }
      }

      // Recurse (breadth-first) into children visible in prior, skipping
      // those already in prior's deleted list.
      for (INode child : dir.getChildrenList(prior)) {
        if (priorChildrenDiff != null
            && priorChildrenDiff.search(ListType.DELETED,
                child.getLocalNameBytes()) != null) {
          continue;
        }
        queue.addLast(child);
      }
    }
  }
  return counts;
}
项目:hadoop    文件:TestRenameWithSnapshots.java   
/**
 * Test rename while the rename operation will exceed the quota in the dst
 * tree. The rename must fail cleanly and the namespace must be restored to
 * its pre-rename state in both src and dst trees.
 */
@Test
public void testRenameUndo_5() throws Exception {
  final Path test = new Path("/test");
  final Path dir1 = new Path(test, "dir1");
  final Path dir2 = new Path(test, "dir2");
  final Path subdir2 = new Path(dir2, "subdir2");
  hdfs.mkdirs(dir1);
  hdfs.mkdirs(subdir2);

  final Path foo = new Path(dir1, "foo");
  final Path bar = new Path(foo, "bar");
  DFSTestUtil.createFile(hdfs, bar, BLOCKSIZE, REPL, SEED);

  SnapshotTestHelper.createSnapshot(hdfs, dir1, "s1");
  SnapshotTestHelper.createSnapshot(hdfs, dir2, "s2");

  // set ns quota of dir2 to 4, so the current remaining is 2 (already has
  // dir2, and subdir2)
  hdfs.setQuota(dir2, 4, Long.MAX_VALUE - 1);

  final Path foo2 = new Path(subdir2, foo.getName());
  // Spy on FSDirectory so addLastINode can be forced to fail mid-rename.
  FSDirectory fsdir2 = Mockito.spy(fsdir);
  Mockito.doThrow(new NSQuotaExceededException("fake exception")).when(fsdir2)
      .addLastINode((INodesInPath) Mockito.anyObject(),
          (INode) Mockito.anyObject(), Mockito.anyBoolean());
  Whitebox.setInternalState(fsn, "dir", fsdir2);
  // rename /test/dir1/foo to /test/dir2/subdir2/foo. 
  // FSDirectory#verifyQuota4Rename will pass since the remaining quota is 2.
  // However, the rename operation will fail since we let addLastINode throw
  // NSQuotaExceededException
  boolean rename = hdfs.rename(foo, foo2);
  assertFalse(rename);

  // check the undo
  assertTrue(hdfs.exists(foo));
  assertTrue(hdfs.exists(bar));
  INodeDirectory dir1Node = fsdir2.getINode4Write(dir1.toString())
      .asDirectory();
  List<INode> childrenList = ReadOnlyList.Util.asList(dir1Node
      .getChildrenList(Snapshot.CURRENT_STATE_ID));
  assertEquals(1, childrenList.size());
  INode fooNode = childrenList.get(0);
  assertTrue(fooNode.asDirectory().isWithSnapshot());
  INode barNode = fsdir2.getINode4Write(bar.toString());
  assertTrue(barNode.getClass() == INodeFile.class);
  assertSame(fooNode, barNode.getParent());
  // After the undo, dir1's diff list must not record foo as created/deleted.
  List<DirectoryDiff> diffList = dir1Node
      .getDiffs().asList();
  assertEquals(1, diffList.size());
  DirectoryDiff diff = diffList.get(0);
  assertTrue(diff.getChildrenDiff().getList(ListType.CREATED).isEmpty());
  assertTrue(diff.getChildrenDiff().getList(ListType.DELETED).isEmpty());

  // check dir2
  INodeDirectory dir2Node = fsdir2.getINode4Write(dir2.toString()).asDirectory();
  assertTrue(dir2Node.isSnapshottable());
  QuotaCounts counts = dir2Node.computeQuotaUsage(fsdir.getBlockStoragePolicySuite());
  assertEquals(2, counts.getNameSpace());
  assertEquals(0, counts.getStorageSpace());
  childrenList = ReadOnlyList.Util.asList(dir2Node.asDirectory()
      .getChildrenList(Snapshot.CURRENT_STATE_ID));
  assertEquals(1, childrenList.size());
  INode subdir2Node = childrenList.get(0);
  assertSame(dir2Node, subdir2Node.getParent());
  assertSame(subdir2Node, fsdir2.getINode4Write(subdir2.toString()));
  // dir2's diff list must likewise be untouched by the failed rename.
  diffList = dir2Node.getDiffs().asList();
  assertEquals(1, diffList.size());
  diff = diffList.get(0);
  assertTrue(diff.getChildrenDiff().getList(ListType.CREATED).isEmpty());
  assertTrue(diff.getChildrenDiff().getList(ListType.DELETED).isEmpty());
}
项目:hadoop    文件:TestRenameWithSnapshots.java   
/**
 * Test the rename undo when removing dst node fails.
 *
 * A spied FSDirectory throws a RuntimeException from removeLastINode, so an
 * overwrite-rename of /test/dir1/foo onto /test/dir2/subdir/subdir fails
 * midway; the test then verifies that both the source and destination trees
 * are fully restored by the rename undo logic.
 */
@Test
public void testRenameUndo_6() throws Exception {
  final Path test = new Path("/test");
  final Path dir1 = new Path(test, "dir1");
  final Path dir2 = new Path(test, "dir2");
  final Path sub_dir2 = new Path(dir2, "subdir");
  final Path subsub_dir2 = new Path(sub_dir2, "subdir");
  hdfs.mkdirs(dir1);
  hdfs.mkdirs(subsub_dir2);

  final Path foo = new Path(dir1, "foo");
  hdfs.mkdirs(foo);

  SnapshotTestHelper.createSnapshot(hdfs, dir1, "s1");
  SnapshotTestHelper.createSnapshot(hdfs, dir2, "s2");

  // set ns quota of dir2 to 4, so the current remaining is 1 (already has
  // dir2, sub_dir2, and subsub_dir2)
  hdfs.setQuota(dir2, 4, Long.MAX_VALUE - 1);
  // Make removeLastINode fail so the rename must undo its partial work.
  FSDirectory fsdir2 = Mockito.spy(fsdir);
  Mockito.doThrow(new RuntimeException("fake exception")).when(fsdir2)
      .removeLastINode((INodesInPath) Mockito.anyObject());
  Whitebox.setInternalState(fsn, "dir", fsdir2);
  // rename /test/dir1/foo to /test/dir2/sub_dir2/subsub_dir2.
  // FSDirectory#verifyQuota4Rename will pass since foo only be counted
  // as 1 in NS quota. However, the rename operation will fail when removing
  // subsub_dir2.
  try {
    hdfs.rename(foo, subsub_dir2, Rename.OVERWRITE);
    fail("Expect QuotaExceedException");
  } catch (Exception e) {
    String msg = "fake exception";
    GenericTestUtils.assertExceptionContains(msg, e);
  }

  // check the undo: foo must be back under dir1 with a clean diff list
  assertTrue(hdfs.exists(foo));
  INodeDirectory dir1Node = fsdir2.getINode4Write(dir1.toString())
      .asDirectory();
  List<INode> childrenList = ReadOnlyList.Util.asList(dir1Node
      .getChildrenList(Snapshot.CURRENT_STATE_ID));
  assertEquals(1, childrenList.size());
  INode fooNode = childrenList.get(0);
  assertTrue(fooNode.asDirectory().isWithSnapshot());
  assertSame(dir1Node, fooNode.getParent());
  List<DirectoryDiff> diffList = dir1Node
      .getDiffs().asList();
  assertEquals(1, diffList.size());
  DirectoryDiff diff = diffList.get(0);
  assertTrue(diff.getChildrenDiff().getList(ListType.CREATED).isEmpty());
  assertTrue(diff.getChildrenDiff().getList(ListType.DELETED).isEmpty());

  // check dir2: quota usage and the whole subtree must be untouched
  INodeDirectory dir2Node = fsdir2.getINode4Write(dir2.toString()).asDirectory();
  assertTrue(dir2Node.isSnapshottable());
  QuotaCounts counts = dir2Node.computeQuotaUsage(fsdir.getBlockStoragePolicySuite());
  assertEquals(3, counts.getNameSpace());
  assertEquals(0, counts.getStorageSpace());
  // dir2Node is already an INodeDirectory; no asDirectory() needed
  childrenList = ReadOnlyList.Util.asList(dir2Node
      .getChildrenList(Snapshot.CURRENT_STATE_ID));
  assertEquals(1, childrenList.size());
  INode subdir2Node = childrenList.get(0);
  assertSame(dir2Node, subdir2Node.getParent());
  assertSame(subdir2Node, fsdir2.getINode4Write(sub_dir2.toString()));
  INode subsubdir2Node = fsdir2.getINode4Write(subsub_dir2.toString());
  assertTrue(subsubdir2Node.getClass() == INodeDirectory.class);
  assertSame(subdir2Node, subsubdir2Node.getParent());

  // dir2's s2 diff must be empty: nothing from the failed rename leaked in
  // (removed leftover cast parentheses around dir2Node)
  diffList = dir2Node.getDiffs().asList();
  assertEquals(1, diffList.size());
  diff = diffList.get(0);
  assertTrue(diff.getChildrenDiff().getList(ListType.CREATED).isEmpty());
  assertTrue(diff.getChildrenDiff().getList(ListType.DELETED).isEmpty());
}
Project: hadoop    File: TestRenameWithSnapshots.java
/**
 * After the following operations:
 * Rename a dir -> create a snapshot s on dst tree -> delete the renamed dir
 * -> delete snapshot s on dst tree
 * 
 * Make sure we destroy everything created after the rename under the renamed
 * dir.
 */
@Test
public void testRenameDirAndDeleteSnapshot_3() throws Exception {
  final Path sdir1 = new Path("/dir1");
  final Path sdir2 = new Path("/dir2");
  final Path foo = new Path(sdir1, "foo");
  final Path bar = new Path(foo, "bar");
  DFSTestUtil.createFile(hdfs, bar, BLOCKSIZE, REPL, SEED);
  hdfs.mkdirs(sdir2);

  SnapshotTestHelper.createSnapshot(hdfs, sdir1, "s1");
  SnapshotTestHelper.createSnapshot(hdfs, sdir2, "s2");

  // rename /dir1/foo to /dir2/foo; foo is already recorded in snapshot s1
  final Path foo2 = new Path(sdir2, "foo");
  hdfs.rename(foo, foo2);

  // create two new files under foo2
  final Path bar2 = new Path(foo2, "bar2");
  DFSTestUtil.createFile(hdfs, bar2, BLOCKSIZE, REPL, SEED);
  final Path bar3 = new Path(foo2, "bar3");
  DFSTestUtil.createFile(hdfs, bar3, BLOCKSIZE, REPL, SEED);

  // create a new snapshot on sdir2
  hdfs.createSnapshot(sdir2, "s3");

  // delete foo2
  hdfs.delete(foo2, true);
  // delete s3
  hdfs.deleteSnapshot(sdir2, "s3");

  // check
  // expected namespace usage of dir1 is 3 (presumably sdir1, the foo copy
  // kept alive by s1, and bar)
  final INodeDirectory dir1Node = fsdir.getINode4Write(sdir1.toString())
      .asDirectory();
  QuotaCounts q1 = dir1Node.getDirectoryWithQuotaFeature().getSpaceConsumed();
  assertEquals(3, q1.getNameSpace());
  // only sdir2 itself should still be charged to dir2
  final INodeDirectory dir2Node = fsdir.getINode4Write(sdir2.toString())
      .asDirectory();
  QuotaCounts q2 = dir2Node.getDirectoryWithQuotaFeature().getSpaceConsumed();
  assertEquals(1, q2.getNameSpace());

  // the renamed dir survives only via the WithName reference in s1
  final Path foo_s1 = SnapshotTestHelper.getSnapshotPath(sdir1, "s1",
      foo.getName());
  INode fooRef = fsdir.getINode(foo_s1.toString());
  assertTrue(fooRef instanceof INodeReference.WithName);
  INodeReference.WithCount wc = 
      (WithCount) fooRef.asReference().getReferredINode();
  assertEquals(1, wc.getReferenceCount());
  // bar2/bar3 (created after the rename) must have been destroyed:
  // only the original child bar remains and foo's diff list is clean
  INodeDirectory fooNode = wc.getReferredINode().asDirectory();
  ReadOnlyList<INode> children = fooNode
      .getChildrenList(Snapshot.CURRENT_STATE_ID);
  assertEquals(1, children.size());
  assertEquals(bar.getName(), children.get(0).getLocalName());
  List<DirectoryDiff> diffList = fooNode.getDiffs().asList();
  assertEquals(1, diffList.size());
  Snapshot s1 = dir1Node.getSnapshot(DFSUtil.string2Bytes("s1"));
  assertEquals(s1.getId(), diffList.get(0).getSnapshotId());
  ChildrenDiff diff = diffList.get(0).getChildrenDiff();
  assertEquals(0, diff.getList(ListType.CREATED).size());
  assertEquals(0, diff.getList(ListType.DELETED).size());

  // verify the same state survives saving and reloading the fsimage
  restartClusterAndCheckImage(true);
}
Project: hadoop    File: TestRenameWithSnapshots.java
/**
 * After the following operations:
 * Rename a dir -> create a snapshot s on dst tree -> rename the renamed dir
 * again -> delete snapshot s on dst tree
 * 
 * Make sure we only delete the snapshot s under the renamed dir.
 */
@Test
public void testRenameDirAndDeleteSnapshot_4() throws Exception {
  final Path sdir1 = new Path("/dir1");
  final Path sdir2 = new Path("/dir2");
  final Path foo = new Path(sdir1, "foo");
  final Path bar = new Path(foo, "bar");
  DFSTestUtil.createFile(hdfs, bar, BLOCKSIZE, REPL, SEED);
  hdfs.mkdirs(sdir2);

  SnapshotTestHelper.createSnapshot(hdfs, sdir1, "s1");
  SnapshotTestHelper.createSnapshot(hdfs, sdir2, "s2");

  // rename /dir1/foo to /dir2/foo
  final Path foo2 = new Path(sdir2, "foo");
  hdfs.rename(foo, foo2);

  // create two new files under foo2
  final Path bar2 = new Path(foo2, "bar2");
  DFSTestUtil.createFile(hdfs, bar2, BLOCKSIZE, REPL, SEED);
  final Path bar3 = new Path(foo2, "bar3");
  DFSTestUtil.createFile(hdfs, bar3, BLOCKSIZE, REPL, SEED);

  // create a new snapshot on sdir2
  hdfs.createSnapshot(sdir2, "s3");

  // rename foo2 again, back to its original location
  hdfs.rename(foo2, foo);
  // delete snapshot s3
  hdfs.deleteSnapshot(sdir2, "s3");

  // check
  final INodeDirectory dir1Node = fsdir.getINode4Write(sdir1.toString())
      .asDirectory();
  // sdir1 + s1 + foo_s1 (foo) + foo (foo + s1 + bar~bar3)
  QuotaCounts q1 = dir1Node.getDirectoryWithQuotaFeature().getSpaceConsumed();
  assertEquals(7, q1.getNameSpace());
  // only sdir2 itself should still be charged to dir2
  final INodeDirectory dir2Node = fsdir.getINode4Write(sdir2.toString())
      .asDirectory();
  QuotaCounts q2 = dir2Node.getDirectoryWithQuotaFeature().getSpaceConsumed();
  assertEquals(1, q2.getNameSpace());

  // foo is still recorded in s1 through a WithName reference
  final Path foo_s1 = SnapshotTestHelper.getSnapshotPath(sdir1, "s1",
      foo.getName());
  final INode fooRef = fsdir.getINode(foo_s1.toString());
  assertTrue(fooRef instanceof INodeReference.WithName);
  INodeReference.WithCount wc = 
      (WithCount) fooRef.asReference().getReferredINode();
  // two references remain: the WithName in s1 plus the current DstReference
  assertEquals(2, wc.getReferenceCount());
  // deleting s3 must keep all current children (bar, bar2, bar3)
  INodeDirectory fooNode = wc.getReferredINode().asDirectory();
  ReadOnlyList<INode> children = fooNode
      .getChildrenList(Snapshot.CURRENT_STATE_ID);
  assertEquals(3, children.size());
  assertEquals(bar.getName(), children.get(0).getLocalName());
  assertEquals(bar2.getName(), children.get(1).getLocalName());
  assertEquals(bar3.getName(), children.get(2).getLocalName());
  // only the s1 diff should remain on foo after s3 is deleted
  List<DirectoryDiff> diffList = fooNode.getDiffs().asList();
  assertEquals(1, diffList.size());
  Snapshot s1 = dir1Node.getSnapshot(DFSUtil.string2Bytes("s1"));
  assertEquals(s1.getId(), diffList.get(0).getSnapshotId());
  ChildrenDiff diff = diffList.get(0).getChildrenDiff();
  // bar2 and bar3 in the created list
  assertEquals(2, diff.getList(ListType.CREATED).size());
  assertEquals(0, diff.getList(ListType.DELETED).size());

  // the current foo must be a DstReference sharing the same WithCount
  final INode fooRef2 = fsdir.getINode4Write(foo.toString());
  assertTrue(fooRef2 instanceof INodeReference.DstReference);
  INodeReference.WithCount wc2 = 
      (WithCount) fooRef2.asReference().getReferredINode();
  assertSame(wc, wc2);
  assertSame(fooRef2, wc.getParentReference());

  // verify the same state survives saving and reloading the fsimage
  restartClusterAndCheckImage(true);
}
Project: aliyun-oss-hadoop-fs    File: TestFileWithSnapshotFeature.java
/**
 * Unit test for FileWithSnapshotFeature#updateQuotaAndCollectBlocks with
 * mocked INodeFile/FileDiff/BlockStoragePolicy collaborators. Verifies the
 * quota delta is zero when there is no snapshot copy of the file, and that
 * it reflects the replication and storage-type change when a snapshot copy
 * with higher replication exists.
 */
@Test
public void testUpdateQuotaAndCollectBlocks() {
  FileDiffList diffs = new FileDiffList();
  FileWithSnapshotFeature sf = new FileWithSnapshotFeature(diffs);
  FileDiff diff = mock(FileDiff.class);
  BlockStoragePolicySuite bsps = mock(BlockStoragePolicySuite.class);
  BlockStoragePolicy bsp = mock(BlockStoragePolicy.class);
  // a single block, initially at replication REPL_1
  BlockInfo[] blocks = new BlockInfo[] {
      new BlockInfoContiguous(new Block(1, BLOCK_SIZE, 1), REPL_1)
  };
  BlockManager bm = mock(BlockManager.class);  // NOTE(review): unused in this test

  // No snapshot
  INodeFile file = mock(INodeFile.class);
  when(file.getFileWithSnapshotFeature()).thenReturn(sf);
  when(file.getBlocks()).thenReturn(blocks);
  when(file.getStoragePolicyID()).thenReturn((byte) 1);
  // encode replication REPL_1 into the raw header field; the shift
  // presumably matches INodeFile.HeaderFormat -- confirm against that class
  Whitebox.setInternalState(file, "header", (long) REPL_1 << 48);
  when(file.getPreferredBlockReplication()).thenReturn(REPL_1);

  when(bsps.getPolicy(anyByte())).thenReturn(bsp);
  INode.BlocksMapUpdateInfo collectedBlocks = mock(
      INode.BlocksMapUpdateInfo.class);
  ArrayList<INode> removedINodes = new ArrayList<>();
  INode.ReclaimContext ctx = new INode.ReclaimContext(
      bsps, collectedBlocks, removedINodes, null);
  sf.updateQuotaAndCollectBlocks(ctx, file, diff);
  // with no snapshot copy, no storage space may be added or reclaimed
  QuotaCounts counts = ctx.quotaDelta().getCountsCopy();
  Assert.assertEquals(0, counts.getStorageSpace());
  Assert.assertTrue(counts.getTypeSpaces().allLessOrEqual(0));

  // INode only exists in the snapshot: give the diff a snapshot copy with
  // the higher replication REPL_3
  INodeFile snapshotINode = mock(INodeFile.class);
  Whitebox.setInternalState(snapshotINode, "header", (long) REPL_3 << 48);
  Whitebox.setInternalState(diff, "snapshotINode", snapshotINode);
  when(diff.getSnapshotINode()).thenReturn(snapshotINode);

  // current replication REPL_1 maps to SSD, snapshot REPL_3 maps to DISK
  when(bsp.chooseStorageTypes(REPL_1))
      .thenReturn(Lists.newArrayList(SSD));
  when(bsp.chooseStorageTypes(REPL_3))
      .thenReturn(Lists.newArrayList(DISK));
  blocks[0].setReplication(REPL_3);
  sf.updateQuotaAndCollectBlocks(ctx, file, diff);
  // expected delta: (REPL_3 - REPL_1) * BLOCK_SIZE of raw storage, with one
  // block's worth of accounting moved from SSD (-) to DISK (+)
  counts = ctx.quotaDelta().getCountsCopy();
  Assert.assertEquals((REPL_3 - REPL_1) * BLOCK_SIZE,
                      counts.getStorageSpace());
  Assert.assertEquals(BLOCK_SIZE, counts.getTypeSpaces().get(DISK));
  Assert.assertEquals(-BLOCK_SIZE, counts.getTypeSpaces().get(SSD));
}
Project: aliyun-oss-hadoop-fs    File: TestRenameWithSnapshots.java
/**
 * Test rename while the rename operation will exceed the quota in the dst
 * tree.
 *
 * A spied FSDirectory makes addLastINode throw NSQuotaExceededException, so
 * the rename fails after quota verification has already passed; the test
 * then checks that the undo restores both the source and destination trees.
 */
@Test
public void testRenameUndo_5() throws Exception {
  final Path test = new Path("/test");
  final Path dir1 = new Path(test, "dir1");
  final Path dir2 = new Path(test, "dir2");
  final Path subdir2 = new Path(dir2, "subdir2");
  hdfs.mkdirs(dir1);
  hdfs.mkdirs(subdir2);

  final Path foo = new Path(dir1, "foo");
  final Path bar = new Path(foo, "bar");
  DFSTestUtil.createFile(hdfs, bar, BLOCKSIZE, REPL, SEED);

  SnapshotTestHelper.createSnapshot(hdfs, dir1, "s1");
  SnapshotTestHelper.createSnapshot(hdfs, dir2, "s2");

  // set ns quota of dir2 to 4, so the current remaining is 2 (already has
  // dir2, and subdir2)
  hdfs.setQuota(dir2, 4, Long.MAX_VALUE - 1);

  final Path foo2 = new Path(subdir2, foo.getName());
  FSDirectory fsdir2 = Mockito.spy(fsdir);
  Mockito.doThrow(new NSQuotaExceededException("fake exception")).when(fsdir2)
      .addLastINode((INodesInPath) Mockito.anyObject(),
          (INode) Mockito.anyObject(), Mockito.anyBoolean());
  Whitebox.setInternalState(fsn, "dir", fsdir2);
  // rename /test/dir1/foo to /test/dir2/subdir2/foo. 
  // FSDirectory#verifyQuota4Rename will pass since the remaining quota is 2.
  // However, the rename operation will fail since we let addLastINode throw
  // NSQuotaExceededException
  boolean rename = hdfs.rename(foo, foo2);
  // a failed rename reports false to the client rather than throwing
  assertFalse(rename);

  // check the undo: foo and bar must be back in their original places
  assertTrue(hdfs.exists(foo));
  assertTrue(hdfs.exists(bar));
  INodeDirectory dir1Node = fsdir2.getINode4Write(dir1.toString())
      .asDirectory();
  List<INode> childrenList = ReadOnlyList.Util.asList(dir1Node
      .getChildrenList(Snapshot.CURRENT_STATE_ID));
  assertEquals(1, childrenList.size());
  INode fooNode = childrenList.get(0);
  assertTrue(fooNode.asDirectory().isWithSnapshot());
  INode barNode = fsdir2.getINode4Write(bar.toString());
  assertTrue(barNode.getClass() == INodeFile.class);
  assertSame(fooNode, barNode.getParent());
  // dir1's s1 diff must be clean -- the failed rename left no trace
  List<DirectoryDiff> diffList = dir1Node
      .getDiffs().asList();
  assertEquals(1, diffList.size());
  DirectoryDiff diff = diffList.get(0);
  assertTrue(diff.getChildrenDiff().getList(ListType.CREATED).isEmpty());
  assertTrue(diff.getChildrenDiff().getList(ListType.DELETED).isEmpty());

  // check dir2: quota usage restored to dir2 + subdir2 only
  INodeDirectory dir2Node = fsdir2.getINode4Write(dir2.toString()).asDirectory();
  assertTrue(dir2Node.isSnapshottable());
  QuotaCounts counts = dir2Node.computeQuotaUsage(fsdir.getBlockStoragePolicySuite());
  assertEquals(2, counts.getNameSpace());
  assertEquals(0, counts.getStorageSpace());
  childrenList = ReadOnlyList.Util.asList(dir2Node.asDirectory()
      .getChildrenList(Snapshot.CURRENT_STATE_ID));
  assertEquals(1, childrenList.size());
  INode subdir2Node = childrenList.get(0);
  assertSame(dir2Node, subdir2Node.getParent());
  assertSame(subdir2Node, fsdir2.getINode4Write(subdir2.toString()));
  // dir2's s2 diff must also be clean
  diffList = dir2Node.getDiffs().asList();
  assertEquals(1, diffList.size());
  diff = diffList.get(0);
  assertTrue(diff.getChildrenDiff().getList(ListType.CREATED).isEmpty());
  assertTrue(diff.getChildrenDiff().getList(ListType.DELETED).isEmpty());
}
Project: aliyun-oss-hadoop-fs    File: TestRenameWithSnapshots.java
/**
 * Test the rename undo when removing dst node fails.
 *
 * A spied FSDirectory throws a RuntimeException from removeLastINode, so an
 * overwrite-rename of /test/dir1/foo onto /test/dir2/subdir/subdir fails
 * midway; the test then verifies that both the source and destination trees
 * are fully restored by the rename undo logic.
 */
@Test
public void testRenameUndo_6() throws Exception {
  final Path test = new Path("/test");
  final Path dir1 = new Path(test, "dir1");
  final Path dir2 = new Path(test, "dir2");
  final Path sub_dir2 = new Path(dir2, "subdir");
  final Path subsub_dir2 = new Path(sub_dir2, "subdir");
  hdfs.mkdirs(dir1);
  hdfs.mkdirs(subsub_dir2);

  final Path foo = new Path(dir1, "foo");
  hdfs.mkdirs(foo);

  SnapshotTestHelper.createSnapshot(hdfs, dir1, "s1");
  SnapshotTestHelper.createSnapshot(hdfs, dir2, "s2");

  // set ns quota of dir2 to 4, so the current remaining is 1 (already has
  // dir2, sub_dir2, and subsub_dir2)
  hdfs.setQuota(dir2, 4, Long.MAX_VALUE - 1);
  // Make removeLastINode fail so the rename must undo its partial work.
  FSDirectory fsdir2 = Mockito.spy(fsdir);
  Mockito.doThrow(new RuntimeException("fake exception")).when(fsdir2)
      .removeLastINode((INodesInPath) Mockito.anyObject());
  Whitebox.setInternalState(fsn, "dir", fsdir2);
  // rename /test/dir1/foo to /test/dir2/sub_dir2/subsub_dir2.
  // FSDirectory#verifyQuota4Rename will pass since foo only be counted
  // as 1 in NS quota. However, the rename operation will fail when removing
  // subsub_dir2.
  try {
    hdfs.rename(foo, subsub_dir2, Rename.OVERWRITE);
    fail("Expect QuotaExceedException");
  } catch (Exception e) {
    String msg = "fake exception";
    GenericTestUtils.assertExceptionContains(msg, e);
  }

  // check the undo: foo must be back under dir1 with a clean diff list
  assertTrue(hdfs.exists(foo));
  INodeDirectory dir1Node = fsdir2.getINode4Write(dir1.toString())
      .asDirectory();
  List<INode> childrenList = ReadOnlyList.Util.asList(dir1Node
      .getChildrenList(Snapshot.CURRENT_STATE_ID));
  assertEquals(1, childrenList.size());
  INode fooNode = childrenList.get(0);
  assertTrue(fooNode.asDirectory().isWithSnapshot());
  assertSame(dir1Node, fooNode.getParent());
  List<DirectoryDiff> diffList = dir1Node
      .getDiffs().asList();
  assertEquals(1, diffList.size());
  DirectoryDiff diff = diffList.get(0);
  assertTrue(diff.getChildrenDiff().getList(ListType.CREATED).isEmpty());
  assertTrue(diff.getChildrenDiff().getList(ListType.DELETED).isEmpty());

  // check dir2: quota usage and the whole subtree must be untouched
  INodeDirectory dir2Node = fsdir2.getINode4Write(dir2.toString()).asDirectory();
  assertTrue(dir2Node.isSnapshottable());
  QuotaCounts counts = dir2Node.computeQuotaUsage(fsdir.getBlockStoragePolicySuite());
  assertEquals(3, counts.getNameSpace());
  assertEquals(0, counts.getStorageSpace());
  // dir2Node is already an INodeDirectory; no asDirectory() needed
  childrenList = ReadOnlyList.Util.asList(dir2Node
      .getChildrenList(Snapshot.CURRENT_STATE_ID));
  assertEquals(1, childrenList.size());
  INode subdir2Node = childrenList.get(0);
  assertSame(dir2Node, subdir2Node.getParent());
  assertSame(subdir2Node, fsdir2.getINode4Write(sub_dir2.toString()));
  INode subsubdir2Node = fsdir2.getINode4Write(subsub_dir2.toString());
  assertTrue(subsubdir2Node.getClass() == INodeDirectory.class);
  assertSame(subdir2Node, subsubdir2Node.getParent());

  // dir2's s2 diff must be empty: nothing from the failed rename leaked in
  // (removed leftover cast parentheses around dir2Node)
  diffList = dir2Node.getDiffs().asList();
  assertEquals(1, diffList.size());
  diff = diffList.get(0);
  assertTrue(diff.getChildrenDiff().getList(ListType.CREATED).isEmpty());
  assertTrue(diff.getChildrenDiff().getList(ListType.DELETED).isEmpty());
}
Project: aliyun-oss-hadoop-fs    File: TestRenameWithSnapshots.java
/**
 * After the following operations:
 * Rename a dir -> create a snapshot s on dst tree -> delete the renamed dir
 * -> delete snapshot s on dst tree
 * 
 * Make sure we destroy everything created after the rename under the renamed
 * dir.
 */
@Test
public void testRenameDirAndDeleteSnapshot_3() throws Exception {
  final Path sdir1 = new Path("/dir1");
  final Path sdir2 = new Path("/dir2");
  final Path foo = new Path(sdir1, "foo");
  final Path bar = new Path(foo, "bar");
  DFSTestUtil.createFile(hdfs, bar, BLOCKSIZE, REPL, SEED);
  hdfs.mkdirs(sdir2);

  SnapshotTestHelper.createSnapshot(hdfs, sdir1, "s1");
  SnapshotTestHelper.createSnapshot(hdfs, sdir2, "s2");

  // rename /dir1/foo to /dir2/foo; foo is already recorded in snapshot s1
  final Path foo2 = new Path(sdir2, "foo");
  hdfs.rename(foo, foo2);

  // create two new files under foo2
  final Path bar2 = new Path(foo2, "bar2");
  DFSTestUtil.createFile(hdfs, bar2, BLOCKSIZE, REPL, SEED);
  final Path bar3 = new Path(foo2, "bar3");
  DFSTestUtil.createFile(hdfs, bar3, BLOCKSIZE, REPL, SEED);

  // create a new snapshot on sdir2
  hdfs.createSnapshot(sdir2, "s3");

  // delete foo2
  hdfs.delete(foo2, true);
  // delete s3
  hdfs.deleteSnapshot(sdir2, "s3");

  // check
  // expected namespace usage of dir1 is 3 (presumably sdir1, the foo copy
  // kept alive by s1, and bar)
  final INodeDirectory dir1Node = fsdir.getINode4Write(sdir1.toString())
      .asDirectory();
  QuotaCounts q1 = dir1Node.getDirectoryWithQuotaFeature().getSpaceConsumed();
  assertEquals(3, q1.getNameSpace());
  // only sdir2 itself should still be charged to dir2
  final INodeDirectory dir2Node = fsdir.getINode4Write(sdir2.toString())
      .asDirectory();
  QuotaCounts q2 = dir2Node.getDirectoryWithQuotaFeature().getSpaceConsumed();
  assertEquals(1, q2.getNameSpace());

  // the renamed dir survives only via the WithName reference in s1
  final Path foo_s1 = SnapshotTestHelper.getSnapshotPath(sdir1, "s1",
      foo.getName());
  INode fooRef = fsdir.getINode(foo_s1.toString());
  assertTrue(fooRef instanceof INodeReference.WithName);
  INodeReference.WithCount wc = 
      (WithCount) fooRef.asReference().getReferredINode();
  assertEquals(1, wc.getReferenceCount());
  // bar2/bar3 (created after the rename) must have been destroyed:
  // only the original child bar remains and foo's diff list is clean
  INodeDirectory fooNode = wc.getReferredINode().asDirectory();
  ReadOnlyList<INode> children = fooNode
      .getChildrenList(Snapshot.CURRENT_STATE_ID);
  assertEquals(1, children.size());
  assertEquals(bar.getName(), children.get(0).getLocalName());
  List<DirectoryDiff> diffList = fooNode.getDiffs().asList();
  assertEquals(1, diffList.size());
  Snapshot s1 = dir1Node.getSnapshot(DFSUtil.string2Bytes("s1"));
  assertEquals(s1.getId(), diffList.get(0).getSnapshotId());
  ChildrenDiff diff = diffList.get(0).getChildrenDiff();
  assertEquals(0, diff.getList(ListType.CREATED).size());
  assertEquals(0, diff.getList(ListType.DELETED).size());

  // verify the same state survives saving and reloading the fsimage
  restartClusterAndCheckImage(true);
}
Project: aliyun-oss-hadoop-fs    File: TestRenameWithSnapshots.java
/**
 * After the following operations:
 * Rename a dir -> create a snapshot s on dst tree -> rename the renamed dir
 * again -> delete snapshot s on dst tree
 * 
 * Make sure we only delete the snapshot s under the renamed dir.
 */
@Test
public void testRenameDirAndDeleteSnapshot_4() throws Exception {
  final Path sdir1 = new Path("/dir1");
  final Path sdir2 = new Path("/dir2");
  final Path foo = new Path(sdir1, "foo");
  final Path bar = new Path(foo, "bar");
  DFSTestUtil.createFile(hdfs, bar, BLOCKSIZE, REPL, SEED);
  hdfs.mkdirs(sdir2);

  SnapshotTestHelper.createSnapshot(hdfs, sdir1, "s1");
  SnapshotTestHelper.createSnapshot(hdfs, sdir2, "s2");

  // rename /dir1/foo to /dir2/foo
  final Path foo2 = new Path(sdir2, "foo");
  hdfs.rename(foo, foo2);

  // create two new files under foo2
  final Path bar2 = new Path(foo2, "bar2");
  DFSTestUtil.createFile(hdfs, bar2, BLOCKSIZE, REPL, SEED);
  final Path bar3 = new Path(foo2, "bar3");
  DFSTestUtil.createFile(hdfs, bar3, BLOCKSIZE, REPL, SEED);

  // create a new snapshot on sdir2
  hdfs.createSnapshot(sdir2, "s3");

  // rename foo2 again, back to its original location
  hdfs.rename(foo2, foo);
  // delete snapshot s3
  hdfs.deleteSnapshot(sdir2, "s3");

  // check
  final INodeDirectory dir1Node = fsdir.getINode4Write(sdir1.toString())
      .asDirectory();
  // sdir1 + s1 + foo_s1 (foo) + foo (foo + s1 + bar~bar3)
  QuotaCounts q1 = dir1Node.getDirectoryWithQuotaFeature().getSpaceConsumed();
  assertEquals(7, q1.getNameSpace());
  // only sdir2 itself should still be charged to dir2
  final INodeDirectory dir2Node = fsdir.getINode4Write(sdir2.toString())
      .asDirectory();
  QuotaCounts q2 = dir2Node.getDirectoryWithQuotaFeature().getSpaceConsumed();
  assertEquals(1, q2.getNameSpace());

  // foo is still recorded in s1 through a WithName reference
  final Path foo_s1 = SnapshotTestHelper.getSnapshotPath(sdir1, "s1",
      foo.getName());
  final INode fooRef = fsdir.getINode(foo_s1.toString());
  assertTrue(fooRef instanceof INodeReference.WithName);
  INodeReference.WithCount wc = 
      (WithCount) fooRef.asReference().getReferredINode();
  // two references remain: the WithName in s1 plus the current DstReference
  assertEquals(2, wc.getReferenceCount());
  // deleting s3 must keep all current children (bar, bar2, bar3)
  INodeDirectory fooNode = wc.getReferredINode().asDirectory();
  ReadOnlyList<INode> children = fooNode
      .getChildrenList(Snapshot.CURRENT_STATE_ID);
  assertEquals(3, children.size());
  assertEquals(bar.getName(), children.get(0).getLocalName());
  assertEquals(bar2.getName(), children.get(1).getLocalName());
  assertEquals(bar3.getName(), children.get(2).getLocalName());
  // only the s1 diff should remain on foo after s3 is deleted
  List<DirectoryDiff> diffList = fooNode.getDiffs().asList();
  assertEquals(1, diffList.size());
  Snapshot s1 = dir1Node.getSnapshot(DFSUtil.string2Bytes("s1"));
  assertEquals(s1.getId(), diffList.get(0).getSnapshotId());
  ChildrenDiff diff = diffList.get(0).getChildrenDiff();
  // bar2 and bar3 in the created list
  assertEquals(2, diff.getList(ListType.CREATED).size());
  assertEquals(0, diff.getList(ListType.DELETED).size());

  // the current foo must be a DstReference sharing the same WithCount
  final INode fooRef2 = fsdir.getINode4Write(foo.toString());
  assertTrue(fooRef2 instanceof INodeReference.DstReference);
  INodeReference.WithCount wc2 = 
      (WithCount) fooRef2.asReference().getReferredINode();
  assertSame(wc, wc2);
  assertSame(fooRef2, wc.getParentReference());

  // verify the same state survives saving and reloading the fsimage
  restartClusterAndCheckImage(true);
}
Project: big-c    File: AbstractINodeDiff.java
/**
 * Combine the posterior diff into this diff and collect blocks for deletion.
 *
 * @param bsps the block storage policy suite, used when computing quota usage
 * @param currentINode the inode that owns this diff list
 * @param posterior the later diff to be merged into this one
 * @param collectedBlocks blocks scheduled for deletion are added here
 * @param removedINodes inodes removed during the combination are added here
 * @return the quota counts released by combining the posterior diff
 *         (presumably a non-negative delta -- confirm in the implementations)
 */
abstract QuotaCounts combinePosteriorAndCollectBlocks(
    final BlockStoragePolicySuite bsps, final N currentINode,
    final D posterior, final BlocksMapUpdateInfo collectedBlocks,
    final List<INode> removedINodes);