Usage examples for the Java class org.apache.hadoop.hdfs.server.blockmanagement.BlockStoragePolicySuite

Project: hadoop    File: Mover.java
Mover(NameNodeConnector nnc, Configuration conf, AtomicInteger retryCount) {
  final long movedWinWidth = conf.getLong(
      DFSConfigKeys.DFS_MOVER_MOVEDWINWIDTH_KEY,
      DFSConfigKeys.DFS_MOVER_MOVEDWINWIDTH_DEFAULT);
  final int moverThreads = conf.getInt(
      DFSConfigKeys.DFS_MOVER_MOVERTHREADS_KEY,
      DFSConfigKeys.DFS_MOVER_MOVERTHREADS_DEFAULT);
  final int maxConcurrentMovesPerNode = conf.getInt(
      DFSConfigKeys.DFS_DATANODE_BALANCE_MAX_NUM_CONCURRENT_MOVES_KEY,
      DFSConfigKeys.DFS_DATANODE_BALANCE_MAX_NUM_CONCURRENT_MOVES_DEFAULT);
  this.retryMaxAttempts = conf.getInt(
      DFSConfigKeys.DFS_MOVER_RETRY_MAX_ATTEMPTS_KEY,
      DFSConfigKeys.DFS_MOVER_RETRY_MAX_ATTEMPTS_DEFAULT);
  this.retryCount = retryCount;
  this.dispatcher = new Dispatcher(nnc, Collections.<String> emptySet(),
      Collections.<String> emptySet(), movedWinWidth, moverThreads, 0,
      maxConcurrentMovesPerNode, conf);
  this.storages = new StorageMap();
  this.targetPaths = nnc.getTargetPaths();
  this.blockStoragePolicies = new BlockStoragePolicy[1 <<
      BlockStoragePolicySuite.ID_BIT_LENGTH];
}
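
The policy array above is sized to 1 << BlockStoragePolicySuite.ID_BIT_LENGTH so that a policy's byte ID can be used directly as an array index. A minimal sketch of how such a lookup table might be populated, assuming the dispatcher exposes a DistributedFileSystem whose getStoragePolicies() returns the policies configured on the NameNode (the accessors are assumptions, not taken from this page):

// Sketch: fill the ID-indexed policy lookup table.
// Assumes dispatcher.getDistributedFileSystem().getStoragePolicies()
// exists and returns the NameNode's policy suite.
private void initStoragePolicies() throws IOException {
  BlockStoragePolicy[] policies =
      dispatcher.getDistributedFileSystem().getStoragePolicies();
  for (BlockStoragePolicy policy : policies) {
    blockStoragePolicies[policy.getId()] = policy;
  }
}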
Project: hadoop    File: FSDirStatAndListingOp.java
/** Get the file info for a specific file.
 * @param fsd FSDirectory
 * @param path the string representation of the path to the file
 * @param src the INodesInPath resolved for the path
 * @param isRawPath true if a /.reserved/raw pathname was passed by the user
 * @param includeStoragePolicy whether to include the storage policy
 * @return an object containing information regarding the file,
 *         or null if the file is not found
 */
static HdfsFileStatus getFileInfo(
    FSDirectory fsd, String path, INodesInPath src, boolean isRawPath,
    boolean includeStoragePolicy)
    throws IOException {
  fsd.readLock();
  try {
    final INode i = src.getLastINode();
    byte policyId = includeStoragePolicy && i != null && !i.isSymlink() ?
        i.getStoragePolicyID() : BlockStoragePolicySuite.ID_UNSPECIFIED;
    return i == null ? null : createFileStatus(
        fsd, path, HdfsFileStatus.EMPTY_NAME, i, policyId,
        src.getPathSnapshotId(), isRawPath, src);
  } finally {
    fsd.readUnlock();
  }
}
Project: hadoop    File: FSDirStatAndListingOp.java
static HdfsFileStatus getFileInfo(
    FSDirectory fsd, String src, boolean resolveLink, boolean isRawPath,
    boolean includeStoragePolicy)
  throws IOException {
  String srcs = FSDirectory.normalizePath(src);
  if (srcs.endsWith(HdfsConstants.SEPARATOR_DOT_SNAPSHOT_DIR)) {
    if (fsd.getINode4DotSnapshot(srcs) != null) {
      return new HdfsFileStatus(0, true, 0, 0, 0, 0, null, null, null, null,
          HdfsFileStatus.EMPTY_NAME, -1L, 0, null,
          BlockStoragePolicySuite.ID_UNSPECIFIED);
    }
    return null;
  }

  fsd.readLock();
  try {
    final INodesInPath iip = fsd.getINodesInPath(srcs, resolveLink);
    return getFileInfo(fsd, src, iip, isRawPath, includeStoragePolicy);
  } finally {
    fsd.readUnlock();
  }
}
Project: hadoop    File: INodeReference.java
@Override
public final QuotaCounts computeQuotaUsage(BlockStoragePolicySuite bsps,
    byte blockStoragePolicyId, QuotaCounts counts, boolean useCache,
    int lastSnapshotId) {
  // If this.lastSnapshotId < lastSnapshotId, the rename of the referred
  // node happened before the rename of its ancestor. This should be
  // impossible, since for a WithName node we only count its children at
  // the time of the rename.
  Preconditions.checkState(lastSnapshotId == Snapshot.CURRENT_STATE_ID
      || this.lastSnapshotId >= lastSnapshotId);
  final INode referred = this.getReferredINode().asReference()
      .getReferredINode();
  // We will continue the quota usage computation using the same snapshot id
  // as the timeline (if the given snapshot id is valid). Also, we cannot use
  // the cache for the referred node since its cached quota may have already
  // been updated by changes in the current tree.
  int id = lastSnapshotId != Snapshot.CURRENT_STATE_ID ? 
      lastSnapshotId : this.lastSnapshotId;
  return referred.computeQuotaUsage(bsps, blockStoragePolicyId, counts,
      false, id);
}
Project: hadoop    File: INodeFile.java
@Override
public void destroyAndCollectBlocks(BlockStoragePolicySuite bsps,
    BlocksMapUpdateInfo collectedBlocks, final List<INode> removedINodes) {
  if (blocks != null && collectedBlocks != null) {
    for (BlockInfoContiguous blk : blocks) {
      collectedBlocks.addDeleteBlock(blk);
      blk.setBlockCollection(null);
    }
  }
  setBlocks(BlockInfoContiguous.EMPTY_ARRAY);
  if (getAclFeature() != null) {
    AclStorage.removeAclFeature(getAclFeature());
  }
  clear();
  removedINodes.add(this);
  FileWithSnapshotFeature sf = getFileWithSnapshotFeature();
  if (sf != null) {
    sf.getDiffs().destroyAndCollectSnapshotBlocks(collectedBlocks);
    sf.clearDiffs();
  }
}
Project: hadoop    File: FSDirRenameOp.java
/**
 * Verify quota for a rename operation where srcInodes[srcInodes.length-1]
 * moves to dstInodes[dstInodes.length-1].
 */
private static void verifyQuotaForRename(FSDirectory fsd, INodesInPath src,
    INodesInPath dst) throws QuotaExceededException {
  if (!fsd.getFSNamesystem().isImageLoaded() || fsd.shouldSkipQuotaChecks()) {
    // Do not check quota if the edits log is still being processed
    return;
  }
  int i = 0;
  while (src.getINode(i) == dst.getINode(i)) { i++; }
  // src[i - 1] is the last common ancestor.
  BlockStoragePolicySuite bsps = fsd.getBlockStoragePolicySuite();
  final QuotaCounts delta = src.getLastINode().computeQuotaUsage(bsps);

  // Reduce the required quota by dst that is being removed
  final INode dstINode = dst.getLastINode();
  if (dstINode != null) {
    delta.subtract(dstINode.computeQuotaUsage(bsps));
  }
  FSDirectory.verifyQuota(dst, dst.length() - 1, delta, src.getINode(i - 1));
}
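
In effect, the check charges the last common ancestor with usage(src subtree) minus usage(dst being overwritten). A toy illustration of the delta arithmetic with made-up figures (the nameSpace/storageSpace builder methods are assumed from the QuotaCounts API; only subtract(..) appears on this page):

// Hypothetical numbers: renaming src over an existing dst must fit
// usage(src) - usage(dst) under the common ancestor's remaining quota.
QuotaCounts delta = new QuotaCounts.Builder()
    .nameSpace(3)                        // 3 inodes in the src subtree
    .storageSpace(384L * 1024 * 1024)    // 384 MB consumed by src
    .build();
QuotaCounts dstUsage = new QuotaCounts.Builder()
    .nameSpace(1)                        // the dst file being replaced
    .storageSpace(128L * 1024 * 1024)    // 128 MB freed by its removal
    .build();
delta.subtract(dstUsage);                // net charge: 2 inodes, 256 MB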
Project: hadoop    File: FSDirRenameOp.java
boolean cleanDst(BlockStoragePolicySuite bsps, BlocksMapUpdateInfo collectedBlocks)
    throws QuotaExceededException {
  Preconditions.checkState(oldDstChild != null);
  List<INode> removedINodes = new ChunkedArrayList<>();
  final boolean filesDeleted;
  if (!oldDstChild.isInLatestSnapshot(dstIIP.getLatestSnapshotId())) {
    oldDstChild.destroyAndCollectBlocks(bsps, collectedBlocks, removedINodes);
    filesDeleted = true;
  } else {
    filesDeleted = oldDstChild.cleanSubtree(bsps, Snapshot.CURRENT_STATE_ID,
        dstIIP.getLatestSnapshotId(), collectedBlocks, removedINodes)
        .getNameSpace() >= 0;
  }
  fsd.getFSNamesystem().removeLeasesAndINodes(src, removedINodes, false);
  return filesDeleted;
}
Project: hadoop    File: FileWithSnapshotFeature.java
public QuotaCounts cleanFile(final BlockStoragePolicySuite bsps,
    final INodeFile file, final int snapshotId,
    int priorSnapshotId, final BlocksMapUpdateInfo collectedBlocks,
    final List<INode> removedINodes) {
  if (snapshotId == Snapshot.CURRENT_STATE_ID) {
    // delete the current file while the file has the snapshot feature
    if (!isCurrentFileDeleted()) {
      file.recordModification(priorSnapshotId);
      deleteCurrentFile();
    }
    collectBlocksAndClear(bsps, file, collectedBlocks, removedINodes);
    return new QuotaCounts.Builder().build();
  } else { // delete the snapshot
    priorSnapshotId = getDiffs().updatePrior(snapshotId, priorSnapshotId);
    return diffs.deleteSnapshotDiff(bsps, snapshotId, priorSnapshotId, file,
        collectedBlocks, removedINodes);
  }
}
Project: hadoop    File: FileWithSnapshotFeature.java
/**
 * If some blocks at the end of the block list no longer belong to
 * any inode, collect them and update the block list.
 */
public void collectBlocksAndClear(final BlockStoragePolicySuite bsps, final INodeFile file,
    final BlocksMapUpdateInfo info, final List<INode> removedINodes) {
  // check if everything is deleted.
  if (isCurrentFileDeleted() && getDiffs().asList().isEmpty()) {
    file.destroyAndCollectBlocks(bsps, info, removedINodes);
    return;
  }
  // find max file size.
  final long max;
  FileDiff diff = getDiffs().getLast();
  if (isCurrentFileDeleted()) {
    max = diff == null ? 0 : diff.getFileSize();
  } else { 
    max = file.computeFileSize();
  }

  // Collect blocks that should be deleted
  FileDiff last = diffs.getLast();
  BlockInfoContiguous[] snapshotBlocks = last == null ? null : last.getBlocks();
  if (snapshotBlocks == null) {
    file.collectBlocksBeyondMax(max, info);
  } else {
    file.collectBlocksBeyondSnapshot(snapshotBlocks, info);
  }
}
Project: hadoop    File: DirectoryWithSnapshotFeature.java
/** clear the created list */
private QuotaCounts destroyCreatedList(
    final BlockStoragePolicySuite bsps,
    final INodeDirectory currentINode,
    final BlocksMapUpdateInfo collectedBlocks,
    final List<INode> removedINodes) {
  QuotaCounts counts = new QuotaCounts.Builder().build();
  final List<INode> createdList = getList(ListType.CREATED);
  for (INode c : createdList) {
    c.computeQuotaUsage(bsps, counts, true);
    c.destroyAndCollectBlocks(bsps, collectedBlocks, removedINodes);
    // c should be contained in the children list, remove it
    currentINode.removeChild(c);
  }
  createdList.clear();
  return counts;
}
Project: hadoop    File: DirectoryWithSnapshotFeature.java
@Override
QuotaCounts combinePosteriorAndCollectBlocks(
    final BlockStoragePolicySuite bsps,
    final INodeDirectory currentDir, final DirectoryDiff posterior,
    final BlocksMapUpdateInfo collectedBlocks,
    final List<INode> removedINodes) {
  final QuotaCounts counts = new QuotaCounts.Builder().build();
  diff.combinePosterior(posterior.diff, new Diff.Processor<INode>() {
    /** Collect blocks for deleted files. */
    @Override
    public void process(INode inode) {
      if (inode != null) {
        inode.computeQuotaUsage(bsps, counts, false);
        inode.destroyAndCollectBlocks(bsps, collectedBlocks, removedINodes);
      }
    }
  });
  return counts;
}
Project: hadoop    File: INodeDirectory.java
void setQuota(BlockStoragePolicySuite bsps, long nsQuota, long ssQuota, StorageType type) {
  DirectoryWithQuotaFeature quota = getDirectoryWithQuotaFeature();
  if (quota != null) {
    // already has quota; so set the quota to the new values
    if (type != null) {
      quota.setQuota(ssQuota, type);
    } else {
      quota.setQuota(nsQuota, ssQuota);
    }
    if (!isQuotaSet() && !isRoot()) {
      removeFeature(quota);
    }
  } else {
    final QuotaCounts c = computeQuotaUsage(bsps);
    DirectoryWithQuotaFeature.Builder builder =
        new DirectoryWithQuotaFeature.Builder().nameSpaceQuota(nsQuota);
    if (type != null) {
      builder.typeQuota(type, ssQuota);
    } else {
      builder.storageSpaceQuota(ssQuota);
    }
    addDirectoryWithQuotaFeature(builder.build()).setSpaceConsumed(c);
  }
}
Project: hadoop    File: INodeDirectory.java
/**
 * Undo the rename operation for the dst tree, i.e., if the rename operation
 * (with the OVERWRITE option) removed a file/dir from the dst tree, add it
 * back and remove any record of it from the deleted list.
 */
public void undoRename4DstParent(final BlockStoragePolicySuite bsps,
    final INode deletedChild,
    int latestSnapshotId) throws QuotaExceededException {
  DirectoryWithSnapshotFeature sf = getDirectoryWithSnapshotFeature();
  Preconditions.checkState(sf != null,
      "Directory does not have snapshot feature");
  boolean removeDeletedChild = sf.getDiffs().removeChild(ListType.DELETED,
      deletedChild);
  int sid = removeDeletedChild ? Snapshot.CURRENT_STATE_ID : latestSnapshotId;
  final boolean added = addChild(deletedChild, true, sid);
  // update quota usage if the add succeeded and the old child had not
  // been stored in the deleted list before
  if (added && !removeDeletedChild) {
    final QuotaCounts counts = deletedChild.computeQuotaUsage(bsps);
    addSpaceConsumed(counts, false);
  }
}
Project: hadoop    File: INodeDirectory.java
/** Call cleanSubtree(..) recursively down the subtree. */
public QuotaCounts cleanSubtreeRecursively(final BlockStoragePolicySuite bsps,
    final int snapshot,
    int prior, final BlocksMapUpdateInfo collectedBlocks,
    final List<INode> removedINodes, final Map<INode, INode> excludedNodes) {
  QuotaCounts counts = new QuotaCounts.Builder().build();
  // In the case of snapshot deletion, since this call happens after we
  // modify the diff list, the snapshot to be deleted has already been
  // combined or renamed into its latest previous snapshot. (Besides, we
  // also need to consider nodes created after prior but before snapshot;
  // this is handled in DirectoryWithSnapshotFeature.)
  int s = snapshot != Snapshot.CURRENT_STATE_ID
      && prior != Snapshot.NO_SNAPSHOT_ID ? prior : snapshot;
  for (INode child : getChildrenList(s)) {
    if (snapshot != Snapshot.CURRENT_STATE_ID && excludedNodes != null
        && excludedNodes.containsKey(child)) {
      continue;
    } else {
      QuotaCounts childCounts = child.cleanSubtree(bsps, snapshot, prior,
          collectedBlocks, removedINodes);
      counts.add(childCounts);
    }
  }
  return counts;
}
Project: hadoop    File: INodeDirectory.java
@Override
public void destroyAndCollectBlocks(final BlockStoragePolicySuite bsps,
    final BlocksMapUpdateInfo collectedBlocks,
    final List<INode> removedINodes) {
  final DirectoryWithSnapshotFeature sf = getDirectoryWithSnapshotFeature();
  if (sf != null) {
    sf.clear(bsps, this, collectedBlocks, removedINodes);
  }
  for (INode child : getChildrenList(Snapshot.CURRENT_STATE_ID)) {
    child.destroyAndCollectBlocks(bsps, collectedBlocks, removedINodes);
  }
  if (getAclFeature() != null) {
    AclStorage.removeAclFeature(getAclFeature());
  }
  clear();
  removedINodes.add(this);
}
Project: hadoop    File: PBHelper.java
public static HdfsFileStatus convert(HdfsFileStatusProto fs) {
  if (fs == null) {
    return null;
  }
  return new HdfsLocatedFileStatus(
      fs.getLength(), fs.getFileType().equals(FileType.IS_DIR), 
      fs.getBlockReplication(), fs.getBlocksize(),
      fs.getModificationTime(), fs.getAccessTime(),
      PBHelper.convert(fs.getPermission()), fs.getOwner(), fs.getGroup(), 
      fs.getFileType().equals(FileType.IS_SYMLINK) ? 
          fs.getSymlink().toByteArray() : null,
      fs.getPath().toByteArray(),
      fs.hasFileId() ? fs.getFileId() : INodeId.GRANDFATHER_INODE_ID,
      fs.hasLocations() ? PBHelper.convert(fs.getLocations()) : null,
      fs.hasChildrenNum() ? fs.getChildrenNum() : -1,
      fs.hasFileEncryptionInfo() ? convert(fs.getFileEncryptionInfo()) : null,
      fs.hasStoragePolicy() ? (byte) fs.getStoragePolicy()
          : BlockStoragePolicySuite.ID_UNSPECIFIED);
}
Project: hadoop    File: TestStoragePolicySummary.java
@Test
public void testMultipleHots() {
  BlockStoragePolicySuite bsps = BlockStoragePolicySuite.createDefaultSuite();
  StoragePolicySummary sts = new StoragePolicySummary(bsps.getAllPolicies());
  BlockStoragePolicy hot = bsps.getPolicy("HOT");
  sts.add(new StorageType[]{StorageType.DISK}, hot);
  sts.add(new StorageType[]{StorageType.DISK, StorageType.DISK}, hot);
  sts.add(new StorageType[]{StorageType.DISK,
      StorageType.DISK, StorageType.DISK}, hot);
  sts.add(new StorageType[]{StorageType.DISK,
      StorageType.DISK, StorageType.DISK, StorageType.DISK}, hot);
  Map<String, Long> actualOutput = convertToStringMap(sts);
  Assert.assertEquals(4, actualOutput.size());
  Map<String, Long> expectedOutput = new HashMap<>();
  expectedOutput.put("HOT|DISK:1(HOT)", 1L);
  expectedOutput.put("HOT|DISK:2(HOT)", 1L);
  expectedOutput.put("HOT|DISK:3(HOT)", 1L);
  expectedOutput.put("HOT|DISK:4(HOT)", 1L);
  Assert.assertEquals(expectedOutput, actualOutput);
}
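
The convertToStringMap helper invoked above is not reproduced on this page. A plausible shape for it, inferred from expected keys such as "HOT|DISK:3(HOT)" (the getSortedCounts accessor below is hypothetical, introduced only to make the sketch self-contained):

// Reconstruction; StoragePolicySummary's real internals may differ.
private Map<String, Long> convertToStringMap(StoragePolicySummary sts) {
  Map<String, Long> actualOutput = new LinkedHashMap<>();
  // Each tallied storage-type combination renders as
  // "POLICY|TYPE:count(POLICY)", matching the assertions above.
  for (Map.Entry<?, Long> entry : sts.getSortedCounts().entrySet()) { // hypothetical accessor
    actualOutput.put(entry.getKey().toString(), entry.getValue());
  }
  return actualOutput;
}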
Project: hadoop    File: TestStoragePolicySummary.java
@Test
public void testMultipleHotsWithDifferentCounts() {
  BlockStoragePolicySuite bsps = BlockStoragePolicySuite.createDefaultSuite();
  StoragePolicySummary sts = new StoragePolicySummary(bsps.getAllPolicies());
  BlockStoragePolicy hot = bsps.getPolicy("HOT");
  sts.add(new StorageType[]{StorageType.DISK}, hot);
  sts.add(new StorageType[]{StorageType.DISK, StorageType.DISK}, hot);
  sts.add(new StorageType[]{StorageType.DISK, StorageType.DISK}, hot);
  sts.add(new StorageType[]{StorageType.DISK,
      StorageType.DISK, StorageType.DISK}, hot);
  sts.add(new StorageType[]{StorageType.DISK,
      StorageType.DISK, StorageType.DISK}, hot);
  sts.add(new StorageType[]{StorageType.DISK,
      StorageType.DISK, StorageType.DISK, StorageType.DISK}, hot);
  Map<String, Long> actualOutput = convertToStringMap(sts);
  Assert.assertEquals(4, actualOutput.size());
  Map<String, Long> expectedOutput = new HashMap<>();
  expectedOutput.put("HOT|DISK:1(HOT)", 1L);
  expectedOutput.put("HOT|DISK:2(HOT)", 2L);
  expectedOutput.put("HOT|DISK:3(HOT)", 2L);
  expectedOutput.put("HOT|DISK:4(HOT)", 1L);
  Assert.assertEquals(expectedOutput, actualOutput);
}
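
For quick orientation outside a test, a small standalone sketch that builds the default suite and lists its policies; getAllPolicies() is used by the tests above, and getId()/getName() are the standard BlockStoragePolicy accessors (the printed IDs depend on the Hadoop release):

import org.apache.hadoop.hdfs.protocol.BlockStoragePolicy;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockStoragePolicySuite;

public class ListDefaultPolicies {
  public static void main(String[] args) {
    BlockStoragePolicySuite suite = BlockStoragePolicySuite.createDefaultSuite();
    // Prints each policy's ID and name, e.g. HOT, WARM, COLD
    // in a stock configuration.
    for (BlockStoragePolicy policy : suite.getAllPolicies()) {
      System.out.println(policy.getId() + "\t" + policy.getName());
    }
  }
}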
Project: big-c    File: INodeDirectory.java
void setQuota(BlockStoragePolicySuite bsps, long nsQuota, long ssQuota, StorageType type) {
  DirectoryWithQuotaFeature quota = getDirectoryWithQuotaFeature();
  if (quota != null) {
    // already has quota; so set the quota to the new values
    if (type != null) {
      quota.setQuota(ssQuota, type);
    } else {
      quota.setQuota(nsQuota, ssQuota);
    }
    if (!isQuotaSet() && !isRoot()) {
      removeFeature(quota);
    }
  } else {
    final QuotaCounts c = computeQuotaUsage(bsps);
    DirectoryWithQuotaFeature.Builder builder =
        new DirectoryWithQuotaFeature.Builder().nameSpaceQuota(nsQuota);
    if (type != null) {
      builder.typeQuota(type, ssQuota);
    } else {
      builder.storageSpaceQuota(ssQuota);
    }
    addDirectoryWithQuotaFeature(builder.build()).setSpaceConsumed(c);
  }
}
Project: aliyun-oss-hadoop-fs    File: FSDirRenameOp.java
/**
 * Verify quota for a rename operation where srcInodes[srcInodes.length-1]
 * moves to dstInodes[dstInodes.length-1].
 */
private static void verifyQuotaForRename(FSDirectory fsd, INodesInPath src,
    INodesInPath dst) throws QuotaExceededException {
  if (!fsd.getFSNamesystem().isImageLoaded() || fsd.shouldSkipQuotaChecks()) {
    // Do not check quota if the edits log is still being processed
    return;
  }
  int i = 0;
  while (src.getINode(i) == dst.getINode(i)) { i++; }
  // src[i - 1] is the last common ancestor.
  BlockStoragePolicySuite bsps = fsd.getBlockStoragePolicySuite();
  final QuotaCounts delta = src.getLastINode().computeQuotaUsage(bsps);

  // Reduce the required quota by dst that is being removed
  final INode dstINode = dst.getLastINode();
  if (dstINode != null) {
    delta.subtract(dstINode.computeQuotaUsage(bsps));
  }
  FSDirectory.verifyQuota(dst, dst.length() - 1, delta, src.getINode(i - 1));
}
Project: big-c    File: TestStoragePolicySummary.java
@Test
public void testMultipleHots() {
  BlockStoragePolicySuite bsps = BlockStoragePolicySuite.createDefaultSuite();
  StoragePolicySummary sts = new StoragePolicySummary(bsps.getAllPolicies());
  BlockStoragePolicy hot = bsps.getPolicy("HOT");
  sts.add(new StorageType[]{StorageType.DISK}, hot);
  sts.add(new StorageType[]{StorageType.DISK, StorageType.DISK}, hot);
  sts.add(new StorageType[]{StorageType.DISK,
      StorageType.DISK, StorageType.DISK}, hot);
  sts.add(new StorageType[]{StorageType.DISK,
      StorageType.DISK, StorageType.DISK, StorageType.DISK}, hot);
  Map<String, Long> actualOutput = convertToStringMap(sts);
  Assert.assertEquals(4, actualOutput.size());
  Map<String, Long> expectedOutput = new HashMap<>();
  expectedOutput.put("HOT|DISK:1(HOT)", 1L);
  expectedOutput.put("HOT|DISK:2(HOT)", 1L);
  expectedOutput.put("HOT|DISK:3(HOT)", 1L);
  expectedOutput.put("HOT|DISK:4(HOT)", 1L);
  Assert.assertEquals(expectedOutput, actualOutput);
}
Project: big-c    File: INodeDirectory.java
/** Call cleanSubtree(..) recursively down the subtree. */
public QuotaCounts cleanSubtreeRecursively(final BlockStoragePolicySuite bsps,
    final int snapshot,
    int prior, final BlocksMapUpdateInfo collectedBlocks,
    final List<INode> removedINodes, final Map<INode, INode> excludedNodes) {
  QuotaCounts counts = new QuotaCounts.Builder().build();
  // In the case of snapshot deletion, since this call happens after we
  // modify the diff list, the snapshot to be deleted has already been
  // combined or renamed into its latest previous snapshot. (Besides, we
  // also need to consider nodes created after prior but before snapshot;
  // this is handled in DirectoryWithSnapshotFeature.)
  int s = snapshot != Snapshot.CURRENT_STATE_ID
      && prior != Snapshot.NO_SNAPSHOT_ID ? prior : snapshot;
  for (INode child : getChildrenList(s)) {
    if (snapshot != Snapshot.CURRENT_STATE_ID && excludedNodes != null
        && excludedNodes.containsKey(child)) {
      continue;
    } else {
      QuotaCounts childCounts = child.cleanSubtree(bsps, snapshot, prior,
          collectedBlocks, removedINodes);
      counts.add(childCounts);
    }
  }
  return counts;
}
Project: aliyun-oss-hadoop-fs    File: FSDirRenameOp.java
boolean cleanDst(BlockStoragePolicySuite bsps, BlocksMapUpdateInfo collectedBlocks)
    throws QuotaExceededException {
  Preconditions.checkState(oldDstChild != null);
  List<INode> removedINodes = new ChunkedArrayList<>();
  List<Long> removedUCFiles = new ChunkedArrayList<>();
  INode.ReclaimContext context = new INode.ReclaimContext(
      bsps, collectedBlocks, removedINodes, removedUCFiles);
  final boolean filesDeleted;
  if (!oldDstChild.isInLatestSnapshot(dstIIP.getLatestSnapshotId())) {
    oldDstChild.destroyAndCollectBlocks(context);
    filesDeleted = true;
  } else {
    oldDstChild.cleanSubtree(context, Snapshot.CURRENT_STATE_ID,
        dstIIP.getLatestSnapshotId());
    filesDeleted = context.quotaDelta().getNsDelta() >= 0;
  }
  fsd.updateReplicationFactor(context.collectedBlocks()
                                  .toUpdateReplicationInfo());

  fsd.getFSNamesystem().removeLeasesAndINodes(
      removedUCFiles, removedINodes, false);
  return filesDeleted;
}
Project: big-c    File: INodeDirectory.java
/**
 * Undo the rename operation for the dst tree, i.e., if the rename operation
 * (with the OVERWRITE option) removed a file/dir from the dst tree, add it
 * back and remove any record of it from the deleted list.
 */
public void undoRename4DstParent(final BlockStoragePolicySuite bsps,
    final INode deletedChild,
    int latestSnapshotId) throws QuotaExceededException {
  DirectoryWithSnapshotFeature sf = getDirectoryWithSnapshotFeature();
  Preconditions.checkState(sf != null,
      "Directory does not have snapshot feature");
  boolean removeDeletedChild = sf.getDiffs().removeChild(ListType.DELETED,
      deletedChild);
  int sid = removeDeletedChild ? Snapshot.CURRENT_STATE_ID : latestSnapshotId;
  final boolean added = addChild(deletedChild, true, sid);
  // update quota usage if the add succeeded and the old child had not
  // been stored in the deleted list before
  if (added && !removeDeletedChild) {
    final QuotaCounts counts = deletedChild.computeQuotaUsage(bsps);
    addSpaceConsumed(counts, false);
  }
}
Project: aliyun-oss-hadoop-fs    File: INodeDirectory.java
void setQuota(BlockStoragePolicySuite bsps, long nsQuota, long ssQuota, StorageType type) {
  DirectoryWithQuotaFeature quota = getDirectoryWithQuotaFeature();
  if (quota != null) {
    // already has quota; so set the quota to the new values
    if (type != null) {
      quota.setQuota(ssQuota, type);
    } else {
      quota.setQuota(nsQuota, ssQuota);
    }
    if (!isQuotaSet() && !isRoot()) {
      removeFeature(quota);
    }
  } else {
    final QuotaCounts c = computeQuotaUsage(bsps);
    DirectoryWithQuotaFeature.Builder builder =
        new DirectoryWithQuotaFeature.Builder().nameSpaceQuota(nsQuota);
    if (type != null) {
      builder.typeQuota(type, ssQuota);
    } else {
      builder.storageSpaceQuota(ssQuota);
    }
    addDirectoryWithQuotaFeature(builder.build()).setSpaceConsumed(c);
  }
}
Project: aliyun-oss-hadoop-fs    File: INodeDirectory.java
/**
 * Undo the rename operation for the dst tree, i.e., if the rename operation
 * (with the OVERWRITE option) removed a file/dir from the dst tree, add it
 * back and remove any record of it from the deleted list.
 */
public void undoRename4DstParent(final BlockStoragePolicySuite bsps,
    final INode deletedChild,
    int latestSnapshotId) throws QuotaExceededException {
  DirectoryWithSnapshotFeature sf = getDirectoryWithSnapshotFeature();
  assert sf != null : "Directory does not have snapshot feature";
  boolean removeDeletedChild = sf.getDiffs().removeChild(ListType.DELETED,
      deletedChild);
  int sid = removeDeletedChild ? Snapshot.CURRENT_STATE_ID : latestSnapshotId;
  final boolean added = addChild(deletedChild, true, sid);
  // update quota usage if the add succeeded and the old child had not
  // been stored in the deleted list before
  if (added && !removeDeletedChild) {
    final QuotaCounts counts = deletedChild.computeQuotaUsage(bsps);
    addSpaceConsumed(counts, false);
  }
}
Project: aliyun-oss-hadoop-fs    File: TestStoragePolicySummary.java
@Test
public void testMultipleHots() {
  BlockStoragePolicySuite bsps = BlockStoragePolicySuite.createDefaultSuite();
  StoragePolicySummary sts = new StoragePolicySummary(bsps.getAllPolicies());
  BlockStoragePolicy hot = bsps.getPolicy("HOT");
  sts.add(new StorageType[]{StorageType.DISK}, hot);
  sts.add(new StorageType[]{StorageType.DISK, StorageType.DISK}, hot);
  sts.add(new StorageType[]{StorageType.DISK,
      StorageType.DISK, StorageType.DISK}, hot);
  sts.add(new StorageType[]{StorageType.DISK,
      StorageType.DISK, StorageType.DISK, StorageType.DISK}, hot);
  Map<String, Long> actualOutput = convertToStringMap(sts);
  Assert.assertEquals(4, actualOutput.size());
  Map<String, Long> expectedOutput = new HashMap<>();
  expectedOutput.put("HOT|DISK:1(HOT)", 1L);
  expectedOutput.put("HOT|DISK:2(HOT)", 1L);
  expectedOutput.put("HOT|DISK:3(HOT)", 1L);
  expectedOutput.put("HOT|DISK:4(HOT)", 1L);
  Assert.assertEquals(expectedOutput, actualOutput);
}
Project: aliyun-oss-hadoop-fs    File: TestStoragePolicySummary.java
@Test
public void testMultipleHotsWithDifferentCounts() {
  BlockStoragePolicySuite bsps = BlockStoragePolicySuite.createDefaultSuite();
  StoragePolicySummary sts = new StoragePolicySummary(bsps.getAllPolicies());
  BlockStoragePolicy hot = bsps.getPolicy("HOT");
  sts.add(new StorageType[]{StorageType.DISK}, hot);
  sts.add(new StorageType[]{StorageType.DISK, StorageType.DISK}, hot);
  sts.add(new StorageType[]{StorageType.DISK, StorageType.DISK}, hot);
  sts.add(new StorageType[]{StorageType.DISK,
      StorageType.DISK, StorageType.DISK}, hot);
  sts.add(new StorageType[]{StorageType.DISK,
      StorageType.DISK, StorageType.DISK}, hot);
  sts.add(new StorageType[]{StorageType.DISK,
      StorageType.DISK, StorageType.DISK, StorageType.DISK}, hot);
  Map<String, Long> actualOutput = convertToStringMap(sts);
  Assert.assertEquals(4, actualOutput.size());
  Map<String, Long> expectedOutput = new HashMap<>();
  expectedOutput.put("HOT|DISK:1(HOT)", 1L);
  expectedOutput.put("HOT|DISK:2(HOT)", 2L);
  expectedOutput.put("HOT|DISK:3(HOT)", 2L);
  expectedOutput.put("HOT|DISK:4(HOT)", 1L);
  Assert.assertEquals(expectedOutput, actualOutput);
}
Project: big-c    File: TestStoragePolicySummary.java
@Test
public void testMultipleHotsWithDifferentCounts() {
  BlockStoragePolicySuite bsps = BlockStoragePolicySuite.createDefaultSuite();
  StoragePolicySummary sts = new StoragePolicySummary(bsps.getAllPolicies());
  BlockStoragePolicy hot = bsps.getPolicy("HOT");
  sts.add(new StorageType[]{StorageType.DISK}, hot);
  sts.add(new StorageType[]{StorageType.DISK, StorageType.DISK}, hot);
  sts.add(new StorageType[]{StorageType.DISK, StorageType.DISK}, hot);
  sts.add(new StorageType[]{StorageType.DISK,
      StorageType.DISK, StorageType.DISK}, hot);
  sts.add(new StorageType[]{StorageType.DISK,
      StorageType.DISK, StorageType.DISK}, hot);
  sts.add(new StorageType[]{StorageType.DISK,
      StorageType.DISK, StorageType.DISK, StorageType.DISK}, hot);
  Map<String, Long> actualOutput = convertToStringMap(sts);
  Assert.assertEquals(4, actualOutput.size());
  Map<String, Long> expectedOutput = new HashMap<>();
  expectedOutput.put("HOT|DISK:1(HOT)", 1L);
  expectedOutput.put("HOT|DISK:2(HOT)", 2L);
  expectedOutput.put("HOT|DISK:3(HOT)", 2L);
  expectedOutput.put("HOT|DISK:4(HOT)", 1L);
  Assert.assertEquals(expectedOutput, actualOutput);
}
Project: big-c    File: Mover.java
Mover(NameNodeConnector nnc, Configuration conf, AtomicInteger retryCount) {
  final long movedWinWidth = conf.getLong(
      DFSConfigKeys.DFS_MOVER_MOVEDWINWIDTH_KEY,
      DFSConfigKeys.DFS_MOVER_MOVEDWINWIDTH_DEFAULT);
  final int moverThreads = conf.getInt(
      DFSConfigKeys.DFS_MOVER_MOVERTHREADS_KEY,
      DFSConfigKeys.DFS_MOVER_MOVERTHREADS_DEFAULT);
  final int maxConcurrentMovesPerNode = conf.getInt(
      DFSConfigKeys.DFS_DATANODE_BALANCE_MAX_NUM_CONCURRENT_MOVES_KEY,
      DFSConfigKeys.DFS_DATANODE_BALANCE_MAX_NUM_CONCURRENT_MOVES_DEFAULT);
  this.retryMaxAttempts = conf.getInt(
      DFSConfigKeys.DFS_MOVER_RETRY_MAX_ATTEMPTS_KEY,
      DFSConfigKeys.DFS_MOVER_RETRY_MAX_ATTEMPTS_DEFAULT);
  this.retryCount = retryCount;
  this.dispatcher = new Dispatcher(nnc, Collections.<String> emptySet(),
      Collections.<String> emptySet(), movedWinWidth, moverThreads, 0,
      maxConcurrentMovesPerNode, conf);
  this.storages = new StorageMap();
  this.targetPaths = nnc.getTargetPaths();
  this.blockStoragePolicies = new BlockStoragePolicy[1 <<
      BlockStoragePolicySuite.ID_BIT_LENGTH];
}
Project: big-c    File: FSDirStatAndListingOp.java
/** Get the file info for a specific file.
 * @param fsd FSDirectory
 * @param path the string representation of the path to the file
 * @param src the INodesInPath resolved for the path
 * @param isRawPath true if a /.reserved/raw pathname was passed by the user
 * @param includeStoragePolicy whether to include the storage policy
 * @return an object containing information regarding the file,
 *         or null if the file is not found
 */
static HdfsFileStatus getFileInfo(
    FSDirectory fsd, String path, INodesInPath src, boolean isRawPath,
    boolean includeStoragePolicy)
    throws IOException {
  fsd.readLock();
  try {
    final INode i = src.getLastINode();
    byte policyId = includeStoragePolicy && i != null && !i.isSymlink() ?
        i.getStoragePolicyID() : BlockStoragePolicySuite.ID_UNSPECIFIED;
    return i == null ? null : createFileStatus(
        fsd, path, HdfsFileStatus.EMPTY_NAME, i, policyId,
        src.getPathSnapshotId(), isRawPath, src);
  } finally {
    fsd.readUnlock();
  }
}
Project: big-c    File: FSDirStatAndListingOp.java
static HdfsFileStatus getFileInfo(
    FSDirectory fsd, String src, boolean resolveLink, boolean isRawPath,
    boolean includeStoragePolicy)
  throws IOException {
  String srcs = FSDirectory.normalizePath(src);
  if (srcs.endsWith(HdfsConstants.SEPARATOR_DOT_SNAPSHOT_DIR)) {
    if (fsd.getINode4DotSnapshot(srcs) != null) {
      return new HdfsFileStatus(0, true, 0, 0, 0, 0, null, null, null, null,
          HdfsFileStatus.EMPTY_NAME, -1L, 0, null,
          BlockStoragePolicySuite.ID_UNSPECIFIED);
    }
    return null;
  }

  fsd.readLock();
  try {
    final INodesInPath iip = fsd.getINodesInPath(srcs, resolveLink);
    return getFileInfo(fsd, src, iip, isRawPath, includeStoragePolicy);
  } finally {
    fsd.readUnlock();
  }
}
Project: big-c    File: INodeReference.java
@Override
public final QuotaCounts computeQuotaUsage(BlockStoragePolicySuite bsps,
    byte blockStoragePolicyId, QuotaCounts counts, boolean useCache,
    int lastSnapshotId) {
  // If this.lastSnapshotId < lastSnapshotId, the rename of the referred
  // node happened before the rename of its ancestor. This should be
  // impossible, since for a WithName node we only count its children at
  // the time of the rename.
  Preconditions.checkState(lastSnapshotId == Snapshot.CURRENT_STATE_ID
      || this.lastSnapshotId >= lastSnapshotId);
  final INode referred = this.getReferredINode().asReference()
      .getReferredINode();
  // We will continue the quota usage computation using the same snapshot id
  // as the timeline (if the given snapshot id is valid). Also, we cannot use
  // the cache for the referred node since its cached quota may have already
  // been updated by changes in the current tree.
  int id = lastSnapshotId != Snapshot.CURRENT_STATE_ID ? 
      lastSnapshotId : this.lastSnapshotId;
  return referred.computeQuotaUsage(bsps, blockStoragePolicyId, counts,
      false, id);
}
Project: big-c    File: INodeFile.java
@Override
public void destroyAndCollectBlocks(BlockStoragePolicySuite bsps,
    BlocksMapUpdateInfo collectedBlocks, final List<INode> removedINodes) {
  if (blocks != null && collectedBlocks != null) {
    for (BlockInfoContiguous blk : blocks) {
      collectedBlocks.addDeleteBlock(blk);
      blk.setBlockCollection(null);
    }
  }
  setBlocks(null);
  if (getAclFeature() != null) {
    AclStorage.removeAclFeature(getAclFeature());
  }
  clear();
  removedINodes.add(this);
  FileWithSnapshotFeature sf = getFileWithSnapshotFeature();
  if (sf != null) {
    sf.getDiffs().destroyAndCollectSnapshotBlocks(collectedBlocks);
    sf.clearDiffs();
  }
}
Project: big-c    File: FSDirRenameOp.java
/**
 * Verify quota for a rename operation where srcInodes[srcInodes.length-1]
 * moves to dstInodes[dstInodes.length-1].
 */
private static void verifyQuotaForRename(FSDirectory fsd, INodesInPath src,
    INodesInPath dst) throws QuotaExceededException {
  if (!fsd.getFSNamesystem().isImageLoaded() || fsd.shouldSkipQuotaChecks()) {
    // Do not check quota if the edits log is still being processed
    return;
  }
  int i = 0;
  while (src.getINode(i) == dst.getINode(i)) { i++; }
  // src[i - 1] is the last common ancestor.
  BlockStoragePolicySuite bsps = fsd.getBlockStoragePolicySuite();
  final QuotaCounts delta = src.getLastINode().computeQuotaUsage(bsps);

  // Reduce the required quota by dst that is being removed
  final INode dstINode = dst.getLastINode();
  if (dstINode != null) {
    delta.subtract(dstINode.computeQuotaUsage(bsps));
  }
  FSDirectory.verifyQuota(dst, dst.length() - 1, delta, src.getINode(i - 1));
}
Project: big-c    File: FSDirRenameOp.java
boolean cleanDst(BlockStoragePolicySuite bsps, BlocksMapUpdateInfo collectedBlocks)
    throws QuotaExceededException {
  Preconditions.checkState(oldDstChild != null);
  List<INode> removedINodes = new ChunkedArrayList<>();
  final boolean filesDeleted;
  if (!oldDstChild.isInLatestSnapshot(dstIIP.getLatestSnapshotId())) {
    oldDstChild.destroyAndCollectBlocks(bsps, collectedBlocks, removedINodes);
    filesDeleted = true;
  } else {
    filesDeleted = oldDstChild.cleanSubtree(bsps, Snapshot.CURRENT_STATE_ID,
        dstIIP.getLatestSnapshotId(), collectedBlocks, removedINodes)
        .getNameSpace() >= 0;
  }
  fsd.getFSNamesystem().removeLeasesAndINodes(src, removedINodes, false);
  return filesDeleted;
}
Project: big-c    File: PBHelper.java
public static HdfsFileStatus convert(HdfsFileStatusProto fs) {
  if (fs == null) {
    return null;
  }
  return new HdfsLocatedFileStatus(
      fs.getLength(), fs.getFileType().equals(FileType.IS_DIR), 
      fs.getBlockReplication(), fs.getBlocksize(),
      fs.getModificationTime(), fs.getAccessTime(),
      PBHelper.convert(fs.getPermission()), fs.getOwner(), fs.getGroup(), 
      fs.getFileType().equals(FileType.IS_SYMLINK) ? 
          fs.getSymlink().toByteArray() : null,
      fs.getPath().toByteArray(),
      fs.hasFileId() ? fs.getFileId() : INodeId.GRANDFATHER_INODE_ID,
      fs.hasLocations() ? PBHelper.convert(fs.getLocations()) : null,
      fs.hasChildrenNum() ? fs.getChildrenNum() : -1,
      fs.hasFileEncryptionInfo() ? convert(fs.getFileEncryptionInfo()) : null,
      fs.hasStoragePolicy() ? (byte) fs.getStoragePolicy()
          : BlockStoragePolicySuite.ID_UNSPECIFIED);
}
Project: big-c    File: FileWithSnapshotFeature.java
public QuotaCounts cleanFile(final BlockStoragePolicySuite bsps,
    final INodeFile file, final int snapshotId,
    int priorSnapshotId, final BlocksMapUpdateInfo collectedBlocks,
    final List<INode> removedINodes) {
  if (snapshotId == Snapshot.CURRENT_STATE_ID) {
    // delete the current file while the file has the snapshot feature
    if (!isCurrentFileDeleted()) {
      file.recordModification(priorSnapshotId);
      deleteCurrentFile();
    }
    collectBlocksAndClear(bsps, file, collectedBlocks, removedINodes);
    return new QuotaCounts.Builder().build();
  } else { // delete the snapshot
    priorSnapshotId = getDiffs().updatePrior(snapshotId, priorSnapshotId);
    return diffs.deleteSnapshotDiff(bsps, snapshotId, priorSnapshotId, file,
        collectedBlocks, removedINodes);
  }
}
Project: big-c    File: FileWithSnapshotFeature.java
/**
 * If some blocks at the end of the block list no longer belong to
 * any inode, collect them and update the block list.
 */
public void collectBlocksAndClear(final BlockStoragePolicySuite bsps, final INodeFile file,
    final BlocksMapUpdateInfo info, final List<INode> removedINodes) {
  // check if everything is deleted.
  if (isCurrentFileDeleted() && getDiffs().asList().isEmpty()) {
    file.destroyAndCollectBlocks(bsps, info, removedINodes);
    return;
  }
  // find max file size.
  final long max;
  FileDiff diff = getDiffs().getLast();
  if (isCurrentFileDeleted()) {
    max = diff == null ? 0 : diff.getFileSize();
  } else { 
    max = file.computeFileSize();
  }

  // Collect blocks that should be deleted
  FileDiff last = diffs.getLast();
  BlockInfoContiguous[] snapshotBlocks = last == null ? null : last.getBlocks();
  if (snapshotBlocks == null) {
    file.collectBlocksBeyondMax(max, info);
  } else {
    file.collectBlocksBeyondSnapshot(snapshotBlocks, info);
  }
}
Project: big-c    File: DirectoryWithSnapshotFeature.java
/** clear the created list */
private QuotaCounts destroyCreatedList(
    final BlockStoragePolicySuite bsps,
    final INodeDirectory currentINode,
    final BlocksMapUpdateInfo collectedBlocks,
    final List<INode> removedINodes) {
  QuotaCounts counts = new QuotaCounts.Builder().build();
  final List<INode> createdList = getList(ListType.CREATED);
  for (INode c : createdList) {
    c.computeQuotaUsage(bsps, counts, true);
    c.destroyAndCollectBlocks(bsps, collectedBlocks, removedINodes);
    // c should be contained in the children list, remove it
    currentINode.removeChild(c);
  }
  createdList.clear();
  return counts;
}