Java 类org.apache.hadoop.hdfs.server.namenode.snapshot.FileWithSnapshotFeature 实例源码

项目:hadoop    文件:INodeFile.java   
/**
 * Destroy this file entirely: queue all of its blocks (including blocks
 * only referenced from snapshot diffs) for deletion, detach them from this
 * inode, release the ACL feature, and register this inode as removed.
 */
@Override
public void destroyAndCollectBlocks(BlockStoragePolicySuite bsps,
    BlocksMapUpdateInfo collectedBlocks, final List<INode> removedINodes) {
  // Queue every block for deletion and break the block -> inode
  // back-reference so the blocks map no longer points here.
  if (blocks != null && collectedBlocks != null) {
    for (BlockInfoContiguous blk : blocks) {
      collectedBlocks.addDeleteBlock(blk);
      blk.setBlockCollection(null);
    }
  }
  setBlocks(BlockInfoContiguous.EMPTY_ARRAY);
  // Release the ACL feature, if present.
  if (getAclFeature() != null) {
    AclStorage.removeAclFeature(getAclFeature());
  }
  clear();
  removedINodes.add(this);
  // Blocks that only live in snapshot diffs must be reclaimed as well.
  FileWithSnapshotFeature sf = getFileWithSnapshotFeature();
  if (sf != null) {
    sf.getDiffs().destroyAndCollectSnapshotBlocks(collectedBlocks);
    sf.clearDiffs();
  }
}
项目:hadoop    文件:INodeFile.java   
/**
 * Drop, from the set of blocks queued for deletion, every block that is
 * still referenced by a snapshot earlier than {@code snapshotId}, so that
 * snapshot data is not destroyed.
 *
 * @param snapshotId      the snapshot being removed
 * @param collectedBlocks blocks scheduled for deletion; may be null
 */
void excludeSnapshotBlocks(int snapshotId,
                           BlocksMapUpdateInfo collectedBlocks) {
  // Nothing queued for deletion -> nothing to exclude.
  if (collectedBlocks == null || collectedBlocks.getToDeleteList().isEmpty()) {
    return;
  }
  // No snapshot feature -> no snapshot blocks to protect.
  if (getFileWithSnapshotFeature() == null) {
    return;
  }
  final BlockInfoContiguous[] snapshotBlocks =
      getDiffs().findEarlierSnapshotBlocks(snapshotId);
  if (snapshotBlocks == null) {
    return;
  }
  final List<Block> pendingDeletes = collectedBlocks.getToDeleteList();
  for (Block snapshotBlock : snapshotBlocks) {
    if (pendingDeletes.contains(snapshotBlock)) {
      collectedBlocks.removeDeleteBlock(snapshotBlock);
    }
  }
}
项目:aliyun-oss-hadoop-fs    文件:INodeFile.java   
/**
 * Preferred replication for this file's blocks: the maximum of the current
 * replication and any replication recorded in snapshot diffs. For striped
 * files the value is derived from the erasure coding policy instead.
 */
public short getPreferredBlockReplication() {
  short max = getFileReplication(CURRENT_STATE_ID);
  final FileWithSnapshotFeature sf = this.getFileWithSnapshotFeature();
  if (sf != null) {
    final short maxInSnapshot = sf.getMaxBlockRepInDiffs(null);
    // A deleted current file is represented only by its snapshots.
    if (sf.isCurrentFileDeleted()) {
      return maxInSnapshot;
    }
    if (maxInSnapshot > max) {
      max = maxInSnapshot;
    }
  }
  if (!isStriped()) {
    return max;
  }
  // TODO support more policies based on policyId
  final ErasureCodingPolicy ecPolicy =
      ErasureCodingPolicyManager.getSystemDefaultPolicy();
  return (short) (ecPolicy.getNumDataUnits() + ecPolicy.getNumParityUnits());
}
项目:aliyun-oss-hadoop-fs    文件:INodeFile.java   
/**
 * Destroy this file: account the reclaimed quota, clear the file state,
 * reclaim snapshot-only blocks, and report the file to the reclaim
 * context if it was under construction.
 */
@Override
public void destroyAndCollectBlocks(ReclaimContext reclaimContext) {
  // TODO pass in the storage policy
  reclaimContext.quotaDelta().add(computeQuotaUsage(reclaimContext.bsps,
      false));
  clearFile(reclaimContext);
  FileWithSnapshotFeature sf = getFileWithSnapshotFeature();
  if (sf != null) {
    // Blocks that only live in snapshot diffs must be reclaimed too.
    sf.getDiffs().destroyAndCollectSnapshotBlocks(
        reclaimContext.collectedBlocks);
    sf.clearDiffs();
  }
  if (isUnderConstruction() && reclaimContext.removedUCFiles != null) {
    // Let the context track removed under-construction files by id.
    reclaimContext.removedUCFiles.add(getId());
  }
}
项目:aliyun-oss-hadoop-fs    文件:INodeFile.java   
/**
 * Drop, from the set of blocks queued for deletion, every block that is
 * still referenced by a snapshot earlier than {@code snapshotId}, so that
 * snapshot data is not destroyed.
 *
 * @param snapshotId      the snapshot being removed
 * @param collectedBlocks blocks scheduled for deletion; may be null
 */
void excludeSnapshotBlocks(int snapshotId,
                           BlocksMapUpdateInfo collectedBlocks) {
  // Nothing queued for deletion -> nothing to exclude.
  if (collectedBlocks == null || collectedBlocks.getToDeleteList().isEmpty()) {
    return;
  }
  // No snapshot feature -> no snapshot blocks to protect.
  if (getFileWithSnapshotFeature() == null) {
    return;
  }
  final BlockInfo[] snapshotBlocks =
      getDiffs().findEarlierSnapshotBlocks(snapshotId);
  if (snapshotBlocks == null) {
    return;
  }
  final List<BlockInfo> pendingDeletes = collectedBlocks.getToDeleteList();
  for (BlockInfo snapshotBlock : snapshotBlocks) {
    if (pendingDeletes.contains(snapshotBlock)) {
      collectedBlocks.removeDeleteBlock(snapshotBlock);
    }
  }
}
项目:big-c    文件:INodeFile.java   
/**
 * Destroy this file entirely: queue all of its blocks (including blocks
 * only referenced from snapshot diffs) for deletion, detach them from this
 * inode, release the ACL feature, and register this inode as removed.
 */
@Override
public void destroyAndCollectBlocks(BlockStoragePolicySuite bsps,
    BlocksMapUpdateInfo collectedBlocks, final List<INode> removedINodes) {
  // Queue every block for deletion and break the block -> inode
  // back-reference so the blocks map no longer points here.
  if (blocks != null && collectedBlocks != null) {
    for (BlockInfoContiguous blk : blocks) {
      collectedBlocks.addDeleteBlock(blk);
      blk.setBlockCollection(null);
    }
  }
  setBlocks(null);
  // Release the ACL feature, if present.
  if (getAclFeature() != null) {
    AclStorage.removeAclFeature(getAclFeature());
  }
  clear();
  removedINodes.add(this);
  // Blocks that only live in snapshot diffs must be reclaimed as well.
  FileWithSnapshotFeature sf = getFileWithSnapshotFeature();
  if (sf != null) {
    sf.getDiffs().destroyAndCollectSnapshotBlocks(collectedBlocks);
    sf.clearDiffs();
  }
}
项目:big-c    文件:INodeFile.java   
/**
 * Drop, from the set of blocks queued for deletion, every block that is
 * still referenced by a snapshot earlier than {@code snapshotId}, so that
 * snapshot data is not destroyed.
 *
 * @param snapshotId      the snapshot being removed
 * @param collectedBlocks blocks scheduled for deletion; may be null
 */
void excludeSnapshotBlocks(int snapshotId,
                           BlocksMapUpdateInfo collectedBlocks) {
  // Nothing queued for deletion -> nothing to exclude.
  if (collectedBlocks == null || collectedBlocks.getToDeleteList().isEmpty()) {
    return;
  }
  // No snapshot feature -> no snapshot blocks to protect.
  if (getFileWithSnapshotFeature() == null) {
    return;
  }
  final BlockInfoContiguous[] snapshotBlocks =
      getDiffs().findEarlierSnapshotBlocks(snapshotId);
  if (snapshotBlocks == null) {
    return;
  }
  final List<Block> pendingDeletes = collectedBlocks.getToDeleteList();
  for (Block snapshotBlock : snapshotBlocks) {
    if (pendingDeletes.contains(snapshotBlock)) {
      collectedBlocks.removeDeleteBlock(snapshotBlock);
    }
  }
}
项目:hadoop-2.6.0-cdh5.4.3    文件:INodeFile.java   
/**
 * Destroy this file: queue all of its blocks for deletion, detach them
 * from this inode, and clear any snapshot diffs.
 */
@Override
public void destroyAndCollectBlocks(BlocksMapUpdateInfo collectedBlocks,
    final List<INode> removedINodes) {
  if (blocks != null && collectedBlocks != null) {
    for (BlockInfo blk : blocks) {
      collectedBlocks.addDeleteBlock(blk);
      // Break the back-reference from the block to this file.
      blk.setBlockCollection(null);
    }
  }
  setBlocks(null);
  clear();
  removedINodes.add(this);

  FileWithSnapshotFeature sf = getFileWithSnapshotFeature();
  if (sf != null) {
    sf.clearDiffs();
  }
}
项目:hadoop-2.6.0-cdh5.4.3    文件:INodeFile.java   
/**
 * Compute the namespace and diskspace quota charged to this file up to
 * the given snapshot. A file always costs exactly one namespace entry;
 * the diskspace depends on which state (current or snapshot) is the
 * latest one covered by {@code lastSnapshotId}.
 *
 * @return the {@code counts} argument, updated in place
 */
@Override
public final Quota.Counts computeQuotaUsage(Quota.Counts counts,
    boolean useCache, int lastSnapshotId) {
  final long nsDelta = 1; // one namespace entry per file
  long dsDelta;
  FileWithSnapshotFeature sf = getFileWithSnapshotFeature();
  if (sf == null) {
    // No snapshots: charge the current on-disk consumption.
    dsDelta = diskspaceConsumed();
  } else {
    FileDiffList fileDiffList = sf.getDiffs();
    int last = fileDiffList.getLastSnapshotId();
    if (lastSnapshotId == Snapshot.CURRENT_STATE_ID
        || last == Snapshot.CURRENT_STATE_ID) {
      dsDelta = diskspaceConsumed();
    } else if (last < lastSnapshotId) {
      dsDelta = computeFileSize(true, false) * getFileReplication();
    } else {
      dsDelta = diskspaceConsumed(
          fileDiffList.getSnapshotById(lastSnapshotId));
    }
  }
  counts.add(Quota.NAMESPACE, nsDelta);
  counts.add(Quota.DISKSPACE, dsDelta);
  return counts;
}
项目:hadoop-2.6.0-cdh5.4.3    文件:INodeFile.java   
/**
 * Add the snapshot-related portion of this file to a content summary.
 * File length and diskspace are only counted for the latest state of the
 * file, i.e. either the current state or the last snapshot.
 *
 * @param counts accumulator updated in place
 */
private void computeContentSummary4Snapshot(final Content.Counts counts) {
  FileWithSnapshotFeature sf = getFileWithSnapshotFeature();
  if (sf != null) {
    final FileDiffList diffs = sf.getDiffs();
    final int n = diffs.asList().size();
    counts.add(Content.FILE, n);
    // The last diff holds the authoritative size once the current file is
    // deleted. Guard on n > 0 so an empty diff list cannot make
    // diffs.getLast() yield null and throw a NullPointerException (the
    // original code only guarded the LENGTH update, not DISKSPACE).
    if (n > 0 && sf.isCurrentFileDeleted()) {
      final long lastFileSize = diffs.getLast().getFileSize();
      counts.add(Content.LENGTH, lastFileSize);
      counts.add(Content.DISKSPACE, lastFileSize * getBlockReplication());
    }
  }
}
项目:FlexMap    文件:INodeFile.java   
/**
 * Destroy this file: queue all of its blocks for deletion, detach them
 * from this inode, and clear any snapshot diffs.
 */
@Override
public void destroyAndCollectBlocks(BlocksMapUpdateInfo collectedBlocks,
    final List<INode> removedINodes) {
  if (blocks != null && collectedBlocks != null) {
    for (BlockInfo blk : blocks) {
      collectedBlocks.addDeleteBlock(blk);
      // Break the back-reference from the block to this file.
      blk.setBlockCollection(null);
    }
  }
  setBlocks(null);
  clear();
  removedINodes.add(this);

  FileWithSnapshotFeature sf = getFileWithSnapshotFeature();
  if (sf != null) {
    sf.clearDiffs();
  }
}
项目:FlexMap    文件:INodeFile.java   
/**
 * Add the snapshot-related portion of this file to a content summary.
 * File length and diskspace are only counted for the latest state of the
 * file, i.e. either the current state or the last snapshot.
 *
 * @param counts accumulator updated in place
 */
private void computeContentSummary4Snapshot(final Content.Counts counts) {
  FileWithSnapshotFeature sf = getFileWithSnapshotFeature();
  if (sf != null) {
    final FileDiffList diffs = sf.getDiffs();
    final int n = diffs.asList().size();
    counts.add(Content.FILE, n);
    // The last diff holds the authoritative size once the current file is
    // deleted. Guard on n > 0 so an empty diff list cannot make
    // diffs.getLast() yield null and throw a NullPointerException (the
    // original code only guarded the LENGTH update, not DISKSPACE).
    if (n > 0 && sf.isCurrentFileDeleted()) {
      final long lastFileSize = diffs.getLast().getFileSize();
      counts.add(Content.LENGTH, lastFileSize);
      counts.add(Content.DISKSPACE, lastFileSize * getBlockReplication());
    }
  }
}
项目:hadoop-on-lustre2    文件:INodeFile.java   
/**
 * Clean the subtree rooted at this file when deleting a snapshot or the
 * current file. Delegates to the snapshot feature when present; otherwise
 * handles the two non-snapshot cases directly.
 *
 * @return the quota counts released by this operation
 */
@Override
public Quota.Counts cleanSubtree(final int snapshot, int priorSnapshotId,
    final BlocksMapUpdateInfo collectedBlocks,
    final List<INode> removedINodes, final boolean countDiffChange)
    throws QuotaExceededException {
  FileWithSnapshotFeature sf = getFileWithSnapshotFeature();
  if (sf != null) {
    // Snapshot bookkeeping exists: let the feature decide what to clean.
    return sf.cleanFile(this, snapshot, priorSnapshotId, collectedBlocks,
        removedINodes, countDiffChange);
  }
  Quota.Counts counts = Quota.Counts.newInstance();
  if (snapshot == CURRENT_STATE_ID && priorSnapshotId == NO_SNAPSHOT_ID) {
    // this only happens when deleting the current file and the file is not
    // in any snapshot
    computeQuotaUsage(counts, false);
    destroyAndCollectBlocks(collectedBlocks, removedINodes);
  } else if (snapshot == CURRENT_STATE_ID && priorSnapshotId != NO_SNAPSHOT_ID) {
    // when deleting the current file and the file is in snapshot, we should
    // clean the 0-sized block if the file is UC
    FileUnderConstructionFeature uc = getFileUnderConstructionFeature();
    if (uc != null) {
      uc.cleanZeroSizeBlock(this, collectedBlocks);
    }
  }
  return counts;
}
项目:hadoop-on-lustre2    文件:INodeFile.java   
/**
 * Destroy this file: queue all of its blocks for deletion, detach them
 * from this inode, and clear any snapshot diffs.
 */
@Override
public void destroyAndCollectBlocks(BlocksMapUpdateInfo collectedBlocks,
    final List<INode> removedINodes) {
  if (blocks != null && collectedBlocks != null) {
    for (BlockInfo blk : blocks) {
      collectedBlocks.addDeleteBlock(blk);
      // Break the back-reference from the block to this file.
      blk.setBlockCollection(null);
    }
  }
  setBlocks(null);
  clear();
  removedINodes.add(this);

  FileWithSnapshotFeature sf = getFileWithSnapshotFeature();
  if (sf != null) {
    sf.clearDiffs();
  }
}
项目:hadoop-on-lustre2    文件:INodeFile.java   
/**
 * Add the snapshot-related portion of this file to a content summary.
 * File length and diskspace are only counted for the latest state of the
 * file, i.e. either the current state or the last snapshot.
 *
 * @param counts accumulator updated in place
 */
private void computeContentSummary4Snapshot(final Content.Counts counts) {
  FileWithSnapshotFeature sf = getFileWithSnapshotFeature();
  if (sf != null) {
    final FileDiffList diffs = sf.getDiffs();
    final int n = diffs.asList().size();
    counts.add(Content.FILE, n);
    // The last diff holds the authoritative size once the current file is
    // deleted. Guard on n > 0 so an empty diff list cannot make
    // diffs.getLast() yield null and throw a NullPointerException (the
    // original code only guarded the LENGTH update, not DISKSPACE).
    if (n > 0 && sf.isCurrentFileDeleted()) {
      final long lastFileSize = diffs.getLast().getFileSize();
      counts.add(Content.LENGTH, lastFileSize);
      counts.add(Content.DISKSPACE, lastFileSize * getBlockReplication());
    }
  }
}
项目:hadoop    文件:INodeFile.java   
/**
 * Attach a new {@link FileWithSnapshotFeature} wrapping the given diff
 * list to this file. The file must not already carry the feature.
 *
 * @param diffs the initial diff list; callers may pass null
 * @return the newly attached feature
 */
public FileWithSnapshotFeature addSnapshotFeature(FileDiffList diffs) {
  Preconditions.checkState(!isWithSnapshot(),
      "File is already with snapshot");
  final FileWithSnapshotFeature snapshotFeature =
      new FileWithSnapshotFeature(diffs);
  addFeature(snapshotFeature);
  return snapshotFeature;
}
项目:hadoop    文件:INodeFile.java   
/**
 * @return the file attributes captured by the given snapshot, or this
 *         inode itself when the file has no snapshot feature.
 */
@Override
public INodeFileAttributes getSnapshotINode(final int snapshotId) {
  final FileWithSnapshotFeature sf = this.getFileWithSnapshotFeature();
  return sf == null ? this : sf.getDiffs().getSnapshotINode(snapshotId, this);
}
项目:hadoop    文件:INodeFile.java   
/**
 * Record an impending modification of this file under the given latest
 * snapshot: ensure a snapshot feature exists and save the current state
 * into the diff list.
 *
 * @param latestSnapshotId id of the latest snapshot containing this file
 * @param withBlocks       forwarded to saveSelf2Snapshot; presumably
 *                         controls whether the block list is captured in
 *                         the diff as well — TODO confirm against
 *                         FileDiffList
 */
public void recordModification(final int latestSnapshotId, boolean withBlocks) {
  if (isInLatestSnapshot(latestSnapshotId)
      && !shouldRecordInSrcSnapshot(latestSnapshotId)) {
    // the file is in snapshot, create a snapshot feature if it does not have
    FileWithSnapshotFeature sf = this.getFileWithSnapshotFeature();
    if (sf == null) {
      sf = addSnapshotFeature(null);
    }
    // record self in the diff list if necessary
    sf.getDiffs().saveSelf2Snapshot(latestSnapshotId, this, null, withBlocks);
  }
}
项目:hadoop    文件:INodeFile.java   
/**
 * @return this file's snapshot diff list, or null when the file carries
 *         no {@link FileWithSnapshotFeature}.
 */
public FileDiffList getDiffs() {
  final FileWithSnapshotFeature sf = this.getFileWithSnapshotFeature();
  return sf == null ? null : sf.getDiffs();
}
项目:hadoop    文件:INodeFile.java   
/**
 * @return the effective replication of this file's blocks: the maximum of
 *         the current replication and any replication recorded in
 *         snapshot diffs.
 */
@Override // BlockCollection
public short getBlockReplication() {
  final short current = getFileReplication(CURRENT_STATE_ID);
  final FileWithSnapshotFeature sf = this.getFileWithSnapshotFeature();
  if (sf == null) {
    return current;
  }
  final short maxInSnapshot = sf.getMaxBlockRepInDiffs();
  // A deleted current file is represented only by its snapshots.
  if (sf.isCurrentFileDeleted()) {
    return maxInSnapshot;
  }
  return maxInSnapshot > current ? maxInSnapshot : current;
}
项目:hadoop    文件:INodeFile.java   
/**
 * Clean the subtree rooted at this file when deleting a snapshot or the
 * current file. Delegates to the snapshot feature when present; otherwise
 * handles the two non-snapshot cases directly.
 *
 * @return the quota counts released by this operation
 */
@Override
public QuotaCounts cleanSubtree(BlockStoragePolicySuite bsps, final int snapshot,
                                int priorSnapshotId,
    final BlocksMapUpdateInfo collectedBlocks,
    final List<INode> removedINodes) {
  FileWithSnapshotFeature sf = getFileWithSnapshotFeature();
  if (sf != null) {
    // Snapshot bookkeeping exists: let the feature decide what to clean.
    return sf.cleanFile(bsps, this, snapshot, priorSnapshotId, collectedBlocks,
        removedINodes);
  }
  QuotaCounts counts = new QuotaCounts.Builder().build();
  if (snapshot == CURRENT_STATE_ID) {
    if (priorSnapshotId == NO_SNAPSHOT_ID) {
      // this only happens when deleting the current file and the file is not
      // in any snapshot
      computeQuotaUsage(bsps, counts, false);
      destroyAndCollectBlocks(bsps, collectedBlocks, removedINodes);
    } else {
      // when deleting the current file and the file is in snapshot, we should
      // clean the 0-sized block if the file is UC
      FileUnderConstructionFeature uc = getFileUnderConstructionFeature();
      if (uc != null) {
        uc.cleanZeroSizeBlock(this, collectedBlocks);
      }
    }
  }
  return counts;
}
项目:hadoop    文件:INodeFile.java   
/**
 * Add this file's contribution to a content summary computation: file
 * count (one per captured state when snapshotted), length, diskspace,
 * and per-storage-type space when a storage policy is set.
 *
 * @return the same computation context, updated in place
 */
@Override
public final ContentSummaryComputationContext computeContentSummary(
    final ContentSummaryComputationContext summary) {
  final ContentCounts counts = summary.getCounts();
  FileWithSnapshotFeature sf = getFileWithSnapshotFeature();
  long fileLen = 0;
  if (sf == null) {
    fileLen = computeFileSize();
    counts.addContent(Content.FILE, 1);
  } else {
    final FileDiffList diffs = sf.getDiffs();
    final int n = diffs.asList().size();
    // Each snapshot diff counts as one file towards the FILE count.
    counts.addContent(Content.FILE, n);
    if (n > 0 && sf.isCurrentFileDeleted()) {
      // Current file is gone: the last snapshot holds the final size.
      fileLen =  diffs.getLast().getFileSize();
    } else {
      fileLen = computeFileSize();
    }
  }
  counts.addContent(Content.LENGTH, fileLen);
  counts.addContent(Content.DISKSPACE, storagespaceConsumed());

  // Charge type space only for storage types that support type quotas.
  if (getStoragePolicyID() != ID_UNSPECIFIED){
    BlockStoragePolicy bsp = summary.getBlockStoragePolicySuite().
        getPolicy(getStoragePolicyID());
    List<StorageType> storageTypes = bsp.chooseStorageTypes(getFileReplication());
    for (StorageType t : storageTypes) {
      if (!t.supportTypeQuota()) {
        continue;
      }
      counts.addTypeSpace(t, fileLen);
    }
  }
  return summary;
}
项目:hadoop    文件:INodeFile.java   
/**
 * Compute the size of this file as of the given snapshot. When the id is
 * the current state, or no matching diff exists, fall back to the size of
 * the current file state.
 *
 * @param snapshotId the snapshot to read the size from, or
 *                   CURRENT_STATE_ID for the live file
 * @return the file size in bytes
 */
public final long computeFileSize(int snapshotId) {
  final FileWithSnapshotFeature sf = this.getFileWithSnapshotFeature();
  if (snapshotId != CURRENT_STATE_ID && sf != null) {
    final FileDiff diff = sf.getDiffs().getDiffById(snapshotId);
    if (diff != null) {
      return diff.getFileSize();
    }
  }
  // No snapshot-recorded size applies; use the current state.
  return computeFileSize(true, false);
}
项目:hadoop    文件:INodeFile.java   
/**
 * Total bytes consumed by this file across the current state and every
 * snapshot, without applying the replication factor. Blocks shared
 * between states are counted once.
 */
public final long storagespaceConsumedNoReplication() {
  final FileWithSnapshotFeature sf = getFileWithSnapshotFeature();
  if (sf == null) {
    return computeFileSize(true, true);
  }

  // Union of the current block list and every block referenced by a
  // snapshot diff; the set de-duplicates shared blocks.
  final Set<Block> distinctBlocks =
      new HashSet<Block>(Arrays.asList(getBlocks()));
  for (FileDiff diff : sf.getDiffs().asList()) {
    final BlockInfoContiguous[] diffBlocks = diff.getBlocks();
    if (diffBlocks != null) {
      distinctBlocks.addAll(Arrays.asList(diffBlocks));
    }
  }
  long size = 0;
  for (Block block : distinctBlocks) {
    size += block.getNumBytes();
  }
  // An under-construction last block is charged at the full preferred
  // block size.
  final BlockInfoContiguous lastBlock = getLastBlock();
  if (lastBlock instanceof BlockInfoContiguousUnderConstruction) {
    size += getPreferredBlockSize() - lastBlock.getNumBytes();
  }
  return size;
}
项目:hadoop    文件:INodeFile.java   
/**
 * compute the quota usage change for a truncate op
 * @param newLength the length for truncation
 * @return the quota usage delta (not considering replication factor)
 */
long computeQuotaDeltaForTruncate(final long newLength) {
  final BlockInfoContiguous[] blocks = getBlocks();
  if (blocks == null || blocks.length == 0) {
    return 0;
  }

  // Walk blocks until the accumulated size reaches newLength; n ends up
  // one past the last block that is (at least partly) kept.
  int n = 0;
  long size = 0;
  for (; n < blocks.length && newLength > size; n++) {
    size += blocks[n].getNumBytes();
  }
  final boolean onBoundary = size == newLength;

  // Sum the bytes removed by the truncate: every block past the cut,
  // including the partially-truncated block when not on a boundary.
  long truncateSize = 0;
  for (int i = (onBoundary ? n : n - 1); i < blocks.length; i++) {
    truncateSize += blocks[i].getNumBytes();
  }

  FileWithSnapshotFeature sf = getFileWithSnapshotFeature();
  if (sf != null) {
    // Blocks still referenced by the latest snapshot diff are not
    // reclaimed by the truncate, so subtract them from the delta.
    FileDiff diff = sf.getDiffs().getLast();
    BlockInfoContiguous[] sblocks = diff != null ? diff.getBlocks() : null;
    if (sblocks != null) {
      for (int i = (onBoundary ? n : n-1); i < blocks.length
          && i < sblocks.length && blocks[i].equals(sblocks[i]); i++) {
        truncateSize -= blocks[i].getNumBytes();
      }
    }
  }
  // Off-boundary truncates keep a partial block that may grow back up to
  // the preferred block size, hence the extra charge.
  return onBoundary ? -truncateSize : (getPreferredBlockSize() - truncateSize);
}
项目:hadoop    文件:INodeFile.java   
/**
 * @return true if the block is contained in the latest snapshot of this
 *         file, false otherwise (including when the file has no
 *         snapshot feature).
 */
boolean isBlockInLatestSnapshot(BlockInfoContiguous block) {
  final FileWithSnapshotFeature sf = this.getFileWithSnapshotFeature();
  if (sf == null || sf.getDiffs() == null) {
    return false;
  }
  final BlockInfoContiguous[] snapshotBlocks = getDiffs()
      .findEarlierSnapshotBlocks(getDiffs().getLastSnapshotId());
  if (snapshotBlocks == null) {
    return false;
  }
  return Arrays.asList(snapshotBlocks).contains(block);
}
项目:aliyun-oss-hadoop-fs    文件:INodeFile.java   
/**
 * Attach a new {@link FileWithSnapshotFeature} wrapping the given diff
 * list to this file. The file must not already carry the feature.
 *
 * @param diffs the initial diff list; callers may pass null
 * @return the newly attached feature
 */
public FileWithSnapshotFeature addSnapshotFeature(FileDiffList diffs) {
  Preconditions.checkState(!isWithSnapshot(),
      "File is already with snapshot");
  final FileWithSnapshotFeature snapshotFeature =
      new FileWithSnapshotFeature(diffs);
  addFeature(snapshotFeature);
  return snapshotFeature;
}
项目:aliyun-oss-hadoop-fs    文件:INodeFile.java   
/**
 * @return the file attributes captured by the given snapshot, or this
 *         inode itself when the file has no snapshot feature.
 */
@Override
public INodeFileAttributes getSnapshotINode(final int snapshotId) {
  final FileWithSnapshotFeature sf = this.getFileWithSnapshotFeature();
  return sf == null ? this : sf.getDiffs().getSnapshotINode(snapshotId, this);
}
项目:aliyun-oss-hadoop-fs    文件:INodeFile.java   
/**
 * Record an impending modification of this file under the given latest
 * snapshot: ensure a snapshot feature exists and save the current state
 * into the diff list.
 *
 * @param latestSnapshotId id of the latest snapshot containing this file
 * @param withBlocks       forwarded to saveSelf2Snapshot; presumably
 *                         controls whether the block list is captured in
 *                         the diff as well — TODO confirm against
 *                         FileDiffList
 */
public void recordModification(final int latestSnapshotId, boolean withBlocks) {
  if (isInLatestSnapshot(latestSnapshotId)
      && !shouldRecordInSrcSnapshot(latestSnapshotId)) {
    // the file is in snapshot, create a snapshot feature if it does not have
    FileWithSnapshotFeature sf = this.getFileWithSnapshotFeature();
    if (sf == null) {
      sf = addSnapshotFeature(null);
    }
    // record self in the diff list if necessary
    sf.getDiffs().saveSelf2Snapshot(latestSnapshotId, this, null, withBlocks);
  }
}
项目:aliyun-oss-hadoop-fs    文件:INodeFile.java   
/**
 * @return this file's snapshot diff list, or null when the file carries
 *         no {@link FileWithSnapshotFeature}.
 */
public FileDiffList getDiffs() {
  final FileWithSnapshotFeature sf = this.getFileWithSnapshotFeature();
  return sf == null ? null : sf.getDiffs();
}
项目:aliyun-oss-hadoop-fs    文件:INodeFile.java   
/**
 * Clean this file for a snapshot deletion (or current-file deletion).
 * Delegates to the snapshot feature when present; otherwise handles the
 * non-snapshot cases directly via the reclaim context.
 */
@Override
public void cleanSubtree(ReclaimContext reclaimContext,
    final int snapshot, int priorSnapshotId) {
  FileWithSnapshotFeature sf = getFileWithSnapshotFeature();
  if (sf != null) {
    // TODO: avoid calling getStoragePolicyID
    sf.cleanFile(reclaimContext, this, snapshot, priorSnapshotId,
        getStoragePolicyID());
  } else {
    if (snapshot == CURRENT_STATE_ID) {
      if (priorSnapshotId == NO_SNAPSHOT_ID) {
        // this only happens when deleting the current file and it is not
        // in any snapshot
        destroyAndCollectBlocks(reclaimContext);
      } else {
        FileUnderConstructionFeature uc = getFileUnderConstructionFeature();
        // when deleting the current file and it is in snapshot, we should
        // clean the 0-sized block if the file is UC
        if (uc != null) {
          uc.cleanZeroSizeBlock(this, reclaimContext.collectedBlocks);
          if (reclaimContext.removedUCFiles != null) {
            reclaimContext.removedUCFiles.add(getId());
          }
        }
      }
    }
  }
}
项目:aliyun-oss-hadoop-fs    文件:INodeFile.java   
/**
 * Compute the size of this file as of the given snapshot. When the id is
 * the current state, or no matching diff exists, fall back to the size of
 * the current file state.
 *
 * @param snapshotId the snapshot to read the size from, or
 *                   CURRENT_STATE_ID for the live file
 * @return the file size in bytes
 */
public final long computeFileSize(int snapshotId) {
  final FileWithSnapshotFeature sf = this.getFileWithSnapshotFeature();
  if (snapshotId != CURRENT_STATE_ID && sf != null) {
    final FileDiff diff = sf.getDiffs().getDiffById(snapshotId);
    if (diff != null) {
      return diff.getFileSize();
    }
  }
  // No snapshot-recorded size applies; use the current state.
  return computeFileSize(true, false);
}
项目:aliyun-oss-hadoop-fs    文件:INodeFile.java   
/**
 * Compute the storage space and per-storage-type space consumed by this
 * file's contiguous blocks across all states (current and snapshots),
 * counting blocks shared between states only once.
 *
 * @param bsp storage policy used to attribute type space; may be null, in
 *            which case only raw storage space is charged
 * @return the consumed quota counts
 */
public final QuotaCounts storagespaceConsumedContiguous(
    BlockStoragePolicy bsp) {
  QuotaCounts counts = new QuotaCounts.Builder().build();
  final Iterable<BlockInfo> blocks;
  FileWithSnapshotFeature sf = getFileWithSnapshotFeature();
  if (sf == null) {
    blocks = Arrays.asList(getBlocks());
  } else {
    // Collect all distinct blocks
    Set<BlockInfo> allBlocks = new HashSet<>(Arrays.asList(getBlocks()));
    List<FileDiff> diffs = sf.getDiffs().asList();
    for(FileDiff diff : diffs) {
      BlockInfo[] diffBlocks = diff.getBlocks();
      if (diffBlocks != null) {
        allBlocks.addAll(Arrays.asList(diffBlocks));
      }
    }
    blocks = allBlocks;
  }

  final short replication = getPreferredBlockReplication();
  for (BlockInfo b : blocks) {
    // Incomplete blocks are charged at the full preferred block size.
    long blockSize = b.isComplete() ? b.getNumBytes() :
        getPreferredBlockSize();
    counts.addStorageSpace(blockSize * replication);
    if (bsp != null) {
      List<StorageType> types = bsp.chooseStorageTypes(replication);
      for (StorageType t : types) {
        // Only storage types that support type quotas are charged.
        if (t.supportTypeQuota()) {
          counts.addTypeSpace(t, blockSize);
        }
      }
    }
  }
  return counts;
}
项目:aliyun-oss-hadoop-fs    文件:INodeFile.java   
/**
 * @return true if the block is contained in the latest snapshot of this
 *         file, false otherwise (including when the file has no
 *         snapshot feature).
 */
boolean isBlockInLatestSnapshot(BlockInfo block) {
  final FileWithSnapshotFeature sf = this.getFileWithSnapshotFeature();
  if (sf == null || sf.getDiffs() == null) {
    return false;
  }
  final BlockInfo[] snapshotBlocks = getDiffs()
      .findEarlierSnapshotBlocks(getDiffs().getLastSnapshotId());
  if (snapshotBlocks == null) {
    return false;
  }
  return Arrays.asList(snapshotBlocks).contains(block);
}
项目:aliyun-oss-hadoop-fs    文件:TestTruncateQuotaUpdate.java   
/**
 * Test helper: attach a FileWithSnapshotFeature to the given file whose
 * single (mocked) diff reports the supplied blocks.
 */
private static void addSnapshotFeature(INodeFile file, BlockInfo[] blocks) {
  FileDiff diff = mock(FileDiff.class);
  when(diff.getBlocks()).thenReturn(blocks);
  FileDiffList diffList = new FileDiffList();
  @SuppressWarnings("unchecked")
  ArrayList<FileDiff> diffs = ((ArrayList<FileDiff>)Whitebox.getInternalState
      (diffList, "diffs"));
  // Inject the mocked diff straight into the private "diffs" field.
  diffs.add(diff);
  FileWithSnapshotFeature sf = new FileWithSnapshotFeature(diffList);
  file.addFeature(sf);
}
项目:big-c    文件:INodeFile.java   
/**
 * Attach a new {@link FileWithSnapshotFeature} wrapping the given diff
 * list to this file. The file must not already carry the feature.
 *
 * @param diffs the initial diff list; callers may pass null
 * @return the newly attached feature
 */
public FileWithSnapshotFeature addSnapshotFeature(FileDiffList diffs) {
  Preconditions.checkState(!isWithSnapshot(),
      "File is already with snapshot");
  final FileWithSnapshotFeature snapshotFeature =
      new FileWithSnapshotFeature(diffs);
  addFeature(snapshotFeature);
  return snapshotFeature;
}
项目:big-c    文件:INodeFile.java   
/**
 * @return the file attributes captured by the given snapshot, or this
 *         inode itself when the file has no snapshot feature.
 */
@Override
public INodeFileAttributes getSnapshotINode(final int snapshotId) {
  final FileWithSnapshotFeature sf = this.getFileWithSnapshotFeature();
  return sf == null ? this : sf.getDiffs().getSnapshotINode(snapshotId, this);
}
项目:big-c    文件:INodeFile.java   
/**
 * Record an impending modification of this file under the given latest
 * snapshot: ensure a snapshot feature exists and save the current state
 * into the diff list.
 *
 * @param latestSnapshotId id of the latest snapshot containing this file
 * @param withBlocks       forwarded to saveSelf2Snapshot; presumably
 *                         controls whether the block list is captured in
 *                         the diff as well — TODO confirm against
 *                         FileDiffList
 */
public void recordModification(final int latestSnapshotId, boolean withBlocks) {
  if (isInLatestSnapshot(latestSnapshotId)
      && !shouldRecordInSrcSnapshot(latestSnapshotId)) {
    // the file is in snapshot, create a snapshot feature if it does not have
    FileWithSnapshotFeature sf = this.getFileWithSnapshotFeature();
    if (sf == null) {
      sf = addSnapshotFeature(null);
    }
    // record self in the diff list if necessary
    sf.getDiffs().saveSelf2Snapshot(latestSnapshotId, this, null, withBlocks);
  }
}
项目:big-c    文件:INodeFile.java   
/**
 * @return this file's snapshot diff list, or null when the file carries
 *         no {@link FileWithSnapshotFeature}.
 */
public FileDiffList getDiffs() {
  final FileWithSnapshotFeature sf = this.getFileWithSnapshotFeature();
  return sf == null ? null : sf.getDiffs();
}
项目:big-c    文件:INodeFile.java   
/**
 * @return the effective replication of this file's blocks: the maximum of
 *         the current replication and any replication recorded in
 *         snapshot diffs.
 */
@Override // BlockCollection
public short getBlockReplication() {
  final short current = getFileReplication(CURRENT_STATE_ID);
  final FileWithSnapshotFeature sf = this.getFileWithSnapshotFeature();
  if (sf == null) {
    return current;
  }
  final short maxInSnapshot = sf.getMaxBlockRepInDiffs();
  // A deleted current file is represented only by its snapshots.
  if (sf.isCurrentFileDeleted()) {
    return maxInSnapshot;
  }
  return maxInSnapshot > current ? maxInSnapshot : current;
}