Example usages of the Java class org.apache.hadoop.hdfs.server.namenode.snapshot.FileDiff, collected from open-source projects

项目:aliyun-oss-hadoop-fs    文件:INodeFile.java   
/** @return blocks of the file corresponding to the snapshot. */
public BlockInfo[] getBlocks(int snapshot) {
  if (snapshot == CURRENT_STATE_ID || getDiffs() == null) {
    return getBlocks();
  }
  // Truncate may record the pre-truncation block list in the snapshot diff,
  // so consult the diff for this snapshot first.
  // (Note: FileDiff can currently only store contiguous blocks.)
  final FileDiff diff = getDiffs().getDiffById(snapshot);
  BlockInfo[] result = (diff == null) ? getBlocks() : diff.getBlocks();
  if (result == null) {
    // No blocks recorded at this snapshot: take them from the next later
    // snapshot that has any, falling back to the current file blocks.
    result = getDiffs().findLaterSnapshotBlocks(snapshot);
    if (result == null) {
      result = getBlocks();
    }
  }
  return result;
}
项目:hadoop    文件:INodeFile.java   
/** @return blocks of the file corresponding to the snapshot. */
public BlockInfoContiguous[] getBlocks(int snapshot) {
  // Live state (or no snapshot data at all): just the current block list.
  if (snapshot == CURRENT_STATE_ID || getDiffs() == null) {
    return getBlocks();
  }
  // Truncate records the pre-truncation block list in the snapshot diff,
  // so look there first.
  FileDiff diff = getDiffs().getDiffById(snapshot);
  BlockInfoContiguous[] snapshotBlocks =
      diff == null ? getBlocks() : diff.getBlocks();
  if (snapshotBlocks != null) {
    return snapshotBlocks;
  }
  // Blocks are not in the current snapshot
  // Find next snapshot with blocks present or return current file blocks
  snapshotBlocks = getDiffs().findLaterSnapshotBlocks(snapshot);
  return (snapshotBlocks == null) ? getBlocks() : snapshotBlocks;
}
项目:hadoop    文件:INodeFile.java   
/**
 * Compute file size of the current file if the given snapshot is null;
 * otherwise, get the file size from the given snapshot.
 */
public final long computeFileSize(int snapshotId) {
  FileWithSnapshotFeature sf = this.getFileWithSnapshotFeature();
  // A snapshot size is only recorded when the file has snapshot state and a
  // concrete snapshot id (not CURRENT_STATE_ID) is requested.
  if (snapshotId != CURRENT_STATE_ID && sf != null) {
    final FileDiff d = sf.getDiffs().getDiffById(snapshotId);
    if (d != null) {
      // The diff stores the file length as of that snapshot.
      return d.getFileSize();
    }
  }
  // Fall back to the current file size. NOTE(review): the boolean flags
  // presumably control how the last under-construction block is counted --
  // confirm against computeFileSize(boolean, boolean).
  return computeFileSize(true, false);
}
项目:hadoop    文件:INodeFile.java   
/**
 * Storage space consumed by this file across the current state and all
 * snapshot diffs, in bytes, NOT multiplied by the replication factor.
 */
public final long storagespaceConsumedNoReplication() {
  FileWithSnapshotFeature sf = getFileWithSnapshotFeature();
  if (sf == null) {
    // No snapshot state: consumed space is just the current file size.
    return computeFileSize(true, true);
  }

  // A block may appear both in the current list and in snapshot diffs;
  // union into a set so every distinct block is charged exactly once.
  final Set<Block> distinct = new HashSet<Block>();
  distinct.addAll(Arrays.asList(getBlocks()));
  for (FileDiff d : sf.getDiffs().asList()) {
    final BlockInfoContiguous[] diffBlocks = d.getBlocks();
    if (diffBlocks != null) {
      distinct.addAll(Arrays.asList(diffBlocks));
    }
  }

  long total = 0;
  for (Block b : distinct) {
    total += b.getNumBytes();
  }

  // An under-construction last block is charged at the preferred block size.
  final BlockInfoContiguous last = getLastBlock();
  if (last instanceof BlockInfoContiguousUnderConstruction) {
    total += getPreferredBlockSize() - last.getNumBytes();
  }
  return total;
}
项目:hadoop    文件:INodeFile.java   
/**
 * compute the quota usage change for a truncate op
 * @param newLength the length for truncation
 * @return the quota usage delta (not considering replication factor)
 */
long computeQuotaDeltaForTruncate(final long newLength) {
  final BlockInfoContiguous[] blocks = getBlocks();
  if (blocks == null || blocks.length == 0) {
    return 0;
  }

  // Walk forward until the cumulative size reaches newLength; n ends as the
  // index just past the block in which the new length falls.
  int n = 0;
  long size = 0;
  for (; n < blocks.length && newLength > size; n++) {
    size += blocks[n].getNumBytes();
  }
  // onBoundary: newLength lands exactly on a block boundary, so no block
  // has to be partially kept.
  final boolean onBoundary = size == newLength;

  // Bytes removed by the truncate: every block past the cut; when not on a
  // boundary the straddling block (index n-1) is included as well.
  long truncateSize = 0;
  for (int i = (onBoundary ? n : n - 1); i < blocks.length; i++) {
    truncateSize += blocks[i].getNumBytes();
  }

  FileWithSnapshotFeature sf = getFileWithSnapshotFeature();
  if (sf != null) {
    FileDiff diff = sf.getDiffs().getLast();
    BlockInfoContiguous[] sblocks = diff != null ? diff.getBlocks() : null;
    if (sblocks != null) {
      // Blocks still referenced by the latest snapshot are not reclaimed by
      // the truncate, so subtract them back out of the delta.
      for (int i = (onBoundary ? n : n-1); i < blocks.length
          && i < sblocks.length && blocks[i].equals(sblocks[i]); i++) {
        truncateSize -= blocks[i].getNumBytes();
      }
    }
  }
  // Off-boundary truncates add one preferred-size block: the tail block is
  // copied during truncate recovery, so its space is charged in advance.
  return onBoundary ? -truncateSize : (getPreferredBlockSize() - truncateSize);
}
项目:aliyun-oss-hadoop-fs    文件:INodeFile.java   
/**
 * Compute the size of this file.
 * @param snapshotId the snapshot to read from, or CURRENT_STATE_ID for the
 *                   current (live) file state
 * @return the size recorded in the snapshot diff when one exists for the
 *         given id; otherwise the size of the current file
 */
public final long computeFileSize(int snapshotId) {
  final FileWithSnapshotFeature sf = this.getFileWithSnapshotFeature();
  if (snapshotId == CURRENT_STATE_ID || sf == null) {
    return computeFileSize(true, false);
  }
  final FileDiff diff = sf.getDiffs().getDiffById(snapshotId);
  return (diff != null) ? diff.getFileSize() : computeFileSize(true, false);
}
项目:aliyun-oss-hadoop-fs    文件:INodeFile.java   
/**
 * Compute the quota counts consumed by this file's contiguous blocks,
 * including blocks only referenced from snapshot diffs.
 * @param bsp storage policy used to charge per-storage-type quota, or null
 *            to charge raw storage space only
 * @return the accumulated storage-space and storage-type counts
 */
public final QuotaCounts storagespaceConsumedContiguous(
    BlockStoragePolicy bsp) {
  final QuotaCounts counts = new QuotaCounts.Builder().build();
  final FileWithSnapshotFeature sf = getFileWithSnapshotFeature();

  final Iterable<BlockInfo> blocks;
  if (sf == null) {
    blocks = Arrays.asList(getBlocks());
  } else {
    // Union of the current block list and every diff's block list, so each
    // distinct block is charged exactly once.
    final Set<BlockInfo> distinct = new HashSet<>(Arrays.asList(getBlocks()));
    for (FileDiff d : sf.getDiffs().asList()) {
      final BlockInfo[] diffBlocks = d.getBlocks();
      if (diffBlocks != null) {
        distinct.addAll(Arrays.asList(diffBlocks));
      }
    }
    blocks = distinct;
  }

  final short replication = getPreferredBlockReplication();
  for (BlockInfo b : blocks) {
    // Incomplete blocks are charged at the full preferred block size.
    final long charged =
        b.isComplete() ? b.getNumBytes() : getPreferredBlockSize();
    counts.addStorageSpace(charged * replication);
    if (bsp != null) {
      // Each chosen storage type that supports quota is charged once.
      for (StorageType t : bsp.chooseStorageTypes(replication)) {
        if (t.supportTypeQuota()) {
          counts.addTypeSpace(t, charged);
        }
      }
    }
  }
  return counts;
}
项目:aliyun-oss-hadoop-fs    文件:TestTruncateQuotaUpdate.java   
/**
 * Test helper: attach a FileWithSnapshotFeature to {@code file} holding a
 * single mocked FileDiff whose block list is the given array.
 */
private static void addSnapshotFeature(INodeFile file, BlockInfo[] blocks) {
  FileDiff diff = mock(FileDiff.class);
  when(diff.getBlocks()).thenReturn(blocks);
  FileDiffList diffList = new FileDiffList();
  // Reach into FileDiffList's private "diffs" list to install the mock
  // without going through the normal snapshot-creation path.
  @SuppressWarnings("unchecked")
  ArrayList<FileDiff> diffs = ((ArrayList<FileDiff>)Whitebox.getInternalState
      (diffList, "diffs"));
  diffs.add(diff);
  FileWithSnapshotFeature sf = new FileWithSnapshotFeature(diffList);
  file.addFeature(sf);
}
项目:big-c    文件:INodeFile.java   
/** @return blocks of the file corresponding to the snapshot. */
public BlockInfoContiguous[] getBlocks(int snapshot) {
  // Live state (or no snapshot data at all): just the current block list.
  if (snapshot == CURRENT_STATE_ID || getDiffs() == null) {
    return getBlocks();
  }
  // Truncate records the pre-truncation block list in the snapshot diff,
  // so look there first.
  FileDiff diff = getDiffs().getDiffById(snapshot);
  BlockInfoContiguous[] snapshotBlocks =
      diff == null ? getBlocks() : diff.getBlocks();
  if (snapshotBlocks != null) {
    return snapshotBlocks;
  }
  // Blocks are not in the current snapshot
  // Find next snapshot with blocks present or return current file blocks
  snapshotBlocks = getDiffs().findLaterSnapshotBlocks(snapshot);
  return (snapshotBlocks == null) ? getBlocks() : snapshotBlocks;
}
项目:big-c    文件:INodeFile.java   
/**
 * Compute file size of the current file if the given snapshot is null;
 * otherwise, get the file size from the given snapshot.
 */
public final long computeFileSize(int snapshotId) {
  FileWithSnapshotFeature sf = this.getFileWithSnapshotFeature();
  // Only consult snapshot diffs for a concrete snapshot id on a file that
  // actually carries snapshot state.
  if (snapshotId != CURRENT_STATE_ID && sf != null) {
    final FileDiff d = sf.getDiffs().getDiffById(snapshotId);
    if (d != null) {
      // The diff records the file length as of that snapshot.
      return d.getFileSize();
    }
  }
  // Otherwise report the current file size. NOTE(review): the flag values
  // (true, false) presumably select how the last under-construction block
  // is counted -- confirm against computeFileSize(boolean, boolean).
  return computeFileSize(true, false);
}
项目:big-c    文件:INodeFile.java   
/**
 * Storage space consumed by this file across the current state and all
 * snapshot diffs, in bytes, NOT multiplied by the replication factor.
 */
public final long storagespaceConsumedNoReplication() {
  FileWithSnapshotFeature sf = getFileWithSnapshotFeature();
  if(sf == null) {
    // No snapshot state: consumed space is just the current file size.
    return computeFileSize(true, true);
  }

  // Collect all distinct blocks
  // A block may appear both in the current list and in snapshot diffs; the
  // HashSet ensures each distinct block is counted only once.
  long size = 0;
  Set<Block> allBlocks = new HashSet<Block>(Arrays.asList(getBlocks()));
  List<FileDiff> diffs = sf.getDiffs().asList();
  for(FileDiff diff : diffs) {
    BlockInfoContiguous[] diffBlocks = diff.getBlocks();
    if (diffBlocks != null) {
      allBlocks.addAll(Arrays.asList(diffBlocks));
    }
  }
  for(Block block : allBlocks) {
    size += block.getNumBytes();
  }
  // check if the last block is under construction
  BlockInfoContiguous lastBlock = getLastBlock();
  if(lastBlock != null &&
      lastBlock instanceof BlockInfoContiguousUnderConstruction) {
    // Charge an under-construction last block at the full preferred size.
    size += getPreferredBlockSize() - lastBlock.getNumBytes();
  }
  return size;
}
项目:big-c    文件:INodeFile.java   
/**
 * compute the quota usage change for a truncate op
 * @param newLength the length for truncation
 * @return the quota usage delta (not considering replication factor)
 */
long computeQuotaDeltaForTruncate(final long newLength) {
  final BlockInfoContiguous[] blocks = getBlocks();
  if (blocks == null || blocks.length == 0) {
    return 0;
  }

  // Advance n until the cumulative size reaches newLength; n ends just past
  // the block containing the new length.
  int n = 0;
  long size = 0;
  for (; n < blocks.length && newLength > size; n++) {
    size += blocks[n].getNumBytes();
  }
  // onBoundary: the new length falls exactly on a block boundary, so no
  // block must be partially kept.
  final boolean onBoundary = size == newLength;

  // Sum the bytes removed: all blocks past the cut, plus (off boundary) the
  // straddling block at index n-1.
  long truncateSize = 0;
  for (int i = (onBoundary ? n : n - 1); i < blocks.length; i++) {
    truncateSize += blocks[i].getNumBytes();
  }

  FileWithSnapshotFeature sf = getFileWithSnapshotFeature();
  if (sf != null) {
    FileDiff diff = sf.getDiffs().getLast();
    BlockInfoContiguous[] sblocks = diff != null ? diff.getBlocks() : null;
    if (sblocks != null) {
      // Blocks still referenced by the latest snapshot are not reclaimed,
      // so exclude them from the reclaimed total.
      for (int i = (onBoundary ? n : n-1); i < blocks.length
          && i < sblocks.length && blocks[i].equals(sblocks[i]); i++) {
        truncateSize -= blocks[i].getNumBytes();
      }
    }
  }
  // Off-boundary truncates charge one preferred-size block up front, since
  // the tail block is copied during truncate recovery.
  return onBoundary ? -truncateSize : (getPreferredBlockSize() - truncateSize);
}
项目:hadoop-2.6.0-cdh5.4.3    文件:INodeFile.java   
/**
 * Compute the size of this file.
 * @param snapshotId snapshot to read the size from, or CURRENT_STATE_ID for
 *                   the current (live) file state
 * @return the size stored in the matching snapshot diff when one exists;
 *         otherwise the size of the current file
 */
public final long computeFileSize(int snapshotId) {
  final FileWithSnapshotFeature sf = this.getFileWithSnapshotFeature();
  if (snapshotId == CURRENT_STATE_ID || sf == null) {
    return computeFileSize(true, false);
  }
  final FileDiff diff = sf.getDiffs().getDiffById(snapshotId);
  return (diff != null) ? diff.getFileSize() : computeFileSize(true, false);
}
项目:FlexMap    文件:INodeFile.java   
@Override
public final Quota.Counts computeQuotaUsage(Quota.Counts counts,
    boolean useCache, int lastSnapshotId) {
  // Every file contributes one namespace entry for itself.
  long nsDelta = 1;
  final long dsDelta;
  FileWithSnapshotFeature sf = getFileWithSnapshotFeature();
  if (sf != null) {
    FileDiffList fileDiffList = sf.getDiffs();
    int last = fileDiffList.getLastSnapshotId();
    List<FileDiff> diffs = fileDiffList.asList();

    if (lastSnapshotId == Snapshot.CURRENT_STATE_ID
        || last == Snapshot.CURRENT_STATE_ID) {
      // Querying live state: each recorded diff also costs one namespace
      // unit; disk usage is the file's current consumption.
      nsDelta += diffs.size();
      dsDelta = diskspaceConsumed();
    } else if (last < lastSnapshotId) {
      // The queried snapshot is newer than any recorded diff, so the file
      // was unchanged since then: charge current size times replication.
      dsDelta = computeFileSize(true, false) * getFileReplication();
    } else {      
      // Charge disk usage as of the diff covering the queried snapshot.
      int sid = fileDiffList.getSnapshotById(lastSnapshotId);
      dsDelta = diskspaceConsumed(sid);
    }
  } else {
    // No snapshot state at all.
    dsDelta = diskspaceConsumed();
  }
  counts.add(Quota.NAMESPACE, nsDelta);
  counts.add(Quota.DISKSPACE, dsDelta);
  return counts;
}
项目:FlexMap    文件:INodeFile.java   
/**
 * Compute file size of the current file if the given snapshot is null;
 * otherwise, get the file size from the given snapshot.
 */
public final long computeFileSize(int snapshotId) {
  FileWithSnapshotFeature sf = this.getFileWithSnapshotFeature();
  // Snapshot sizes exist only when the file carries snapshot state and a
  // concrete snapshot id is requested.
  if (snapshotId != CURRENT_STATE_ID && sf != null) {
    final FileDiff d = sf.getDiffs().getDiffById(snapshotId);
    if (d != null) {
      // Length of the file as of that snapshot.
      return d.getFileSize();
    }
  }
  // Fall back to the live file size. NOTE(review): flag semantics of
  // computeFileSize(true, false) are not visible here -- confirm.
  return computeFileSize(true, false);
}
项目:hadoop-on-lustre2    文件:INodeFile.java   
@Override
public final Quota.Counts computeQuotaUsage(Quota.Counts counts,
    boolean useCache, int lastSnapshotId) {
  // A file always accounts for one namespace entry of its own.
  long nsDelta = 1;
  long dsDelta;
  final FileWithSnapshotFeature sf = getFileWithSnapshotFeature();
  if (sf == null) {
    // No snapshot state: disk usage is simply the current consumption.
    dsDelta = diskspaceConsumed();
  } else {
    final FileDiffList fileDiffList = sf.getDiffs();
    final int last = fileDiffList.getLastSnapshotId();
    final List<FileDiff> diffs = fileDiffList.asList();

    if (lastSnapshotId == Snapshot.CURRENT_STATE_ID
        || last == Snapshot.CURRENT_STATE_ID) {
      // Querying the live state: every recorded diff adds one namespace
      // unit, and disk usage is the current consumption.
      nsDelta += diffs.size();
      dsDelta = diskspaceConsumed();
    } else if (last < lastSnapshotId) {
      // Queried snapshot is newer than any diff, so the file has not
      // changed since: charge current size times replication.
      dsDelta = computeFileSize(true, false) * getFileReplication();
    } else {
      // Charge usage as of the diff that covers the queried snapshot.
      dsDelta = diskspaceConsumed(fileDiffList.getSnapshotById(lastSnapshotId));
    }
  }
  counts.add(Quota.NAMESPACE, nsDelta);
  counts.add(Quota.DISKSPACE, dsDelta);
  return counts;
}
项目:hadoop-on-lustre2    文件:INodeFile.java   
/**
 * Compute the size of this file.
 * @param snapshotId snapshot id to read from, or CURRENT_STATE_ID for the
 *                   current file state
 * @return the length recorded in the snapshot diff when one exists for the
 *         given id; otherwise the current file size
 */
public final long computeFileSize(int snapshotId) {
  final FileWithSnapshotFeature sf = this.getFileWithSnapshotFeature();
  if (snapshotId == CURRENT_STATE_ID || sf == null) {
    return computeFileSize(true, false);
  }
  final FileDiff diff = sf.getDiffs().getDiffById(snapshotId);
  return (diff == null) ? computeFileSize(true, false) : diff.getFileSize();
}
项目:aliyun-oss-hadoop-fs    文件:INodeFile.java   
/**
 * Compute the quota usage change for a truncate op.
 * @param newLength the length for truncation
 * @param bsps storage policy used to charge per-storage-type quota; may be
 *             null, in which case only raw storage space is accumulated
 * @param delta accumulator that receives the (typically negative) change
 * TODO: properly handle striped blocks (HDFS-7622)
 **/
void computeQuotaDeltaForTruncate(
    long newLength, BlockStoragePolicy bsps,
    QuotaCounts delta) {
  final BlockInfo[] blocks = getBlocks();
  if (blocks == null || blocks.length == 0) {
    return;
  }

  // Current total file length in bytes.
  long size = 0;
  for (BlockInfo b : blocks) {
    size += b.getNumBytes();
  }

  // Block list captured by the most recent snapshot, if any.
  BlockInfo[] sblocks = null;
  FileWithSnapshotFeature sf = getFileWithSnapshotFeature();
  if (sf != null) {
    FileDiff diff = sf.getDiffs().getLast();
    sblocks = diff != null ? diff.getBlocks() : null;
  }

  // Walk blocks from the tail while they extend past newLength; after each
  // iteration `size` is the file length up to and including block i.
  for (int i = blocks.length - 1; i >= 0 && size > newLength;
       size -= blocks[i].getNumBytes(), --i) {
    BlockInfo bi = blocks[i];
    long truncatedBytes;
    if (size - newLength < bi.getNumBytes()) {
      // Record a full block as the last block will be copied during
      // recovery
      truncatedBytes = bi.getNumBytes() - getPreferredBlockSize();
    } else {
      // Block lies wholly beyond the cut: all of it is reclaimed.
      truncatedBytes = bi.getNumBytes();
    }

    // The block exist in snapshot, adding back the truncated bytes in the
    // existing files
    if (sblocks != null && i < sblocks.length && bi.equals(sblocks[i])) {
      truncatedBytes -= bi.getNumBytes();
    }

    // Negative truncatedBytes thus increases the charged space.
    delta.addStorageSpace(-truncatedBytes * bi.getReplication());
    if (bsps != null) {
      // Per-type quota is charged once per chosen storage type.
      List<StorageType> types = bsps.chooseStorageTypes(bi.getReplication());
      for (StorageType t : types) {
        if (t.supportTypeQuota()) {
          delta.addTypeSpace(t, -truncatedBytes);
        }
      }
    }
  }
}