Example source code for the Java class org.apache.hadoop.hdfs.server.namenode.INodeFile

Project: hadoop    File: FileWithSnapshotFeature.java
public QuotaCounts cleanFile(final BlockStoragePolicySuite bsps,
    final INodeFile file, final int snapshotId,
    int priorSnapshotId, final BlocksMapUpdateInfo collectedBlocks,
    final List<INode> removedINodes) {
  if (snapshotId == Snapshot.CURRENT_STATE_ID) {
    // delete the current file while the file has snapshot feature
    if (!isCurrentFileDeleted()) {
      file.recordModification(priorSnapshotId);
      deleteCurrentFile();
    }
    collectBlocksAndClear(bsps, file, collectedBlocks, removedINodes);
    return new QuotaCounts.Builder().build();
  } else { // delete the snapshot
    priorSnapshotId = getDiffs().updatePrior(snapshotId, priorSnapshotId);
    return diffs.deleteSnapshotDiff(bsps, snapshotId, priorSnapshotId, file,
        collectedBlocks, removedINodes);
  }
}
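The Snapshot.CURRENT_STATE_ID branch above runs when a live file that still has snapshot copies is deleted: the inode is kept for the snapshots, and only blocks that no snapshot still needs are collected. Below is a minimal JUnit-style sketch of that scenario from the client side; it is not part of any of the projects listed here, the test name is made up, and it assumes the hdfs, fsdir, BLOCKSIZE and REPLICATION fixtures used by the TestSnapshot* methods later on this page.

// Sketch only: exercises the "delete the current file while the file has
// snapshot feature" path via the public API. Assumes the hdfs/fsdir/BLOCKSIZE/
// REPLICATION fixtures of the snapshot tests further down this page.
@Test
public void testDeleteCurrentFileKeepsSnapshotCopy() throws Exception {
  final Path foo = new Path("/foo");
  final Path bar = new Path(foo, "bar");
  DFSTestUtil.createFile(hdfs, bar, BLOCKSIZE, REPLICATION, 0L);

  // Take a snapshot so that deleting bar later goes through the
  // snapshot-aware deletion path instead of destroying the inode.
  SnapshotTestHelper.createSnapshot(hdfs, foo, "s0");

  // Delete the current file; the snapshot copy must remain reachable.
  hdfs.delete(bar, true);
  final Path sbar = SnapshotTestHelper.getSnapshotPath(foo, "s0", bar.getName());
  assertTrue(hdfs.exists(sbar));
  INodeFile barNode = fsdir.getINode(sbar.toString()).asFile();
  // The single block of the BLOCKSIZE-long file is still referenced.
  assertEquals(1, barNode.getBlocks().length);
}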
Project: hadoop    File: FileWithSnapshotFeature.java
/**
 * If some blocks at the end of the block list no longer belong to
 * any inode, collect them and update the block list.
 */
public void collectBlocksAndClear(final BlockStoragePolicySuite bsps, final INodeFile file,
    final BlocksMapUpdateInfo info, final List<INode> removedINodes) {
  // check if everything is deleted.
  if (isCurrentFileDeleted() && getDiffs().asList().isEmpty()) {
    file.destroyAndCollectBlocks(bsps, info, removedINodes);
    return;
  }
  // find max file size.
  final long max;
  FileDiff diff = getDiffs().getLast();
  if (isCurrentFileDeleted()) {
    max = diff == null ? 0 : diff.getFileSize();
  } else { 
    max = file.computeFileSize();
  }

  // Collect blocks that should be deleted
  FileDiff last = diffs.getLast();
  BlockInfoContiguous[] snapshotBlocks = last == null ? null : last.getBlocks();
  if (snapshotBlocks == null) {
    file.collectBlocksBeyondMax(max, info);
  } else {
    file.collectBlocksBeyondSnapshot(snapshotBlocks, info);
  }
}
Project: hadoop    File: TestSnapshotReplication.java
/**
 * Check the replication for both the current file and all its prior snapshots
 * 
 * @param currentFile
 *          the Path of the current file
 * @param snapshotRepMap
 *          A map maintaining all the snapshots of the current file, as well
 *          as their expected replication number stored in their corresponding
 *          INodes
 * @param expectedBlockRep
 *          The expected replication number
 * @throws Exception
 */
private void checkSnapshotFileReplication(Path currentFile,
    Map<Path, Short> snapshotRepMap, short expectedBlockRep) throws Exception {
  // First check the getBlockReplication for the INode of the currentFile
  final INodeFile inodeOfCurrentFile = getINodeFile(currentFile);
  assertEquals(expectedBlockRep, inodeOfCurrentFile.getBlockReplication());
  // Then check replication for every snapshot
  for (Path ss : snapshotRepMap.keySet()) {
    final INodesInPath iip = fsdir.getINodesInPath(ss.toString(), true);
    final INodeFile ssInode = iip.getLastINode().asFile();
    // The replication number derived from the
    // INodeFileWithLink#getBlockReplication should always == expectedBlockRep
    assertEquals(expectedBlockRep, ssInode.getBlockReplication());
    // Also check the number derived from INodeFile#getFileReplication
    assertEquals(snapshotRepMap.get(ss).shortValue(),
        ssInode.getFileReplication(iip.getPathSnapshotId()));
  }
}
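For context, here is a compact sketch of how a helper like this is driven; it mirrors testReplicationAfterDeletion near the end of this page, the test name is made up, and it assumes the same hdfs, sub1, file1, BLOCKSIZE, REPLICATION and seed fixtures of TestSnapshotReplication.

// Sketch only: build the snapshot-to-replication map and hand it to the
// helper above. Assumes the TestSnapshotReplication fixtures.
@Test
public void testCheckSnapshotFileReplicationUsage() throws Exception {
  DFSTestUtil.createFile(hdfs, file1, BLOCKSIZE, REPLICATION, seed);
  Map<Path, Short> snapshotRepMap = new HashMap<Path, Short>();
  // Take three snapshots of sub1 and record the expected replication of the
  // file as seen through each snapshot path.
  for (int i = 1; i <= 3; i++) {
    Path root = SnapshotTestHelper.createSnapshot(hdfs, sub1, "s" + i);
    snapshotRepMap.put(new Path(root, file1.getName()), REPLICATION);
  }
  checkSnapshotFileReplication(file1, snapshotRepMap, REPLICATION);
}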
Project: aliyun-oss-hadoop-fs    File: FileWithSnapshotFeature.java
public void cleanFile(INode.ReclaimContext reclaimContext,
    final INodeFile file, final int snapshotId, int priorSnapshotId,
    byte storagePolicyId) {
  if (snapshotId == Snapshot.CURRENT_STATE_ID) {
    // delete the current file while the file has snapshot feature
    if (!isCurrentFileDeleted()) {
      file.recordModification(priorSnapshotId);
      deleteCurrentFile();
    }
    final BlockStoragePolicy policy = reclaimContext.storagePolicySuite()
        .getPolicy(storagePolicyId);
    QuotaCounts old = file.storagespaceConsumed(policy);
    collectBlocksAndClear(reclaimContext, file);
    QuotaCounts current = file.storagespaceConsumed(policy);
    reclaimContext.quotaDelta().add(old.subtract(current));
  } else { // delete the snapshot
    priorSnapshotId = getDiffs().updatePrior(snapshotId, priorSnapshotId);
    diffs.deleteSnapshotDiff(reclaimContext, snapshotId, priorSnapshotId,
        file);
  }
}
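Unlike the hadoop/big-c variant above, this version does not return a QuotaCounts; it charges reclaimContext.quotaDelta() with the storagespace consumed before block collection minus the storagespace consumed after. A back-of-the-envelope illustration of that delta with plain longs follows (not the QuotaCounts or ReclaimContext API; the block size and counts are made up).

// Illustration only: the quota reclaimed by cleanFile is "old - current",
// i.e. the storagespace freed by collecting the file's blocks.
long blockSize = 128L * 1024 * 1024;           // hypothetical 128 MB blocks
long oldStoragespace = 3 * blockSize * 3;      // 3 blocks referenced, repl = 3
long currentStoragespace = 1 * blockSize * 3;  // 1 block survives in a snapshot diff
long reclaimed = oldStoragespace - currentStoragespace; // 2 blocks * repl 3 freed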
Project: aliyun-oss-hadoop-fs    File: FileWithSnapshotFeature.java
/**
 * If some blocks at the end of the block list no longer belong to
 * any inode, collect them and update the block list.
 */
public void collectBlocksAndClear(
    INode.ReclaimContext reclaimContext, final INodeFile file) {
  // check if everything is deleted.
  if (isCurrentFileDeleted() && getDiffs().asList().isEmpty()) {
    file.clearFile(reclaimContext);
    return;
  }
  // find max file size.
  final long max;
  FileDiff diff = getDiffs().getLast();
  if (isCurrentFileDeleted()) {
    max = diff == null ? 0 : diff.getFileSize();
  } else {
    max = file.computeFileSize();
  }

  // Collect blocks that should be deleted
  FileDiff last = diffs.getLast();
  BlockInfo[] snapshotBlocks = last == null ? null : last.getBlocks();
  if (snapshotBlocks == null) {
    file.collectBlocksBeyondMax(max, reclaimContext.collectedBlocks());
  } else {
    file.collectBlocksBeyondSnapshot(snapshotBlocks,
                                     reclaimContext.collectedBlocks());
  }
}
Project: big-c    File: FileWithSnapshotFeature.java
public QuotaCounts cleanFile(final BlockStoragePolicySuite bsps,
    final INodeFile file, final int snapshotId,
    int priorSnapshotId, final BlocksMapUpdateInfo collectedBlocks,
    final List<INode> removedINodes) {
  if (snapshotId == Snapshot.CURRENT_STATE_ID) {
    // delete the current file while the file has snapshot feature
    if (!isCurrentFileDeleted()) {
      file.recordModification(priorSnapshotId);
      deleteCurrentFile();
    }
    collectBlocksAndClear(bsps, file, collectedBlocks, removedINodes);
    return new QuotaCounts.Builder().build();
  } else { // delete the snapshot
    priorSnapshotId = getDiffs().updatePrior(snapshotId, priorSnapshotId);
    return diffs.deleteSnapshotDiff(bsps, snapshotId, priorSnapshotId, file,
        collectedBlocks, removedINodes);
  }
}
Project: big-c    File: FileWithSnapshotFeature.java
/**
 * If some blocks at the end of the block list no longer belong to
 * any inode, collect them and update the block list.
 */
public void collectBlocksAndClear(final BlockStoragePolicySuite bsps, final INodeFile file,
    final BlocksMapUpdateInfo info, final List<INode> removedINodes) {
  // check if everything is deleted.
  if (isCurrentFileDeleted() && getDiffs().asList().isEmpty()) {
    file.destroyAndCollectBlocks(bsps, info, removedINodes);
    return;
  }
  // find max file size.
  final long max;
  FileDiff diff = getDiffs().getLast();
  if (isCurrentFileDeleted()) {
    max = diff == null ? 0 : diff.getFileSize();
  } else { 
    max = file.computeFileSize();
  }

  // Collect blocks that should be deleted
  FileDiff last = diffs.getLast();
  BlockInfoContiguous[] snapshotBlocks = last == null ? null : last.getBlocks();
  if (snapshotBlocks == null) {
    file.collectBlocksBeyondMax(max, info);
  } else {
    file.collectBlocksBeyondSnapshot(snapshotBlocks, info);
  }
}
Project: big-c    File: TestSnapshotReplication.java
/**
 * Check the replication for both the current file and all its prior snapshots
 * 
 * @param currentFile
 *          the Path of the current file
 * @param snapshotRepMap
 *          A map maintaining all the snapshots of the current file, as well
 *          as their expected replication number stored in their corresponding
 *          INodes
 * @param expectedBlockRep
 *          The expected replication number
 * @throws Exception
 */
private void checkSnapshotFileReplication(Path currentFile,
    Map<Path, Short> snapshotRepMap, short expectedBlockRep) throws Exception {
  // First check the getBlockReplication for the INode of the currentFile
  final INodeFile inodeOfCurrentFile = getINodeFile(currentFile);
  assertEquals(expectedBlockRep, inodeOfCurrentFile.getBlockReplication());
  // Then check replication for every snapshot
  for (Path ss : snapshotRepMap.keySet()) {
    final INodesInPath iip = fsdir.getINodesInPath(ss.toString(), true);
    final INodeFile ssInode = iip.getLastINode().asFile();
    // The replication number derived from the
    // INodeFileWithLink#getBlockReplication should always == expectedBlockRep
    assertEquals(expectedBlockRep, ssInode.getBlockReplication());
    // Also check the number derived from INodeFile#getFileReplication
    assertEquals(snapshotRepMap.get(ss).shortValue(),
        ssInode.getFileReplication(iip.getPathSnapshotId()));
  }
}
Project: hops    File: LastBlockReplicasHashBucketLock.java
@Override
protected void acquire(TransactionLocks locks) throws IOException {
  BlockLock blockLock = (BlockLock) locks.getLock(Type.Block);
  for (INodeFile iNodeFile : blockLock.getFiles()) {
    Block lastBlock = iNodeFile.getLastBlock();
    if (lastBlock != null) {
      List<Replica> replicas = (List<Replica>) EntityManager
          .findList(Replica.Finder.ByBlockIdAndINodeId,
              lastBlock.getBlockId(),
              iNodeFile.getId());
      if (replicas != null) {
        Collections.sort(replicas, new Comparator<Replica>() {
          @Override
          public int compare(Replica o1, Replica o2) {
            return Integer.compare(o1.getBucketId(), o2.getBucketId());
          }
        });

        for (Replica replica : replicas) {
          EntityManager.find(HashBucket.Finder.ByStorageIdAndBucketId, replica
              .getStorageId(), replica.getBucketId());
        }
      }
    }
  }
}
Project: hadoop-2.6.0-cdh5.4.3    File: FileWithSnapshotFeature.java
public Quota.Counts cleanFile(final INodeFile file, final int snapshotId,
    int priorSnapshotId, final BlocksMapUpdateInfo collectedBlocks,
    final List<INode> removedINodes) {
  if (snapshotId == Snapshot.CURRENT_STATE_ID) {
    // delete the current file while the file has snapshot feature
    if (!isCurrentFileDeleted()) {
      file.recordModification(priorSnapshotId);
      deleteCurrentFile();
    }
    collectBlocksAndClear(file, collectedBlocks, removedINodes);
    return Quota.Counts.newInstance();
  } else { // delete the snapshot
    priorSnapshotId = getDiffs().updatePrior(snapshotId, priorSnapshotId);
    return diffs.deleteSnapshotDiff(snapshotId, priorSnapshotId, file,
        collectedBlocks, removedINodes);
  }
}
Project: hadoop-2.6.0-cdh5.4.3    File: FileWithSnapshotFeature.java
public Quota.Counts updateQuotaAndCollectBlocks(INodeFile file,
    FileDiff removed, BlocksMapUpdateInfo collectedBlocks,
    final List<INode> removedINodes) {
  long oldDiskspace = file.diskspaceConsumed();
  if (removed.snapshotINode != null) {
    short replication = removed.snapshotINode.getFileReplication();
    short currentRepl = file.getBlockReplication();
    if (currentRepl == 0) {
      oldDiskspace = file.computeFileSize(true, true) * replication;
    } else if (replication > currentRepl) {  
      oldDiskspace = oldDiskspace / file.getBlockReplication() * replication;
    }
  }

  collectBlocksAndClear(file, collectedBlocks, removedINodes);

  long dsDelta = oldDiskspace - file.diskspaceConsumed();
  return Quota.Counts.newInstance(0, dsDelta);
}
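The replication handling above rescales the pre-deletion disk usage when the removed snapshot copy recorded a higher replication factor than the file currently has. Here is a tiny worked example of that rescaling with plain longs (illustration only, not the Hadoop API; the sizes are made up).

// Illustration only: rescaling oldDiskspace when the snapshot copy's
// replication (3) is higher than the file's current replication (2).
long fileSize = 2 * 128L * 1024 * 1024;             // two full blocks
short currentRepl = 2;
short snapshotRepl = 3;
long oldDiskspace = fileSize * currentRepl;         // what diskspaceConsumed() reports
oldDiskspace = oldDiskspace / currentRepl * snapshotRepl;  // same adjustment as above
// oldDiskspace is now fileSize * 3, the space the snapshot copy is charged for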
Project: hadoop-2.6.0-cdh5.4.3    File: FileWithSnapshotFeature.java
/**
 * If some blocks at the end of the block list no longer belong to
 * any inode, collect them and update the block list.
 */
private void collectBlocksAndClear(final INodeFile file,
    final BlocksMapUpdateInfo info, final List<INode> removedINodes) {
  // check if everything is deleted.
  if (isCurrentFileDeleted() && getDiffs().asList().isEmpty()) {
    file.destroyAndCollectBlocks(info, removedINodes);
    return;
  }
  // find max file size.
  final long max;
  if (isCurrentFileDeleted()) {
    final FileDiff last = getDiffs().getLast();
    max = last == null ? 0 : last.getFileSize();
  } else { 
    max = file.computeFileSize();
  }

  collectBlocksBeyondMax(file, max, info);
}
Project: hadoop-2.6.0-cdh5.4.3    File: FSImageFormatPBSnapshot.java
private void serializeFileDiffList(INodeFile file, OutputStream out)
    throws IOException {
  FileWithSnapshotFeature sf = file.getFileWithSnapshotFeature();
  if (sf != null) {
    List<FileDiff> diffList = sf.getDiffs().asList();
    SnapshotDiffSection.DiffEntry entry = SnapshotDiffSection.DiffEntry
        .newBuilder().setInodeId(file.getId()).setType(Type.FILEDIFF)
        .setNumOfDiff(diffList.size()).build();
    entry.writeDelimitedTo(out);
    for (int i = diffList.size() - 1; i >= 0; i--) {
      FileDiff diff = diffList.get(i);
      SnapshotDiffSection.FileDiff.Builder fb = SnapshotDiffSection.FileDiff
          .newBuilder().setSnapshotId(diff.getSnapshotId())
          .setFileSize(diff.getFileSize());
      INodeFileAttributes copy = diff.snapshotINode;
      if (copy != null) {
        fb.setName(ByteString.copyFrom(copy.getLocalNameBytes()))
            .setSnapshotCopy(buildINodeFile(copy, parent.getSaverContext()));
      }
      fb.build().writeDelimitedTo(out);
    }
  }
}
Project: hadoop-2.6.0-cdh5.4.3    File: TestSnapshotReplication.java
/**
 * Check the replication for both the current file and all its prior snapshots
 * 
 * @param currentFile
 *          the Path of the current file
 * @param snapshotRepMap
 *          A map maintaining all the snapshots of the current file, as well
 *          as their expected replication number stored in their corresponding
 *          INodes
 * @param expectedBlockRep
 *          The expected replication number that should be returned by
 *          {@link INodeFileWithSnapshot#getBlockReplication()} of all the INodes
 * @throws Exception
 */
private void checkSnapshotFileReplication(Path currentFile,
    Map<Path, Short> snapshotRepMap, short expectedBlockRep) throws Exception {
  // First check the getBlockReplication for the INode of the currentFile
  final INodeFile inodeOfCurrentFile = getINodeFile(currentFile);
  assertEquals(expectedBlockRep, inodeOfCurrentFile.getBlockReplication());
  // Then check replication for every snapshot
  for (Path ss : snapshotRepMap.keySet()) {
    final INodesInPath iip = fsdir.getLastINodeInPath(ss.toString());
    final INodeFile ssInode = (INodeFile)iip.getLastINode();
    // The replication number derived from the
    // INodeFileWithLink#getBlockReplication should always == expectedBlockRep
    assertEquals(expectedBlockRep, ssInode.getBlockReplication());
    // Also check the number derived from INodeFile#getFileReplication
    assertEquals(snapshotRepMap.get(ss).shortValue(),
        ssInode.getFileReplication(iip.getPathSnapshotId()));
  }
}
Project: hadoop-EAR    File: RaidCodec.java
/**
 * Count the number of live replicas of each parity block in the raided file.
 * If any stripe does not have enough parity block replicas, add the stripe to
 * raidEncodingTasks to schedule encoding.
 * If forceAdd is true, we always add the stripe to raidEncodingTasks
 * without checking.
 * @param sourceINode
 * @param raidEncodingTasks
 * @param fs
 * @param forceAdd
 * @return true if all parity blocks of the file have enough replicas
 * @throws IOException
 */
public boolean checkRaidProgress(INodeFile sourceINode, 
    LightWeightLinkedSet<RaidBlockInfo> raidEncodingTasks, FSNamesystem fs,
    boolean forceAdd) throws IOException {
  boolean result = true;
  BlockInfo[] blocks = sourceINode.getBlocks();
  for (int i = 0; i < blocks.length;
      i += numStripeBlocks) {
    boolean hasParity = true;
    if (!forceAdd) {
      for (int j = 0; j < numParityBlocks; j++) {
        if (fs.countLiveNodes(blocks[i + j]) < this.parityReplication) {
          hasParity = false;
          break;
        }
      }
    }
    if (!hasParity || forceAdd) {
      raidEncodingTasks.add(new RaidBlockInfo(blocks[i], parityReplication, i));
      result = false; 
    }
  }
  return result;
}
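The loop above assumes the raided file's block list is organized in stripes of numStripeBlocks blocks whose first numParityBlocks entries are the parity blocks. A standalone sketch of that indexing is below (plain ints, no HDFS types; the stripe dimensions are made-up examples).

// Illustration only: the stripe layout checkRaidProgress iterates over.
// With numStripeBlocks = 5 and numParityBlocks = 2, every stripe starting at
// index i has parity blocks at i and i + 1, and data blocks at i + 2 .. i + 4.
int numStripeBlocks = 5;
int numParityBlocks = 2;
int totalBlocks = 10;
for (int i = 0; i < totalBlocks; i += numStripeBlocks) {
  for (int j = 0; j < numParityBlocks; j++) {
    System.out.println("stripe at " + i + ": parity block index " + (i + j));
  }
}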
Project: hadoop-plus    File: FileWithSnapshot.java
private static Quota.Counts updateQuotaAndCollectBlocks(
    INodeFile currentINode, FileDiff removed,
    BlocksMapUpdateInfo collectedBlocks, final List<INode> removedINodes) {
  FileWithSnapshot sFile = (FileWithSnapshot) currentINode;
  long oldDiskspace = currentINode.diskspaceConsumed();
  if (removed.snapshotINode != null) {
    short replication = removed.snapshotINode.getFileReplication();
    short currentRepl = currentINode.getBlockReplication();
    if (currentRepl == 0) {
      oldDiskspace = currentINode.computeFileSize(true, true) * replication;
    } else if (replication > currentRepl) {  
      oldDiskspace = oldDiskspace / currentINode.getBlockReplication()
          * replication;
    }
  }

  Util.collectBlocksAndClear(sFile, collectedBlocks, removedINodes);

  long dsDelta = oldDiskspace - currentINode.diskspaceConsumed();
  return Quota.Counts.newInstance(0, dsDelta);
}
Project: hadoop-plus    File: TestSnapshotReplication.java
/**
 * Check the replication for both the current file and all its prior snapshots
 * 
 * @param currentFile
 *          the Path of the current file
 * @param snapshotRepMap
 *          A map maintaining all the snapshots of the current file, as well
 *          as their expected replication number stored in their corresponding
 *          INodes
 * @param expectedBlockRep
 *          The expected replication number that should be returned by
 *          {@link INodeFileWithSnapshot#getBlockReplication()} of all the INodes
 * @throws Exception
 */
private void checkSnapshotFileReplication(Path currentFile,
    Map<Path, Short> snapshotRepMap, short expectedBlockRep) throws Exception {
  // First check the getBlockReplication for the INode of the currentFile
  final INodeFile inodeOfCurrentFile = getINodeFile(currentFile);
  assertEquals(expectedBlockRep, inodeOfCurrentFile.getBlockReplication());
  // Then check replication for every snapshot
  for (Path ss : snapshotRepMap.keySet()) {
    final INodesInPath iip = fsdir.getLastINodeInPath(ss.toString());
    final INodeFile ssInode = (INodeFile)iip.getLastINode();
    // The replication number derived from the
    // INodeFileWithLink#getBlockReplication should always == expectedBlockRep
    assertEquals(expectedBlockRep, ssInode.getBlockReplication());
    // Also check the number derived from INodeFile#getFileReplication
    assertEquals(snapshotRepMap.get(ss).shortValue(),
        ssInode.getFileReplication(iip.getPathSnapshot()));
  }
}
Project: FlexMap    File: FileWithSnapshotFeature.java
public Quota.Counts cleanFile(final INodeFile file, final int snapshotId,
    int priorSnapshotId, final BlocksMapUpdateInfo collectedBlocks,
    final List<INode> removedINodes, final boolean countDiffChange)
    throws QuotaExceededException {
  if (snapshotId == Snapshot.CURRENT_STATE_ID) {
    // delete the current file while the file has snapshot feature
    if (!isCurrentFileDeleted()) {
      file.recordModification(priorSnapshotId);
      deleteCurrentFile();
    }
    collectBlocksAndClear(file, collectedBlocks, removedINodes);
    return Quota.Counts.newInstance();
  } else { // delete the snapshot
    priorSnapshotId = getDiffs().updatePrior(snapshotId, priorSnapshotId);
    return diffs.deleteSnapshotDiff(snapshotId, priorSnapshotId, file,
        collectedBlocks, removedINodes, countDiffChange);
  }
}
Project: FlexMap    File: FileWithSnapshotFeature.java
public Quota.Counts updateQuotaAndCollectBlocks(INodeFile file,
    FileDiff removed, BlocksMapUpdateInfo collectedBlocks,
    final List<INode> removedINodes) {
  long oldDiskspace = file.diskspaceConsumed();
  if (removed.snapshotINode != null) {
    short replication = removed.snapshotINode.getFileReplication();
    short currentRepl = file.getBlockReplication();
    if (currentRepl == 0) {
      oldDiskspace = file.computeFileSize(true, true) * replication;
    } else if (replication > currentRepl) {  
      oldDiskspace = oldDiskspace / file.getBlockReplication() * replication;
    }
  }

  collectBlocksAndClear(file, collectedBlocks, removedINodes);

  long dsDelta = oldDiskspace - file.diskspaceConsumed();
  return Quota.Counts.newInstance(0, dsDelta);
}
Project: FlexMap    File: FileWithSnapshotFeature.java
/**
 * If some blocks at the end of the block list no longer belong to
 * any inode, collect them and update the block list.
 */
private void collectBlocksAndClear(final INodeFile file,
    final BlocksMapUpdateInfo info, final List<INode> removedINodes) {
  // check if everything is deleted.
  if (isCurrentFileDeleted() && getDiffs().asList().isEmpty()) {
    file.destroyAndCollectBlocks(info, removedINodes);
    return;
  }
  // find max file size.
  final long max;
  if (isCurrentFileDeleted()) {
    final FileDiff last = getDiffs().getLast();
    max = last == null ? 0 : last.getFileSize();
  } else { 
    max = file.computeFileSize();
  }

  collectBlocksBeyondMax(file, max, info);
}
Project: FlexMap    File: FSImageFormatPBSnapshot.java
private void serializeFileDiffList(INodeFile file, OutputStream out)
    throws IOException {
  FileWithSnapshotFeature sf = file.getFileWithSnapshotFeature();
  if (sf != null) {
    List<FileDiff> diffList = sf.getDiffs().asList();
    SnapshotDiffSection.DiffEntry entry = SnapshotDiffSection.DiffEntry
        .newBuilder().setInodeId(file.getId()).setType(Type.FILEDIFF)
        .setNumOfDiff(diffList.size()).build();
    entry.writeDelimitedTo(out);
    for (int i = diffList.size() - 1; i >= 0; i--) {
      FileDiff diff = diffList.get(i);
      SnapshotDiffSection.FileDiff.Builder fb = SnapshotDiffSection.FileDiff
          .newBuilder().setSnapshotId(diff.getSnapshotId())
          .setFileSize(diff.getFileSize());
      INodeFileAttributes copy = diff.snapshotINode;
      if (copy != null) {
        fb.setName(ByteString.copyFrom(copy.getLocalNameBytes()))
            .setSnapshotCopy(buildINodeFile(copy, parent.getSaverContext()));
      }
      fb.build().writeDelimitedTo(out);
    }
  }
}
Project: FlexMap    File: TestSnapshotReplication.java
/**
 * Check the replication for both the current file and all its prior snapshots
 * 
 * @param currentFile
 *          the Path of the current file
 * @param snapshotRepMap
 *          A map maintaining all the snapshots of the current file, as well
 *          as their expected replication number stored in their corresponding
 *          INodes
 * @param expectedBlockRep
 *          The expected replication number that should be returned by
 *          {@link INodeFileWithSnapshot#getBlockReplication()} of all the INodes
 * @throws Exception
 */
private void checkSnapshotFileReplication(Path currentFile,
    Map<Path, Short> snapshotRepMap, short expectedBlockRep) throws Exception {
  // First check the getBlockReplication for the INode of the currentFile
  final INodeFile inodeOfCurrentFile = getINodeFile(currentFile);
  assertEquals(expectedBlockRep, inodeOfCurrentFile.getBlockReplication());
  // Then check replication for every snapshot
  for (Path ss : snapshotRepMap.keySet()) {
    final INodesInPath iip = fsdir.getLastINodeInPath(ss.toString());
    final INodeFile ssInode = (INodeFile)iip.getLastINode();
    // The replication number derived from the
    // INodeFileWithLink#getBlockReplication should always == expectedBlockRep
    assertEquals(expectedBlockRep, ssInode.getBlockReplication());
    // Also check the number derived from INodeFile#getFileReplication
    assertEquals(snapshotRepMap.get(ss).shortValue(),
        ssInode.getFileReplication(iip.getPathSnapshotId()));
  }
}
Project: hadoop    File: FileDiffList.java
public void saveSelf2Snapshot(int latestSnapshotId, INodeFile iNodeFile,
    INodeFileAttributes snapshotCopy, boolean withBlocks) {
  final FileDiff diff =
      super.saveSelf2Snapshot(latestSnapshotId, iNodeFile, snapshotCopy);
  if (withBlocks) { // Store blocks if this is the first update
    diff.setBlocks(iNodeFile.getBlocks());
  }
}
Project: hadoop    File: FileWithSnapshotFeature.java
boolean changedBetweenSnapshots(INodeFile file, Snapshot from, Snapshot to) {
  int[] diffIndexPair = diffs.changedBetweenSnapshots(from, to);
  if (diffIndexPair == null) {
    return false;
  }
  int earlierDiffIndex = diffIndexPair[0];
  int laterDiffIndex = diffIndexPair[1];

  final List<FileDiff> diffList = diffs.asList();
  final long earlierLength = diffList.get(earlierDiffIndex).getFileSize();
  final long laterLength = laterDiffIndex == diffList.size() ? file
      .computeFileSize(true, false) : diffList.get(laterDiffIndex)
      .getFileSize();
  if (earlierLength != laterLength) { // file length has been changed
    return true;
  }

  INodeFileAttributes earlierAttr = null; // check the metadata
  for (int i = earlierDiffIndex; i < laterDiffIndex; i++) {
    FileDiff diff = diffList.get(i);
    if (diff.snapshotINode != null) {
      earlierAttr = diff.snapshotINode;
      break;
    }
  }
  if (earlierAttr == null) { // no meta-change at all, return false
    return false;
  }
  INodeFileAttributes laterAttr = diffs.getSnapshotINode(
      Math.max(Snapshot.getSnapshotId(from), Snapshot.getSnapshotId(to)),
      file);
  return !earlierAttr.metadataEquals(laterAttr);
}
Project: hadoop    File: FileDiff.java
@Override
QuotaCounts combinePosteriorAndCollectBlocks(
    BlockStoragePolicySuite bsps, INodeFile currentINode,
    FileDiff posterior, BlocksMapUpdateInfo collectedBlocks,
    final List<INode> removedINodes) {
  FileWithSnapshotFeature sf = currentINode.getFileWithSnapshotFeature();
  assert sf != null : "FileWithSnapshotFeature is null";
  return sf.updateQuotaAndCollectBlocks(
      bsps, currentINode, posterior, collectedBlocks, removedINodes);
}
Project: hadoop    File: FileDiff.java
@Override
QuotaCounts destroyDiffAndCollectBlocks(BlockStoragePolicySuite bsps, INodeFile currentINode,
    BlocksMapUpdateInfo collectedBlocks, final List<INode> removedINodes) {
  return currentINode.getFileWithSnapshotFeature()
      .updateQuotaAndCollectBlocks(bsps, currentINode, this, collectedBlocks,
          removedINodes);
}
Project: hadoop    File: FSImageFormatPBSnapshot.java
private void serializeFileDiffList(INodeFile file, OutputStream out)
    throws IOException {
  FileWithSnapshotFeature sf = file.getFileWithSnapshotFeature();
  if (sf != null) {
    List<FileDiff> diffList = sf.getDiffs().asList();
    SnapshotDiffSection.DiffEntry entry = SnapshotDiffSection.DiffEntry
        .newBuilder().setInodeId(file.getId()).setType(Type.FILEDIFF)
        .setNumOfDiff(diffList.size()).build();
    entry.writeDelimitedTo(out);
    for (int i = diffList.size() - 1; i >= 0; i--) {
      FileDiff diff = diffList.get(i);
      SnapshotDiffSection.FileDiff.Builder fb = SnapshotDiffSection.FileDiff
          .newBuilder().setSnapshotId(diff.getSnapshotId())
          .setFileSize(diff.getFileSize());
      if(diff.getBlocks() != null) {
        for(Block block : diff.getBlocks()) {
          fb.addBlocks(PBHelper.convert(block));
        }
      }
      INodeFileAttributes copy = diff.snapshotINode;
      if (copy != null) {
        fb.setName(ByteString.copyFrom(copy.getLocalNameBytes()))
            .setSnapshotCopy(buildINodeFile(copy, parent.getSaverContext()));
      }
      fb.build().writeDelimitedTo(out);
    }
  }
}
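Each diff entry above is written length-delimited (writeDelimitedTo), newest diff first, so a reader can consume the same framing with the parseDelimitedFrom methods that protobuf generates for these message classes. Below is a hedged sketch of the reverse direction for the FILEDIFF case; the actual image loading code lives elsewhere in FSImageFormatPBSnapshot, and this only illustrates the framing.

// Sketch only: read back what serializeFileDiffList wrote for one inode.
// Assumes the entry being read has type FILEDIFF; error handling omitted.
private static void readFileDiffList(InputStream in) throws IOException {
  SnapshotDiffSection.DiffEntry entry =
      SnapshotDiffSection.DiffEntry.parseDelimitedFrom(in);
  for (int i = 0; i < entry.getNumOfDiff(); i++) {
    SnapshotDiffSection.FileDiff pbDiff =
        SnapshotDiffSection.FileDiff.parseDelimitedFrom(in);
    // Each diff records the snapshot id and the file length at that snapshot,
    // plus the optional snapshot copy of the inode attributes and block list.
    System.out.println(pbDiff.getSnapshotId() + " -> " + pbDiff.getFileSize());
  }
}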
Project: hadoop    File: TestRetryCacheWithHA.java
@Override
boolean checkNamenodeBeforeReturn() throws Exception {
  INodeFile fileNode = cluster.getNameNode(0).getNamesystem()
      .getFSDirectory().getINode4Write(fileName).asFile();
  boolean fileIsUC = fileNode.isUnderConstruction();
  for (int i = 0; i < CHECKTIMES && !fileIsUC; i++) {
    Thread.sleep(1000);
    fileNode = cluster.getNameNode(0).getNamesystem().getFSDirectory()
        .getINode4Write(fileName).asFile();
    fileIsUC = fileNode.isUnderConstruction();
  }
  return fileIsUC;
}
Project: hadoop    File: TestRetryCacheWithHA.java
@Override
boolean checkNamenodeBeforeReturn() throws Exception {
  INodeFile fileNode = cluster.getNamesystem(0).getFSDirectory()
      .getINode4Write(file).asFile();
  BlockInfoContiguousUnderConstruction blkUC =
      (BlockInfoContiguousUnderConstruction) (fileNode.getBlocks())[1];
  int datanodeNum = blkUC.getExpectedStorageLocations().length;
  for (int i = 0; i < CHECKTIMES && datanodeNum != 2; i++) {
    Thread.sleep(1000);
    datanodeNum = blkUC.getExpectedStorageLocations().length;
  }
  return datanodeNum == 2;
}
Project: hadoop    File: TestSnapshotDeletion.java
/**
 * Delete a snapshot that is taken before a directory deletion (recursively),
 * directory diff list should be combined correctly.
 */
@Test (timeout=60000)
public void testDeleteSnapshot2() throws Exception {
  final Path root = new Path("/");

  Path dir = new Path("/dir1");
  Path file1 = new Path(dir, "file1");
  DFSTestUtil.createFile(hdfs, file1, BLOCKSIZE, REPLICATION, seed);

  hdfs.allowSnapshot(root);
  hdfs.createSnapshot(root, "s1");

  Path file2 = new Path(dir, "file2");
  DFSTestUtil.createFile(hdfs, file2, BLOCKSIZE, REPLICATION, seed);
  INodeFile file2Node = fsdir.getINode(file2.toString()).asFile();
  long file2NodeId = file2Node.getId();

  hdfs.createSnapshot(root, "s2");

  // delete directory recursively
  assertTrue(hdfs.delete(dir, true));
  assertNotNull(fsdir.getInode(file2NodeId));

  // delete second snapshot
  hdfs.deleteSnapshot(root, "s2");
  assertTrue(fsdir.getInode(file2NodeId) == null);

  NameNodeAdapter.enterSafeMode(cluster.getNameNode(), false);
  NameNodeAdapter.saveNamespace(cluster.getNameNode());

  // restart NN
  cluster.restartNameNodes();
}
Project: hadoop    File: TestSnapshotBlocksMap.java
static INodeFile assertBlockCollection(String path, int numBlocks,
   final FSDirectory dir, final BlockManager blkManager) throws Exception {
  final INodeFile file = INodeFile.valueOf(dir.getINode(path), path);
  assertEquals(numBlocks, file.getBlocks().length);
  for(BlockInfoContiguous b : file.getBlocks()) {
    assertBlockCollection(blkManager, file, b);
  }
  return file;
}
Project: hadoop    File: TestSnapshotBlocksMap.java
/**
 * Make sure we delete 0-sized block when deleting an INodeFileUCWithSnapshot
 */
@Test
public void testDeletionWithZeroSizeBlock() throws Exception {
  final Path foo = new Path("/foo");
  final Path bar = new Path(foo, "bar");
  DFSTestUtil.createFile(hdfs, bar, BLOCKSIZE, REPLICATION, 0L);

  SnapshotTestHelper.createSnapshot(hdfs, foo, "s0");
  hdfs.append(bar);

  INodeFile barNode = fsdir.getINode4Write(bar.toString()).asFile();
  BlockInfoContiguous[] blks = barNode.getBlocks();
  assertEquals(1, blks.length);
  assertEquals(BLOCKSIZE, blks[0].getNumBytes());
  ExtendedBlock previous = new ExtendedBlock(fsn.getBlockPoolId(), blks[0]);
  cluster.getNameNodeRpc()
      .addBlock(bar.toString(), hdfs.getClient().getClientName(), previous,
          null, barNode.getId(), null);

  SnapshotTestHelper.createSnapshot(hdfs, foo, "s1");

  barNode = fsdir.getINode4Write(bar.toString()).asFile();
  blks = barNode.getBlocks();
  assertEquals(2, blks.length);
  assertEquals(BLOCKSIZE, blks[0].getNumBytes());
  assertEquals(0, blks[1].getNumBytes());

  hdfs.delete(bar, true);
  final Path sbar = SnapshotTestHelper.getSnapshotPath(foo, "s1",
      bar.getName());
  barNode = fsdir.getINode(sbar.toString()).asFile();
  blks = barNode.getBlocks();
  assertEquals(1, blks.length);
  assertEquals(BLOCKSIZE, blks[0].getNumBytes());
}
Project: hadoop    File: TestSnapshotBlocksMap.java
/**
 * Make sure we delete 0-sized block when deleting an under-construction file
 */
@Test
public void testDeletionWithZeroSizeBlock2() throws Exception {
  final Path foo = new Path("/foo");
  final Path subDir = new Path(foo, "sub");
  final Path bar = new Path(subDir, "bar");
  DFSTestUtil.createFile(hdfs, bar, BLOCKSIZE, REPLICATION, 0L);

  hdfs.append(bar);

  INodeFile barNode = fsdir.getINode4Write(bar.toString()).asFile();
  BlockInfoContiguous[] blks = barNode.getBlocks();
  assertEquals(1, blks.length);
  ExtendedBlock previous = new ExtendedBlock(fsn.getBlockPoolId(), blks[0]);
  cluster.getNameNodeRpc()
      .addBlock(bar.toString(), hdfs.getClient().getClientName(), previous,
          null, barNode.getId(), null);

  SnapshotTestHelper.createSnapshot(hdfs, foo, "s1");

  barNode = fsdir.getINode4Write(bar.toString()).asFile();
  blks = barNode.getBlocks();
  assertEquals(2, blks.length);
  assertEquals(BLOCKSIZE, blks[0].getNumBytes());
  assertEquals(0, blks[1].getNumBytes());

  hdfs.delete(subDir, true);
  final Path sbar = SnapshotTestHelper.getSnapshotPath(foo, "s1", "sub/bar");
  barNode = fsdir.getINode(sbar.toString()).asFile();
  blks = barNode.getBlocks();
  assertEquals(1, blks.length);
  assertEquals(BLOCKSIZE, blks[0].getNumBytes());
}
Project: hadoop    File: TestSnapshotBlocksMap.java
/**
 * 1. rename under-construction file with 0-sized blocks after snapshot.
 * 2. delete the renamed directory.
 * make sure we delete the 0-sized block.
 * see HDFS-5476.
 */
@Test
public void testDeletionWithZeroSizeBlock3() throws Exception {
  final Path foo = new Path("/foo");
  final Path subDir = new Path(foo, "sub");
  final Path bar = new Path(subDir, "bar");
  DFSTestUtil.createFile(hdfs, bar, BLOCKSIZE, REPLICATION, 0L);

  hdfs.append(bar);

  INodeFile barNode = fsdir.getINode4Write(bar.toString()).asFile();
  BlockInfoContiguous[] blks = barNode.getBlocks();
  assertEquals(1, blks.length);
  ExtendedBlock previous = new ExtendedBlock(fsn.getBlockPoolId(), blks[0]);
  cluster.getNameNodeRpc()
      .addBlock(bar.toString(), hdfs.getClient().getClientName(), previous,
          null, barNode.getId(), null);

  SnapshotTestHelper.createSnapshot(hdfs, foo, "s1");

  // rename bar
  final Path bar2 = new Path(subDir, "bar2");
  hdfs.rename(bar, bar2);

  INodeFile bar2Node = fsdir.getINode4Write(bar2.toString()).asFile();
  blks = bar2Node.getBlocks();
  assertEquals(2, blks.length);
  assertEquals(BLOCKSIZE, blks[0].getNumBytes());
  assertEquals(0, blks[1].getNumBytes());

  // delete subDir
  hdfs.delete(subDir, true);

  final Path sbar = SnapshotTestHelper.getSnapshotPath(foo, "s1", "sub/bar");
  barNode = fsdir.getINode(sbar.toString()).asFile();
  blks = barNode.getBlocks();
  assertEquals(1, blks.length);
  assertEquals(BLOCKSIZE, blks[0].getNumBytes());
}
Project: hadoop    File: TestSnapshotBlocksMap.java
/**
 * Make sure that a delete of a non-zero-length file which results in a
 * zero-length file in a snapshot works.
 */
@Test
public void testDeletionOfLaterBlocksWithZeroSizeFirstBlock() throws Exception {
  final Path foo = new Path("/foo");
  final Path bar = new Path(foo, "bar");
  final byte[] testData = "foo bar baz".getBytes();

  // Create a zero-length file.
  DFSTestUtil.createFile(hdfs, bar, 0, REPLICATION, 0L);
  assertEquals(0, fsdir.getINode4Write(bar.toString()).asFile().getBlocks().length);

  // Create a snapshot that includes that file.
  SnapshotTestHelper.createSnapshot(hdfs, foo, "s0");

  // Extend that file.
  FSDataOutputStream out = hdfs.append(bar);
  out.write(testData);
  out.close();
  INodeFile barNode = fsdir.getINode4Write(bar.toString()).asFile();
  BlockInfoContiguous[] blks = barNode.getBlocks();
  assertEquals(1, blks.length);
  assertEquals(testData.length, blks[0].getNumBytes());

  // Delete the file.
  hdfs.delete(bar, true);

  // Now make sure that the NN can still save an fsimage successfully.
  cluster.getNameNode().getRpcServer().setSafeMode(
      SafeModeAction.SAFEMODE_ENTER, false);
  cluster.getNameNode().getRpcServer().saveNamespace();
}
Project: hadoop    File: TestRenameWithSnapshots.java
/**
 * Rename a single file across snapshottable dirs.
 */
@Test (timeout=60000)
public void testRenameFileAcrossSnapshottableDirs() throws Exception {
  final Path sdir1 = new Path("/dir1");
  final Path sdir2 = new Path("/dir2");
  hdfs.mkdirs(sdir1);
  hdfs.mkdirs(sdir2);
  final Path foo = new Path(sdir2, "foo");
  DFSTestUtil.createFile(hdfs, foo, BLOCKSIZE, REPL, SEED);

  SnapshotTestHelper.createSnapshot(hdfs, sdir1, "s1");
  SnapshotTestHelper.createSnapshot(hdfs, sdir2, "s2");
  hdfs.createSnapshot(sdir1, "s3");

  final Path newfoo = new Path(sdir1, "foo");
  hdfs.rename(foo, newfoo);

  // change the replication factor of foo
  hdfs.setReplication(newfoo, REPL_1);

  // /dir2/.snapshot/s2/foo should still work
  final Path foo_s2 = SnapshotTestHelper.getSnapshotPath(sdir2, "s2",
      "foo");
  assertTrue(hdfs.exists(foo_s2));
  FileStatus status = hdfs.getFileStatus(foo_s2);
  assertEquals(REPL, status.getReplication());

  final Path foo_s3 = SnapshotTestHelper.getSnapshotPath(sdir1, "s3",
      "foo");
  assertFalse(hdfs.exists(foo_s3));
  INodeDirectory sdir2Node = fsdir.getINode(sdir2.toString()).asDirectory();
  Snapshot s2 = sdir2Node.getSnapshot(DFSUtil.string2Bytes("s2"));
  INodeFile sfoo = fsdir.getINode(newfoo.toString()).asFile();
  assertEquals(s2.getId(), sfoo.getDiffs().getLastSnapshotId());
}
Project: hadoop    File: TestRenameWithSnapshots.java
/**
 * This test demonstrates that 
 * {@link INodeDirectory#removeChild}
 * and 
 * {@link INodeDirectory#addChild}
 * should use {@link INode#isInLatestSnapshot} to check if the
 * added/removed child should be recorded in snapshots.
 */
@Test
public void testRenameDirAndDeleteSnapshot_5() throws Exception {
  final Path dir1 = new Path("/dir1");
  final Path dir2 = new Path("/dir2");
  final Path dir3 = new Path("/dir3");
  hdfs.mkdirs(dir1);
  hdfs.mkdirs(dir2);
  hdfs.mkdirs(dir3);

  final Path foo = new Path(dir1, "foo");
  hdfs.mkdirs(foo);
  SnapshotTestHelper.createSnapshot(hdfs, dir1, "s1");
  final Path bar = new Path(foo, "bar");
  // create file bar, and foo will become an INodeDirectory with snapshot
  DFSTestUtil.createFile(hdfs, bar, BLOCKSIZE, REPL, SEED);
  // delete snapshot s1. now foo is not in any snapshot
  hdfs.deleteSnapshot(dir1, "s1");

  SnapshotTestHelper.createSnapshot(hdfs, dir2, "s2");
  // rename /dir1/foo to /dir2/foo
  final Path foo2 = new Path(dir2, foo.getName());
  hdfs.rename(foo, foo2);
  // rename /dir2/foo/bar to /dir3/foo/bar
  final Path bar2 = new Path(dir2, "foo/bar");
  final Path bar3 = new Path(dir3, "bar");
  hdfs.rename(bar2, bar3);

  // delete /dir2/foo. Since it is not in any snapshot, we will call its 
  // destroy function. If we do not use isInLatestSnapshot in removeChild and
  // addChild methods in INodeDirectory (with snapshot), the file bar will be 
  // stored in the deleted list of foo, and will be destroyed.
  hdfs.delete(foo2, true);

  // check if /dir3/bar still exists
  assertTrue(hdfs.exists(bar3));
  INodeFile barNode = (INodeFile) fsdir.getINode4Write(bar3.toString());
  assertSame(fsdir.getINode4Write(dir3.toString()), barNode.getParent());
}
Project: hadoop    File: TestSnapshotReplication.java
/**
 * Check the replication of a given file. We test both
 * {@link INodeFile#getFileReplication()} and
 * {@link INodeFile#getBlockReplication()}.
 *
 * @param file The given file
 * @param replication The expected replication number
 * @param blockReplication The expected replication number for the block
 * @throws Exception
 */
private void checkFileReplication(Path file, short replication,
    short blockReplication) throws Exception {
  // Get the FileStatus of the given file, and identify its replication number.
  // Note that the replication number in FileStatus was derived from
  // INodeFile#getFileReplication().
  short fileReplication = hdfs.getFileStatus(file).getReplication();
  assertEquals(replication, fileReplication);
  // Check the correctness of getBlockReplication()
  INode inode = fsdir.getINode(file.toString());
  assertTrue(inode instanceof INodeFile);
  assertEquals(blockReplication, ((INodeFile) inode).getBlockReplication());
}
Project: hadoop    File: TestSnapshotReplication.java
/**
 * Test replication for a file with snapshots, also including the scenario
 * where the original file is deleted
 */
@Test (timeout=60000)
public void testReplicationAfterDeletion() throws Exception {
  // Create file1, set its replication to 3
  DFSTestUtil.createFile(hdfs, file1, BLOCKSIZE, REPLICATION, seed);
  Map<Path, Short> snapshotRepMap = new HashMap<Path, Short>();
  // Take 3 snapshots of sub1
  for (int i = 1; i <= 3; i++) {
    Path root = SnapshotTestHelper.createSnapshot(hdfs, sub1, "s" + i);
    Path ssFile = new Path(root, file1.getName());
    snapshotRepMap.put(ssFile, REPLICATION);
  }
  // Check replication
  checkFileReplication(file1, REPLICATION, REPLICATION);
  checkSnapshotFileReplication(file1, snapshotRepMap, REPLICATION);

  // Delete file1
  hdfs.delete(file1, true);
  // Check replication of snapshots
  for (Path ss : snapshotRepMap.keySet()) {
    final INodeFile ssInode = getINodeFile(ss);
    // The replication number derived from the
    // INodeFileWithLink#getBlockReplication should always == expectedBlockRep
    assertEquals(REPLICATION, ssInode.getBlockReplication());
    // Also check the number derived from INodeFile#getFileReplication
    assertEquals(snapshotRepMap.get(ss).shortValue(),
        ssInode.getFileReplication());
  }
}
Project: aliyun-oss-hadoop-fs    File: FileDiffList.java
public void saveSelf2Snapshot(int latestSnapshotId, INodeFile iNodeFile,
    INodeFileAttributes snapshotCopy, boolean withBlocks) {
  final FileDiff diff =
      super.saveSelf2Snapshot(latestSnapshotId, iNodeFile, snapshotCopy);
  if (withBlocks) {  // Store blocks if this is the first update
    BlockInfo[] blks = iNodeFile.getBlocks();
    assert blks != null;
    diff.setBlocks(blks);
  }
}