Java class org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoContiguous: usage examples

The snippets below, collected from the hadoop and aliyun-oss-hadoop-fs projects, show how BlockInfoContiguous is used in the HDFS NameNode code base and its tests.

Project: hadoop    File: INodeFile.java
@Override // BlockCollection, the file should be under construction
public BlockInfoContiguousUnderConstruction setLastBlock(
    BlockInfoContiguous lastBlock, DatanodeStorageInfo[] locations)
    throws IOException {
  Preconditions.checkState(isUnderConstruction(),
      "file is no longer under construction");

  if (numBlocks() == 0) {
    throw new IOException("Failed to set last block: File is empty.");
  }
  BlockInfoContiguousUnderConstruction ucBlock =
    lastBlock.convertToBlockUnderConstruction(
        BlockUCState.UNDER_CONSTRUCTION, locations);
  setBlock(numBlocks() - 1, ucBlock);
  return ucBlock;
}
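
A context note: when a file is reopened for append or its last block needs recovery, the NameNode swaps the completed last block for an under-construction copy so the expected replica locations can be tracked while the block is rewritten; the Preconditions check enforces that only a file already marked under construction may do this.
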
Project: hadoop    File: INodeFile.java
/**
 * Remove a block from the block list. This block should be
 * the last one on the list.
 */
boolean removeLastBlock(Block oldblock) {
  Preconditions.checkState(isUnderConstruction(),
      "file is no longer under construction");
  if (blocks == null || blocks.length == 0) {
    return false;
  }
  int size_1 = blocks.length - 1;
  if (!blocks[size_1].equals(oldblock)) {
    return false;
  }

  //copy to a new list
  BlockInfoContiguous[] newlist = new BlockInfoContiguous[size_1];
  System.arraycopy(blocks, 0, newlist, 0, size_1);
  setBlocks(newlist);
  return true;
}
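
The copy-to-a-shorter-array idiom above can be exercised in isolation. A minimal, runnable sketch (plain String elements standing in for BlockInfoContiguous; names are illustrative), showing that Arrays.copyOf is equivalent to the new-array-plus-System.arraycopy pair:

import java.util.Arrays;

public class DropLastDemo {
  public static void main(String[] args) {
    String[] blocks = {"blk_1", "blk_2", "blk_3"};
    // Same effect as allocating String[2] and calling
    // System.arraycopy(blocks, 0, newlist, 0, 2), as removeLastBlock() does.
    String[] shrunk = Arrays.copyOf(blocks, blocks.length - 1);
    System.out.println(Arrays.toString(shrunk)); // prints [blk_1, blk_2]
  }
}
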
Project: hadoop    File: INodeFile.java
/**
 * Append an array of blocks to this.blocks.
 */
void concatBlocks(INodeFile[] inodes) {
  int size = this.blocks.length;
  int totalAddedBlocks = 0;
  for(INodeFile f : inodes) {
    totalAddedBlocks += f.blocks.length;
  }

  BlockInfoContiguous[] newlist =
      new BlockInfoContiguous[size + totalAddedBlocks];
  System.arraycopy(this.blocks, 0, newlist, 0, size);

  for(INodeFile in: inodes) {
    System.arraycopy(in.blocks, 0, newlist, size, in.blocks.length);
    size += in.blocks.length;
  }

  setBlocks(newlist);
  updateBlockCollection();
}
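
concatBlocks() sizes the destination array once, copies this file's blocks, then appends each source file's blocks while advancing a running offset. The same pattern in a self-contained sketch (String arrays standing in for the block arrays; names are illustrative):

import java.util.Arrays;

public class ConcatDemo {
  static String[] concat(String[] first, String[]... rest) {
    int total = first.length;
    for (String[] arr : rest) {
      total += arr.length;
    }
    String[] out = new String[total];
    System.arraycopy(first, 0, out, 0, first.length);
    int offset = first.length;
    for (String[] arr : rest) {
      // Append each source at the current offset, as the loop over inodes does.
      System.arraycopy(arr, 0, out, offset, arr.length);
      offset += arr.length;
    }
    return out;
  }

  public static void main(String[] args) {
    String[] merged = concat(new String[]{"a"}, new String[]{"b", "c"}, new String[]{"d"});
    System.out.println(Arrays.toString(merged)); // prints [a, b, c, d]
  }
}
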
Project: hadoop    File: INodeFile.java
@Override
public void destroyAndCollectBlocks(BlockStoragePolicySuite bsps,
    BlocksMapUpdateInfo collectedBlocks, final List<INode> removedINodes) {
  if (blocks != null && collectedBlocks != null) {
    for (BlockInfoContiguous blk : blocks) {
      collectedBlocks.addDeleteBlock(blk);
      blk.setBlockCollection(null);
    }
  }
  setBlocks(BlockInfoContiguous.EMPTY_ARRAY);
  if (getAclFeature() != null) {
    AclStorage.removeAclFeature(getAclFeature());
  }
  clear();
  removedINodes.add(this);
  FileWithSnapshotFeature sf = getFileWithSnapshotFeature();
  if (sf != null) {
    sf.getDiffs().destroyAndCollectSnapshotBlocks(collectedBlocks);
    sf.clearDiffs();
  }
}
Project: hadoop    File: INodeFile.java
public void collectBlocksBeyondSnapshot(BlockInfoContiguous[] snapshotBlocks,
                                        BlocksMapUpdateInfo collectedBlocks) {
  BlockInfoContiguous[] oldBlocks = getBlocks();
  if(snapshotBlocks == null || oldBlocks == null)
    return;
  // Skip blocks in common between the file and the snapshot
  int n = 0;
  while(n < oldBlocks.length && n < snapshotBlocks.length &&
        oldBlocks[n] == snapshotBlocks[n]) {
    n++;
  }
  truncateBlocksTo(n);
  // Collect the remaining blocks of the file
  while(n < oldBlocks.length) {
    collectedBlocks.addDeleteBlock(oldBlocks[n++]);
  }
}
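
Note that the shared-prefix scan compares blocks with == rather than equals(): a file and its snapshot diffs are expected to reference the same BlockInfoContiguous instances for blocks they still share, so identity comparison is the appropriate test for sharing here.
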
Project: hadoop    File: INodeFile.java
/** Exclude blocks collected for deletion that belong to a snapshot. */
void excludeSnapshotBlocks(int snapshotId,
                           BlocksMapUpdateInfo collectedBlocks) {
  if(collectedBlocks == null || collectedBlocks.getToDeleteList().isEmpty())
    return;
  FileWithSnapshotFeature sf = getFileWithSnapshotFeature();
  if(sf == null)
    return;
  BlockInfoContiguous[] snapshotBlocks =
      getDiffs().findEarlierSnapshotBlocks(snapshotId);
  if(snapshotBlocks == null)
    return;
  List<Block> toDelete = collectedBlocks.getToDeleteList();
  for(Block blk : snapshotBlocks) {
    if(toDelete.contains(blk))
      collectedBlocks.removeDeleteBlock(blk);
  }
}
Project: hadoop    File: FSNamesystem.java
/** Compute quota change for converting a complete block to a UC block */
private QuotaCounts computeQuotaDeltaForUCBlock(INodeFile file) {
  final QuotaCounts delta = new QuotaCounts.Builder().build();
  final BlockInfoContiguous lastBlock = file.getLastBlock();
  if (lastBlock != null) {
    final long diff = file.getPreferredBlockSize() - lastBlock.getNumBytes();
    final short repl = file.getBlockReplication();
    delta.addStorageSpace(diff * repl);
    final BlockStoragePolicy policy = dir.getBlockStoragePolicySuite()
        .getPolicy(file.getStoragePolicyID());
    List<StorageType> types = policy.chooseStorageTypes(repl);
    for (StorageType t : types) {
      if (t.supportTypeQuota()) {
        delta.addTypeSpace(t, diff);
      }
    }
  }
  return delta;
}
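
A worked example of the delta (numbers are illustrative): with a 128 MB preferred block size, a last block currently holding 40 MB, and replication 3, diff is 88 MB, so the method adds 88 MB * 3 = 264 MB of storage-space quota, plus 88 MB of type quota for each storage type in the policy that enforces type quotas.
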
Project: hadoop    File: FSNamesystem.java
/**
 * Check that the indicated file's blocks are present and
 * replicated. If not, return false. If checkall is true, check
 * all blocks; otherwise check only the penultimate block.
 */
boolean checkFileProgress(String src, INodeFile v, boolean checkall) {
  if (checkall) {
    // check all blocks of the file.
    for (BlockInfoContiguous block: v.getBlocks()) {
      if (!isCompleteBlock(src, block, blockManager.minReplication)) {
        return false;
      }
    }
  } else {
    // check the penultimate block of this file
    BlockInfoContiguous b = v.getPenultimateBlock();
    if (b != null
        && !isCompleteBlock(src, b, blockManager.minReplication)) {
      return false;
    }
  }
  return true;
}
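
The penultimate-only mode matches how block allocation works: before the NameNode hands out a new block, the previously written block, which becomes the penultimate one, must have reached minimum replication, while the current last block is still being streamed by the client.
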
Project: hadoop    File: FSNamesystem.java
/**
 * @param pendingFile open file that needs to be closed
 * @param storedBlock last block
 * @return Path of the file that was closed.
 * @throws IOException on error
 */
@VisibleForTesting
String closeFileCommitBlocks(INodeFile pendingFile, BlockInfoContiguous storedBlock)
    throws IOException {
  final INodesInPath iip = INodesInPath.fromINode(pendingFile);
  final String src = iip.getPath();

  // commit the last block and complete it if it has minimum replicas
  commitOrCompleteLastBlock(pendingFile, iip, storedBlock);

  //remove lease, close file
  finalizeINodeFileUnderConstruction(src, pendingFile,
      Snapshot.findLatestSnapshot(pendingFile, Snapshot.CURRENT_STATE_ID));

  return src;
}
Project: hadoop    File: FileDiffList.java
public BlockInfoContiguous[] findEarlierSnapshotBlocks(int snapshotId) {
  assert snapshotId != Snapshot.NO_SNAPSHOT_ID : "Wrong snapshot id";
  if(snapshotId == Snapshot.CURRENT_STATE_ID) {
    return null;
  }
  List<FileDiff> diffs = this.asList();
  int i = Collections.binarySearch(diffs, snapshotId);
  BlockInfoContiguous[] blocks = null;
  for(i = i >= 0 ? i : -i-2; i >= 0; i--) {
    blocks = diffs.get(i).getBlocks();
    if(blocks != null) {
      break;
    }
  }
  return blocks;
}
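
When snapshotId is not present in the diff list, Collections.binarySearch returns -(insertionPoint) - 1, so -i-2 converts the result to the index of the last diff whose id is smaller than snapshotId; the loop then walks backwards until it finds a diff that actually recorded a block list.
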
Project: hadoop    File: FileDiffList.java
public BlockInfoContiguous[] findLaterSnapshotBlocks(int snapshotId) {
  assert snapshotId != Snapshot.NO_SNAPSHOT_ID : "Wrong snapshot id";
  if(snapshotId == Snapshot.CURRENT_STATE_ID) {
    return null;
  }
  List<FileDiff> diffs = this.asList();
  int i = Collections.binarySearch(diffs, snapshotId);
  BlockInfoContiguous[] blocks = null;
  for(i = i >= 0 ? i+1 : -i-1; i < diffs.size(); i++) {
    blocks = diffs.get(i).getBlocks();
    if(blocks != null) {
      break;
    }
  }
  return blocks;
}
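
The two index conversions can be checked with a plain sorted list standing in for the FileDiff list. A minimal, runnable sketch:

import java.util.Arrays;
import java.util.Collections;
import java.util.List;

public class BinarySearchIndexDemo {
  public static void main(String[] args) {
    List<Integer> diffIds = Arrays.asList(10, 20, 30);
    int i = Collections.binarySearch(diffIds, 25); // 25 absent: returns -3
    // Earlier direction: the hit itself, or the last id below the key.
    int earlier = i >= 0 ? i : -i - 2;   // -> index 1 (id 20)
    // Later direction: one past the hit, or the first id above the key.
    int later = i >= 0 ? i + 1 : -i - 1; // -> index 2 (id 30)
    System.out.println("earlier=" + diffIds.get(earlier)
        + " later=" + diffIds.get(later)); // earlier=20 later=30
  }
}
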
Project: hadoop    File: FileWithSnapshotFeature.java
/**
 * If some blocks at the end of the block list no longer belong to
 * any inode, collect them and update the block list.
 */
public void collectBlocksAndClear(final BlockStoragePolicySuite bsps, final INodeFile file,
    final BlocksMapUpdateInfo info, final List<INode> removedINodes) {
  // check if everything is deleted.
  if (isCurrentFileDeleted() && getDiffs().asList().isEmpty()) {
    file.destroyAndCollectBlocks(bsps, info, removedINodes);
    return;
  }
  // find max file size.
  final long max;
  FileDiff diff = getDiffs().getLast();
  if (isCurrentFileDeleted()) {
    max = diff == null ? 0 : diff.getFileSize();
  } else {
    max = file.computeFileSize();
  }

  // Collect blocks that should be deleted
  FileDiff last = diffs.getLast();
  BlockInfoContiguous[] snapshotBlocks = last == null ? null : last.getBlocks();
  if(snapshotBlocks == null)
    file.collectBlocksBeyondMax(max, info);
  else
    file.collectBlocksBeyondSnapshot(snapshotBlocks, info);
}
Project: hadoop    File: DFSTestUtil.java
/**
 * @return the node which is expected to run the recovery of the
 * given block, which is known to be under construction inside the
 * given NameNode.
 */
public static DatanodeDescriptor getExpectedPrimaryNode(NameNode nn,
    ExtendedBlock blk) {
  BlockManager bm0 = nn.getNamesystem().getBlockManager();
  BlockInfoContiguous storedBlock = bm0.getStoredBlock(blk.getLocalBlock());
  assertTrue("Block " + blk + " should be under construction, " +
      "got: " + storedBlock,
      storedBlock instanceof BlockInfoContiguousUnderConstruction);
  BlockInfoContiguousUnderConstruction ucBlock =
    (BlockInfoContiguousUnderConstruction)storedBlock;
  // We expect that the replica with the most recent heart beat will be
  // the one to be in charge of the synchronization / recovery protocol.
  final DatanodeStorageInfo[] storages = ucBlock.getExpectedStorageLocations();
  DatanodeStorageInfo expectedPrimary = storages[0];
  long mostRecentLastUpdate = expectedPrimary.getDatanodeDescriptor()
      .getLastUpdateMonotonic();
  for (int i = 1; i < storages.length; i++) {
    final long lastUpdate = storages[i].getDatanodeDescriptor()
        .getLastUpdateMonotonic();
    if (lastUpdate > mostRecentLastUpdate) {
      expectedPrimary = storages[i];
      mostRecentLastUpdate = lastUpdate;
    }
  }
  return expectedPrimary.getDatanodeDescriptor();
}
Project: hadoop    File: TestCommitBlockSynchronization.java
@Test
public void testCommitBlockSynchronizationWithClose() throws IOException {
  INodeFile file = mockFileUnderConstruction();
  Block block = new Block(blockId, length, genStamp);
  FSNamesystem namesystemSpy = makeNameSystemSpy(block, file);
  DatanodeID[] newTargets = new DatanodeID[0];

  ExtendedBlock lastBlock = new ExtendedBlock();
  namesystemSpy.commitBlockSynchronization(
      lastBlock, genStamp, length, true, false, newTargets, null);

  // Repeat the call to make sure it returns true
  namesystemSpy.commitBlockSynchronization(
      lastBlock, genStamp, length, true, false, newTargets, null);

  BlockInfoContiguous completedBlockInfo = new BlockInfoContiguous(block, (short) 1);
  completedBlockInfo.setBlockCollection(file);
  completedBlockInfo.setGenerationStamp(genStamp);
  doReturn(completedBlockInfo).when(namesystemSpy)
      .getStoredBlock(any(Block.class));
  doReturn(completedBlockInfo).when(file).getLastBlock();

  namesystemSpy.commitBlockSynchronization(
      lastBlock, genStamp, length, true, false, newTargets, null);
}
Project: hadoop    File: TestINodeFile.java
/** 
 * Creates the required number of files with one block each
 * @param nCount Number of INodes to create
 * @return Array of INode files
 */
private INodeFile[] createINodeFiles(int nCount, String fileNamePrefix) {
  if(nCount <= 0)
    return new INodeFile[1];

  replication = 3;
  preferredBlockSize = 128 * 1024 * 1024;
  INodeFile[] iNodes = new INodeFile[nCount];
  for (int i = 0; i < nCount; i++) {
    iNodes[i] = new INodeFile(i, null, perm, 0L, 0L, null, replication,
        preferredBlockSize, (byte)0);
    iNodes[i].setLocalName(DFSUtil.string2Bytes(fileNamePrefix + i));
    BlockInfoContiguous newblock = new BlockInfoContiguous(replication);
    iNodes[i].addBlock(newblock);
  }

  return iNodes;
}
Project: hadoop    File: TestGetBlockLocations.java
private static FSNamesystem setupFileSystem() throws IOException {
  Configuration conf = new Configuration();
  conf.setLong(DFS_NAMENODE_ACCESSTIME_PRECISION_KEY, 1L);
  FSEditLog editlog = mock(FSEditLog.class);
  FSImage image = mock(FSImage.class);
  when(image.getEditLog()).thenReturn(editlog);
  final FSNamesystem fsn = new FSNamesystem(conf, image, true);

  final FSDirectory fsd = fsn.getFSDirectory();
  INodesInPath iip = fsd.getINodesInPath("/", true);
  PermissionStatus perm = new PermissionStatus(
      "hdfs", "supergroup",
      FsPermission.createImmutable((short) 0x1ff));
  final INodeFile file = new INodeFile(
      MOCK_INODE_ID, FILE_NAME.getBytes(Charsets.UTF_8),
      perm, 1, 1, new BlockInfoContiguous[] {}, (short) 1,
      DFS_BLOCK_SIZE_DEFAULT);
  fsn.getFSDirectory().addINode(iip, file);
  return fsn;
}
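
The permission mask (short) 0x1ff is 511 decimal, i.e. octal 0777, so the test inode is created with rwxrwxrwx permissions.
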
Project: hadoop    File: TestEditLog.java
@Override
public void run() {
  PermissionStatus p = namesystem.createFsOwnerPermissions(
                                      new FsPermission((short)0777));
  FSEditLog editLog = namesystem.getEditLog();

  for (int i = 0; i < numTransactions; i++) {
    INodeFile inode = new INodeFile(namesystem.dir.allocateNewInodeId(), null,
        p, 0L, 0L, BlockInfoContiguous.EMPTY_ARRAY, replication, blockSize);
    inode.toUnderConstruction("", "");

    editLog.logOpenFile("/filename" + (startIndex + i), inode, false, false);
    editLog.logCloseFile("/filename" + (startIndex + i), inode);
    editLog.logSync();
  }
}
Project: aliyun-oss-hadoop-fs    File: TestCommitBlockSynchronization.java
@Test
public void testCommitBlockSynchronizationWithClose() throws IOException {
  INodeFile file = mockFileUnderConstruction();
  Block block = new Block(blockId, length, genStamp);
  FSNamesystem namesystemSpy = makeNameSystemSpy(block, file);
  DatanodeID[] newTargets = new DatanodeID[0];

  ExtendedBlock lastBlock = new ExtendedBlock();
  namesystemSpy.commitBlockSynchronization(
      lastBlock, genStamp, length, true, false, newTargets, null);

  // Repeat the call to make sure it returns true
  namesystemSpy.commitBlockSynchronization(
      lastBlock, genStamp, length, true, false, newTargets, null);

  BlockInfo completedBlockInfo = new BlockInfoContiguous(block, (short) 1);
  completedBlockInfo.setBlockCollectionId(file.getId());
  completedBlockInfo.setGenerationStamp(genStamp);
  doReturn(completedBlockInfo).when(namesystemSpy)
      .getStoredBlock(any(Block.class));
  doReturn(completedBlockInfo).when(file).getLastBlock();

  namesystemSpy.commitBlockSynchronization(
      lastBlock, genStamp, length, true, false, newTargets, null);
}
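
Compared with the hadoop variant of this test above, this fork (apparently tracking a later Hadoop code line) types the completed block as the BlockInfo base class and links it to its file by id, via setBlockCollectionId(file.getId()), instead of installing a direct setBlockCollection(file) back-pointer.
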
Project: aliyun-oss-hadoop-fs    File: TestINodeFile.java
/** 
 * Creates the required number of files with one block each
 * @param nCount Number of INodes to create
 * @return Array of INode files
 */
private INodeFile[] createINodeFiles(int nCount, String fileNamePrefix) {
  if(nCount <= 0)
    return new INodeFile[1];

  replication = 3;
  preferredBlockSize = 128 * 1024 * 1024;
  INodeFile[] iNodes = new INodeFile[nCount];
  for (int i = 0; i < nCount; i++) {
    iNodes[i] = new INodeFile(i, null, perm, 0L, 0L, null, replication,
        preferredBlockSize);
    iNodes[i].setLocalName(DFSUtil.string2Bytes(fileNamePrefix + i));
    BlockInfo newblock = new BlockInfoContiguous(replication);
    iNodes[i].addBlock(newblock);
  }

  return iNodes;
}
Project: hadoop    File: FSEditLogLoader.java
/**
 * Add a new block into the given INodeFile
 */
private void addNewBlock(FSDirectory fsDir, AddBlockOp op, INodeFile file)
    throws IOException {
  BlockInfoContiguous[] oldBlocks = file.getBlocks();
  Block pBlock = op.getPenultimateBlock();
  Block newBlock = op.getLastBlock();

  if (pBlock != null) { // the penultimate block is not null
    Preconditions.checkState(oldBlocks != null && oldBlocks.length > 0);
    // compare pBlock with the last block of oldBlocks
    Block oldLastBlock = oldBlocks[oldBlocks.length - 1];
    if (oldLastBlock.getBlockId() != pBlock.getBlockId()
        || oldLastBlock.getGenerationStamp() != pBlock.getGenerationStamp()) {
      throw new IOException(
          "Mismatched block IDs or generation stamps for the old last block of file "
              + op.getPath() + ", the old last block is " + oldLastBlock
              + ", and the block read from editlog is " + pBlock);
    }

    oldLastBlock.setNumBytes(pBlock.getNumBytes());
    if (oldLastBlock instanceof BlockInfoContiguousUnderConstruction) {
      fsNamesys.getBlockManager().forceCompleteBlock(file,
          (BlockInfoContiguousUnderConstruction) oldLastBlock);
      fsNamesys.getBlockManager().processQueuedMessagesForBlock(pBlock);
    }
  } else { // the penultimate block is null
    Preconditions.checkState(oldBlocks == null || oldBlocks.length == 0);
  }
  // add the new block
  BlockInfoContiguous newBI = new BlockInfoContiguousUnderConstruction(
        newBlock, file.getBlockReplication());
  fsNamesys.getBlockManager().addBlockCollection(newBI, file);
  file.addBlock(newBI);
  fsNamesys.getBlockManager().processQueuedMessagesForBlock(newBlock);
}
Project: hadoop    File: FileUnderConstructionFeature.java
/**
 * When deleting a file in the current fs directory, and the file is contained
 * in a snapshot, we should delete the last block if it's under construction
 * and its size is 0.
 */
void cleanZeroSizeBlock(final INodeFile f,
    final BlocksMapUpdateInfo collectedBlocks) {
  final BlockInfoContiguous[] blocks = f.getBlocks();
  if (blocks != null && blocks.length > 0
      && blocks[blocks.length - 1] instanceof BlockInfoContiguousUnderConstruction) {
    BlockInfoContiguousUnderConstruction lastUC =
        (BlockInfoContiguousUnderConstruction) blocks[blocks.length - 1];
    if (lastUC.getNumBytes() == 0) {
      // this is a 0-sized block. do not need check its UC state here
      collectedBlocks.addDeleteBlock(lastUC);
      f.removeLastBlock(lastUC);
    }
  }
}
Project: hadoop    File: FSDirectory.java
private static INodeFile newINodeFile(long id, PermissionStatus permissions,
    long mtime, long atime, short replication, long preferredBlockSize,
    byte storagePolicyId) {
  return new INodeFile(id, null, permissions, mtime, atime,
      BlockInfoContiguous.EMPTY_ARRAY, replication, preferredBlockSize,
      storagePolicyId);
}
Project: hadoop    File: FSDirectory.java
/**
 * Add a block to the file. Returns a reference to the added block.
 */
BlockInfoContiguous addBlock(String path, INodesInPath inodesInPath,
    Block block, DatanodeStorageInfo[] targets) throws IOException {
  writeLock();
  try {
    final INodeFile fileINode = inodesInPath.getLastINode().asFile();
    Preconditions.checkState(fileINode.isUnderConstruction());

    // check quota limits and update space consumed
    updateCount(inodesInPath, 0, fileINode.getPreferredBlockSize(),
        fileINode.getBlockReplication(), true);

    // associate new last block for the file
    BlockInfoContiguousUnderConstruction blockInfo =
      new BlockInfoContiguousUnderConstruction(
          block,
          fileINode.getFileReplication(),
          BlockUCState.UNDER_CONSTRUCTION,
          targets);
    getBlockManager().addBlockCollection(blockInfo, fileINode);
    fileINode.addBlock(blockInfo);

    if(NameNode.stateChangeLog.isDebugEnabled()) {
      NameNode.stateChangeLog.debug("DIR* FSDirectory.addBlock: "
          + path + " with " + block
          + " block is added to the in-memory "
          + "file system");
    }
    return blockInfo;
  } finally {
    writeUnlock();
  }
}
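
Note that updateCount charges quota for a full preferred block size up front even though the newly allocated block is empty; the difference between the preferred size and the block's actual final length is presumably reconciled when the last block is committed or completed (see closeFileCommitBlocks above).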