Example source code for the Java class org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo

Project: aliyun-oss-hadoop-fs    File: FSDirAppendOp.java
/** Compute quota change for converting a complete block to a UC block. */
private static QuotaCounts computeQuotaDeltaForUCBlock(FSNamesystem fsn,
    INodeFile file) {
  final QuotaCounts delta = new QuotaCounts.Builder().build();
  final BlockInfo lastBlock = file.getLastBlock();
  if (lastBlock != null) {
    final long diff = file.getPreferredBlockSize() - lastBlock.getNumBytes();
    final short repl = lastBlock.getReplication();
    delta.addStorageSpace(diff * repl);
    final BlockStoragePolicy policy = fsn.getFSDirectory()
        .getBlockStoragePolicySuite().getPolicy(file.getStoragePolicyID());
    List<StorageType> types = policy.chooseStorageTypes(repl);
    for (StorageType t : types) {
      if (t.supportTypeQuota()) {
        delta.addTypeSpace(t, diff);
      }
    }
  }
  return delta;
}
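
The delta above reserves the unwritten tail of the last block once per replica: converting a complete block back to under-construction means quota must again cover growth up to the preferred block size. A minimal standalone sketch of that arithmetic (plain Java with made-up sizes, not the HDFS API):

public class UcQuotaDeltaDemo {
  public static void main(String[] args) {
    final long preferredBlockSize = 128L * 1024 * 1024; // assumed 128 MB preferred size
    final long lastBlockNumBytes = 100L * 1024 * 1024;  // assumed bytes already written
    final short replication = 3;
    // The block may grow by (preferred - current) bytes, on every replica.
    final long diff = preferredBlockSize - lastBlockNumBytes;
    System.out.println("storage space delta = " + (diff * replication));
  }
}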
Project: aliyun-oss-hadoop-fs    File: INodeFile.java
/**
 * Remove a block from the block list. This block should be
 * the last one on the list.
 */
BlockInfo removeLastBlock(Block oldblock) {
  Preconditions.checkState(isUnderConstruction(),
      "file is no longer under construction");
  if (blocks == null || blocks.length == 0) {
    return null;
  }
  int size_1 = blocks.length - 1;
  if (!blocks[size_1].equals(oldblock)) {
    return null;
  }

  BlockInfo ucBlock = blocks[size_1];
  //copy to a new list
  BlockInfo[] newlist = new BlockInfo[size_1];
  System.arraycopy(blocks, 0, newlist, 0, size_1);
  setBlocks(newlist);
  return ucBlock;
}
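
The copy-then-setBlocks above is the usual drop-last-element idiom for arrays that are replaced rather than mutated. As a hedged aside, the same truncation can be written with java.util.Arrays (illustrative sketch, not INodeFile code):

import java.util.Arrays;

public class DropLastDemo {
  public static void main(String[] args) {
    String[] blocks = {"blk_1", "blk_2", "blk_3"};
    String removed = blocks[blocks.length - 1];                    // block being removed
    String[] truncated = Arrays.copyOf(blocks, blocks.length - 1); // new, shorter list
    System.out.println(removed + " removed, left: " + Arrays.toString(truncated));
  }
}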
Project: aliyun-oss-hadoop-fs    File: INodeFile.java
/** @return blocks of the file corresponding to the snapshot. */
public BlockInfo[] getBlocks(int snapshot) {
  if (snapshot == CURRENT_STATE_ID || getDiffs() == null) {
    return getBlocks();
  }
  // find blocks stored in snapshot diffs (for truncate)
  FileDiff diff = getDiffs().getDiffById(snapshot);
  // note that currently FileDiff can only store contiguous blocks
  BlockInfo[] snapshotBlocks = diff == null ? getBlocks() : diff.getBlocks();
  if (snapshotBlocks != null) {
    return snapshotBlocks;
  }
  // Blocks are not in the current snapshot
  // Find next snapshot with blocks present or return current file blocks
  snapshotBlocks = getDiffs().findLaterSnapshotBlocks(snapshot);
  return (snapshotBlocks == null) ? getBlocks() : snapshotBlocks;
}
Project: aliyun-oss-hadoop-fs    File: INodeFile.java
/**
 * Compute file size of the current file.
 * 
 * @param includesLastUcBlock
 *          If the last block is under construction, should it be included?
 * @param usePreferredBlockSize4LastUcBlock
 *          If the last block is under construction, should we use actual
 *          block size or preferred block size?
 *          Note that usePreferredBlockSize4LastUcBlock is ignored
 *          if includesLastUcBlock == false.
 * @return file size
 */
public final long computeFileSize(boolean includesLastUcBlock,
    boolean usePreferredBlockSize4LastUcBlock) {
  if (blocks == null || blocks.length == 0) {
    return 0;
  }
  final int last = blocks.length - 1;
  //check if the last block is BlockInfoUnderConstruction
  BlockInfo lastBlk = blocks[last];
  long size = lastBlk.getNumBytes();
  if (!lastBlk.isComplete()) {
    if (!includesLastUcBlock) {
      size = 0;
    } else if (usePreferredBlockSize4LastUcBlock) {
      size = isStriped() ?
          getPreferredBlockSize() *
              ((BlockInfoStriped) lastBlk).getDataBlockNum() :
          getPreferredBlockSize();
    }
  }
  //sum other blocks
  for (int i = 0; i < last; i++) {
    size += blocks[i].getNumBytes();
  }
  return size;
}
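
To make the two flags concrete, here is a self-contained sketch of the same computation over plain longs (the sizes are assumptions for illustration):

public class ComputeFileSizeDemo {
  public static void main(String[] args) {
    long[] completeBlocks = {128L << 20, 128L << 20}; // two full 128 MB blocks
    long lastUcBytes = 5L << 20;                      // 5 MB written to the UC last block
    long preferredBlockSize = 128L << 20;

    long base = completeBlocks[0] + completeBlocks[1];
    System.out.println(base);                         // includesLastUcBlock == false
    System.out.println(base + lastUcBytes);           // include actual UC block size
    System.out.println(base + preferredBlockSize);    // include preferred UC block size
  }
}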
Project: aliyun-oss-hadoop-fs    File: INodeFile.java
/**
 * This function is only called when block list is stored in snapshot
 * diffs. Note that this can only happen when truncation happens with
 * snapshots. Since we do not support truncation with striped blocks,
 * we only need to handle contiguous blocks here.
 */
public void collectBlocksBeyondSnapshot(BlockInfo[] snapshotBlocks,
                                        BlocksMapUpdateInfo collectedBlocks) {
  Preconditions.checkState(!isStriped());
  BlockInfo[] oldBlocks = getBlocks();
  if(snapshotBlocks == null || oldBlocks == null)
    return;
  // Skip blocks in common between the file and the snapshot
  int n = 0;
  while(n < oldBlocks.length && n < snapshotBlocks.length &&
        oldBlocks[n] == snapshotBlocks[n]) {
    n++;
  }
  truncateBlocksTo(n);
  // Collect the remaining blocks of the file
  while(n < oldBlocks.length) {
    collectedBlocks.addDeleteBlock(oldBlocks[n++]);
  }
}
Project: aliyun-oss-hadoop-fs    File: INodeFile.java
/** Exclude blocks collected for deletion that belong to a snapshot. */
void excludeSnapshotBlocks(int snapshotId,
                           BlocksMapUpdateInfo collectedBlocks) {
  if(collectedBlocks == null || collectedBlocks.getToDeleteList().isEmpty())
    return;
  FileWithSnapshotFeature sf = getFileWithSnapshotFeature();
  if(sf == null)
    return;
  BlockInfo[] snapshotBlocks =
      getDiffs().findEarlierSnapshotBlocks(snapshotId);
  if(snapshotBlocks == null)
    return;
  List<BlockInfo> toDelete = collectedBlocks.getToDeleteList();
  for(BlockInfo blk : snapshotBlocks) {
    if(toDelete.contains(blk))
      collectedBlocks.removeDeleteBlock(blk);
  }
}
Project: aliyun-oss-hadoop-fs    File: FSDirAttrOp.java
static boolean setReplication(
    FSDirectory fsd, BlockManager bm, String src, final short replication)
    throws IOException {
  bm.verifyReplication(src, replication, null);
  final boolean isFile;
  FSPermissionChecker pc = fsd.getPermissionChecker();
  byte[][] pathComponents = FSDirectory.getPathComponentsForReservedPath(src);
  fsd.writeLock();
  try {
    src = fsd.resolvePath(pc, src, pathComponents);
    final INodesInPath iip = fsd.getINodesInPath4Write(src);
    if (fsd.isPermissionEnabled()) {
      fsd.checkPathAccess(pc, iip, FsAction.WRITE);
    }

    final BlockInfo[] blocks = unprotectedSetReplication(fsd, src,
                                                         replication);
    isFile = blocks != null;
    if (isFile) {
      fsd.getEditLog().logSetReplication(src, replication);
    }
  } finally {
    fsd.writeUnlock();
  }
  return isFile;
}
Project: aliyun-oss-hadoop-fs    File: FSImageFormatPBINode.java
private void save(OutputStream out, INodeFile n) throws IOException {
  INodeSection.INodeFile.Builder b = buildINodeFile(n,
      parent.getSaverContext());
  BlockInfo[] blocks = n.getBlocks();

  if (blocks != null) {
    for (Block block : blocks) {
      b.addBlocks(PBHelperClient.convert(block));
    }
  }

  FileUnderConstructionFeature uc = n.getFileUnderConstructionFeature();
  if (uc != null) {
    INodeSection.FileUnderConstructionFeature f =
        INodeSection.FileUnderConstructionFeature
        .newBuilder().setClientName(uc.getClientName())
        .setClientMachine(uc.getClientMachine()).build();
    b.setFileUC(f);
  }

  INodeSection.INode r = buildINodeCommon(n)
      .setType(INodeSection.INode.Type.FILE).setFile(b).build();
  r.writeDelimitedTo(out);
}
Project: aliyun-oss-hadoop-fs    File: FSDirWriteFileOp.java
static boolean unprotectedRemoveBlock(
    FSDirectory fsd, String path, INodesInPath iip, INodeFile fileNode,
    Block block) throws IOException {
  // modify file -> block and blocksMap
  // fileNode should be under construction
  BlockInfo uc = fileNode.removeLastBlock(block);
  if (uc == null) {
    return false;
  }
  fsd.getBlockManager().removeBlockFromMap(uc);

  if (NameNode.stateChangeLog.isDebugEnabled()) {
    NameNode.stateChangeLog.debug("DIR* FSDirectory.removeBlock: "
        + path + " with " + block
        + " block is removed from the file system");
  }

  // update space consumed
  fsd.updateCount(iip, 0, -fileNode.getPreferredBlockSize(),
      fileNode.getPreferredBlockReplication(), true);
  return true;
}
Project: aliyun-oss-hadoop-fs    File: FileDiffList.java
public BlockInfo[] findEarlierSnapshotBlocks(int snapshotId) {
  assert snapshotId != Snapshot.NO_SNAPSHOT_ID : "Wrong snapshot id";
  if (snapshotId == Snapshot.CURRENT_STATE_ID) {
    return null;
  }
  List<FileDiff> diffs = this.asList();
  int i = Collections.binarySearch(diffs, snapshotId);
  BlockInfo[] blocks = null;
  for(i = i >= 0 ? i : -i-2; i >= 0; i--) {
    blocks = diffs.get(i).getBlocks();
    if(blocks != null) {
      break;
    }
  }
  return blocks;
}
Project: aliyun-oss-hadoop-fs    File: FileDiffList.java
public BlockInfo[] findLaterSnapshotBlocks(int snapshotId) {
  assert snapshotId != Snapshot.NO_SNAPSHOT_ID : "Wrong snapshot id";
  if (snapshotId == Snapshot.CURRENT_STATE_ID) {
    return null;
  }
  List<FileDiff> diffs = this.asList();
  int i = Collections.binarySearch(diffs, snapshotId);
  BlockInfo[] blocks = null;
  for (i = i >= 0 ? i+1 : -i-1; i < diffs.size(); i++) {
    blocks = diffs.get(i).getBlocks();
    if (blocks != null) {
      break;
    }
  }
  return blocks;
}
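
Both lookups pivot on Collections.binarySearch, which returns -(insertionPoint) - 1 when the key is absent; hence -i-2 is the index of the last diff at or before the snapshot and -i-1 the index of the first diff after it. A standalone sketch over plain Integers (the real code searches a List<FileDiff> by snapshot id):

import java.util.Arrays;
import java.util.Collections;
import java.util.List;

public class BinarySearchPivotDemo {
  public static void main(String[] args) {
    List<Integer> snapshotIds = Arrays.asList(10, 20, 30);
    int i = Collections.binarySearch(snapshotIds, 25); // absent: -(insertion point) - 1 = -3
    int earlier = i >= 0 ? i : -i - 2;                 // 1 -> id 20, last id <= 25
    int later = i >= 0 ? i + 1 : -i - 1;               // 2 -> id 30, first id > 25
    System.out.println(earlier + " " + later);
  }
}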
Project: aliyun-oss-hadoop-fs    File: FileWithSnapshotFeature.java
/**
 * If some blocks at the end of the block list no longer belong to
 * any inode, collect them and update the block list.
 */
public void collectBlocksAndClear(
    INode.ReclaimContext reclaimContext, final INodeFile file) {
  // check if everything is deleted.
  if (isCurrentFileDeleted() && getDiffs().asList().isEmpty()) {
    file.clearFile(reclaimContext);
    return;
  }
  // find max file size.
  final long max;
  FileDiff diff = getDiffs().getLast();
  if (isCurrentFileDeleted()) {
    max = diff == null ? 0 : diff.getFileSize();
  } else {
    max = file.computeFileSize();
  }

  // Collect blocks that should be deleted
  FileDiff last = diffs.getLast();
  BlockInfo[] snapshotBlocks = last == null ? null : last.getBlocks();
  if(snapshotBlocks == null)
    file.collectBlocksBeyondMax(max, reclaimContext.collectedBlocks());
  else
    file.collectBlocksBeyondSnapshot(snapshotBlocks,
                                     reclaimContext.collectedBlocks());
}
Project: aliyun-oss-hadoop-fs    File: DFSTestUtil.java
/**
 * @return the node which is expected to run the recovery of the
 * given block, which is known to be under construction inside the
 * given NameNode.
 */
public static DatanodeDescriptor getExpectedPrimaryNode(NameNode nn,
    ExtendedBlock blk) {
  BlockManager bm0 = nn.getNamesystem().getBlockManager();
  BlockInfo storedBlock = bm0.getStoredBlock(blk.getLocalBlock());
  assertTrue("Block " + blk + " should be under construction, " +
      "got: " + storedBlock, !storedBlock.isComplete());
  // We expect that the replica with the most recent heartbeat will be
  // the one to be in charge of the synchronization / recovery protocol.
  final DatanodeStorageInfo[] storages = storedBlock
      .getUnderConstructionFeature().getExpectedStorageLocations();
  DatanodeStorageInfo expectedPrimary = storages[0];
  long mostRecentLastUpdate = expectedPrimary.getDatanodeDescriptor()
      .getLastUpdateMonotonic();
  for (int i = 1; i < storages.length; i++) {
    final long lastUpdate = storages[i].getDatanodeDescriptor()
        .getLastUpdateMonotonic();
    if (lastUpdate > mostRecentLastUpdate) {
      expectedPrimary = storages[i];
      mostRecentLastUpdate = lastUpdate;
    }
  }
  return expectedPrimary.getDatanodeDescriptor();
}
Project: aliyun-oss-hadoop-fs    File: TestCommitBlockSynchronization.java
@Test
public void testCommitBlockSynchronizationWithClose() throws IOException {
  INodeFile file = mockFileUnderConstruction();
  Block block = new Block(blockId, length, genStamp);
  FSNamesystem namesystemSpy = makeNameSystemSpy(block, file);
  DatanodeID[] newTargets = new DatanodeID[0];

  ExtendedBlock lastBlock = new ExtendedBlock();
  namesystemSpy.commitBlockSynchronization(
      lastBlock, genStamp, length, true,
      false, newTargets, null);

  // Repeat the call to make sure it returns true
  namesystemSpy.commitBlockSynchronization(
      lastBlock, genStamp, length, true, false, newTargets, null);

  BlockInfo completedBlockInfo = new BlockInfoContiguous(block, (short) 1);
  completedBlockInfo.setBlockCollectionId(file.getId());
  completedBlockInfo.setGenerationStamp(genStamp);
  doReturn(completedBlockInfo).when(namesystemSpy)
      .getStoredBlock(any(Block.class));
  doReturn(completedBlockInfo).when(file).getLastBlock();

  namesystemSpy.commitBlockSynchronization(
      lastBlock, genStamp, length, true, false, newTargets, null);
}
Project: aliyun-oss-hadoop-fs    File: TestTruncateQuotaUpdate.java
private INodeFile createMockFile(long size, short replication) {
  ArrayList<BlockInfo> blocks = new ArrayList<>();
  long createdSize = 0;
  while (createdSize < size) {
    long blockSize = Math.min(BLOCKSIZE, size - createdSize);
    BlockInfo bi = newBlock(blockSize, replication);
    blocks.add(bi);
    createdSize += BLOCKSIZE;
  }
  PermissionStatus perm = new PermissionStatus("foo", "bar", FsPermission
      .createImmutable((short) 0x1ff));
  return new INodeFile(
      ++nextMockINodeId, new byte[0], perm, 0, 0,
      blocks.toArray(new BlockInfo[blocks.size()]), replication,
      BLOCKSIZE);
}
Project: aliyun-oss-hadoop-fs    File: TestINodeFile.java
/** 
 * Creates the required number of files with one block each
 * @param nCount Number of INodes to create
 * @return Array of INode files
 */
private INodeFile[] createINodeFiles(int nCount, String fileNamePrefix) {
  if(nCount <= 0)
    return new INodeFile[1];

  replication = 3;
  preferredBlockSize = 128 * 1024 * 1024;
  INodeFile[] iNodes = new INodeFile[nCount];
  for (int i = 0; i < nCount; i++) {
    iNodes[i] = new INodeFile(i, null, perm, 0L, 0L, null, replication,
        preferredBlockSize);
    iNodes[i].setLocalName(DFSUtil.string2Bytes(fileNamePrefix + i));
    BlockInfo newblock = new BlockInfoContiguous(replication);
    iNodes[i].addBlock(newblock);
  }

  return iNodes;
}
Project: aliyun-oss-hadoop-fs    File: TestGetBlockLocations.java
private static FSNamesystem setupFileSystem() throws IOException {
  Configuration conf = new Configuration();
  conf.setLong(DFS_NAMENODE_ACCESSTIME_PRECISION_KEY, 1L);
  FSEditLog editlog = mock(FSEditLog.class);
  FSImage image = mock(FSImage.class);
  when(image.getEditLog()).thenReturn(editlog);
  final FSNamesystem fsn = new FSNamesystem(conf, image, true);

  final FSDirectory fsd = fsn.getFSDirectory();
  INodesInPath iip = fsd.getINodesInPath("/", true);
  PermissionStatus perm = new PermissionStatus(
      "hdfs", "supergroup",
      FsPermission.createImmutable((short) 0x1ff));
  final INodeFile file = new INodeFile(
      MOCK_INODE_ID, FILE_NAME.getBytes(Charsets.UTF_8),
      perm, 1, 1, new BlockInfo[] {}, (short) 1,
      DFS_BLOCK_SIZE_DEFAULT);
  fsn.getFSDirectory().addINode(iip, file);
  return fsn;
}
Project: aliyun-oss-hadoop-fs    File: TestEditLog.java
@Override
public void run() {
  PermissionStatus p = namesystem.createFsOwnerPermissions(
                                      new FsPermission((short)0777));
  FSEditLog editLog = namesystem.getEditLog();

  for (int i = 0; i < numTransactions; i++) {
    INodeFile inode = new INodeFile(namesystem.dir.allocateNewInodeId(), null,
        p, 0L, 0L, BlockInfo.EMPTY_ARRAY, replication, blockSize);
    inode.toUnderConstruction("", "");

    editLog.logOpenFile("/filename" + (startIndex + i), inode, false, false);
    editLog.logCloseFile("/filename" + (startIndex + i), inode);
    editLog.logSync();
  }
}
Project: hadoop-2.6.0-cdh5.4.3    File: INodeFile.java
@Override // BlockCollection, the file should be under construction
public BlockInfoUnderConstruction setLastBlock(BlockInfo lastBlock,
    DatanodeStorageInfo[] locations) throws IOException {
  Preconditions.checkState(isUnderConstruction(),
      "file is no longer under construction");

  if (numBlocks() == 0) {
    throw new IOException("Failed to set last block: File is empty.");
  }
  BlockInfoUnderConstruction ucBlock =
    lastBlock.convertToBlockUnderConstruction(
        BlockUCState.UNDER_CONSTRUCTION, locations);
  ucBlock.setBlockCollection(this);
  setBlock(numBlocks() - 1, ucBlock);
  return ucBlock;
}
Project: hadoop-2.6.0-cdh5.4.3    File: INodeFile.java
@Override
public void destroyAndCollectBlocks(BlocksMapUpdateInfo collectedBlocks,
    final List<INode> removedINodes) {
  if (blocks != null && collectedBlocks != null) {
    for (BlockInfo blk : blocks) {
      collectedBlocks.addDeleteBlock(blk);
      blk.setBlockCollection(null);
    }
  }
  setBlocks(null);
  clear();
  removedINodes.add(this);

  FileWithSnapshotFeature sf = getFileWithSnapshotFeature();
  if (sf != null) {
    sf.clearDiffs();
  }
}
Project: FlexMap    File: DFSTestUtil.java
/**
 * @return the node which is expected to run the recovery of the
 * given block, which is known to be under construction inside the
 * given NameNode.
 */
public static DatanodeDescriptor getExpectedPrimaryNode(NameNode nn,
    ExtendedBlock blk) {
  BlockManager bm0 = nn.getNamesystem().getBlockManager();
  BlockInfo storedBlock = bm0.getStoredBlock(blk.getLocalBlock());
  assertTrue("Block " + blk + " should be under construction, " +
      "got: " + storedBlock,
      storedBlock instanceof BlockInfoUnderConstruction);
  BlockInfoUnderConstruction ucBlock =
    (BlockInfoUnderConstruction)storedBlock;
  // We expect that the replica with the most recent heartbeat will be
  // the one to be in charge of the synchronization / recovery protocol.
  final DatanodeStorageInfo[] storages = ucBlock.getExpectedStorageLocations();
  DatanodeStorageInfo expectedPrimary = storages[0];
  long mostRecentLastUpdate = expectedPrimary.getDatanodeDescriptor().getLastUpdate();
  for (int i = 1; i < storages.length; i++) {
    final long lastUpdate = storages[i].getDatanodeDescriptor().getLastUpdate();
    if (lastUpdate > mostRecentLastUpdate) {
      expectedPrimary = storages[i];
      mostRecentLastUpdate = lastUpdate;
    }
  }
  return expectedPrimary.getDatanodeDescriptor();
}
Project: hadoop-2.6.0-cdh5.4.3    File: TestINodeFile.java
/** 
 * Creates the required number of files with one block each
 * @param nCount Number of INodes to create
 * @return Array of INode files
 */
private INodeFile[] createINodeFiles(int nCount, String fileNamePrefix) {
  if(nCount <= 0)
    return new INodeFile[1];

  replication = 3;
  preferredBlockSize = 128 * 1024 * 1024;
  INodeFile[] iNodes = new INodeFile[nCount];
  for (int i = 0; i < nCount; i++) {
    iNodes[i] = new INodeFile(i, null, perm, 0L, 0L, null, replication,
        preferredBlockSize, (byte)0);
    iNodes[i].setLocalName(DFSUtil.string2Bytes(fileNamePrefix + i));
    BlockInfo newblock = new BlockInfo(replication);
    iNodes[i].addBlock(newblock);
  }

  return iNodes;
}
Project: hops    File: INodeFile.java
/**
 * @return the blocks of the file.
 */
@Override
public BlockInfo[] getBlocks()
    throws StorageException, TransactionContextException {

  if(isFileStoredInDB()){
    FSNamesystem.LOG.debug("Stuffed Inode:  getBlocks(). the file is stored in the database. Returning empty list of blocks");
    return BlockInfo.EMPTY_ARRAY;
  }

  List<BlockInfo> blocks = getBlocksOrderedByIndex();
  if(blocks == null){
    return BlockInfo.EMPTY_ARRAY;
  }

  BlockInfo[] blks = new BlockInfo[blocks.size()];
  return blocks.toArray(blks);
}
Project: hops    File: BlockInfoContext.java
private List<BlockInfo> syncBlockInfoInstances(List<BlockInfo> newBlocks,
    boolean syncInodeBlocks) {
  List<BlockInfo> finalList = new ArrayList<>();

  for (BlockInfo blockInfo : newBlocks) {
    if (isRemoved(blockInfo.getBlockId())) {
      continue;
    }

    gotFromDB(blockInfo);
    finalList.add(blockInfo);

    if (syncInodeBlocks) {
      List<BlockInfo> blockList = inodeBlocks.get(blockInfo.getInodeId());
      if (blockList == null) {
        blockList = new ArrayList<>();
        inodeBlocks.put(blockInfo.getInodeId(), blockList);
      }
      blockList.add(blockInfo);
    }
  }

  return finalList;
}
Project: FlexMap    File: TestINodeFile.java
/** 
 * Creates the required number of files with one block each
 * @param nCount Number of INodes to create
 * @return Array of INode files
 */
private INodeFile[] createINodeFiles(int nCount, String fileNamePrefix) {
  if(nCount <= 0)
    return new INodeFile[1];

  replication = 3;
  preferredBlockSize = 128 * 1024 * 1024;
  INodeFile[] iNodes = new INodeFile[nCount];
  for (int i = 0; i < nCount; i++) {
    iNodes[i] = new INodeFile(i, null, perm, 0L, 0L, null, replication,
        preferredBlockSize, (byte)0);
    iNodes[i].setLocalName(DFSUtil.string2Bytes(fileNamePrefix + i));
    BlockInfo newblock = new BlockInfo(replication);
    iNodes[i].addBlock(newblock);
  }

  return iNodes;
}
Project: FlexMap    File: TestEditLog.java
@Override
public void run() {
  PermissionStatus p = namesystem.createFsOwnerPermissions(
                                      new FsPermission((short)0777));
  FSEditLog editLog = namesystem.getEditLog();

  for (int i = 0; i < numTransactions; i++) {
    INodeFile inode = new INodeFile(namesystem.allocateNewInodeId(), null,
        p, 0L, 0L, BlockInfo.EMPTY_ARRAY, replication, blockSize, (byte)0);
    inode.toUnderConstruction("", "");

    editLog.logOpenFile("/filename" + (startIndex + i), inode, false, false);
    editLog.logCloseFile("/filename" + (startIndex + i), inode);
    editLog.logSync();
  }
}
Project: hadoop-2.6.0-cdh5.4.3    File: TestEditLog.java
@Override
public void run() {
  PermissionStatus p = namesystem.createFsOwnerPermissions(
                                      new FsPermission((short)0777));
  FSEditLog editLog = namesystem.getEditLog();

  for (int i = 0; i < numTransactions; i++) {
    INodeFile inode = new INodeFile(namesystem.allocateNewInodeId(), null,
        p, 0L, 0L, BlockInfo.EMPTY_ARRAY, replication, blockSize, (byte)0);
    inode.toUnderConstruction("", "");

    editLog.logOpenFile("/filename" + (startIndex + i), inode, false, false);
    editLog.logCloseFile("/filename" + (startIndex + i), inode);
    editLog.logSync();
  }
}
Project: hadoop-plus    File: INodeFileUnderConstruction.java
INodeFileUnderConstruction(long id,
                           byte[] name,
                           short blockReplication,
                           long modificationTime,
                           long preferredBlockSize,
                           BlockInfo[] blocks,
                           PermissionStatus perm,
                           String clientName,
                           String clientMachine,
                           DatanodeDescriptor clientNode) {
  super(id, name, perm, modificationTime, modificationTime,
      blocks, blockReplication, preferredBlockSize);
  this.clientName = clientName;
  this.clientMachine = clientMachine;
  this.clientNode = clientNode;
}
Project: hadoop-plus    File: FSDirectory.java
/** Replace an INodeFile and record modification for the latest snapshot. */
void unprotectedReplaceINodeFile(final String path, final INodeFile oldnode,
    final INodeFile newnode) {
  Preconditions.checkState(hasWriteLock());

  oldnode.getParent().replaceChild(oldnode, newnode, inodeMap);
  oldnode.clear();

  /* Currently oldnode and newnode are assumed to contain the same
   * blocks. Otherwise, blocks need to be removed from the blocksMap.
   */
  int index = 0;
  for (BlockInfo b : newnode.getBlocks()) {
    BlockInfo info = getBlockManager().addBlockCollection(b, newnode);
    newnode.setBlock(index, info); // inode refers to the block in BlocksMap
    index++;
  }
}
Project: hops    File: INodeUtil.java
public static INodeIdentifier resolveINodeFromBlock(final Block b)
    throws StorageException {
  if (b instanceof BlockInfo || b instanceof BlockInfoUnderConstruction) {
    INodeIdentifier inodeIden =
        new INodeIdentifier(((BlockInfo) b).getInodeId());
    INodeDALAdaptor ida = (INodeDALAdaptor) HdfsStorageFactory
        .getDataAccess(INodeDataAccess.class);
    INode inode = ida.findInodeByIdFTIS(((BlockInfo) b).getInodeId());
    if (inode != null) {
      inodeIden.setName(inode.getLocalName());
      inodeIden.setPid(inode.getParentId());
      inodeIden.setPartitionId(inode.getPartitionId());
    }
    return inodeIden;
  } else {
    return resolveINodeFromBlockID(b.getBlockId());
  }
}
Project: hadoop-plus    File: INodeFile.java
/**
 * append array of blocks to this.blocks
 */
void concatBlocks(INodeFile[] inodes) {
  int size = this.blocks.length;
  int totalAddedBlocks = 0;
  for(INodeFile f : inodes) {
    totalAddedBlocks += f.blocks.length;
  }

  BlockInfo[] newlist = new BlockInfo[size + totalAddedBlocks];
  System.arraycopy(this.blocks, 0, newlist, 0, size);

  for(INodeFile in: inodes) {
    System.arraycopy(in.blocks, 0, newlist, size, in.blocks.length);
    size += in.blocks.length;
  }

  setBlocks(newlist);
  updateBlockCollection();
}
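
concatBlocks sizes the merged array once and fills it with System.arraycopy instead of reallocating per file. A hedged generic version of the same pattern (illustrative only, not the INodeFile API):

import java.util.Arrays;

public class ConcatArraysDemo {
  public static void main(String[] args) {
    long[][] sources = {{1, 2}, {3}, {4, 5}};
    int total = 0;
    for (long[] src : sources) {
      total += src.length;
    }
    long[] merged = new long[total]; // single allocation, sized up front
    int size = 0;
    for (long[] src : sources) {
      System.arraycopy(src, 0, merged, size, src.length);
      size += src.length;
    }
    System.out.println(Arrays.toString(merged)); // [1, 2, 3, 4, 5]
  }
}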
Project: hadoop-plus    File: INodeFile.java
@Override
public void destroyAndCollectBlocks(BlocksMapUpdateInfo collectedBlocks,
    final List<INode> removedINodes) {
  if (blocks != null && collectedBlocks != null) {
    for (BlockInfo blk : blocks) {
      collectedBlocks.addDeleteBlock(blk);
      blk.setBlockCollection(null);
    }
  }
  setBlocks(null);
  clear();
  removedINodes.add(this);

  if (this instanceof FileWithSnapshot) {
    ((FileWithSnapshot) this).getDiffs().clear();
  }
}
Project: hops    File: BlockInfoContext.java
private void updateInodeBlocks(BlockInfo newBlock) {
  if(newBlock == null)
    return;

  List<BlockInfo> blockList = inodeBlocks.get(newBlock.getInodeId());

  if (blockList != null) {
    int idx = blockList.indexOf(newBlock);
    if (idx != -1) {
      blockList.set(idx, newBlock);
    } else {
      blockList.add(newBlock);
    }
  } else {
    List<BlockInfo> list =
        new ArrayList<>(DEFAULT_NUM_BLOCKS_PER_INODE);
    list.add(newBlock);
    inodeBlocks.put(newBlock.getInodeId(), list);
  }
}
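
The get-or-create branch above is commonly collapsed with Map.computeIfAbsent on Java 8+. A minimal sketch under assumed types (Long inode ids, a made-up capacity constant):

import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

public class GroupBlocksByInodeDemo {
  private static final int DEFAULT_NUM_BLOCKS_PER_INODE = 10; // assumed capacity

  public static void main(String[] args) {
    Map<Long, List<String>> inodeBlocks = new HashMap<>();
    inodeBlocks
        .computeIfAbsent(42L, k -> new ArrayList<>(DEFAULT_NUM_BLOCKS_PER_INODE))
        .add("blk_1001");
    System.out.println(inodeBlocks); // {42=[blk_1001]}
  }
}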
Project: hadoop-plus    File: TestCommitBlockSynchronization.java
private FSNamesystem makeNameSystemSpy(Block block,
                                       INodeFileUnderConstruction file)
    throws IOException {
  Configuration conf = new Configuration();
  FSImage image = new FSImage(conf);
  DatanodeDescriptor[] targets = new DatanodeDescriptor[0];

  FSNamesystem namesystem = new FSNamesystem(conf, image);
  FSNamesystem namesystemSpy = spy(namesystem);
  BlockInfoUnderConstruction blockInfo = new BlockInfoUnderConstruction(
      block, 1, HdfsServerConstants.BlockUCState.UNDER_CONSTRUCTION, targets);
  blockInfo.setBlockCollection(file);
  blockInfo.setGenerationStamp(genStamp);
  blockInfo.initializeBlockRecovery(genStamp);
  doReturn(true).when(file).removeLastBlock(any(Block.class));

  doReturn(blockInfo).when(namesystemSpy).getStoredBlock(any(Block.class));
  doReturn("").when(namesystemSpy).closeFileCommitBlocks(
      any(INodeFileUnderConstruction.class),
      any(BlockInfo.class));
  doReturn("").when(namesystemSpy).persistBlocks(
      any(INodeFileUnderConstruction.class), anyBoolean());
  doReturn(mock(FSEditLog.class)).when(namesystemSpy).getEditLog();

  return namesystemSpy;
}
Project: hadoop-plus    File: TestPipelinesFailover.java
/**
 * @return the node which is expected to run the recovery of the
 * given block, which is known to be under construction inside the
 * given NameNode.
 */
private DatanodeDescriptor getExpectedPrimaryNode(NameNode nn,
    ExtendedBlock blk) {
  BlockManager bm0 = nn.getNamesystem().getBlockManager();
  BlockInfo storedBlock = bm0.getStoredBlock(blk.getLocalBlock());
  assertTrue("Block " + blk + " should be under construction, " +
      "got: " + storedBlock,
      storedBlock instanceof BlockInfoUnderConstruction);
  BlockInfoUnderConstruction ucBlock =
    (BlockInfoUnderConstruction)storedBlock;
  // We expect that the replica with the most recent heartbeat will be
  // the one to be in charge of the synchronization / recovery protocol.
  DatanodeDescriptor[] datanodes = ucBlock.getExpectedLocations();
  DatanodeDescriptor expectedPrimary = datanodes[0];
  long mostRecentLastUpdate = expectedPrimary.getLastUpdate();
  for (int i = 1; i < datanodes.length; i++) {
    if (datanodes[i].getLastUpdate() > mostRecentLastUpdate) {
      expectedPrimary = datanodes[i];
      mostRecentLastUpdate = expectedPrimary.getLastUpdate();
    }
  }
  return expectedPrimary;
}
Project: hadoop-plus    File: TestINodeFile.java
/** 
 * Creates the required number of files with one block each
 * @param nCount Number of INodes to create
 * @return Array of INode files
 */
private INodeFile[] createINodeFiles(int nCount, String fileNamePrefix) {
  if(nCount <= 0)
    return new INodeFile[1];

  replication = 3;
  preferredBlockSize = 128 * 1024 * 1024;
  INodeFile[] iNodes = new INodeFile[nCount];
  for (int i = 0; i < nCount; i++) {
    iNodes[i] = new INodeFile(i, null, perm, 0L, 0L, null, replication,
        preferredBlockSize);
    iNodes[i].setLocalName(DFSUtil.string2Bytes(fileNamePrefix + i));
    BlockInfo newblock = new BlockInfo(replication);
    iNodes[i].addBlock(newblock);
  }

  return iNodes;
}
Project: hops    File: BlockInfoContext.java
private void deleteBlocksForConcat(INodeCandidatePrimaryKey trg_param,
    List<INodeCandidatePrimaryKey> deleteINodes, List<BlockInfo> oldBlks /* blks with old pk*/)
    throws TransactionContextException {

  // In case of concat, new block_infos rows are added by the concat fn.
  if (!getRemoved().isEmpty()) {
    throw new IllegalStateException(
        "Concat file(s) whose blocks are changed. During rename and move no blocks should have been changed.");
  }

  for (BlockInfo bInfo : oldBlks) {
    INodeCandidatePrimaryKey pk =
        new INodeCandidatePrimaryKey(bInfo.getInodeId());
    if (deleteINodes.contains(pk)) {
      //remove the block
      concatRemovedBlks.add(bInfo);
      if(isLogDebugEnabled()) {
        log("snapshot-maintenance-removed-blockinfo", "bid", bInfo.getBlockId(),
                "inodeId", bInfo.getInodeId());
      }
    }
  }
}
Project: hops    File: FSDirectory.java
INode unprotectedAddFile(String path, PermissionStatus permissions,
    short replication, long modificationTime, long atime,
    long preferredBlockSize, boolean underConstruction, String clientName,
    String clientMachine) throws IOException {
  INode newNode;
  if (underConstruction) {
    newNode = new INodeFileUnderConstruction(permissions, replication,
        preferredBlockSize, modificationTime, clientName, clientMachine,
        null);
  } else {
    newNode = new INodeFile(permissions, BlockInfo.EMPTY_ARRAY, replication,
        modificationTime, atime, preferredBlockSize);
  }

  try {
    newNode = addNode(path, newNode, UNKNOWN_DISK_SPACE);
  } catch (IOException e) {
    if (NameNode.stateChangeLog.isDebugEnabled()) {
      NameNode.stateChangeLog.debug(
          "DIR* FSDirectory.unprotectedAddFile: exception when add " + path +
              " to the file system", e);
    }
    return null;
  }
  return newNode;
}
Project: hops    File: INodeAttributesContext.java
@Override
public void snapshotMaintenance(TransactionContextMaintenanceCmds cmds,
    Object... params) throws TransactionContextException {
  HdfsTransactionContextMaintenanceCmds hopCmds =
      (HdfsTransactionContextMaintenanceCmds) cmds;
  switch (hopCmds) {
    case INodePKChanged:
      // need to update the rows with updated inodeId or partKey
      INode inodeBeforeChange = (INode) params[0];
      INode inodeAfterChange = (INode) params[1];
      break;
    case Concat:
      INodeCandidatePrimaryKey trg_param =
          (INodeCandidatePrimaryKey) params[0];
      List<INodeCandidatePrimaryKey> srcs_param =
          (List<INodeCandidatePrimaryKey>) params[1];
      List<BlockInfo> oldBlks = (List<BlockInfo>) params[2];
      updateAttributes(trg_param, srcs_param);
      break;
  }
}
Project: FlexMap    File: INodeFile.java
/**
 * Remove a block from the block list. This block should be
 * the last one on the list.
 */
boolean removeLastBlock(Block oldblock) {
  Preconditions.checkState(isUnderConstruction(),
      "file is no longer under construction");
  if (blocks == null || blocks.length == 0) {
    return false;
  }
  int size_1 = blocks.length - 1;
  if (!blocks[size_1].equals(oldblock)) {
    return false;
  }

  //copy to a new list
  BlockInfo[] newlist = new BlockInfo[size_1];
  System.arraycopy(blocks, 0, newlist, 0, size_1);
  setBlocks(newlist);
  return true;
}