Example source code for the Java class org.apache.hadoop.hdfs.server.namenode.BlocksMap.BlockInfo

Project: hadoop-EAR    File: TestINodeMap.java
private INode generateINode(long inodeId) {
  return new INode(inodeId, new PermissionStatus("", "", new FsPermission((short) 0)), 0, 0) {
    @Override
    long[] computeContentSummary(long[] summary) {
      return null;
    }

    @Override
    DirCounts spaceConsumedInTree(DirCounts counts) {
      return null;
    }

    @Override
    public boolean isDirectory() {
      return false;
    }

    @Override
    int collectSubtreeBlocksAndClear(List<BlockInfo> v, 
                                     int blocksLimit, 
                                     List<INode> removedINodes) {
      return 0;
    }
  };
}
Project: hadoop-EAR    File: WaitingRoom.java
private void filterMapWithInode(INode node) {
  // Must NOT filter using files that are already in the WaitingRoom!
  if (node.getFullPathName().startsWith(wrDir)) return;

  LOG.info("Filtering WaitingRoomMaps with inode " + node.getFullPathName());

  if (node.isDirectory()) {
    INodeDirectory dir = (INodeDirectory) node;
    for (INode child: dir.getChildren()) {
      filterMapWithInode(child);
    }
  } else {
    BlockInfo[] blocks = ((INodeFile)node).getBlocks();

    // Mark all blocks of this file as referenced
    for (BlockInfo block: blocks) {
      blockRefMap.remove(block.getBlockId());
    }
  }
}
Project: hadoop-EAR    File: SnapshotNode.java
private void getAllLocatedBlocks(INode inode,
    List<LocatedBlocksWithMetaInfo> blocks)
throws IOException {
  if (inode.isDirectory()) {
    INodeDirectory dir = (INodeDirectory) inode;
    for (INode child: dir.getChildren()) {
      getAllLocatedBlocks(child, blocks);
    }
  } else {
    INodeFile file = (INodeFile) inode;
    BlockInfo[] fileBlocks = file.getBlocks();
    List<LocatedBlock> lb = new ArrayList<LocatedBlock>();
    for (BlockInfo block: fileBlocks) {
      // DatanodeInfo is unavailable, so set as empty for now
      lb.add(new LocatedBlock(block, new DatanodeInfo[0]));
    }

    LocatedBlocks locatedBlocks =  new LocatedBlocks(
                           file.computeContentSummary().getLength(), // flength
                           lb, // blks
                           false); // isUnderConstruction

    // Update DatanodeInfo from NN
    blocks.add(namenode.updateDatanodeInfo(locatedBlocks));
  }
}
Project: hadoop-EAR    File: RaidCodec.java
public BlockInfo[] getParityBlocks(BlockInfo[] blocks) {
  int numBlocks = (blocks.length / numStripeBlocks) * numParityBlocks
      + ((blocks.length % numStripeBlocks == 0) ? 0 : numParityBlocks);
  BlockInfo[] parityBlocks = new BlockInfo[numBlocks];
  int pos = 0;
  int parityEnd = numParityBlocks;
  for (int i = 0; i < numBlocks; i++) {
    parityBlocks[i] = blocks[pos];
    pos++;
    if (pos == parityEnd) {
      pos += numDataBlocks;
      parityEnd += numStripeBlocks;
    }
  }
  return parityBlocks;
}
Project: hadoop-EAR    File: RaidCodec.java
/**
 * Count the number of live replicas of each parity block in the raided file.
 * If any stripe does not have enough parity block replicas, add the stripe to
 *  raidEncodingTasks to schedule encoding.
 * If forceAdd is true, we always add the stripe to raidEncodingTasks
 * without checking.
 * @param sourceINode
 * @param raidEncodingTasks
 * @param fs
 * @param forceAdd
 * @return true if all parity blocks of the file have enough replicas
 * @throws IOException
 */
public boolean checkRaidProgress(INodeFile sourceINode, 
    LightWeightLinkedSet<RaidBlockInfo> raidEncodingTasks, FSNamesystem fs,
    boolean forceAdd) throws IOException {
  boolean result = true;
  BlockInfo[] blocks = sourceINode.getBlocks();
  for (int i = 0; i < blocks.length;
      i += numStripeBlocks) {
    boolean hasParity = true;
    if (!forceAdd) {
      for (int j = 0; j < numParityBlocks; j++) {
        if (fs.countLiveNodes(blocks[i + j]) < this.parityReplication) {
          hasParity = false;
          break;
        }
      }
    }
    if (!hasParity || forceAdd) {
      raidEncodingTasks.add(new RaidBlockInfo(blocks[i], parityReplication, i));
      result = false; 
    }
  }
  return result;
}
Project: hadoop-EAR    File: INodeRegularStorage.java
@Override
public void appendBlocks(INodeFile [] inodes, int totalAddedBlocks, INodeFile inode) {
  int size = this.blocks.length;

  BlockInfo[] newlist = new BlockInfo[size + totalAddedBlocks];
  System.arraycopy(this.blocks, 0, newlist, 0, size);

  for(INodeFile in: inodes) {
    BlockInfo[] blks = in.storage.getBlocks();
    System.arraycopy(blks, 0, newlist, size, blks.length);
    size += blks.length;
  }

  this.blocks = newlist;

  for(BlockInfo bi: this.blocks) {
    bi.setINode(inode);
  }
}
Project: hadoop-EAR    File: DatanodeDescriptor.java
/**
 * Remove the block from its current position and insert it
 * at the head of the list of blocks
 * related to the specified DatanodeDescriptor.
 * The head must not be null.
 * @return the block, now the new head of the list.
 */
protected BlockInfo listMoveToHead(BlockInfo block, BlockInfo head,
    DatanodeIndex indexes) {
  assert head != null : "Head can not be null";
  if (head == block) {
    return head;
  }
  BlockInfo next = block.getSetNext(indexes.currentIndex, head);
  BlockInfo prev = block.getSetPrevious(indexes.currentIndex, null);

  head.setPrevious(indexes.headIndex, block);
  indexes.headIndex = indexes.currentIndex;
  prev.setNext(prev.findDatanode(this), next);
  if (next != null)
    next.setPrevious(next.findDatanode(this), prev);
  return block;
}
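Below is a minimal sketch of the same move-to-head pointer updates on an ordinary doubly linked list. It deliberately leaves out the per-datanode triplet indexing (DatanodeIndex, getSetNext, getSetPrevious) that the real BlockInfo list uses; the Node class and ids are illustrative only.

// Simplified move-to-head on a plain doubly linked list: unlink the node
// from its current position, then splice it in front of the old head.
class Node {
  final int id;
  Node prev, next;
  Node(int id) { this.id = id; }
}

public class MoveToHeadDemo {
  static Node moveToHead(Node block, Node head) {
    if (head == block) return head;
    Node prev = block.prev;
    Node next = block.next;
    // unlink block from its old position
    prev.next = next;
    if (next != null) next.prev = prev;
    // splice block in front of the old head
    block.prev = null;
    block.next = head;
    head.prev = block;
    return block;                   // block is the new head
  }

  public static void main(String[] args) {
    Node a = new Node(1), b = new Node(2), c = new Node(3);
    a.next = b; b.prev = a; b.next = c; c.prev = b;
    Node head = moveToHead(c, a);
    for (Node n = head; n != null; n = n.next) System.out.print(n.id + " ");
    System.out.println();           // prints: 3 1 2
  }
}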
Project: hadoop-EAR    File: INodeRaidStorage.java
@Override
public boolean isSourceBlock(BlockInfo block) {
  int index = 0;
  if (block instanceof RaidBlockInfo) {
    RaidBlockInfo rbi = (RaidBlockInfo)block;
    index = rbi.index; 
  } else {
    if (LOG.isDebugEnabled()) {
      LOG.debug("block: " + block + " is not raid block info");
    }
    for (index = 0; index < blocks.length; index++) {
      if (blocks[index].equals(block)) {
        break;
      }
    }
    if (index == blocks.length) {
      return false; 
    }
  }
  return index % codec.numStripeBlocks >= codec.numParityBlocks;
}
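The final modulo test relies on each stripe storing its parity blocks first, so a position whose offset within the stripe is below numParityBlocks is parity and everything else is source data. A quick standalone check of that arithmetic, assuming a 4-parity/10-data stripe:

// Demo of the stripe-position test in isSourceBlock(); the stripe geometry
// and the sample indices are assumptions for illustration.
public class SourceBlockCheckDemo {
  public static void main(String[] args) {
    int numParityBlocks = 4;
    int numStripeBlocks = 14;
    int[] indices = {2, 5, 17, 20};
    for (int index : indices) {
      boolean isSource = index % numStripeBlocks >= numParityBlocks;
      System.out.println("index " + index + " -> " + (isSource ? "source" : "parity"));
    }
    // 2 and 17 (17 % 14 = 3) fall in the parity region; 5 and 20 are source blocks
  }
}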
Project: hadoop-EAR    File: FSNamesystem.java
/**
 * Update a block's priority queue in the neededReplications queues
 * 
 * @param blockInfo the block being updated
 * @param delta the change of number of replicas
 * @param numCurrentReplicas current number of replicas
 * @param numCurrentDecommissionedReplicas current number of decommissioned replicas
 * @param node the node where the replica resides
 * @param fileReplication expected number of replicas
 */
private void updateNeededReplicationQueue(BlockInfo blockInfo, int delta,
    int numCurrentReplicas, int numCurrentDecommissionedReplicas,
     DatanodeDescriptor node, short fileReplication) {
   int numOldReplicas = numCurrentReplicas;
   int numOldDecommissionedReplicas = numCurrentDecommissionedReplicas;
   if (node.isDecommissioned() || node.isDecommissionInProgress()) {
     numOldDecommissionedReplicas -= delta;
   } else {
     numOldReplicas -= delta;
   }   
   if (fileReplication > numOldReplicas) {
     neededReplications.remove(blockInfo, numOldReplicas,
         numOldDecommissionedReplicas, fileReplication);
   }
   if (fileReplication > numCurrentReplicas) {
     neededReplications.add(blockInfo, numCurrentReplicas,
         numCurrentDecommissionedReplicas, fileReplication); 
   }   
 }
Project: hadoop-EAR    File: FSEditLogOp.java
public void set(long inodeId,
    String path,
    short replication,
    long mtime,
    long atime,
    long blockSize,
    BlockInfo[] blocks,
    PermissionStatus permissions,
    String clientName,
    String clientMachine) {
  this.inodeId = inodeId;
  this.path = path;
  this.replication = replication;
  this.mtime = mtime;
  this.atime = atime;
  this.blockSize = blockSize;
  this.blocks = blocks;
  this.permissions = permissions;
  this.clientName = clientName;
  this.clientMachine = clientMachine;
}
Project: hadoop-EAR    File: DatanodeDescriptor.java
/**
 * Adds a list of blocks that are already linked together to this
 * descriptor's block list. The blocks in the input list already have
 * this descriptor inserted into them.
 * Used for parallel initial block reports.
 */
void insertIntoList(BlockInfo head, int headIndex, BlockInfo tail, int tailIndex, int count) {
  if (head == null)
    return;

  // connect tail to now-head
  tail.setNext(tailIndex, blockList);
  if (blockList != null)
    blockList.setPrevious(blockList.findDatanode(this), tail);

  // create new head
  blockList = head;
  blockList.setPrevious(headIndex, null);

  // add new blocks to the count
  numOfBlocks += count;
}
Project: hadoop-EAR    File: NameNode.java
private LocatedBlockWithFileName getBlockInfoInternal(long blockId)
     throws IOException {
    Block block = new Block(blockId);
    BlockInfo blockInfo = namesystem.blocksMap.getBlockInfo(block);
    if (null == blockInfo) {
        return null;
    }

    INodeFile inode = blockInfo.getINode();
    if (null == inode) {
        return null;
    }

    String fileName = inode.getFullPathName();
    // get the location info
    List<DatanodeInfo> diList = new ArrayList<DatanodeInfo>();
    for (Iterator<DatanodeDescriptor> it
            = namesystem.blocksMap.nodeIterator(block); it.hasNext();) {
        diList.add(it.next());
    }
    return new LocatedBlockWithFileName(block,
            diList.toArray(new DatanodeInfo[] {}), fileName);
}
Project: hadoop-EAR    File: FSNamesystem.java
public ReplicationWork(BlockInfo block,
                        INodeFile fileINode,
                        int numOfReplicas,
                        DatanodeDescriptor srcNode,
                        List<DatanodeDescriptor> containingNodes,
                        int priority){
  this.block = block;
  this.blockSize = block.getNumBytes();
  this.fileINode = fileINode;
  this.numOfReplicas = numOfReplicas;
  this.srcNode = srcNode;
  this.containingNodes = containingNodes;
  this.priority = priority;

  this.targets = null;
}
Project: hadoop-EAR    File: INodeFile.java
int getBlockIndex(Block blk, String file) throws IOException {
  BlockInfo[] blocks = getBlocks();
  if (blocks == null) {
    throw new IOException("blocks is null for file " + file);
  }
  // A null block indicates that the block is currently being added.
  // Return the number of blocks as its index in that case.
  if (blk == null) {
    return blocks.length;
  }
  for (int curBlk = 0; curBlk < blocks.length; curBlk++) {
    if (blocks[curBlk].equals(blk)) {
      return curBlk;
    }
  }
  throw new IOException("Cannot locate " + blk + " in file " + file);
}
Project: hadoop-EAR    File: PendingReplicationBlocks.java
/**
 * Iterate through all items and print them.
 */
void metaSave(PrintWriter out) {
  synchronized (pendingReplications) {
    out.println("Metasave: Blocks being replicated: " +
                pendingReplications.size());
    Iterator<Map.Entry<BlockInfo, PendingBlockInfo>> iter = 
        pendingReplications.entrySet().iterator();
    while (iter.hasNext()) {
      Map.Entry<BlockInfo, PendingBlockInfo> entry = iter.next();
      PendingBlockInfo pendingBlock = entry.getValue();
      BlockInfo block = entry.getKey();
      out.println(block + 
                  " StartTime: " + new Time(pendingBlock.timeStamp) +
                  " NumReplicaInProgress: " + 
                  pendingBlock.numReplicasInProgress);
    }
  }
}
Project: hadoop-EAR    File: FSNamesystem.java
/**
 * corrupts a file by:
 * 1. removing all targets of the last block
 */
void corruptFileForTesting(String src) throws IOException {
  INodeFile inode = dir.getFileINode(src);

  if (inode.isUnderConstruction()) {
    INodeFileUnderConstruction pendingFile =
      (INodeFileUnderConstruction) inode;
    BlockInfo[] blocks = pendingFile.getBlocks();
    if (blocks != null && blocks.length >= 1) {
      BlockInfo lastBlockInfo = blocks[blocks.length - 1];

      pendingFile.setLastBlock(
        lastBlockInfo,
        new DatanodeDescriptor[0]
      );
    }
  }
}
Project: hadoop-EAR    File: FSNamesystem.java
/**
 * If there were any replication requests that timed out, reap them
 * and put them back into the neededReplication queue
 */
void processPendingReplications() {
  BlockInfo[] timedOutItems = pendingReplications.getTimedOutBlocks();
  if (timedOutItems != null) {
    writeLock();
    try {
      for (int i = 0; i < timedOutItems.length; i++) {
        NumberReplicas num = countNodes(timedOutItems[i]);
        neededReplications.add(
          timedOutItems[i], 
          num.liveReplicas(),
          num.decommissionedReplicas(),
          getReplication(timedOutItems[i]));
      }
    } finally {
      writeUnlock();
    }
    /* If we knew the target datanodes where the replication timed out,
     * we could invoke decBlocksScheduled() on them. It's OK for now.
     */
  }
}
Project: hadoop-EAR    File: TestINodeFile.java
INodeFile createINodeRaidFile(short replication, RaidCodec codec, 
    long blockSize, BlockInfo[] blocks) {
  return new INodeFile(INodeId.GRANDFATHER_INODE_ID, 
      new PermissionStatus(userName, null,
      FsPermission.getDefault()), blocks, replication,
      1L, 2L, preferredBlockSize, codec);
}
Project: hadoop-EAR    File: TestINodeFile.java
@Test
public void testEmptyINodeRaidStorage() throws IOException {
  INodeFile emptyFile = createINodeRaidFile(replication,
      RaidCodecBuilder.getRSCodec("rs", 4, 10, RaidCodec.FULL_BLOCK, 
          parityReplication, parityReplication), preferredBlockSize, null);
  BlockInfo fakeBlockInfo = new BlockInfo(new Block(0, 0, 0), 1);
  assertEquals(2L, emptyFile.accessTime);
  assertEquals(1L, emptyFile.modificationTime);
  assertEquals(replication, emptyFile.getReplication());
  assertEquals(StorageType.RAID_STORAGE, emptyFile.getStorageType());
  assertEquals(null, emptyFile.getLastBlock());
  assertFalse(emptyFile.isLastBlock(fakeBlockInfo));
  LOG.info("Test getBlockIndex");
  try {
    emptyFile.getBlockIndex(fakeBlockInfo, "");
  } catch (IOException ioe) {
    assertTrue(ioe.getMessage().startsWith("blocks is null for file "));     
  }

  LOG.info("Test computeContentSummary");
  long[] summary = new long[]{0L, 0L, 0L, 0L};
  emptyFile.computeContentSummary(summary);
  assertEquals(0, summary[0]);
  assertEquals(1, summary[1]);
  assertEquals(0, summary[3]);

  LOG.info("Test collectSubtreeBlocksAndClear");
  ArrayList<BlockInfo> removedBlocks = new ArrayList<BlockInfo>();
  ArrayList<INode> removedINodes = new ArrayList<INode>();
  assertEquals(1, emptyFile.collectSubtreeBlocksAndClear(removedBlocks,
      Integer.MAX_VALUE, removedINodes));
  assertEquals(null, emptyFile.getStorage());
  assertEquals(0, removedBlocks.size());
  assertEquals(1, removedINodes.size());
  assertEquals(emptyFile, removedINodes.get(0));
}
Project: hadoop-EAR    File: UnderReplicatedBlocks.java
synchronized void update(BlockInfo blockInfo, int curReplicas, 
                         int decommissionedReplicas,
                         int curExpectedReplicas,
                         int curReplicasDelta, int expectedReplicasDelta) {
  int oldReplicas = curReplicas-curReplicasDelta;
  int oldExpectedReplicas = curExpectedReplicas-expectedReplicasDelta;
  int curPri = getPriority(blockInfo, curReplicas, decommissionedReplicas, curExpectedReplicas);
  int oldPri = getPriority(blockInfo, oldReplicas, decommissionedReplicas, oldExpectedReplicas);
  if(NameNode.stateChangeLog.isDebugEnabled()) {
    NameNode.stateChangeLog.debug("UnderReplicationBlocks.update " + 
                                  blockInfo +
                                  " curReplicas " + curReplicas +
                                  " curExpectedReplicas " + curExpectedReplicas +
                                  " oldReplicas " + oldReplicas +
                                  " oldExpectedReplicas  " + oldExpectedReplicas +
                                  " curPri  " + curPri +
                                  " oldPri  " + oldPri);
  }
  if(oldPri != LEVEL && oldPri != curPri) {
    remove(blockInfo, oldPri);
  }
  if(curPri != LEVEL && priorityQueues.get(curPri).add(blockInfo)) {
    if (NameNode.stateChangeLog.isDebugEnabled()) {
      NameNode.stateChangeLog.debug(
                                    "BLOCK* NameSystem.UnderReplicationBlock.update:"
                                    + blockInfo
                                    + " has only "+curReplicas
                                    + " replicas and need " + curExpectedReplicas
                                    + " replicas so is added to neededReplications"
                                    + " at priority level " + curPri);
    }
  }
}
Project: hadoop-EAR    File: LogOpGenerator.java
/**
 * Prefills the list of possible operations.
 */
private void init() {
  LOG.info("--- Generating " + numOpsPossible + " log operations! ---");
  Random rng = new Random();
  for (int i = 0; i < numFiles; i++) {
    PermissionStatus p = new PermissionStatus("hadoop",
        "hadoop", new FsPermission((short)0777));
    INodeFileUnderConstruction inode = new INodeFileUnderConstruction(inodeId.nextValue(),
        p, (short) 3, 64, 0, "", "", null);
    for (int b = 0; b < blocksPerFile; b++) {
      Block block = new Block(b);
      BlockInfo bi = new BlockInfo(block, 3);
      bi.setChecksum(rng.nextInt(Integer.MAX_VALUE) + 1);
      try {
        inode.storage.addBlock(bi);
      } catch (IOException ioe) {
        LOG.error("Cannot add block", ioe);
      }
    }
    FsPermission perm = new FsPermission((short) 0);
    String name = "/filename-" + i;
    possibleOps.addAll(Arrays.asList(newOpenFile(name, inode),
        newCloseFile(name, inode),
        newDelete(name, 0),
        newSetReplication(name, (short)3),
        newGenerationStamp(i),
        newMkDir(name, inode),
        newRename(name, name, i),
        newSetOwner(name, "hadoop", "hadop"),
        newSetQuota(name, 1, 1),
        newTimes(name, 0 , 0),
        newSetPermissions(name, perm),
        newConcat(name, new String[] { name, name, name}, i),
        newMerge(name, name, "xor", new int[]{1, 1, 1}, i)));
  }
  LOG.info("--- Created " + numOpsPossible + " log operations! ---");
}
Project: hadoop-EAR    File: TestBlockInfo.java
private void printContents(BlockInfo head) {
  BlockInfo it = head;
  while (it != null) {
    LOG.info("Block: " + it.toString());
    it = it.getNext(0);
  }
}
Project: hadoop-EAR    File: TestBlocksMap.java
private void insertBlocks(int numBlocks, boolean underConstruction) {
  Random r = new Random();
  map = new BlocksMap(1000, 0.75f, new MyNamesystem());
  Set<Long> ids = new HashSet<Long>(numBlocks);

  blockList = new HashSet<Block>(numBlocks);
  if (underConstruction) {
    INodeFile node = new INodeFile();
    iNode = new INodeFileUnderConstruction(node.getId(),
        node.getLocalNameBytes(), (short) 2,
        node.getModificationTime(), 0, node.getPreferredBlockSize(),
        node.getBlocks(), node.getPermissionStatus(), "", "", null);
  } else {
    iNode= new INodeFile();
  }
  int inserted = 0;

  while (inserted < numBlocks) {
    long id;
    while (ids.contains((id = r.nextLong())))
      ;
    ids.add(id);
    Block b = new Block(id, 0, GenerationStamp.FIRST_VALID_STAMP);
    blockList.add(b);
    BlockInfo info = map.addINode(b, iNode, iNode.getReplication());

    // create 2 datanode descriptors
    DatanodeDescriptor dd; 

    dd = new DatanodeDescriptor();
    dd.addBlock(info);
    dd = new DatanodeDescriptor();
    dd.addBlock(info);

    inserted++;
  }
}
Project: hadoop-EAR    File: TestBlocksMap.java
private void testShardedIterator(int numBlocks, long memSize) {

    // make the map have very few buckets
    doReturn(new Long(memSize)).when(runtime).maxMemory();

    insertBlocks(numBlocks, false);
    assertEquals(map.size(), numBlocks);
    assertEquals(blockList.size(), numBlocks);

    LOG.info("Starting iteration...");
    long start = System.currentTimeMillis();
    Set<Block> iteratedBlocks = new HashSet<Block>();

    // get sharded iterators
    List<Iterator<BlockInfo>> iterators = map.getBlocksIterarors(16);
    assertEquals(16, iterators.size());
    for (Iterator<BlockInfo> iterator : iterators) {
      LOG.info("Next sharded iterator");
      while (iterator.hasNext()) {
        Block b = new Block(iterator.next());
        // no block should be seen more than once
        assertFalse(iteratedBlocks.contains(b));
        iteratedBlocks.add(b);
      }
    }

    long stop = System.currentTimeMillis();

    // each block should be seen once
    assertEquals(blockList, iteratedBlocks);
    LOG.info("Iterated : " + numBlocks + " in: " + (stop - start));
  }
Project: hadoop-EAR    File: TestBlockReplicationQueue.java
protected void setUp(){
  blockList.clear();
  LOG.info("Generating blocks...");
  for (int i = 0; i < MAX_BLOCKS; i++) {
    blockList.add(new BlockInfo(new Block(i, 0,
        GenerationStamp.FIRST_VALID_STAMP), 3));
  }
}
Project: hadoop-EAR    File: TestRaidFile.java
private void fillChecksums(Path source) {
  INodeFile file = getINodeFile(nn, source);
  BlockInfo[] bis = file.getBlocks();
  for (int i = 0; i < bis.length; i++) {
    bis[i].setChecksum(1);
  }
}
Project: hadoop-EAR    File: TestPreTransactionalServerLogReader.java
private void writeOperation(EditLogFileOutputStream out,
    long txId, boolean forceSync) throws IOException {
  FSEditLogOp.AddOp op = FSEditLogOp.AddOp.getUniqueInstance();
  op.setTransactionId(txId);
  op.set(INodeId.GRANDFATHER_INODE_ID, "/a/b", (short)3, 100L, 100L, 100L, new BlockInfo[0],
      PermissionStatus.createImmutable("x", "y", FsPermission.getDefault()),
      "x", "y");
  out.write(op);
  LOG.info("Wrote operation " + txId);
  if (txId % 10 == 0 || forceSync) {
    out.setReadyToFlush();
    out.flush();
    LOG.info("Flushed operation " + txId);
  }  
}
Project: hadoop-EAR    File: RaidCodec.java
public Block getLastBlock(BlockInfo[] blocks) {
  if (blocks == null ||
      blocks.length == 0)
    return null;
  int mod = (blocks.length - 1) % numStripeBlocks;
  Block lastBlock = blocks[blocks.length - 1];
  if (mod < numParityBlocks) {
    LOG.error("Last block is not source block " + lastBlock + 
        " numBlocks: " + blocks.length + " codec: " + this);
    return null;
  }
  return lastBlock;
}
Project: hadoop-EAR    File: RaidCodec.java
public long getFileSize(BlockInfo[] blocks) {
  if (blocks == null) {
    return 0L;
  }
  long fileSize = 0L;
  for (int i = 0; i < blocks.length; i+=numStripeBlocks) {
    for (int dataBlockId = numParityBlocks;
        i + dataBlockId < blocks.length && dataBlockId < numStripeBlocks;
        dataBlockId++) {
      fileSize += blocks[i + dataBlockId].getNumBytes();
    }
  }
  return fileSize;
}
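Only the data positions of each stripe contribute to the logical file length, which is why the inner loop starts at numParityBlocks. A standalone sketch of the same summation with made-up block sizes:

// Standalone sketch of the raided-file size computation; the stripe geometry
// and the uniform 128-byte block size are demo assumptions.
public class RaidFileSizeDemo {
  public static void main(String[] args) {
    int numParityBlocks = 4;
    int numStripeBlocks = 14;              // 4 parity + 10 data blocks
    long[] blockSizes = new long[20];      // one full stripe plus a partial one
    java.util.Arrays.fill(blockSizes, 128L);

    long fileSize = 0L;
    for (int i = 0; i < blockSizes.length; i += numStripeBlocks) {
      for (int d = numParityBlocks;
           i + d < blockSizes.length && d < numStripeBlocks; d++) {
        fileSize += blockSizes[i + d];
      }
    }
    // 10 data blocks in the full stripe + 2 in the partial one = 12 * 128
    System.out.println(fileSize);          // prints 1536
  }
}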
Project: hadoop-EAR    File: RaidCodec.java
public BlockInfo[] convertToRaidStorage(BlockInfo[] parityBlocks, 
    BlockInfo[] blocks, int[] checksums, BlocksMap blocksMap,
    short replication, INodeFile inode) throws IOException {
  BlockInfo[] newList = new BlockInfo[parityBlocks.length + blocks.length];
  int pPos = 0;
  int sPos = 0;
  int pos = 0;
  int numStripes = getNumStripes(blocks.length);
  for (int i = 0; i < numStripes; i++) {
    System.arraycopy(parityBlocks, pPos, newList, pos, numParityBlocks);
    for (int j = pos; j < pos + numParityBlocks; j++) {
      blocksMap.updateINode(newList[j],
          new RaidBlockInfo(newList[j], parityReplication, j), inode,
          parityReplication, true);
    }
    pPos += numParityBlocks;
    pos += numParityBlocks;
    for (int j = 0; j < numDataBlocks && sPos < blocks.length; 
        j++, pos++, sPos++) {
      newList[pos] = blocks[sPos];
      if (checksums != null) {
        if (blocks[sPos].getChecksum() != BlockInfo.NO_BLOCK_CHECKSUM
            && blocks[sPos].getChecksum() != checksums[sPos]) {
          throw new IOException("Checksum mismatch for the " + sPos +
              "th source blocks. New=" + checksums[sPos] +
              ", Existing=" + blocks[sPos].getChecksum());
        }
        blocks[sPos].setChecksum(checksums[sPos]);
      }
      blocksMap.updateINode(newList[pos], new RaidBlockInfo(newList[pos], 
          replication, pos), inode, replication, true);
    }
  }
  return newList;
}
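The merged list built by convertToRaidStorage() interleaves the two inputs stripe by stripe: numParityBlocks parity entries, then up to numDataBlocks source entries. The standalone sketch below reproduces only that interleaving, with string labels instead of BlockInfo objects and an assumed 2-parity/3-data stripe:

// Interleaving demo: each stripe of the merged array starts with the stripe's
// parity entries and is followed by its data entries. Labels are illustrative.
public class RaidInterleaveDemo {
  public static void main(String[] args) {
    int numParityBlocks = 2;
    int numDataBlocks = 3;
    String[] parity = {"P0", "P1", "P2", "P3"};   // two stripes worth of parity
    String[] data   = {"D0", "D1", "D2", "D3"};   // 3 + 1 data blocks

    int numStripes = 2;                           // ceil(4 data blocks / 3 per stripe)
    String[] merged = new String[parity.length + data.length];
    int pPos = 0, sPos = 0, pos = 0;
    for (int i = 0; i < numStripes; i++) {
      System.arraycopy(parity, pPos, merged, pos, numParityBlocks);
      pPos += numParityBlocks;
      pos += numParityBlocks;
      for (int j = 0; j < numDataBlocks && sPos < data.length; j++, pos++, sPos++) {
        merged[pos] = data[sPos];
      }
    }
    // prints: [P0, P1, D0, D1, D2, P2, P3, D3]
    System.out.println(java.util.Arrays.toString(merged));
  }
}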
Project: hadoop-EAR    File: RaidCodec.java
public BlockInfo[] getBlocksInOneStripe(BlockInfo[] blocks, 
    RaidBlockInfo rbi) {
  int size = Math.min(this.numStripeBlocks, blocks.length - rbi.getIndex());
  BlockInfo[] stripeBlocks = new BlockInfo[size];
  System.arraycopy(blocks, rbi.getIndex(), stripeBlocks, 0, size);
  return stripeBlocks;
}
Project: hadoop-EAR    File: INodeRegularStorage.java
/**
 * add a block to the block list
 */
@Override
public void addBlock(BlockInfo newblock) {
  if (this.blocks == null) {
    this.blocks = new BlockInfo[1];
    this.blocks[0] = newblock;
  } else {
    int size = this.blocks.length;
    BlockInfo[] newlist = new BlockInfo[size + 1];
    System.arraycopy(this.blocks, 0, newlist, 0, size);
    newlist[size] = newblock;
    this.blocks = newlist;
  }
}
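The grow-by-one append above can also be written with java.util.Arrays.copyOf; a minimal standalone sketch of the same pattern, with plain longs in place of BlockInfo:

// Equivalent grow-by-one append using Arrays.copyOf. Keeping no spare
// capacity means an O(n) copy per append, but the array stays densely packed.
import java.util.Arrays;

public class AppendDemo {
  static long[] append(long[] blocks, long newBlock) {
    if (blocks == null) {
      return new long[] { newBlock };
    }
    long[] newList = Arrays.copyOf(blocks, blocks.length + 1);
    newList[blocks.length] = newBlock;
    return newList;
  }

  public static void main(String[] args) {
    long[] blocks = null;
    blocks = append(blocks, 1L);
    blocks = append(blocks, 2L);
    System.out.println(Arrays.toString(blocks)); // prints [1, 2]
  }
}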
Project: hadoop-EAR    File: INodeRegularStorage.java
@Override
public void removeBlock(Block oldblock) throws IOException {
  if (blocks == null) {
    throw new IOException("Trying to delete non-existant block " + oldblock);
  }
  int size_1 = blocks.length - 1;
  if (!blocks[size_1].equals(oldblock)) {
    throw new IOException("Trying to delete non-last block " + oldblock);
  }

  //copy to a new list
  BlockInfo[] newlist = new BlockInfo[size_1];
  System.arraycopy(blocks, 0, newlist, 0, size_1);
  blocks = newlist;
}
Project: hadoop-EAR    File: INodeDirectory.java
@Override
int collectSubtreeBlocksAndClear(List<BlockInfo> v, 
                                 int blocksLimit, 
                                 List<INode> removedINodes) {
  if (isBlocksLimitReached(v, blocksLimit)) {
    return 0;
  }
  int total = 0;
  if (children == null) {
    parent = null;
    name = null;
    removedINodes.add(this);
    return ++total;
  }
  int i;
  for (i=0; i<children.size(); i++) {
    INode child = children.get(i);
    total += child.collectSubtreeBlocksAndClear(v, blocksLimit, removedINodes);
    if (isBlocksLimitReached(v, blocksLimit)) {
      // reached blocks limit
      if (child.parent != null) {
        i--; // this child has not finished yet
      }
      break;
    }
  }
  if (i < children.size() - 1) { // only some of the children have been processed
    // Remove children [0,i]
    children = children.subList(i+1, children.size());
    return total;
  }
  // all the children are processed
  parent = null;
  name = null;
  children = null;
  removedINodes.add(this);
  return ++total;
}
Project: hadoop-EAR    File: INodeHardLinkFile.java
protected INodeHardLinkFile(long id, PermissionStatus permissions, BlockInfo[] blocks, 
   short replication, long modificationTime,  
   long atime, long preferredBlockSize,   
   HardLinkFileInfo hardLinkFileInfo) { 
  super(id,
      permissions,  
      blocks, 
      replication,  
      modificationTime,   
      atime,  
      preferredBlockSize,
      null);  
  this.hardLinkFileInfo = hardLinkFileInfo;

}
Project: hadoop-EAR    File: FSImageSerialization.java
static INodeFileUnderConstruction readINodeUnderConstruction(
    DataInputStream in, FSDirectory fsDir, int imgVersion) throws IOException {
  byte[] name = readBytes(in);
  long inodeId = LayoutVersion.supports(Feature.ADD_INODE_ID, imgVersion) ? in
      .readLong() : fsDir.allocateNewInodeId();
  short blockReplication = in.readShort();
  long modificationTime = in.readLong();
  long preferredBlockSize = in.readLong();
  int numBlocks = in.readInt();
  BlockInfo[] blocks = new BlockInfo[numBlocks];
  for (int i = 0; i < numBlocks; i++) {
    blocks[i] = new BlockInfo();
    blocks[i].readFields(in);
    if (LayoutVersion.supports(Feature.BLOCK_CHECKSUM, imgVersion)) {
      blocks[i].setChecksum(in.readInt());
    }
  }
  PermissionStatus perm = PermissionStatus.read(in);
  String clientName = readString(in);
  String clientMachine = readString(in);

  // These locations are not used at all
  int numLocs = in.readInt();
  DatanodeDescriptor[] locations = new DatanodeDescriptor[numLocs];
  for (int i = 0; i < numLocs; i++) {
    locations[i] = new DatanodeDescriptor();
    locations[i].readFields(in);
  }

  return new INodeFileUnderConstruction(inodeId,
                                        name, 
                                        blockReplication, 
                                        modificationTime,
                                        preferredBlockSize,
                                        blocks,
                                        perm,
                                        clientName,
                                        clientMachine,
                                        null);
}
Project: hadoop-EAR    File: FSImageSerialization.java
public static void writeBlocks(BlockInfo[] blocks, DataOutput out) throws IOException {
  out.writeInt(blocks.length); 
  for (BlockInfo blk : blocks) {
    blk.write(out);
    out.writeInt(blk.getChecksum());
  }
}
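The block list is written length-prefixed, so a reader must consume the same int count and the per-block checksum in the same order, as readINodeUnderConstruction() above does. A minimal standalone sketch of that pattern, with plain longs standing in for the serialized BlockInfo fields:

// Length-prefixed write/read round trip over an in-memory buffer; the ids
// and checksums are made-up demo values.
import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.IOException;

public class LengthPrefixedDemo {
  public static void main(String[] args) throws IOException {
    long[] blockIds = {101L, 102L, 103L};
    int[] checksums = {11, 22, 33};

    ByteArrayOutputStream buf = new ByteArrayOutputStream();
    DataOutputStream out = new DataOutputStream(buf);
    out.writeInt(blockIds.length);              // block count first
    for (int i = 0; i < blockIds.length; i++) {
      out.writeLong(blockIds[i]);               // stands in for blk.write(out)
      out.writeInt(checksums[i]);               // checksum follows each block
    }
    out.flush();

    DataInputStream in = new DataInputStream(
        new ByteArrayInputStream(buf.toByteArray()));
    int n = in.readInt();
    for (int i = 0; i < n; i++) {
      System.out.println("block " + in.readLong() + " checksum " + in.readInt());
    }
  }
}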
Project: hadoop-EAR    File: INodeFileUnderConstruction.java
public INodeFileUnderConstruction(long id,
                           byte[] name,
                           short blockReplication,
                           long modificationTime,
                           long preferredBlockSize,
                           BlockInfo[] blocks,
                           PermissionStatus perm,
                           String clientName,
                           String clientMachine,
                           DatanodeDescriptor clientNode) {
  this(id, name, blockReplication, modificationTime, modificationTime,preferredBlockSize, 
       blocks, perm, clientName, clientMachine,clientNode);
}
Project: hadoop-EAR    File: INodeFileUnderConstruction.java
/**
 * When deleting an open file, clear its targets before
 * collecting and clearing its blocks.
 */
int collectSubtreeBlocksAndClear(List<BlockInfo> v, 
                                 int blocksLimit, 
                                 List<INode> removedINodes) {
  clearTargets();
  return super.collectSubtreeBlocksAndClear(v, blocksLimit, removedINodes);
}
Project: hadoop-EAR    File: DatanodeDescriptor.java
/**
 * Remove block from the list of blocks belonging to the data-node.
 * Remove data-node from the block.
 */
boolean removeBlock(BlockInfo b) {
  blockList = b.listRemove(blockList, this);
  if ( b.removeNode(this) ) {
    numOfBlocks--;
    return true;
  } else {
    return false;
  }
}