Java class org.apache.hadoop.hdfs.server.namenode.UnderReplicatedBlocks.BlockIterator usage examples

Project: cumulus    File: FSNamesystem.java
/**
 * @param path Restrict corrupt files to this portion of namespace.
 * @param startBlockAfter Support for continuation; the set of files returned
 *  is ordered by block id, and startBlockAfter indicates where to resume
 * @return a list in which each entry describes a corrupt file/block
 * @throws AccessControlException
 * @throws IOException
 */
Collection<CorruptFileBlockInfo> listCorruptFileBlocks(String path,
    String startBlockAfter) throws AccessControlException, IOException {

  readLock();
  try {
    checkSuperuserPrivilege();
    long startBlockId = 0;
    // print a limited # of corrupt files per call
    int count = 0;
    ArrayList<CorruptFileBlockInfo> corruptFiles = new ArrayList<CorruptFileBlockInfo>();

    if (startBlockAfter != null) {
      startBlockId = Block.filename2id(startBlockAfter);
    }
    BlockIterator blkIterator = blockManager.getCorruptReplicaBlockIterator();
    while (blkIterator.hasNext()) {
      Block blk = blkIterator.next();
      INode inode = blockManager.getINode(blk);
      if (inode != null && blockManager.countNodes(blk).liveReplicas() == 0) {
        String src = FSDirectory.getFullPathName(inode);
        if (((startBlockAfter == null) || (blk.getBlockId() > startBlockId))
            && (src.startsWith(path))) {
          corruptFiles.add(new CorruptFileBlockInfo(src, blk));
          count++;
          if (count >= DEFAULT_MAX_CORRUPT_FILEBLOCKS_RETURNED)
            break;
        }
      }
    }
    LOG.info("list corrupt file blocks returned: " + count);
    return corruptFiles;
  } finally {
    readUnlock();
  }
}
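
The cumulus variant pages by block id: results are capped per call, and the caller passes the block file name of the last entry it received as startBlockAfter on the next call. Below is a minimal, self-contained sketch of that contract from the caller's side; NamenodeHandle, CorruptEntry, and MAX_PER_CALL are illustrative stand-ins rather than real HDFS types, and the "blk_<id>" cookie format is an assumption based on the Block.filename2id() call above.

import java.util.ArrayList;
import java.util.List;

// Hypothetical caller-side sketch of the startBlockAfter continuation used by
// listCorruptFileBlocks() above. NamenodeHandle and CorruptEntry are
// illustrative stand-ins, not real HDFS types.
public class CorruptBlockPager {

  // Stand-in for one entry of the returned Collection<CorruptFileBlockInfo>.
  static class CorruptEntry {
    final String path;
    final long blockId;
    CorruptEntry(String path, long blockId) {
      this.path = path;
      this.blockId = blockId;
    }
  }

  // Stand-in for the namenode call; results are ordered by block id and
  // capped at MAX_PER_CALL entries per invocation.
  interface NamenodeHandle {
    List<CorruptEntry> listCorruptFileBlocks(String path, String startBlockAfter);
  }

  // Illustrative cap, standing in for DEFAULT_MAX_CORRUPT_FILEBLOCKS_RETURNED.
  static final int MAX_PER_CALL = 100;

  // Collect every corrupt entry under 'path' by resuming from the last block seen.
  static List<CorruptEntry> listAll(NamenodeHandle nn, String path) {
    List<CorruptEntry> all = new ArrayList<CorruptEntry>();
    String cookie = null; // null means "start from the beginning"
    while (true) {
      List<CorruptEntry> page = nn.listCorruptFileBlocks(path, cookie);
      all.addAll(page);
      if (page.size() < MAX_PER_CALL) {
        break; // a short page means the iterator was exhausted
      }
      // Assumed cookie format: the block file name of the last entry,
      // which Block.filename2id() can turn back into a block id.
      cookie = "blk_" + page.get(page.size() - 1).blockId;
    }
    return all;
  }
}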
Project: hadoop-EAR    File: FSNamesystem.java
/**
 * Return an iterator over the set of blocks whose remaining replicas are all
 * corrupt (i.e. blocks with no live replicas).
 */
BlockIterator getCorruptReplicaBlockIterator() {
  return neededReplications
    .iterator(UnderReplicatedBlocks.QUEUE_WITH_CORRUPT_BLOCKS);
}
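
getCorruptReplicaBlockIterator() does no work of its own: it asks the neededReplications structure for an iterator over a single priority level, the queue holding blocks whose only replicas are corrupt. A rough sketch of that per-level iterator idea follows; the number of levels and the constant are illustrative, not the real UnderReplicatedBlocks values.

import java.util.ArrayList;
import java.util.Iterator;
import java.util.List;
import java.util.TreeSet;

// Rough sketch of a leveled block queue with a per-level iterator, in the
// spirit of UnderReplicatedBlocks.iterator(level). Level count and constant
// below are illustrative only.
class LeveledBlockQueues {
  static final int LEVELS = 3;
  static final int QUEUE_WITH_CORRUPT_BLOCKS = LEVELS - 1; // assumed last level

  private final List<TreeSet<Long>> priorityQueues = new ArrayList<TreeSet<Long>>();

  LeveledBlockQueues() {
    for (int i = 0; i < LEVELS; i++) {
      priorityQueues.add(new TreeSet<Long>());
    }
  }

  // Register a block id at the given priority level.
  void add(long blockId, int level) {
    priorityQueues.get(level).add(blockId);
  }

  // Iterate only the blocks stored at one priority level, e.g. the corrupt queue.
  Iterator<Long> iterator(int level) {
    return priorityQueues.get(level).iterator();
  }
}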
Project: hadoop-EAR    File: FSNamesystem.java
/**
 * @param path Restrict corrupt files to this portion of namespace.
 * @param cookieTab Support for continuation; a single-element array whose
 *  first entry records how many blocks have been examined so far, and which
 *  is updated on return so the next call can resume from that point
 * @param decommissioningOnly if set, the blocks returned are those whose
 *  only remaining replicas are on nodes that are being decommissioned
 * @return a list in which each entry describes a corrupt file/block
 * @throws AccessControlException
 * @throws IOException
 */
Collection<CorruptFileBlockInfo>
  listCorruptFileBlocks(String path,
                        String[] cookieTab,
                        boolean decommissioningOnly) 
  throws IOException {
  readLock();
  synchronized (neededReplications) {
    try {
      if (!isPopulatingReplQueues()) {
        throw new IOException("Cannot run listCorruptFileBlocks because "
            + "replication queues have not been initialized.");
      }

      // print a limited # of corrupt files per call
      int count = 0;
      ArrayList<CorruptFileBlockInfo> corruptFiles = 
        new ArrayList<CorruptFileBlockInfo>();

      BlockIterator blkIterator = null;
      if (decommissioningOnly) {
        blkIterator = neededReplications.iterator(0);
      } else {
        blkIterator = getCorruptReplicaBlockIterator();
      }

      if (cookieTab == null) {
        cookieTab = new String[] { null };
      }
      int skip = getIntCookie(cookieTab[0]);
      for(int i = 0; i < skip && blkIterator.hasNext(); i++) {
        blkIterator.next();
      }

      while (blkIterator.hasNext()) {
        Block blk = blkIterator.next();
        INode inode = blocksMap.getINode(blk);
        skip++;
        if (inode != null) {
          try {
            String src = FSDirectory.getFullPathName(inode);
            if (src != null && src.startsWith(path)) {
              NumberReplicas num = countNodes(blk);
              if (num.liveReplicas == 0) {
                if (decommissioningOnly && num.decommissionedReplicas > 0 ||
                    !decommissioningOnly && num.decommissionedReplicas == 0) {
                  corruptFiles.add(new CorruptFileBlockInfo(src, blk));
                  count++;
                  if (count >= maxCorruptFilesReturned)
                    break;
                }
              }
            }
          } catch (IOException ioe) {
            // the inode may have already been deleted; ignore it
            LOG.info("Invalid inode", ioe);
          }
        }
      }
      cookieTab[0] = String.valueOf(skip);
      LOG.info("list corrupt file blocks under " + path  + ": " + count);
      return corruptFiles;
    } finally {
      readUnlock();
    }
  }
}
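
Unlike the cumulus version, the hadoop-EAR and RDFS variants carry a plain skip count in cookieTab[0]: each call fast-forwards the iterator past that many blocks and writes the new count back before returning. The standalone sketch below isolates that mechanic; the String list stands in for the block iterator, and getIntCookie() here is a guess at the helper's behavior, since it is not shown in this listing.

import java.util.Arrays;
import java.util.Iterator;
import java.util.List;

// Standalone sketch of the cookieTab[0] skip-count pagination used by the
// listCorruptFileBlocks() variants above. The block iterator is stood in for
// by a list of strings; getIntCookie() is an assumed reimplementation.
public class CookiePagingDemo {

  // Parse the cookie; a missing or malformed cookie means "start at 0".
  static int getIntCookie(String cookie) {
    if (cookie == null) {
      return 0;
    }
    try {
      return Math.max(0, Integer.parseInt(cookie));
    } catch (NumberFormatException e) {
      return 0;
    }
  }

  // Examine at most 'max' blocks per call, recording progress in cookieTab[0].
  static void listPage(List<String> blocks, String[] cookieTab, int max) {
    int skip = getIntCookie(cookieTab[0]);
    Iterator<String> it = blocks.iterator();
    for (int i = 0; i < skip && it.hasNext(); i++) {
      it.next(); // fast-forward past blocks examined by earlier calls
    }
    int count = 0;
    while (it.hasNext() && count < max) {
      String blk = it.next();
      skip++;
      count++;
      System.out.println("examined " + blk);
    }
    cookieTab[0] = String.valueOf(skip); // hand the new position back to the caller
  }

  public static void main(String[] args) {
    List<String> blocks = Arrays.asList("blk_1", "blk_2", "blk_3", "blk_4", "blk_5");
    String[] cookieTab = new String[] { null };
    listPage(blocks, cookieTab, 2); // examines blk_1, blk_2
    listPage(blocks, cookieTab, 2); // resumes at blk_3
  }
}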
Project: cumulus    File: BlockManager.java
/**
 * Return an iterator over the set of blocks whose remaining replicas are all
 * corrupt (i.e. blocks with no live replicas).
 */
BlockIterator getCorruptReplicaBlockIterator() {
  return neededReplications
      .iterator(UnderReplicatedBlocks.QUEUE_WITH_CORRUPT_BLOCKS);
}
Project: RDFS    File: FSNamesystem.java
/**
 * Return an iterator over the set of blocks whose remaining replicas are all
 * corrupt (i.e. blocks with no live replicas).
 */
BlockIterator getCorruptReplicaBlockIterator() {
  return neededReplications
    .iterator(UnderReplicatedBlocks.QUEUE_WITH_CORRUPT_BLOCKS);
}
Project: RDFS    File: FSNamesystem.java
/**
 * @param path Restrict corrupt files to this portion of namespace.
 * @param cookieTab Support for continuation; a single-element array whose
 *  first entry records how many blocks have been examined so far, and which
 *  is updated on return so the next call can resume from that point
 * @param decommissioningOnly if set, the blocks returned are those whose
 *  only remaining replicas are on nodes that are being decommissioned
 * @return a list in which each entry describes a corrupt file/block
 * @throws AccessControlException
 * @throws IOException
 */
Collection<CorruptFileBlockInfo>
  listCorruptFileBlocks(String path,
                        String[] cookieTab,
                        boolean decommissioningOnly) 
  throws IOException {
  readLock();
  synchronized (neededReplications) {
    try {
      if (!isPopulatingReplQueues()) {
        throw new IOException("Cannot run listCorruptFileBlocks because "
            + "replication queues have not been initialized.");
      }

      checkSuperuserPrivilege();
      // print a limited # of corrupt files per call
      int count = 0;
      ArrayList<CorruptFileBlockInfo> corruptFiles = 
        new ArrayList<CorruptFileBlockInfo>();

      BlockIterator blkIterator = null;
      if (decommissioningOnly) {
        blkIterator = neededReplications.iterator(0);
      } else {
        blkIterator = getCorruptReplicaBlockIterator();
      }

      if (cookieTab == null) {
        cookieTab = new String[] { null };
      }
      int skip = getIntCookie(cookieTab[0]);
      for(int i = 0; i < skip && blkIterator.hasNext(); i++) {
        blkIterator.next();
      }

      while (blkIterator.hasNext()) {
        Block blk = blkIterator.next();
        INode inode = blocksMap.getINode(blk);
        skip++;
        if (inode != null) {
          String src = FSDirectory.getFullPathName(inode);
          if (src.startsWith(path)) {
            NumberReplicas num = countNodes(blk);
            if (num.liveReplicas == 0) {
              if (decommissioningOnly && num.decommissionedReplicas > 0 ||
                  !decommissioningOnly && num.decommissionedReplicas == 0) {
                corruptFiles.add(new CorruptFileBlockInfo(src, blk));
                count++;
                if (count >= maxCorruptFilesReturned)
                  break;
              }
            }
          }
        }
      }
      cookieTab[0] = String.valueOf(skip);
      LOG.info("list corrupt file blocks under " + path  + ": " + count);
      return corruptFiles;
    } finally {
      readUnlock();
    }
  }
}
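
Both cookie-based variants apply the same replica-count filter before reporting a block: only blocks with no live replicas are considered, split by whether any replicas survive on decommissioning nodes. Restated as a standalone predicate (ReplicaCounts is an illustrative stand-in for NumberReplicas), the rule reads:

// Hedged restatement of the filter used in the listCorruptFileBlocks()
// variants above. ReplicaCounts is an illustrative stand-in for NumberReplicas.
public class CorruptFilter {

  static class ReplicaCounts {
    final int live;
    final int decommissioned;
    ReplicaCounts(int live, int decommissioned) {
      this.live = live;
      this.decommissioned = decommissioned;
    }
  }

  // A block is reported only when it has no live replicas, and:
  //  - decommissioningOnly: it still has replicas on decommissioning nodes, or
  //  - otherwise: it has no decommissioned replicas at all.
  static boolean shouldReport(ReplicaCounts num, boolean decommissioningOnly) {
    if (num.live != 0) {
      return false;
    }
    return decommissioningOnly ? num.decommissioned > 0
                               : num.decommissioned == 0;
  }

  public static void main(String[] args) {
    System.out.println(shouldReport(new ReplicaCounts(0, 2), true));  // true
    System.out.println(shouldReport(new ReplicaCounts(0, 0), false)); // true
    System.out.println(shouldReport(new ReplicaCounts(1, 0), false)); // false
  }
}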