Example source code for the Java class org.apache.hadoop.hdfs.server.blockmanagement.BlockManager

Project: hadoop    File: FsckServlet.java
/** Handle fsck request */
@Override
public void doGet(HttpServletRequest request, HttpServletResponse response
    ) throws IOException {
  @SuppressWarnings("unchecked")
  final Map<String,String[]> pmap = request.getParameterMap();
  final PrintWriter out = response.getWriter();
  final InetAddress remoteAddress = 
    InetAddress.getByName(request.getRemoteAddr());
  final ServletContext context = getServletContext();    
  final Configuration conf = NameNodeHttpServer.getConfFromContext(context);

  final UserGroupInformation ugi = getUGI(request, conf);
  try {
    ugi.doAs(new PrivilegedExceptionAction<Object>() {
      @Override
      public Object run() throws Exception {
        NameNode nn = NameNodeHttpServer.getNameNodeFromContext(context);

        final FSNamesystem namesystem = nn.getNamesystem();
        final BlockManager bm = namesystem.getBlockManager();
        final int totalDatanodes = 
            namesystem.getNumberOfDatanodes(DatanodeReportType.LIVE); 
        new NamenodeFsck(conf, nn,
            bm.getDatanodeManager().getNetworkTopology(), pmap, out,
            totalDatanodes, remoteAddress).fsck();

        return null;
      }
    });
  } catch (InterruptedException e) {
    response.sendError(400, e.getMessage());
  }
}
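Note: the servlet above reaches the BlockManager through the NameNode held in the servlet context. The following is a minimal, hypothetical sketch (not taken from any of the listed projects) that walks the same namesystem -> BlockManager -> DatanodeManager chain from a MiniDFSCluster in a test; the class name and cluster setup are illustrative assumptions.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager;
import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeManager;
import org.apache.hadoop.net.NetworkTopology;

public class BlockManagerLookupSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new HdfsConfiguration();
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
    try {
      cluster.waitActive();
      // Same chain the servlet uses: namesystem -> BlockManager -> DatanodeManager.
      BlockManager bm = cluster.getNamesystem().getBlockManager();
      DatanodeManager dm = bm.getDatanodeManager();
      NetworkTopology topology = dm.getNetworkTopology();
      System.out.println("Racks in topology: " + topology.getNumOfRacks());
    } finally {
      cluster.shutdown();
    }
  }
}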
Project: hadoop    File: DFSTestUtil.java
/**
 * @return the node which is expected to run the recovery of the
 * given block, which is known to be under construction inside the
 * given NameNode.
 */
public static DatanodeDescriptor getExpectedPrimaryNode(NameNode nn,
    ExtendedBlock blk) {
  BlockManager bm0 = nn.getNamesystem().getBlockManager();
  BlockInfoContiguous storedBlock = bm0.getStoredBlock(blk.getLocalBlock());
  assertTrue("Block " + blk + " should be under construction, " +
      "got: " + storedBlock,
      storedBlock instanceof BlockInfoContiguousUnderConstruction);
  BlockInfoContiguousUnderConstruction ucBlock =
    (BlockInfoContiguousUnderConstruction)storedBlock;
  // We expect that the replica with the most recent heartbeat will be
  // the one in charge of the synchronization / recovery protocol.
  final DatanodeStorageInfo[] storages = ucBlock.getExpectedStorageLocations();
  DatanodeStorageInfo expectedPrimary = storages[0];
  long mostRecentLastUpdate = expectedPrimary.getDatanodeDescriptor()
      .getLastUpdateMonotonic();
  for (int i = 1; i < storages.length; i++) {
    final long lastUpdate = storages[i].getDatanodeDescriptor()
        .getLastUpdateMonotonic();
    if (lastUpdate > mostRecentLastUpdate) {
      expectedPrimary = storages[i];
      mostRecentLastUpdate = lastUpdate;
    }
  }
  return expectedPrimary.getDatanodeDescriptor();
}
Project: aliyun-oss-hadoop-fs    File: FSDirAttrOp.java
static boolean setReplication(
    FSDirectory fsd, BlockManager bm, String src, final short replication)
    throws IOException {
  bm.verifyReplication(src, replication, null);
  final boolean isFile;
  FSPermissionChecker pc = fsd.getPermissionChecker();
  byte[][] pathComponents = FSDirectory.getPathComponentsForReservedPath(src);
  fsd.writeLock();
  try {
    src = fsd.resolvePath(pc, src, pathComponents);
    final INodesInPath iip = fsd.getINodesInPath4Write(src);
    if (fsd.isPermissionEnabled()) {
      fsd.checkPathAccess(pc, iip, FsAction.WRITE);
    }

    final BlockInfo[] blocks = unprotectedSetReplication(fsd, src,
                                                         replication);
    isFile = blocks != null;
    if (isFile) {
      fsd.getEditLog().logSetReplication(src, replication);
    }
  } finally {
    fsd.writeUnlock();
  }
  return isFile;
}
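For context, this NameNode-side helper is normally reached from the public client API. Below is a minimal, hypothetical client sketch; the path and target replication factor are illustrative assumptions.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class SetReplicationSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    try (FileSystem fs = FileSystem.get(conf)) {
      Path file = new Path("/tmp/example.txt");   // hypothetical path
      // The client-side call whose NameNode-side handling is shown above.
      boolean changed = fs.setReplication(file, (short) 2);
      System.out.println("Replication changed: " + changed);
    }
  }
}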
Project: aliyun-oss-hadoop-fs    File: FSDirAttrOp.java
static BlockStoragePolicy getStoragePolicy(FSDirectory fsd, BlockManager bm,
    String path) throws IOException {
  FSPermissionChecker pc = fsd.getPermissionChecker();
  byte[][] pathComponents = FSDirectory
      .getPathComponentsForReservedPath(path);
  fsd.readLock();
  try {
    path = fsd.resolvePath(pc, path, pathComponents);
    final INodesInPath iip = fsd.getINodesInPath(path, false);
    if (fsd.isPermissionEnabled()) {
      fsd.checkPathAccess(pc, iip, FsAction.READ);
    }
    INode inode = iip.getLastINode();
    if (inode == null) {
      throw new FileNotFoundException("File/Directory does not exist: "
          + iip.getPath());
    }
    return bm.getStoragePolicy(inode.getStoragePolicyID());
  } finally {
    fsd.readUnlock();
  }
}
Project: aliyun-oss-hadoop-fs    File: FSNamesystem.java
/**
 * Get a new generation stamp together with an access token for 
 * a block under construction
 * 
 * This method is called for recovering a failed write or setting up
 * a block for append.
 * 
 * @param block a block
 * @param clientName the name of a client
 * @return a located block with a new generation stamp and an access token
 * @throws IOException if any error occurs
 */
LocatedBlock bumpBlockGenerationStamp(ExtendedBlock block,
    String clientName) throws IOException {
  final LocatedBlock locatedBlock;
  checkOperation(OperationCategory.WRITE);
  writeLock();
  try {
    checkOperation(OperationCategory.WRITE);

    // check validity of parameters
    final INodeFile file = checkUCBlock(block, clientName);

    // get a new generation stamp and an access token
    block.setGenerationStamp(nextGenerationStamp(blockIdManager.isLegacyBlock(block.getLocalBlock())));

    locatedBlock = BlockManager.newLocatedBlock(
        block, file.getLastBlock(), null, -1);
    blockManager.setBlockToken(locatedBlock,
        BlockTokenIdentifier.AccessMode.WRITE);
  } finally {
    writeUnlock();
  }
  // Ensure we record the new generation stamp
  getEditLog().logSync();
  return locatedBlock;
}
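For context, bumpBlockGenerationStamp is exercised when recovering a failed write pipeline and when a block is set up for append. The sketch below is a hypothetical client-side append that can lead to this code path (assuming append is enabled on the cluster); the path and payload are illustrative.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class AppendSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    try (FileSystem fs = FileSystem.get(conf)) {
      Path file = new Path("/tmp/append-demo.txt");  // hypothetical path
      try (FSDataOutputStream out = fs.create(file)) {
        out.writeBytes("first write\n");
      }
      // Appending re-opens the last block; the NameNode issues a new
      // generation stamp and access token for it when the pipeline is set up.
      try (FSDataOutputStream out = fs.append(file)) {
        out.writeBytes("appended line\n");
      }
    }
  }
}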
Project: aliyun-oss-hadoop-fs    File: FSDirWriteFileOp.java
static DatanodeStorageInfo[] chooseTargetForNewBlock(
    BlockManager bm, String src, DatanodeInfo[] excludedNodes, String[]
    favoredNodes, ValidateAddBlockResult r) throws IOException {
  Node clientNode = bm.getDatanodeManager()
      .getDatanodeByHost(r.clientMachine);
  if (clientNode == null) {
    clientNode = getClientNode(bm, r.clientMachine);
  }

  Set<Node> excludedNodesSet = null;
  if (excludedNodes != null) {
    excludedNodesSet = new HashSet<>(excludedNodes.length);
    Collections.addAll(excludedNodesSet, excludedNodes);
  }
  List<String> favoredNodesList = (favoredNodes == null) ? null
      : Arrays.asList(favoredNodes);

  // choose targets for the new block to be allocated.
  return bm.chooseTarget4NewBlock(src, r.numTargets, clientNode,
                                  excludedNodesSet, r.blockSize,
                                  favoredNodesList, r.storagePolicyID,
                                  r.isStriped);
}
Project: aliyun-oss-hadoop-fs    File: DFSTestUtil.java
/**
 * @return the node which is expected to run the recovery of the
 * given block, which is known to be under construction inside the
 * given NameNode.
 */
public static DatanodeDescriptor getExpectedPrimaryNode(NameNode nn,
    ExtendedBlock blk) {
  BlockManager bm0 = nn.getNamesystem().getBlockManager();
  BlockInfo storedBlock = bm0.getStoredBlock(blk.getLocalBlock());
  assertTrue("Block " + blk + " should be under construction, " +
      "got: " + storedBlock, !storedBlock.isComplete());
  // We expect that the replica with the most recent heartbeat will be
  // the one in charge of the synchronization / recovery protocol.
  final DatanodeStorageInfo[] storages = storedBlock
      .getUnderConstructionFeature().getExpectedStorageLocations();
  DatanodeStorageInfo expectedPrimary = storages[0];
  long mostRecentLastUpdate = expectedPrimary.getDatanodeDescriptor()
      .getLastUpdateMonotonic();
  for (int i = 1; i < storages.length; i++) {
    final long lastUpdate = storages[i].getDatanodeDescriptor()
        .getLastUpdateMonotonic();
    if (lastUpdate > mostRecentLastUpdate) {
      expectedPrimary = storages[i];
      mostRecentLastUpdate = lastUpdate;
    }
  }
  return expectedPrimary.getDatanodeDescriptor();
}
Project: big-c    File: DFSTestUtil.java
/**
 * @return the node which is expected to run the recovery of the
 * given block, which is known to be under construction inside the
 * given NameNode.
 */
public static DatanodeDescriptor getExpectedPrimaryNode(NameNode nn,
    ExtendedBlock blk) {
  BlockManager bm0 = nn.getNamesystem().getBlockManager();
  BlockInfoContiguous storedBlock = bm0.getStoredBlock(blk.getLocalBlock());
  assertTrue("Block " + blk + " should be under construction, " +
      "got: " + storedBlock,
      storedBlock instanceof BlockInfoContiguousUnderConstruction);
  BlockInfoContiguousUnderConstruction ucBlock =
    (BlockInfoContiguousUnderConstruction)storedBlock;
  // We expect that the replica with the most recent heartbeat will be
  // the one in charge of the synchronization / recovery protocol.
  final DatanodeStorageInfo[] storages = ucBlock.getExpectedStorageLocations();
  DatanodeStorageInfo expectedPrimary = storages[0];
  long mostRecentLastUpdate = expectedPrimary.getDatanodeDescriptor()
      .getLastUpdateMonotonic();
  for (int i = 1; i < storages.length; i++) {
    final long lastUpdate = storages[i].getDatanodeDescriptor()
        .getLastUpdateMonotonic();
    if (lastUpdate > mostRecentLastUpdate) {
      expectedPrimary = storages[i];
      mostRecentLastUpdate = lastUpdate;
    }
  }
  return expectedPrimary.getDatanodeDescriptor();
}
Project: hadoop-2.6.0-cdh5.4.3    File: DFSTestUtil.java
/**
 * @return the node which is expected to run the recovery of the
 * given block, which is known to be under construction inside the
 * given NameNode.
 */
public static DatanodeDescriptor getExpectedPrimaryNode(NameNode nn,
    ExtendedBlock blk) {
  BlockManager bm0 = nn.getNamesystem().getBlockManager();
  BlockInfo storedBlock = bm0.getStoredBlock(blk.getLocalBlock());
  assertTrue("Block " + blk + " should be under construction, " +
      "got: " + storedBlock,
      storedBlock instanceof BlockInfoUnderConstruction);
  BlockInfoUnderConstruction ucBlock =
    (BlockInfoUnderConstruction)storedBlock;
  // We expect that the replica with the most recent heartbeat will be
  // the one in charge of the synchronization / recovery protocol.
  final DatanodeStorageInfo[] storages = ucBlock.getExpectedStorageLocations();
  DatanodeStorageInfo expectedPrimary = storages[0];
  long mostRecentLastUpdate = expectedPrimary.getDatanodeDescriptor().getLastUpdate();
  for (int i = 1; i < storages.length; i++) {
    final long lastUpdate = storages[i].getDatanodeDescriptor().getLastUpdate();
    if (lastUpdate > mostRecentLastUpdate) {
      expectedPrimary = storages[i];
      mostRecentLastUpdate = lastUpdate;
    }
  }
  return expectedPrimary.getDatanodeDescriptor();
}
Project: hadoop-plus    File: TestPipelinesFailover.java
/**
 * @return the node which is expected to run the recovery of the
 * given block, which is known to be under construction inside the
 * given NameNode.
 */
private DatanodeDescriptor getExpectedPrimaryNode(NameNode nn,
    ExtendedBlock blk) {
  BlockManager bm0 = nn.getNamesystem().getBlockManager();
  BlockInfo storedBlock = bm0.getStoredBlock(blk.getLocalBlock());
  assertTrue("Block " + blk + " should be under construction, " +
      "got: " + storedBlock,
      storedBlock instanceof BlockInfoUnderConstruction);
  BlockInfoUnderConstruction ucBlock =
    (BlockInfoUnderConstruction)storedBlock;
  // We expect that the replica with the most recent heartbeat will be
  // the one in charge of the synchronization / recovery protocol.
  DatanodeDescriptor[] datanodes = ucBlock.getExpectedLocations();
  DatanodeDescriptor expectedPrimary = datanodes[0];
  long mostRecentLastUpdate = expectedPrimary.getLastUpdate();
  for (int i = 1; i < datanodes.length; i++) {
    if (datanodes[i].getLastUpdate() > mostRecentLastUpdate) {
      expectedPrimary = datanodes[i];
      mostRecentLastUpdate = expectedPrimary.getLastUpdate();
    }
  }
  return expectedPrimary;
}
Project: FlexMap    File: DFSTestUtil.java
/**
 * @return the node which is expected to run the recovery of the
 * given block, which is known to be under construction inside the
 * given NameNode.
 */
public static DatanodeDescriptor getExpectedPrimaryNode(NameNode nn,
    ExtendedBlock blk) {
  BlockManager bm0 = nn.getNamesystem().getBlockManager();
  BlockInfo storedBlock = bm0.getStoredBlock(blk.getLocalBlock());
  assertTrue("Block " + blk + " should be under construction, " +
      "got: " + storedBlock,
      storedBlock instanceof BlockInfoUnderConstruction);
  BlockInfoUnderConstruction ucBlock =
    (BlockInfoUnderConstruction)storedBlock;
  // We expect that the replica with the most recent heartbeat will be
  // the one in charge of the synchronization / recovery protocol.
  final DatanodeStorageInfo[] storages = ucBlock.getExpectedStorageLocations();
  DatanodeStorageInfo expectedPrimary = storages[0];
  long mostRecentLastUpdate = expectedPrimary.getDatanodeDescriptor().getLastUpdate();
  for (int i = 1; i < storages.length; i++) {
    final long lastUpdate = storages[i].getDatanodeDescriptor().getLastUpdate();
    if (lastUpdate > mostRecentLastUpdate) {
      expectedPrimary = storages[i];
      mostRecentLastUpdate = lastUpdate;
    }
  }
  return expectedPrimary.getDatanodeDescriptor();
}
Project: hadoop-TCP    File: TestPipelinesFailover.java
/**
 * @return the node which is expected to run the recovery of the
 * given block, which is known to be under construction inside the
 * given NameNode.
 */
private DatanodeDescriptor getExpectedPrimaryNode(NameNode nn,
    ExtendedBlock blk) {
  BlockManager bm0 = nn.getNamesystem().getBlockManager();
  BlockInfo storedBlock = bm0.getStoredBlock(blk.getLocalBlock());
  assertTrue("Block " + blk + " should be under construction, " +
      "got: " + storedBlock,
      storedBlock instanceof BlockInfoUnderConstruction);
  BlockInfoUnderConstruction ucBlock =
    (BlockInfoUnderConstruction)storedBlock;
  // We expect that the replica with the most recent heartbeat will be
  // the one in charge of the synchronization / recovery protocol.
  DatanodeDescriptor[] datanodes = ucBlock.getExpectedLocations();
  DatanodeDescriptor expectedPrimary = datanodes[0];
  long mostRecentLastUpdate = expectedPrimary.getLastUpdate();
  for (int i = 1; i < datanodes.length; i++) {
    if (datanodes[i].getLastUpdate() > mostRecentLastUpdate) {
      expectedPrimary = datanodes[i];
      mostRecentLastUpdate = expectedPrimary.getLastUpdate();
    }
  }
  return expectedPrimary;
}
Project: hardfs    File: TestPipelinesFailover.java
/**
 * @return the node which is expected to run the recovery of the
 * given block, which is known to be under construction inside the
 * given NameNode.
 */
private DatanodeDescriptor getExpectedPrimaryNode(NameNode nn,
    ExtendedBlock blk) {
  BlockManager bm0 = nn.getNamesystem().getBlockManager();
  BlockInfo storedBlock = bm0.getStoredBlock(blk.getLocalBlock());
  assertTrue("Block " + blk + " should be under construction, " +
      "got: " + storedBlock,
      storedBlock instanceof BlockInfoUnderConstruction);
  BlockInfoUnderConstruction ucBlock =
    (BlockInfoUnderConstruction)storedBlock;
  // We expect that the replica with the most recent heartbeat will be
  // the one in charge of the synchronization / recovery protocol.
  DatanodeDescriptor[] datanodes = ucBlock.getExpectedLocations();
  DatanodeDescriptor expectedPrimary = datanodes[0];
  long mostRecentLastUpdate = expectedPrimary.getLastUpdate();
  for (int i = 1; i < datanodes.length; i++) {
    if (datanodes[i].getLastUpdate() > mostRecentLastUpdate) {
      expectedPrimary = datanodes[i];
      mostRecentLastUpdate = expectedPrimary.getLastUpdate();
    }
  }
  return expectedPrimary;
}
Project: hadoop-on-lustre2    File: NameNodeRpcServer.java
@Override // DatanodeProtocol
public DatanodeCommand blockReport(DatanodeRegistration nodeReg,
    String poolId, StorageBlockReport[] reports) throws IOException {
  verifyRequest(nodeReg);
  if(blockStateChangeLog.isDebugEnabled()) {
    blockStateChangeLog.debug("*BLOCK* NameNode.blockReport: "
         + "from " + nodeReg + ", reports.length=" + reports.length);
  }
  final BlockManager bm = namesystem.getBlockManager(); 
  boolean hasStaleStorages = true;
  for(StorageBlockReport r : reports) {
    final BlockListAsLongs blocks = new BlockListAsLongs(r.getBlocks());
    hasStaleStorages = bm.processReport(nodeReg, r.getStorage(), poolId, blocks);
  }

  if (nn.getFSImage().isUpgradeFinalized() &&
      !nn.isStandbyState() &&
      !hasStaleStorages) {
    return new FinalizeCommand(poolId);
  }

  return null;
}
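For context, full block reports normally arrive on the DataNodes' reporting schedule; in tests they can be forced. The sketch below is hypothetical and assumes MiniDFSCluster#triggerBlockReports is available in the Hadoop version being used; the cluster setup is illustrative.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.MiniDFSCluster;

public class TriggerBlockReportSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new HdfsConfiguration();
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
    try {
      cluster.waitActive();
      // Each DataNode sends a StorageBlockReport per storage; the NameNode
      // hands them to BlockManager.processReport as in the snippet above.
      cluster.triggerBlockReports();
    } finally {
      cluster.shutdown();
    }
  }
}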
Project: hadoop-on-lustre2    File: TestPipelinesFailover.java
/**
 * @return the node which is expected to run the recovery of the
 * given block, which is known to be under construction inside the
 * given NameNode.
 */
private DatanodeDescriptor getExpectedPrimaryNode(NameNode nn,
    ExtendedBlock blk) {
  BlockManager bm0 = nn.getNamesystem().getBlockManager();
  BlockInfo storedBlock = bm0.getStoredBlock(blk.getLocalBlock());
  assertTrue("Block " + blk + " should be under construction, " +
      "got: " + storedBlock,
      storedBlock instanceof BlockInfoUnderConstruction);
  BlockInfoUnderConstruction ucBlock =
    (BlockInfoUnderConstruction)storedBlock;
  // We expect that the replica with the most recent heartbeat will be
  // the one in charge of the synchronization / recovery protocol.
  final DatanodeStorageInfo[] storages = ucBlock.getExpectedStorageLocations();
  DatanodeStorageInfo expectedPrimary = storages[0];
  long mostRecentLastUpdate = expectedPrimary.getDatanodeDescriptor().getLastUpdate();
  for (int i = 1; i < storages.length; i++) {
    final long lastUpdate = storages[i].getDatanodeDescriptor().getLastUpdate();
    if (lastUpdate > mostRecentLastUpdate) {
      expectedPrimary = storages[i];
      mostRecentLastUpdate = lastUpdate;
    }
  }
  return expectedPrimary.getDatanodeDescriptor();
}
Project: hadoop    File: FSDirAttrOp.java
static boolean setReplication(
    FSDirectory fsd, BlockManager bm, String src, final short replication)
    throws IOException {
  bm.verifyReplication(src, replication, null);
  final boolean isFile;
  FSPermissionChecker pc = fsd.getPermissionChecker();
  byte[][] pathComponents = FSDirectory.getPathComponentsForReservedPath(src);
  fsd.writeLock();
  try {
    src = fsd.resolvePath(pc, src, pathComponents);
    final INodesInPath iip = fsd.getINodesInPath4Write(src);
    if (fsd.isPermissionEnabled()) {
      fsd.checkPathAccess(pc, iip, FsAction.WRITE);
    }

    final short[] blockRepls = new short[2]; // 0: old, 1: new
    final Block[] blocks = unprotectedSetReplication(fsd, src, replication,
                                                     blockRepls);
    isFile = blocks != null;
    if (isFile) {
      fsd.getEditLog().logSetReplication(src, replication);
      bm.setReplication(blockRepls[0], blockRepls[1], src, blocks);
    }
  } finally {
    fsd.writeUnlock();
  }
  return isFile;
}
Project: hadoop    File: FSDirAttrOp.java
static HdfsFileStatus setStoragePolicy(
    FSDirectory fsd, BlockManager bm, String src, final String policyName)
    throws IOException {
  if (!fsd.isStoragePolicyEnabled()) {
    throw new IOException(
        "Failed to set storage policy since "
            + DFS_STORAGE_POLICY_ENABLED_KEY + " is set to false.");
  }
  FSPermissionChecker pc = fsd.getPermissionChecker();
  byte[][] pathComponents = FSDirectory.getPathComponentsForReservedPath(src);
  INodesInPath iip;
  fsd.writeLock();
  try {
    src = FSDirectory.resolvePath(src, pathComponents, fsd);
    iip = fsd.getINodesInPath4Write(src);

    if (fsd.isPermissionEnabled()) {
      fsd.checkPathAccess(pc, iip, FsAction.WRITE);
    }

    // get the corresponding policy and make sure the policy name is valid
    BlockStoragePolicy policy = bm.getStoragePolicy(policyName);
    if (policy == null) {
      throw new HadoopIllegalArgumentException(
          "Cannot find a block policy with the name " + policyName);
    }
    unprotectedSetStoragePolicy(fsd, bm, iip, policy.getId());
    fsd.getEditLog().logSetStoragePolicy(src, policy.getId());
  } finally {
    fsd.writeUnlock();
  }
  return fsd.getAuditFileInfo(iip);
}
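For context, this NameNode-side operation is driven by the public DistributedFileSystem API. The sketch below is hypothetical; the path and the built-in policy name "COLD" are illustrative choices, and it assumes the default filesystem URI points at HDFS.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;

public class SetStoragePolicySketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    try (FileSystem fs = FileSystem.get(conf)) {
      DistributedFileSystem dfs = (DistributedFileSystem) fs;  // assumes an HDFS URI
      Path dir = new Path("/archive");        // hypothetical directory
      dfs.setStoragePolicy(dir, "COLD");      // one of the built-in policy names
    }
  }
}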
Project: hadoop    File: FSDirAttrOp.java
static void unprotectedSetStoragePolicy(
    FSDirectory fsd, BlockManager bm, INodesInPath iip, byte policyId)
    throws IOException {
  assert fsd.hasWriteLock();
  final INode inode = iip.getLastINode();
  if (inode == null) {
    throw new FileNotFoundException("File/Directory does not exist: "
        + iip.getPath());
  }
  final int snapshotId = iip.getLatestSnapshotId();
  if (inode.isFile()) {
    BlockStoragePolicy newPolicy = bm.getStoragePolicy(policyId);
    if (newPolicy.isCopyOnCreateFile()) {
      throw new HadoopIllegalArgumentException(
          "Policy " + newPolicy + " cannot be set after file creation.");
    }

    BlockStoragePolicy currentPolicy =
        bm.getStoragePolicy(inode.getLocalStoragePolicyID());

    if (currentPolicy != null && currentPolicy.isCopyOnCreateFile()) {
      throw new HadoopIllegalArgumentException(
          "Existing policy " + currentPolicy.getName() +
              " cannot be changed after file creation.");
    }
    inode.asFile().setStoragePolicyID(policyId, snapshotId);
  } else if (inode.isDirectory()) {
    setDirStoragePolicy(fsd, inode.asDirectory(), policyId, snapshotId);
  } else {
    throw new FileNotFoundException(iip.getPath()
        + " is not a file or directory");
  }
}
Project: hadoop    File: FSImageFormatPBINode.java
public static void updateBlocksMap(INodeFile file, BlockManager bm) {
  // Add file->block mapping
  final BlockInfoContiguous[] blocks = file.getBlocks();
  if (blocks != null) {
    for (int i = 0; i < blocks.length; i++) {
      file.setBlock(i, bm.addBlockCollection(blocks[i], file));
    }
  }
}
Project: hadoop    File: NameNodeRpcServer.java
@Override // DatanodeProtocol
public DatanodeCommand blockReport(DatanodeRegistration nodeReg,
      String poolId, StorageBlockReport[] reports,
      BlockReportContext context) throws IOException {
  checkNNStartup();
  verifyRequest(nodeReg);
  if(blockStateChangeLog.isDebugEnabled()) {
    blockStateChangeLog.debug("*BLOCK* NameNode.blockReport: "
         + "from " + nodeReg + ", reports.length=" + reports.length);
  }
  final BlockManager bm = namesystem.getBlockManager(); 
  boolean noStaleStorages = false;
  for (int r = 0; r < reports.length; r++) {
    final BlockListAsLongs blocks = reports[r].getBlocks();
    //
    // BlockManager.processReport accumulates information of prior calls
    // for the same node and storage, so the value returned by the last
    // call of this loop is the final updated value for noStaleStorages.
    //
    noStaleStorages = bm.processReport(nodeReg, reports[r].getStorage(),
        blocks, context, (r == reports.length - 1));
    metrics.incrStorageBlockReportOps();
  }

  if (nn.getFSImage().isUpgradeFinalized() &&
      !namesystem.isRollingUpgrade() &&
      !nn.isStandbyState() &&
      noStaleStorages) {
    return new FinalizeCommand(poolId);
  }

  return null;
}
Project: hadoop    File: CacheManager.java
CacheManager(FSNamesystem namesystem, Configuration conf,
    BlockManager blockManager) {
  this.namesystem = namesystem;
  this.blockManager = blockManager;
  this.nextDirectiveId = 1;
  this.maxListCachePoolsResponses = conf.getInt(
      DFS_NAMENODE_LIST_CACHE_POOLS_NUM_RESPONSES,
      DFS_NAMENODE_LIST_CACHE_POOLS_NUM_RESPONSES_DEFAULT);
  this.maxListCacheDirectivesNumResponses = conf.getInt(
      DFS_NAMENODE_LIST_CACHE_DIRECTIVES_NUM_RESPONSES,
      DFS_NAMENODE_LIST_CACHE_DIRECTIVES_NUM_RESPONSES_DEFAULT);
  scanIntervalMs = conf.getLong(
      DFS_NAMENODE_PATH_BASED_CACHE_REFRESH_INTERVAL_MS,
      DFS_NAMENODE_PATH_BASED_CACHE_REFRESH_INTERVAL_MS_DEFAULT);
  float cachedBlocksPercent = conf.getFloat(
        DFS_NAMENODE_PATH_BASED_CACHE_BLOCK_MAP_ALLOCATION_PERCENT,
        DFS_NAMENODE_PATH_BASED_CACHE_BLOCK_MAP_ALLOCATION_PERCENT_DEFAULT);
  if (cachedBlocksPercent < MIN_CACHED_BLOCKS_PERCENT) {
    LOG.info("Using minimum value {} for {}", MIN_CACHED_BLOCKS_PERCENT,
      DFS_NAMENODE_PATH_BASED_CACHE_BLOCK_MAP_ALLOCATION_PERCENT);
    cachedBlocksPercent = MIN_CACHED_BLOCKS_PERCENT;
  }
  this.cachedBlocks = new LightWeightGSet<CachedBlock, CachedBlock>(
        LightWeightGSet.computeCapacity(cachedBlocksPercent,
            "cachedBlocks"));

}
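For context, the cache pools and directives this CacheManager tracks are created by clients. The sketch below is hypothetical; the pool name and cached path are illustrative, and it assumes the default filesystem URI points at HDFS.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.protocol.CacheDirectiveInfo;
import org.apache.hadoop.hdfs.protocol.CachePoolInfo;

public class CacheDirectiveSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    try (FileSystem fs = FileSystem.get(conf)) {
      DistributedFileSystem dfs = (DistributedFileSystem) fs;  // assumes an HDFS URI
      dfs.addCachePool(new CachePoolInfo("demo-pool"));        // hypothetical pool
      long directiveId = dfs.addCacheDirective(
          new CacheDirectiveInfo.Builder()
              .setPath(new Path("/hot/data.bin"))              // hypothetical path
              .setPool("demo-pool")
              .build());
      System.out.println("Added cache directive " + directiveId);
    }
  }
}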
Project: hadoop    File: FSImageFormat.java
public void updateBlocksMap(INodeFile file) {
  // Add file->block mapping
  final BlockInfoContiguous[] blocks = file.getBlocks();
  if (blocks != null) {
    final BlockManager bm = namesystem.getBlockManager();
    for (int i = 0; i < blocks.length; i++) {
      file.setBlock(i, bm.addBlockCollection(blocks[i], file));
    } 
  }
}
Project: hadoop    File: DFSTestUtil.java
public static void setNameNodeLogLevel(Level level) {
  GenericTestUtils.setLogLevel(FSNamesystem.LOG, level);
  GenericTestUtils.setLogLevel(BlockManager.LOG, level);
  GenericTestUtils.setLogLevel(LeaseManager.LOG, level);
  GenericTestUtils.setLogLevel(NameNode.LOG, level);
  GenericTestUtils.setLogLevel(NameNode.stateChangeLog, level);
  GenericTestUtils.setLogLevel(NameNode.blockStateChangeLog, level);
}
Project: hadoop    File: TestSnapshotBlocksMap.java
static INodeFile assertBlockCollection(String path, int numBlocks,
   final FSDirectory dir, final BlockManager blkManager) throws Exception {
  final INodeFile file = INodeFile.valueOf(dir.getINode(path), path);
  assertEquals(numBlocks, file.getBlocks().length);
  for(BlockInfoContiguous b : file.getBlocks()) {
    assertBlockCollection(blkManager, file, b);
  }
  return file;
}
Project: hadoop    File: SnapshotTestHelper.java
/** Disable the logs that are not very useful for snapshot related tests. */
public static void disableLogs() {
  final String[] lognames = {
      "org.apache.hadoop.hdfs.server.datanode.BlockPoolSliceScanner",
      "org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl",
      "org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetAsyncDiskService",
  };
  for(String n : lognames) {
    GenericTestUtils.disableLog(LogFactory.getLog(n));
  }

  GenericTestUtils.disableLog(LogFactory.getLog(UserGroupInformation.class));
  GenericTestUtils.disableLog(LogFactory.getLog(BlockManager.class));
  GenericTestUtils.disableLog(LogFactory.getLog(FSNamesystem.class));
  GenericTestUtils.disableLog(LogFactory.getLog(DirectoryScanner.class));
  GenericTestUtils.disableLog(LogFactory.getLog(MetricsSystemImpl.class));

  GenericTestUtils.disableLog(BlockScanner.LOG);
  GenericTestUtils.disableLog(HttpServer2.LOG);
  GenericTestUtils.disableLog(DataNode.LOG);
  GenericTestUtils.disableLog(BlockPoolSliceStorage.LOG);
  GenericTestUtils.disableLog(LeaseManager.LOG);
  GenericTestUtils.disableLog(NameNode.stateChangeLog);
  GenericTestUtils.disableLog(NameNode.blockStateChangeLog);
  GenericTestUtils.disableLog(DFSClient.LOG);
  GenericTestUtils.disableLog(Server.LOG);
}
Project: hadoop    File: TestDataNodeVolumeFailure.java
/**
 * Test that there are under-replicated blocks after volume failures.
 */
@Test
public void testUnderReplicationAfterVolFailure() throws Exception {
  // This test relies on denying access to data volumes to simulate data volume
  // failure.  This doesn't work on Windows, because an owner of an object
  // always has the ability to read and change permissions on the object.
  assumeTrue(!Path.WINDOWS);

  // Bring up one more datanode
  cluster.startDataNodes(conf, 1, true, null, null);
  cluster.waitActive();

  final BlockManager bm = cluster.getNamesystem().getBlockManager();

  Path file1 = new Path("/test1");
  DFSTestUtil.createFile(fs, file1, 1024, (short)3, 1L);
  DFSTestUtil.waitReplication(fs, file1, (short)3);

  // Fail the first volume on both datanodes
  File dn1Vol1 = new File(dataDir, "data"+(2*0+1));
  File dn2Vol1 = new File(dataDir, "data"+(2*1+1));
  DataNodeTestUtils.injectDataDirFailure(dn1Vol1, dn2Vol1);

  Path file2 = new Path("/test2");
  DFSTestUtil.createFile(fs, file2, 1024, (short)3, 1L);
  DFSTestUtil.waitReplication(fs, file2, (short)3);

  // underReplicatedBlocks are due to failed volumes
  int underReplicatedBlocks =
      BlockManagerTestUtil.checkHeartbeatAndGetUnderReplicatedBlocksCount(
          cluster.getNamesystem(), bm);
  assertTrue("There is no under replicated block after volume failure",
      underReplicatedBlocks > 0);
}
Project: hadoop    File: TestFileCreation.java
private void assertBlocks(BlockManager bm, LocatedBlocks lbs, 
    boolean exist) {
  for (LocatedBlock locatedBlock : lbs.getLocatedBlocks()) {
    if (exist) {
      assertTrue(bm.getStoredBlock(locatedBlock.getBlock().
          getLocalBlock()) != null);
    } else {
      assertTrue(bm.getStoredBlock(locatedBlock.getBlock().
          getLocalBlock()) == null);
    }
  }
}
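For context, the LocatedBlocks handed to assertBlocks come from the client. The sketch below is hypothetical and assumes DFSClient#getLocatedBlocks is accessible from test code; the path is illustrative.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.protocol.LocatedBlock;
import org.apache.hadoop.hdfs.protocol.LocatedBlocks;

public class LocatedBlocksSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    try (FileSystem fs = FileSystem.get(conf)) {
      DistributedFileSystem dfs = (DistributedFileSystem) fs;   // assumes an HDFS URI
      String file = "/tmp/example.txt";                         // hypothetical path
      // Fetch the block locations that a check like assertBlocks would inspect.
      LocatedBlocks lbs = dfs.getClient().getLocatedBlocks(file, 0);
      for (LocatedBlock lb : lbs.getLocatedBlocks()) {
        System.out.println(lb.getBlock() + " on " + lb.getLocations().length + " nodes");
      }
    }
  }
}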
Project: aliyun-oss-hadoop-fs    File: FSDirectory.java
/**
 * Tell the block manager to update the replication factors when delete
 * happens. Deleting a file or a snapshot might decrease the replication
 * factor of the blocks as the blocks are always replicated to the highest
 * replication factor among all snapshots.
 */
void updateReplicationFactor(Collection<UpdatedReplicationInfo> blocks) {
  BlockManager bm = getBlockManager();
  for (UpdatedReplicationInfo e : blocks) {
    BlockInfo b = e.block();
    bm.setReplication(b.getReplication(), e.targetReplication(), b);
  }
}
Project: aliyun-oss-hadoop-fs    File: INodeFile.java
/**
 * append array of blocks to this.blocks
 */
void concatBlocks(INodeFile[] inodes, BlockManager bm) {
  int size = this.blocks.length;
  int totalAddedBlocks = 0;
  for(INodeFile f : inodes) {
    Preconditions.checkState(f.isStriped() == this.isStriped());
    totalAddedBlocks += f.blocks.length;
  }

  BlockInfo[] newlist =
      new BlockInfo[size + totalAddedBlocks];
  System.arraycopy(this.blocks, 0, newlist, 0, size);

  for(INodeFile in: inodes) {
    System.arraycopy(in.blocks, 0, newlist, size, in.blocks.length);
    size += in.blocks.length;
  }

  setBlocks(newlist);
  for(BlockInfo b : blocks) {
    b.setBlockCollectionId(getId());
    short oldRepl = b.getReplication();
    short repl = getPreferredBlockReplication();
    if (oldRepl != repl) {
      bm.setReplication(oldRepl, repl, b);
    }
  }
}
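For context, concatBlocks is the NameNode-side block merge behind the public concat operation. The sketch below is a hypothetical client call; the target and source paths are illustrative, and HDFS enforces several preconditions (for example, matching block sizes) that are not shown here.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;

public class ConcatSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    try (FileSystem fs = FileSystem.get(conf)) {
      DistributedFileSystem dfs = (DistributedFileSystem) fs;  // assumes an HDFS URI
      Path target = new Path("/data/part-merged");             // hypothetical paths
      Path[] sources = { new Path("/data/part-00001"), new Path("/data/part-00002") };
      // The source files' blocks are appended to the target's block list.
      dfs.concat(target, sources);
    }
  }
}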
Project: aliyun-oss-hadoop-fs    File: FSDirAttrOp.java
static HdfsFileStatus setStoragePolicy(
    FSDirectory fsd, BlockManager bm, String src, final String policyName)
    throws IOException {
  if (!fsd.isStoragePolicyEnabled()) {
    throw new IOException(
        "Failed to set storage policy since "
            + DFS_STORAGE_POLICY_ENABLED_KEY + " is set to false.");
  }
  FSPermissionChecker pc = fsd.getPermissionChecker();
  byte[][] pathComponents = FSDirectory.getPathComponentsForReservedPath(src);
  INodesInPath iip;
  fsd.writeLock();
  try {
    src = FSDirectory.resolvePath(src, pathComponents, fsd);
    iip = fsd.getINodesInPath4Write(src);

    if (fsd.isPermissionEnabled()) {
      fsd.checkPathAccess(pc, iip, FsAction.WRITE);
    }

    // get the corresponding policy and make sure the policy name is valid
    BlockStoragePolicy policy = bm.getStoragePolicy(policyName);
    if (policy == null) {
      throw new HadoopIllegalArgumentException(
          "Cannot find a block policy with the name " + policyName);
    }
    unprotectedSetStoragePolicy(fsd, bm, iip, policy.getId());
    fsd.getEditLog().logSetStoragePolicy(src, policy.getId());
  } finally {
    fsd.writeUnlock();
  }
  return fsd.getAuditFileInfo(iip);
}
Project: aliyun-oss-hadoop-fs    File: FSDirAttrOp.java
static void unprotectedSetStoragePolicy(
    FSDirectory fsd, BlockManager bm, INodesInPath iip, byte policyId)
    throws IOException {
  assert fsd.hasWriteLock();
  final INode inode = iip.getLastINode();
  if (inode == null) {
    throw new FileNotFoundException("File/Directory does not exist: "
        + iip.getPath());
  }
  final int snapshotId = iip.getLatestSnapshotId();
  if (inode.isFile()) {
    BlockStoragePolicy newPolicy = bm.getStoragePolicy(policyId);
    if (newPolicy.isCopyOnCreateFile()) {
      throw new HadoopIllegalArgumentException(
          "Policy " + newPolicy + " cannot be set after file creation.");
    }

    BlockStoragePolicy currentPolicy =
        bm.getStoragePolicy(inode.getLocalStoragePolicyID());

    if (currentPolicy != null && currentPolicy.isCopyOnCreateFile()) {
      throw new HadoopIllegalArgumentException(
          "Existing policy " + currentPolicy.getName() +
              " cannot be changed after file creation.");
    }
    inode.asFile().setStoragePolicyID(policyId, snapshotId);
  } else if (inode.isDirectory()) {
    setDirStoragePolicy(fsd, inode.asDirectory(), policyId, snapshotId);
  } else {
    throw new FileNotFoundException(iip.getPath()
        + " is not a file or directory");
  }
}
Project: aliyun-oss-hadoop-fs    File: FsckServlet.java
/** Handle fsck request */
@Override
public void doGet(HttpServletRequest request, HttpServletResponse response
    ) throws IOException {
  @SuppressWarnings("unchecked")
  final Map<String,String[]> pmap = request.getParameterMap();
  final PrintWriter out = response.getWriter();
  final InetAddress remoteAddress = 
    InetAddress.getByName(request.getRemoteAddr());
  final ServletContext context = getServletContext();    
  final Configuration conf = NameNodeHttpServer.getConfFromContext(context);

  final UserGroupInformation ugi = getUGI(request, conf);
  try {
    ugi.doAs(new PrivilegedExceptionAction<Object>() {
      @Override
      public Object run() throws Exception {
        NameNode nn = NameNodeHttpServer.getNameNodeFromContext(context);

        final FSNamesystem namesystem = nn.getNamesystem();
        final BlockManager bm = namesystem.getBlockManager();
        final int totalDatanodes = 
            namesystem.getNumberOfDatanodes(DatanodeReportType.LIVE); 
        new NamenodeFsck(conf, nn,
            bm.getDatanodeManager().getNetworkTopology(), pmap, out,
            totalDatanodes, remoteAddress).fsck();

        return null;
      }
    });
  } catch (InterruptedException e) {
    response.sendError(400, e.getMessage());
  }
}
Project: aliyun-oss-hadoop-fs    File: FSImageFormatPBINode.java
public static void updateBlocksMap(INodeFile file, BlockManager bm) {
  // Add file->block mapping
  final BlockInfo[] blocks = file.getBlocks();
  if (blocks != null) {
    for (int i = 0; i < blocks.length; i++) {
      file.setBlock(i, bm.addBlockCollectionWithCheck(blocks[i], file));
    }
  }
}
Project: aliyun-oss-hadoop-fs    File: NameNodeRpcServer.java
@Override // DatanodeProtocol
public void blockReceivedAndDeleted(final DatanodeRegistration nodeReg,
    String poolId, StorageReceivedDeletedBlocks[] receivedAndDeletedBlocks)
        throws IOException {
  checkNNStartup();
  verifyRequest(nodeReg);
  metrics.incrBlockReceivedAndDeletedOps();
  if(blockStateChangeLog.isDebugEnabled()) {
    blockStateChangeLog.debug("*BLOCK* NameNode.blockReceivedAndDeleted: "
        +"from "+nodeReg+" "+receivedAndDeletedBlocks.length
        +" blocks.");
  }
  final BlockManager bm = namesystem.getBlockManager();
  for (final StorageReceivedDeletedBlocks r : receivedAndDeletedBlocks) {
    bm.enqueueBlockOp(new Runnable() {
      @Override
      public void run() {
        try {
          namesystem.processIncrementalBlockReport(nodeReg, r);
        } catch (Exception ex) {
          // usually because the node is unregistered/dead.  next heartbeat
          // will correct the problem
          blockStateChangeLog.error(
              "*BLOCK* NameNode.blockReceivedAndDeleted: "
                  + "failed from " + nodeReg + ": " + ex.getMessage());
        }
      }
    });
  }
}
Project: aliyun-oss-hadoop-fs    File: FSDirWriteFileOp.java
static LocatedBlock makeLocatedBlock(FSNamesystem fsn, BlockInfo blk,
    DatanodeStorageInfo[] locs, long offset) throws IOException {
  LocatedBlock lBlk = BlockManager.newLocatedBlock(
      fsn.getExtendedBlock(new Block(blk)), blk, locs, offset);
  fsn.getBlockManager().setBlockToken(lBlk,
      BlockTokenIdentifier.AccessMode.WRITE);
  return lBlk;
}
Project: aliyun-oss-hadoop-fs    File: FSDirWriteFileOp.java
private static void setNewINodeStoragePolicy(BlockManager bm, INodeFile
    inode, INodesInPath iip, boolean isLazyPersist)
    throws IOException {

  if (isLazyPersist) {
    BlockStoragePolicy lpPolicy =
        bm.getStoragePolicy("LAZY_PERSIST");

    // Set LAZY_PERSIST storage policy if the flag was passed to
    // CreateFile.
    if (lpPolicy == null) {
      throw new HadoopIllegalArgumentException(
          "The LAZY_PERSIST storage policy has been disabled " +
          "by the administrator.");
    }
    inode.setStoragePolicyID(lpPolicy.getId(),
                               iip.getLatestSnapshotId());
  } else {
    BlockStoragePolicy effectivePolicy =
        bm.getStoragePolicy(inode.getStoragePolicyID());

    if (effectivePolicy != null &&
        effectivePolicy.isCopyOnCreateFile()) {
      // Copy effective policy from ancestor directory to current file.
      inode.setStoragePolicyID(effectivePolicy.getId(),
                               iip.getLatestSnapshotId());
    }
  }
}
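For context, the LAZY_PERSIST branch above corresponds to a client creating a file with CreateFlag.LAZY_PERSIST. The sketch below is hypothetical; the path, buffer size, replication, and block size are illustrative values.

import java.util.EnumSet;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.CreateFlag;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.FsPermission;

public class LazyPersistCreateSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    try (FileSystem fs = FileSystem.get(conf)) {
      Path file = new Path("/tmp/lazy-persist-demo.bin");   // hypothetical path
      try (FSDataOutputStream out = fs.create(file,
          FsPermission.getFileDefault(),
          EnumSet.of(CreateFlag.CREATE, CreateFlag.LAZY_PERSIST),
          4096,                 // buffer size
          (short) 1,            // LAZY_PERSIST files are typically single-replica
          128 * 1024 * 1024L,   // block size
          null)) {              // no progress callback
        out.writeBytes("written to memory first, persisted lazily\n");
      }
    }
  }
}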
Project: aliyun-oss-hadoop-fs    File: CacheManager.java
CacheManager(FSNamesystem namesystem, Configuration conf,
    BlockManager blockManager) {
  this.namesystem = namesystem;
  this.blockManager = blockManager;
  this.nextDirectiveId = 1;
  this.maxListCachePoolsResponses = conf.getInt(
      DFS_NAMENODE_LIST_CACHE_POOLS_NUM_RESPONSES,
      DFS_NAMENODE_LIST_CACHE_POOLS_NUM_RESPONSES_DEFAULT);
  this.maxListCacheDirectivesNumResponses = conf.getInt(
      DFS_NAMENODE_LIST_CACHE_DIRECTIVES_NUM_RESPONSES,
      DFS_NAMENODE_LIST_CACHE_DIRECTIVES_NUM_RESPONSES_DEFAULT);
  scanIntervalMs = conf.getLong(
      DFS_NAMENODE_PATH_BASED_CACHE_REFRESH_INTERVAL_MS,
      DFS_NAMENODE_PATH_BASED_CACHE_REFRESH_INTERVAL_MS_DEFAULT);
  float cachedBlocksPercent = conf.getFloat(
        DFS_NAMENODE_PATH_BASED_CACHE_BLOCK_MAP_ALLOCATION_PERCENT,
        DFS_NAMENODE_PATH_BASED_CACHE_BLOCK_MAP_ALLOCATION_PERCENT_DEFAULT);
  if (cachedBlocksPercent < MIN_CACHED_BLOCKS_PERCENT) {
    LOG.info("Using minimum value {} for {}", MIN_CACHED_BLOCKS_PERCENT,
      DFS_NAMENODE_PATH_BASED_CACHE_BLOCK_MAP_ALLOCATION_PERCENT);
    cachedBlocksPercent = MIN_CACHED_BLOCKS_PERCENT;
  }
  this.cachedBlocks = new LightWeightGSet<CachedBlock, CachedBlock>(
        LightWeightGSet.computeCapacity(cachedBlocksPercent,
            "cachedBlocks"));

}
Project: aliyun-oss-hadoop-fs    File: FSImageFormat.java
public void updateBlocksMap(INodeFile file) {
  // Add file->block mapping
  final BlockInfo[] blocks = file.getBlocks();
  if (blocks != null) {
    final BlockManager bm = namesystem.getBlockManager();
    for (int i = 0; i < blocks.length; i++) {
      file.setBlock(i, bm.addBlockCollectionWithCheck(blocks[i], file));
    } 
  }
}
Project: aliyun-oss-hadoop-fs    File: DFSTestUtil.java
public static void setNameNodeLogLevel(Level level) {
  GenericTestUtils.setLogLevel(FSNamesystem.LOG, level);
  GenericTestUtils.setLogLevel(BlockManager.LOG, level);
  GenericTestUtils.setLogLevel(LeaseManager.LOG, level);
  GenericTestUtils.setLogLevel(NameNode.LOG, level);
  GenericTestUtils.setLogLevel(NameNode.stateChangeLog, level);
  GenericTestUtils.setLogLevel(NameNode.blockStateChangeLog, level);
}
Project: aliyun-oss-hadoop-fs    File: TestDeadDatanode.java
@Test
public void testDeadNodeAsBlockTarget() throws Exception {
  Configuration conf = new HdfsConfiguration();
  conf.setInt(DFSConfigKeys.DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY, 500);
  conf.setLong(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, 1L);
  cluster = new MiniDFSCluster.Builder(conf).numDataNodes(3).build();
  cluster.waitActive();

  String poolId = cluster.getNamesystem().getBlockPoolId();
  // wait for datanode to be marked live
  DataNode dn = cluster.getDataNodes().get(0);
  DatanodeRegistration reg = DataNodeTestUtils.getDNRegistrationForBP(cluster
      .getDataNodes().get(0), poolId);
  // Get the updated datanode descriptor
  BlockManager bm = cluster.getNamesystem().getBlockManager();
  DatanodeManager dm = bm.getDatanodeManager();
  Node clientNode = dm.getDatanode(reg);

  DFSTestUtil.waitForDatanodeState(cluster, reg.getDatanodeUuid(), true,
      20000);

  // Shutdown and wait for datanode to be marked dead
  dn.shutdown();
  DFSTestUtil.waitForDatanodeState(cluster, reg.getDatanodeUuid(), false,
      20000);
  // Get the updated datanode descriptor available in DNM
  // choose the targets, but local node should not get selected as this is not
  // part of the cluster anymore
  DatanodeStorageInfo[] results = bm.chooseTarget4NewBlock("/hello", 3,
      clientNode, new HashSet<Node>(), 256 * 1024 * 1024L, null, (byte) 7,
      false);
  for (DatanodeStorageInfo datanodeStorageInfo : results) {
    assertFalse("Dead node should not be choosen", datanodeStorageInfo
        .getDatanodeDescriptor().equals(clientNode));
  }
}