Java class org.apache.hadoop.hdfs.protocol.UnregisteredNodeException: example source code

Project: hadoop    File: BackupNode.java
/** 
 * Verifies a journal request
 */
private void verifyJournalRequest(JournalInfo journalInfo)
    throws IOException {
  verifyLayoutVersion(journalInfo.getLayoutVersion());
  String errorMsg = null;
  int expectedNamespaceID = namesystem.getNamespaceInfo().getNamespaceID();
  if (journalInfo.getNamespaceId() != expectedNamespaceID) {
    errorMsg = "Invalid namespaceID in journal request - expected " + expectedNamespaceID
        + " actual " + journalInfo.getNamespaceId();
    LOG.warn(errorMsg);
    throw new UnregisteredNodeException(journalInfo);
  } 
  if (!journalInfo.getClusterId().equals(namesystem.getClusterId())) {
    errorMsg = "Invalid clusterId in journal request - expected "
        + journalInfo.getClusterId() + " actual " + namesystem.getClusterId();
    LOG.warn(errorMsg);
    throw new UnregisteredNodeException(journalInfo);
  }
}
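Note that in the snippet above the composed errorMsg is logged but not attached to the thrown exception; the UnregisteredNodeException constructor used here takes the JournalInfo itself. For context, a minimal caller-side sketch is shown below, relying only on the fact that UnregisteredNodeException extends IOException; the JournalCall interface and sendOnce method are illustrative and not part of the Hadoop API.

import java.io.IOException;
import org.apache.hadoop.hdfs.protocol.UnregisteredNodeException;

// Illustrative sketch: a caller of a journal RPC can separate "wrong cluster or
// namespace" failures from ordinary I/O errors, because UnregisteredNodeException
// is a subclass of IOException. JournalCall is a stand-in for the real RPC call.
public class JournalClientSketch {
  public interface JournalCall {
    void run() throws IOException;
  }

  public boolean sendOnce(JournalCall call) throws IOException {
    try {
      call.run();                       // stands in for the actual journal RPC
      return true;
    } catch (UnregisteredNodeException e) {
      // The receiving node rejected our namespaceID/clusterId; retrying without
      // re-establishing the handshake would fail the same way.
      return false;
    }
  }
}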
Project: aliyun-oss-hadoop-fs    File: BackupNode.java
/** 
 * Verifies a journal request
 */
private void verifyJournalRequest(JournalInfo journalInfo)
    throws IOException {
  verifyLayoutVersion(journalInfo.getLayoutVersion());
  String errorMsg = null;
  int expectedNamespaceID = namesystem.getNamespaceInfo().getNamespaceID();
  if (journalInfo.getNamespaceId() != expectedNamespaceID) {
    errorMsg = "Invalid namespaceID in journal request - expected " + expectedNamespaceID
        + " actual " + journalInfo.getNamespaceId();
    LOG.warn(errorMsg);
    throw new UnregisteredNodeException(journalInfo);
  } 
  if (!journalInfo.getClusterId().equals(namesystem.getClusterId())) {
    errorMsg = "Invalid clusterId in journal request - expected "
        + journalInfo.getClusterId() + " actual " + namesystem.getClusterId();
    LOG.warn(errorMsg);
    throw new UnregisteredNodeException(journalInfo);
  }
}
Project: big-c    File: BackupNode.java
/** 
 * Verifies a journal request
 */
private void verifyJournalRequest(JournalInfo journalInfo)
    throws IOException {
  verifyLayoutVersion(journalInfo.getLayoutVersion());
  String errorMsg = null;
  int expectedNamespaceID = namesystem.getNamespaceInfo().getNamespaceID();
  if (journalInfo.getNamespaceId() != expectedNamespaceID) {
    errorMsg = "Invalid namespaceID in journal request - expected " + expectedNamespaceID
        + " actual " + journalInfo.getNamespaceId();
    LOG.warn(errorMsg);
    throw new UnregisteredNodeException(journalInfo);
  } 
  if (!journalInfo.getClusterId().equals(namesystem.getClusterId())) {
    errorMsg = "Invalid clusterId in journal request - expected "
        + journalInfo.getClusterId() + " actual " + namesystem.getClusterId();
    LOG.warn(errorMsg);
    throw new UnregisteredNodeException(journalInfo);
  }
}
Project: hadoop-2.6.0-cdh5.4.3    File: BackupNode.java
/** 
 * Verifies a journal request
 */
private void verifyJournalRequest(JournalInfo journalInfo)
    throws IOException {
  verifyLayoutVersion(journalInfo.getLayoutVersion());
  String errorMsg = null;
  int expectedNamespaceID = namesystem.getNamespaceInfo().getNamespaceID();
  if (journalInfo.getNamespaceId() != expectedNamespaceID) {
    errorMsg = "Invalid namespaceID in journal request - expected " + expectedNamespaceID
        + " actual " + journalInfo.getNamespaceId();
    LOG.warn(errorMsg);
    throw new UnregisteredNodeException(journalInfo);
  } 
  if (!journalInfo.getClusterId().equals(namesystem.getClusterId())) {
    errorMsg = "Invalid clusterId in journal request - expected "
        + journalInfo.getClusterId() + " actual " + namesystem.getClusterId();
    LOG.warn(errorMsg);
    throw new UnregisteredNodeException(journalInfo);
  }
}
Project: hadoop-plus    File: BackupNode.java
/** 
 * Verifies a journal request
 */
private void verifyJournalRequest(JournalInfo journalInfo)
    throws IOException {
  verifyLayoutVersion(journalInfo.getLayoutVersion());
  String errorMsg = null;
  int expectedNamespaceID = namesystem.getNamespaceInfo().getNamespaceID();
  if (journalInfo.getNamespaceId() != expectedNamespaceID) {
    errorMsg = "Invalid namespaceID in journal request - expected " + expectedNamespaceID
        + " actual " + journalInfo.getNamespaceId();
    LOG.warn(errorMsg);
    throw new UnregisteredNodeException(journalInfo);
  } 
  if (!journalInfo.getClusterId().equals(namesystem.getClusterId())) {
    errorMsg = "Invalid clusterId in journal request - expected "
        + journalInfo.getClusterId() + " actual " + namesystem.getClusterId();
    LOG.warn(errorMsg);
    throw new UnregisteredNodeException(journalInfo);
  }
}
Project: hadoop-plus    File: DatanodeManager.java
/**
 * Remove a datanode
 * @throws UnregisteredNodeException 
 */
public void removeDatanode(final DatanodeID node
    ) throws UnregisteredNodeException {
  namesystem.writeLock();
  try {
    final DatanodeDescriptor descriptor = getDatanode(node);
    if (descriptor != null) {
      removeDatanode(descriptor);
    } else {
      NameNode.stateChangeLog.warn("BLOCK* removeDatanode: "
                                   + node + " does not exist");
    }
  } finally {
    namesystem.writeUnlock();
  }
}
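The lookup and removal above run entirely inside the namesystem write lock, which is released in a finally block even if removal fails. A generic, simplified sketch of that lock discipline, using a plain ReentrantReadWriteLock and a Map rather than the Hadoop namesystem, is:

import java.util.HashMap;
import java.util.Map;
import java.util.concurrent.locks.ReentrantReadWriteLock;

// Generic illustration of the write-lock pattern above, not Hadoop code:
// look up and remove under the write lock, and always release in finally.
public class WriteLockedRemoveSketch {
  private final ReentrantReadWriteLock lock = new ReentrantReadWriteLock();
  private final Map<String, Object> nodes = new HashMap<>();

  public void remove(String nodeId) {
    lock.writeLock().lock();
    try {
      if (nodes.remove(nodeId) == null) {
        // mirrors the "does not exist" warning in the snippet above
        System.err.println("removeDatanode: " + nodeId + " does not exist");
      }
    } finally {
      lock.writeLock().unlock();
    }
  }
}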
Project: FlexMap    File: BackupNode.java
/** 
 * Verifies a journal request
 */
private void verifyJournalRequest(JournalInfo journalInfo)
    throws IOException {
  verifyLayoutVersion(journalInfo.getLayoutVersion());
  String errorMsg = null;
  int expectedNamespaceID = namesystem.getNamespaceInfo().getNamespaceID();
  if (journalInfo.getNamespaceId() != expectedNamespaceID) {
    errorMsg = "Invalid namespaceID in journal request - expected " + expectedNamespaceID
        + " actual " + journalInfo.getNamespaceId();
    LOG.warn(errorMsg);
    throw new UnregisteredNodeException(journalInfo);
  } 
  if (!journalInfo.getClusterId().equals(namesystem.getClusterId())) {
    errorMsg = "Invalid clusterId in journal request - expected "
        + journalInfo.getClusterId() + " actual " + namesystem.getClusterId();
    LOG.warn(errorMsg);
    throw new UnregisteredNodeException(journalInfo);
  }
}
Project: hops    File: DatanodeManager.java
/**
 * Get data node by storage ID.
 *
 * @param nodeID
 * @return DatanodeDescriptor or null if the node is not found.
 * @throws UnregisteredNodeException
 */
public DatanodeDescriptor getDatanode(DatanodeID nodeID)
    throws UnregisteredNodeException {
  DatanodeDescriptor node = null;
  if (nodeID != null && nodeID.getStorageID() != null &&
      !nodeID.getStorageID().equals("")) {
    node = getDatanode(nodeID.getStorageID());
  }
  if (node == null) {
    return null;
  }
  if (!node.getXferAddr().equals(nodeID.getXferAddr())) {
    final UnregisteredNodeException e =
        new UnregisteredNodeException(nodeID, node);
    NameNode.stateChangeLog
        .fatal("BLOCK* NameSystem.getDatanode: " + e.getLocalizedMessage());
    throw e;
  }
  return node;
}
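The contract above distinguishes two failure modes: an unknown or empty storage ID yields null and lets the caller decide what to do, while a storage ID that resolves to a descriptor with a different transfer address is reported via UnregisteredNodeException. A simplified, non-Hadoop sketch of that decision, with plain strings standing in for DatanodeID and DatanodeDescriptor:

// Simplified illustration of the lookup contract implemented above.
public final class LookupContractSketch {
  public static String classify(String requestedXferAddr, String storedXferAddr) {
    if (storedXferAddr == null) {
      return "unknown node: return null and let the caller react";
    }
    if (!storedXferAddr.equals(requestedXferAddr)) {
      return "storage ID claimed by a different address: throw UnregisteredNodeException";
    }
    return "match: return the stored descriptor";
  }
}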
Project: hadoop-TCP    File: BackupNode.java
/** 
 * Verifies a journal request
 */
private void verifyJournalRequest(JournalInfo journalInfo)
    throws IOException {
  verifyLayoutVersion(journalInfo.getLayoutVersion());
  String errorMsg = null;
  int expectedNamespaceID = namesystem.getNamespaceInfo().getNamespaceID();
  if (journalInfo.getNamespaceId() != expectedNamespaceID) {
    errorMsg = "Invalid namespaceID in journal request - expected " + expectedNamespaceID
        + " actual " + journalInfo.getNamespaceId();
    LOG.warn(errorMsg);
    throw new UnregisteredNodeException(journalInfo);
  } 
  if (!journalInfo.getClusterId().equals(namesystem.getClusterId())) {
    errorMsg = "Invalid clusterId in journal request - expected "
        + journalInfo.getClusterId() + " actual " + namesystem.getClusterId();
    LOG.warn(errorMsg);
    throw new UnregisteredNodeException(journalInfo);
  }
}
Project: hardfs    File: BackupNode.java
/** 
 * Verifies a journal request
 */
private void verifyJournalRequest(JournalInfo journalInfo)
    throws IOException {
  verifyLayoutVersion(journalInfo.getLayoutVersion());
  String errorMsg = null;
  int expectedNamespaceID = namesystem.getNamespaceInfo().getNamespaceID();
  if (journalInfo.getNamespaceId() != expectedNamespaceID) {
    errorMsg = "Invalid namespaceID in journal request - expected " + expectedNamespaceID
        + " actual " + journalInfo.getNamespaceId();
    LOG.warn(errorMsg);
    throw new UnregisteredNodeException(journalInfo);
  } 
  if (!journalInfo.getClusterId().equals(namesystem.getClusterId())) {
    errorMsg = "Invalid clusterId in journal request - expected "
        + journalInfo.getClusterId() + " actual " + namesystem.getClusterId();
    LOG.warn(errorMsg);
    throw new UnregisteredNodeException(journalInfo);
  }
}
Project: hadoop-on-lustre2    File: BackupNode.java
/** 
 * Verifies a journal request
 */
private void verifyJournalRequest(JournalInfo journalInfo)
    throws IOException {
  verifyLayoutVersion(journalInfo.getLayoutVersion());
  String errorMsg = null;
  int expectedNamespaceID = namesystem.getNamespaceInfo().getNamespaceID();
  if (journalInfo.getNamespaceId() != expectedNamespaceID) {
    errorMsg = "Invalid namespaceID in journal request - expected " + expectedNamespaceID
        + " actual " + journalInfo.getNamespaceId();
    LOG.warn(errorMsg);
    throw new UnregisteredNodeException(journalInfo);
  } 
  if (!journalInfo.getClusterId().equals(namesystem.getClusterId())) {
    errorMsg = "Invalid clusterId in journal request - expected "
        + journalInfo.getClusterId() + " actual " + namesystem.getClusterId();
    LOG.warn(errorMsg);
    throw new UnregisteredNodeException(journalInfo);
  }
}
Project: hadoop    File: NameNodeRpcServer.java
/** 
 * Verifies the given registration.
 * 
 * @param nodeReg node registration
 * @throws UnregisteredNodeException if the registration is invalid
 */
private void verifyRequest(NodeRegistration nodeReg) throws IOException {
  // verify registration ID
  final String id = nodeReg.getRegistrationID();
  final String expectedID = namesystem.getRegistrationID();
  if (!expectedID.equals(id)) {
    LOG.warn("Registration IDs mismatched: the "
        + nodeReg.getClass().getSimpleName() + " ID is " + id
        + " but the expected ID is " + expectedID);
     throw new UnregisteredNodeException(nodeReg);
  }
}
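The check above is a plain string comparison between the registration ID carried by the node and the one the namesystem expects; a mismatch is turned into an UnregisteredNodeException built from the offending registration. A minimal sketch of that contract follows, relying only on the getRegistrationID() accessor and the UnregisteredNodeException(NodeRegistration) constructor that appear in these snippets:

import java.io.IOException;
import org.apache.hadoop.hdfs.protocol.UnregisteredNodeException;
import org.apache.hadoop.hdfs.server.protocol.NodeRegistration;

// Minimal sketch of the registration check above, isolated from the RPC server.
public final class RegistrationCheckSketch {
  public static void check(NodeRegistration nodeReg, String expectedID)
      throws IOException {
    if (!expectedID.equals(nodeReg.getRegistrationID())) {
      throw new UnregisteredNodeException(nodeReg);
    }
  }
}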
Project: hadoop    File: BlockManager.java
/**
 * Get blocks to invalidate for <i>nodeId</i>
 * in {@link #invalidateBlocks}.
 *
 * @return number of blocks scheduled for removal during this iteration.
 */
private int invalidateWorkForOneNode(DatanodeInfo dn) {
  final List<Block> toInvalidate;

  namesystem.writeLock();
  try {
    // blocks should not be replicated or removed if safe mode is on
    if (namesystem.isInSafeMode()) {
      LOG.debug("In safemode, not computing replication work");
      return 0;
    }
    try {
      DatanodeDescriptor dnDescriptor = datanodeManager.getDatanode(dn);
      if (dnDescriptor == null) {
        LOG.warn("DataNode " + dn + " cannot be found with UUID " +
            dn.getDatanodeUuid() + ", removing block invalidation work.");
        invalidateBlocks.remove(dn);
        return 0;
      }
      toInvalidate = invalidateBlocks.invalidateWork(dnDescriptor);

      if (toInvalidate == null) {
        return 0;
      }
    } catch(UnregisteredNodeException une) {
      return 0;
    }
  } finally {
    namesystem.writeUnlock();
  }
  blockLog.info("BLOCK* {}: ask {} to delete {}", getClass().getSimpleName(),
      dn, toInvalidate);
  return toInvalidate.size();
}
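When the datanode cannot be resolved, either because the descriptor is null or because the lookup throws UnregisteredNodeException, the method above drops the invalidation work and returns 0 rather than failing the whole replication-monitor iteration. A simplified sketch of that fallback, with a Callable standing in for invalidateBlocks.invalidateWork(...) rather than the actual BlockManager API:

import java.util.List;
import java.util.concurrent.Callable;

// Simplified illustration of the fallback above: work for an unknown or
// unregistered node is dropped (return 0) instead of aborting the iteration.
public final class InvalidateWorkSketch {
  public static int scheduledBlocks(Callable<List<?>> invalidateWork) {
    final List<?> toInvalidate;
    try {
      toInvalidate = invalidateWork.call();
      if (toInvalidate == null) {
        return 0;
      }
    } catch (Exception e) {     // UnregisteredNodeException in the real code
      return 0;
    }
    return toInvalidate.size();
  }
}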
Project: aliyun-oss-hadoop-fs    File: NameNodeRpcServer.java
/** 
 * Verifies the given registration.
 * 
 * @param nodeReg node registration
 * @throws UnregisteredNodeException if the registration is invalid
 */
private void verifyRequest(NodeRegistration nodeReg) throws IOException {
  // verify registration ID
  final String id = nodeReg.getRegistrationID();
  final String expectedID = namesystem.getRegistrationID();
  if (!expectedID.equals(id)) {
    LOG.warn("Registration IDs mismatched: the "
        + nodeReg.getClass().getSimpleName() + " ID is " + id
        + " but the expected ID is " + expectedID);
     throw new UnregisteredNodeException(nodeReg);
  }
}
Project: aliyun-oss-hadoop-fs    File: BlockManager.java
/**
 * Get blocks to invalidate for <i>nodeId</i>
 * in {@link #invalidateBlocks}.
 *
 * @return number of blocks scheduled for removal during this iteration.
 */
private int invalidateWorkForOneNode(DatanodeInfo dn) {
  final List<Block> toInvalidate;

  namesystem.writeLock();
  try {
    // blocks should not be replicated or removed if safe mode is on
    if (namesystem.isInSafeMode()) {
      LOG.debug("In safemode, not computing replication work");
      return 0;
    }
    try {
      DatanodeDescriptor dnDescriptor = datanodeManager.getDatanode(dn);
      if (dnDescriptor == null) {
        LOG.warn("DataNode " + dn + " cannot be found with UUID " +
            dn.getDatanodeUuid() + ", removing block invalidation work.");
        invalidateBlocks.remove(dn);
        return 0;
      }
      toInvalidate = invalidateBlocks.invalidateWork(dnDescriptor);

      if (toInvalidate == null) {
        return 0;
      }
    } catch(UnregisteredNodeException une) {
      return 0;
    }
  } finally {
    namesystem.writeUnlock();
  }
  blockLog.debug("BLOCK* {}: ask {} to delete {}", getClass().getSimpleName(),
      dn, toInvalidate);
  return toInvalidate.size();
}
Project: big-c    File: NameNodeRpcServer.java
/** 
 * Verifies the given registration.
 * 
 * @param nodeReg node registration
 * @throws UnregisteredNodeException if the registration is invalid
 */
private void verifyRequest(NodeRegistration nodeReg) throws IOException {
  // verify registration ID
  final String id = nodeReg.getRegistrationID();
  final String expectedID = namesystem.getRegistrationID();
  if (!expectedID.equals(id)) {
    LOG.warn("Registration IDs mismatched: the "
        + nodeReg.getClass().getSimpleName() + " ID is " + id
        + " but the expected ID is " + expectedID);
     throw new UnregisteredNodeException(nodeReg);
  }
}
Project: big-c    File: BlockManager.java
/**
 * Get blocks to invalidate for <i>nodeId</i>
 * in {@link #invalidateBlocks}.
 *
 * @return number of blocks scheduled for removal during this iteration.
 */
private int invalidateWorkForOneNode(DatanodeInfo dn) {
  final List<Block> toInvalidate;

  namesystem.writeLock();
  try {
    // blocks should not be replicated or removed if safe mode is on
    if (namesystem.isInSafeMode()) {
      LOG.debug("In safemode, not computing replication work");
      return 0;
    }
    try {
      DatanodeDescriptor dnDescriptor = datanodeManager.getDatanode(dn);
      if (dnDescriptor == null) {
        LOG.warn("DataNode " + dn + " cannot be found with UUID " +
            dn.getDatanodeUuid() + ", removing block invalidation work.");
        invalidateBlocks.remove(dn);
        return 0;
      }
      toInvalidate = invalidateBlocks.invalidateWork(dnDescriptor);

      if (toInvalidate == null) {
        return 0;
      }
    } catch(UnregisteredNodeException une) {
      return 0;
    }
  } finally {
    namesystem.writeUnlock();
  }
  blockLog.info("BLOCK* {}: ask {} to delete {}", getClass().getSimpleName(),
      dn, toInvalidate);
  return toInvalidate.size();
}
Project: hadoop-2.6.0-cdh5.4.3    File: NameNodeRpcServer.java
/** 
 * Verifies the given registration.
 * 
 * @param nodeReg node registration
 * @throws UnregisteredNodeException if the registration is invalid
 */
private void verifyRequest(NodeRegistration nodeReg) throws IOException {
  // verify registration ID
  final String id = nodeReg.getRegistrationID();
  final String expectedID = namesystem.getRegistrationID();
  if (!expectedID.equals(id)) {
    LOG.warn("Registration IDs mismatched: the "
        + nodeReg.getClass().getSimpleName() + " ID is " + id
        + " but the expected ID is " + expectedID);
     throw new UnregisteredNodeException(nodeReg);
  }
}
Project: hadoop-2.6.0-cdh5.4.3    File: BlockManager.java
/**
 * Get blocks to invalidate for <i>nodeId</i>
 * in {@link #invalidateBlocks}.
 *
 * @return number of blocks scheduled for removal during this iteration.
 */
private int invalidateWorkForOneNode(DatanodeInfo dn) {
  final List<Block> toInvalidate;

  namesystem.writeLock();
  try {
    // blocks should not be replicated or removed if safe mode is on
    if (namesystem.isInSafeMode()) {
      LOG.debug("In safemode, not computing replication work");
      return 0;
    }
    try {
      DatanodeDescriptor dnDescriptor = datanodeManager.getDatanode(dn);
      if (dnDescriptor == null) {
        LOG.warn("DataNode " + dn + " cannot be found with UUID " +
            dn.getDatanodeUuid() + ", removing block invalidation work.");
        invalidateBlocks.remove(dn);
        return 0;
      }
      toInvalidate = invalidateBlocks.invalidateWork(dnDescriptor);

      if (toInvalidate == null) {
        return 0;
      }
    } catch(UnregisteredNodeException une) {
      return 0;
    }
  } finally {
    namesystem.writeUnlock();
  }
  blockLog.info("BLOCK* {}: ask {} to delete {}", getClass().getSimpleName(),
      dn, toInvalidate);
  return toInvalidate.size();
}
Project: hadoop-plus    File: NameNodeRpcServer.java
/** 
 * Verifies the given registration.
 * 
 * @param nodeReg node registration
 * @throws UnregisteredNodeException if the registration is invalid
 */
void verifyRequest(NodeRegistration nodeReg) throws IOException {
  verifyLayoutVersion(nodeReg.getVersion());
  if (!namesystem.getRegistrationID().equals(nodeReg.getRegistrationID())) {
    LOG.warn("Invalid registrationID - expected: "
        + namesystem.getRegistrationID() + " received: "
        + nodeReg.getRegistrationID());
    throw new UnregisteredNodeException(nodeReg);
  }
}
Project: hadoop-plus    File: DatanodeManager.java
/**
 * Get data node by storage ID.
 * 
 * @param nodeID
 * @return DatanodeDescriptor or null if the node is not found.
 * @throws UnregisteredNodeException
 */
public DatanodeDescriptor getDatanode(DatanodeID nodeID
    ) throws UnregisteredNodeException {
  final DatanodeDescriptor node = getDatanode(nodeID.getStorageID());
  if (node == null) 
    return null;
  if (!node.getXferAddr().equals(nodeID.getXferAddr())) {
    final UnregisteredNodeException e = new UnregisteredNodeException(
        nodeID, node);
    NameNode.stateChangeLog.fatal("BLOCK* NameSystem.getDatanode: "
                                  + e.getLocalizedMessage());
    throw e;
  }
  return node;
}
Project: FlexMap    File: NameNodeRpcServer.java
/** 
 * Verifies the given registration.
 * 
 * @param nodeReg node registration
 * @throws UnregisteredNodeException if the registration is invalid
 */
private void verifyRequest(NodeRegistration nodeReg) throws IOException {
  // verify registration ID
  final String id = nodeReg.getRegistrationID();
  final String expectedID = namesystem.getRegistrationID();
  if (!expectedID.equals(id)) {
    LOG.warn("Registration IDs mismatched: the "
        + nodeReg.getClass().getSimpleName() + " ID is " + id
        + " but the expected ID is " + expectedID);
     throw new UnregisteredNodeException(nodeReg);
  }
}
Project: FlexMap    File: BlockManager.java
/**
 * Get blocks to invalidate for <i>nodeId</i>
 * in {@link #invalidateBlocks}.
 *
 * @return number of blocks scheduled for removal during this iteration.
 */
private int invalidateWorkForOneNode(DatanodeInfo dn) {
  final List<Block> toInvalidate;

  namesystem.writeLock();
  try {
    // blocks should not be replicated or removed if safe mode is on
    if (namesystem.isInSafeMode()) {
      LOG.debug("In safemode, not computing replication work");
      return 0;
    }
    try {
      toInvalidate = invalidateBlocks.invalidateWork(datanodeManager.getDatanode(dn));

      if (toInvalidate == null) {
        return 0;
      }
    } catch(UnregisteredNodeException une) {
      return 0;
    }
  } finally {
    namesystem.writeUnlock();
  }
  if (NameNode.stateChangeLog.isInfoEnabled()) {
    NameNode.stateChangeLog.info("BLOCK* " + getClass().getSimpleName()
        + ": ask " + dn + " to delete " + toInvalidate);
  }
  return toInvalidate.size();
}
Project: hops    File: NameNodeRpcServer.java
/**
 * Verifies the given registration.
 *
 * @param nodeReg
 *     node registration
 * @throws UnregisteredNodeException
 *     if the registration is invalid
 */
void verifyRequest(NodeRegistration nodeReg) throws IOException {
  verifyLayoutVersion(nodeReg.getVersion());
  if (!namesystem.getRegistrationID().equals(nodeReg.getRegistrationID())) {
    LOG.warn("Invalid registrationID - expected: " +
        namesystem.getRegistrationID() + " received: " +
        nodeReg.getRegistrationID());
    throw new UnregisteredNodeException(nodeReg);
  }
}
Project: hops    File: DatanodeManager.java
/**
 * Remove a datanode
 *
 * @throws UnregisteredNodeException
 */
public void removeDatanode(final DatanodeID node
    //Called by NameNodeRpcServer
) throws UnregisteredNodeException, IOException {
  final DatanodeDescriptor descriptor = getDatanode(node);
  if (descriptor != null) {
    removeDatanode(descriptor);
  } else {
    NameNode.stateChangeLog
        .warn("BLOCK* removeDatanode: " + node + " does not exist");
  }
}
Project: hadoop-TCP    File: NameNodeRpcServer.java
/** 
 * Verifies the given registration.
 * 
 * @param nodeReg node registration
 * @throws UnregisteredNodeException if the registration is invalid
 */
void verifyRequest(NodeRegistration nodeReg) throws IOException {
  verifyLayoutVersion(nodeReg.getVersion());
  if (!namesystem.getRegistrationID().equals(nodeReg.getRegistrationID())) {
    LOG.warn("Invalid registrationID - expected: "
        + namesystem.getRegistrationID() + " received: "
        + nodeReg.getRegistrationID());
    throw new UnregisteredNodeException(nodeReg);
  }
}
Project: hardfs    File: NameNodeRpcServer.java
/** 
 * Verifies the given registration.
 * 
 * @param nodeReg node registration
 * @throws UnregisteredNodeException if the registration is invalid
 */
void verifyRequest(NodeRegistration nodeReg) throws IOException {
  verifyLayoutVersion(nodeReg.getVersion());
  if (!namesystem.getRegistrationID().equals(nodeReg.getRegistrationID())) {
    LOG.warn("Invalid registrationID - expected: "
        + namesystem.getRegistrationID() + " received: "
        + nodeReg.getRegistrationID());
    throw new UnregisteredNodeException(nodeReg);
  }
}
Project: hadoop-on-lustre2    File: NameNodeRpcServer.java
/** 
 * Verifies the given registration.
 * 
 * @param nodeReg node registration
 * @throws UnregisteredNodeException if the registration is invalid
 */
private void verifyRequest(NodeRegistration nodeReg) throws IOException {
  // verify registration ID
  final String id = nodeReg.getRegistrationID();
  final String expectedID = namesystem.getRegistrationID();
  if (!expectedID.equals(id)) {
    LOG.warn("Registration IDs mismatched: the "
        + nodeReg.getClass().getSimpleName() + " ID is " + id
        + " but the expected ID is " + expectedID);
     throw new UnregisteredNodeException(nodeReg);
  }
}
Project: hadoop    File: BlockManager.java
/** Get all blocks with location information from a datanode. */
private BlocksWithLocations getBlocksWithLocations(final DatanodeID datanode,
    final long size) throws UnregisteredNodeException {
  final DatanodeDescriptor node = getDatanodeManager().getDatanode(datanode);
  if (node == null) {
    blockLog.warn("BLOCK* getBlocks: Asking for blocks from an" +
        " unrecorded node {}", datanode);
    throw new HadoopIllegalArgumentException(
        "Datanode " + datanode + " not found.");
  }

  int numBlocks = node.numBlocks();
  if(numBlocks == 0) {
    return new BlocksWithLocations(new BlockWithLocations[0]);
  }
  Iterator<BlockInfoContiguous> iter = node.getBlockIterator();
  int startBlock = DFSUtil.getRandom().nextInt(numBlocks); // starting from a random block
  // skip blocks
  for(int i=0; i<startBlock; i++) {
    iter.next();
  }
  List<BlockWithLocations> results = new ArrayList<BlockWithLocations>();
  long totalSize = 0;
  BlockInfoContiguous curBlock;
  while(totalSize<size && iter.hasNext()) {
    curBlock = iter.next();
    if(!curBlock.isComplete())  continue;
    totalSize += addBlock(curBlock, results);
  }
  if(totalSize<size) {
    iter = node.getBlockIterator(); // start from the beginning
    for(int i=0; i<startBlock&&totalSize<size; i++) {
      curBlock = iter.next();
      if(!curBlock.isComplete())  continue;
      totalSize += addBlock(curBlock, results);
    }
  }

  return new BlocksWithLocations(
      results.toArray(new BlockWithLocations[results.size()]));
}
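The method above samples blocks starting at a random index and wraps around to the beginning of the iterator if the size budget is not met on the first pass. A generic sketch of that sampling strategy, operating on a plain list of block sizes rather than Hadoop block iterators:

import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.ThreadLocalRandom;

// Generic illustration of the random-start, wrap-around sampling used above.
// Long values stand in for block sizes; the real code walks a block iterator.
public final class RandomStartSamplerSketch {
  public static List<Long> sample(List<Long> blockSizes, long sizeBudget) {
    List<Long> picked = new ArrayList<>();
    int n = blockSizes.size();
    if (n == 0) {
      return picked;
    }
    int start = ThreadLocalRandom.current().nextInt(n);
    long total = 0;
    // first pass: from the random start position to the end of the list
    for (int i = start; i < n && total < sizeBudget; i++) {
      total += blockSizes.get(i);
      picked.add(blockSizes.get(i));
    }
    // second pass: wrap around and cover the skipped prefix if budget remains
    for (int i = 0; i < start && total < sizeBudget; i++) {
      total += blockSizes.get(i);
      picked.add(blockSizes.get(i));
    }
    return picked;
  }
}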
Project: aliyun-oss-hadoop-fs    File: BlockManager.java
/** Get all blocks with location information from a datanode. */
private BlocksWithLocations getBlocksWithLocations(final DatanodeID datanode,
    final long size) throws UnregisteredNodeException {
  final DatanodeDescriptor node = getDatanodeManager().getDatanode(datanode);
  if (node == null) {
    blockLog.warn("BLOCK* getBlocks: Asking for blocks from an" +
        " unrecorded node {}", datanode);
    throw new HadoopIllegalArgumentException(
        "Datanode " + datanode + " not found.");
  }

  int numBlocks = node.numBlocks();
  if(numBlocks == 0) {
    return new BlocksWithLocations(new BlockWithLocations[0]);
  }
  Iterator<BlockInfo> iter = node.getBlockIterator();
  // starting from a random block
  int startBlock = ThreadLocalRandom.current().nextInt(numBlocks);
  // skip blocks
  for(int i=0; i<startBlock; i++) {
    iter.next();
  }
  List<BlockWithLocations> results = new ArrayList<BlockWithLocations>();
  long totalSize = 0;
  BlockInfo curBlock;
  while(totalSize<size && iter.hasNext()) {
    curBlock = iter.next();
    if(!curBlock.isComplete())  continue;
    totalSize += addBlock(curBlock, results);
  }
  if(totalSize<size) {
    iter = node.getBlockIterator(); // start from the beginning
    for(int i=0; i<startBlock&&totalSize<size; i++) {
      curBlock = iter.next();
      if(!curBlock.isComplete())  continue;
      totalSize += addBlock(curBlock, results);
    }
  }

  return new BlocksWithLocations(
      results.toArray(new BlockWithLocations[results.size()]));
}
Project: big-c    File: BlockManager.java
/** Get all blocks with location information from a datanode. */
private BlocksWithLocations getBlocksWithLocations(final DatanodeID datanode,
    final long size) throws UnregisteredNodeException {
  final DatanodeDescriptor node = getDatanodeManager().getDatanode(datanode);
  if (node == null) {
    blockLog.warn("BLOCK* getBlocks: Asking for blocks from an" +
        " unrecorded node {}", datanode);
    throw new HadoopIllegalArgumentException(
        "Datanode " + datanode + " not found.");
  }

  int numBlocks = node.numBlocks();
  if(numBlocks == 0) {
    return new BlocksWithLocations(new BlockWithLocations[0]);
  }
  Iterator<BlockInfoContiguous> iter = node.getBlockIterator();
  int startBlock = DFSUtil.getRandom().nextInt(numBlocks); // starting from a random block
  // skip blocks
  for(int i=0; i<startBlock; i++) {
    iter.next();
  }
  List<BlockWithLocations> results = new ArrayList<BlockWithLocations>();
  long totalSize = 0;
  BlockInfoContiguous curBlock;
  while(totalSize<size && iter.hasNext()) {
    curBlock = iter.next();
    if(!curBlock.isComplete())  continue;
    totalSize += addBlock(curBlock, results);
  }
  if(totalSize<size) {
    iter = node.getBlockIterator(); // start from the beginning
    for(int i=0; i<startBlock&&totalSize<size; i++) {
      curBlock = iter.next();
      if(!curBlock.isComplete())  continue;
      totalSize += addBlock(curBlock, results);
    }
  }

  return new BlocksWithLocations(
      results.toArray(new BlockWithLocations[results.size()]));
}
Project: hadoop-2.6.0-cdh5.4.3    File: BlockManager.java
/** Get all blocks with location information from a datanode. */
private BlocksWithLocations getBlocksWithLocations(final DatanodeID datanode,
    final long size) throws UnregisteredNodeException {
  final DatanodeDescriptor node = getDatanodeManager().getDatanode(datanode);
  if (node == null) {
    blockLog.warn("BLOCK* getBlocks: Asking for blocks from an" +
        " unrecorded node {}", datanode);
    throw new HadoopIllegalArgumentException(
        "Datanode " + datanode + " not found.");
  }

  int numBlocks = node.numBlocks();
  if(numBlocks == 0) {
    return new BlocksWithLocations(new BlockWithLocations[0]);
  }
  Iterator<BlockInfo> iter = node.getBlockIterator();
  int startBlock = DFSUtil.getRandom().nextInt(numBlocks); // starting from a random block
  // skip blocks
  for(int i=0; i<startBlock; i++) {
    iter.next();
  }
  List<BlockWithLocations> results = new ArrayList<BlockWithLocations>();
  long totalSize = 0;
  BlockInfo curBlock;
  while(totalSize<size && iter.hasNext()) {
    curBlock = iter.next();
    if(!curBlock.isComplete())  continue;
    totalSize += addBlock(curBlock, results);
  }
  if(totalSize<size) {
    iter = node.getBlockIterator(); // start from the beginning
    for(int i=0; i<startBlock&&totalSize<size; i++) {
      curBlock = iter.next();
      if(!curBlock.isComplete())  continue;
      totalSize += addBlock(curBlock, results);
    }
  }

  return new BlocksWithLocations(
      results.toArray(new BlockWithLocations[results.size()]));
}
Project: hadoop-plus    File: BlockManager.java
/** Get all blocks with location information from a datanode. */
private BlocksWithLocations getBlocksWithLocations(final DatanodeID datanode,
    final long size) throws UnregisteredNodeException {
  final DatanodeDescriptor node = getDatanodeManager().getDatanode(datanode);
  if (node == null) {
    blockLog.warn("BLOCK* getBlocks: "
        + "Asking for blocks from an unrecorded node " + datanode);
    throw new HadoopIllegalArgumentException(
        "Datanode " + datanode + " not found.");
  }

  int numBlocks = node.numBlocks();
  if(numBlocks == 0) {
    return new BlocksWithLocations(new BlockWithLocations[0]);
  }
  Iterator<BlockInfo> iter = node.getBlockIterator();
  int startBlock = DFSUtil.getRandom().nextInt(numBlocks); // starting from a random block
  // skip blocks
  for(int i=0; i<startBlock; i++) {
    iter.next();
  }
  List<BlockWithLocations> results = new ArrayList<BlockWithLocations>();
  long totalSize = 0;
  BlockInfo curBlock;
  while(totalSize<size && iter.hasNext()) {
    curBlock = iter.next();
    if(!curBlock.isComplete())  continue;
    totalSize += addBlock(curBlock, results);
  }
  if(totalSize<size) {
    iter = node.getBlockIterator(); // start from the beginning
    for(int i=0; i<startBlock&&totalSize<size; i++) {
      curBlock = iter.next();
      if(!curBlock.isComplete())  continue;
      totalSize += addBlock(curBlock, results);
    }
  }

  return new BlocksWithLocations(
      results.toArray(new BlockWithLocations[results.size()]));
}
Project: FlexMap    File: BlockManager.java
/** Get all blocks with location information from a datanode. */
private BlocksWithLocations getBlocksWithLocations(final DatanodeID datanode,
    final long size) throws UnregisteredNodeException {
  final DatanodeDescriptor node = getDatanodeManager().getDatanode(datanode);
  if (node == null) {
    blockLog.warn("BLOCK* getBlocks: "
        + "Asking for blocks from an unrecorded node " + datanode);
    throw new HadoopIllegalArgumentException(
        "Datanode " + datanode + " not found.");
  }

  int numBlocks = node.numBlocks();
  if(numBlocks == 0) {
    return new BlocksWithLocations(new BlockWithLocations[0]);
  }
  Iterator<BlockInfo> iter = node.getBlockIterator();
  int startBlock = DFSUtil.getRandom().nextInt(numBlocks); // starting from a random block
  // skip blocks
  for(int i=0; i<startBlock; i++) {
    iter.next();
  }
  List<BlockWithLocations> results = new ArrayList<BlockWithLocations>();
  long totalSize = 0;
  BlockInfo curBlock;
  while(totalSize<size && iter.hasNext()) {
    curBlock = iter.next();
    if(!curBlock.isComplete())  continue;
    totalSize += addBlock(curBlock, results);
  }
  if(totalSize<size) {
    iter = node.getBlockIterator(); // start from the beginning
    for(int i=0; i<startBlock&&totalSize<size; i++) {
      curBlock = iter.next();
      if(!curBlock.isComplete())  continue;
      totalSize += addBlock(curBlock, results);
    }
  }

  return new BlocksWithLocations(
      results.toArray(new BlockWithLocations[results.size()]));
}
Project: hops    File: BlockManager.java
/**
 * Get all blocks with location information from a datanode.
 */
private BlocksWithLocations getBlocksWithLocations(final DatanodeID datanode,
    final long size) throws UnregisteredNodeException, IOException {
  final DatanodeDescriptor node = getDatanodeManager().getDatanode(datanode);
  if (node == null) {
    blockLog.warn(
        "BLOCK* getBlocks: " + "Asking for blocks from an unrecorded node " +
            datanode);
    throw new HadoopIllegalArgumentException(
        "Datanode " + datanode + " not found.");
  }

  int numBlocks = node.numBlocks();
  if (numBlocks == 0) {
    return new BlocksWithLocations(new BlockWithLocations[0]);
  }
  Iterator<BlockInfo> iter = node.getBlockIterator();
  int startBlock =
      DFSUtil.getRandom().nextInt(numBlocks); // starting from a random block
  // skip blocks
  for (int i = 0; i < startBlock; i++) {
    iter.next();
  }
  List<BlockWithLocations> results = new ArrayList<>();
  long totalSize = 0;
  BlockInfo curBlock;
  while (totalSize < size && iter.hasNext()) {
    curBlock = iter.next();
    if (!curBlock.isComplete()) {
      continue;
    }
    totalSize += addBlock(curBlock, results);
  }
  if (totalSize < size) {
    iter = node.getBlockIterator(); // start from the beginning
    for (int i = 0; i < startBlock && totalSize < size; i++) {
      curBlock = iter.next();
      if (!curBlock.isComplete()) {
        continue;
      }
      totalSize += addBlock(curBlock, results);
    }
  }

  return new BlocksWithLocations(
      results.toArray(new BlockWithLocations[results.size()]));
}
Project: hops    File: DatanodeManager.java
/**
 * Handle heartbeat from datanodes.
 */
public DatanodeCommand[] handleHeartbeat(DatanodeRegistration nodeReg,
    final String blockPoolId, long capacity, long dfsUsed, long remaining,
    long blockPoolUsed, int xceiverCount, int maxTransfers, int failedVolumes)
    throws IOException {
  synchronized (heartbeatManager) {
    synchronized (datanodeMap) {
      DatanodeDescriptor nodeinfo = null;
      try {
        nodeinfo = getDatanode(nodeReg);
      } catch (UnregisteredNodeException e) {
        return new DatanodeCommand[]{RegisterCommand.REGISTER};
      }

      // Check if this datanode should actually be shutdown instead. 
      if (nodeinfo != null && nodeinfo.isDisallowed()) {
        setDatanodeDead(nodeinfo);
        throw new DisallowedDatanodeException(nodeinfo);
      }

      if (nodeinfo == null || !nodeinfo.isAlive) {
        return new DatanodeCommand[]{RegisterCommand.REGISTER};
      }

      heartbeatManager.updateHeartbeat(nodeinfo, capacity, dfsUsed, remaining,
          blockPoolUsed, xceiverCount, failedVolumes);

      //check lease recovery
      BlockInfoUnderConstruction[] blocks =
          nodeinfo.getLeaseRecoveryCommand(Integer.MAX_VALUE);
      if (blocks != null) {
        BlockRecoveryCommand brCommand =
            new BlockRecoveryCommand(blocks.length);
        for (BlockInfoUnderConstruction b : blocks) {
          brCommand.add(new RecoveringBlock(new ExtendedBlock(blockPoolId, b),
              getDataNodeDescriptorsTx(b), b.getBlockRecoveryId()));
        }
        return new DatanodeCommand[]{brCommand};
      }

      final List<DatanodeCommand> cmds = new ArrayList<>();
      //check pending replication
      List<BlockTargetPair> pendingList =
          nodeinfo.getReplicationCommand(maxTransfers);
      if (pendingList != null) {
        cmds.add(new BlockCommand(DatanodeProtocol.DNA_TRANSFER, blockPoolId,
            pendingList));
      }
      //check block invalidation
      Block[] blks = nodeinfo.getInvalidateBlocks(blockInvalidateLimit);
      if (blks != null) {
        cmds.add(
            new BlockCommand(DatanodeProtocol.DNA_INVALIDATE, blockPoolId,
                blks));
      }

      blockManager.addKeyUpdateCommand(cmds, nodeinfo);

      // check for balancer bandwidth update
      if (nodeinfo.getBalancerBandwidth() > 0) {
        cmds.add(
            new BalancerBandwidthCommand(nodeinfo.getBalancerBandwidth()));
        // set back to 0 to indicate that datanode has been sent the new value
        nodeinfo.setBalancerBandwidth(0);
      }

      if (!cmds.isEmpty()) {
        return cmds.toArray(new DatanodeCommand[cmds.size()]);
      }
    }
  }

  return new DatanodeCommand[0];
}
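In the heartbeat handler above, a datanode that cannot be resolved, or whose lookup throws UnregisteredNodeException, is not answered with an error: the reply is a single REGISTER command telling it to redo the handshake. Below is a hedged sketch of that fallback; the Lookup interface is an illustrative stand-in for DatanodeManager.getDatanode and is not part of the Hadoop API.

import java.io.IOException;
import org.apache.hadoop.hdfs.protocol.DatanodeID;
import org.apache.hadoop.hdfs.protocol.UnregisteredNodeException;
import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor;
import org.apache.hadoop.hdfs.server.protocol.DatanodeCommand;
import org.apache.hadoop.hdfs.server.protocol.RegisterCommand;

// Sketch of the heartbeat fallback above, isolated so the catch/return pattern
// is visible: unknown or unregistered nodes get a REGISTER command, not an error.
public final class HeartbeatFallbackSketch {
  public interface Lookup {
    DatanodeDescriptor get(DatanodeID id) throws IOException;
  }

  public static DatanodeCommand[] resolveOrRegister(Lookup lookup, DatanodeID id)
      throws IOException {
    DatanodeDescriptor nodeinfo;
    try {
      nodeinfo = lookup.get(id);
    } catch (UnregisteredNodeException e) {
      return new DatanodeCommand[]{RegisterCommand.REGISTER};
    }
    if (nodeinfo == null) {
      return new DatanodeCommand[]{RegisterCommand.REGISTER};
    }
    return new DatanodeCommand[0];    // known and registered: nothing to send
  }
}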
Project: hadoop-TCP    File: BlockManager.java
/** Get all blocks with location information from a datanode. */
private BlocksWithLocations getBlocksWithLocations(final DatanodeID datanode,
    final long size) throws UnregisteredNodeException {
  final DatanodeDescriptor node = getDatanodeManager().getDatanode(datanode);
  if (node == null) {
    blockLog.warn("BLOCK* getBlocks: "
        + "Asking for blocks from an unrecorded node " + datanode);
    throw new HadoopIllegalArgumentException(
        "Datanode " + datanode + " not found.");
  }

  int numBlocks = node.numBlocks();
  if(numBlocks == 0) {
    return new BlocksWithLocations(new BlockWithLocations[0]);
  }
  Iterator<BlockInfo> iter = node.getBlockIterator();
  int startBlock = DFSUtil.getRandom().nextInt(numBlocks); // starting from a random block
  // skip blocks
  for(int i=0; i<startBlock; i++) {
    iter.next();
  }
  List<BlockWithLocations> results = new ArrayList<BlockWithLocations>();
  long totalSize = 0;
  BlockInfo curBlock;
  while(totalSize<size && iter.hasNext()) {
    curBlock = iter.next();
    if(!curBlock.isComplete())  continue;
    totalSize += addBlock(curBlock, results);
  }
  if(totalSize<size) {
    iter = node.getBlockIterator(); // start from the beginning
    for(int i=0; i<startBlock&&totalSize<size; i++) {
      curBlock = iter.next();
      if(!curBlock.isComplete())  continue;
      totalSize += addBlock(curBlock, results);
    }
  }

  return new BlocksWithLocations(
      results.toArray(new BlockWithLocations[results.size()]));
}
Project: hardfs    File: BlockManager.java
/** Get all blocks with location information from a datanode. */
private BlocksWithLocations getBlocksWithLocations(final DatanodeID datanode,
    final long size) throws UnregisteredNodeException {
  final DatanodeDescriptor node = getDatanodeManager().getDatanode(datanode);
  if (node == null) {
    blockLog.warn("BLOCK* getBlocks: "
        + "Asking for blocks from an unrecorded node " + datanode);
    throw new HadoopIllegalArgumentException(
        "Datanode " + datanode + " not found.");
  }

  int numBlocks = node.numBlocks();
  if(numBlocks == 0) {
    return new BlocksWithLocations(new BlockWithLocations[0]);
  }
  Iterator<BlockInfo> iter = node.getBlockIterator();
  int startBlock = DFSUtil.getRandom().nextInt(numBlocks); // starting from a random block
  // skip blocks
  for(int i=0; i<startBlock; i++) {
    iter.next();
  }
  List<BlockWithLocations> results = new ArrayList<BlockWithLocations>();
  long totalSize = 0;
  BlockInfo curBlock;
  while(totalSize<size && iter.hasNext()) {
    curBlock = iter.next();
    if(!curBlock.isComplete())  continue;
    totalSize += addBlock(curBlock, results);
  }
  if(totalSize<size) {
    iter = node.getBlockIterator(); // start from the beginning
    for(int i=0; i<startBlock&&totalSize<size; i++) {
      curBlock = iter.next();
      if(!curBlock.isComplete())  continue;
      totalSize += addBlock(curBlock, results);
    }
  }

  return new BlocksWithLocations(
      results.toArray(new BlockWithLocations[results.size()]));
}
Project: hadoop-on-lustre2    File: BlockManager.java
/** Get all blocks with location information from a datanode. */
private BlocksWithLocations getBlocksWithLocations(final DatanodeID datanode,
    final long size) throws UnregisteredNodeException {
  final DatanodeDescriptor node = getDatanodeManager().getDatanode(datanode);
  if (node == null) {
    blockLog.warn("BLOCK* getBlocks: "
        + "Asking for blocks from an unrecorded node " + datanode);
    throw new HadoopIllegalArgumentException(
        "Datanode " + datanode + " not found.");
  }

  int numBlocks = node.numBlocks();
  if(numBlocks == 0) {
    return new BlocksWithLocations(new BlockWithLocations[0]);
  }
  Iterator<BlockInfo> iter = node.getBlockIterator();
  int startBlock = DFSUtil.getRandom().nextInt(numBlocks); // starting from a random block
  // skip blocks
  for(int i=0; i<startBlock; i++) {
    iter.next();
  }
  List<BlockWithLocations> results = new ArrayList<BlockWithLocations>();
  long totalSize = 0;
  BlockInfo curBlock;
  while(totalSize<size && iter.hasNext()) {
    curBlock = iter.next();
    if(!curBlock.isComplete())  continue;
    totalSize += addBlock(curBlock, results);
  }
  if(totalSize<size) {
    iter = node.getBlockIterator(); // start from the beginning
    for(int i=0; i<startBlock&&totalSize<size; i++) {
      curBlock = iter.next();
      if(!curBlock.isComplete())  continue;
      totalSize += addBlock(curBlock, results);
    }
  }

  return new BlocksWithLocations(
      results.toArray(new BlockWithLocations[results.size()]));
}
Project: cumulus    File: NameNode.java
/** 
 * Verify request.
 * 
 * Verifies correctness of the datanode version, registration ID, and 
 * if the datanode does not need to be shutdown.
 * 
 * @param nodeReg data node registration
 * @throws IOException
 */
public void verifyRequest(NodeRegistration nodeReg) throws IOException {
  verifyVersion(nodeReg.getVersion());
  if (!namesystem.getRegistrationID().equals(nodeReg.getRegistrationID()))
    throw new UnregisteredNodeException(nodeReg);
}