Java class org.apache.hadoop.hdfs.protocol.DatanodeInfo.AdminStates: example source code
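
The snippets below are grouped by project (Project: ... File: ...) and exercise three admin states: NORMAL, DECOMMISSION_INPROGRESS and DECOMMISSIONED. As a quick orientation, here is a minimal, hypothetical sketch (not taken from any of the projects below) that prints those constants and uses the same string comparison the ClusterJspHelper snippets rely on; it assumes only hadoop-hdfs on the classpath.

import org.apache.hadoop.hdfs.protocol.DatanodeInfo.AdminStates;

public class AdminStatesSketch {
  public static void main(String[] args) {
    AdminStates[] seen = {
        AdminStates.NORMAL,
        AdminStates.DECOMMISSION_INPROGRESS,
        AdminStates.DECOMMISSIONED
    };
    for (AdminStates s : seen) {
      // name() is the enum constant; toString() is the display value that the
      // ClusterJspHelper snippets below compare JSON "adminState" fields against.
      System.out.println(s.name() + " -> " + s);
    }
  }
}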

Project: hadoop    File: ClusterJspHelper.java
/**
 * Get the number of live datanodes.
 * 
 * @param json JSON string that contains live node status.
 * @param nn namenode status to return information in
 */
private static void getLiveNodeCount(String json, NamenodeStatus nn)
    throws IOException {
  // Map of datanode host to (map of attribute name to value)
  Map<String, Map<String, Object>> nodeMap = getNodeMap(json);
  if (nodeMap == null || nodeMap.isEmpty()) {
    return;
  }

  nn.liveDatanodeCount = nodeMap.size();
  for (Entry<String, Map<String, Object>> entry : nodeMap.entrySet()) {
    // Inner map of attribute name to value
    Map<String, Object> innerMap = entry.getValue();
    if (innerMap != null) {
      if (innerMap.get("adminState")
          .equals(AdminStates.DECOMMISSIONED.toString())) {
        nn.liveDecomCount++;
      }
    }
  }
}
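getLiveNodeCount walks a map of datanode host to attribute map produced by getNodeMap(json) and counts the entries whose adminState matches AdminStates.DECOMMISSIONED. Since getNodeMap is not reproduced on this page, the sketch below builds the same nested map by hand (the host names and values are invented) and applies the same counting logic.

import java.util.HashMap;
import java.util.Map;

import org.apache.hadoop.hdfs.protocol.DatanodeInfo.AdminStates;

public class LiveNodeCountSketch {
  public static void main(String[] args) {
    // Shape expected by getLiveNodeCount: datanode host -> (attribute -> value).
    Map<String, Map<String, Object>> nodeMap =
        new HashMap<String, Map<String, Object>>();

    Map<String, Object> live = new HashMap<String, Object>();
    live.put("adminState", AdminStates.NORMAL.toString());
    nodeMap.put("dn1.example.com", live);

    Map<String, Object> decom = new HashMap<String, Object>();
    decom.put("adminState", AdminStates.DECOMMISSIONED.toString());
    nodeMap.put("dn2.example.com", decom);

    int liveDatanodeCount = nodeMap.size();
    int liveDecomCount = 0;
    for (Map<String, Object> attrs : nodeMap.values()) {
      if (AdminStates.DECOMMISSIONED.toString().equals(attrs.get("adminState"))) {
        liveDecomCount++;
      }
    }
    System.out.println(liveDatanodeCount + " live, " + liveDecomCount
        + " decommissioned");
  }
}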
Project: hadoop    File: ClusterJspHelper.java
/**
 * Get the decommissioning datanode information.
 * 
 * @param dataNodeStatusMap map with key being datanode, value being an
 *          inner map (key:namenode, value:decommissioning state).
 * @param host namenode host whose decommissioning report is being processed
 * @param json JSON string that contains the decommissioning node status
 */
private static void getDecommissionNodeStatus(
    Map<String, Map<String, String>> dataNodeStatusMap, String host,
    String json) throws IOException {
  Map<String, Map<String, Object>> nodeMap = getNodeMap(json);
  if (nodeMap == null || nodeMap.isEmpty()) {
    return;
  }
  List<String> decomming = new ArrayList<String>();
  for (Entry<String, Map<String, Object>> entry : nodeMap.entrySet()) {
    String dn = entry.getKey();
    decomming.add(dn);
    // nn-status
    Map<String, String> nnStatus = new HashMap<String, String>();
    if (dataNodeStatusMap.containsKey(dn)) {
      nnStatus = dataNodeStatusMap.get(dn);
    }
    nnStatus.put(host, AdminStates.DECOMMISSION_INPROGRESS.toString());
    // dn-nn-status
    dataNodeStatusMap.put(dn, nnStatus);
  }
}
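getDecommissionNodeStatus is called once per namenode and records, for every datanode in that namenode's report, an entry mapping the namenode host to DECOMMISSION_INPROGRESS. The hypothetical sketch below shows how two such calls for different namenodes accumulate into the shared dataNodeStatusMap; all host names are invented.

import java.util.HashMap;
import java.util.Map;

import org.apache.hadoop.hdfs.protocol.DatanodeInfo.AdminStates;

public class DecommissionStatusSketch {
  public static void main(String[] args) {
    Map<String, Map<String, String>> dataNodeStatusMap =
        new HashMap<String, Map<String, String>>();

    // Two namenodes report the same datanode as decommissioning.
    record(dataNodeStatusMap, "dn1.example.com", "nn1.example.com");
    record(dataNodeStatusMap, "dn1.example.com", "nn2.example.com");

    // dn1.example.com now maps both namenode hosts to the
    // DECOMMISSION_INPROGRESS display string.
    System.out.println(dataNodeStatusMap);
  }

  // Same merge logic as the loop body in getDecommissionNodeStatus.
  private static void record(Map<String, Map<String, String>> statusMap,
      String dn, String nnHost) {
    Map<String, String> nnStatus = statusMap.containsKey(dn)
        ? statusMap.get(dn) : new HashMap<String, String>();
    nnStatus.put(nnHost, AdminStates.DECOMMISSION_INPROGRESS.toString());
    statusMap.put(dn, nnStatus);
  }
}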
Project: hadoop    File: TestPBHelper.java
private LocatedBlock createLocatedBlock() {
  DatanodeInfo[] dnInfos = {
      DFSTestUtil.getLocalDatanodeInfo("127.0.0.1", "h1",
          AdminStates.DECOMMISSION_INPROGRESS),
      DFSTestUtil.getLocalDatanodeInfo("127.0.0.1", "h2",
          AdminStates.DECOMMISSIONED),
      DFSTestUtil.getLocalDatanodeInfo("127.0.0.1", "h3", 
          AdminStates.NORMAL),
      DFSTestUtil.getLocalDatanodeInfo("127.0.0.1", "h4",
          AdminStates.NORMAL),
  };
  String[] storageIDs = {"s1", "s2", "s3", "s4"};
  StorageType[] media = {
      StorageType.DISK,
      StorageType.SSD,
      StorageType.DISK,
      StorageType.RAM_DISK
  };
  LocatedBlock lb = new LocatedBlock(
      new ExtendedBlock("bp12", 12345, 10, 53),
      dnInfos, storageIDs, media, 5, false, new DatanodeInfo[]{});
  lb.setBlockToken(new Token<BlockTokenIdentifier>(
      "identifier".getBytes(), "password".getBytes(), new Text("kind"),
      new Text("service")));
  return lb;
}
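createLocatedBlock is a private helper inside TestPBHelper, and the admin states it assigns are what the surrounding protobuf round-trip tests inspect. A hypothetical companion method (not part of the original file) could verify that the states survive construction; it assumes LocatedBlock.getLocations(), DatanodeInfo.getAdminState(), and JUnit's assertEquals are available in that class.

@Test
public void sketchAdminStatesOnCreatedBlock() {
  LocatedBlock lb = createLocatedBlock();
  DatanodeInfo[] locs = lb.getLocations();
  assertEquals(AdminStates.DECOMMISSION_INPROGRESS, locs[0].getAdminState());
  assertEquals(AdminStates.DECOMMISSIONED, locs[1].getAdminState());
  assertEquals(AdminStates.NORMAL, locs[2].getAdminState());
  assertEquals(AdminStates.NORMAL, locs[3].getAdminState());
}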
Project: hadoop    File: TestPBHelper.java
private LocatedBlock createLocatedBlockNoStorageMedia() {
  DatanodeInfo[] dnInfos = {
      DFSTestUtil.getLocalDatanodeInfo("127.0.0.1", "h1",
                                       AdminStates.DECOMMISSION_INPROGRESS),
      DFSTestUtil.getLocalDatanodeInfo("127.0.0.1", "h2",
                                       AdminStates.DECOMMISSIONED),
      DFSTestUtil.getLocalDatanodeInfo("127.0.0.1", "h3",
                                       AdminStates.NORMAL)
  };
  LocatedBlock lb = new LocatedBlock(
      new ExtendedBlock("bp12", 12345, 10, 53), dnInfos, 5, false);
  lb.setBlockToken(new Token<BlockTokenIdentifier>(
      "identifier".getBytes(), "password".getBytes(), new Text("kind"),
      new Text("service")));
  return lb;
}
Project: aliyun-oss-hadoop-fs    File: TestPBHelper.java
private LocatedBlock createLocatedBlock() {
  DatanodeInfo[] dnInfos = {
      DFSTestUtil.getLocalDatanodeInfo("127.0.0.1", "h1",
          AdminStates.DECOMMISSION_INPROGRESS),
      DFSTestUtil.getLocalDatanodeInfo("127.0.0.1", "h2",
          AdminStates.DECOMMISSIONED),
      DFSTestUtil.getLocalDatanodeInfo("127.0.0.1", "h3", 
          AdminStates.NORMAL),
      DFSTestUtil.getLocalDatanodeInfo("127.0.0.1", "h4",
          AdminStates.NORMAL),
  };
  String[] storageIDs = {"s1", "s2", "s3", "s4"};
  StorageType[] media = {
      StorageType.DISK,
      StorageType.SSD,
      StorageType.DISK,
      StorageType.RAM_DISK
  };
  LocatedBlock lb = new LocatedBlock(
      new ExtendedBlock("bp12", 12345, 10, 53),
      dnInfos, storageIDs, media, 5, false, new DatanodeInfo[]{});
  lb.setBlockToken(new Token<BlockTokenIdentifier>(
      "identifier".getBytes(), "password".getBytes(), new Text("kind"),
      new Text("service")));
  return lb;
}
Project: aliyun-oss-hadoop-fs    File: TestPBHelper.java
private LocatedBlock createLocatedBlockNoStorageMedia() {
  DatanodeInfo[] dnInfos = {
      DFSTestUtil.getLocalDatanodeInfo("127.0.0.1", "h1",
                                       AdminStates.DECOMMISSION_INPROGRESS),
      DFSTestUtil.getLocalDatanodeInfo("127.0.0.1", "h2",
                                       AdminStates.DECOMMISSIONED),
      DFSTestUtil.getLocalDatanodeInfo("127.0.0.1", "h3",
                                       AdminStates.NORMAL)
  };
  LocatedBlock lb = new LocatedBlock(
      new ExtendedBlock("bp12", 12345, 10, 53), dnInfos);
  lb.setBlockToken(new Token<BlockTokenIdentifier>(
      "identifier".getBytes(), "password".getBytes(), new Text("kind"),
      new Text("service")));
  lb.setStartOffset(5);
  return lb;
}
Project: big-c    File: ClusterJspHelper.java
/**
 * Get the number of live datanodes.
 * 
 * @param json JSON string that contains live node status.
 * @param nn namenode status to return information in
 */
private static void getLiveNodeCount(String json, NamenodeStatus nn)
    throws IOException {
  // Map of datanode host to (map of attribute name to value)
  Map<String, Map<String, Object>> nodeMap = getNodeMap(json);
  if (nodeMap == null || nodeMap.isEmpty()) {
    return;
  }

  nn.liveDatanodeCount = nodeMap.size();
  for (Entry<String, Map<String, Object>> entry : nodeMap.entrySet()) {
    // Inner map of attribute name to value
    Map<String, Object> innerMap = entry.getValue();
    if (innerMap != null) {
      if (innerMap.get("adminState")
          .equals(AdminStates.DECOMMISSIONED.toString())) {
        nn.liveDecomCount++;
      }
    }
  }
}
Project: big-c    File: ClusterJspHelper.java
/**
 * Get the decommissioning datanode information.
 * 
 * @param dataNodeStatusMap map with key being datanode, value being an
 *          inner map (key:namenode, value:decommissioning state).
 * @param host namenode host whose decommissioning report is being processed
 * @param json JSON string that contains the decommissioning node status
 */
private static void getDecommissionNodeStatus(
    Map<String, Map<String, String>> dataNodeStatusMap, String host,
    String json) throws IOException {
  Map<String, Map<String, Object>> nodeMap = getNodeMap(json);
  if (nodeMap == null || nodeMap.isEmpty()) {
    return;
  }
  List<String> decomming = new ArrayList<String>();
  for (Entry<String, Map<String, Object>> entry : nodeMap.entrySet()) {
    String dn = entry.getKey();
    decomming.add(dn);
    // nn-status
    Map<String, String> nnStatus = new HashMap<String, String>();
    if (dataNodeStatusMap.containsKey(dn)) {
      nnStatus = dataNodeStatusMap.get(dn);
    }
    nnStatus.put(host, AdminStates.DECOMMISSION_INPROGRESS.toString());
    // dn-nn-status
    dataNodeStatusMap.put(dn, nnStatus);
  }
}
Project: big-c    File: TestPBHelper.java
private LocatedBlock createLocatedBlock() {
  DatanodeInfo[] dnInfos = {
      DFSTestUtil.getLocalDatanodeInfo("127.0.0.1", "h1",
          AdminStates.DECOMMISSION_INPROGRESS),
      DFSTestUtil.getLocalDatanodeInfo("127.0.0.1", "h2",
          AdminStates.DECOMMISSIONED),
      DFSTestUtil.getLocalDatanodeInfo("127.0.0.1", "h3", 
          AdminStates.NORMAL),
      DFSTestUtil.getLocalDatanodeInfo("127.0.0.1", "h4",
          AdminStates.NORMAL),
  };
  String[] storageIDs = {"s1", "s2", "s3", "s4"};
  StorageType[] media = {
      StorageType.DISK,
      StorageType.SSD,
      StorageType.DISK,
      StorageType.RAM_DISK
  };
  LocatedBlock lb = new LocatedBlock(
      new ExtendedBlock("bp12", 12345, 10, 53),
      dnInfos, storageIDs, media, 5, false, new DatanodeInfo[]{});
  lb.setBlockToken(new Token<BlockTokenIdentifier>(
      "identifier".getBytes(), "password".getBytes(), new Text("kind"),
      new Text("service")));
  return lb;
}
Project: big-c    File: TestPBHelper.java
private LocatedBlock createLocatedBlockNoStorageMedia() {
  DatanodeInfo[] dnInfos = {
      DFSTestUtil.getLocalDatanodeInfo("127.0.0.1", "h1",
                                       AdminStates.DECOMMISSION_INPROGRESS),
      DFSTestUtil.getLocalDatanodeInfo("127.0.0.1", "h2",
                                       AdminStates.DECOMMISSIONED),
      DFSTestUtil.getLocalDatanodeInfo("127.0.0.1", "h3",
                                       AdminStates.NORMAL)
  };
  LocatedBlock lb = new LocatedBlock(
      new ExtendedBlock("bp12", 12345, 10, 53), dnInfos, 5, false);
  lb.setBlockToken(new Token<BlockTokenIdentifier>(
      "identifier".getBytes(), "password".getBytes(), new Text("kind"),
      new Text("service")));
  return lb;
}
Project: hadoop-2.6.0-cdh5.4.3    File: ClusterJspHelper.java
/**
 * Get the number of live datanodes.
 * 
 * @param json JSON string that contains live node status.
 * @param nn namenode status to return information in
 */
private static void getLiveNodeCount(String json, NamenodeStatus nn)
    throws IOException {
  // Map of datanode host to (map of attribute name to value)
  Map<String, Map<String, Object>> nodeMap = getNodeMap(json);
  if (nodeMap == null || nodeMap.isEmpty()) {
    return;
  }

  nn.liveDatanodeCount = nodeMap.size();
  for (Entry<String, Map<String, Object>> entry : nodeMap.entrySet()) {
    // Inner map of attribute name to value
    Map<String, Object> innerMap = entry.getValue();
    if (innerMap != null) {
      if (innerMap.get("adminState")
          .equals(AdminStates.DECOMMISSIONED.toString())) {
        nn.liveDecomCount++;
      }
    }
  }
}
Project: hadoop-2.6.0-cdh5.4.3    File: ClusterJspHelper.java
/**
 * Get the decommissioning datanode information.
 * 
 * @param dataNodeStatusMap map with key being datanode, value being an
 *          inner map (key:namenode, value:decommissioning state).
 * @param host namenode host whose decommissioning report is being processed
 * @param json JSON string that contains the decommissioning node status
 */
private static void getDecommissionNodeStatus(
    Map<String, Map<String, String>> dataNodeStatusMap, String host,
    String json) throws IOException {
  Map<String, Map<String, Object>> nodeMap = getNodeMap(json);
  if (nodeMap == null || nodeMap.isEmpty()) {
    return;
  }
  List<String> decomming = new ArrayList<String>();
  for (Entry<String, Map<String, Object>> entry : nodeMap.entrySet()) {
    String dn = entry.getKey();
    decomming.add(dn);
    // nn-status
    Map<String, String> nnStatus = new HashMap<String, String>();
    if (dataNodeStatusMap.containsKey(dn)) {
      nnStatus = dataNodeStatusMap.get(dn);
    }
    nnStatus.put(host, AdminStates.DECOMMISSION_INPROGRESS.toString());
    // dn-nn-status
    dataNodeStatusMap.put(dn, nnStatus);
  }
}
Project: hadoop-2.6.0-cdh5.4.3    File: TestPBHelper.java
private LocatedBlock createLocatedBlock() {
  DatanodeInfo[] dnInfos = {
      DFSTestUtil.getLocalDatanodeInfo("127.0.0.1", "h1",
          AdminStates.DECOMMISSION_INPROGRESS),
      DFSTestUtil.getLocalDatanodeInfo("127.0.0.1", "h2",
          AdminStates.DECOMMISSIONED),
      DFSTestUtil.getLocalDatanodeInfo("127.0.0.1", "h3", 
          AdminStates.NORMAL),
      DFSTestUtil.getLocalDatanodeInfo("127.0.0.1", "h4",
          AdminStates.NORMAL),
  };
  String[] storageIDs = {"s1", "s2", "s3", "s4"};
  StorageType[] media = {
      StorageType.DISK,
      StorageType.SSD,
      StorageType.DISK,
      StorageType.RAM_DISK
  };
  LocatedBlock lb = new LocatedBlock(
      new ExtendedBlock("bp12", 12345, 10, 53),
      dnInfos, storageIDs, media, 5, false, new DatanodeInfo[]{});
  lb.setBlockToken(new Token<BlockTokenIdentifier>(
      "identifier".getBytes(), "password".getBytes(), new Text("kind"),
      new Text("service")));
  return lb;
}
Project: hadoop-2.6.0-cdh5.4.3    File: TestPBHelper.java
private LocatedBlock createLocatedBlockNoStorageMedia() {
  DatanodeInfo[] dnInfos = {
      DFSTestUtil.getLocalDatanodeInfo("127.0.0.1", "h1",
                                       AdminStates.DECOMMISSION_INPROGRESS),
      DFSTestUtil.getLocalDatanodeInfo("127.0.0.1", "h2",
                                       AdminStates.DECOMMISSIONED),
      DFSTestUtil.getLocalDatanodeInfo("127.0.0.1", "h3",
                                       AdminStates.NORMAL)
  };
  LocatedBlock lb = new LocatedBlock(
      new ExtendedBlock("bp12", 12345, 10, 53), dnInfos, 5, false);
  lb.setBlockToken(new Token<BlockTokenIdentifier>(
      "identifier".getBytes(), "password".getBytes(), new Text("kind"),
      new Text("service")));
  return lb;
}
Project: hadoop-2.6.0-cdh5.4.3    File: TestDecommission.java
public void testClusterStats(int numNameNodes) throws IOException,
    InterruptedException {
  LOG.info("Starting test testClusterStats");
  int numDatanodes = 1;
  startCluster(numNameNodes, numDatanodes, conf);

  for (int i = 0; i < numNameNodes; i++) {
    FileSystem fileSys = cluster.getFileSystem(i);
    Path file = new Path("testClusterStats.dat");
    writeFile(fileSys, file, 1);

    FSNamesystem fsn = cluster.getNamesystem(i);
    NameNode namenode = cluster.getNameNode(i);
    DatanodeInfo downnode = decommissionNode(i, null, null,
        AdminStates.DECOMMISSION_INPROGRESS);
    // Check namenode stats for multiple datanode heartbeats
    verifyStats(namenode, fsn, downnode, true);

    // Stop decommissioning and verify stats
    writeConfigFile(excludeFile, null);
    refreshNodes(fsn, conf);
    DatanodeInfo ret = NameNodeAdapter.getDatanode(fsn, downnode);
    waitNodeState(ret, AdminStates.NORMAL);
    verifyStats(namenode, fsn, ret, false);
  }
}
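testClusterStats relies on waitNodeState(ret, AdminStates.NORMAL), a TestDecommission helper that is not reproduced on this page. A hypothetical, simplified version might poll DatanodeInfo.getAdminState(); it assumes the DatanodeInfo handed in is the NameNode's own descriptor, which the NameNode updates in place.

// Illustrative only; the real waitNodeState in TestDecommission is not shown here.
private void waitForAdminState(DatanodeInfo node, AdminStates expected)
    throws InterruptedException {
  while (node.getAdminState() != expected) {
    Thread.sleep(100);
  }
}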
Project: hadoop-EAR    File: TestDecommission.java
public void testClusterStats(int numNameNodes, boolean federation) throws IOException,
    InterruptedException {
  LOG.info("Starting test testClusterStats");
  int numDatanodes = 1;
  startCluster(numNameNodes, numDatanodes, conf, federation);

  for (int i = 0; i < numNameNodes; i++) {
    FileSystem fileSys = cluster.getFileSystem(i);
    Path file = new Path("testClusterStats.dat");
    writeFile(fileSys, file, 1);

    NameNode namenode = cluster.getNameNode(i);
    FSNamesystem fsn = namenode.namesystem;
    DatanodeInfo downnode = decommissionNode(i, null,
        AdminStates.DECOMMISSION_INPROGRESS);
    // Check namenode stats for multiple datanode heartbeats
    verifyStats(namenode, fsn, downnode, true);

    // Stop decommissioning and verify stats
    writeConfigFile(excludeFile, null);
    fsn.refreshNodes(conf);
    DatanodeInfo ret = fsn.getDatanode(downnode);
    waitNodeState(ret, AdminStates.NORMAL);
    verifyStats(namenode, fsn, ret, false);
  }
}
Project: hadoop-EAR    File: ClusterJspHelper.java
/**
 * Process JSON string returned from connection to get the number of
 * live datanodes.
 * 
 * @param json JSON output that contains live node status.
 * @param nn namenode status to return information in
 */
private static void getLiveNodeCount(String json, NamenodeStatus nn)
    throws IOException {
  // Map of datanode host to (map of attribute name to value)
  Map<String, Map<String, Object>> nodeMap = getNodeMap(json);
  if (nodeMap == null || nodeMap.isEmpty()) {
    return;
  }

  nn.liveDatanodeCount = nodeMap.size();
  for (Entry<String, Map<String, Object>> entry : nodeMap.entrySet()) {
    // Inner map of attribute name to value
    Map<String, Object> innerMap = entry.getValue();
    if (innerMap != null) {
      if (((String) innerMap.get("adminState"))
          .equals(AdminStates.DECOMMISSIONED.toString())) {
        nn.liveDecomCount++;
      }
      if (((Boolean) innerMap.get("excluded")).booleanValue()) {
        nn.liveExcludeCount++;
      }
    }
  }
}
Project: hadoop-EAR    File: ClusterJspHelper.java
/**
 * We process the JSON string returned from HTTP or the local FSNamesystem
 * to get the decommissioning datanode information.
 * 
 * @param dataNodeStatusMap map with key being datanode, value being an
 *          inner map (key:namenode, value:decommissioning state).
 * @param address namenode address whose decommissioning report is being processed
 * @param json JSON string returned by the namenode
 */
private static void getDecommissionNodeStatus(
    Map<String, Map<String, String>> dataNodeStatusMap, String address,
    String json) throws IOException {
  Map<String, Map<String, Object>> nodeMap = getNodeMap(json);
  if (nodeMap == null || nodeMap.isEmpty()) {
    return;
  }
  List<String> decomming = new ArrayList<String>();
  for (Entry<String, Map<String, Object>> entry : nodeMap.entrySet()) {
    String dn = entry.getKey();
    decomming.add(dn);
    // nn-status
    Map<String, String> nnStatus = new HashMap<String, String>();
    if (dataNodeStatusMap.containsKey(dn)) {
      nnStatus = dataNodeStatusMap.get(dn);
    }
    nnStatus.put(address, AdminStates.DECOMMISSION_INPROGRESS.toString());
    // dn-nn-status
    dataNodeStatusMap.put(dn, nnStatus);
  }
}
Project: hadoop-plus    File: JsonUtil.java
/** Convert a JSON map to a DatanodeInfo object. */
private static DatanodeInfo toDatanodeInfo(final Map<?, ?> m) {
  if (m == null) {
    return null;
  }

  return new DatanodeInfo(
      (String)m.get("ipAddr"),
      (String)m.get("hostName"),
      (String)m.get("storageID"),
      (int)(long)(Long)m.get("xferPort"),
      (int)(long)(Long)m.get("infoPort"),
      (int)(long)(Long)m.get("ipcPort"),

      (Long)m.get("capacity"),
      (Long)m.get("dfsUsed"),
      (Long)m.get("remaining"),
      (Long)m.get("blockPoolUsed"),
      (Long)m.get("lastUpdate"),
      (int)(long)(Long)m.get("xceiverCount"),
      (String)m.get("networkLocation"),
      AdminStates.valueOf((String)m.get("adminState")));
}
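toDatanodeInfo requires the numeric fields to be Long values (they are narrowed via (int)(long)(Long)) and the adminState field to hold an AdminStates constant name suitable for valueOf. The sketch below builds a map of that shape with made-up values (the ports are just the common HDFS defaults), purely for illustration.

import java.util.HashMap;
import java.util.Map;

import org.apache.hadoop.hdfs.protocol.DatanodeInfo.AdminStates;

public class DatanodeJsonMapSketch {
  // Returns a map shaped the way toDatanodeInfo(...) above expects.
  public static Map<String, Object> sampleMap() {
    Map<String, Object> m = new HashMap<String, Object>();
    m.put("ipAddr", "127.0.0.1");
    m.put("hostName", "h1");
    m.put("storageID", "s1");
    m.put("xferPort", 50010L);
    m.put("infoPort", 50075L);
    m.put("ipcPort", 50020L);
    m.put("capacity", 1024L);
    m.put("dfsUsed", 512L);
    m.put("remaining", 512L);
    m.put("blockPoolUsed", 256L);
    m.put("lastUpdate", 0L);
    m.put("xceiverCount", 1L);
    m.put("networkLocation", "/default-rack");
    // valueOf(...) needs the constant name, e.g. "DECOMMISSIONED".
    m.put("adminState", AdminStates.DECOMMISSIONED.name());
    return m;
  }
}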
Project: hadoop-plus    File: ClusterJspHelper.java
/**
 * Get the number of live datanodes.
 * 
 * @param json JSON string that contains live node status.
 * @param nn namenode status to return information in
 */
private static void getLiveNodeCount(String json, NamenodeStatus nn)
    throws IOException {
  // Map of datanode host to (map of attribute name to value)
  Map<String, Map<String, Object>> nodeMap = getNodeMap(json);
  if (nodeMap == null || nodeMap.isEmpty()) {
    return;
  }

  nn.liveDatanodeCount = nodeMap.size();
  for (Entry<String, Map<String, Object>> entry : nodeMap.entrySet()) {
    // Inner map of attribute name to value
    Map<String, Object> innerMap = entry.getValue();
    if (innerMap != null) {
      if (((String) innerMap.get("adminState"))
          .equals(AdminStates.DECOMMISSIONED.toString())) {
        nn.liveDecomCount++;
      }
    }
  }
}
Project: hadoop-plus    File: ClusterJspHelper.java
/**
 * Get the decommissioning datanode information.
 * 
 * @param dataNodeStatusMap map with key being datanode, value being an
 *          inner map (key:namenode, value:decommissioning state).
 * @param host namenode host whose decommissioning report is being processed
 * @param json JSON string that contains the decommissioning node status
 */
private static void getDecommissionNodeStatus(
    Map<String, Map<String, String>> dataNodeStatusMap, String host,
    String json) throws IOException {
  Map<String, Map<String, Object>> nodeMap = getNodeMap(json);
  if (nodeMap == null || nodeMap.isEmpty()) {
    return;
  }
  List<String> decomming = new ArrayList<String>();
  for (Entry<String, Map<String, Object>> entry : nodeMap.entrySet()) {
    String dn = entry.getKey();
    decomming.add(dn);
    // nn-status
    Map<String, String> nnStatus = new HashMap<String, String>();
    if (dataNodeStatusMap.containsKey(dn)) {
      nnStatus = dataNodeStatusMap.get(dn);
    }
    nnStatus.put(host, AdminStates.DECOMMISSION_INPROGRESS.toString());
    // dn-nn-status
    dataNodeStatusMap.put(dn, nnStatus);
  }
}
Project: hadoop-plus    File: TestPBHelper.java
private LocatedBlock createLocatedBlock() {
  DatanodeInfo[] dnInfos = {
      DFSTestUtil.getLocalDatanodeInfo("127.0.0.1", "h1",
          AdminStates.DECOMMISSION_INPROGRESS),
      DFSTestUtil.getLocalDatanodeInfo("127.0.0.1", "h2",
          AdminStates.DECOMMISSIONED),
      DFSTestUtil.getLocalDatanodeInfo("127.0.0.1", "h3", 
          AdminStates.NORMAL)
  };
  LocatedBlock lb = new LocatedBlock(
      new ExtendedBlock("bp12", 12345, 10, 53), dnInfos, 5, false);
  lb.setBlockToken(new Token<BlockTokenIdentifier>(
      "identifier".getBytes(), "password".getBytes(), new Text("kind"),
      new Text("service")));
  return lb;
}
Project: hadoop-plus    File: TestDecommission.java
public void testClusterStats(int numNameNodes) throws IOException,
    InterruptedException {
  LOG.info("Starting test testClusterStats");
  int numDatanodes = 1;
  startCluster(numNameNodes, numDatanodes, conf);

  for (int i = 0; i < numNameNodes; i++) {
    FileSystem fileSys = cluster.getFileSystem(i);
    Path file = new Path("testClusterStats.dat");
    writeFile(fileSys, file, 1);

    FSNamesystem fsn = cluster.getNamesystem(i);
    NameNode namenode = cluster.getNameNode(i);
    DatanodeInfo downnode = decommissionNode(i, null,
        AdminStates.DECOMMISSION_INPROGRESS);
    // Check namenode stats for multiple datanode heartbeats
    verifyStats(namenode, fsn, downnode, true);

    // Stop decommissioning and verify stats
    writeConfigFile(excludeFile, null);
    refreshNodes(fsn, conf);
    DatanodeInfo ret = NameNodeAdapter.getDatanode(fsn, downnode);
    waitNodeState(ret, AdminStates.NORMAL);
    verifyStats(namenode, fsn, ret, false);
  }
}
Project: FlexMap    File: ClusterJspHelper.java
/**
 * Get the number of live datanodes.
 * 
 * @param json JSON string that contains live node status.
 * @param nn namenode status to return information in
 */
private static void getLiveNodeCount(String json, NamenodeStatus nn)
    throws IOException {
  // Map of datanode host to (map of attribute name to value)
  Map<String, Map<String, Object>> nodeMap = getNodeMap(json);
  if (nodeMap == null || nodeMap.isEmpty()) {
    return;
  }

  nn.liveDatanodeCount = nodeMap.size();
  for (Entry<String, Map<String, Object>> entry : nodeMap.entrySet()) {
    // Inner map of attribute name to value
    Map<String, Object> innerMap = entry.getValue();
    if (innerMap != null) {
      if (innerMap.get("adminState")
          .equals(AdminStates.DECOMMISSIONED.toString())) {
        nn.liveDecomCount++;
      }
    }
  }
}
Project: FlexMap    File: ClusterJspHelper.java
/**
 * Get the decommissioning datanode information.
 * 
 * @param dataNodeStatusMap map with key being datanode, value being an
 *          inner map (key:namenode, value:decommissioning state).
 * @param host namenode host whose decommissioning report is being processed
 * @param json JSON string that contains the decommissioning node status
 */
private static void getDecommissionNodeStatus(
    Map<String, Map<String, String>> dataNodeStatusMap, String host,
    String json) throws IOException {
  Map<String, Map<String, Object>> nodeMap = getNodeMap(json);
  if (nodeMap == null || nodeMap.isEmpty()) {
    return;
  }
  List<String> decomming = new ArrayList<String>();
  for (Entry<String, Map<String, Object>> entry : nodeMap.entrySet()) {
    String dn = entry.getKey();
    decomming.add(dn);
    // nn-status
    Map<String, String> nnStatus = new HashMap<String, String>();
    if (dataNodeStatusMap.containsKey(dn)) {
      nnStatus = dataNodeStatusMap.get(dn);
    }
    nnStatus.put(host, AdminStates.DECOMMISSION_INPROGRESS.toString());
    // dn-nn-status
    dataNodeStatusMap.put(dn, nnStatus);
  }
}
Project: FlexMap    File: TestPBHelper.java
private LocatedBlock createLocatedBlock() {
  DatanodeInfo[] dnInfos = {
      DFSTestUtil.getLocalDatanodeInfo("127.0.0.1", "h1",
          AdminStates.DECOMMISSION_INPROGRESS),
      DFSTestUtil.getLocalDatanodeInfo("127.0.0.1", "h2",
          AdminStates.DECOMMISSIONED),
      DFSTestUtil.getLocalDatanodeInfo("127.0.0.1", "h3", 
          AdminStates.NORMAL),
      DFSTestUtil.getLocalDatanodeInfo("127.0.0.1", "h4",
          AdminStates.NORMAL),
  };
  String[] storageIDs = {"s1", "s2", "s3", "s4"};
  StorageType[] media = {
      StorageType.DISK,
      StorageType.SSD,
      StorageType.DISK,
      StorageType.RAM_DISK
  };
  LocatedBlock lb = new LocatedBlock(
      new ExtendedBlock("bp12", 12345, 10, 53),
      dnInfos, storageIDs, media, 5, false, new DatanodeInfo[]{});
  lb.setBlockToken(new Token<BlockTokenIdentifier>(
      "identifier".getBytes(), "password".getBytes(), new Text("kind"),
      new Text("service")));
  return lb;
}
Project: FlexMap    File: TestPBHelper.java
private LocatedBlock createLocatedBlockNoStorageMedia() {
  DatanodeInfo[] dnInfos = {
      DFSTestUtil.getLocalDatanodeInfo("127.0.0.1", "h1",
                                       AdminStates.DECOMMISSION_INPROGRESS),
      DFSTestUtil.getLocalDatanodeInfo("127.0.0.1", "h2",
                                       AdminStates.DECOMMISSIONED),
      DFSTestUtil.getLocalDatanodeInfo("127.0.0.1", "h3",
                                       AdminStates.NORMAL)
  };
  LocatedBlock lb = new LocatedBlock(
      new ExtendedBlock("bp12", 12345, 10, 53), dnInfos, 5, false);
  lb.setBlockToken(new Token<BlockTokenIdentifier>(
      "identifier".getBytes(), "password".getBytes(), new Text("kind"),
      new Text("service")));
  return lb;
}
Project: FlexMap    File: TestDecommission.java
public void testClusterStats(int numNameNodes) throws IOException,
    InterruptedException {
  LOG.info("Starting test testClusterStats");
  int numDatanodes = 1;
  startCluster(numNameNodes, numDatanodes, conf);

  for (int i = 0; i < numNameNodes; i++) {
    FileSystem fileSys = cluster.getFileSystem(i);
    Path file = new Path("testClusterStats.dat");
    writeFile(fileSys, file, 1);

    FSNamesystem fsn = cluster.getNamesystem(i);
    NameNode namenode = cluster.getNameNode(i);
    DatanodeInfo downnode = decommissionNode(i, null, null,
        AdminStates.DECOMMISSION_INPROGRESS);
    // Check namenode stats for multiple datanode heartbeats
    verifyStats(namenode, fsn, downnode, true);

    // Stop decommissioning and verify stats
    writeConfigFile(excludeFile, null);
    refreshNodes(fsn, conf);
    DatanodeInfo ret = NameNodeAdapter.getDatanode(fsn, downnode);
    waitNodeState(ret, AdminStates.NORMAL);
    verifyStats(namenode, fsn, ret, false);
  }
}
Project: hops    File: JsonUtil.java
/**
 * Convert a JSON map to a DatanodeInfo object.
 */
private static DatanodeInfo toDatanodeInfo(final Map<?, ?> m) {
  if (m == null) {
    return null;
  }

  return new DatanodeInfo((String) m.get("ipAddr"),
      (String) m.get("hostName"), (String) m.get("storageID"),
      (int) (long) (Long) m.get("xferPort"),
      (int) (long) (Long) m.get("infoPort"),
      (int) (long) (Long) m.get("ipcPort"),

      (Long) m.get("capacity"), (Long) m.get("dfsUsed"),
      (Long) m.get("remaining"), (Long) m.get("blockPoolUsed"),
      (Long) m.get("lastUpdate"), (int) (long) (Long) m.get("xceiverCount"),
      (String) m.get("networkLocation"),
      AdminStates.valueOf((String) m.get("adminState")));
}
Project: hops    File: ClusterJspHelper.java
/**
 * Get the number of live datanodes.
 *
 * @param json
 *     JSON string that contains live node status.
 * @param nn
 *     namenode status to return information in
 */
private static void getLiveNodeCount(String json, NamenodeStatus nn)
    throws IOException {
  // Map of datanode host to (map of attribute name to value)
  Map<String, Map<String, Object>> nodeMap = getNodeMap(json);
  if (nodeMap == null || nodeMap.isEmpty()) {
    return;
  }

  nn.liveDatanodeCount = nodeMap.size();
  for (Entry<String, Map<String, Object>> entry : nodeMap.entrySet()) {
    // Inner map of attribute name to value
    Map<String, Object> innerMap = entry.getValue();
    if (innerMap != null) {
      if (((String) innerMap.get("adminState"))
          .equals(AdminStates.DECOMMISSIONED.toString())) {
        nn.liveDecomCount++;
      }
    }
  }
}
Project: hops    File: ClusterJspHelper.java
/**
 * Get the decommissioning datanode information.
 *
 * @param dataNodeStatusMap
 *     map with key being datanode, value being an
 *     inner map (key:namenode, value:decommissioning state).
 * @param host
 *     namenode host whose decommissioning report is being processed
 * @param json
 *     JSON string that contains the decommissioning node status
 */
private static void getDecommissionNodeStatus(
    Map<String, Map<String, String>> dataNodeStatusMap, String host,
    String json) throws IOException {
  Map<String, Map<String, Object>> nodeMap = getNodeMap(json);
  if (nodeMap == null || nodeMap.isEmpty()) {
    return;
  }
  List<String> decomming = new ArrayList<>();
  for (Entry<String, Map<String, Object>> entry : nodeMap.entrySet()) {
    String dn = entry.getKey();
    decomming.add(dn);
    // nn-status
    Map<String, String> nnStatus = new HashMap<>();
    if (dataNodeStatusMap.containsKey(dn)) {
      nnStatus = dataNodeStatusMap.get(dn);
    }
    nnStatus.put(host, AdminStates.DECOMMISSION_INPROGRESS.toString());
    // dn-nn-status
    dataNodeStatusMap.put(dn, nnStatus);
  }
}
Project: hops    File: TestDecommission.java
public void testClusterStats(int numNameNodes)
    throws IOException, InterruptedException {
  LOG.info("Starting test testClusterStats");
  int numDatanodes = 1;
  startCluster(numNameNodes, numDatanodes, conf);

  for (int i = 0; i < numNameNodes; i++) {
    FileSystem fileSys = cluster.getFileSystem(i);
    Path file = new Path("testClusterStats.dat");
    writeFile(fileSys, file, 1);

    FSNamesystem fsn = cluster.getNamesystem(i);
    NameNode namenode = cluster.getNameNode(i);
    DatanodeInfo downnode =
        decommissionNode(i, null, AdminStates.DECOMMISSION_INPROGRESS);
    // Check namenode stats for multiple datanode heartbeats
    verifyStats(namenode, fsn, downnode, true);

    // Stop decommissioning and verify stats
    writeConfigFile(excludeFile, null);
    refreshNodes(fsn, conf);
    DatanodeInfo ret = NameNodeAdapter.getDatanode(fsn, downnode);
    waitNodeState(ret, AdminStates.NORMAL);
    verifyStats(namenode, fsn, ret, false);
  }
}
Project: hadoop-TCP    File: JsonUtil.java
/** Convert a JSON map to a DatanodeInfo object. */
private static DatanodeInfo toDatanodeInfo(final Map<?, ?> m) {
  if (m == null) {
    return null;
  }

  return new DatanodeInfo(
      (String)m.get("ipAddr"),
      (String)m.get("hostName"),
      (String)m.get("storageID"),
      (int)(long)(Long)m.get("xferPort"),
      (int)(long)(Long)m.get("infoPort"),
      (int)(long)(Long)m.get("infoSecurePort"),
      (int)(long)(Long)m.get("ipcPort"),

      (Long)m.get("capacity"),
      (Long)m.get("dfsUsed"),
      (Long)m.get("remaining"),
      (Long)m.get("blockPoolUsed"),
      (Long)m.get("lastUpdate"),
      (int)(long)(Long)m.get("xceiverCount"),
      (String)m.get("networkLocation"),
      AdminStates.valueOf((String)m.get("adminState")));
}
Project: hadoop-TCP    File: ClusterJspHelper.java
/**
 * Get the number of live datanodes.
 * 
 * @param json JSON string that contains live node status.
 * @param nn namenode status to return information in
 */
private static void getLiveNodeCount(String json, NamenodeStatus nn)
    throws IOException {
  // Map of datanode host to (map of attribute name to value)
  Map<String, Map<String, Object>> nodeMap = getNodeMap(json);
  if (nodeMap == null || nodeMap.isEmpty()) {
    return;
  }

  nn.liveDatanodeCount = nodeMap.size();
  for (Entry<String, Map<String, Object>> entry : nodeMap.entrySet()) {
    // Inner map of attribute name to value
    Map<String, Object> innerMap = entry.getValue();
    if (innerMap != null) {
      if (((String) innerMap.get("adminState"))
          .equals(AdminStates.DECOMMISSIONED.toString())) {
        nn.liveDecomCount++;
      }
    }
  }
}
Project: hadoop-TCP    File: ClusterJspHelper.java
/**
 * Get the decommissioning datanode information.
 * 
 * @param dataNodeStatusMap map with key being datanode, value being an
 *          inner map (key:namenode, value:decommissioning state).
 * @param host namenode host whose decommissioning report is being processed
 * @param json JSON string that contains the decommissioning node status
 */
private static void getDecommissionNodeStatus(
    Map<String, Map<String, String>> dataNodeStatusMap, String host,
    String json) throws IOException {
  Map<String, Map<String, Object>> nodeMap = getNodeMap(json);
  if (nodeMap == null || nodeMap.isEmpty()) {
    return;
  }
  List<String> decomming = new ArrayList<String>();
  for (Entry<String, Map<String, Object>> entry : nodeMap.entrySet()) {
    String dn = entry.getKey();
    decomming.add(dn);
    // nn-status
    Map<String, String> nnStatus = new HashMap<String, String>();
    if (dataNodeStatusMap.containsKey(dn)) {
      nnStatus = dataNodeStatusMap.get(dn);
    }
    nnStatus.put(host, AdminStates.DECOMMISSION_INPROGRESS.toString());
    // dn-nn-status
    dataNodeStatusMap.put(dn, nnStatus);
  }
}
Project: hadoop-TCP    File: TestPBHelper.java
private LocatedBlock createLocatedBlock() {
  DatanodeInfo[] dnInfos = {
      DFSTestUtil.getLocalDatanodeInfo("127.0.0.1", "h1",
          AdminStates.DECOMMISSION_INPROGRESS),
      DFSTestUtil.getLocalDatanodeInfo("127.0.0.1", "h2",
          AdminStates.DECOMMISSIONED),
      DFSTestUtil.getLocalDatanodeInfo("127.0.0.1", "h3", 
          AdminStates.NORMAL)
  };
  LocatedBlock lb = new LocatedBlock(
      new ExtendedBlock("bp12", 12345, 10, 53), dnInfos, 5, false);
  lb.setBlockToken(new Token<BlockTokenIdentifier>(
      "identifier".getBytes(), "password".getBytes(), new Text("kind"),
      new Text("service")));
  return lb;
}
Project: hadoop-TCP    File: TestDecommission.java
public void testClusterStats(int numNameNodes) throws IOException,
    InterruptedException {
  LOG.info("Starting test testClusterStats");
  int numDatanodes = 1;
  startCluster(numNameNodes, numDatanodes, conf);

  for (int i = 0; i < numNameNodes; i++) {
    FileSystem fileSys = cluster.getFileSystem(i);
    Path file = new Path("testClusterStats.dat");
    writeFile(fileSys, file, 1);

    FSNamesystem fsn = cluster.getNamesystem(i);
    NameNode namenode = cluster.getNameNode(i);
    DatanodeInfo downnode = decommissionNode(i, null,
        AdminStates.DECOMMISSION_INPROGRESS);
    // Check namenode stats for multiple datanode heartbeats
    verifyStats(namenode, fsn, downnode, true);

    // Stop decommissioning and verify stats
    writeConfigFile(excludeFile, null);
    refreshNodes(fsn, conf);
    DatanodeInfo ret = NameNodeAdapter.getDatanode(fsn, downnode);
    waitNodeState(ret, AdminStates.NORMAL);
    verifyStats(namenode, fsn, ret, false);
  }
}
Project: hadoop-on-lustre    File: JsonUtil.java
/** Convert a JSON map to a DatanodeInfo object. */
private static DatanodeInfo toDatanodeInfo(final Map<?, ?> m) {
  if (m == null) {
    return null;
  }

  return new DatanodeInfo(
      (String)m.get("name"),
      (String)m.get("storageID"),
      (int)(long)(Long)m.get("infoPort"),
      (int)(long)(Long)m.get("ipcPort"),

      (Long)m.get("capacity"),
      (Long)m.get("dfsUsed"),
      (Long)m.get("remaining"),
      (Long)m.get("lastUpdate"),
      (int)(long)(Long)m.get("xceiverCount"),
      (String)m.get("networkLocation"),
      (String)m.get("hostName"),
      AdminStates.valueOf((String)m.get("adminState")));
}
Project: hardfs    File: JsonUtil.java
/** Convert a JSON map to a DatanodeInfo object. */
private static DatanodeInfo toDatanodeInfo(final Map<?, ?> m) {
  if (m == null) {
    return null;
  }

  return new DatanodeInfo(
      (String)m.get("ipAddr"),
      (String)m.get("hostName"),
      (String)m.get("storageID"),
      (int)(long)(Long)m.get("xferPort"),
      (int)(long)(Long)m.get("infoPort"),
      (int)(long)(Long)m.get("infoSecurePort"),
      (int)(long)(Long)m.get("ipcPort"),

      (Long)m.get("capacity"),
      (Long)m.get("dfsUsed"),
      (Long)m.get("remaining"),
      (Long)m.get("blockPoolUsed"),
      (Long)m.get("lastUpdate"),
      (int)(long)(Long)m.get("xceiverCount"),
      (String)m.get("networkLocation"),
      AdminStates.valueOf((String)m.get("adminState")));
}
Project: hardfs    File: ClusterJspHelper.java
/**
 * Get the number of live datanodes.
 * 
 * @param json JSON string that contains live node status.
 * @param nn namenode status to return information in
 */
private static void getLiveNodeCount(String json, NamenodeStatus nn)
    throws IOException {
  // Map of datanode host to (map of attribute name to value)
  Map<String, Map<String, Object>> nodeMap = getNodeMap(json);
  if (nodeMap == null || nodeMap.isEmpty()) {
    return;
  }

  nn.liveDatanodeCount = nodeMap.size();
  for (Entry<String, Map<String, Object>> entry : nodeMap.entrySet()) {
    // Inner map of attribute name to value
    Map<String, Object> innerMap = entry.getValue();
    if (innerMap != null) {
      if (((String) innerMap.get("adminState"))
          .equals(AdminStates.DECOMMISSIONED.toString())) {
        nn.liveDecomCount++;
      }
    }
  }
}