Java 类org.apache.hadoop.hdfs.server.namenode.metrics.FSNamesystemMetrics 实例源码

项目:hadoop-EAR    文件:PendingReplicationBlocks.java   
/**
 * Creates the tracker for blocks with pending replication work.
 *
 * @param timeoutPeriod    replication-request timeout in ms; non-positive
 *                         values keep the built-in default
 * @param maxBlocksToCheck cap on blocks examined per timeout scan;
 *                         non-positive values keep the built-in default
 * @param metrics          namesystem metrics sink updated by this tracker
 */
PendingReplicationBlocks(long timeoutPeriod, int maxBlocksToCheck, FSNamesystemMetrics metrics) {
  fsnamesystemMetrics = metrics;
  // Only positive values override the defaults already stored in the fields.
  if (timeoutPeriod > 0) {
    this.timeout = timeoutPeriod;
  }
  if (maxBlocksToCheck > 0) {
    this.maxBlocksToCheck = maxBlocksToCheck;
  }
  init();
}
项目:RDFS    文件:PendingReplicationBlocks.java   
/**
 * Creates the tracker for blocks with pending replication work.
 *
 * @param timeoutPeriod replication-request timeout in ms; non-positive
 *                      values keep the built-in default
 * @param metrics       namesystem metrics sink updated by this tracker
 */
PendingReplicationBlocks(long timeoutPeriod, FSNamesystemMetrics metrics) {
  fsnamesystemMetrics = metrics;
  // A non-positive timeout leaves the default value in place.
  if (timeoutPeriod > 0) {
    this.timeout = timeoutPeriod;
  }
  init();
}
项目:hadoop-EAR    文件:FSNamesystem.java   
/**
 * Returns the {@link FSNamesystemMetrics} instance backing this namesystem.
 *
 * @return the namesystem metrics object held in {@code myFSMetrics}
 */
final public FSNamesystemMetrics getFSNamesystemMetrics() {
  return myFSMetrics;
}
项目:hadoop-EAR    文件:PendingReplicationBlocks.java   
/**
 * Convenience constructor using the default block-check limit.
 *
 * @param timeoutPeriod replication-request timeout in ms; non-positive
 *                      values keep the built-in default
 * @param metrics       namesystem metrics sink updated by this tracker
 */
PendingReplicationBlocks(long timeoutPeriod, FSNamesystemMetrics metrics) {
  // Fix: forward the caller's metrics object instead of dropping it.
  // Previously null was passed, so the supplied metrics were silently ignored.
  // 0 means "keep the default maxBlocksToCheck" in the primary constructor.
  this(timeoutPeriod, 0, metrics);
}
项目:cumulus    文件:FSNamesystem.java   
/**
 * Returns the {@link FSNamesystemMetrics} instance backing this namesystem.
 *
 * @return the namesystem metrics object held in {@code myFSMetrics}
 */
public FSNamesystemMetrics getFSNamesystemMetrics() {
  return myFSMetrics;
}
项目:cumulus    文件:TestDatanodeReport.java   
/**
 * Verifies datanode reports of type ALL/LIVE/DEAD: starts a mini cluster,
 * checks the initial counts, shuts one datanode down, waits for it to be
 * reported dead, then confirms the expired-heartbeat metric incremented.
 */
public void testDatanodeReport() throws Exception {
  conf.setInt(
      DFSConfigKeys.DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY, 500); // 0.5s
  conf.setLong("dfs.heartbeat.interval", 1L);
  MiniDFSCluster cluster =
    new MiniDFSCluster.Builder(conf).numDataNodes(NUM_OF_DATANODES).build();
  try {
    //wait until the cluster is up
    cluster.waitActive();

    InetSocketAddress addr = new InetSocketAddress("localhost",
        cluster.getNameNodePort());
    DFSClient client = new DFSClient(addr, conf);

    // JUnit's assertEquals takes (expected, actual); the original had the
    // arguments reversed, which produces misleading failure messages.
    assertEquals(NUM_OF_DATANODES,
                 client.datanodeReport(DatanodeReportType.ALL).length);
    assertEquals(NUM_OF_DATANODES,
                 client.datanodeReport(DatanodeReportType.LIVE).length);
    assertEquals(0, client.datanodeReport(DatanodeReportType.DEAD).length);

    // bring down one datanode
    ArrayList<DataNode> datanodes = cluster.getDataNodes();
    datanodes.remove(datanodes.size()-1).shutdown();

    // Poll until the namenode marks the stopped datanode as dead.
    DatanodeInfo[] nodeInfo = client.datanodeReport(DatanodeReportType.DEAD);
    while (nodeInfo.length != 1) {
      try {
        Thread.sleep(500);
      } catch (InterruptedException e) {
        // Restore the interrupt status instead of silently swallowing it,
        // so an interrupted test thread can still terminate promptly.
        Thread.currentThread().interrupt();
      }
      nodeInfo = client.datanodeReport(DatanodeReportType.DEAD);
    }

    assertEquals(NUM_OF_DATANODES-1,
                 client.datanodeReport(DatanodeReportType.LIVE).length);
    assertEquals(NUM_OF_DATANODES,
                 client.datanodeReport(DatanodeReportType.ALL).length);

    // Give the heartbeat monitor time to record the expired heartbeat.
    Thread.sleep(5000);
    FSNamesystemMetrics fsMetrics =
                   cluster.getNamesystem().getFSNamesystemMetrics();
    assertEquals(1, fsMetrics.numExpiredHeartbeats.getCurrentIntervalValue());
  } finally {
    cluster.shutdown();
  }
}
项目:RDFS    文件:FSNamesystem.java   
/**
 * Returns the {@link FSNamesystemMetrics} instance backing this namesystem.
 *
 * @return the namesystem metrics object held in {@code myFSMetrics}
 */
final public FSNamesystemMetrics getFSNamesystemMetrics() {
  return myFSMetrics;
}
项目:hadoop-0.20    文件:FSNamesystem.java   
/**
 * Returns the {@link FSNamesystemMetrics} instance backing this namesystem.
 *
 * @return the namesystem metrics object held in {@code myFSMetrics}
 */
public FSNamesystemMetrics getFSNamesystemMetrics() {
  return myFSMetrics;
}
项目:hadoop-gpu    文件:FSNamesystem.java   
/**
 * Returns the {@link FSNamesystemMetrics} instance backing this namesystem.
 *
 * @return the namesystem metrics object held in {@code myFSMetrics}
 */
public FSNamesystemMetrics getFSNamesystemMetrics() {
  return myFSMetrics;
}