Java class org.apache.hadoop.hdfs.server.namenode.startupprogress.StartupProgressMetrics example source code
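
Every snippet below is a variant of NameNode#initialize(): after NameNode.initMetrics(...) sets up the metrics system, the NameNode registers its StartupProgress tracker via StartupProgressMetrics.register(startupProgress), so the startup phases become visible to JMX and metrics2 consumers. For orientation, here is a minimal standalone sketch of that registration; the class names are the real Hadoop ones, but the standalone setup (the metrics prefix "StartupProgressDemo" and running outside a NameNode) is an assumption for illustration, not code taken from the projects below.

import org.apache.hadoop.hdfs.server.namenode.startupprogress.StartupProgress;
import org.apache.hadoop.hdfs.server.namenode.startupprogress.StartupProgressMetrics;
import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;

public class StartupProgressMetricsSketch {
  public static void main(String[] args) {
    // Start a metrics system first; inside the NameNode this is done by
    // NameNode.initMetrics(conf, role) before register() is called.
    // The prefix "StartupProgressDemo" is made up for this sketch.
    DefaultMetricsSystem.initialize("StartupProgressDemo");

    // The NameNode keeps a single StartupProgress instance and updates it
    // as it moves through its startup phases.
    StartupProgress startupProgress = new StartupProgress();

    // Publish the tracker as a metrics source; metrics2 sinks and JMX
    // can then read the current phase and its progress counters.
    StartupProgressMetrics.register(startupProgress);
  }
}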

Project: hadoop-on-lustre2    File: NameNode.java
/**
 * Initialize name-node.
 * 
 * @param conf the configuration
 */
protected void initialize(Configuration conf) throws IOException {
  if (conf.get(HADOOP_USER_GROUP_METRICS_PERCENTILES_INTERVALS) == null) {
    String intervals = conf.get(DFS_METRICS_PERCENTILES_INTERVALS_KEY);
    if (intervals != null) {
      conf.set(HADOOP_USER_GROUP_METRICS_PERCENTILES_INTERVALS,
        intervals);
    }
  }

  UserGroupInformation.setConfiguration(conf);
  loginAsNameNodeUser(conf);

  NameNode.initMetrics(conf, this.getRole());
  StartupProgressMetrics.register(startupProgress);

  if (NamenodeRole.NAMENODE == role) {
    startHttpServer(conf);
  }
  loadNamesystem(conf);

  rpcServer = createRpcServer(conf);
  final String nsId = getNameServiceId(conf);
  tokenServiceName = HAUtil.isHAEnabled(conf, nsId) ? nsId : NetUtils
          .getHostPortString(rpcServer.getRpcAddress());
  if (NamenodeRole.NAMENODE == role) {
    httpServer.setNameNodeAddress(getNameNodeAddress());
    httpServer.setFSImage(getFSImage());
  }

  pauseMonitor = new JvmPauseMonitor(conf);
  pauseMonitor.start();

  startCommonServices(conf);
}
Project: hadoop    File: NameNode.java
/**
 * Initialize name-node.
 * 
 * @param conf the configuration
 */
protected void initialize(Configuration conf) throws IOException {
  if (conf.get(HADOOP_USER_GROUP_METRICS_PERCENTILES_INTERVALS) == null) {
    String intervals = conf.get(DFS_METRICS_PERCENTILES_INTERVALS_KEY);
    if (intervals != null) {
      conf.set(HADOOP_USER_GROUP_METRICS_PERCENTILES_INTERVALS,
        intervals);
    }
  }

  UserGroupInformation.setConfiguration(conf);
  loginAsNameNodeUser(conf);

  NameNode.initMetrics(conf, this.getRole());
  StartupProgressMetrics.register(startupProgress);

  if (NamenodeRole.NAMENODE == role) {
    startHttpServer(conf);
  }

  this.spanReceiverHost =
    SpanReceiverHost.get(conf, DFSConfigKeys.DFS_SERVER_HTRACE_PREFIX);

  loadNamesystem(conf);

  rpcServer = createRpcServer(conf);
  if (clientNamenodeAddress == null) {
    // This is expected for MiniDFSCluster. Set it now using 
    // the RPC server's bind address.
    clientNamenodeAddress = 
        NetUtils.getHostPortString(rpcServer.getRpcAddress());
    LOG.info("Clients are to use " + clientNamenodeAddress + " to access"
        + " this namenode/service.");
  }
  if (NamenodeRole.NAMENODE == role) {
    httpServer.setNameNodeAddress(getNameNodeAddress());
    httpServer.setFSImage(getFSImage());
  }

  pauseMonitor = new JvmPauseMonitor(conf);
  pauseMonitor.start();
  metrics.getJvmMetrics().setPauseMonitor(pauseMonitor);

  startCommonServices(conf);
}
Project: aliyun-oss-hadoop-fs    File: NameNode.java
/**
 * Initialize name-node.
 * 
 * @param conf the configuration
 */
protected void initialize(Configuration conf) throws IOException {
  if (conf.get(HADOOP_USER_GROUP_METRICS_PERCENTILES_INTERVALS) == null) {
    String intervals = conf.get(DFS_METRICS_PERCENTILES_INTERVALS_KEY);
    if (intervals != null) {
      conf.set(HADOOP_USER_GROUP_METRICS_PERCENTILES_INTERVALS,
        intervals);
    }
  }

  UserGroupInformation.setConfiguration(conf);
  loginAsNameNodeUser(conf);

  NameNode.initMetrics(conf, this.getRole());
  StartupProgressMetrics.register(startupProgress);

  if (NamenodeRole.NAMENODE == role) {
    startHttpServer(conf);
  }

  loadNamesystem(conf);

  rpcServer = createRpcServer(conf);
  if (clientNamenodeAddress == null) {
    // This is expected for MiniDFSCluster. Set it now using 
    // the RPC server's bind address.
    clientNamenodeAddress = 
        NetUtils.getHostPortString(rpcServer.getRpcAddress());
    LOG.info("Clients are to use " + clientNamenodeAddress + " to access"
        + " this namenode/service.");
  }
  if (NamenodeRole.NAMENODE == role) {
    httpServer.setNameNodeAddress(getNameNodeAddress());
    httpServer.setFSImage(getFSImage());
  }

  pauseMonitor = new JvmPauseMonitor();
  pauseMonitor.init(conf);
  pauseMonitor.start();
  metrics.getJvmMetrics().setPauseMonitor(pauseMonitor);

  startCommonServices(conf);
  startMetricsLogger(conf);
}
Project: big-c    File: NameNode.java
/**
 * Initialize name-node.
 * 
 * @param conf the configuration
 */
protected void initialize(Configuration conf) throws IOException {
  if (conf.get(HADOOP_USER_GROUP_METRICS_PERCENTILES_INTERVALS) == null) {
    String intervals = conf.get(DFS_METRICS_PERCENTILES_INTERVALS_KEY);
    if (intervals != null) {
      conf.set(HADOOP_USER_GROUP_METRICS_PERCENTILES_INTERVALS,
        intervals);
    }
  }

  UserGroupInformation.setConfiguration(conf);
  loginAsNameNodeUser(conf);

  NameNode.initMetrics(conf, this.getRole());
  StartupProgressMetrics.register(startupProgress);

  if (NamenodeRole.NAMENODE == role) {
    startHttpServer(conf);
  }

  this.spanReceiverHost =
    SpanReceiverHost.get(conf, DFSConfigKeys.DFS_SERVER_HTRACE_PREFIX);

  loadNamesystem(conf);

  rpcServer = createRpcServer(conf);
  if (clientNamenodeAddress == null) {
    // This is expected for MiniDFSCluster. Set it now using 
    // the RPC server's bind address.
    clientNamenodeAddress = 
        NetUtils.getHostPortString(rpcServer.getRpcAddress());
    LOG.info("Clients are to use " + clientNamenodeAddress + " to access"
        + " this namenode/service.");
  }
  if (NamenodeRole.NAMENODE == role) {
    httpServer.setNameNodeAddress(getNameNodeAddress());
    httpServer.setFSImage(getFSImage());
  }

  pauseMonitor = new JvmPauseMonitor(conf);
  pauseMonitor.start();
  metrics.getJvmMetrics().setPauseMonitor(pauseMonitor);

  startCommonServices(conf);
}
Project: hadoop-2.6.0-cdh5.4.3    File: NameNode.java
/**
 * Initialize name-node.
 * 
 * @param conf the configuration
 */
protected void initialize(Configuration conf) throws IOException {
  if (conf.get(HADOOP_USER_GROUP_METRICS_PERCENTILES_INTERVALS) == null) {
    String intervals = conf.get(DFS_METRICS_PERCENTILES_INTERVALS_KEY);
    if (intervals != null) {
      conf.set(HADOOP_USER_GROUP_METRICS_PERCENTILES_INTERVALS,
        intervals);
    }
  }

  UserGroupInformation.setConfiguration(conf);
  loginAsNameNodeUser(conf);

  NameNode.initMetrics(conf, this.getRole());
  StartupProgressMetrics.register(startupProgress);

  if (NamenodeRole.NAMENODE == role) {
    startHttpServer(conf);
  }

  this.spanReceiverHost = SpanReceiverHost.getInstance(conf);

  loadNamesystem(conf);

  rpcServer = createRpcServer(conf);
  if (clientNamenodeAddress == null) {
    // This is expected for MiniDFSCluster. Set it now using 
    // the RPC server's bind address.
    clientNamenodeAddress = 
        NetUtils.getHostPortString(rpcServer.getRpcAddress());
    LOG.info("Clients are to use " + clientNamenodeAddress + " to access"
        + " this namenode/service.");
  }
  if (NamenodeRole.NAMENODE == role) {
    httpServer.setNameNodeAddress(getNameNodeAddress());
    httpServer.setFSImage(getFSImage());
  }

  pauseMonitor = new JvmPauseMonitor(conf);
  pauseMonitor.start();
  metrics.getJvmMetrics().setPauseMonitor(pauseMonitor);

  startCommonServices(conf);
}
Project: FlexMap    File: NameNode.java
/**
 * Initialize name-node.
 * 
 * @param conf the configuration
 */
protected void initialize(Configuration conf) throws IOException {
  if (conf.get(HADOOP_USER_GROUP_METRICS_PERCENTILES_INTERVALS) == null) {
    String intervals = conf.get(DFS_METRICS_PERCENTILES_INTERVALS_KEY);
    if (intervals != null) {
      conf.set(HADOOP_USER_GROUP_METRICS_PERCENTILES_INTERVALS,
        intervals);
    }
  }

  UserGroupInformation.setConfiguration(conf);
  loginAsNameNodeUser(conf);

  NameNode.initMetrics(conf, this.getRole());
  StartupProgressMetrics.register(startupProgress);

  if (NamenodeRole.NAMENODE == role) {
    startHttpServer(conf);
  }

  this.spanReceiverHost = SpanReceiverHost.getInstance(conf);

  loadNamesystem(conf);

  rpcServer = createRpcServer(conf);
  if (clientNamenodeAddress == null) {
    // This is expected for MiniDFSCluster. Set it now using 
    // the RPC server's bind address.
    clientNamenodeAddress = 
        NetUtils.getHostPortString(rpcServer.getRpcAddress());
    LOG.info("Clients are to use " + clientNamenodeAddress + " to access"
        + " this namenode/service.");
  }
  if (NamenodeRole.NAMENODE == role) {
    httpServer.setNameNodeAddress(getNameNodeAddress());
    httpServer.setFSImage(getFSImage());
  }

  pauseMonitor = new JvmPauseMonitor(conf);
  pauseMonitor.start();
  metrics.getJvmMetrics().setPauseMonitor(pauseMonitor);

  startCommonServices(conf);
}