Java 类org.apache.hadoop.mapreduce.ClusterMetrics 实例源码

项目:hadoop    文件:JobClient.java   
/**
 * Get status information about the Map-Reduce cluster.
 *
 * @return the status information about the Map-Reduce cluster as an object
 *         of {@link ClusterStatus}.
 * @throws IOException if the underlying cluster query fails or the calling
 *         thread is interrupted while waiting for it
 */
public ClusterStatus getClusterStatus() throws IOException {
  try {
    // Run as the client user so the RPC carries the correct credentials.
    return clientUgi.doAs(new PrivilegedExceptionAction<ClusterStatus>() {
      public ClusterStatus run() throws IOException, InterruptedException {
        ClusterMetrics metrics = cluster.getClusterStatus();
        // Adapt the new-API ClusterMetrics into the old-API ClusterStatus.
        return new ClusterStatus(metrics.getTaskTrackerCount(),
            metrics.getBlackListedTaskTrackerCount(),
            cluster.getTaskTrackerExpiryInterval(),
            metrics.getOccupiedMapSlots(),
            metrics.getOccupiedReduceSlots(),
            metrics.getMapSlotCapacity(),
            metrics.getReduceSlotCapacity(),
            cluster.getJobTrackerStatus(),
            metrics.getDecommissionedTaskTrackerCount(),
            metrics.getGrayListedTaskTrackerCount());
      }
    });
  } catch (InterruptedException ie) {
    // Restore the interrupt status before translating to IOException so
    // callers further up the stack can still observe the interruption.
    Thread.currentThread().interrupt();
    throw new IOException(ie);
  }
}
项目:hadoop    文件:JobClient.java   
/**
 * Get status information about the Map-Reduce cluster.
 *
 * @param  detailed if true then get a detailed status including the
 *         tracker names
 * @return the status information about the Map-Reduce cluster as an object
 *         of {@link ClusterStatus}.
 * @throws IOException if the underlying cluster query fails or the calling
 *         thread is interrupted while waiting for it
 */
public ClusterStatus getClusterStatus(boolean detailed) throws IOException {
  // NOTE(review): the "detailed" flag is never read below — a detailed
  // status (with tracker names) is always returned.  Kept as-is to
  // preserve the public contract; confirm whether a non-detailed path
  // was intended.
  try {
    return clientUgi.doAs(new PrivilegedExceptionAction<ClusterStatus>() {
      public ClusterStatus run() throws IOException, InterruptedException {
        ClusterMetrics metrics = cluster.getClusterStatus();
        return new ClusterStatus(
            arrayToStringList(cluster.getActiveTaskTrackers()),
            arrayToBlackListInfo(cluster.getBlackListedTaskTrackers()),
            cluster.getTaskTrackerExpiryInterval(),
            metrics.getOccupiedMapSlots(),
            metrics.getOccupiedReduceSlots(),
            metrics.getMapSlotCapacity(),
            metrics.getReduceSlotCapacity(),
            cluster.getJobTrackerStatus());
      }
    });
  } catch (InterruptedException ie) {
    // Preserve the thread's interrupt status for callers.
    Thread.currentThread().interrupt();
    throw new IOException(ie);
  }
}
项目:hadoop    文件:DistSum.java   
/**
 * Choose a Machine at runtime according to the cluster status.
 * Polls the cluster every two seconds until either the map or the reduce
 * side has at least {@code parts} free slots, favoring the reduce side.
 *
 * @param conf configuration supplying N_PARTS, the required free-slot count
 * @return the chosen Machine (ReduceSide preferred when both qualify)
 * @throws IOException if the status query fails or the thread is interrupted
 */
private Machine chooseMachine(Configuration conf) throws IOException {
  final int parts = conf.getInt(N_PARTS, Integer.MAX_VALUE);
  try {
    // Poll forever, sleeping 2s between status queries.
    for(;; Thread.sleep(2000)) {
      final ClusterMetrics status = cluster.getClusterStatus();
      final int freeMaps =
          status.getMapSlotCapacity() - status.getOccupiedMapSlots();
      final int freeReduces =
          status.getReduceSlotCapacity() - status.getOccupiedReduceSlots();
      if (freeMaps >= parts || freeReduces >= parts) {
        // Favor the ReduceSide machine when both sides have capacity.
        final Machine value = freeReduces >= parts
            ? ReduceSide.INSTANCE : MapSide.INSTANCE;
        Util.out.println("  " + this + " is " + value
            + " (m=" + freeMaps + ", r=" + freeReduces + ")");
        return value;
      }
    }
  } catch (InterruptedException e) {
    // Restore the interrupt status before wrapping in IOException.
    Thread.currentThread().interrupt();
    throw new IOException(e);
  }
}
项目:aliyun-oss-hadoop-fs    文件:JobClient.java   
/**
 * Get status information about the Map-Reduce cluster.
 *
 * @return the status information about the Map-Reduce cluster as an object
 *         of {@link ClusterStatus}.
 * @throws IOException if the underlying cluster query fails or the calling
 *         thread is interrupted while waiting for it
 */
public ClusterStatus getClusterStatus() throws IOException {
  try {
    // Run as the client user so the RPC carries the correct credentials.
    return clientUgi.doAs(new PrivilegedExceptionAction<ClusterStatus>() {
      public ClusterStatus run() throws IOException, InterruptedException {
        ClusterMetrics metrics = cluster.getClusterStatus();
        // Adapt the new-API ClusterMetrics into the old-API ClusterStatus.
        return new ClusterStatus(metrics.getTaskTrackerCount(),
            metrics.getBlackListedTaskTrackerCount(),
            cluster.getTaskTrackerExpiryInterval(),
            metrics.getOccupiedMapSlots(),
            metrics.getOccupiedReduceSlots(),
            metrics.getMapSlotCapacity(),
            metrics.getReduceSlotCapacity(),
            cluster.getJobTrackerStatus(),
            metrics.getDecommissionedTaskTrackerCount(),
            metrics.getGrayListedTaskTrackerCount());
      }
    });
  } catch (InterruptedException ie) {
    // Restore the interrupt status before translating to IOException so
    // callers further up the stack can still observe the interruption.
    Thread.currentThread().interrupt();
    throw new IOException(ie);
  }
}
项目:aliyun-oss-hadoop-fs    文件:JobClient.java   
/**
 * Get status information about the Map-Reduce cluster.
 *
 * @param  detailed if true then get a detailed status including the
 *         tracker names
 * @return the status information about the Map-Reduce cluster as an object
 *         of {@link ClusterStatus}.
 * @throws IOException if the underlying cluster query fails or the calling
 *         thread is interrupted while waiting for it
 */
public ClusterStatus getClusterStatus(boolean detailed) throws IOException {
  // NOTE(review): the "detailed" flag is never read below — a detailed
  // status (with tracker names) is always returned.  Kept as-is to
  // preserve the public contract; confirm whether a non-detailed path
  // was intended.
  try {
    return clientUgi.doAs(new PrivilegedExceptionAction<ClusterStatus>() {
      public ClusterStatus run() throws IOException, InterruptedException {
        ClusterMetrics metrics = cluster.getClusterStatus();
        return new ClusterStatus(
            arrayToStringList(cluster.getActiveTaskTrackers()),
            arrayToBlackListInfo(cluster.getBlackListedTaskTrackers()),
            cluster.getTaskTrackerExpiryInterval(),
            metrics.getOccupiedMapSlots(),
            metrics.getOccupiedReduceSlots(),
            metrics.getMapSlotCapacity(),
            metrics.getReduceSlotCapacity(),
            cluster.getJobTrackerStatus());
      }
    });
  } catch (InterruptedException ie) {
    // Preserve the thread's interrupt status for callers.
    Thread.currentThread().interrupt();
    throw new IOException(ie);
  }
}
项目:aliyun-oss-hadoop-fs    文件:DistSum.java   
/**
 * Choose a Machine at runtime according to the cluster status.
 * Polls the cluster every two seconds until either the map or the reduce
 * side has at least {@code parts} free slots, favoring the reduce side.
 *
 * @param conf configuration supplying N_PARTS, the required free-slot count
 * @return the chosen Machine (ReduceSide preferred when both qualify)
 * @throws IOException if the status query fails or the thread is interrupted
 */
private Machine chooseMachine(Configuration conf) throws IOException {
  final int parts = conf.getInt(N_PARTS, Integer.MAX_VALUE);
  try {
    // Poll forever, sleeping 2s between status queries.
    for(;; Thread.sleep(2000)) {
      final ClusterMetrics status = cluster.getClusterStatus();
      final int freeMaps =
          status.getMapSlotCapacity() - status.getOccupiedMapSlots();
      final int freeReduces =
          status.getReduceSlotCapacity() - status.getOccupiedReduceSlots();
      if (freeMaps >= parts || freeReduces >= parts) {
        // Favor the ReduceSide machine when both sides have capacity.
        final Machine value = freeReduces >= parts
            ? ReduceSide.INSTANCE : MapSide.INSTANCE;
        Util.out.println("  " + this + " is " + value
            + " (m=" + freeMaps + ", r=" + freeReduces + ")");
        return value;
      }
    }
  } catch (InterruptedException e) {
    // Restore the interrupt status before wrapping in IOException.
    Thread.currentThread().interrupt();
    throw new IOException(e);
  }
}
项目:big-c    文件:JobClient.java   
/**
 * Get status information about the Map-Reduce cluster.
 *
 * @return the status information about the Map-Reduce cluster as an object
 *         of {@link ClusterStatus}.
 * @throws IOException if the underlying cluster query fails or the calling
 *         thread is interrupted while waiting for it
 */
public ClusterStatus getClusterStatus() throws IOException {
  try {
    // Run as the client user so the RPC carries the correct credentials.
    return clientUgi.doAs(new PrivilegedExceptionAction<ClusterStatus>() {
      public ClusterStatus run() throws IOException, InterruptedException {
        ClusterMetrics metrics = cluster.getClusterStatus();
        // Adapt the new-API ClusterMetrics into the old-API ClusterStatus.
        return new ClusterStatus(metrics.getTaskTrackerCount(),
            metrics.getBlackListedTaskTrackerCount(),
            cluster.getTaskTrackerExpiryInterval(),
            metrics.getOccupiedMapSlots(),
            metrics.getOccupiedReduceSlots(),
            metrics.getMapSlotCapacity(),
            metrics.getReduceSlotCapacity(),
            cluster.getJobTrackerStatus(),
            metrics.getDecommissionedTaskTrackerCount(),
            metrics.getGrayListedTaskTrackerCount());
      }
    });
  } catch (InterruptedException ie) {
    // Restore the interrupt status before translating to IOException so
    // callers further up the stack can still observe the interruption.
    Thread.currentThread().interrupt();
    throw new IOException(ie);
  }
}
项目:big-c    文件:JobClient.java   
/**
 * Get status information about the Map-Reduce cluster.
 *
 * @param  detailed if true then get a detailed status including the
 *         tracker names
 * @return the status information about the Map-Reduce cluster as an object
 *         of {@link ClusterStatus}.
 * @throws IOException if the underlying cluster query fails or the calling
 *         thread is interrupted while waiting for it
 */
public ClusterStatus getClusterStatus(boolean detailed) throws IOException {
  // NOTE(review): the "detailed" flag is never read below — a detailed
  // status (with tracker names) is always returned.  Kept as-is to
  // preserve the public contract; confirm whether a non-detailed path
  // was intended.
  try {
    return clientUgi.doAs(new PrivilegedExceptionAction<ClusterStatus>() {
      public ClusterStatus run() throws IOException, InterruptedException {
        ClusterMetrics metrics = cluster.getClusterStatus();
        return new ClusterStatus(
            arrayToStringList(cluster.getActiveTaskTrackers()),
            arrayToBlackListInfo(cluster.getBlackListedTaskTrackers()),
            cluster.getTaskTrackerExpiryInterval(),
            metrics.getOccupiedMapSlots(),
            metrics.getOccupiedReduceSlots(),
            metrics.getMapSlotCapacity(),
            metrics.getReduceSlotCapacity(),
            cluster.getJobTrackerStatus());
      }
    });
  } catch (InterruptedException ie) {
    // Preserve the thread's interrupt status for callers.
    Thread.currentThread().interrupt();
    throw new IOException(ie);
  }
}
项目:big-c    文件:DistSum.java   
/**
 * Choose a Machine at runtime according to the cluster status.
 * Polls the cluster every two seconds until either the map or the reduce
 * side has at least {@code parts} free slots, favoring the reduce side.
 *
 * @param conf configuration supplying N_PARTS, the required free-slot count
 * @return the chosen Machine (ReduceSide preferred when both qualify)
 * @throws IOException if the status query fails or the thread is interrupted
 */
private Machine chooseMachine(Configuration conf) throws IOException {
  final int parts = conf.getInt(N_PARTS, Integer.MAX_VALUE);
  try {
    // Poll forever, sleeping 2s between status queries.
    for(;; Thread.sleep(2000)) {
      final ClusterMetrics status = cluster.getClusterStatus();
      final int freeMaps =
          status.getMapSlotCapacity() - status.getOccupiedMapSlots();
      final int freeReduces =
          status.getReduceSlotCapacity() - status.getOccupiedReduceSlots();
      if (freeMaps >= parts || freeReduces >= parts) {
        // Favor the ReduceSide machine when both sides have capacity.
        final Machine value = freeReduces >= parts
            ? ReduceSide.INSTANCE : MapSide.INSTANCE;
        Util.out.println("  " + this + " is " + value
            + " (m=" + freeMaps + ", r=" + freeReduces + ")");
        return value;
      }
    }
  } catch (InterruptedException e) {
    // Restore the interrupt status before wrapping in IOException.
    Thread.currentThread().interrupt();
    throw new IOException(e);
  }
}
项目:hadoop-2.6.0-cdh5.4.3    文件:JobClient.java   
/**
 * Get status information about the Map-Reduce cluster.
 *
 * @return the status information about the Map-Reduce cluster as an object
 *         of {@link ClusterStatus}.
 * @throws IOException if the underlying cluster query fails or the calling
 *         thread is interrupted while waiting for it
 */
public ClusterStatus getClusterStatus() throws IOException {
  try {
    // Run as the client user so the RPC carries the correct credentials.
    return clientUgi.doAs(new PrivilegedExceptionAction<ClusterStatus>() {
      public ClusterStatus run() throws IOException, InterruptedException {
        ClusterMetrics metrics = cluster.getClusterStatus();
        // Adapt the new-API ClusterMetrics into the old-API ClusterStatus.
        return new ClusterStatus(metrics.getTaskTrackerCount(),
            metrics.getBlackListedTaskTrackerCount(),
            cluster.getTaskTrackerExpiryInterval(),
            metrics.getOccupiedMapSlots(),
            metrics.getOccupiedReduceSlots(),
            metrics.getMapSlotCapacity(),
            metrics.getReduceSlotCapacity(),
            cluster.getJobTrackerStatus(),
            metrics.getDecommissionedTaskTrackerCount(),
            metrics.getGrayListedTaskTrackerCount());
      }
    });
  } catch (InterruptedException ie) {
    // Restore the interrupt status before translating to IOException so
    // callers further up the stack can still observe the interruption.
    Thread.currentThread().interrupt();
    throw new IOException(ie);
  }
}
项目:hadoop-2.6.0-cdh5.4.3    文件:JobClient.java   
/**
 * Get status information about the Map-Reduce cluster.
 *
 * @param  detailed if true then get a detailed status including the
 *         tracker names
 * @return the status information about the Map-Reduce cluster as an object
 *         of {@link ClusterStatus}.
 * @throws IOException if the underlying cluster query fails or the calling
 *         thread is interrupted while waiting for it
 */
public ClusterStatus getClusterStatus(boolean detailed) throws IOException {
  // NOTE(review): the "detailed" flag is never read below — a detailed
  // status (with tracker names) is always returned.  Kept as-is to
  // preserve the public contract; confirm whether a non-detailed path
  // was intended.
  try {
    return clientUgi.doAs(new PrivilegedExceptionAction<ClusterStatus>() {
      public ClusterStatus run() throws IOException, InterruptedException {
        ClusterMetrics metrics = cluster.getClusterStatus();
        return new ClusterStatus(
            arrayToStringList(cluster.getActiveTaskTrackers()),
            arrayToBlackListInfo(cluster.getBlackListedTaskTrackers()),
            cluster.getTaskTrackerExpiryInterval(),
            metrics.getOccupiedMapSlots(),
            metrics.getOccupiedReduceSlots(),
            metrics.getMapSlotCapacity(),
            metrics.getReduceSlotCapacity(),
            cluster.getJobTrackerStatus());
      }
    });
  } catch (InterruptedException ie) {
    // Preserve the thread's interrupt status for callers.
    Thread.currentThread().interrupt();
    throw new IOException(ie);
  }
}
项目:hadoop-2.6.0-cdh5.4.3    文件:DistSum.java   
/**
 * Choose a Machine at runtime according to the cluster status.
 * Polls the cluster every two seconds until either the map or the reduce
 * side has at least {@code parts} free slots, favoring the reduce side.
 *
 * @param conf configuration supplying N_PARTS, the required free-slot count
 * @return the chosen Machine (ReduceSide preferred when both qualify)
 * @throws IOException if the status query fails or the thread is interrupted
 */
private Machine chooseMachine(Configuration conf) throws IOException {
  final int parts = conf.getInt(N_PARTS, Integer.MAX_VALUE);
  try {
    // Poll forever, sleeping 2s between status queries.
    for(;; Thread.sleep(2000)) {
      final ClusterMetrics status = cluster.getClusterStatus();
      final int freeMaps =
          status.getMapSlotCapacity() - status.getOccupiedMapSlots();
      final int freeReduces =
          status.getReduceSlotCapacity() - status.getOccupiedReduceSlots();
      if (freeMaps >= parts || freeReduces >= parts) {
        // Favor the ReduceSide machine when both sides have capacity.
        final Machine value = freeReduces >= parts
            ? ReduceSide.INSTANCE : MapSide.INSTANCE;
        Util.out.println("  " + this + " is " + value
            + " (m=" + freeMaps + ", r=" + freeReduces + ")");
        return value;
      }
    }
  } catch (InterruptedException e) {
    // Restore the interrupt status before wrapping in IOException.
    Thread.currentThread().interrupt();
    throw new IOException(e);
  }
}
项目:hadoop-2.6.0-cdh5.4.3    文件:JobTracker.java   
/**
 * Build a summary of cluster state (node counts, slot usage, job count)
 * for the JobTracker status reporting.
 *
 * @return an InfoMap with nodes/alive/blacklisted counts, a nested
 *         "slots" map, and the total job-submission count
 */
InfoMap getSummary() {
  final ClusterMetrics metrics = getClusterMetrics();
  InfoMap map = new InfoMap();
  // "nodes" counts live trackers plus blacklisted ones.
  map.put("nodes", metrics.getTaskTrackerCount()
          + getBlacklistedTrackerCount());
  map.put("alive", metrics.getTaskTrackerCount());
  map.put("blacklisted", getBlacklistedTrackerCount());
  // Explicit local instead of double-brace initialization: the anonymous
  // subclass created by {{...}} silently retains a reference to the
  // enclosing JobTracker for as long as the map lives.
  InfoMap slots = new InfoMap();
  slots.put("map_slots", metrics.getMapSlotCapacity());
  slots.put("map_slots_used", metrics.getOccupiedMapSlots());
  slots.put("reduce_slots", metrics.getReduceSlotCapacity());
  slots.put("reduce_slots_used", metrics.getOccupiedReduceSlots());
  map.put("slots", slots);
  map.put("jobs", metrics.getTotalJobSubmissions());
  return map;
}
项目:hadoop-plus    文件:JobClient.java   
/**
 * Get status information about the Map-Reduce cluster.
 *
 * @return the status information about the Map-Reduce cluster as an object
 *         of {@link ClusterStatus}.
 * @throws IOException if the underlying cluster query fails or the calling
 *         thread is interrupted while waiting for it
 */
public ClusterStatus getClusterStatus() throws IOException {
  try {
    // Run as the client user so the RPC carries the correct credentials.
    return clientUgi.doAs(new PrivilegedExceptionAction<ClusterStatus>() {
      public ClusterStatus run() throws IOException, InterruptedException {
        ClusterMetrics metrics = cluster.getClusterStatus();
        // Adapt the new-API ClusterMetrics into the old-API ClusterStatus.
        return new ClusterStatus(metrics.getTaskTrackerCount(),
            metrics.getBlackListedTaskTrackerCount(),
            cluster.getTaskTrackerExpiryInterval(),
            metrics.getOccupiedMapSlots(),
            metrics.getOccupiedReduceSlots(),
            metrics.getMapSlotCapacity(),
            metrics.getReduceSlotCapacity(),
            cluster.getJobTrackerStatus(),
            metrics.getDecommissionedTaskTrackerCount(),
            metrics.getGrayListedTaskTrackerCount());
      }
    });
  } catch (InterruptedException ie) {
    // Restore the interrupt status before translating to IOException so
    // callers further up the stack can still observe the interruption.
    Thread.currentThread().interrupt();
    throw new IOException(ie);
  }
}
项目:hadoop-plus    文件:JobClient.java   
/**
 * Get status information about the Map-Reduce cluster.
 *
 * @param  detailed if true then get a detailed status including the
 *         tracker names
 * @return the status information about the Map-Reduce cluster as an object
 *         of {@link ClusterStatus}.
 * @throws IOException if the underlying cluster query fails or the calling
 *         thread is interrupted while waiting for it
 */
public ClusterStatus getClusterStatus(boolean detailed) throws IOException {
  // NOTE(review): the "detailed" flag is never read below — a detailed
  // status (with tracker names) is always returned.  Kept as-is to
  // preserve the public contract; confirm whether a non-detailed path
  // was intended.
  try {
    return clientUgi.doAs(new PrivilegedExceptionAction<ClusterStatus>() {
      public ClusterStatus run() throws IOException, InterruptedException {
        ClusterMetrics metrics = cluster.getClusterStatus();
        return new ClusterStatus(
            arrayToStringList(cluster.getActiveTaskTrackers()),
            arrayToBlackListInfo(cluster.getBlackListedTaskTrackers()),
            cluster.getTaskTrackerExpiryInterval(),
            metrics.getOccupiedMapSlots(),
            metrics.getOccupiedReduceSlots(),
            metrics.getMapSlotCapacity(),
            metrics.getReduceSlotCapacity(),
            cluster.getJobTrackerStatus());
      }
    });
  } catch (InterruptedException ie) {
    // Preserve the thread's interrupt status for callers.
    Thread.currentThread().interrupt();
    throw new IOException(ie);
  }
}
项目:hadoop-plus    文件:DistSum.java   
/**
 * Choose a Machine at runtime according to the cluster status.
 * Polls the cluster every two seconds until either the map or the reduce
 * side has at least {@code parts} free slots, favoring the reduce side.
 *
 * @param conf configuration supplying N_PARTS, the required free-slot count
 * @return the chosen Machine (ReduceSide preferred when both qualify)
 * @throws IOException if the status query fails or the thread is interrupted
 */
private Machine chooseMachine(Configuration conf) throws IOException {
  final int parts = conf.getInt(N_PARTS, Integer.MAX_VALUE);
  try {
    // Poll forever, sleeping 2s between status queries.
    for(;; Thread.sleep(2000)) {
      final ClusterMetrics status = cluster.getClusterStatus();
      final int freeMaps =
          status.getMapSlotCapacity() - status.getOccupiedMapSlots();
      final int freeReduces =
          status.getReduceSlotCapacity() - status.getOccupiedReduceSlots();
      if (freeMaps >= parts || freeReduces >= parts) {
        // Favor the ReduceSide machine when both sides have capacity.
        final Machine value = freeReduces >= parts
            ? ReduceSide.INSTANCE : MapSide.INSTANCE;
        Util.out.println("  " + this + " is " + value
            + " (m=" + freeMaps + ", r=" + freeReduces + ")");
        return value;
      }
    }
  } catch (InterruptedException e) {
    // Restore the interrupt status before wrapping in IOException.
    Thread.currentThread().interrupt();
    throw new IOException(e);
  }
}
项目:FlexMap    文件:JobClient.java   
/**
 * Get status information about the Map-Reduce cluster.
 *
 * @return the status information about the Map-Reduce cluster as an object
 *         of {@link ClusterStatus}.
 * @throws IOException if the underlying cluster query fails or the calling
 *         thread is interrupted while waiting for it
 */
public ClusterStatus getClusterStatus() throws IOException {
  try {
    // Run as the client user so the RPC carries the correct credentials.
    return clientUgi.doAs(new PrivilegedExceptionAction<ClusterStatus>() {
      public ClusterStatus run() throws IOException, InterruptedException {
        ClusterMetrics metrics = cluster.getClusterStatus();
        // Adapt the new-API ClusterMetrics into the old-API ClusterStatus.
        return new ClusterStatus(metrics.getTaskTrackerCount(),
            metrics.getBlackListedTaskTrackerCount(),
            cluster.getTaskTrackerExpiryInterval(),
            metrics.getOccupiedMapSlots(),
            metrics.getOccupiedReduceSlots(),
            metrics.getMapSlotCapacity(),
            metrics.getReduceSlotCapacity(),
            cluster.getJobTrackerStatus(),
            metrics.getDecommissionedTaskTrackerCount(),
            metrics.getGrayListedTaskTrackerCount());
      }
    });
  } catch (InterruptedException ie) {
    // Restore the interrupt status before translating to IOException so
    // callers further up the stack can still observe the interruption.
    Thread.currentThread().interrupt();
    throw new IOException(ie);
  }
}
项目:FlexMap    文件:JobClient.java   
/**
 * Get status information about the Map-Reduce cluster.
 *
 * @param  detailed if true then get a detailed status including the
 *         tracker names
 * @return the status information about the Map-Reduce cluster as an object
 *         of {@link ClusterStatus}.
 * @throws IOException if the underlying cluster query fails or the calling
 *         thread is interrupted while waiting for it
 */
public ClusterStatus getClusterStatus(boolean detailed) throws IOException {
  // NOTE(review): the "detailed" flag is never read below — a detailed
  // status (with tracker names) is always returned.  Kept as-is to
  // preserve the public contract; confirm whether a non-detailed path
  // was intended.
  try {
    return clientUgi.doAs(new PrivilegedExceptionAction<ClusterStatus>() {
      public ClusterStatus run() throws IOException, InterruptedException {
        ClusterMetrics metrics = cluster.getClusterStatus();
        return new ClusterStatus(
            arrayToStringList(cluster.getActiveTaskTrackers()),
            arrayToBlackListInfo(cluster.getBlackListedTaskTrackers()),
            cluster.getTaskTrackerExpiryInterval(),
            metrics.getOccupiedMapSlots(),
            metrics.getOccupiedReduceSlots(),
            metrics.getMapSlotCapacity(),
            metrics.getReduceSlotCapacity(),
            cluster.getJobTrackerStatus());
      }
    });
  } catch (InterruptedException ie) {
    // Preserve the thread's interrupt status for callers.
    Thread.currentThread().interrupt();
    throw new IOException(ie);
  }
}
项目:FlexMap    文件:DistSum.java   
/**
 * Choose a Machine at runtime according to the cluster status.
 * Polls the cluster every two seconds until either the map or the reduce
 * side has at least {@code parts} free slots, favoring the reduce side.
 *
 * @param conf configuration supplying N_PARTS, the required free-slot count
 * @return the chosen Machine (ReduceSide preferred when both qualify)
 * @throws IOException if the status query fails or the thread is interrupted
 */
private Machine chooseMachine(Configuration conf) throws IOException {
  final int parts = conf.getInt(N_PARTS, Integer.MAX_VALUE);
  try {
    // Poll forever, sleeping 2s between status queries.
    for(;; Thread.sleep(2000)) {
      final ClusterMetrics status = cluster.getClusterStatus();
      final int freeMaps =
          status.getMapSlotCapacity() - status.getOccupiedMapSlots();
      final int freeReduces =
          status.getReduceSlotCapacity() - status.getOccupiedReduceSlots();
      if (freeMaps >= parts || freeReduces >= parts) {
        // Favor the ReduceSide machine when both sides have capacity.
        final Machine value = freeReduces >= parts
            ? ReduceSide.INSTANCE : MapSide.INSTANCE;
        Util.out.println("  " + this + " is " + value
            + " (m=" + freeMaps + ", r=" + freeReduces + ")");
        return value;
      }
    }
  } catch (InterruptedException e) {
    // Restore the interrupt status before wrapping in IOException.
    Thread.currentThread().interrupt();
    throw new IOException(e);
  }
}
项目:hops    文件:JobClient.java   
/**
 * Get status information about the Map-Reduce cluster.
 *
 * @return the status information about the Map-Reduce cluster as an object
 *         of {@link ClusterStatus}.
 * @throws IOException if the underlying cluster query fails or the calling
 *         thread is interrupted while waiting for it
 */
public ClusterStatus getClusterStatus() throws IOException {
  try {
    // Run as the client user so the RPC carries the correct credentials.
    return clientUgi.doAs(new PrivilegedExceptionAction<ClusterStatus>() {
      public ClusterStatus run() throws IOException, InterruptedException {
        ClusterMetrics metrics = cluster.getClusterStatus();
        // Adapt the new-API ClusterMetrics into the old-API ClusterStatus.
        return new ClusterStatus(metrics.getTaskTrackerCount(),
            metrics.getBlackListedTaskTrackerCount(),
            cluster.getTaskTrackerExpiryInterval(),
            metrics.getOccupiedMapSlots(),
            metrics.getOccupiedReduceSlots(),
            metrics.getMapSlotCapacity(),
            metrics.getReduceSlotCapacity(),
            cluster.getJobTrackerStatus(),
            metrics.getDecommissionedTaskTrackerCount(),
            metrics.getGrayListedTaskTrackerCount());
      }
    });
  } catch (InterruptedException ie) {
    // Restore the interrupt status before translating to IOException so
    // callers further up the stack can still observe the interruption.
    Thread.currentThread().interrupt();
    throw new IOException(ie);
  }
}
项目:hops    文件:JobClient.java   
/**
 * Get status information about the Map-Reduce cluster.
 *
 * @param  detailed if true then get a detailed status including the
 *         tracker names
 * @return the status information about the Map-Reduce cluster as an object
 *         of {@link ClusterStatus}.
 * @throws IOException if the underlying cluster query fails or the calling
 *         thread is interrupted while waiting for it
 */
public ClusterStatus getClusterStatus(boolean detailed) throws IOException {
  // NOTE(review): the "detailed" flag is never read below — a detailed
  // status (with tracker names) is always returned.  Kept as-is to
  // preserve the public contract; confirm whether a non-detailed path
  // was intended.
  try {
    return clientUgi.doAs(new PrivilegedExceptionAction<ClusterStatus>() {
      public ClusterStatus run() throws IOException, InterruptedException {
        ClusterMetrics metrics = cluster.getClusterStatus();
        return new ClusterStatus(
            arrayToStringList(cluster.getActiveTaskTrackers()),
            arrayToBlackListInfo(cluster.getBlackListedTaskTrackers()),
            cluster.getTaskTrackerExpiryInterval(),
            metrics.getOccupiedMapSlots(),
            metrics.getOccupiedReduceSlots(),
            metrics.getMapSlotCapacity(),
            metrics.getReduceSlotCapacity(),
            cluster.getJobTrackerStatus());
      }
    });
  } catch (InterruptedException ie) {
    // Preserve the thread's interrupt status for callers.
    Thread.currentThread().interrupt();
    throw new IOException(ie);
  }
}
项目:hops    文件:DistSum.java   
/**
 * Choose a Machine at runtime according to the cluster status.
 * Polls the cluster every two seconds until either the map or the reduce
 * side has at least {@code parts} free slots, favoring the reduce side.
 *
 * @param conf configuration supplying N_PARTS, the required free-slot count
 * @return the chosen Machine (ReduceSide preferred when both qualify)
 * @throws IOException if the status query fails or the thread is interrupted
 */
private Machine chooseMachine(Configuration conf) throws IOException {
  final int parts = conf.getInt(N_PARTS, Integer.MAX_VALUE);
  try {
    // Poll forever, sleeping 2s between status queries.
    for(;; Thread.sleep(2000)) {
      final ClusterMetrics status = cluster.getClusterStatus();
      final int freeMaps =
          status.getMapSlotCapacity() - status.getOccupiedMapSlots();
      final int freeReduces =
          status.getReduceSlotCapacity() - status.getOccupiedReduceSlots();
      if (freeMaps >= parts || freeReduces >= parts) {
        // Favor the ReduceSide machine when both sides have capacity.
        final Machine value = freeReduces >= parts
            ? ReduceSide.INSTANCE : MapSide.INSTANCE;
        Util.out.println("  " + this + " is " + value
            + " (m=" + freeMaps + ", r=" + freeReduces + ")");
        return value;
      }
    }
  } catch (InterruptedException e) {
    // Restore the interrupt status before wrapping in IOException.
    Thread.currentThread().interrupt();
    throw new IOException(e);
  }
}
项目:glusterfs-hadoop-examples    文件:DistSum.java   
/**
 * Choose a Machine at runtime according to the cluster status.
 * Polls the cluster every two seconds until either the map or the reduce
 * side has at least {@code parts} free slots, favoring the reduce side.
 *
 * @param conf configuration supplying N_PARTS, the required free-slot count
 * @return the chosen Machine (ReduceSide preferred when both qualify)
 * @throws IOException if the status query fails or the thread is interrupted
 */
private Machine chooseMachine(Configuration conf) throws IOException {
  final int parts = conf.getInt(N_PARTS, Integer.MAX_VALUE);
  try {
    // Poll forever, sleeping 2s between status queries.
    for(;; Thread.sleep(2000)) {
      final ClusterMetrics status = cluster.getClusterStatus();
      final int freeMaps =
          status.getMapSlotCapacity() - status.getOccupiedMapSlots();
      final int freeReduces =
          status.getReduceSlotCapacity() - status.getOccupiedReduceSlots();
      if (freeMaps >= parts || freeReduces >= parts) {
        // Favor the ReduceSide machine when both sides have capacity.
        final Machine value = freeReduces >= parts
            ? ReduceSide.INSTANCE : MapSide.INSTANCE;
        Util.out.println("  " + this + " is " + value
            + " (m=" + freeMaps + ", r=" + freeReduces + ")");
        return value;
      }
    }
  } catch (InterruptedException e) {
    // Restore the interrupt status before wrapping in IOException.
    Thread.currentThread().interrupt();
    throw new IOException(e);
  }
}
项目:hadoop-TCP    文件:JobClient.java   
/**
 * Get status information about the Map-Reduce cluster.
 *
 * @return the status information about the Map-Reduce cluster as an object
 *         of {@link ClusterStatus}.
 * @throws IOException if the underlying cluster query fails or the calling
 *         thread is interrupted while waiting for it
 */
public ClusterStatus getClusterStatus() throws IOException {
  try {
    // Run as the client user so the RPC carries the correct credentials.
    return clientUgi.doAs(new PrivilegedExceptionAction<ClusterStatus>() {
      public ClusterStatus run() throws IOException, InterruptedException {
        ClusterMetrics metrics = cluster.getClusterStatus();
        // Adapt the new-API ClusterMetrics into the old-API ClusterStatus.
        return new ClusterStatus(metrics.getTaskTrackerCount(),
            metrics.getBlackListedTaskTrackerCount(),
            cluster.getTaskTrackerExpiryInterval(),
            metrics.getOccupiedMapSlots(),
            metrics.getOccupiedReduceSlots(),
            metrics.getMapSlotCapacity(),
            metrics.getReduceSlotCapacity(),
            cluster.getJobTrackerStatus(),
            metrics.getDecommissionedTaskTrackerCount(),
            metrics.getGrayListedTaskTrackerCount());
      }
    });
  } catch (InterruptedException ie) {
    // Restore the interrupt status before translating to IOException so
    // callers further up the stack can still observe the interruption.
    Thread.currentThread().interrupt();
    throw new IOException(ie);
  }
}
项目:hadoop-TCP    文件:JobClient.java   
/**
 * Get status information about the Map-Reduce cluster.
 *
 * @param  detailed if true then get a detailed status including the
 *         tracker names
 * @return the status information about the Map-Reduce cluster as an object
 *         of {@link ClusterStatus}.
 * @throws IOException if the underlying cluster query fails or the calling
 *         thread is interrupted while waiting for it
 */
public ClusterStatus getClusterStatus(boolean detailed) throws IOException {
  // NOTE(review): the "detailed" flag is never read below — a detailed
  // status (with tracker names) is always returned.  Kept as-is to
  // preserve the public contract; confirm whether a non-detailed path
  // was intended.
  try {
    return clientUgi.doAs(new PrivilegedExceptionAction<ClusterStatus>() {
      public ClusterStatus run() throws IOException, InterruptedException {
        ClusterMetrics metrics = cluster.getClusterStatus();
        return new ClusterStatus(
            arrayToStringList(cluster.getActiveTaskTrackers()),
            arrayToBlackListInfo(cluster.getBlackListedTaskTrackers()),
            cluster.getTaskTrackerExpiryInterval(),
            metrics.getOccupiedMapSlots(),
            metrics.getOccupiedReduceSlots(),
            metrics.getMapSlotCapacity(),
            metrics.getReduceSlotCapacity(),
            cluster.getJobTrackerStatus());
      }
    });
  } catch (InterruptedException ie) {
    // Preserve the thread's interrupt status for callers.
    Thread.currentThread().interrupt();
    throw new IOException(ie);
  }
}
项目:hadoop-TCP    文件:DistSum.java   
/**
 * Choose a Machine at runtime according to the cluster status.
 * Polls the cluster every two seconds until either the map or the reduce
 * side has at least {@code parts} free slots, favoring the reduce side.
 *
 * @param conf configuration supplying N_PARTS, the required free-slot count
 * @return the chosen Machine (ReduceSide preferred when both qualify)
 * @throws IOException if the status query fails or the thread is interrupted
 */
private Machine chooseMachine(Configuration conf) throws IOException {
  final int parts = conf.getInt(N_PARTS, Integer.MAX_VALUE);
  try {
    // Poll forever, sleeping 2s between status queries.
    for(;; Thread.sleep(2000)) {
      final ClusterMetrics status = cluster.getClusterStatus();
      final int freeMaps =
          status.getMapSlotCapacity() - status.getOccupiedMapSlots();
      final int freeReduces =
          status.getReduceSlotCapacity() - status.getOccupiedReduceSlots();
      if (freeMaps >= parts || freeReduces >= parts) {
        // Favor the ReduceSide machine when both sides have capacity.
        final Machine value = freeReduces >= parts
            ? ReduceSide.INSTANCE : MapSide.INSTANCE;
        Util.out.println("  " + this + " is " + value
            + " (m=" + freeMaps + ", r=" + freeReduces + ")");
        return value;
      }
    }
  } catch (InterruptedException e) {
    // Restore the interrupt status before wrapping in IOException.
    Thread.currentThread().interrupt();
    throw new IOException(e);
  }
}
项目:hadoop-on-lustre    文件:JobTracker.java   
/**
 * Build a summary of cluster state (node counts, slot usage, job count)
 * for the JobTracker status reporting.
 *
 * @return an InfoMap with nodes/alive/blacklisted/graylisted counts, a
 *         nested "slots" map, and the total job-submission count
 */
InfoMap getSummary() {
  final ClusterMetrics metrics = getClusterMetrics();
  InfoMap map = new InfoMap();
  // "nodes" counts live trackers plus blacklisted ones.
  map.put("nodes", metrics.getTaskTrackerCount()
          + getBlacklistedTrackerCount());
  map.put("alive", metrics.getTaskTrackerCount());
  map.put("blacklisted", getBlacklistedTrackerCount());
  map.put("graylisted", getGraylistedTrackerCount());
  // Explicit local instead of double-brace initialization: the anonymous
  // subclass created by {{...}} silently retains a reference to the
  // enclosing JobTracker for as long as the map lives.
  InfoMap slots = new InfoMap();
  slots.put("map_slots", metrics.getMapSlotCapacity());
  slots.put("map_slots_used", metrics.getOccupiedMapSlots());
  slots.put("reduce_slots", metrics.getReduceSlotCapacity());
  slots.put("reduce_slots_used", metrics.getOccupiedReduceSlots());
  map.put("slots", slots);
  map.put("jobs", metrics.getTotalJobSubmissions());
  return map;
}
项目:hardfs    文件:JobClient.java   
/**
 * Get status information about the Map-Reduce cluster.
 *
 * @return the status information about the Map-Reduce cluster as an object
 *         of {@link ClusterStatus}.
 * @throws IOException if the underlying cluster query fails or the calling
 *         thread is interrupted while waiting for it
 */
public ClusterStatus getClusterStatus() throws IOException {
  try {
    // Run as the client user so the RPC carries the correct credentials.
    return clientUgi.doAs(new PrivilegedExceptionAction<ClusterStatus>() {
      public ClusterStatus run() throws IOException, InterruptedException {
        ClusterMetrics metrics = cluster.getClusterStatus();
        // Adapt the new-API ClusterMetrics into the old-API ClusterStatus.
        return new ClusterStatus(metrics.getTaskTrackerCount(),
            metrics.getBlackListedTaskTrackerCount(),
            cluster.getTaskTrackerExpiryInterval(),
            metrics.getOccupiedMapSlots(),
            metrics.getOccupiedReduceSlots(),
            metrics.getMapSlotCapacity(),
            metrics.getReduceSlotCapacity(),
            cluster.getJobTrackerStatus(),
            metrics.getDecommissionedTaskTrackerCount(),
            metrics.getGrayListedTaskTrackerCount());
      }
    });
  } catch (InterruptedException ie) {
    // Restore the interrupt status before translating to IOException so
    // callers further up the stack can still observe the interruption.
    Thread.currentThread().interrupt();
    throw new IOException(ie);
  }
}
项目:hardfs    文件:JobClient.java   
/**
 * Get status information about the Map-Reduce cluster.
 *
 * @param  detailed if true then get a detailed status including the
 *         tracker names
 * @return the status information about the Map-Reduce cluster as an object
 *         of {@link ClusterStatus}.
 * @throws IOException if the underlying cluster query fails or the calling
 *         thread is interrupted while waiting for it
 */
public ClusterStatus getClusterStatus(boolean detailed) throws IOException {
  // NOTE(review): the "detailed" flag is never read below — a detailed
  // status (with tracker names) is always returned.  Kept as-is to
  // preserve the public contract; confirm whether a non-detailed path
  // was intended.
  try {
    return clientUgi.doAs(new PrivilegedExceptionAction<ClusterStatus>() {
      public ClusterStatus run() throws IOException, InterruptedException {
        ClusterMetrics metrics = cluster.getClusterStatus();
        return new ClusterStatus(
            arrayToStringList(cluster.getActiveTaskTrackers()),
            arrayToBlackListInfo(cluster.getBlackListedTaskTrackers()),
            cluster.getTaskTrackerExpiryInterval(),
            metrics.getOccupiedMapSlots(),
            metrics.getOccupiedReduceSlots(),
            metrics.getMapSlotCapacity(),
            metrics.getReduceSlotCapacity(),
            cluster.getJobTrackerStatus());
      }
    });
  } catch (InterruptedException ie) {
    // Preserve the thread's interrupt status for callers.
    Thread.currentThread().interrupt();
    throw new IOException(ie);
  }
}
项目:hardfs    文件:DistSum.java   
/**
 * Choose a Machine at runtime according to the cluster status.
 * Polls the cluster every two seconds until either the map or the reduce
 * side has at least {@code parts} free slots, favoring the reduce side.
 *
 * @param conf configuration supplying N_PARTS, the required free-slot count
 * @return the chosen Machine (ReduceSide preferred when both qualify)
 * @throws IOException if the status query fails or the thread is interrupted
 */
private Machine chooseMachine(Configuration conf) throws IOException {
  final int parts = conf.getInt(N_PARTS, Integer.MAX_VALUE);
  try {
    // Poll forever, sleeping 2s between status queries.
    for(;; Thread.sleep(2000)) {
      final ClusterMetrics status = cluster.getClusterStatus();
      final int freeMaps =
          status.getMapSlotCapacity() - status.getOccupiedMapSlots();
      final int freeReduces =
          status.getReduceSlotCapacity() - status.getOccupiedReduceSlots();
      if (freeMaps >= parts || freeReduces >= parts) {
        // Favor the ReduceSide machine when both sides have capacity.
        final Machine value = freeReduces >= parts
            ? ReduceSide.INSTANCE : MapSide.INSTANCE;
        Util.out.println("  " + this + " is " + value
            + " (m=" + freeMaps + ", r=" + freeReduces + ")");
        return value;
      }
    }
  } catch (InterruptedException e) {
    // Restore the interrupt status before wrapping in IOException.
    Thread.currentThread().interrupt();
    throw new IOException(e);
  }
}
项目:hadoop-on-lustre2    文件:JobClient.java   
/**
 * Get status information about the Map-Reduce cluster.
 *
 * @return the status information about the Map-Reduce cluster as an object
 *         of {@link ClusterStatus}.
 * @throws IOException if the underlying cluster query fails or the calling
 *         thread is interrupted while waiting for it
 */
public ClusterStatus getClusterStatus() throws IOException {
  try {
    // Run as the client user so the RPC carries the correct credentials.
    return clientUgi.doAs(new PrivilegedExceptionAction<ClusterStatus>() {
      public ClusterStatus run() throws IOException, InterruptedException {
        ClusterMetrics metrics = cluster.getClusterStatus();
        // Adapt the new-API ClusterMetrics into the old-API ClusterStatus.
        return new ClusterStatus(metrics.getTaskTrackerCount(),
            metrics.getBlackListedTaskTrackerCount(),
            cluster.getTaskTrackerExpiryInterval(),
            metrics.getOccupiedMapSlots(),
            metrics.getOccupiedReduceSlots(),
            metrics.getMapSlotCapacity(),
            metrics.getReduceSlotCapacity(),
            cluster.getJobTrackerStatus(),
            metrics.getDecommissionedTaskTrackerCount(),
            metrics.getGrayListedTaskTrackerCount());
      }
    });
  } catch (InterruptedException ie) {
    // Restore the interrupt status before translating to IOException so
    // callers further up the stack can still observe the interruption.
    Thread.currentThread().interrupt();
    throw new IOException(ie);
  }
}
项目:hadoop-on-lustre2    文件:JobClient.java   
/**
 * Get status information about the Map-Reduce cluster.
 *
 * @param  detailed if true then get a detailed status including the
 *         tracker names
 * @return the status information about the Map-Reduce cluster as an object
 *         of {@link ClusterStatus}.
 * @throws IOException if the underlying cluster query fails or the calling
 *         thread is interrupted while waiting for it
 */
public ClusterStatus getClusterStatus(boolean detailed) throws IOException {
  // NOTE(review): the "detailed" flag is never read below — a detailed
  // status (with tracker names) is always returned.  Kept as-is to
  // preserve the public contract; confirm whether a non-detailed path
  // was intended.
  try {
    return clientUgi.doAs(new PrivilegedExceptionAction<ClusterStatus>() {
      public ClusterStatus run() throws IOException, InterruptedException {
        ClusterMetrics metrics = cluster.getClusterStatus();
        return new ClusterStatus(
            arrayToStringList(cluster.getActiveTaskTrackers()),
            arrayToBlackListInfo(cluster.getBlackListedTaskTrackers()),
            cluster.getTaskTrackerExpiryInterval(),
            metrics.getOccupiedMapSlots(),
            metrics.getOccupiedReduceSlots(),
            metrics.getMapSlotCapacity(),
            metrics.getReduceSlotCapacity(),
            cluster.getJobTrackerStatus());
      }
    });
  } catch (InterruptedException ie) {
    // Preserve the thread's interrupt status for callers.
    Thread.currentThread().interrupt();
    throw new IOException(ie);
  }
}
项目:hadoop-on-lustre2    文件:DistSum.java   
/**
 * Choose a Machine at runtime according to the current cluster load.
 *
 * Polls the cluster status every two seconds until either side has at
 * least {@code parts} free slots; when both sides qualify, the reduce
 * side is preferred.
 *
 * @param conf configuration; {@code N_PARTS} gives the number of free
 *        slots required (defaults to {@link Integer#MAX_VALUE})
 * @return the selected {@code Machine} singleton (reduce side favored)
 * @throws IOException if the status query fails or the wait is interrupted
 */
private Machine chooseMachine(Configuration conf) throws IOException {
  final int parts = conf.getInt(N_PARTS, Integer.MAX_VALUE);
  try {
    for(;; Thread.sleep(2000)) {
      //get cluster status
      final ClusterMetrics status = cluster.getClusterStatus();
      final int m =
        status.getMapSlotCapacity() - status.getOccupiedMapSlots();
      final int r =
        status.getReduceSlotCapacity() - status.getOccupiedReduceSlots();
      if (m >= parts || r >= parts) {
        //favor ReduceSide machine
        final Machine value = r >= parts?
            ReduceSide.INSTANCE: MapSide.INSTANCE;
        Util.out.println("  " + this + " is " + value + " (m=" + m + ", r=" + r + ")");
        return value;
      }
    }
  } catch (InterruptedException e) {
    // Restore the interrupt status before translating to IOException so
    // callers further up the stack can still observe the interruption.
    Thread.currentThread().interrupt();
    throw new IOException(e);
  }
}
项目:hanoi-hadoop-2.0.0-cdh    文件:JobTracker.java   
/**
 * Builds a nested summary map of the cluster: node counts, slot
 * capacity/usage, and the total number of job submissions.
 */
InfoMap getSummary() {
  final ClusterMetrics metrics = getClusterMetrics();

  // Slot capacity and occupancy, grouped in a nested map.
  final InfoMap slots = new InfoMap();
  slots.put("map_slots", metrics.getMapSlotCapacity());
  slots.put("map_slots_used", metrics.getOccupiedMapSlots());
  slots.put("reduce_slots", metrics.getReduceSlotCapacity());
  slots.put("reduce_slots_used", metrics.getOccupiedReduceSlots());

  final InfoMap summary = new InfoMap();
  // "nodes" counts live plus blacklisted trackers.
  summary.put("nodes", metrics.getTaskTrackerCount() + getBlacklistedTrackerCount());
  summary.put("alive", metrics.getTaskTrackerCount());
  summary.put("blacklisted", getBlacklistedTrackerCount());
  summary.put("slots", slots);
  summary.put("jobs", metrics.getTotalJobSubmissions());
  return summary;
}
项目:mapreduce-fork    文件:DistSum.java   
/**
 * Choose a Machine at runtime according to the current cluster load.
 *
 * Polls the cluster status every two seconds until either side has at
 * least {@code parts} free slots; when both sides qualify, the reduce
 * side is preferred.
 *
 * @param conf configuration; {@code N_PARTS} gives the number of free
 *        slots required (defaults to {@link Integer#MAX_VALUE})
 * @return the selected {@code Machine} singleton (reduce side favored)
 * @throws IOException if the status query fails or the wait is interrupted
 */
private Machine chooseMachine(Configuration conf) throws IOException {
  final int parts = conf.getInt(N_PARTS, Integer.MAX_VALUE);
  try {
    for(;; Thread.sleep(2000)) {
      //get cluster status
      final ClusterMetrics status = cluster.getClusterStatus();
      final int m =
        status.getMapSlotCapacity() - status.getOccupiedMapSlots();
      final int r =
        status.getReduceSlotCapacity() - status.getOccupiedReduceSlots();
      if (m >= parts || r >= parts) {
        //favor ReduceSide machine
        final Machine value = r >= parts?
            ReduceSide.INSTANCE: MapSide.INSTANCE;
        Util.out.println("  " + this + " is " + value + " (m=" + m + ", r=" + r + ")");
        return value;
      }
    }
  } catch (InterruptedException e) {
    // Restore the interrupt status before translating to IOException so
    // callers further up the stack can still observe the interruption.
    Thread.currentThread().interrupt();
    throw new IOException(e);
  }
}
项目:mapreduce-fork    文件:TestTrackerReservation.java   
/**
 * Test case to check task tracker reservation for a job which 
 * has a job blacklisted tracker.
 * <ol>
 * <li>Run a job which fails on one of the tracker.</li>
 * <li>Check if the job succeeds and has no reservation.</li>
 * </ol>
 * 
 * @throws Exception
 */

public void testTrackerReservationWithJobBlackListedTracker() throws Exception {
  FakeJobInProgress job = TestTaskTrackerBlacklisting.runBlackListingJob(
      jobTracker, trackers);
  assertEquals("Job has no blacklisted trackers", 1, job
      .getBlackListedTrackers().size());
  assertTrue("Tracker 1 not blacklisted for the job", job
      .getBlackListedTrackers().contains(
          JobInProgress.convertTrackerNameToHostName(trackers[0])));
  // JUnit's assertEquals takes (message, expected, actual); the expected
  // run state must come before the observed one.
  assertEquals("Job did not complete successfully", JobStatus.SUCCEEDED,
      job.getStatus().getRunState());
  assertEquals("Reservation for the job not released: Maps", 
      0, job.getNumReservedTaskTrackersForMaps());
  assertEquals("Reservation for the job not released : Reduces", 
      0, job.getNumReservedTaskTrackersForReduces());
  // All reserved slots must be returned to the pool once the job finishes.
  ClusterMetrics metrics = jobTracker.getClusterMetrics();
  assertEquals("reserved map slots do not match",
      0, metrics.getReservedMapSlots());
  assertEquals("reserved reduce slots do not match",
      0, metrics.getReservedReduceSlots());
}
项目:mammoth    文件:JobTracker.java   
/**
 * Builds a nested summary map of the cluster: node counts (including
 * graylisted trackers), slot capacity/usage, and total job submissions.
 */
InfoMap getSummary() {
  final ClusterMetrics metrics = getClusterMetrics();

  // Slot capacity and occupancy, grouped in a nested map.
  final InfoMap slots = new InfoMap();
  slots.put("map_slots", metrics.getMapSlotCapacity());
  slots.put("map_slots_used", metrics.getOccupiedMapSlots());
  slots.put("reduce_slots", metrics.getReduceSlotCapacity());
  slots.put("reduce_slots_used", metrics.getOccupiedReduceSlots());

  final InfoMap summary = new InfoMap();
  // "nodes" counts live plus blacklisted trackers.
  summary.put("nodes", metrics.getTaskTrackerCount() + getBlacklistedTrackerCount());
  summary.put("alive", metrics.getTaskTrackerCount());
  summary.put("blacklisted", getBlacklistedTrackerCount());
  summary.put("graylisted", getGraylistedTrackerCount());
  summary.put("slots", slots);
  summary.put("jobs", metrics.getTotalJobSubmissions());
  return summary;
}
项目:hortonworks-extension    文件:JobTracker.java   
/**
 * Builds a nested summary map of the cluster: node counts (including
 * graylisted trackers), slot capacity/usage, and total job submissions.
 */
InfoMap getSummary() {
  final ClusterMetrics metrics = getClusterMetrics();

  // Slot capacity and occupancy, grouped in a nested map.
  final InfoMap slots = new InfoMap();
  slots.put("map_slots", metrics.getMapSlotCapacity());
  slots.put("map_slots_used", metrics.getOccupiedMapSlots());
  slots.put("reduce_slots", metrics.getReduceSlotCapacity());
  slots.put("reduce_slots_used", metrics.getOccupiedReduceSlots());

  final InfoMap summary = new InfoMap();
  // "nodes" counts live plus blacklisted trackers.
  summary.put("nodes", metrics.getTaskTrackerCount() + getBlacklistedTrackerCount());
  summary.put("alive", metrics.getTaskTrackerCount());
  summary.put("blacklisted", getBlacklistedTrackerCount());
  summary.put("graylisted", getGraylistedTrackerCount());
  summary.put("slots", slots);
  summary.put("jobs", metrics.getTotalJobSubmissions());
  return summary;
}
项目:hortonworks-extension    文件:JobTracker.java   
/**
 * Builds a nested summary map of the cluster: node counts (including
 * graylisted trackers), slot capacity/usage, and total job submissions.
 */
InfoMap getSummary() {
  final ClusterMetrics metrics = getClusterMetrics();

  // Slot capacity and occupancy, grouped in a nested map.
  final InfoMap slots = new InfoMap();
  slots.put("map_slots", metrics.getMapSlotCapacity());
  slots.put("map_slots_used", metrics.getOccupiedMapSlots());
  slots.put("reduce_slots", metrics.getReduceSlotCapacity());
  slots.put("reduce_slots_used", metrics.getOccupiedReduceSlots());

  final InfoMap summary = new InfoMap();
  // "nodes" counts live plus blacklisted trackers.
  summary.put("nodes", metrics.getTaskTrackerCount() + getBlacklistedTrackerCount());
  summary.put("alive", metrics.getTaskTrackerCount());
  summary.put("blacklisted", getBlacklistedTrackerCount());
  summary.put("graylisted", getGraylistedTrackerCount());
  summary.put("slots", slots);
  summary.put("jobs", metrics.getTotalJobSubmissions());
  return summary;
}
项目:hadoop    文件:ResourceMgrDelegate.java   
/**
 * Maps the YARN cluster metrics onto the legacy {@link ClusterMetrics}
 * shape. Only the node-manager count has a real YARN equivalent; the
 * remaining slot-oriented fields are placeholder values derived from it
 * (10 map slots and 2 reduce slots per node manager, 1 elsewhere).
 *
 * @return a legacy-style {@link ClusterMetrics} view of the YARN cluster
 * @throws IOException if the YARN query fails
 */
public ClusterMetrics getClusterMetrics() throws IOException,
    InterruptedException {
  try {
    final YarnClusterMetrics yarnMetrics = client.getYarnClusterMetrics();
    final int nodeManagers = yarnMetrics.getNumNodeManagers();
    return new ClusterMetrics(1, 1, 1, 1, 1, 1,
        nodeManagers * 10, nodeManagers * 2, 1,
        nodeManagers, 0, 0);
  } catch (YarnException e) {
    throw new IOException(e);
  }
}