Java 类org.apache.hadoop.mapreduce.Cluster.JobTrackerStatus 实例源码

项目:hadoop-2.6.0-cdh5.4.3    文件:ClusterStatus.java   
/**
 * Builds a cluster-status snapshot that additionally carries the
 * JobTracker's heap usage. Package-private: created by the framework.
 */
ClusterStatus(int trackers, int blacklists, long ttExpiryInterval, 
    int maps, int reduces, int maxMaps, int maxReduces, 
    JobTrackerStatus status, int numDecommissionedNodes,
    long used_memory, long max_memory) {
  // Copy every argument into its field, in parameter order.
  this.numActiveTrackers = trackers;
  this.numBlacklistedTrackers = blacklists;
  this.ttExpiryInterval = ttExpiryInterval;
  this.map_tasks = maps;
  this.reduce_tasks = reduces;
  this.max_map_tasks = maxMaps;
  this.max_reduce_tasks = maxReduces;
  this.status = status;
  this.numExcludedNodes = numDecommissionedNodes;
  this.used_memory = used_memory;
  this.max_memory = max_memory;
}
项目:hadoop-2.6.0-cdh5.4.3    文件:JobTracker.java   
/**
 * Returns a consistent snapshot of the cluster state.
 *
 * @param detailed if true, include per-tracker name lists; otherwise only
 *                 aggregate tracker counts
 * @return the current {@link ClusterStatus}
 */
public synchronized ClusterStatus getClusterStatus(boolean detailed) {
  // Hold the taskTrackers lock so the counts and name lists in the
  // snapshot stay mutually consistent while it is assembled.
  synchronized (taskTrackers) {
    if (detailed) {
      // presumably index 0 = active names, index 1 = blacklisted names --
      // TODO confirm against taskTrackerNames()
      List<List<String>> trackerNames = taskTrackerNames();
      return new ClusterStatus(trackerNames.get(0),
          trackerNames.get(1),
          TASKTRACKER_EXPIRY_INTERVAL,
          totalMaps,
          totalReduces,
          totalMapTaskCapacity,
          totalReduceTaskCapacity, 
          JobTrackerStatus.valueOf(state.name()), getExcludedNodes().size()
          );
    } else {
      // Active trackers = all registered trackers minus blacklisted ones.
      return new ClusterStatus(taskTrackers.size() - 
          getBlacklistedTrackerCount(),
          getBlacklistedTrackerCount(),
          TASKTRACKER_EXPIRY_INTERVAL,
          totalMaps,
          totalReduces,
          totalMapTaskCapacity,
          totalReduceTaskCapacity, 
          JobTrackerStatus.valueOf(state.name()), getExcludedNodes().size());
    }
  }
}
项目:hadoop-2.6.0-cdh5.4.3    文件:TestHAStateTransitions.java   
/**
 * Verifies that a JobClient keeps working across an administrator-requested
 * failover from jt1 to jt2.
 */
@Test(timeout=60000)
public void testClientFailover() throws Exception {
  LOG.info("Running testClientFailover");
  startCluster();

  // Test with client. c.f. HATestUtil.setFailoverConfigurations
  JobClient jc = new JobClient(conf);
  assertEquals("client sees jt running", JobTrackerStatus.RUNNING,
      jc.getClusterStatus().getJobTrackerStatus());

  // failover to jt2
  // NOTE(review): the two boolean args look like force flags -- confirm
  // against FailoverController.failover's signature.
  FailoverController fc = new FailoverController(conf, 
      RequestSource.REQUEST_BY_USER);
  fc.failover(target1, target2, false, false);

  cluster.waitActive();

  assertEquals("jt2 running", JobTrackerStatus.RUNNING,
      jt2.getJobTracker().getClusterStatus().getJobTrackerStatus());
  assertNull("jt1 not running", jt1.getJobTracker());

  // The same client instance must now reach the new active jobtracker.
  assertEquals("client still sees jt running", JobTrackerStatus.RUNNING,
      jc.getClusterStatus().getJobTrackerStatus());
}
项目:hanoi-hadoop-2.0.0-cdh    文件:ClusterStatus.java   
/**
 * Builds a cluster-status snapshot that additionally carries the
 * JobTracker's heap usage. Package-private: created by the framework.
 */
ClusterStatus(int trackers, int blacklists, long ttExpiryInterval, 
    int maps, int reduces, int maxMaps, int maxReduces, 
    JobTrackerStatus status, int numDecommissionedNodes,
    long used_memory, long max_memory) {
  // Copy every argument into its field, in parameter order.
  this.numActiveTrackers = trackers;
  this.numBlacklistedTrackers = blacklists;
  this.ttExpiryInterval = ttExpiryInterval;
  this.map_tasks = maps;
  this.reduce_tasks = reduces;
  this.max_map_tasks = maxMaps;
  this.max_reduce_tasks = maxReduces;
  this.status = status;
  this.numExcludedNodes = numDecommissionedNodes;
  this.used_memory = used_memory;
  this.max_memory = max_memory;
}
项目:hanoi-hadoop-2.0.0-cdh    文件:JobTracker.java   
/**
 * Returns a consistent snapshot of the cluster state.
 *
 * @param detailed if true, include per-tracker name lists; otherwise only
 *                 aggregate tracker counts
 * @return the current {@link ClusterStatus}
 */
public synchronized ClusterStatus getClusterStatus(boolean detailed) {
  // Hold the taskTrackers lock so the counts and name lists in the
  // snapshot stay mutually consistent while it is assembled.
  synchronized (taskTrackers) {
    if (detailed) {
      // presumably index 0 = active names, index 1 = blacklisted names --
      // TODO confirm against taskTrackerNames()
      List<List<String>> trackerNames = taskTrackerNames();
      return new ClusterStatus(trackerNames.get(0),
          trackerNames.get(1),
          TASKTRACKER_EXPIRY_INTERVAL,
          totalMaps,
          totalReduces,
          totalMapTaskCapacity,
          totalReduceTaskCapacity, 
          JobTrackerStatus.valueOf(state.name()), getExcludedNodes().size()
          );
    } else {
      // Active trackers = all registered trackers minus blacklisted ones.
      return new ClusterStatus(taskTrackers.size() - 
          getBlacklistedTrackerCount(),
          getBlacklistedTrackerCount(),
          TASKTRACKER_EXPIRY_INTERVAL,
          totalMaps,
          totalReduces,
          totalMapTaskCapacity,
          totalReduceTaskCapacity, 
          JobTrackerStatus.valueOf(state.name()), getExcludedNodes().size());
    }
  }
}
项目:hanoi-hadoop-2.0.0-cdh    文件:TestHAStateTransitions.java   
/**
 * Verifies that a JobClient keeps working across an administrator-requested
 * failover from jt1 to jt2.
 */
@Test(timeout=60000)
public void testClientFailover() throws Exception {
  LOG.info("Running testClientFailover");
  startCluster();

  // Test with client. c.f. HATestUtil.setFailoverConfigurations
  JobClient jc = new JobClient(conf);
  assertEquals("client sees jt running", JobTrackerStatus.RUNNING,
      jc.getClusterStatus().getJobTrackerStatus());

  // failover to jt2
  // NOTE(review): the two boolean args look like force flags -- confirm
  // against FailoverController.failover's signature.
  FailoverController fc = new FailoverController(conf, 
      RequestSource.REQUEST_BY_USER);
  fc.failover(target1, target2, false, false);

  cluster.waitActive();

  assertEquals("jt2 running", JobTrackerStatus.RUNNING,
      jt2.getJobTracker().getClusterStatus().getJobTrackerStatus());
  assertNull("jt1 not running", jt1.getJobTracker());

  // The same client instance must now reach the new active jobtracker.
  assertEquals("client still sees jt running", JobTrackerStatus.RUNNING,
      jc.getClusterStatus().getJobTrackerStatus());
}
项目:mapreduce-fork    文件:ClusterStatus.java   
/**
 * Deserializes this status from the wire format written by write():
 * active-tracker count and names, blacklist count and entries, then the
 * scalar counters and the jobtracker status enum, in that fixed order.
 */
public void readFields(DataInput in) throws IOException {
  numActiveTrackers = in.readInt();
  final int trackerNameCount = in.readInt();
  // A non-positive count simply yields no iterations; no guard needed.
  for (int idx = 0; idx < trackerNameCount; idx++) {
    activeTrackers.add(Text.readString(in));
  }
  numBlacklistedTrackers = in.readInt();
  final int blackListedCount = in.readInt();
  for (int idx = 0; idx < blackListedCount; idx++) {
    BlackListInfo entry = new BlackListInfo();
    entry.readFields(in);
    blacklistedTrackersInfo.add(entry);
  }
  numExcludedNodes = in.readInt();
  ttExpiryInterval = in.readLong();
  map_tasks = in.readInt();
  reduce_tasks = in.readInt();
  max_map_tasks = in.readInt();
  max_reduce_tasks = in.readInt();
  status = WritableUtils.readEnum(in, JobTrackerStatus.class);
}
项目:hadoop    文件:UtilsForTests.java   
/**
 * Blocks the calling thread until the jobtracker reports RUNNING.
 */
static void waitForJobTracker(JobClient jobClient) {
  boolean running = false;
  while (!running) {
    try {
      ClusterStatus clusterStatus = jobClient.getClusterStatus();
      while (clusterStatus.getJobTrackerStatus() != JobTrackerStatus.RUNNING) {
        waitFor(100);
        clusterStatus = jobClient.getClusterStatus();
      }
      running = true; // jt answered and reported RUNNING
    } catch (IOException ignored) {
      // RPC can fail while the jobtracker is still coming up; just retry.
    }
  }
}
项目:hadoop    文件:ClusterStatus.java   
/**
 * Creates a cluster-status snapshot.
 *
 * @param trackers number of active tasktrackers
 * @param blacklists number of blacklisted tasktrackers
 * @param ttExpiryInterval the tasktracker expiry interval
 * @param maps currently running map tasks
 * @param reduces currently running reduce tasks
 * @param maxMaps map-task capacity of the cluster
 * @param maxReduces reduce-task capacity of the cluster
 * @param status the {@link JobTrackerStatus} of the <code>JobTracker</code>
 * @param numDecommissionedNodes number of decommissioned (excluded) trackers
 * @param numGrayListedTrackers number of graylisted trackers
 */
ClusterStatus(int trackers, int blacklists, long ttExpiryInterval, int maps,
    int reduces, int maxMaps, int maxReduces, JobTrackerStatus status,
    int numDecommissionedNodes, int numGrayListedTrackers) {
  // Copy every argument into its field, in parameter order.
  this.numActiveTrackers = trackers;
  this.numBlacklistedTrackers = blacklists;
  this.ttExpiryInterval = ttExpiryInterval;
  this.map_tasks = maps;
  this.reduce_tasks = reduces;
  this.max_map_tasks = maxMaps;
  this.max_reduce_tasks = maxReduces;
  this.status = status;
  this.numExcludedNodes = numDecommissionedNodes;
  this.grayListedTrackers = numGrayListedTrackers;
}
项目:hadoop    文件:ClusterStatus.java   
/**
 * Deserializes this status from the wire format written by write():
 * active-tracker count and names, blacklist count and entries, the scalar
 * counters, the jobtracker status enum, and the graylist count, in order.
 */
public void readFields(DataInput in) throws IOException {
  numActiveTrackers = in.readInt();
  final int trackerNameCount = in.readInt();
  // A non-positive count simply yields no iterations; no guard needed.
  for (int idx = 0; idx < trackerNameCount; idx++) {
    activeTrackers.add(StringInterner.weakIntern(Text.readString(in)));
  }
  numBlacklistedTrackers = in.readInt();
  final int blackListedCount = in.readInt();
  for (int idx = 0; idx < blackListedCount; idx++) {
    BlackListInfo entry = new BlackListInfo();
    entry.readFields(in);
    blacklistedTrackersInfo.add(entry);
  }
  numExcludedNodes = in.readInt();
  ttExpiryInterval = in.readLong();
  map_tasks = in.readInt();
  reduce_tasks = in.readInt();
  max_map_tasks = in.readInt();
  max_reduce_tasks = in.readInt();
  status = WritableUtils.readEnum(in, JobTrackerStatus.class);
  grayListedTrackers = in.readInt();
}
项目:aliyun-oss-hadoop-fs    文件:UtilsForTests.java   
/**
 * Blocks the calling thread until the jobtracker reports RUNNING.
 */
static void waitForJobTracker(JobClient jobClient) {
  boolean running = false;
  while (!running) {
    try {
      ClusterStatus clusterStatus = jobClient.getClusterStatus();
      while (clusterStatus.getJobTrackerStatus() != JobTrackerStatus.RUNNING) {
        waitFor(100);
        clusterStatus = jobClient.getClusterStatus();
      }
      running = true; // jt answered and reported RUNNING
    } catch (IOException ignored) {
      // RPC can fail while the jobtracker is still coming up; just retry.
    }
  }
}
项目:aliyun-oss-hadoop-fs    文件:ClusterStatus.java   
/**
 * Creates a cluster-status snapshot.
 *
 * @param trackers number of active tasktrackers
 * @param blacklists number of blacklisted tasktrackers
 * @param ttExpiryInterval the tasktracker expiry interval
 * @param maps currently running map tasks
 * @param reduces currently running reduce tasks
 * @param maxMaps map-task capacity of the cluster
 * @param maxReduces reduce-task capacity of the cluster
 * @param status the {@link JobTrackerStatus} of the <code>JobTracker</code>
 * @param numDecommissionedNodes number of decommissioned (excluded) trackers
 * @param numGrayListedTrackers number of graylisted trackers
 */
ClusterStatus(int trackers, int blacklists, long ttExpiryInterval, int maps,
    int reduces, int maxMaps, int maxReduces, JobTrackerStatus status,
    int numDecommissionedNodes, int numGrayListedTrackers) {
  // Copy every argument into its field, in parameter order.
  this.numActiveTrackers = trackers;
  this.numBlacklistedTrackers = blacklists;
  this.ttExpiryInterval = ttExpiryInterval;
  this.map_tasks = maps;
  this.reduce_tasks = reduces;
  this.max_map_tasks = maxMaps;
  this.max_reduce_tasks = maxReduces;
  this.status = status;
  this.numExcludedNodes = numDecommissionedNodes;
  this.grayListedTrackers = numGrayListedTrackers;
}
项目:aliyun-oss-hadoop-fs    文件:ClusterStatus.java   
/**
 * Deserializes this status from the wire format written by write():
 * active-tracker count and names, blacklist count and entries, the scalar
 * counters, the jobtracker status enum, and the graylist count, in order.
 */
public void readFields(DataInput in) throws IOException {
  numActiveTrackers = in.readInt();
  final int trackerNameCount = in.readInt();
  // A non-positive count simply yields no iterations; no guard needed.
  for (int idx = 0; idx < trackerNameCount; idx++) {
    activeTrackers.add(StringInterner.weakIntern(Text.readString(in)));
  }
  numBlacklistedTrackers = in.readInt();
  final int blackListedCount = in.readInt();
  for (int idx = 0; idx < blackListedCount; idx++) {
    BlackListInfo entry = new BlackListInfo();
    entry.readFields(in);
    blacklistedTrackersInfo.add(entry);
  }
  numExcludedNodes = in.readInt();
  ttExpiryInterval = in.readLong();
  map_tasks = in.readInt();
  reduce_tasks = in.readInt();
  max_map_tasks = in.readInt();
  max_reduce_tasks = in.readInt();
  status = WritableUtils.readEnum(in, JobTrackerStatus.class);
  grayListedTrackers = in.readInt();
}
项目:big-c    文件:UtilsForTests.java   
/**
 * Blocks the calling thread until the jobtracker reports RUNNING.
 */
static void waitForJobTracker(JobClient jobClient) {
  boolean running = false;
  while (!running) {
    try {
      ClusterStatus clusterStatus = jobClient.getClusterStatus();
      while (clusterStatus.getJobTrackerStatus() != JobTrackerStatus.RUNNING) {
        waitFor(100);
        clusterStatus = jobClient.getClusterStatus();
      }
      running = true; // jt answered and reported RUNNING
    } catch (IOException ignored) {
      // RPC can fail while the jobtracker is still coming up; just retry.
    }
  }
}
项目:big-c    文件:ClusterStatus.java   
/**
 * Creates a cluster-status snapshot.
 *
 * @param trackers number of active tasktrackers
 * @param blacklists number of blacklisted tasktrackers
 * @param ttExpiryInterval the tasktracker expiry interval
 * @param maps currently running map tasks
 * @param reduces currently running reduce tasks
 * @param maxMaps map-task capacity of the cluster
 * @param maxReduces reduce-task capacity of the cluster
 * @param status the {@link JobTrackerStatus} of the <code>JobTracker</code>
 * @param numDecommissionedNodes number of decommissioned (excluded) trackers
 * @param numGrayListedTrackers number of graylisted trackers
 */
ClusterStatus(int trackers, int blacklists, long ttExpiryInterval, int maps,
    int reduces, int maxMaps, int maxReduces, JobTrackerStatus status,
    int numDecommissionedNodes, int numGrayListedTrackers) {
  // Copy every argument into its field, in parameter order.
  this.numActiveTrackers = trackers;
  this.numBlacklistedTrackers = blacklists;
  this.ttExpiryInterval = ttExpiryInterval;
  this.map_tasks = maps;
  this.reduce_tasks = reduces;
  this.max_map_tasks = maxMaps;
  this.max_reduce_tasks = maxReduces;
  this.status = status;
  this.numExcludedNodes = numDecommissionedNodes;
  this.grayListedTrackers = numGrayListedTrackers;
}
项目:big-c    文件:ClusterStatus.java   
/**
 * Deserializes this status from the wire format written by write():
 * active-tracker count and names, blacklist count and entries, the scalar
 * counters, the jobtracker status enum, and the graylist count, in order.
 */
public void readFields(DataInput in) throws IOException {
  numActiveTrackers = in.readInt();
  final int trackerNameCount = in.readInt();
  // A non-positive count simply yields no iterations; no guard needed.
  for (int idx = 0; idx < trackerNameCount; idx++) {
    activeTrackers.add(StringInterner.weakIntern(Text.readString(in)));
  }
  numBlacklistedTrackers = in.readInt();
  final int blackListedCount = in.readInt();
  for (int idx = 0; idx < blackListedCount; idx++) {
    BlackListInfo entry = new BlackListInfo();
    entry.readFields(in);
    blacklistedTrackersInfo.add(entry);
  }
  numExcludedNodes = in.readInt();
  ttExpiryInterval = in.readLong();
  map_tasks = in.readInt();
  reduce_tasks = in.readInt();
  max_map_tasks = in.readInt();
  max_reduce_tasks = in.readInt();
  status = WritableUtils.readEnum(in, JobTrackerStatus.class);
  grayListedTrackers = in.readInt();
}
项目:hadoop-2.6.0-cdh5.4.3    文件:UtilsForTests.java   
/**
 * Blocks the calling thread until the jobtracker reports RUNNING.
 */
static void waitForJobTracker(JobClient jobClient) {
  boolean running = false;
  while (!running) {
    try {
      ClusterStatus clusterStatus = jobClient.getClusterStatus();
      while (clusterStatus.getJobTrackerStatus() != JobTrackerStatus.RUNNING) {
        waitFor(100);
        clusterStatus = jobClient.getClusterStatus();
      }
      running = true; // jt answered and reported RUNNING
    } catch (IOException ignored) {
      // RPC can fail while the jobtracker is still coming up; just retry.
    }
  }
}
项目:hadoop-2.6.0-cdh5.4.3    文件:ClusterStatus.java   
/**
 * Creates a cluster-status snapshot.
 *
 * @param trackers number of active tasktrackers
 * @param blacklists number of blacklisted tasktrackers
 * @param ttExpiryInterval the tasktracker expiry interval
 * @param maps currently running map tasks
 * @param reduces currently running reduce tasks
 * @param maxMaps map-task capacity of the cluster
 * @param maxReduces reduce-task capacity of the cluster
 * @param status the {@link JobTrackerStatus} of the <code>JobTracker</code>
 * @param numDecommissionedNodes number of decommissioned (excluded) trackers
 * @param numGrayListedTrackers number of graylisted trackers
 */
ClusterStatus(int trackers, int blacklists, long ttExpiryInterval, int maps,
    int reduces, int maxMaps, int maxReduces, JobTrackerStatus status,
    int numDecommissionedNodes, int numGrayListedTrackers) {
  // Copy every argument into its field, in parameter order.
  this.numActiveTrackers = trackers;
  this.numBlacklistedTrackers = blacklists;
  this.ttExpiryInterval = ttExpiryInterval;
  this.map_tasks = maps;
  this.reduce_tasks = reduces;
  this.max_map_tasks = maxMaps;
  this.max_reduce_tasks = maxReduces;
  this.status = status;
  this.numExcludedNodes = numDecommissionedNodes;
  this.grayListedTrackers = numGrayListedTrackers;
}
项目:hadoop-2.6.0-cdh5.4.3    文件:ClusterStatus.java   
/**
 * Deserializes this status from the wire format written by write():
 * active-tracker count and names, blacklist count and entries, the scalar
 * counters, the jobtracker status enum, and the graylist count, in order.
 */
public void readFields(DataInput in) throws IOException {
  numActiveTrackers = in.readInt();
  final int trackerNameCount = in.readInt();
  // A non-positive count simply yields no iterations; no guard needed.
  for (int idx = 0; idx < trackerNameCount; idx++) {
    activeTrackers.add(StringInterner.weakIntern(Text.readString(in)));
  }
  numBlacklistedTrackers = in.readInt();
  final int blackListedCount = in.readInt();
  for (int idx = 0; idx < blackListedCount; idx++) {
    BlackListInfo entry = new BlackListInfo();
    entry.readFields(in);
    blacklistedTrackersInfo.add(entry);
  }
  numExcludedNodes = in.readInt();
  ttExpiryInterval = in.readLong();
  map_tasks = in.readInt();
  reduce_tasks = in.readInt();
  max_map_tasks = in.readInt();
  max_reduce_tasks = in.readInt();
  status = WritableUtils.readEnum(in, JobTrackerStatus.class);
  grayListedTrackers = in.readInt();
}
项目:hadoop-2.6.0-cdh5.4.3    文件:MiniMRHACluster.java   
/**
 * Polls the HA daemons once per second until one of them hosts a
 * jobtracker whose status is RUNNING, then returns.
 */
public void waitActive() throws IOException {
  for (;;) {
    for (JobTrackerHADaemon haDaemon : jtHaDaemonList) {
      JobTracker tracker = haDaemon.getJobTracker();
      if (tracker != null
          && tracker.getClusterStatus().getJobTrackerStatus() == JobTrackerStatus.RUNNING) {
        return;
      }
    }
    try {
      Thread.sleep(1000);
    } catch (InterruptedException ignored) {
      // Keep polling: callers expect this method to block until active.
    }
  }
}
项目:hadoop-2.6.0-cdh5.4.3    文件:UtilsForTests.java   
/**
 * Blocks the calling thread until the jobtracker reports RUNNING.
 */
static void waitForJobTracker(JobClient jobClient) {
  boolean running = false;
  while (!running) {
    try {
      ClusterStatus clusterStatus = jobClient.getClusterStatus();
      while (clusterStatus.getJobTrackerStatus() != JobTrackerStatus.RUNNING) {
        waitFor(100);
        clusterStatus = jobClient.getClusterStatus();
      }
      running = true; // jt answered and reported RUNNING
    } catch (IOException ignored) {
      // RPC can fail while the jobtracker is still coming up; just retry.
    }
  }
}
项目:hadoop-2.6.0-cdh5.4.3    文件:TestJobQueueTaskScheduler.java   
@Override
public ClusterStatus getClusterStatus() {
  // Synthesizes a status for the fake cluster: no blacklisted trackers,
  // capacity scales linearly with the number of registered trackers.
  final int trackerCount = trackers.size();
  return new ClusterStatus(trackerCount, 0,
      JobTracker.TASKTRACKER_EXPIRY_INTERVAL,
      maps, reduces,
      trackerCount * maxMapTasksPerTracker,
      trackerCount * maxReduceTasksPerTracker,
      JobTrackerStatus.RUNNING);
}
项目:hadoop-2.6.0-cdh5.4.3    文件:TestParallelInitialization.java   
public ClusterStatus getClusterStatus() {
  // Synthesizes a status for the fake cluster: no blacklisted trackers,
  // capacity scales linearly with the number of registered trackers.
  final int trackerCount = trackers.size();
  return new ClusterStatus(trackerCount, 0,
      JobTracker.TASKTRACKER_EXPIRY_INTERVAL,
      maps, reduces,
      trackerCount * maxMapTasksPerTracker,
      trackerCount * maxReduceTasksPerTracker,
      JobTrackerStatus.RUNNING);
}
项目:hadoop-2.6.0-cdh5.4.3    文件:TestRecoveryManager.java   
@After
public void tearDown() {
  // Shut the MR cluster down only if its jobtracker is still running.
  ClusterStatus clusterStatus = mr.getJobTrackerRunner().getJobTracker()
      .getClusterStatus(false);
  boolean jtRunning =
      clusterStatus.getJobTrackerStatus() == JobTrackerStatus.RUNNING;
  if (jtRunning) {
    mr.shutdown();
  }
  if (dfs != null) {
    dfs.shutdown();
  }
}
项目:hadoop-2.6.0-cdh5.4.3    文件:TestCapacityScheduler.java   
public ClusterStatus getClusterStatus() {
  // Synthesizes a status for the fake cluster; capacity scales linearly
  // with the number of registered trackers.
  final int trackerCount = trackers.size();
  return new ClusterStatus(trackerCount, maps, reduces,
      trackerCount * maxMapTasksPerTracker,
      trackerCount * maxReduceTasksPerTracker,
      JobTrackerStatus.RUNNING);
}
项目:hadoop-plus    文件:UtilsForTests.java   
/**
 * Blocks the calling thread until the jobtracker reports RUNNING.
 */
static void waitForJobTracker(JobClient jobClient) {
  boolean running = false;
  while (!running) {
    try {
      ClusterStatus clusterStatus = jobClient.getClusterStatus();
      while (clusterStatus.getJobTrackerStatus() != JobTrackerStatus.RUNNING) {
        waitFor(100);
        clusterStatus = jobClient.getClusterStatus();
      }
      running = true; // jt answered and reported RUNNING
    } catch (IOException ignored) {
      // RPC can fail while the jobtracker is still coming up; just retry.
    }
  }
}
项目:hadoop-plus    文件:ClusterStatus.java   
/**
 * Creates a cluster-status snapshot.
 *
 * @param trackers number of active tasktrackers
 * @param blacklists number of blacklisted tasktrackers
 * @param ttExpiryInterval the tasktracker expiry interval
 * @param maps currently running map tasks
 * @param reduces currently running reduce tasks
 * @param maxMaps map-task capacity of the cluster
 * @param maxReduces reduce-task capacity of the cluster
 * @param status the {@link JobTrackerStatus} of the <code>JobTracker</code>
 * @param numDecommissionedNodes number of decommissioned (excluded) trackers
 * @param numGrayListedTrackers number of graylisted trackers
 */
ClusterStatus(int trackers, int blacklists, long ttExpiryInterval, int maps,
    int reduces, int maxMaps, int maxReduces, JobTrackerStatus status,
    int numDecommissionedNodes, int numGrayListedTrackers) {
  // Copy every argument into its field, in parameter order.
  this.numActiveTrackers = trackers;
  this.numBlacklistedTrackers = blacklists;
  this.ttExpiryInterval = ttExpiryInterval;
  this.map_tasks = maps;
  this.reduce_tasks = reduces;
  this.max_map_tasks = maxMaps;
  this.max_reduce_tasks = maxReduces;
  this.status = status;
  this.numExcludedNodes = numDecommissionedNodes;
  this.grayListedTrackers = numGrayListedTrackers;
}
项目:hadoop-plus    文件:ClusterStatus.java   
/**
 * Deserializes this status from the wire format written by write():
 * active-tracker count and names, blacklist count and entries, the scalar
 * counters, the jobtracker status enum, and the graylist count, in order.
 */
public void readFields(DataInput in) throws IOException {
  numActiveTrackers = in.readInt();
  final int trackerNameCount = in.readInt();
  // A non-positive count simply yields no iterations; no guard needed.
  for (int idx = 0; idx < trackerNameCount; idx++) {
    activeTrackers.add(StringInterner.weakIntern(Text.readString(in)));
  }
  numBlacklistedTrackers = in.readInt();
  final int blackListedCount = in.readInt();
  for (int idx = 0; idx < blackListedCount; idx++) {
    BlackListInfo entry = new BlackListInfo();
    entry.readFields(in);
    blacklistedTrackersInfo.add(entry);
  }
  numExcludedNodes = in.readInt();
  ttExpiryInterval = in.readLong();
  map_tasks = in.readInt();
  reduce_tasks = in.readInt();
  max_map_tasks = in.readInt();
  max_reduce_tasks = in.readInt();
  status = WritableUtils.readEnum(in, JobTrackerStatus.class);
  grayListedTrackers = in.readInt();
}
项目:FlexMap    文件:UtilsForTests.java   
/**
 * Blocks the calling thread until the jobtracker reports RUNNING.
 */
static void waitForJobTracker(JobClient jobClient) {
  boolean running = false;
  while (!running) {
    try {
      ClusterStatus clusterStatus = jobClient.getClusterStatus();
      while (clusterStatus.getJobTrackerStatus() != JobTrackerStatus.RUNNING) {
        waitFor(100);
        clusterStatus = jobClient.getClusterStatus();
      }
      running = true; // jt answered and reported RUNNING
    } catch (IOException ignored) {
      // RPC can fail while the jobtracker is still coming up; just retry.
    }
  }
}
项目:FlexMap    文件:ClusterStatus.java   
/**
 * Creates a cluster-status snapshot.
 *
 * @param trackers number of active tasktrackers
 * @param blacklists number of blacklisted tasktrackers
 * @param ttExpiryInterval the tasktracker expiry interval
 * @param maps currently running map tasks
 * @param reduces currently running reduce tasks
 * @param maxMaps map-task capacity of the cluster
 * @param maxReduces reduce-task capacity of the cluster
 * @param status the {@link JobTrackerStatus} of the <code>JobTracker</code>
 * @param numDecommissionedNodes number of decommissioned (excluded) trackers
 * @param numGrayListedTrackers number of graylisted trackers
 */
ClusterStatus(int trackers, int blacklists, long ttExpiryInterval, int maps,
    int reduces, int maxMaps, int maxReduces, JobTrackerStatus status,
    int numDecommissionedNodes, int numGrayListedTrackers) {
  // Copy every argument into its field, in parameter order.
  this.numActiveTrackers = trackers;
  this.numBlacklistedTrackers = blacklists;
  this.ttExpiryInterval = ttExpiryInterval;
  this.map_tasks = maps;
  this.reduce_tasks = reduces;
  this.max_map_tasks = maxMaps;
  this.max_reduce_tasks = maxReduces;
  this.status = status;
  this.numExcludedNodes = numDecommissionedNodes;
  this.grayListedTrackers = numGrayListedTrackers;
}
项目:FlexMap    文件:ClusterStatus.java   
/**
 * Deserializes this status from the wire format written by write():
 * active-tracker count and names, blacklist count and entries, the scalar
 * counters, the jobtracker status enum, and the graylist count, in order.
 */
public void readFields(DataInput in) throws IOException {
  numActiveTrackers = in.readInt();
  final int trackerNameCount = in.readInt();
  // A non-positive count simply yields no iterations; no guard needed.
  for (int idx = 0; idx < trackerNameCount; idx++) {
    activeTrackers.add(StringInterner.weakIntern(Text.readString(in)));
  }
  numBlacklistedTrackers = in.readInt();
  final int blackListedCount = in.readInt();
  for (int idx = 0; idx < blackListedCount; idx++) {
    BlackListInfo entry = new BlackListInfo();
    entry.readFields(in);
    blacklistedTrackersInfo.add(entry);
  }
  numExcludedNodes = in.readInt();
  ttExpiryInterval = in.readLong();
  map_tasks = in.readInt();
  reduce_tasks = in.readInt();
  max_map_tasks = in.readInt();
  max_reduce_tasks = in.readInt();
  status = WritableUtils.readEnum(in, JobTrackerStatus.class);
  grayListedTrackers = in.readInt();
}
项目:hops    文件:UtilsForTests.java   
/**
 * Blocks the calling thread until the jobtracker reports RUNNING.
 */
static void waitForJobTracker(JobClient jobClient) {
  boolean running = false;
  while (!running) {
    try {
      ClusterStatus clusterStatus = jobClient.getClusterStatus();
      while (clusterStatus.getJobTrackerStatus() != JobTrackerStatus.RUNNING) {
        waitFor(100);
        clusterStatus = jobClient.getClusterStatus();
      }
      running = true; // jt answered and reported RUNNING
    } catch (IOException ignored) {
      // RPC can fail while the jobtracker is still coming up; just retry.
    }
  }
}
项目:hops    文件:ClusterStatus.java   
/**
 * Creates a cluster-status snapshot.
 *
 * @param trackers number of active tasktrackers
 * @param blacklists number of blacklisted tasktrackers
 * @param ttExpiryInterval the tasktracker expiry interval
 * @param maps currently running map tasks
 * @param reduces currently running reduce tasks
 * @param maxMaps map-task capacity of the cluster
 * @param maxReduces reduce-task capacity of the cluster
 * @param status the {@link JobTrackerStatus} of the <code>JobTracker</code>
 * @param numDecommissionedNodes number of decommissioned (excluded) trackers
 * @param numGrayListedTrackers number of graylisted trackers
 */
ClusterStatus(int trackers, int blacklists, long ttExpiryInterval, int maps,
    int reduces, int maxMaps, int maxReduces, JobTrackerStatus status,
    int numDecommissionedNodes, int numGrayListedTrackers) {
  // Copy every argument into its field, in parameter order.
  this.numActiveTrackers = trackers;
  this.numBlacklistedTrackers = blacklists;
  this.ttExpiryInterval = ttExpiryInterval;
  this.map_tasks = maps;
  this.reduce_tasks = reduces;
  this.max_map_tasks = maxMaps;
  this.max_reduce_tasks = maxReduces;
  this.status = status;
  this.numExcludedNodes = numDecommissionedNodes;
  this.grayListedTrackers = numGrayListedTrackers;
}
项目:hops    文件:ClusterStatus.java   
/**
 * Deserializes this status from the wire format written by write():
 * active-tracker count and names, blacklist count and entries, the scalar
 * counters, the jobtracker status enum, and the graylist count, in order.
 */
public void readFields(DataInput in) throws IOException {
  numActiveTrackers = in.readInt();
  final int trackerNameCount = in.readInt();
  // A non-positive count simply yields no iterations; no guard needed.
  for (int idx = 0; idx < trackerNameCount; idx++) {
    activeTrackers.add(StringInterner.weakIntern(Text.readString(in)));
  }
  numBlacklistedTrackers = in.readInt();
  final int blackListedCount = in.readInt();
  for (int idx = 0; idx < blackListedCount; idx++) {
    BlackListInfo entry = new BlackListInfo();
    entry.readFields(in);
    blacklistedTrackersInfo.add(entry);
  }
  numExcludedNodes = in.readInt();
  ttExpiryInterval = in.readLong();
  map_tasks = in.readInt();
  reduce_tasks = in.readInt();
  max_map_tasks = in.readInt();
  max_reduce_tasks = in.readInt();
  status = WritableUtils.readEnum(in, JobTrackerStatus.class);
  grayListedTrackers = in.readInt();
}
项目:hadoop-TCP    文件:UtilsForTests.java   
/**
 * Blocks the calling thread until the jobtracker reports RUNNING.
 */
static void waitForJobTracker(JobClient jobClient) {
  boolean running = false;
  while (!running) {
    try {
      ClusterStatus clusterStatus = jobClient.getClusterStatus();
      while (clusterStatus.getJobTrackerStatus() != JobTrackerStatus.RUNNING) {
        waitFor(100);
        clusterStatus = jobClient.getClusterStatus();
      }
      running = true; // jt answered and reported RUNNING
    } catch (IOException ignored) {
      // RPC can fail while the jobtracker is still coming up; just retry.
    }
  }
}
项目:hadoop-TCP    文件:ClusterStatus.java   
/**
 * Creates a cluster-status snapshot.
 *
 * @param trackers number of active tasktrackers
 * @param blacklists number of blacklisted tasktrackers
 * @param ttExpiryInterval the tasktracker expiry interval
 * @param maps currently running map tasks
 * @param reduces currently running reduce tasks
 * @param maxMaps map-task capacity of the cluster
 * @param maxReduces reduce-task capacity of the cluster
 * @param status the {@link JobTrackerStatus} of the <code>JobTracker</code>
 * @param numDecommissionedNodes number of decommissioned (excluded) trackers
 * @param numGrayListedTrackers number of graylisted trackers
 */
ClusterStatus(int trackers, int blacklists, long ttExpiryInterval, int maps,
    int reduces, int maxMaps, int maxReduces, JobTrackerStatus status,
    int numDecommissionedNodes, int numGrayListedTrackers) {
  // Copy every argument into its field, in parameter order.
  this.numActiveTrackers = trackers;
  this.numBlacklistedTrackers = blacklists;
  this.ttExpiryInterval = ttExpiryInterval;
  this.map_tasks = maps;
  this.reduce_tasks = reduces;
  this.max_map_tasks = maxMaps;
  this.max_reduce_tasks = maxReduces;
  this.status = status;
  this.numExcludedNodes = numDecommissionedNodes;
  this.grayListedTrackers = numGrayListedTrackers;
}
项目:hadoop-TCP    文件:ClusterStatus.java   
/**
 * Deserializes this status from the wire format written by write():
 * active-tracker count and names, blacklist count and entries, the scalar
 * counters, the jobtracker status enum, and the graylist count, in order.
 */
public void readFields(DataInput in) throws IOException {
  numActiveTrackers = in.readInt();
  final int trackerNameCount = in.readInt();
  // A non-positive count simply yields no iterations; no guard needed.
  for (int idx = 0; idx < trackerNameCount; idx++) {
    activeTrackers.add(StringInterner.weakIntern(Text.readString(in)));
  }
  numBlacklistedTrackers = in.readInt();
  final int blackListedCount = in.readInt();
  for (int idx = 0; idx < blackListedCount; idx++) {
    BlackListInfo entry = new BlackListInfo();
    entry.readFields(in);
    blacklistedTrackersInfo.add(entry);
  }
  numExcludedNodes = in.readInt();
  ttExpiryInterval = in.readLong();
  map_tasks = in.readInt();
  reduce_tasks = in.readInt();
  max_map_tasks = in.readInt();
  max_reduce_tasks = in.readInt();
  status = WritableUtils.readEnum(in, JobTrackerStatus.class);
  grayListedTrackers = in.readInt();
}
项目:hardfs    文件:UtilsForTests.java   
/**
 * Blocks the calling thread until the jobtracker reports RUNNING.
 */
static void waitForJobTracker(JobClient jobClient) {
  boolean running = false;
  while (!running) {
    try {
      ClusterStatus clusterStatus = jobClient.getClusterStatus();
      while (clusterStatus.getJobTrackerStatus() != JobTrackerStatus.RUNNING) {
        waitFor(100);
        clusterStatus = jobClient.getClusterStatus();
      }
      running = true; // jt answered and reported RUNNING
    } catch (IOException ignored) {
      // RPC can fail while the jobtracker is still coming up; just retry.
    }
  }
}
项目:hardfs    文件:ClusterStatus.java   
/**
 * Creates a cluster-status snapshot.
 *
 * @param trackers number of active tasktrackers
 * @param blacklists number of blacklisted tasktrackers
 * @param ttExpiryInterval the tasktracker expiry interval
 * @param maps currently running map tasks
 * @param reduces currently running reduce tasks
 * @param maxMaps map-task capacity of the cluster
 * @param maxReduces reduce-task capacity of the cluster
 * @param status the {@link JobTrackerStatus} of the <code>JobTracker</code>
 * @param numDecommissionedNodes number of decommissioned (excluded) trackers
 * @param numGrayListedTrackers number of graylisted trackers
 */
ClusterStatus(int trackers, int blacklists, long ttExpiryInterval, int maps,
    int reduces, int maxMaps, int maxReduces, JobTrackerStatus status,
    int numDecommissionedNodes, int numGrayListedTrackers) {
  // Copy every argument into its field, in parameter order.
  this.numActiveTrackers = trackers;
  this.numBlacklistedTrackers = blacklists;
  this.ttExpiryInterval = ttExpiryInterval;
  this.map_tasks = maps;
  this.reduce_tasks = reduces;
  this.max_map_tasks = maxMaps;
  this.max_reduce_tasks = maxReduces;
  this.status = status;
  this.numExcludedNodes = numDecommissionedNodes;
  this.grayListedTrackers = numGrayListedTrackers;
}
项目:hardfs    文件:ClusterStatus.java   
/**
 * Deserializes this status from the wire format written by write():
 * active-tracker count and names, blacklist count and entries, the scalar
 * counters, the jobtracker status enum, and the graylist count, in order.
 */
public void readFields(DataInput in) throws IOException {
  numActiveTrackers = in.readInt();
  final int trackerNameCount = in.readInt();
  // A non-positive count simply yields no iterations; no guard needed.
  for (int idx = 0; idx < trackerNameCount; idx++) {
    activeTrackers.add(StringInterner.weakIntern(Text.readString(in)));
  }
  numBlacklistedTrackers = in.readInt();
  final int blackListedCount = in.readInt();
  for (int idx = 0; idx < blackListedCount; idx++) {
    BlackListInfo entry = new BlackListInfo();
    entry.readFields(in);
    blacklistedTrackersInfo.add(entry);
  }
  numExcludedNodes = in.readInt();
  ttExpiryInterval = in.readLong();
  map_tasks = in.readInt();
  reduce_tasks = in.readInt();
  max_map_tasks = in.readInt();
  max_reduce_tasks = in.readInt();
  status = WritableUtils.readEnum(in, JobTrackerStatus.class);
  grayListedTrackers = in.readInt();
}