ClusterStatus(int trackers, int blacklists, long ttExpiryInterval,
              int maps, int reduces, int maxMaps, int maxReduces,
              JobTrackerStatus status, int numDecommissionedNodes,
              long used_memory, long max_memory) {
  numActiveTrackers = trackers;
  numBlacklistedTrackers = blacklists;
  this.numExcludedNodes = numDecommissionedNodes;
  this.ttExpiryInterval = ttExpiryInterval;
  map_tasks = maps;
  reduce_tasks = reduces;
  max_map_tasks = maxMaps;
  max_reduce_tasks = maxReduces;
  this.status = status;
  this.used_memory = used_memory;
  this.max_memory = max_memory;
}
public synchronized ClusterStatus getClusterStatus(boolean detailed) {
  synchronized (taskTrackers) {
    if (detailed) {
      List<List<String>> trackerNames = taskTrackerNames();
      return new ClusterStatus(trackerNames.get(0),
          trackerNames.get(1),
          TASKTRACKER_EXPIRY_INTERVAL,
          totalMaps,
          totalReduces,
          totalMapTaskCapacity,
          totalReduceTaskCapacity,
          JobTrackerStatus.valueOf(state.name()),
          getExcludedNodes().size());
    } else {
      return new ClusterStatus(taskTrackers.size() - getBlacklistedTrackerCount(),
          getBlacklistedTrackerCount(),
          TASKTRACKER_EXPIRY_INTERVAL,
          totalMaps,
          totalReduces,
          totalMapTaskCapacity,
          totalReduceTaskCapacity,
          JobTrackerStatus.valueOf(state.name()),
          getExcludedNodes().size());
    }
  }
}
@Test(timeout=60000)
public void testClientFailover() throws Exception {
  LOG.info("Running testClientFailover");
  startCluster();

  // Test with client. c.f. HATestUtil.setFailoverConfigurations
  JobClient jc = new JobClient(conf);
  assertEquals("client sees jt running", JobTrackerStatus.RUNNING,
      jc.getClusterStatus().getJobTrackerStatus());

  // failover to jt2
  FailoverController fc = new FailoverController(conf,
      RequestSource.REQUEST_BY_USER);
  fc.failover(target1, target2, false, false);
  cluster.waitActive();
  assertEquals("jt2 running", JobTrackerStatus.RUNNING,
      jt2.getJobTracker().getClusterStatus().getJobTrackerStatus());
  assertNull("jt1 not running", jt1.getJobTracker());

  assertEquals("client still sees jt running", JobTrackerStatus.RUNNING,
      jc.getClusterStatus().getJobTrackerStatus());
}
public void readFields(DataInput in) throws IOException {
  numActiveTrackers = in.readInt();
  int numTrackerNames = in.readInt();
  if (numTrackerNames > 0) {
    for (int i = 0; i < numTrackerNames; i++) {
      String name = Text.readString(in);
      activeTrackers.add(name);
    }
  }
  numBlacklistedTrackers = in.readInt();
  int blackListTrackerInfoSize = in.readInt();
  if (blackListTrackerInfoSize > 0) {
    for (int i = 0; i < blackListTrackerInfoSize; i++) {
      BlackListInfo info = new BlackListInfo();
      info.readFields(in);
      blacklistedTrackersInfo.add(info);
    }
  }
  numExcludedNodes = in.readInt();
  ttExpiryInterval = in.readLong();
  map_tasks = in.readInt();
  reduce_tasks = in.readInt();
  max_map_tasks = in.readInt();
  max_reduce_tasks = in.readInt();
  status = WritableUtils.readEnum(in, JobTrackerStatus.class);
}
/**
 * Wait for the jobtracker to be RUNNING.
 */
static void waitForJobTracker(JobClient jobClient) {
  while (true) {
    try {
      ClusterStatus status = jobClient.getClusterStatus();
      while (status.getJobTrackerStatus() != JobTrackerStatus.RUNNING) {
        waitFor(100);
        status = jobClient.getClusterStatus();
      }
      break; // means that the jt is ready
    } catch (IOException ioe) {
      // jobtracker not reachable yet; keep retrying
    }
  }
}
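Once waitForJobTracker returns, the same JobClient handle exposes the rest of the cluster summary. A minimal sketch of polling it, assuming a JobConf already pointed at the cluster; the class name and printed labels are illustrative only:

import java.io.IOException;

import org.apache.hadoop.mapred.ClusterStatus;
import org.apache.hadoop.mapred.JobClient;
import org.apache.hadoop.mapred.JobConf;

public class ClusterStatusProbe {
  public static void main(String[] args) throws IOException {
    // Assumes the JobTracker address is already set in the configuration.
    JobConf conf = new JobConf();
    JobClient jc = new JobClient(conf);

    // Summary view; jc.getClusterStatus(true) returns the detailed view
    // that also carries tracker names and blacklist reports.
    ClusterStatus status = jc.getClusterStatus();

    System.out.println("state       : " + status.getJobTrackerStatus());
    System.out.println("trackers    : " + status.getTaskTrackers()
        + " (blacklisted: " + status.getBlacklistedTrackers() + ")");
    System.out.println("map slots   : " + status.getMapTasks()
        + " running / " + status.getMaxMapTasks() + " max");
    System.out.println("reduce slots: " + status.getReduceTasks()
        + " running / " + status.getMaxReduceTasks() + " max");
  }
}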
/**
 * Construct a new cluster status.
 *
 * @param trackers no. of tasktrackers in the cluster
 * @param blacklists no. of blacklisted tasktrackers in the cluster
 * @param ttExpiryInterval the tasktracker expiry interval
 * @param maps no. of currently running map-tasks in the cluster
 * @param reduces no. of currently running reduce-tasks in the cluster
 * @param maxMaps the maximum no. of map tasks in the cluster
 * @param maxReduces the maximum no. of reduce tasks in the cluster
 * @param status the {@link JobTrackerStatus} of the <code>JobTracker</code>
 * @param numDecommissionedNodes number of decommissioned trackers
 * @param numGrayListedTrackers number of graylisted trackers
 */
ClusterStatus(int trackers, int blacklists, long ttExpiryInterval,
              int maps, int reduces, int maxMaps, int maxReduces,
              JobTrackerStatus status, int numDecommissionedNodes,
              int numGrayListedTrackers) {
  numActiveTrackers = trackers;
  numBlacklistedTrackers = blacklists;
  this.numExcludedNodes = numDecommissionedNodes;
  this.ttExpiryInterval = ttExpiryInterval;
  map_tasks = maps;
  reduce_tasks = reduces;
  max_map_tasks = maxMaps;
  max_reduce_tasks = maxReduces;
  this.status = status;
  this.grayListedTrackers = numGrayListedTrackers;
}
public void readFields(DataInput in) throws IOException {
  numActiveTrackers = in.readInt();
  int numTrackerNames = in.readInt();
  if (numTrackerNames > 0) {
    for (int i = 0; i < numTrackerNames; i++) {
      String name = StringInterner.weakIntern(Text.readString(in));
      activeTrackers.add(name);
    }
  }
  numBlacklistedTrackers = in.readInt();
  int blackListTrackerInfoSize = in.readInt();
  if (blackListTrackerInfoSize > 0) {
    for (int i = 0; i < blackListTrackerInfoSize; i++) {
      BlackListInfo info = new BlackListInfo();
      info.readFields(in);
      blacklistedTrackersInfo.add(info);
    }
  }
  numExcludedNodes = in.readInt();
  ttExpiryInterval = in.readLong();
  map_tasks = in.readInt();
  reduce_tasks = in.readInt();
  max_map_tasks = in.readInt();
  max_reduce_tasks = in.readInt();
  status = WritableUtils.readEnum(in, JobTrackerStatus.class);
  grayListedTrackers = in.readInt();
}
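readFields consumes exactly the field order that the corresponding write method produces, so a buffer round trip is a quick way to exercise both halves. A minimal sketch, assuming it sits in the org.apache.hadoop.mapred package so the package-private constructors are visible; the class name and the slot counts are illustrative only:

package org.apache.hadoop.mapred;

import org.apache.hadoop.io.DataInputBuffer;
import org.apache.hadoop.io.DataOutputBuffer;
import org.apache.hadoop.mapreduce.Cluster.JobTrackerStatus;

public class ClusterStatusRoundTrip {
  public static void main(String[] args) throws Exception {
    // Summary-style status: 10 trackers, 1 blacklisted, none graylisted.
    ClusterStatus original = new ClusterStatus(10, 1, 600000L,
        80, 20, 200, 100, JobTrackerStatus.RUNNING, 0, 0);

    // Serialize with write(), then feed the same bytes back to readFields().
    DataOutputBuffer out = new DataOutputBuffer();
    original.write(out);

    DataInputBuffer in = new DataInputBuffer();
    in.reset(out.getData(), out.getLength());

    ClusterStatus copy = new ClusterStatus();
    copy.readFields(in);

    System.out.println(copy.getTaskTrackers() + " trackers, status "
        + copy.getJobTrackerStatus());
  }
}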
public void waitActive() throws IOException {
  while (true) {
    for (JobTrackerHADaemon jtHaDaemon : jtHaDaemonList) {
      JobTracker jt = jtHaDaemon.getJobTracker();
      if (jt != null) {
        if (jt.getClusterStatus().getJobTrackerStatus() == JobTrackerStatus.RUNNING) {
          return;
        }
      }
    }
    try {
      Thread.sleep(1000);
    } catch (InterruptedException ie) {}
  }
}
@Override
public ClusterStatus getClusterStatus() {
  int numTrackers = trackers.size();
  return new ClusterStatus(numTrackers, 0,
      JobTracker.TASKTRACKER_EXPIRY_INTERVAL,
      maps, reduces,
      numTrackers * maxMapTasksPerTracker,
      numTrackers * maxReduceTasksPerTracker,
      JobTrackerStatus.RUNNING);
}
public ClusterStatus getClusterStatus() {
  int numTrackers = trackers.size();
  return new ClusterStatus(numTrackers, 0,
      JobTracker.TASKTRACKER_EXPIRY_INTERVAL,
      maps, reduces,
      numTrackers * maxMapTasksPerTracker,
      numTrackers * maxReduceTasksPerTracker,
      JobTrackerStatus.RUNNING);
}
@After
public void tearDown() {
  ClusterStatus status = mr.getJobTrackerRunner().getJobTracker()
      .getClusterStatus(false);
  if (status.getJobTrackerStatus() == JobTrackerStatus.RUNNING) {
    mr.shutdown();
  }
  if (dfs != null) {
    dfs.shutdown();
  }
}
public ClusterStatus getClusterStatus() {
  int numTrackers = trackers.size();
  return new ClusterStatus(numTrackers, maps, reduces,
      numTrackers * maxMapTasksPerTracker,
      numTrackers * maxReduceTasksPerTracker,
      JobTrackerStatus.RUNNING);
}