Example source code for the Java class org.apache.hadoop.mapreduce.TaskReport

Project: hadoop    File: CLI.java
/**
 * Display the information about a job's tasks, of a particular type and
 * in a particular state
 * 
 * @param job the job
 * @param type the type of the task (map/reduce/setup/cleanup)
 * @param state the state of the task 
 * (pending/running/completed/failed/killed)
 */
protected void displayTasks(Job job, String type, String state) 
throws IOException, InterruptedException {
  TaskReport[] reports = job.getTaskReports(TaskType.valueOf(
      org.apache.hadoop.util.StringUtils.toUpperCase(type)));
  for (TaskReport report : reports) {
    TIPStatus status = report.getCurrentStatus();
    if ((state.equalsIgnoreCase("pending") && status == TIPStatus.PENDING) ||
        (state.equalsIgnoreCase("running") && status == TIPStatus.RUNNING) ||
        (state.equalsIgnoreCase("completed") && status == TIPStatus.COMPLETE) ||
        (state.equalsIgnoreCase("failed") && status == TIPStatus.FAILED) ||
        (state.equalsIgnoreCase("killed") && status == TIPStatus.KILLED)) {
      printTaskAttempts(report);
    }
  }
}
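
A minimal usage sketch of the same Job.getTaskReports/TIPStatus API that displayTasks relies on; the cluster wiring, the job ID argument, and the countTasksInState helper name are illustrative assumptions, not code from any of the listed projects:

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.mapred.TIPStatus;
import org.apache.hadoop.mapreduce.Cluster;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.JobID;
import org.apache.hadoop.mapreduce.TaskReport;
import org.apache.hadoop.mapreduce.TaskType;

public class TaskReportSketch {

  // Hypothetical helper: count the tasks of one type that are currently in the given state.
  static int countTasksInState(Job job, TaskType type, TIPStatus wanted)
      throws IOException, InterruptedException {
    int count = 0;
    for (TaskReport report : job.getTaskReports(type)) {
      if (report.getCurrentStatus() == wanted) {
        count++;
      }
    }
    return count;
  }

  public static void main(String[] args) throws Exception {
    // Assumes a reachable cluster and an existing job ID passed as the first argument.
    Cluster cluster = new Cluster(new Configuration());
    Job job = cluster.getJob(JobID.forName(args[0]));
    if (job == null) {
      System.err.println("No such job: " + args[0]);
      return;
    }
    System.out.println("Running map tasks: "
        + countTasksInState(job, TaskType.MAP, TIPStatus.RUNNING));
  }
}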
Project: aliyun-oss-hadoop-fs    File: CLI.java
/**
 * Display the information about a job's tasks, of a particular type and
 * in a particular state
 * 
 * @param job the job
 * @param type the type of the task (map/reduce/setup/cleanup)
 * @param state the state of the task 
 * (pending/running/completed/failed/killed)
 * @throws IOException when there is an error communicating with the master
 * @throws InterruptedException
 * @throws IllegalArgumentException if an invalid type/state is passed
 */
protected void displayTasks(Job job, String type, String state) 
throws IOException, InterruptedException {

  TaskReport[] reports = job.getTaskReports(TaskType.valueOf(
      org.apache.hadoop.util.StringUtils.toUpperCase(type)));
  for (TaskReport report : reports) {
    TIPStatus status = report.getCurrentStatus();
    if ((state.equalsIgnoreCase("pending") && status == TIPStatus.PENDING) ||
        (state.equalsIgnoreCase("running") && status == TIPStatus.RUNNING) ||
        (state.equalsIgnoreCase("completed") && status == TIPStatus.COMPLETE) ||
        (state.equalsIgnoreCase("failed") && status == TIPStatus.FAILED) ||
        (state.equalsIgnoreCase("killed") && status == TIPStatus.KILLED)) {
      printTaskAttempts(report);
    }
  }
}
Project: big-c    File: CLI.java
/**
 * Display the information about a job's tasks, of a particular type and
 * in a particular state
 * 
 * @param job the job
 * @param type the type of the task (map/reduce/setup/cleanup)
 * @param state the state of the task 
 * (pending/running/completed/failed/killed)
 */
protected void displayTasks(Job job, String type, String state) 
throws IOException, InterruptedException {
  TaskReport[] reports = job.getTaskReports(TaskType.valueOf(
      org.apache.hadoop.util.StringUtils.toUpperCase(type)));
  for (TaskReport report : reports) {
    TIPStatus status = report.getCurrentStatus();
    if ((state.equalsIgnoreCase("pending") && status == TIPStatus.PENDING) ||
        (state.equalsIgnoreCase("running") && status == TIPStatus.RUNNING) ||
        (state.equalsIgnoreCase("completed") && status == TIPStatus.COMPLETE) ||
        (state.equalsIgnoreCase("failed") && status == TIPStatus.FAILED) ||
        (state.equalsIgnoreCase("killed") && status == TIPStatus.KILLED)) {
      printTaskAttempts(report);
    }
  }
}
Project: hops    File: CLI.java
/**
 * Display the information about a job's tasks, of a particular type and
 * in a particular state
 * 
 * @param job the job
 * @param type the type of the task (map/reduce/setup/cleanup)
 * @param state the state of the task 
 * (pending/running/completed/failed/killed)
 */
protected void displayTasks(Job job, String type, String state) 
throws IOException, InterruptedException {
  TaskReport[] reports = job.getTaskReports(TaskType.valueOf(
      org.apache.hadoop.util.StringUtils.toUpperCase(type)));
  for (TaskReport report : reports) {
    TIPStatus status = report.getCurrentStatus();
    if ((state.equalsIgnoreCase("pending") && status == TIPStatus.PENDING) ||
        (state.equalsIgnoreCase("running") && status == TIPStatus.RUNNING) ||
        (state.equalsIgnoreCase("completed") && status == TIPStatus.COMPLETE) ||
        (state.equalsIgnoreCase("failed") && status == TIPStatus.FAILED) ||
        (state.equalsIgnoreCase("killed") && status == TIPStatus.KILLED)) {
      printTaskAttempts(report);
    }
  }
}
Project: sequenceiq-samples    File: MRJobStatus.java
public JobStatus printJobStatus(YARNRunner yarnRunner, JobID jobID) throws IOException, InterruptedException {
    JobStatus jobStatus;
    jobStatus = yarnRunner.getJobStatus(jobID);

    // print overall job M/R progresses
    LOGGER.info("\nJob " + jobStatus.getJobName() + "in queue (" + jobStatus.getQueue() + ")" + " progress M/R: " + jobStatus.getMapProgress() + "/" + jobStatus.getReduceProgress());
    LOGGER.info("Tracking URL : " + jobStatus.getTrackingUrl());
    LOGGER.info("Reserved memory : " + jobStatus.getReservedMem() + ", used memory : "+ jobStatus.getUsedMem() + " and used slots : "+ jobStatus.getNumUsedSlots());

    // list map & reduce tasks statuses and progress        
    TaskReport[] reports = yarnRunner.getTaskReports(jobID, TaskType.MAP);
    for (int i = 0; i < reports.length; i++) {
        LOGGER.info("MAP: Status " + reports[i].getCurrentStatus() + " with task ID " + reports[i].getTaskID() + ", and progress " + reports[i].getProgress()); 
    }
    reports = yarnRunner.getTaskReports(jobID, TaskType.REDUCE);
    for (int i = 0; i < reports.length; i++) {
        LOGGER.info("REDUCE: " + reports[i].getCurrentStatus() + " with task ID " + reports[i].getTaskID() + ", and progress " + reports[i].getProgress()); 
    }
    return jobStatus;
}
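
A hedged sketch of how printJobStatus above might be polled until the job finishes; the MRJobStatus reporter instance and the five-second interval are assumptions, not part of the sequenceiq-samples code:

// Hypothetical polling helper built on printJobStatus(...) above.
public JobStatus waitForCompletion(MRJobStatus reporter, YARNRunner yarnRunner, JobID jobID)
        throws IOException, InterruptedException {
    JobStatus status = reporter.printJobStatus(yarnRunner, jobID);
    while (!status.isJobComplete()) {
        Thread.sleep(5000);  // arbitrary 5 s polling interval
        status = reporter.printJobStatus(yarnRunner, jobID);
    }
    return status;
}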
Project: incubator-blur    File: IndexerJobDriver.java
PartitionedInputResult(Path partitionedInputData, Counters counters, int shards, TaskReport[] taskReports) {
  _partitionedInputData = partitionedInputData;
  _counters = counters;
  _rowIdsFromNewData = new long[shards];
  _rowIdsToUpdateFromNewData = new long[shards];
  _rowIdsFromIndex = new long[shards];
  for (TaskReport tr : taskReports) {
    int id = tr.getTaskID().getId();
    Counters taskCounters = tr.getTaskCounters();
    Counter total = taskCounters.findCounter(BlurIndexCounter.ROW_IDS_FROM_NEW_DATA);
    _rowIdsFromNewData[id] = total.getValue();
    Counter update = taskCounters.findCounter(BlurIndexCounter.ROW_IDS_TO_UPDATE_FROM_NEW_DATA);
    _rowIdsToUpdateFromNewData[id] = update.getValue();
    Counter index = taskCounters.findCounter(BlurIndexCounter.ROW_IDS_FROM_INDEX);
    _rowIdsFromIndex[id] = index.getValue();
  }
}
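
A hedged sketch of how the constructor above could be fed once the partitioning job finishes; the job handle, shard count, and output path variables are placeholders, not code from incubator-blur:

// Hypothetical call site: the reports and counters come from the completed job.
Counters counters = job.getCounters();
TaskReport[] reports = job.getTaskReports(TaskType.REDUCE);
PartitionedInputResult result =
    new PartitionedInputResult(partitionedInputData, counters, shardCount, reports);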
Project: hadoop    File: JobClientUnitTest.java
@Test
public void testMapTaskReportsWithNullJob() throws Exception {
  TestJobClient client = new TestJobClient(new JobConf());
  Cluster mockCluster = mock(Cluster.class);
  client.setCluster(mockCluster);
  JobID id = new JobID("test",0);

  when(mockCluster.getJob(id)).thenReturn(null);

  TaskReport[] result = client.getMapTaskReports(id);
  assertEquals(0, result.length);

  verify(mockCluster).getJob(id);
}
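
These JobClientUnitTest cases depend on a small JobClient subclass that lets the mocked Cluster be injected. A hedged reconstruction of that helper (the real one lives inside JobClientUnitTest in the org.apache.hadoop.mapred package, where the cluster field is reachable):

private static class TestJobClient extends JobClient {

  TestJobClient(JobConf jobConf) throws IOException {
    super(jobConf);
  }

  void setCluster(Cluster cluster) {
    this.cluster = cluster;  // swap in the mock stubbed with when(mockCluster.getJob(id))
  }
}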
Project: hadoop    File: JobClientUnitTest.java
@Test
public void testReduceTaskReportsWithNullJob() throws Exception {
  TestJobClient client = new TestJobClient(new JobConf());
  Cluster mockCluster = mock(Cluster.class);
  client.setCluster(mockCluster);
  JobID id = new JobID("test",0);

  when(mockCluster.getJob(id)).thenReturn(null);

  TaskReport[] result = client.getReduceTaskReports(id);
  assertEquals(0, result.length);

  verify(mockCluster).getJob(id);
}
Project: hadoop    File: JobClientUnitTest.java
@Test
public void testSetupTaskReportsWithNullJob() throws Exception {
  TestJobClient client = new TestJobClient(new JobConf());
  Cluster mockCluster = mock(Cluster.class);
  client.setCluster(mockCluster);
  JobID id = new JobID("test",0);

  when(mockCluster.getJob(id)).thenReturn(null);

  TaskReport[] result = client.getSetupTaskReports(id);
  assertEquals(0, result.length);

  verify(mockCluster).getJob(id);
}
Project: hadoop    File: JobClientUnitTest.java
@Test
public void testCleanupTaskReportsWithNullJob() throws Exception {
  TestJobClient client = new TestJobClient(new JobConf());
  Cluster mockCluster = mock(Cluster.class);
  client.setCluster(mockCluster);
  JobID id = new JobID("test",0);

  when(mockCluster.getJob(id)).thenReturn(null);

  TaskReport[] result = client.getCleanupTaskReports(id);
  assertEquals(0, result.length);

  verify(mockCluster).getJob(id);
}
Project: hadoop    File: CLI.java
private void printTaskAttempts(TaskReport report) {
  if (report.getCurrentStatus() == TIPStatus.COMPLETE) {
    System.out.println(report.getSuccessfulTaskAttemptId());
  } else if (report.getCurrentStatus() == TIPStatus.RUNNING) {
    for (TaskAttemptID t : 
      report.getRunningTaskAttemptIds()) {
      System.out.println(t);
    }
  }
}
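
displayTasks and printTaskAttempts back the "mapred job -list-attempt-ids <job-id> <task-type> <task-state>" command. A hedged sketch of driving the same CLI class programmatically, with a placeholder job ID:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.mapreduce.tools.CLI;
import org.apache.hadoop.util.ToolRunner;

public class ListAttemptIds {
  public static void main(String[] args) throws Exception {
    // Placeholder job ID; substitute a real one from the cluster.
    int rc = ToolRunner.run(new Configuration(), new CLI(),
        new String[] {"-list-attempt-ids", "job_1400000000000_0001", "map", "running"});
    System.exit(rc);
  }
}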
Project: aliyun-oss-hadoop-fs    File: JobClientUnitTest.java
@Test
public void testMapTaskReportsWithNullJob() throws Exception {
  TestJobClient client = new TestJobClient(new JobConf());
  Cluster mockCluster = mock(Cluster.class);
  client.setCluster(mockCluster);
  JobID id = new JobID("test",0);

  when(mockCluster.getJob(id)).thenReturn(null);

  TaskReport[] result = client.getMapTaskReports(id);
  assertEquals(0, result.length);

  verify(mockCluster).getJob(id);
}
Project: aliyun-oss-hadoop-fs    File: JobClientUnitTest.java
@Test
public void testReduceTaskReportsWithNullJob() throws Exception {
  TestJobClient client = new TestJobClient(new JobConf());
  Cluster mockCluster = mock(Cluster.class);
  client.setCluster(mockCluster);
  JobID id = new JobID("test",0);

  when(mockCluster.getJob(id)).thenReturn(null);

  TaskReport[] result = client.getReduceTaskReports(id);
  assertEquals(0, result.length);

  verify(mockCluster).getJob(id);
}
Project: aliyun-oss-hadoop-fs    File: JobClientUnitTest.java
@Test
public void testSetupTaskReportsWithNullJob() throws Exception {
  TestJobClient client = new TestJobClient(new JobConf());
  Cluster mockCluster = mock(Cluster.class);
  client.setCluster(mockCluster);
  JobID id = new JobID("test",0);

  when(mockCluster.getJob(id)).thenReturn(null);

  TaskReport[] result = client.getSetupTaskReports(id);
  assertEquals(0, result.length);

  verify(mockCluster).getJob(id);
}
Project: aliyun-oss-hadoop-fs    File: JobClientUnitTest.java
@Test
public void testCleanupTaskReportsWithNullJob() throws Exception {
  TestJobClient client = new TestJobClient(new JobConf());
  Cluster mockCluster = mock(Cluster.class);
  client.setCluster(mockCluster);
  JobID id = new JobID("test",0);

  when(mockCluster.getJob(id)).thenReturn(null);

  TaskReport[] result = client.getCleanupTaskReports(id);
  assertEquals(0, result.length);

  verify(mockCluster).getJob(id);
}
Project: aliyun-oss-hadoop-fs    File: CLI.java
private void printTaskAttempts(TaskReport report) {
  if (report.getCurrentStatus() == TIPStatus.COMPLETE) {
    System.out.println(report.getSuccessfulTaskAttemptId());
  } else if (report.getCurrentStatus() == TIPStatus.RUNNING) {
    for (TaskAttemptID t : 
      report.getRunningTaskAttemptIds()) {
      System.out.println(t);
    }
  }
}
Project: big-c    File: JobClientUnitTest.java
@Test
public void testMapTaskReportsWithNullJob() throws Exception {
  TestJobClient client = new TestJobClient(new JobConf());
  Cluster mockCluster = mock(Cluster.class);
  client.setCluster(mockCluster);
  JobID id = new JobID("test",0);

  when(mockCluster.getJob(id)).thenReturn(null);

  TaskReport[] result = client.getMapTaskReports(id);
  assertEquals(0, result.length);

  verify(mockCluster).getJob(id);
}
Project: big-c    File: JobClientUnitTest.java
@Test
public void testReduceTaskReportsWithNullJob() throws Exception {
  TestJobClient client = new TestJobClient(new JobConf());
  Cluster mockCluster = mock(Cluster.class);
  client.setCluster(mockCluster);
  JobID id = new JobID("test",0);

  when(mockCluster.getJob(id)).thenReturn(null);

  TaskReport[] result = client.getReduceTaskReports(id);
  assertEquals(0, result.length);

  verify(mockCluster).getJob(id);
}
Project: big-c    File: JobClientUnitTest.java
@Test
public void testSetupTaskReportsWithNullJob() throws Exception {
  TestJobClient client = new TestJobClient(new JobConf());
  Cluster mockCluster = mock(Cluster.class);
  client.setCluster(mockCluster);
  JobID id = new JobID("test",0);

  when(mockCluster.getJob(id)).thenReturn(null);

  TaskReport[] result = client.getSetupTaskReports(id);
  assertEquals(0, result.length);

  verify(mockCluster).getJob(id);
}
Project: big-c    File: JobClientUnitTest.java
@Test
public void testCleanupTaskReportsWithNullJob() throws Exception {
  TestJobClient client = new TestJobClient(new JobConf());
  Cluster mockCluster = mock(Cluster.class);
  client.setCluster(mockCluster);
  JobID id = new JobID("test",0);

  when(mockCluster.getJob(id)).thenReturn(null);

  TaskReport[] result = client.getCleanupTaskReports(id);
  assertEquals(0, result.length);

  verify(mockCluster).getJob(id);
}
Project: big-c    File: CLI.java
private void printTaskAttempts(TaskReport report) {
  if (report.getCurrentStatus() == TIPStatus.COMPLETE) {
    System.out.println(report.getSuccessfulTaskAttemptId());
  } else if (report.getCurrentStatus() == TIPStatus.RUNNING) {
    for (TaskAttemptID t : 
      report.getRunningTaskAttemptIds()) {
      System.out.println(t);
    }
  }
}
Project: indexr    File: UpdateColumnJob.java
public boolean doRun(Config upcolConfig) throws Exception {
    JobConf jobConf = new JobConf(getConf(), UpdateColumnJob.class);
    jobConf.setKeepFailedTaskFiles(false);
    jobConf.setNumReduceTasks(0);
    String jobName = String.format("indexr-upcol-%s-%s-%s",
            upcolConfig.table,
            LocalDateTime.now().format(timeFormatter),
            RandomStringUtils.randomAlphabetic(5));
    jobConf.setJobName(jobName);
    jobConf.set(CONFKEY, JsonUtil.toJson(upcolConfig));
    Path workDir = new Path(jobConf.getWorkingDirectory(), jobName);
    jobConf.setWorkingDirectory(workDir);

    Job job = Job.getInstance(jobConf);
    job.setInputFormatClass(SegmentInputFormat.class);
    job.setMapperClass(UpColSegmentMapper.class);
    job.setJarByClass(UpdateColumnJob.class);
    job.setMapOutputKeyClass(Text.class);
    job.setMapOutputValueClass(Text.class);
    job.setMapSpeculativeExecution(false);
    job.setOutputFormatClass(UpColSegmentOutputFormat.class);

    job.submit();
    boolean ok = job.waitForCompletion(true);
    if (!ok) {
        TaskReport[] reports = job.getTaskReports(TaskType.MAP);
        if (reports != null) {
            for (TaskReport report : reports) {
                log.error("Error in task [%s] : %s", report.getTaskId(), Arrays.toString(report.getDiagnostics()));
            }
        }
    }
    return ok;
}
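
A hedged generalization of the failure handling above: after waitForCompletion returns false, dump diagnostics for the failed tasks of every type instead of only the map reports. It uses only the public Job/TaskReport API (TIPStatus from org.apache.hadoop.mapred), and the logger call mirrors the surrounding style; the helper itself is hypothetical:

// Hypothetical helper; call with the same Job instance when ok == false.
private void logFailedTaskDiagnostics(Job job) throws IOException, InterruptedException {
    for (TaskType type : new TaskType[]{TaskType.MAP, TaskType.REDUCE}) {
        TaskReport[] reports = job.getTaskReports(type);
        if (reports == null) {
            continue;
        }
        for (TaskReport report : reports) {
            if (report.getCurrentStatus() == TIPStatus.FAILED) {
                log.error("Error in task [%s] : %s", report.getTaskId(),
                        Arrays.toString(report.getDiagnostics()));
            }
        }
    }
}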
Project: hadoop-2.6.0-cdh5.4.3    File: JobClientUnitTest.java
@Test
public void testMapTaskReportsWithNullJob() throws Exception {
  TestJobClient client = new TestJobClient(new JobConf());
  Cluster mockCluster = mock(Cluster.class);
  client.setCluster(mockCluster);
  JobID id = new JobID("test",0);

  when(mockCluster.getJob(id)).thenReturn(null);

  TaskReport[] result = client.getMapTaskReports(id);
  assertEquals(0, result.length);

  verify(mockCluster).getJob(id);
}
Project: hadoop-2.6.0-cdh5.4.3    File: JobClientUnitTest.java
@Test
public void testReduceTaskReportsWithNullJob() throws Exception {
  TestJobClient client = new TestJobClient(new JobConf());
  Cluster mockCluster = mock(Cluster.class);
  client.setCluster(mockCluster);
  JobID id = new JobID("test",0);

  when(mockCluster.getJob(id)).thenReturn(null);

  TaskReport[] result = client.getReduceTaskReports(id);
  assertEquals(0, result.length);

  verify(mockCluster).getJob(id);
}
Project: hadoop-2.6.0-cdh5.4.3    File: JobClientUnitTest.java
@Test
public void testSetupTaskReportsWithNullJob() throws Exception {
  TestJobClient client = new TestJobClient(new JobConf());
  Cluster mockCluster = mock(Cluster.class);
  client.setCluster(mockCluster);
  JobID id = new JobID("test",0);

  when(mockCluster.getJob(id)).thenReturn(null);

  TaskReport[] result = client.getSetupTaskReports(id);
  assertEquals(0, result.length);

  verify(mockCluster).getJob(id);
}
Project: hadoop-2.6.0-cdh5.4.3    File: JobClientUnitTest.java
@Test
public void testCleanupTaskReportsWithNullJob() throws Exception {
  TestJobClient client = new TestJobClient(new JobConf());
  Cluster mockCluster = mock(Cluster.class);
  client.setCluster(mockCluster);
  JobID id = new JobID("test",0);

  when(mockCluster.getJob(id)).thenReturn(null);

  TaskReport[] result = client.getCleanupTaskReports(id);
  assertEquals(0, result.length);

  verify(mockCluster).getJob(id);
}
Project: hadoop-2.6.0-cdh5.4.3    File: CLI.java
private void printTaskAttempts(TaskReport report) {
  if (report.getCurrentStatus() == TIPStatus.COMPLETE) {
    System.out.println(report.getSuccessfulTaskAttemptId());
  } else if (report.getCurrentStatus() == TIPStatus.RUNNING) {
    for (TaskAttemptID t : 
      report.getRunningTaskAttemptIds()) {
      System.out.println(t);
    }
  }
}
Project: hadoop-2.6.0-cdh5.4.3    File: CLI.java
/**
 * Display the information about a job's tasks, of a particular type and
 * in a particular state
 * 
 * @param job the job
 * @param type the type of the task (map/reduce/setup/cleanup)
 * @param state the state of the task 
 * (pending/running/completed/failed/killed)
 */
protected void displayTasks(Job job, String type, String state) 
throws IOException, InterruptedException {
  TaskReport[] reports = job.getTaskReports(TaskType.valueOf(type.toUpperCase()));
  for (TaskReport report : reports) {
    TIPStatus status = report.getCurrentStatus();
    if ((state.equalsIgnoreCase("pending") && status == TIPStatus.PENDING) ||
        (state.equalsIgnoreCase("running") && status == TIPStatus.RUNNING) ||
        (state.equalsIgnoreCase("completed") && status == TIPStatus.COMPLETE) ||
        (state.equalsIgnoreCase("failed") && status == TIPStatus.FAILED) ||
        (state.equalsIgnoreCase("killed") && status == TIPStatus.KILLED)) {
      printTaskAttempts(report);
    }
  }
}
Project: hadoop-plus    File: JobClientUnitTest.java
@Test
public void testMapTaskReportsWithNullJob() throws Exception {
  TestJobClient client = new TestJobClient(new JobConf());
  Cluster mockCluster = mock(Cluster.class);
  client.setCluster(mockCluster);
  JobID id = new JobID("test",0);

  when(mockCluster.getJob(id)).thenReturn(null);

  TaskReport[] result = client.getMapTaskReports(id);
  assertEquals(0, result.length);

  verify(mockCluster).getJob(id);
}
Project: hadoop-plus    File: JobClientUnitTest.java
@Test
public void testReduceTaskReportsWithNullJob() throws Exception {
  TestJobClient client = new TestJobClient(new JobConf());
  Cluster mockCluster = mock(Cluster.class);
  client.setCluster(mockCluster);
  JobID id = new JobID("test",0);

  when(mockCluster.getJob(id)).thenReturn(null);

  TaskReport[] result = client.getReduceTaskReports(id);
  assertEquals(0, result.length);

  verify(mockCluster).getJob(id);
}
Project: hadoop-plus    File: JobClientUnitTest.java
@Test
public void testSetupTaskReportsWithNullJob() throws Exception {
  TestJobClient client = new TestJobClient(new JobConf());
  Cluster mockCluster = mock(Cluster.class);
  client.setCluster(mockCluster);
  JobID id = new JobID("test",0);

  when(mockCluster.getJob(id)).thenReturn(null);

  TaskReport[] result = client.getSetupTaskReports(id);
  assertEquals(0, result.length);

  verify(mockCluster).getJob(id);
}
Project: hadoop-plus    File: JobClientUnitTest.java
@Test
public void testCleanupTaskReportsWithNullJob() throws Exception {
  TestJobClient client = new TestJobClient(new JobConf());
  Cluster mockCluster = mock(Cluster.class);
  client.setCluster(mockCluster);
  JobID id = new JobID("test",0);

  when(mockCluster.getJob(id)).thenReturn(null);

  TaskReport[] result = client.getCleanupTaskReports(id);
  assertEquals(0, result.length);

  verify(mockCluster).getJob(id);
}
Project: hadoop-plus    File: CLI.java
private void printTaskAttempts(TaskReport report) {
  if (report.getCurrentStatus() == TIPStatus.COMPLETE) {
    System.out.println(report.getSuccessfulTaskAttemptId());
  } else if (report.getCurrentStatus() == TIPStatus.RUNNING) {
    for (TaskAttemptID t : 
      report.getRunningTaskAttemptIds()) {
      System.out.println(t);
    }
  }
}
Project: hadoop-plus    File: CLI.java
/**
 * Display the information about a job's tasks, of a particular type and
 * in a particular state
 * 
 * @param job the job
 * @param type the type of the task (map/reduce/setup/cleanup)
 * @param state the state of the task 
 * (pending/running/completed/failed/killed)
 */
protected void displayTasks(Job job, String type, String state) 
throws IOException, InterruptedException {
  TaskReport[] reports = job.getTaskReports(TaskType.valueOf(type.toUpperCase()));
  for (TaskReport report : reports) {
    TIPStatus status = report.getCurrentStatus();
    if ((state.equals("pending") && status ==TIPStatus.PENDING) ||
        (state.equals("running") && status ==TIPStatus.RUNNING) ||
        (state.equals("completed") && status == TIPStatus.COMPLETE) ||
        (state.equals("failed") && status == TIPStatus.FAILED) ||
        (state.equals("killed") && status == TIPStatus.KILLED)) {
      printTaskAttempts(report);
    }
  }
}
Project: FlexMap    File: JobClientUnitTest.java
@Test
public void testMapTaskReportsWithNullJob() throws Exception {
  TestJobClient client = new TestJobClient(new JobConf());
  Cluster mockCluster = mock(Cluster.class);
  client.setCluster(mockCluster);
  JobID id = new JobID("test",0);

  when(mockCluster.getJob(id)).thenReturn(null);

  TaskReport[] result = client.getMapTaskReports(id);
  assertEquals(0, result.length);

  verify(mockCluster).getJob(id);
}
Project: FlexMap    File: JobClientUnitTest.java
@Test
public void testReduceTaskReportsWithNullJob() throws Exception {
  TestJobClient client = new TestJobClient(new JobConf());
  Cluster mockCluster = mock(Cluster.class);
  client.setCluster(mockCluster);
  JobID id = new JobID("test",0);

  when(mockCluster.getJob(id)).thenReturn(null);

  TaskReport[] result = client.getReduceTaskReports(id);
  assertEquals(0, result.length);

  verify(mockCluster).getJob(id);
}
Project: FlexMap    File: JobClientUnitTest.java
@Test
public void testSetupTaskReportsWithNullJob() throws Exception {
  TestJobClient client = new TestJobClient(new JobConf());
  Cluster mockCluster = mock(Cluster.class);
  client.setCluster(mockCluster);
  JobID id = new JobID("test",0);

  when(mockCluster.getJob(id)).thenReturn(null);

  TaskReport[] result = client.getSetupTaskReports(id);
  assertEquals(0, result.length);

  verify(mockCluster).getJob(id);
}
Project: FlexMap    File: JobClientUnitTest.java
@Test
public void testCleanupTaskReportsWithNullJob() throws Exception {
  TestJobClient client = new TestJobClient(new JobConf());
  Cluster mockCluster = mock(Cluster.class);
  client.setCluster(mockCluster);
  JobID id = new JobID("test",0);

  when(mockCluster.getJob(id)).thenReturn(null);

  TaskReport[] result = client.getCleanupTaskReports(id);
  assertEquals(0, result.length);

  verify(mockCluster).getJob(id);
}
Project: FlexMap    File: CLI.java
private void printTaskAttempts(TaskReport report) {
  if (report.getCurrentStatus() == TIPStatus.COMPLETE) {
    System.out.println(report.getSuccessfulTaskAttemptId());
  } else if (report.getCurrentStatus() == TIPStatus.RUNNING) {
    for (TaskAttemptID t : 
      report.getRunningTaskAttemptIds()) {
      System.out.println(t);
    }
  }
}
Project: FlexMap    File: CLI.java
/**
 * Display the information about a job's tasks, of a particular type and
 * in a particular state
 * 
 * @param job the job
 * @param type the type of the task (map/reduce/setup/cleanup)
 * @param state the state of the task 
 * (pending/running/completed/failed/killed)
 */
protected void displayTasks(Job job, String type, String state) 
throws IOException, InterruptedException {
  TaskReport[] reports = job.getTaskReports(TaskType.valueOf(type.toUpperCase()));
  for (TaskReport report : reports) {
    TIPStatus status = report.getCurrentStatus();
    if ((state.equalsIgnoreCase("pending") && status == TIPStatus.PENDING) ||
        (state.equalsIgnoreCase("running") && status == TIPStatus.RUNNING) ||
        (state.equalsIgnoreCase("completed") && status == TIPStatus.COMPLETE) ||
        (state.equalsIgnoreCase("failed") && status == TIPStatus.FAILED) ||
        (state.equalsIgnoreCase("killed") && status == TIPStatus.KILLED)) {
      printTaskAttempts(report);
    }
  }
}