Example usages of the Java class org.apache.hadoop.mapreduce.v2.app.job.event.TaskAttemptStatusUpdateEvent
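The examples below are collected from several Hadoop forks and show the three places this event class typically appears: a test helper in TestFetchFailure.java that fabricates a status report, the state-machine transition in TaskAttemptImpl.java that consumes the event inside the MR ApplicationMaster, and the umbilical statusUpdate RPC in TaskAttemptListenerImpl.java that produces the event from a running task's TaskStatus.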

Project: hadoop    File: TestFetchFailure.java
private void updateStatus(MRApp app, TaskAttempt attempt, Phase phase) {
  TaskAttemptStatusUpdateEvent.TaskAttemptStatus status = new TaskAttemptStatusUpdateEvent.TaskAttemptStatus();
  status.counters = new Counters();
  status.fetchFailedMaps = new ArrayList<TaskAttemptId>();
  status.id = attempt.getID();
  status.mapFinishTime = 0;
  status.phase = phase;
  status.progress = 0.5f;
  status.shuffleFinishTime = 0;
  status.sortFinishTime = 0;
  status.stateString = "OK";
  status.taskState = attempt.getState();
  TaskAttemptStatusUpdateEvent event = new TaskAttemptStatusUpdateEvent(attempt.getID(),
      status);
  app.getContext().getEventHandler().handle(event);
}
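This test helper simulates a heartbeat from a running attempt: it fabricates a TaskAttemptStatus at 50% progress in the given phase (with empty counters and no fetch failures) and dispatches it through the app's event handler, letting the test drive an attempt into a specific phase without running real task code.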
(aliyun-oss-hadoop-fs, big-c, hadoop-2.6.0-cdh5.4.3, hadoop-plus, FlexMap, hops, hadoop-TCP, hardfs, and hadoop-on-lustre2 carry an identical copy of this helper in their TestFetchFailure.java.)
Project: hadoop    File: TaskAttemptImpl.java
@SuppressWarnings("unchecked")
@Override
public void transition(TaskAttemptImpl taskAttempt, 
    TaskAttemptEvent event) {
  // Status update calls don't really change the state of the attempt.
  TaskAttemptStatus newReportedStatus =
      ((TaskAttemptStatusUpdateEvent) event)
          .getReportedTaskAttemptStatus();
  // Now switch the information in the reportedStatus
  taskAttempt.reportedStatus = newReportedStatus;
  taskAttempt.reportedStatus.taskState = taskAttempt.getState();

  // send event to speculator about the reported status
  taskAttempt.eventHandler.handle
      (new SpeculatorEvent
          (taskAttempt.reportedStatus, taskAttempt.clock.getTime()));

  taskAttempt.updateProgressSplits();

  //if fetch failures are present, send the fetch failure event to the job;
  //this only happens for reduce attempts
  if (taskAttempt.reportedStatus.fetchFailedMaps != null && 
      taskAttempt.reportedStatus.fetchFailedMaps.size() > 0) {
    taskAttempt.eventHandler.handle(new JobTaskAttemptFetchFailureEvent(
        taskAttempt.attemptId, taskAttempt.reportedStatus.fetchFailedMaps));
  }
}
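For reference, here is a minimal sketch of a status report that would exercise the fetch-failure branch above. It mirrors the API used in the listings on this page; app, reduceAttempt, and failedMapId are assumed stand-ins for objects a surrounding test would provide, not part of the original listing.

// Hypothetical sketch: a reduce attempt reporting one failed map fetch.
TaskAttemptStatusUpdateEvent.TaskAttemptStatus status =
    new TaskAttemptStatusUpdateEvent.TaskAttemptStatus();
status.id = reduceAttempt.getID();
status.counters = new Counters();
status.progress = 0.5f;
status.phase = Phase.REDUCE;
status.stateString = "OK";
status.fetchFailedMaps = new ArrayList<TaskAttemptId>();
status.fetchFailedMaps.add(failedMapId);  // a non-empty list triggers JobTaskAttemptFetchFailureEvent
app.getContext().getEventHandler().handle(
    new TaskAttemptStatusUpdateEvent(status.id, status));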
Project: aliyun-oss-hadoop-fs    File: TaskAttemptImpl.java
@SuppressWarnings("unchecked")
@Override
public void transition(TaskAttemptImpl taskAttempt, 
    TaskAttemptEvent event) {
  // Status update calls don't really change the state of the attempt.
  TaskAttemptStatus newReportedStatus =
      ((TaskAttemptStatusUpdateEvent) event)
          .getReportedTaskAttemptStatus();
  // Now switch the information in the reportedStatus
  taskAttempt.reportedStatus = newReportedStatus;
  taskAttempt.reportedStatus.taskState = taskAttempt.getState();

  // send event to speculator about the reported status
  taskAttempt.eventHandler.handle
      (new SpeculatorEvent
          (taskAttempt.reportedStatus, taskAttempt.clock.getTime()));

  taskAttempt.updateProgressSplits();

  //if fetch failures are present, send the fetch failure event to the job;
  //this only happens for reduce attempts
  if (taskAttempt.reportedStatus.fetchFailedMaps != null && 
      taskAttempt.reportedStatus.fetchFailedMaps.size() > 0) {
    String hostname = taskAttempt.container == null ? "UNKNOWN"
        : taskAttempt.container.getNodeId().getHost();
    taskAttempt.eventHandler.handle(new JobTaskAttemptFetchFailureEvent(
        taskAttempt.attemptId, taskAttempt.reportedStatus.fetchFailedMaps,
            hostname));
  }
}
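Compared with the hadoop version, this fork also resolves the host of the reporting attempt's container (falling back to "UNKNOWN" when no container is assigned) and passes it to JobTaskAttemptFetchFailureEvent, so fetch failures can be attributed to a node.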
(big-c, hadoop-2.6.0-cdh5.4.3, hadoop-plus, FlexMap, hadoop-TCP, hardfs, and hadoop-on-lustre2 carry an identical copy of the hadoop version of this transition in TaskAttemptImpl.java; hops matches the aliyun-oss-hadoop-fs version.)
Project: hadoop    File: TaskAttemptListenerImpl.java
@Override
  public boolean statusUpdate(TaskAttemptID taskAttemptID,
      TaskStatus taskStatus) throws IOException, InterruptedException {
    org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptId yarnAttemptID =
        TypeConverter.toYarn(taskAttemptID);
    taskHeartbeatHandler.progressing(yarnAttemptID);
    TaskAttemptStatus taskAttemptStatus =
        new TaskAttemptStatus();
    taskAttemptStatus.id = yarnAttemptID;
    // Task sends the updated progress to the TT.
    taskAttemptStatus.progress = taskStatus.getProgress();
    LOG.info("Progress of TaskAttempt " + taskAttemptID + " is : "
        + taskStatus.getProgress());
    // Task sends the updated state-string to the TT.
    taskAttemptStatus.stateString = taskStatus.getStateString();
    // Task sends the updated phase to the TT.
    taskAttemptStatus.phase = TypeConverter.toYarn(taskStatus.getPhase());
    // Counters are updated by the task. Convert counters into new format as
    // that is the primary storage format inside the AM to avoid multiple
    // conversions and unnecessary heap usage.
    taskAttemptStatus.counters = new org.apache.hadoop.mapreduce.Counters(
      taskStatus.getCounters());

    // Map Finish time set by the task (map only)
    if (taskStatus.getIsMap() && taskStatus.getMapFinishTime() != 0) {
      taskAttemptStatus.mapFinishTime = taskStatus.getMapFinishTime();
    }

    // Shuffle Finish time set by the task (reduce only).
    if (!taskStatus.getIsMap() && taskStatus.getShuffleFinishTime() != 0) {
      taskAttemptStatus.shuffleFinishTime = taskStatus.getShuffleFinishTime();
    }

    // Sort finish time set by the task (reduce only).
    if (!taskStatus.getIsMap() && taskStatus.getSortFinishTime() != 0) {
      taskAttemptStatus.sortFinishTime = taskStatus.getSortFinishTime();
    }

    // Not Setting the task state. Used by speculation - will be set in TaskAttemptImpl
    //taskAttemptStatus.taskState =  TypeConverter.toYarn(taskStatus.getRunState());

    //set the fetch failures
    if (taskStatus.getFetchFailedMaps() != null 
        && taskStatus.getFetchFailedMaps().size() > 0) {
      taskAttemptStatus.fetchFailedMaps = 
        new ArrayList<org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptId>();
      for (TaskAttemptID failedMapId : taskStatus.getFetchFailedMaps()) {
        taskAttemptStatus.fetchFailedMaps.add(
            TypeConverter.toYarn(failedMapId));
      }
    }

    // Task sends the information about the nextRecordRange to the TT

//    TODO: The following are not needed here, but needed to be set somewhere inside AppMaster.
//    taskStatus.getRunState(); // Set by the TT/JT. Transform into a state TODO
//    taskStatus.getStartTime(); // Used to be set by the TaskTracker. This should be set by getTask().
//    taskStatus.getFinishTime(); // Used to be set by TT/JT. Should be set when task finishes
//    // This was used by TT to do counter updates only once every minute. So this
//    // isn't ever changed by the Task itself.
//    taskStatus.getIncludeCounters();

    context.getEventHandler().handle(
        new TaskAttemptStatusUpdateEvent(taskAttemptStatus.id,
            taskAttemptStatus));
    return true;
  }
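statusUpdate is the umbilical RPC a running task calls to report progress to the MR ApplicationMaster. The listener first notifies the TaskHeartbeatHandler so the attempt is not marked as timed out, copies the classic TaskStatus fields into the v2 TaskAttemptStatus record (converting counters to the new format once, up front), and finally dispatches a TaskAttemptStatusUpdateEvent, which the TaskAttemptImpl transition shown earlier consumes.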
(big-c and hadoop-2.6.0-cdh5.4.3 carry an identical copy of this method in TaskAttemptListenerImpl.java.)
Project: hadoop-plus    File: TaskAttemptListenerImpl.java
@Override
  public boolean statusUpdate(TaskAttemptID taskAttemptID,
      TaskStatus taskStatus) throws IOException, InterruptedException {
    LOG.info("Status update from " + taskAttemptID.toString());
    org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptId yarnAttemptID =
        TypeConverter.toYarn(taskAttemptID);
    taskHeartbeatHandler.progressing(yarnAttemptID);
    TaskAttemptStatus taskAttemptStatus =
        new TaskAttemptStatus();
    taskAttemptStatus.id = yarnAttemptID;
    // Task sends the updated progress to the TT.
    taskAttemptStatus.progress = taskStatus.getProgress();
    LOG.info("Progress of TaskAttempt " + taskAttemptID + " is : "
        + taskStatus.getProgress());
    // Task sends the updated state-string to the TT.
    taskAttemptStatus.stateString = taskStatus.getStateString();
    // Task sends the updated phase to the TT.
    taskAttemptStatus.phase = TypeConverter.toYarn(taskStatus.getPhase());
    // Counters are updated by the task. Convert counters into new format as
    // that is the primary storage format inside the AM to avoid multiple
    // conversions and unnecessary heap usage.
    taskAttemptStatus.counters = new org.apache.hadoop.mapreduce.Counters(
      taskStatus.getCounters());

    // Map Finish time set by the task (map only)
    if (taskStatus.getIsMap() && taskStatus.getMapFinishTime() != 0) {
      taskAttemptStatus.mapFinishTime = taskStatus.getMapFinishTime();
    }

    // Shuffle Finish time set by the task (reduce only).
    if (!taskStatus.getIsMap() && taskStatus.getShuffleFinishTime() != 0) {
      taskAttemptStatus.shuffleFinishTime = taskStatus.getShuffleFinishTime();
    }

    // Sort finish time set by the task (reduce only).
    if (!taskStatus.getIsMap() && taskStatus.getSortFinishTime() != 0) {
      taskAttemptStatus.sortFinishTime = taskStatus.getSortFinishTime();
    }

    // Not Setting the task state. Used by speculation - will be set in TaskAttemptImpl
    //taskAttemptStatus.taskState =  TypeConverter.toYarn(taskStatus.getRunState());

    //set the fetch failures
    if (taskStatus.getFetchFailedMaps() != null 
        && taskStatus.getFetchFailedMaps().size() > 0) {
      taskAttemptStatus.fetchFailedMaps = 
        new ArrayList<org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptId>();
      for (TaskAttemptID failedMapId : taskStatus.getFetchFailedMaps()) {
        taskAttemptStatus.fetchFailedMaps.add(
            TypeConverter.toYarn(failedMapId));
      }
    }

    // Task sends the information about the nextRecordRange to the TT

//    TODO: The following are not needed here, but needed to be set somewhere inside AppMaster.
//    taskStatus.getRunState(); // Set by the TT/JT. Transform into a state TODO
//    taskStatus.getStartTime(); // Used to be set by the TaskTracker. This should be set by getTask().
//    taskStatus.getFinishTime(); // Used to be set by TT/JT. Should be set when task finishes
//    // This was used by TT to do counter updates only once every minute. So this
//    // isn't ever changed by the Task itself.
//    taskStatus.getIncludeCounters();

    context.getEventHandler().handle(
        new TaskAttemptStatusUpdateEvent(taskAttemptStatus.id,
            taskAttemptStatus));
    return true;
  }
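The only change relative to the hadoop version is the extra "Status update from" log line at the top of the method.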
Project: FlexMap    File: TaskAttemptListenerImpl.java
@Override
  public boolean statusUpdate(TaskAttemptID taskAttemptID,
      TaskStatus taskStatus) throws IOException, InterruptedException {
    org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptId yarnAttemptID =
        TypeConverter.toYarn(taskAttemptID);
    taskHeartbeatHandler.progressing(yarnAttemptID);
    TaskAttemptStatus taskAttemptStatus =
        new TaskAttemptStatus();
    taskAttemptStatus.id = yarnAttemptID;
    // Task sends the updated progress to the TT.
    taskAttemptStatus.progress = taskStatus.getProgress();
    LOG.info("Progress of TaskAttempt " + taskAttemptID + " is : "
        + taskStatus.getProgress());
    // Task sends the updated state-string to the TT.
    taskAttemptStatus.stateString = taskStatus.getStateString();
    // Task sends the updated phase to the TT.
    taskAttemptStatus.phase = TypeConverter.toYarn(taskStatus.getPhase());
    // Counters are updated by the task. Convert counters into new format as
    // that is the primary storage format inside the AM to avoid multiple
    // conversions and unnecessary heap usage.
    taskAttemptStatus.counters = new org.apache.hadoop.mapreduce.Counters(
      taskStatus.getCounters());

    // Task current time; set because different machines may have different clocks.
    taskAttemptStatus.currentTime = taskStatus.getCurrentTime();

    // Map begin time set by the task (map only)
    if (taskStatus.getIsMap() && taskStatus.getMapBeginTime() != 0) {
      taskAttemptStatus.mapBeginTime = taskStatus.getMapBeginTime();
    }
    // Map Finish time set by the task (map only)
    if (taskStatus.getIsMap() && taskStatus.getMapFinishTime() != 0) {
      taskAttemptStatus.mapFinishTime = taskStatus.getMapFinishTime();
    }

    // Shuffle Finish time set by the task (reduce only).
    if (!taskStatus.getIsMap() && taskStatus.getShuffleFinishTime() != 0) {
      taskAttemptStatus.shuffleFinishTime = taskStatus.getShuffleFinishTime();
    }

    // Sort finish time set by the task (reduce only).
    if (!taskStatus.getIsMap() && taskStatus.getSortFinishTime() != 0) {
      taskAttemptStatus.sortFinishTime = taskStatus.getSortFinishTime();
    }

    // Not Setting the task state. Used by speculation - will be set in TaskAttemptImpl
    //taskAttemptStatus.taskState =  TypeConverter.toYarn(taskStatus.getRunState());

    //set the fetch failures
    if (taskStatus.getFetchFailedMaps() != null 
        && taskStatus.getFetchFailedMaps().size() > 0) {
      taskAttemptStatus.fetchFailedMaps = 
        new ArrayList<org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptId>();
      for (TaskAttemptID failedMapId : taskStatus.getFetchFailedMaps()) {
        taskAttemptStatus.fetchFailedMaps.add(
            TypeConverter.toYarn(failedMapId));
      }
    }

    // Task sends the information about the nextRecordRange to the TT

//    TODO: The following are not needed here, but needed to be set somewhere inside AppMaster.
//    taskStatus.getRunState(); // Set by the TT/JT. Transform into a state TODO
//    taskStatus.getStartTime(); // Used to be set by the TaskTracker. This should be set by getTask().
//    taskStatus.getFinishTime(); // Used to be set by TT/JT. Should be set when task finishes
//    // This was used by TT to do counter updates only once every minute. So this
//    // isn't ever changed by the Task itself.
//    taskStatus.getIncludeCounters();

    context.getEventHandler().handle(
        new TaskAttemptStatusUpdateEvent(taskAttemptStatus.id,
            taskAttemptStatus));
    return true;
  }
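The FlexMap fork extends the base method with two extra fields: currentTime, the task's local clock reading, forwarded because different machines may have different clocks, and mapBeginTime, recorded alongside the map finish time for map tasks.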
(hops and hadoop-on-lustre2 match the hadoop version of this method; hadoop-TCP and hardfs match the hadoop-plus version.)