Example source code for the Java class org.apache.hadoop.mapreduce.jobhistory.JobHistoryEvent
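
Every snippet collected on this page follows the same dispatch pattern: build a concrete HistoryEvent (started, inited, failed, killed, ...), wrap it in a JobHistoryEvent keyed by the job's v2 JobId, and hand it to the application's event handler. The following minimal sketch is not taken from any of the projects below; the helper class and method names are illustrative, and it assumes the Hadoop MapReduce v2 app-master classes are on the classpath.

import org.apache.hadoop.mapreduce.JobID;
import org.apache.hadoop.mapreduce.TypeConverter;
import org.apache.hadoop.mapreduce.jobhistory.HistoryEvent;
import org.apache.hadoop.mapreduce.jobhistory.JobHistoryEvent;
import org.apache.hadoop.mapreduce.v2.api.records.JobId;
import org.apache.hadoop.yarn.event.EventHandler;

// Illustrative helper, not part of any project on this page.
class JobHistoryDispatchSketch {

  // Wrap a concrete history event and dispatch it, mirroring the
  // eventHandler.handle(new JobHistoryEvent(jobId, ...)) calls below.
  static void dispatch(EventHandler<JobHistoryEvent> handler, JobId jobId,
      HistoryEvent historyEvent) {
    handler.handle(new JobHistoryEvent(jobId, historyEvent));
  }

  // History event payloads use the old-API JobID, while JobHistoryEvent
  // itself is keyed by the v2 JobId; TypeConverter bridges the two,
  // exactly as MRApp.handle() does below.
  static JobId toV2JobId(JobID oldJobId) {
    return TypeConverter.toYarn(oldJobId);
  }
}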

Project: hadoop    File: MRApp.java
@Override
public void handle(ContainerAllocatorEvent event) {
  ContainerId cId =
      ContainerId.newContainerId(getContext().getApplicationAttemptId(),
        containerCount++);
  NodeId nodeId = NodeId.newInstance(NM_HOST, NM_PORT);
  Resource resource = Resource.newInstance(1234, 2, 2);
  ContainerTokenIdentifier containerTokenIdentifier =
      new ContainerTokenIdentifier(cId, nodeId.toString(), "user",
      resource, System.currentTimeMillis() + 10000, 42, 42,
      Priority.newInstance(0), 0);
  Token containerToken = newContainerToken(nodeId, "password".getBytes(),
        containerTokenIdentifier);
  Container container = Container.newInstance(cId, nodeId,
      NM_HOST + ":" + NM_HTTP_PORT, resource, null, containerToken);
  JobID id = TypeConverter.fromYarn(applicationId);
  JobId jobId = TypeConverter.toYarn(id);
  getContext().getEventHandler().handle(new JobHistoryEvent(jobId, 
      new NormalizedResourceEvent(
          org.apache.hadoop.mapreduce.TaskType.REDUCE,
      100)));
  getContext().getEventHandler().handle(new JobHistoryEvent(jobId, 
      new NormalizedResourceEvent(
          org.apache.hadoop.mapreduce.TaskType.MAP,
      100)));
  getContext().getEventHandler().handle(
      new TaskAttemptContainerAssignedEvent(event.getAttemptID(),
          container, null));
}
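This MRApp stub (used in tests) fabricates the allocated container itself: a fixed NodeManager host and port, a dummy container token, and a hard-coded Resource. Before assigning the container it emits a NormalizedResourceEvent for each task type so the job history records the normalized resource sizes, then delivers the TaskAttemptContainerAssignedEvent.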
Project: hadoop    File: TaskAttemptImpl.java
@SuppressWarnings("unchecked")
private void sendLaunchedEvents() {
  JobCounterUpdateEvent jce = new JobCounterUpdateEvent(attemptId.getTaskId()
      .getJobId());
  jce.addCounterUpdate(attemptId.getTaskId().getTaskType() == TaskType.MAP ?
      JobCounter.TOTAL_LAUNCHED_MAPS : JobCounter.TOTAL_LAUNCHED_REDUCES, 1);
  eventHandler.handle(jce);

  LOG.info("TaskAttempt: [" + attemptId
      + "] using containerId: [" + container.getId() + " on NM: ["
      + StringInterner.weakIntern(container.getNodeId().toString()) + "]");
  TaskAttemptStartedEvent tase =
    new TaskAttemptStartedEvent(TypeConverter.fromYarn(attemptId),
        TypeConverter.fromYarn(attemptId.getTaskId().getTaskType()),
        launchTime, trackerName, httpPort, shufflePort, container.getId(),
        locality.toString(), avataar.toString());
  eventHandler.handle(
      new JobHistoryEvent(attemptId.getTaskId().getJobId(), tase));
}
Project: hadoop    File: TaskAttemptImpl.java
@SuppressWarnings("unchecked")
@Override
public void transition(TaskAttemptImpl taskAttempt, TaskAttemptEvent event) {
  // set the finish time
  taskAttempt.setFinishTime();

  if (taskAttempt.getLaunchTime() != 0) {
    taskAttempt.eventHandler
        .handle(createJobCounterUpdateEventTAFailed(taskAttempt, false));
    TaskAttemptUnsuccessfulCompletionEvent tauce =
        createTaskAttemptUnsuccessfulCompletionEvent(taskAttempt,
            TaskAttemptStateInternal.FAILED);
    taskAttempt.eventHandler.handle(new JobHistoryEvent(
        taskAttempt.attemptId.getTaskId().getJobId(), tauce));
    // Not handling failed map/reduce events:
    // taskAttempt.logAttemptFinishedEvent(TaskAttemptStateInternal.FAILED);
  } else {
    LOG.debug("Not generating HistoryFinish event since start event not " +
        "generated for taskAttempt: " + taskAttempt.getID());
  }
  taskAttempt.eventHandler.handle(new TaskTAttemptEvent(
      taskAttempt.attemptId, TaskEventType.T_ATTEMPT_FAILED));
}
Project: hadoop    File: TaskAttemptImpl.java
@SuppressWarnings("unchecked")
@Override
public void transition(TaskAttemptImpl taskAttempt, TaskAttemptEvent event) {
  // too many fetch failures can only happen for map tasks
  Preconditions
      .checkArgument(taskAttempt.getID().getTaskId().getTaskType() == TaskType.MAP);
  // add to diagnostics
  taskAttempt.addDiagnosticInfo("Too many fetch failures. Failing the attempt");

  if (taskAttempt.getLaunchTime() != 0) {
    taskAttempt.eventHandler
        .handle(createJobCounterUpdateEventTAFailed(taskAttempt, true));
    TaskAttemptUnsuccessfulCompletionEvent tauce =
        createTaskAttemptUnsuccessfulCompletionEvent(taskAttempt,
            TaskAttemptStateInternal.FAILED);
    taskAttempt.eventHandler.handle(new JobHistoryEvent(
        taskAttempt.attemptId.getTaskId().getJobId(), tauce));
  } else {
    LOG.debug("Not generating HistoryFinish event since start event not " +
        "generated for taskAttempt: " + taskAttempt.getID());
  }
  taskAttempt.eventHandler.handle(new TaskTAttemptEvent(
      taskAttempt.attemptId, TaskEventType.T_ATTEMPT_FAILED));
}
Project: hadoop    File: JobImpl.java
/**
 * This transition executes in the event-dispatcher thread, though it's
 * triggered in MRAppMaster's startJobs() method.
 */
@Override
public void transition(JobImpl job, JobEvent event) {
  JobStartEvent jse = (JobStartEvent) event;
  if (jse.getRecoveredJobStartTime() != 0) {
    job.startTime = jse.getRecoveredJobStartTime();
  } else {
    job.startTime = job.clock.getTime();
  }
  JobInitedEvent jie =
    new JobInitedEvent(job.oldJobId,
         job.startTime,
         job.numMapTasks, job.numReduceTasks,
         job.getState().toString(),
         job.isUber());
  job.eventHandler.handle(new JobHistoryEvent(job.jobId, jie));
  JobInfoChangeEvent jice = new JobInfoChangeEvent(job.oldJobId,
      job.appSubmitTime, job.startTime);
  job.eventHandler.handle(new JobHistoryEvent(job.jobId, jice));
  job.metrics.runningJob(job);

  job.eventHandler.handle(new CommitterJobSetupEvent(
          job.jobId, job.jobContext));
}
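Note that the sentinel for "no recovered start time" differs across the versions collected here: this snippet and the big-c, hadoop-plus, and FlexMap copies treat 0 as unrecovered, while the aliyun-oss-hadoop-fs and hops copies below compare against -1L.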
Project: hadoop    File: TaskImpl.java
@Override
public TaskStateInternal transition(TaskImpl task, TaskEvent event) {
  TaskAttemptId taskAttemptId =
      ((TaskTAttemptEvent) event).getTaskAttemptID();
  task.handleTaskAttemptCompletion(taskAttemptId, taCompletionEventStatus);
  task.finishedAttempts.add(taskAttemptId);
  // check whether all attempts are finished
  if (task.finishedAttempts.size() == task.attempts.size()) {
    if (task.historyTaskStartGenerated) {
      TaskFailedEvent taskFailedEvent = createTaskFailedEvent(task, null,
          finalState, null); // TODO JH verify failedAttempt null
      task.eventHandler.handle(new JobHistoryEvent(task.taskId.getJobId(),
          taskFailedEvent));
    } else {
      LOG.debug("Not generating HistoryFinish event since start event not" +
            " generated for task: " + task.getID());
    }

    task.eventHandler.handle(
        new JobTaskEvent(task.taskId, getExternalState(finalState)));
    return finalState;
  }
  return task.getInternalState();
}
Project: hadoop    File: TaskImpl.java
@Override
public void transition(TaskImpl task, TaskEvent event) {

  if (task.historyTaskStartGenerated) {
    TaskFailedEvent taskFailedEvent = createTaskFailedEvent(task, null,
        TaskStateInternal.KILLED, null); // TODO Verify failedAttemptId is null
    task.eventHandler.handle(new JobHistoryEvent(task.taskId.getJobId(),
        taskFailedEvent));
  } else {
    LOG.debug("Not generating HistoryFinish event since start event not" +
            " generated for task: " + task.getID());
  }

  task.eventHandler.handle(new JobTaskEvent(task.taskId,
      getExternalState(TaskStateInternal.KILLED)));
  task.metrics.endWaitingTask(task);
}
Project: aliyun-oss-hadoop-fs    File: TaskAttemptImpl.java
@SuppressWarnings("unchecked")
private void sendLaunchedEvents() {
  JobCounterUpdateEvent jce = new JobCounterUpdateEvent(attemptId.getTaskId()
      .getJobId());
  jce.addCounterUpdate(attemptId.getTaskId().getTaskType() == TaskType.MAP ?
      JobCounter.TOTAL_LAUNCHED_MAPS : JobCounter.TOTAL_LAUNCHED_REDUCES, 1);
  eventHandler.handle(jce);

  LOG.info("TaskAttempt: [" + attemptId
      + "] using containerId: [" + container.getId() + " on NM: ["
      + StringInterner.weakIntern(container.getNodeId().toString()) + "]");
  TaskAttemptStartedEvent tase =
    new TaskAttemptStartedEvent(TypeConverter.fromYarn(attemptId),
        TypeConverter.fromYarn(attemptId.getTaskId().getTaskType()),
        launchTime, trackerName, httpPort, shufflePort, container.getId(),
        locality.toString(), avataar.toString());
  eventHandler.handle(
      new JobHistoryEvent(attemptId.getTaskId().getJobId(), tase));
}
Project: aliyun-oss-hadoop-fs    File: TaskAttemptImpl.java
@SuppressWarnings("unchecked")
private static void notifyTaskAttemptFailed(TaskAttemptImpl taskAttempt) {
  if (taskAttempt.getLaunchTime() == 0) {
    sendJHStartEventForAssignedFailTask(taskAttempt);
  }
  // set the finish time
  taskAttempt.setFinishTime();
  taskAttempt.eventHandler
      .handle(createJobCounterUpdateEventTAFailed(taskAttempt, false));
  TaskAttemptUnsuccessfulCompletionEvent tauce =
      createTaskAttemptUnsuccessfulCompletionEvent(taskAttempt,
          TaskAttemptStateInternal.FAILED);
  taskAttempt.eventHandler.handle(new JobHistoryEvent(
      taskAttempt.attemptId.getTaskId().getJobId(), tauce));

  taskAttempt.eventHandler.handle(new TaskTAttemptEvent(
      taskAttempt.attemptId, TaskEventType.T_ATTEMPT_FAILED));

}
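Unlike the older failed-attempt transitions above, which skip the history finish event entirely when the attempt never launched, this version first emits a job-history start event via sendJHStartEventForAssignedFailTask(), so the FAILED completion event that follows always has a matching start event in the history file.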
Project: aliyun-oss-hadoop-fs    File: JobImpl.java
/**
 * This transition executes in the event-dispatcher thread, though it's
 * triggered in MRAppMaster's startJobs() method.
 */
@Override
public void transition(JobImpl job, JobEvent event) {
  JobStartEvent jse = (JobStartEvent) event;
  if (jse.getRecoveredJobStartTime() != -1L) {
    job.startTime = jse.getRecoveredJobStartTime();
  } else {
    job.startTime = job.clock.getTime();
  }
  JobInitedEvent jie =
    new JobInitedEvent(job.oldJobId,
         job.startTime,
         job.numMapTasks, job.numReduceTasks,
         job.getState().toString(),
         job.isUber());
  job.eventHandler.handle(new JobHistoryEvent(job.jobId, jie));
  JobInfoChangeEvent jice = new JobInfoChangeEvent(job.oldJobId,
      job.appSubmitTime, job.startTime);
  job.eventHandler.handle(new JobHistoryEvent(job.jobId, jice));
  job.metrics.runningJob(job);

  job.eventHandler.handle(new CommitterJobSetupEvent(
          job.jobId, job.jobContext));
}
Project: aliyun-oss-hadoop-fs    File: TaskImpl.java
@Override
public TaskStateInternal transition(TaskImpl task, TaskEvent event) {
  TaskAttemptId taskAttemptId =
      ((TaskTAttemptEvent) event).getTaskAttemptID();
  task.handleTaskAttemptCompletion(taskAttemptId, taCompletionEventStatus);
  task.finishedAttempts.add(taskAttemptId);
  // check whether all attempts are finished
  if (task.finishedAttempts.size() == task.attempts.size()) {
    if (task.historyTaskStartGenerated) {
      TaskFailedEvent taskFailedEvent = createTaskFailedEvent(task, null,
          finalState, null); // TODO JH verify failedAttempt null
      task.eventHandler.handle(new JobHistoryEvent(task.taskId.getJobId(),
          taskFailedEvent));
    } else {
      LOG.debug("Not generating HistoryFinish event since start event not" +
            " generated for task: " + task.getID());
    }

    task.eventHandler.handle(
        new JobTaskEvent(task.taskId, getExternalState(finalState)));
    return finalState;
  }
  return task.getInternalState();
}
Project: aliyun-oss-hadoop-fs    File: TaskImpl.java
@Override
public void transition(TaskImpl task, TaskEvent event) {

  if (task.historyTaskStartGenerated) {
    TaskFailedEvent taskFailedEvent = createTaskFailedEvent(task, null,
        TaskStateInternal.KILLED, null); // TODO Verify failedAttemptId is null
    task.eventHandler.handle(new JobHistoryEvent(task.taskId.getJobId(),
        taskFailedEvent));
  } else {
    LOG.debug("Not generating HistoryFinish event since start event not" +
            " generated for task: " + task.getID());
  }

  task.eventHandler.handle(new JobTaskEvent(task.taskId,
      getExternalState(TaskStateInternal.KILLED)));
  task.metrics.endWaitingTask(task);
}
Project: big-c    File: TaskAttemptImpl.java
@SuppressWarnings("unchecked")
private void sendLaunchedEvents() {
  JobCounterUpdateEvent jce = new JobCounterUpdateEvent(attemptId.getTaskId()
      .getJobId());
  jce.addCounterUpdate(attemptId.getTaskId().getTaskType() == TaskType.MAP ?
      JobCounter.TOTAL_LAUNCHED_MAPS : JobCounter.TOTAL_LAUNCHED_REDUCES, 1);
  eventHandler.handle(jce);

  LOG.info("TaskAttempt: [" + attemptId
      + "] using containerId: [" + container.getId() + " on NM: ["
      + StringInterner.weakIntern(container.getNodeId().toString()) + "]");
  TaskAttemptStartedEvent tase =
    new TaskAttemptStartedEvent(TypeConverter.fromYarn(attemptId),
        TypeConverter.fromYarn(attemptId.getTaskId().getTaskType()),
        launchTime, trackerName, httpPort, shufflePort, container.getId(),
        locality.toString(), avataar.toString());
  eventHandler.handle(
      new JobHistoryEvent(attemptId.getTaskId().getJobId(), tase));
}
Project: big-c    File: TaskAttemptImpl.java
@SuppressWarnings("unchecked")
@Override
public void transition(TaskAttemptImpl taskAttempt, TaskAttemptEvent event) {
  // set the finish time
  taskAttempt.setFinishTime();

  if (taskAttempt.getLaunchTime() != 0) {
    taskAttempt.eventHandler
        .handle(createJobCounterUpdateEventTAFailed(taskAttempt, false));
    TaskAttemptUnsuccessfulCompletionEvent tauce =
        createTaskAttemptUnsuccessfulCompletionEvent(taskAttempt,
            TaskAttemptStateInternal.FAILED);
    taskAttempt.eventHandler.handle(new JobHistoryEvent(
        taskAttempt.attemptId.getTaskId().getJobId(), tauce));
    // Not handling failed map/reduce events:
    // taskAttempt.logAttemptFinishedEvent(TaskAttemptStateInternal.FAILED);
  } else {
    LOG.debug("Not generating HistoryFinish event since start event not " +
        "generated for taskAttempt: " + taskAttempt.getID());
  }
  taskAttempt.eventHandler.handle(new TaskTAttemptEvent(
      taskAttempt.attemptId, TaskEventType.T_ATTEMPT_FAILED));
}
Project: big-c    File: TaskAttemptImpl.java
@SuppressWarnings("unchecked")
@Override
public void transition(TaskAttemptImpl taskAttempt, TaskAttemptEvent event) {
  // too many fetch failures can only happen for map tasks
  Preconditions
      .checkArgument(taskAttempt.getID().getTaskId().getTaskType() == TaskType.MAP);
  // add to diagnostics
  taskAttempt.addDiagnosticInfo("Too many fetch failures. Failing the attempt");

  if (taskAttempt.getLaunchTime() != 0) {
    taskAttempt.eventHandler
        .handle(createJobCounterUpdateEventTAFailed(taskAttempt, true));
    TaskAttemptUnsuccessfulCompletionEvent tauce =
        createTaskAttemptUnsuccessfulCompletionEvent(taskAttempt,
            TaskAttemptStateInternal.FAILED);
    taskAttempt.eventHandler.handle(new JobHistoryEvent(
        taskAttempt.attemptId.getTaskId().getJobId(), tauce));
  } else {
    LOG.debug("Not generating HistoryFinish event since start event not " +
        "generated for taskAttempt: " + taskAttempt.getID());
  }
  taskAttempt.eventHandler.handle(new TaskTAttemptEvent(
      taskAttempt.attemptId, TaskEventType.T_ATTEMPT_FAILED));
}
Project: big-c    File: JobImpl.java
/**
 * This transition executes in the event-dispatcher thread, though it's
 * triggered in MRAppMaster's startJobs() method.
 */
@Override
public void transition(JobImpl job, JobEvent event) {
  JobStartEvent jse = (JobStartEvent) event;
  if (jse.getRecoveredJobStartTime() != 0) {
    job.startTime = jse.getRecoveredJobStartTime();
  } else {
    job.startTime = job.clock.getTime();
  }
  JobInitedEvent jie =
    new JobInitedEvent(job.oldJobId,
         job.startTime,
         job.numMapTasks, job.numReduceTasks,
         job.getState().toString(),
         job.isUber());
  job.eventHandler.handle(new JobHistoryEvent(job.jobId, jie));
  JobInfoChangeEvent jice = new JobInfoChangeEvent(job.oldJobId,
      job.appSubmitTime, job.startTime);
  job.eventHandler.handle(new JobHistoryEvent(job.jobId, jice));
  job.metrics.runningJob(job);

  job.eventHandler.handle(new CommitterJobSetupEvent(
          job.jobId, job.jobContext));
}
Project: big-c    File: TaskImpl.java
@Override
public TaskStateInternal transition(TaskImpl task, TaskEvent event) {
  TaskAttemptId taskAttemptId =
      ((TaskTAttemptEvent) event).getTaskAttemptID();
  task.handleTaskAttemptCompletion(taskAttemptId, taCompletionEventStatus);
  task.finishedAttempts.add(taskAttemptId);
  // check whether all attempts are finished
  if (task.finishedAttempts.size() == task.attempts.size()) {
    if (task.historyTaskStartGenerated) {
      TaskFailedEvent taskFailedEvent = createTaskFailedEvent(task, null,
          finalState, null); // TODO JH verify failedAttempt null
      task.eventHandler.handle(new JobHistoryEvent(task.taskId.getJobId(),
          taskFailedEvent));
    } else {
      LOG.debug("Not generating HistoryFinish event since start event not" +
            " generated for task: " + task.getID());
    }

    task.eventHandler.handle(
        new JobTaskEvent(task.taskId, getExternalState(finalState)));
    return finalState;
  }
  return task.getInternalState();
}
Project: big-c    File: TaskImpl.java
@Override
public void transition(TaskImpl task, TaskEvent event) {

  if (task.historyTaskStartGenerated) {
    TaskFailedEvent taskFailedEvent = createTaskFailedEvent(task, null,
        TaskStateInternal.KILLED, null); // TODO Verify failedAttemptId is null
    task.eventHandler.handle(new JobHistoryEvent(task.taskId.getJobId(),
        taskFailedEvent));
  } else {
    LOG.debug("Not generating HistoryFinish event since start event not" +
            " generated for task: " + task.getID());
  }

  task.eventHandler.handle(new JobTaskEvent(task.taskId,
      getExternalState(TaskStateInternal.KILLED)));
  task.metrics.endWaitingTask(task);
}
Project: hadoop-2.6.0-cdh5.4.3    File: TaskAttemptImpl.java
@SuppressWarnings("unchecked")
private void sendLaunchedEvents() {
  JobCounterUpdateEvent jce = new JobCounterUpdateEvent(attemptId.getTaskId()
      .getJobId());
  jce.addCounterUpdate(attemptId.getTaskId().getTaskType() == TaskType.MAP ?
      JobCounter.TOTAL_LAUNCHED_MAPS : JobCounter.TOTAL_LAUNCHED_REDUCES, 1);
  eventHandler.handle(jce);

  LOG.info("TaskAttempt: [" + attemptId
      + "] using containerId: [" + container.getId() + " on NM: ["
      + StringInterner.weakIntern(container.getNodeId().toString()) + "]");
  TaskAttemptStartedEvent tase =
    new TaskAttemptStartedEvent(TypeConverter.fromYarn(attemptId),
        TypeConverter.fromYarn(attemptId.getTaskId().getTaskType()),
        launchTime, trackerName, httpPort, shufflePort, container.getId(),
        locality.toString(), avataar.toString());
  eventHandler.handle(
      new JobHistoryEvent(attemptId.getTaskId().getJobId(), tase));
}
Project: hops    File: TaskImpl.java
@Override
public void transition(TaskImpl task, TaskEvent event) {

  if (task.historyTaskStartGenerated) {
    TaskFailedEvent taskFailedEvent = createTaskFailedEvent(task, null,
        TaskStateInternal.KILLED, null); // TODO Verify failedAttemptId is null
    task.eventHandler.handle(new JobHistoryEvent(task.taskId.getJobId(),
        taskFailedEvent));
  } else {
    LOG.debug("Not generating HistoryFinish event since start event not" +
            " generated for task: " + task.getID());
  }

  task.eventHandler.handle(new JobTaskEvent(task.taskId,
      getExternalState(TaskStateInternal.KILLED)));
  task.metrics.endWaitingTask(task);
}
Project: hadoop-2.6.0-cdh5.4.3    File: TaskAttemptImpl.java
@SuppressWarnings("unchecked")
@Override
public void transition(TaskAttemptImpl taskAttempt, TaskAttemptEvent event) {
  // too many fetch failures can only happen for map tasks
  Preconditions
      .checkArgument(taskAttempt.getID().getTaskId().getTaskType() == TaskType.MAP);
  // add to diagnostics
  taskAttempt.addDiagnosticInfo("Too many fetch failures. Failing the attempt");

  if (taskAttempt.getLaunchTime() != 0) {
    taskAttempt.eventHandler
        .handle(createJobCounterUpdateEventTAFailed(taskAttempt, true));
    TaskAttemptUnsuccessfulCompletionEvent tauce =
        createTaskAttemptUnsuccessfulCompletionEvent(taskAttempt,
            TaskAttemptStateInternal.FAILED);
    taskAttempt.eventHandler.handle(new JobHistoryEvent(
        taskAttempt.attemptId.getTaskId().getJobId(), tauce));
  } else {
    LOG.debug("Not generating HistoryFinish event since start event not " +
        "generated for taskAttempt: " + taskAttempt.getID());
  }
  taskAttempt.eventHandler.handle(new TaskTAttemptEvent(
      taskAttempt.attemptId, TaskEventType.T_ATTEMPT_FAILED));
}
Project: hadoop-2.6.0-cdh5.4.3    File: TaskImpl.java
@Override
public TaskStateInternal transition(TaskImpl task, TaskEvent event) {
  TaskAttemptId taskAttemptId =
      ((TaskTAttemptEvent) event).getTaskAttemptID();
  task.handleTaskAttemptCompletion(taskAttemptId, taCompletionEventStatus);
  task.finishedAttempts.add(taskAttemptId);
  // check whether all attempts are finished
  if (task.finishedAttempts.size() == task.attempts.size()) {
    if (task.historyTaskStartGenerated) {
      TaskFailedEvent taskFailedEvent = createTaskFailedEvent(task, null,
          finalState, null); // TODO JH verify failedAttempt null
      task.eventHandler.handle(new JobHistoryEvent(task.taskId.getJobId(),
          taskFailedEvent));
    } else {
      LOG.debug("Not generating HistoryFinish event since start event not" +
            " generated for task: " + task.getID());
    }

    task.eventHandler.handle(
        new JobTaskEvent(task.taskId, getExternalState(finalState)));
    return finalState;
  }
  return task.getInternalState();
}
Project: hadoop-2.6.0-cdh5.4.3    File: TaskImpl.java
@Override
public void transition(TaskImpl task, TaskEvent event) {

  if (task.historyTaskStartGenerated) {
    TaskFailedEvent taskFailedEvent = createTaskFailedEvent(task, null,
        TaskStateInternal.KILLED, null); // TODO Verify failedAttemptId is null
    task.eventHandler.handle(new JobHistoryEvent(task.taskId.getJobId(),
        taskFailedEvent));
  } else {
    LOG.debug("Not generating HistoryFinish event since start event not" +
            " generated for task: " + task.getID());
  }

  task.eventHandler.handle(new JobTaskEvent(task.taskId,
      getExternalState(TaskStateInternal.KILLED)));
  task.metrics.endWaitingTask(task);
}
Project: hops    File: TaskImpl.java
@Override
public TaskStateInternal transition(TaskImpl task, TaskEvent event) {
  TaskAttemptId taskAttemptId =
      ((TaskTAttemptEvent) event).getTaskAttemptID();
  task.handleTaskAttemptCompletion(taskAttemptId, taCompletionEventStatus);
  task.finishedAttempts.add(taskAttemptId);
  // check whether all attempts are finished
  if (task.finishedAttempts.size() == task.attempts.size()) {
    if (task.historyTaskStartGenerated) {
      TaskFailedEvent taskFailedEvent = createTaskFailedEvent(task, null,
          finalState, null); // TODO JH verify failedAttempt null
      task.eventHandler.handle(new JobHistoryEvent(task.taskId.getJobId(),
          taskFailedEvent));
    } else {
      LOG.debug("Not generating HistoryFinish event since start event not" +
            " generated for task: " + task.getID());
    }

    task.eventHandler.handle(
        new JobTaskEvent(task.taskId, getExternalState(finalState)));
    return finalState;
  }
  return task.getInternalState();
}
Project: hadoop-plus    File: TaskAttemptImpl.java
@SuppressWarnings("unchecked")
private void sendLaunchedEvents() {
  JobCounterUpdateEvent jce = new JobCounterUpdateEvent(attemptId.getTaskId()
      .getJobId());
  jce.addCounterUpdate(attemptId.getTaskId().getTaskType() == TaskType.MAP ?
      JobCounter.TOTAL_LAUNCHED_MAPS : JobCounter.TOTAL_LAUNCHED_REDUCES, 1);
  eventHandler.handle(jce);

  LOG.info("TaskAttempt: [" + attemptId
      + "] using containerId: [" + container.getId() + " on NM: ["
      + StringInterner.weakIntern(container.getNodeId().toString()) + "]");
  TaskAttemptStartedEvent tase =
    new TaskAttemptStartedEvent(TypeConverter.fromYarn(attemptId),
        TypeConverter.fromYarn(attemptId.getTaskId().getTaskType()),
        launchTime, trackerName, httpPort, shufflePort, container.getId(),
        locality.toString(), avataar.toString());
  eventHandler.handle(
      new JobHistoryEvent(attemptId.getTaskId().getJobId(), tase));
}
Project: hadoop-plus    File: TaskAttemptImpl.java
@SuppressWarnings("unchecked")
@Override
public void transition(TaskAttemptImpl taskAttempt, TaskAttemptEvent event) {
  // set the finish time
  taskAttempt.setFinishTime();

  if (taskAttempt.getLaunchTime() != 0) {
    taskAttempt.eventHandler
        .handle(createJobCounterUpdateEventTAFailed(taskAttempt, false));
    TaskAttemptUnsuccessfulCompletionEvent tauce =
        createTaskAttemptUnsuccessfulCompletionEvent(taskAttempt,
            TaskAttemptStateInternal.FAILED);
    taskAttempt.eventHandler.handle(new JobHistoryEvent(
        taskAttempt.attemptId.getTaskId().getJobId(), tauce));
    // Not handling failed map/reduce events:
    // taskAttempt.logAttemptFinishedEvent(TaskAttemptStateInternal.FAILED);
  } else {
    LOG.debug("Not generating HistoryFinish event since start event not " +
        "generated for taskAttempt: " + taskAttempt.getID());
  }
  taskAttempt.eventHandler.handle(new TaskTAttemptEvent(
      taskAttempt.attemptId, TaskEventType.T_ATTEMPT_FAILED));
}
Project: hadoop-plus    File: TaskAttemptImpl.java
@SuppressWarnings("unchecked")
@Override
public void transition(TaskAttemptImpl taskAttempt, TaskAttemptEvent event) {
  // too many fetch failures can only happen for map tasks
  Preconditions
      .checkArgument(taskAttempt.getID().getTaskId().getTaskType() == TaskType.MAP);
  // add to diagnostics
  taskAttempt.addDiagnosticInfo("Too many fetch failures. Failing the attempt");
  //set the finish time
  taskAttempt.setFinishTime();

  if (taskAttempt.getLaunchTime() != 0) {
    taskAttempt.eventHandler
        .handle(createJobCounterUpdateEventTAFailed(taskAttempt, true));
    TaskAttemptUnsuccessfulCompletionEvent tauce =
        createTaskAttemptUnsuccessfulCompletionEvent(taskAttempt,
            TaskAttemptStateInternal.FAILED);
    taskAttempt.eventHandler.handle(new JobHistoryEvent(
        taskAttempt.attemptId.getTaskId().getJobId(), tauce));
  } else {
    LOG.debug("Not generating HistoryFinish event since start event not " +
        "generated for taskAttempt: " + taskAttempt.getID());
  }
  taskAttempt.eventHandler.handle(new TaskTAttemptEvent(
      taskAttempt.attemptId, TaskEventType.T_ATTEMPT_FAILED));
}
Project: hops    File: JobImpl.java
/**
 * This transition executes in the event-dispatcher thread, though it's
 * triggered in MRAppMaster's startJobs() method.
 */
@Override
public void transition(JobImpl job, JobEvent event) {
  JobStartEvent jse = (JobStartEvent) event;
  if (jse.getRecoveredJobStartTime() != -1L) {
    job.startTime = jse.getRecoveredJobStartTime();
  } else {
    job.startTime = job.clock.getTime();
  }
  JobInitedEvent jie =
    new JobInitedEvent(job.oldJobId,
         job.startTime,
         job.numMapTasks, job.numReduceTasks,
         job.getState().toString(),
         job.isUber());
  job.eventHandler.handle(new JobHistoryEvent(job.jobId, jie));
  JobInfoChangeEvent jice = new JobInfoChangeEvent(job.oldJobId,
      job.appSubmitTime, job.startTime);
  job.eventHandler.handle(new JobHistoryEvent(job.jobId, jice));
  job.metrics.runningJob(job);

  job.eventHandler.handle(new CommitterJobSetupEvent(
          job.jobId, job.jobContext));
}
Project: hadoop-plus    File: JobImpl.java
/**
 * This transition executes in the event-dispatcher thread, though it's
 * triggered in MRAppMaster's startJobs() method.
 */
@Override
public void transition(JobImpl job, JobEvent event) {
  JobStartEvent jse = (JobStartEvent) event;
  if (jse.getRecoveredJobStartTime() != 0) {
    job.startTime = jse.getRecoveredJobStartTime();
  } else {
    job.startTime = job.clock.getTime();
  }
  JobInitedEvent jie =
    new JobInitedEvent(job.oldJobId,
         job.startTime,
         job.numMapTasks, job.numReduceTasks,
         job.getState().toString(),
         job.isUber());
  job.eventHandler.handle(new JobHistoryEvent(job.jobId, jie));
  JobInfoChangeEvent jice = new JobInfoChangeEvent(job.oldJobId,
      job.appSubmitTime, job.startTime);
  job.eventHandler.handle(new JobHistoryEvent(job.jobId, jice));
  job.metrics.runningJob(job);

  job.eventHandler.handle(new CommitterJobSetupEvent(
          job.jobId, job.jobContext));
}
Project: hadoop-plus    File: TaskImpl.java
@Override
public void transition(TaskImpl task, TaskEvent event) {

  if (task.historyTaskStartGenerated) {
    TaskFailedEvent taskFailedEvent = createTaskFailedEvent(task, null,
        TaskStateInternal.KILLED, null); // TODO Verify failedAttemptId is null
    task.eventHandler.handle(new JobHistoryEvent(task.taskId.getJobId(),
        taskFailedEvent));
  } else {
    LOG.debug("Not generating HistoryFinish event since start event not" +
            " generated for task: " + task.getID());
  }

  task.eventHandler.handle(new JobTaskEvent(task.taskId,
      getExternalState(TaskStateInternal.KILLED)));
  task.metrics.endWaitingTask(task);
}
Project: FlexMap    File: TaskAttemptImpl.java
@SuppressWarnings("unchecked")
@Override
public void transition(TaskAttemptImpl taskAttempt, TaskAttemptEvent event) {
  // set the finish time
  taskAttempt.setFinishTime();

  if (taskAttempt.getLaunchTime() != 0) {
    taskAttempt.eventHandler
        .handle(createJobCounterUpdateEventTAFailed(taskAttempt, false));
    TaskAttemptUnsuccessfulCompletionEvent tauce =
        createTaskAttemptUnsuccessfulCompletionEvent(taskAttempt,
            TaskAttemptStateInternal.FAILED);
    taskAttempt.eventHandler.handle(new JobHistoryEvent(
        taskAttempt.attemptId.getTaskId().getJobId(), tauce));
    // Not handling failed map/reduce events:
    // taskAttempt.logAttemptFinishedEvent(TaskAttemptStateInternal.FAILED);
  } else {
    LOG.debug("Not generating HistoryFinish event since start event not " +
        "generated for taskAttempt: " + taskAttempt.getID());
  }
  taskAttempt.eventHandler.handle(new TaskTAttemptEvent(
      taskAttempt.attemptId, TaskEventType.T_ATTEMPT_FAILED));
}
Project: FlexMap    File: JobImpl.java
/**
 * This transition executes in the event-dispatcher thread, though it's
 * triggered in MRAppMaster's startJobs() method.
 */
@Override
public void transition(JobImpl job, JobEvent event) {
  JobStartEvent jse = (JobStartEvent) event;
  if (jse.getRecoveredJobStartTime() != 0) {
    job.startTime = jse.getRecoveredJobStartTime();
  } else {
    job.startTime = job.clock.getTime();
  }
  JobInitedEvent jie =
    new JobInitedEvent(job.oldJobId,
         job.startTime,
         job.numMapTasks, job.numReduceTasks,
         job.getState().toString(),
         job.isUber());
  job.eventHandler.handle(new JobHistoryEvent(job.jobId, jie));
  JobInfoChangeEvent jice = new JobInfoChangeEvent(job.oldJobId,
      job.appSubmitTime, job.startTime);
  job.eventHandler.handle(new JobHistoryEvent(job.jobId, jice));
  job.metrics.runningJob(job);

  job.eventHandler.handle(new CommitterJobSetupEvent(
          job.jobId, job.jobContext));
}
Project: FlexMap    File: TaskImpl.java
private void sendTaskSucceededEvents() {
  JobTaskEvent jobTaskEvent = new JobTaskEvent(taskId, TaskState.SUCCEEDED);
  long totalTime = this.getFinishTime() - this.getLaunchTime();
  long hdfsRecords = this.getSuccessfulAttempt().getCounters()
      .findCounter(TaskCounter.MAP_INPUT_RECORDS).getValue();
  long executionTime = this.getSuccessfulAttempt().getEndExecutionTime()
      - this.getSuccessfulAttempt().getBeginExecutionTime();
  double executionSpeed = hdfsRecords * 1.0 / executionTime;
  double executionRatio = 1.0 * executionTime / totalTime;
  LOG.info("inform");
  LOG.info("hdfsRecords:" + hdfsRecords);
  LOG.info("executionTime:" + executionTime);
  LOG.info("totalTime:" + totalTime);
  LOG.info("executionSpeed:" + executionSpeed);
  LOG.info("executionRatio:" + executionRatio);
  LOG.info("host:" + this.getSuccessfulAttempt().getNodeId().getHost());
  LOG.info("/inform");
  jobTaskEvent.setTaskExecutionTime((long) executionSpeed);
  jobTaskEvent.setTaskExecutionRatio(executionRatio);
  jobTaskEvent.setAttemptId(successfulAttempt);
  eventHandler.handle(jobTaskEvent);
  if (historyTaskStartGenerated) {
    TaskFinishedEvent tfe = createTaskFinishedEvent(this,
        TaskStateInternal.SUCCEEDED);
    eventHandler.handle(new JobHistoryEvent(taskId.getJobId(), tfe));
  }
}
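As a worked example of the metrics above (hypothetical numbers): 1,000,000 map input records over an execution window of 20,000 ms inside a 25,000 ms total task time give executionSpeed = 1,000,000 / 20,000 = 50 records/ms and executionRatio = 20,000 / 25,000 = 0.8.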
Project: FlexMap    File: TaskImpl.java
@Override
public TaskStateInternal transition(TaskImpl task, TaskEvent event) {
  TaskAttemptId taskAttemptId =
      ((TaskTAttemptEvent) event).getTaskAttemptID();
  LOG.info("receive attempt killed from"+task.getID().toString());

  task.handleTaskAttemptCompletion(taskAttemptId, taCompletionEventStatus);
  task.finishedAttempts.add(taskAttemptId);
  // check whether all attempts are finished
  if (task.finishedAttempts.size() == task.attempts.size()) {
    if (task.historyTaskStartGenerated) {
      TaskFailedEvent taskFailedEvent = createTaskFailedEvent(task, null,
          finalState, null); // TODO JH verify failedAttempt null
      task.eventHandler.handle(new JobHistoryEvent(task.taskId.getJobId(),
          taskFailedEvent));
    } else {
      LOG.debug("Not generating HistoryFinish event since start event not" +
            " generated for task: " + task.getID());
    }

    task.eventHandler.handle(
        new JobTaskEvent(task.taskId, getExternalState(finalState)));
    return finalState;
  }
  return task.getInternalState();
}
Project: FlexMap    File: TaskImpl.java
@Override
public void transition(TaskImpl task, TaskEvent event) {

  if (task.historyTaskStartGenerated) {
    TaskFailedEvent taskFailedEvent = createTaskFailedEvent(task, null,
        TaskStateInternal.KILLED, null); // TODO Verify failedAttemptId is null
    task.eventHandler.handle(new JobHistoryEvent(task.taskId.getJobId(),
        taskFailedEvent));
  } else {
    LOG.debug("Not generating HistoryFinish event since start event not" +
            " generated for task: " + task.getID());
  }

  task.eventHandler.handle(new JobTaskEvent(task.taskId,
      getExternalState(TaskStateInternal.KILLED)));
  task.metrics.endWaitingTask(task);
}
Project: hops    File: TaskAttemptImpl.java
@SuppressWarnings("unchecked")
private void sendLaunchedEvents() {
  JobCounterUpdateEvent jce = new JobCounterUpdateEvent(attemptId.getTaskId()
      .getJobId());
  jce.addCounterUpdate(attemptId.getTaskId().getTaskType() == TaskType.MAP ?
      JobCounter.TOTAL_LAUNCHED_MAPS : JobCounter.TOTAL_LAUNCHED_REDUCES, 1);
  eventHandler.handle(jce);

  LOG.info("TaskAttempt: [" + attemptId
      + "] using containerId: [" + container.getId() + " on NM: ["
      + StringInterner.weakIntern(container.getNodeId().toString()) + "]");
  TaskAttemptStartedEvent tase =
    new TaskAttemptStartedEvent(TypeConverter.fromYarn(attemptId),
        TypeConverter.fromYarn(attemptId.getTaskId().getTaskType()),
        launchTime, trackerName, httpPort, shufflePort, container.getId(),
        locality.toString(), avataar.toString());
  eventHandler.handle(
      new JobHistoryEvent(attemptId.getTaskId().getJobId(), tase));
}
Project: hadoop    File: TaskAttemptImpl.java
@SuppressWarnings("unchecked")
@Override
public TaskAttemptStateInternal transition(TaskAttemptImpl taskAttempt, 
    TaskAttemptEvent event) {
  if (taskAttempt.getID().getTaskId().getTaskType() == TaskType.REDUCE) {
    // after a reduce task has succeeded, its outputs are safe in HDFS.
    // logically such a task should not be killed. we only come here when
    // there is a race condition in the event queue. E.g. some logic sends
    // a kill request to this attempt when the successful completion event
    // for this task is already in the event queue. so the kill event will
    // get executed immediately after the attempt is marked successful and 
    // result in this transition being exercised.
    // ignore this for reduce tasks
    LOG.info("Ignoring killed event for successful reduce task attempt" +
              taskAttempt.getID().toString());
    return TaskAttemptStateInternal.SUCCEEDED;
  }
  if (event instanceof TaskAttemptKillEvent) {
    TaskAttemptKillEvent msgEvent = (TaskAttemptKillEvent) event;
    // add to diagnostics
    taskAttempt.addDiagnosticInfo(msgEvent.getMessage());
  }

  // not setting a finish time since it was set on success
  assert (taskAttempt.getFinishTime() != 0);

  assert (taskAttempt.getLaunchTime() != 0);
  taskAttempt.eventHandler
      .handle(createJobCounterUpdateEventTAKilled(taskAttempt, true));
  TaskAttemptUnsuccessfulCompletionEvent tauce = createTaskAttemptUnsuccessfulCompletionEvent(
      taskAttempt, TaskAttemptStateInternal.KILLED);
  taskAttempt.eventHandler.handle(new JobHistoryEvent(taskAttempt.attemptId
      .getTaskId().getJobId(), tauce));
  taskAttempt.eventHandler.handle(new TaskTAttemptEvent(
      taskAttempt.attemptId, TaskEventType.T_ATTEMPT_KILLED));
  return TaskAttemptStateInternal.KILLED;
}
Project: hadoop    File: TaskAttemptImpl.java
@SuppressWarnings("unchecked")
@Override
public void transition(TaskAttemptImpl taskAttempt,
    TaskAttemptEvent event) {
  // set the finish time
  taskAttempt.setFinishTime();
  if (taskAttempt.getLaunchTime() != 0) {
    taskAttempt.eventHandler
        .handle(createJobCounterUpdateEventTAKilled(taskAttempt, false));
    TaskAttemptUnsuccessfulCompletionEvent tauce =
        createTaskAttemptUnsuccessfulCompletionEvent(taskAttempt,
            TaskAttemptStateInternal.KILLED);
    taskAttempt.eventHandler.handle(new JobHistoryEvent(
        taskAttempt.attemptId.getTaskId().getJobId(), tauce));
  } else {
    LOG.debug("Not generating HistoryFinish event since start event not " +
        "generated for taskAttempt: " + taskAttempt.getID());
  }

  if (event instanceof TaskAttemptKillEvent) {
    taskAttempt.addDiagnosticInfo(
        ((TaskAttemptKillEvent) event).getMessage());
  }

  // Not logging Map/Reduce attempts in case of failure:
  // taskAttempt.logAttemptFinishedEvent(TaskAttemptStateInternal.KILLED);
  taskAttempt.eventHandler.handle(new TaskTAttemptEvent(
      taskAttempt.attemptId,
      TaskEventType.T_ATTEMPT_KILLED));
}
Project: hadoop    File: JobImpl.java
private void unsuccessfulFinish(JobStateInternal finalState) {
  if (finishTime == 0) {
    setFinishTime();
  }
  cleanupProgress = 1.0f;
  JobUnsuccessfulCompletionEvent unsuccessfulJobEvent =
      new JobUnsuccessfulCompletionEvent(oldJobId,
          finishTime,
          succeededMapTaskCount,
          succeededReduceTaskCount,
          finalState.toString(),
          diagnostics);
  eventHandler.handle(new JobHistoryEvent(jobId, unsuccessfulJobEvent));
  finished(finalState);
}
Project: hadoop    File: JobImpl.java
@Override
public void transition(JobImpl job, JobEvent event) {
  job.setFinishTime();
  JobUnsuccessfulCompletionEvent failedEvent =
      new JobUnsuccessfulCompletionEvent(job.oldJobId,
          job.finishTime, 0, 0,
          JobStateInternal.KILLED.toString(), job.diagnostics);
  job.eventHandler.handle(new JobHistoryEvent(job.jobId, failedEvent));
  job.finished(JobStateInternal.KILLED);
}