Java class org.apache.hadoop.mapreduce.v2.app.rm.ContainerFailedEvent: example source code
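
The snippets below are collected from several Hadoop forks and show the two places ContainerFailedEvent appears: test helpers in TestRMContainerAllocator that build the event directly, and the attempt-failed transition in TaskImpl that raises it when a failed attempt already had a container assigned. As a quick orientation, here is a minimal, hedged sketch (not taken from any of the projects below; identifiers and the class name are illustrative only) of the construction pattern they all share: build a TaskAttemptId with MRBuilderUtils and pair it with the container manager address of the failed container.

import org.apache.hadoop.mapreduce.v2.api.records.JobId;
import org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptId;
import org.apache.hadoop.mapreduce.v2.api.records.TaskId;
import org.apache.hadoop.mapreduce.v2.api.records.TaskType;
import org.apache.hadoop.mapreduce.v2.app.rm.ContainerFailedEvent;
import org.apache.hadoop.mapreduce.v2.util.MRBuilderUtils;
import org.apache.hadoop.yarn.api.records.ApplicationId;

public class ContainerFailedEventSketch {
  public static void main(String[] args) {
    // Illustrative ids only; any application/job/task numbering works here.
    ApplicationId appId = ApplicationId.newInstance(System.currentTimeMillis(), 1);
    JobId jobId = MRBuilderUtils.newJobId(appId, 1);
    TaskId taskId = MRBuilderUtils.newTaskId(jobId, 0, TaskType.MAP);
    TaskAttemptId attemptId = MRBuilderUtils.newTaskAttemptId(taskId, 0);

    // The second argument is the container manager address ("host") of the
    // failed container, as in the createFailEvent helpers below.
    ContainerFailedEvent failed = new ContainerFailedEvent(attemptId, "host1:1234");
    System.out.println(failed.getAttemptID() + " failed on " + failed.getContMgrAddress());
  }
}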

Project: hadoop-plus    File: TestRMContainerAllocator.java
private ContainerFailedEvent createFailEvent(JobId jobId, int taskAttemptId,
    String host, boolean reduce) {
  TaskId taskId;
  if (reduce) {
    taskId = MRBuilderUtils.newTaskId(jobId, 0, TaskType.REDUCE);
  } else {
    taskId = MRBuilderUtils.newTaskId(jobId, 0, TaskType.MAP);
  }
  TaskAttemptId attemptId = MRBuilderUtils.newTaskAttemptId(taskId,
      taskAttemptId);
  return new ContainerFailedEvent(attemptId, host);    
}
Project: hadoop-TCP    File: TestRMContainerAllocator.java
private ContainerFailedEvent createFailEvent(JobId jobId, int taskAttemptId,
    String host, boolean reduce) {
  TaskId taskId;
  if (reduce) {
    taskId = MRBuilderUtils.newTaskId(jobId, 0, TaskType.REDUCE);
  } else {
    taskId = MRBuilderUtils.newTaskId(jobId, 0, TaskType.MAP);
  }
  TaskAttemptId attemptId = MRBuilderUtils.newTaskAttemptId(taskId,
      taskAttemptId);
  return new ContainerFailedEvent(attemptId, host);    
}
Project: hardfs    File: TestRMContainerAllocator.java
private ContainerFailedEvent createFailEvent(JobId jobId, int taskAttemptId,
    String host, boolean reduce) {
  TaskId taskId;
  if (reduce) {
    taskId = MRBuilderUtils.newTaskId(jobId, 0, TaskType.REDUCE);
  } else {
    taskId = MRBuilderUtils.newTaskId(jobId, 0, TaskType.MAP);
  }
  TaskAttemptId attemptId = MRBuilderUtils.newTaskAttemptId(taskId,
      taskAttemptId);
  return new ContainerFailedEvent(attemptId, host);    
}
Project: hadoop-on-lustre2    File: TestRMContainerAllocator.java
private ContainerFailedEvent createFailEvent(JobId jobId, int taskAttemptId,
    String host, boolean reduce) {
  TaskId taskId;
  if (reduce) {
    taskId = MRBuilderUtils.newTaskId(jobId, 0, TaskType.REDUCE);
  } else {
    taskId = MRBuilderUtils.newTaskId(jobId, 0, TaskType.MAP);
  }
  TaskAttemptId attemptId = MRBuilderUtils.newTaskAttemptId(taskId,
      taskAttemptId);
  return new ContainerFailedEvent(attemptId, host);    
}
Project: hadoop    File: TaskImpl.java
@Override
public TaskStateInternal transition(TaskImpl task, TaskEvent event) {
  TaskTAttemptEvent castEvent = (TaskTAttemptEvent) event;
  TaskAttemptId taskAttemptId = castEvent.getTaskAttemptID();
  task.failedAttempts.add(taskAttemptId); 
  if (taskAttemptId.equals(task.commitAttempt)) {
    task.commitAttempt = null;
  }
  TaskAttempt attempt = task.attempts.get(taskAttemptId);
  if (attempt.getAssignedContainerMgrAddress() != null) {
    //container was assigned
    task.eventHandler.handle(new ContainerFailedEvent(attempt.getID(), 
        attempt.getAssignedContainerMgrAddress()));
  }

  task.finishedAttempts.add(taskAttemptId);
  if (task.failedAttempts.size() < task.maxAttempts) {
    task.handleTaskAttemptCompletion(
        taskAttemptId, 
        TaskAttemptCompletionEventStatus.FAILED);
    // we don't need a new event if we already have a spare
    task.inProgressAttempts.remove(taskAttemptId);
    if (task.inProgressAttempts.size() == 0
        && task.successfulAttempt == null) {
      task.addAndScheduleAttempt(Avataar.VIRGIN);
    }
  } else {
    task.handleTaskAttemptCompletion(
        taskAttemptId, 
        TaskAttemptCompletionEventStatus.TIPFAILED);

    // issue kill to all non finished attempts
    for (TaskAttempt taskAttempt : task.attempts.values()) {
      task.killUnfinishedAttempt
        (taskAttempt, "Task has failed. Killing attempt!");
    }
    task.inProgressAttempts.clear();

    if (task.historyTaskStartGenerated) {
      TaskFailedEvent taskFailedEvent = createTaskFailedEvent(task,
          attempt.getDiagnostics(), TaskStateInternal.FAILED, taskAttemptId);
      task.eventHandler.handle(new JobHistoryEvent(task.taskId.getJobId(),
          taskFailedEvent));
    } else {
      LOG.debug("Not generating HistoryFinish event since start event not" +
          " generated for task: " + task.getID());
    }
    task.eventHandler.handle(
        new JobTaskEvent(task.taskId, TaskState.FAILED));
    return task.finished(TaskStateInternal.FAILED);
  }
  return getDefaultState(task);
}
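
For reference, the dispatch step in the transition above reduces to handing the event to the application's event handler. A hedged sketch of just that step (the class and method names are stand-ins, and the EventHandler parameter is an assumption; in the MR application master it is wired to the RM container allocator):

import org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptId;
import org.apache.hadoop.mapreduce.v2.app.rm.ContainerFailedEvent;
import org.apache.hadoop.yarn.event.EventHandler;

class ContainerFailureDispatchSketch {
  // Mirrors the TaskImpl logic: only raise the event if the failed attempt
  // actually had a container assigned (a non-null container manager address).
  static void reportFailure(EventHandler<ContainerFailedEvent> eventHandler,
      TaskAttemptId attemptId, String containerMgrAddress) {
    if (containerMgrAddress != null) {
      eventHandler.handle(new ContainerFailedEvent(attemptId, containerMgrAddress));
    }
  }
}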
Project: aliyun-oss-hadoop-fs    File: TaskImpl.java
@Override
public TaskStateInternal transition(TaskImpl task, TaskEvent event) {
  TaskTAttemptEvent castEvent = (TaskTAttemptEvent) event;
  TaskAttemptId taskAttemptId = castEvent.getTaskAttemptID();
  task.failedAttempts.add(taskAttemptId); 
  if (taskAttemptId.equals(task.commitAttempt)) {
    task.commitAttempt = null;
  }
  TaskAttempt attempt = task.attempts.get(taskAttemptId);
  if (attempt.getAssignedContainerMgrAddress() != null) {
    //container was assigned
    task.eventHandler.handle(new ContainerFailedEvent(attempt.getID(), 
        attempt.getAssignedContainerMgrAddress()));
  }

  task.finishedAttempts.add(taskAttemptId);
  if (task.failedAttempts.size() < task.maxAttempts) {
    task.handleTaskAttemptCompletion(
        taskAttemptId, 
        TaskAttemptCompletionEventStatus.FAILED);
    // we don't need a new event if we already have a spare
    task.inProgressAttempts.remove(taskAttemptId);
    if (task.successfulAttempt == null) {
      boolean shouldAddNewAttempt = true;
      if (task.inProgressAttempts.size() > 0) {
        // if not all of the inProgressAttempts are hanging for resource
        for (TaskAttemptId attemptId : task.inProgressAttempts) {
          if (((TaskAttemptImpl) task.getAttempt(attemptId))
              .isContainerAssigned()) {
            shouldAddNewAttempt = false;
            break;
          }
        }
      }
      if (shouldAddNewAttempt) {
        task.addAndScheduleAttempt(Avataar.VIRGIN);
      }
    }
  } else {
    task.handleTaskAttemptCompletion(
        taskAttemptId, 
        TaskAttemptCompletionEventStatus.TIPFAILED);

    // issue kill to all non finished attempts
    for (TaskAttempt taskAttempt : task.attempts.values()) {
      task.killUnfinishedAttempt
        (taskAttempt, "Task has failed. Killing attempt!");
    }
    task.inProgressAttempts.clear();

    if (task.historyTaskStartGenerated) {
      TaskFailedEvent taskFailedEvent = createTaskFailedEvent(task,
          attempt.getDiagnostics(), TaskStateInternal.FAILED, taskAttemptId);
      task.eventHandler.handle(new JobHistoryEvent(task.taskId.getJobId(),
          taskFailedEvent));
    } else {
      LOG.debug("Not generating HistoryFinish event since start event not" +
          " generated for task: " + task.getID());
    }
    task.eventHandler.handle(
        new JobTaskEvent(task.taskId, TaskState.FAILED));
    return task.finished(TaskStateInternal.FAILED);
  }
  return getDefaultState(task);
}
Project: big-c    File: TaskImpl.java
@Override
public TaskStateInternal transition(TaskImpl task, TaskEvent event) {
  TaskTAttemptEvent castEvent = (TaskTAttemptEvent) event;
  TaskAttemptId taskAttemptId = castEvent.getTaskAttemptID();
  task.failedAttempts.add(taskAttemptId); 
  if (taskAttemptId.equals(task.commitAttempt)) {
    task.commitAttempt = null;
  }
  TaskAttempt attempt = task.attempts.get(taskAttemptId);
  if (attempt.getAssignedContainerMgrAddress() != null) {
    //container was assigned
    task.eventHandler.handle(new ContainerFailedEvent(attempt.getID(), 
        attempt.getAssignedContainerMgrAddress()));
  }

  task.finishedAttempts.add(taskAttemptId);
  if (task.failedAttempts.size() < task.maxAttempts) {
    task.handleTaskAttemptCompletion(
        taskAttemptId, 
        TaskAttemptCompletionEventStatus.FAILED);
    // we don't need a new event if we already have a spare
    task.inProgressAttempts.remove(taskAttemptId);
    if (task.inProgressAttempts.size() == 0
        && task.successfulAttempt == null) {
      task.addAndScheduleAttempt(Avataar.VIRGIN);
    }
  } else {
    task.handleTaskAttemptCompletion(
        taskAttemptId, 
        TaskAttemptCompletionEventStatus.TIPFAILED);

    // issue kill to all non finished attempts
    for (TaskAttempt taskAttempt : task.attempts.values()) {
      task.killUnfinishedAttempt
        (taskAttempt, "Task has failed. Killing attempt!");
    }
    task.inProgressAttempts.clear();

    if (task.historyTaskStartGenerated) {
      TaskFailedEvent taskFailedEvent = createTaskFailedEvent(task,
          attempt.getDiagnostics(), TaskStateInternal.FAILED, taskAttemptId);
      task.eventHandler.handle(new JobHistoryEvent(task.taskId.getJobId(),
          taskFailedEvent));
    } else {
      LOG.debug("Not generating HistoryFinish event since start event not" +
          " generated for task: " + task.getID());
    }
    task.eventHandler.handle(
        new JobTaskEvent(task.taskId, TaskState.FAILED));
    return task.finished(TaskStateInternal.FAILED);
  }
  return getDefaultState(task);
}
Project: hadoop-2.6.0-cdh5.4.3    File: TaskImpl.java
@Override
public TaskStateInternal transition(TaskImpl task, TaskEvent event) {
  TaskTAttemptEvent castEvent = (TaskTAttemptEvent) event;
  TaskAttemptId taskAttemptId = castEvent.getTaskAttemptID();
  task.failedAttempts.add(taskAttemptId); 
  if (taskAttemptId.equals(task.commitAttempt)) {
    task.commitAttempt = null;
  }
  TaskAttempt attempt = task.attempts.get(taskAttemptId);
  if (attempt.getAssignedContainerMgrAddress() != null) {
    //container was assigned
    task.eventHandler.handle(new ContainerFailedEvent(attempt.getID(), 
        attempt.getAssignedContainerMgrAddress()));
  }

  task.finishedAttempts.add(taskAttemptId);
  if (task.failedAttempts.size() < task.maxAttempts) {
    task.handleTaskAttemptCompletion(
        taskAttemptId, 
        TaskAttemptCompletionEventStatus.FAILED);
    // we don't need a new event if we already have a spare
    task.inProgressAttempts.remove(taskAttemptId);
    if (task.inProgressAttempts.size() == 0
        && task.successfulAttempt == null) {
      task.addAndScheduleAttempt(Avataar.VIRGIN);
    }
  } else {
    task.handleTaskAttemptCompletion(
        taskAttemptId, 
        TaskAttemptCompletionEventStatus.TIPFAILED);

    // issue kill to all non finished attempts
    for (TaskAttempt taskAttempt : task.attempts.values()) {
      task.killUnfinishedAttempt
        (taskAttempt, "Task has failed. Killing attempt!");
    }
    task.inProgressAttempts.clear();

    if (task.historyTaskStartGenerated) {
      TaskFailedEvent taskFailedEvent = createTaskFailedEvent(task,
          attempt.getDiagnostics(), TaskStateInternal.FAILED, taskAttemptId);
      task.eventHandler.handle(new JobHistoryEvent(task.taskId.getJobId(),
          taskFailedEvent));
    } else {
      LOG.debug("Not generating HistoryFinish event since start event not" +
          " generated for task: " + task.getID());
    }
    task.eventHandler.handle(
        new JobTaskEvent(task.taskId, TaskState.FAILED));
    return task.finished(TaskStateInternal.FAILED);
  }
  return getDefaultState(task);
}
Project: hadoop-plus    File: TaskImpl.java
@Override
public TaskStateInternal transition(TaskImpl task, TaskEvent event) {
  TaskTAttemptEvent castEvent = (TaskTAttemptEvent) event;
  TaskAttemptId taskAttemptId = castEvent.getTaskAttemptID();
  task.failedAttempts.add(taskAttemptId); 
  if (taskAttemptId.equals(task.commitAttempt)) {
    task.commitAttempt = null;
  }
  TaskAttempt attempt = task.attempts.get(taskAttemptId);
  if (attempt.getAssignedContainerMgrAddress() != null) {
    //container was assigned
    task.eventHandler.handle(new ContainerFailedEvent(attempt.getID(), 
        attempt.getAssignedContainerMgrAddress()));
  }

  task.finishedAttempts.add(taskAttemptId);
  if (task.failedAttempts.size() < task.maxAttempts) {
    task.handleTaskAttemptCompletion(
        taskAttemptId, 
        TaskAttemptCompletionEventStatus.FAILED);
    // we don't need a new event if we already have a spare
    task.inProgressAttempts.remove(taskAttemptId);
    if (task.inProgressAttempts.size() == 0
        && task.successfulAttempt == null) {
      task.addAndScheduleAttempt(Avataar.VIRGIN);
    }
  } else {
    task.handleTaskAttemptCompletion(
        taskAttemptId, 
        TaskAttemptCompletionEventStatus.TIPFAILED);

    // issue kill to all non finished attempts
    for (TaskAttempt taskAttempt : task.attempts.values()) {
      task.killUnfinishedAttempt
        (taskAttempt, "Task has failed. Killing attempt!");
    }
    task.inProgressAttempts.clear();

    if (task.historyTaskStartGenerated) {
      TaskFailedEvent taskFailedEvent = createTaskFailedEvent(task,
          attempt.getDiagnostics(), TaskStateInternal.FAILED, taskAttemptId);
      task.eventHandler.handle(new JobHistoryEvent(task.taskId.getJobId(),
          taskFailedEvent));
    } else {
      LOG.debug("Not generating HistoryFinish event since start event not" +
          " generated for task: " + task.getID());
    }
    task.eventHandler.handle(
        new JobTaskEvent(task.taskId, TaskState.FAILED));
    return task.finished(TaskStateInternal.FAILED);
  }
  return getDefaultState(task);
}
Project: hadoop-plus    File: TestRMContainerAllocator.java
public void sendFailure(ContainerFailedEvent f) {
  super.handleEvent(f);
}
Project: FlexMap    File: TaskImpl.java
@Override
public TaskStateInternal transition(TaskImpl task, TaskEvent event) {
  TaskTAttemptEvent castEvent = (TaskTAttemptEvent) event;
  TaskAttemptId taskAttemptId = castEvent.getTaskAttemptID();
  task.failedAttempts.add(taskAttemptId); 
  if (taskAttemptId.equals(task.commitAttempt)) {
    task.commitAttempt = null;
  }
  TaskAttempt attempt = task.attempts.get(taskAttemptId);
  if (attempt.getAssignedContainerMgrAddress() != null) {
    //container was assigned
    task.eventHandler.handle(new ContainerFailedEvent(attempt.getID(), 
        attempt.getAssignedContainerMgrAddress()));
  }

  task.finishedAttempts.add(taskAttemptId);
  if (task.failedAttempts.size() < task.maxAttempts) {
    task.handleTaskAttemptCompletion(
        taskAttemptId, 
        TaskAttemptCompletionEventStatus.FAILED);
    // we don't need a new event if we already have a spare
    task.inProgressAttempts.remove(taskAttemptId);
    if (task.inProgressAttempts.size() == 0
        && task.successfulAttempt == null) {
      task.addAndScheduleAttempt(Avataar.VIRGIN);
    }
  } else {
    task.handleTaskAttemptCompletion(
        taskAttemptId, 
        TaskAttemptCompletionEventStatus.TIPFAILED);

    // issue kill to all non finished attempts
    for (TaskAttempt taskAttempt : task.attempts.values()) {
      task.killUnfinishedAttempt
        (taskAttempt, "Task has failed. Killing attempt!");
    }
    task.inProgressAttempts.clear();

    if (task.historyTaskStartGenerated) {
      TaskFailedEvent taskFailedEvent = createTaskFailedEvent(task,
          attempt.getDiagnostics(), TaskStateInternal.FAILED, taskAttemptId);
      task.eventHandler.handle(new JobHistoryEvent(task.taskId.getJobId(),
          taskFailedEvent));
    } else {
      LOG.debug("Not generating HistoryFinish event since start event not" +
          " generated for task: " + task.getID());
    }
    task.eventHandler.handle(
        new JobTaskEvent(task.taskId, TaskState.FAILED));
    return task.finished(TaskStateInternal.FAILED);
  }
  return getDefaultState(task);
}
Project: hops    File: TaskImpl.java
@Override
public TaskStateInternal transition(TaskImpl task, TaskEvent event) {
  TaskTAttemptEvent castEvent = (TaskTAttemptEvent) event;
  TaskAttemptId taskAttemptId = castEvent.getTaskAttemptID();
  task.failedAttempts.add(taskAttemptId); 
  if (taskAttemptId.equals(task.commitAttempt)) {
    task.commitAttempt = null;
  }
  TaskAttempt attempt = task.attempts.get(taskAttemptId);
  if (attempt.getAssignedContainerMgrAddress() != null) {
    //container was assigned
    task.eventHandler.handle(new ContainerFailedEvent(attempt.getID(), 
        attempt.getAssignedContainerMgrAddress()));
  }

  task.finishedAttempts.add(taskAttemptId);
  if (task.failedAttempts.size() < task.maxAttempts) {
    task.handleTaskAttemptCompletion(
        taskAttemptId, 
        TaskAttemptCompletionEventStatus.FAILED);
    // we don't need a new event if we already have a spare
    task.inProgressAttempts.remove(taskAttemptId);
    if (task.successfulAttempt == null) {
      boolean shouldAddNewAttempt = true;
      if (task.inProgressAttempts.size() > 0) {
        // if not all of the inProgressAttempts are hanging for resource
        for (TaskAttemptId attemptId : task.inProgressAttempts) {
          if (((TaskAttemptImpl) task.getAttempt(attemptId))
              .isContainerAssigned()) {
            shouldAddNewAttempt = false;
            break;
          }
        }
      }
      if (shouldAddNewAttempt) {
        task.addAndScheduleAttempt(Avataar.VIRGIN);
      }
    }
  } else {
    task.handleTaskAttemptCompletion(
        taskAttemptId, 
        TaskAttemptCompletionEventStatus.TIPFAILED);

    // issue kill to all non finished attempts
    for (TaskAttempt taskAttempt : task.attempts.values()) {
      task.killUnfinishedAttempt
        (taskAttempt, "Task has failed. Killing attempt!");
    }
    task.inProgressAttempts.clear();

    if (task.historyTaskStartGenerated) {
      TaskFailedEvent taskFailedEvent = createTaskFailedEvent(task,
          attempt.getDiagnostics(), TaskStateInternal.FAILED, taskAttemptId);
      task.eventHandler.handle(new JobHistoryEvent(task.taskId.getJobId(),
          taskFailedEvent));
    } else {
      LOG.debug("Not generating HistoryFinish event since start event not" +
          " generated for task: " + task.getID());
    }
    task.eventHandler.handle(
        new JobTaskEvent(task.taskId, TaskState.FAILED));
    return task.finished(TaskStateInternal.FAILED);
  }
  return getDefaultState(task);
}
Project: hadoop-TCP    File: TaskImpl.java
@Override
public TaskStateInternal transition(TaskImpl task, TaskEvent event) {
  TaskTAttemptEvent castEvent = (TaskTAttemptEvent) event;
  TaskAttemptId taskAttemptId = castEvent.getTaskAttemptID();
  task.failedAttempts.add(taskAttemptId); 
  if (taskAttemptId.equals(task.commitAttempt)) {
    task.commitAttempt = null;
  }
  TaskAttempt attempt = task.attempts.get(taskAttemptId);
  if (attempt.getAssignedContainerMgrAddress() != null) {
    //container was assigned
    task.eventHandler.handle(new ContainerFailedEvent(attempt.getID(), 
        attempt.getAssignedContainerMgrAddress()));
  }

  task.finishedAttempts.add(taskAttemptId);
  if (task.failedAttempts.size() < task.maxAttempts) {
    task.handleTaskAttemptCompletion(
        taskAttemptId, 
        TaskAttemptCompletionEventStatus.FAILED);
    // we don't need a new event if we already have a spare
    task.inProgressAttempts.remove(taskAttemptId);
    if (task.inProgressAttempts.size() == 0
        && task.successfulAttempt == null) {
      task.addAndScheduleAttempt(Avataar.VIRGIN);
    }
  } else {
    task.handleTaskAttemptCompletion(
        taskAttemptId, 
        TaskAttemptCompletionEventStatus.TIPFAILED);

    // issue kill to all non finished attempts
    for (TaskAttempt taskAttempt : task.attempts.values()) {
      task.killUnfinishedAttempt
        (taskAttempt, "Task has failed. Killing attempt!");
    }
    task.inProgressAttempts.clear();

    if (task.historyTaskStartGenerated) {
      TaskFailedEvent taskFailedEvent = createTaskFailedEvent(task,
          attempt.getDiagnostics(), TaskStateInternal.FAILED, taskAttemptId);
      task.eventHandler.handle(new JobHistoryEvent(task.taskId.getJobId(),
          taskFailedEvent));
    } else {
      LOG.debug("Not generating HistoryFinish event since start event not" +
          " generated for task: " + task.getID());
    }
    task.eventHandler.handle(
        new JobTaskEvent(task.taskId, TaskState.FAILED));
    return task.finished(TaskStateInternal.FAILED);
  }
  return getDefaultState(task);
}
Project: hadoop-TCP    File: TestRMContainerAllocator.java
public void sendFailure(ContainerFailedEvent f) {
  super.handleEvent(f);
}
Project: hardfs    File: TaskImpl.java
@Override
public TaskStateInternal transition(TaskImpl task, TaskEvent event) {
  TaskTAttemptEvent castEvent = (TaskTAttemptEvent) event;
  TaskAttemptId taskAttemptId = castEvent.getTaskAttemptID();
  task.failedAttempts.add(taskAttemptId); 
  if (taskAttemptId.equals(task.commitAttempt)) {
    task.commitAttempt = null;
  }
  TaskAttempt attempt = task.attempts.get(taskAttemptId);
  if (attempt.getAssignedContainerMgrAddress() != null) {
    //container was assigned
    task.eventHandler.handle(new ContainerFailedEvent(attempt.getID(), 
        attempt.getAssignedContainerMgrAddress()));
  }

  task.finishedAttempts.add(taskAttemptId);
  if (task.failedAttempts.size() < task.maxAttempts) {
    task.handleTaskAttemptCompletion(
        taskAttemptId, 
        TaskAttemptCompletionEventStatus.FAILED);
    // we don't need a new event if we already have a spare
    task.inProgressAttempts.remove(taskAttemptId);
    if (task.inProgressAttempts.size() == 0
        && task.successfulAttempt == null) {
      task.addAndScheduleAttempt(Avataar.VIRGIN);
    }
  } else {
    task.handleTaskAttemptCompletion(
        taskAttemptId, 
        TaskAttemptCompletionEventStatus.TIPFAILED);

    // issue kill to all non finished attempts
    for (TaskAttempt taskAttempt : task.attempts.values()) {
      task.killUnfinishedAttempt
        (taskAttempt, "Task has failed. Killing attempt!");
    }
    task.inProgressAttempts.clear();

    if (task.historyTaskStartGenerated) {
      TaskFailedEvent taskFailedEvent = createTaskFailedEvent(task,
          attempt.getDiagnostics(), TaskStateInternal.FAILED, taskAttemptId);
      task.eventHandler.handle(new JobHistoryEvent(task.taskId.getJobId(),
          taskFailedEvent));
    } else {
      LOG.debug("Not generating HistoryFinish event since start event not" +
          " generated for task: " + task.getID());
    }
    task.eventHandler.handle(
        new JobTaskEvent(task.taskId, TaskState.FAILED));
    return task.finished(TaskStateInternal.FAILED);
  }
  return getDefaultState(task);
}
Project: hardfs    File: TestRMContainerAllocator.java
public void sendFailure(ContainerFailedEvent f) {
  super.handleEvent(f);
}
Project: hadoop-on-lustre2    File: TaskImpl.java
@Override
public TaskStateInternal transition(TaskImpl task, TaskEvent event) {
  TaskTAttemptEvent castEvent = (TaskTAttemptEvent) event;
  TaskAttemptId taskAttemptId = castEvent.getTaskAttemptID();
  task.failedAttempts.add(taskAttemptId); 
  if (taskAttemptId.equals(task.commitAttempt)) {
    task.commitAttempt = null;
  }
  TaskAttempt attempt = task.attempts.get(taskAttemptId);
  if (attempt.getAssignedContainerMgrAddress() != null) {
    //container was assigned
    task.eventHandler.handle(new ContainerFailedEvent(attempt.getID(), 
        attempt.getAssignedContainerMgrAddress()));
  }

  task.finishedAttempts.add(taskAttemptId);
  if (task.failedAttempts.size() < task.maxAttempts) {
    task.handleTaskAttemptCompletion(
        taskAttemptId, 
        TaskAttemptCompletionEventStatus.FAILED);
    // we don't need a new event if we already have a spare
    task.inProgressAttempts.remove(taskAttemptId);
    if (task.inProgressAttempts.size() == 0
        && task.successfulAttempt == null) {
      task.addAndScheduleAttempt(Avataar.VIRGIN);
    }
  } else {
    task.handleTaskAttemptCompletion(
        taskAttemptId, 
        TaskAttemptCompletionEventStatus.TIPFAILED);

    // issue kill to all non finished attempts
    for (TaskAttempt taskAttempt : task.attempts.values()) {
      task.killUnfinishedAttempt
        (taskAttempt, "Task has failed. Killing attempt!");
    }
    task.inProgressAttempts.clear();

    if (task.historyTaskStartGenerated) {
      TaskFailedEvent taskFailedEvent = createTaskFailedEvent(task,
          attempt.getDiagnostics(), TaskStateInternal.FAILED, taskAttemptId);
      task.eventHandler.handle(new JobHistoryEvent(task.taskId.getJobId(),
          taskFailedEvent));
    } else {
      LOG.debug("Not generating HistoryFinish event since start event not" +
          " generated for task: " + task.getID());
    }
    task.eventHandler.handle(
        new JobTaskEvent(task.taskId, TaskState.FAILED));
    return task.finished(TaskStateInternal.FAILED);
  }
  return getDefaultState(task);
}
Project: hadoop-on-lustre2    File: TestRMContainerAllocator.java
public void sendFailure(ContainerFailedEvent f) {
  super.handleEvent(f);
}