Java class org.apache.hadoop.mapreduce.v2.api.records.TaskType: example source code

Project: hadoop    File: DefaultSpeculator.java
private AtomicInteger containerNeed(TaskId taskID) {
  JobId jobID = taskID.getJobId();
  TaskType taskType = taskID.getTaskType();

  ConcurrentMap<JobId, AtomicInteger> relevantMap
      = taskType == TaskType.MAP ? mapContainerNeeds : reduceContainerNeeds;

  AtomicInteger result = relevantMap.get(jobID);

  if (result == null) {
    relevantMap.putIfAbsent(jobID, new AtomicInteger(0));
    result = relevantMap.get(jobID);
  }

  return result;
}
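The get / putIfAbsent / get sequence above is the pre-Java-8 idiom for atomically initializing a per-key counter in a ConcurrentMap. A minimal sketch of the equivalent single-call form, using String in place of JobId purely for illustration:

import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;
import java.util.concurrent.atomic.AtomicInteger;

// Illustrative only: computeIfAbsent creates the counter at most once per key.
ConcurrentMap<String, AtomicInteger> needs = new ConcurrentHashMap<>();
AtomicInteger need = needs.computeIfAbsent("job_0001", k -> new AtomicInteger(0));
need.incrementAndGet();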
Project: hadoop    File: ReduceTaskAttemptInfo.java
public ReduceTaskAttemptInfo(TaskAttempt ta, TaskType type) {
  super(ta, type, false);

  this.shuffleFinishTime = ta.getShuffleFinishTime();
  this.mergeFinishTime = ta.getSortFinishTime();
  this.elapsedShuffleTime = Times.elapsed(this.startTime,
      this.shuffleFinishTime, false);
  if (this.elapsedShuffleTime == -1) {
    this.elapsedShuffleTime = 0;
  }
  this.elapsedMergeTime = Times.elapsed(this.shuffleFinishTime,
      this.mergeFinishTime, false);
  if (this.elapsedMergeTime == -1) {
    this.elapsedMergeTime = 0;
  }
  this.elapsedReduceTime = Times.elapsed(this.mergeFinishTime,
      this.finishTime, false);
  if (this.elapsedReduceTime == -1) {
    this.elapsedReduceTime = 0;
  }
}
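The -1 checks above rely on the convention that Times.elapsed returns -1 when an interval cannot be computed, which the constructor then normalizes to 0. A hedged sketch of the assumed contract:

// Assumed behavior, not part of the snippet above: with isRunning == false
// and an unset (-1) finish timestamp, Times.elapsed reports -1.
long elapsed = Times.elapsed(100L, -1L, false);  // -1, normalized to 0 above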
Project: hadoop    File: TaskInfo.java
public TaskInfo(Task task) {
  TaskType ttype = task.getType();
  this.type = ttype.toString();
  TaskReport report = task.getReport();
  this.startTime = report.getStartTime();
  this.finishTime = report.getFinishTime();
  this.state = report.getTaskState();
  this.elapsedTime = Times.elapsed(this.startTime, this.finishTime,
    this.state == TaskState.RUNNING);
  if (this.elapsedTime == -1) {
    this.elapsedTime = 0;
  }
  this.progress = report.getProgress() * 100;
  this.status = report.getStatus();
  this.id = MRApps.toString(task.getID());
  this.taskNum = task.getID().getId();
  this.successful = getSuccessfulAttempt(task);
  if (successful != null) {
    this.successfulAttempt = MRApps.toString(successful.getID());
  } else {
    this.successfulAttempt = "";
  }
}
Project: hadoop    File: AMWebServices.java
@GET
@Path("/jobs/{jobid}/tasks")
@Produces({ MediaType.APPLICATION_JSON, MediaType.APPLICATION_XML })
public TasksInfo getJobTasks(@Context HttpServletRequest hsr,
    @PathParam("jobid") String jid, @QueryParam("type") String type) {

  init();
  Job job = getJobFromJobIdString(jid, appCtx);
  checkAccess(job, hsr);
  TasksInfo allTasks = new TasksInfo();
  for (Task task : job.getTasks().values()) {
    TaskType ttype = null;
    if (type != null && !type.isEmpty()) {
      try {
        ttype = MRApps.taskType(type);
      } catch (YarnRuntimeException e) {
        throw new BadRequestException("tasktype must be either m or r");
      }
    }
    if (ttype != null && task.getType() != ttype) {
      continue;
    }
    allTasks.add(new TaskInfo(task));
  }
  return allTasks;
}
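The optional type query parameter is parsed with MRApps.taskType, which accepts the one-letter symbols also produced by MRApps.taskSymbol (shown later on this page). A hedged sketch of the mapping assumed here:

TaskType mapType = MRApps.taskType("m");     // TaskType.MAP
TaskType reduceType = MRApps.taskType("r");  // TaskType.REDUCE
// Any other value throws YarnRuntimeException, which the handler above
// surfaces to the client as an HTTP 400 BadRequestException.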
Project: hadoop    File: AMWebServices.java
@GET
@Path("/jobs/{jobid}/tasks/{taskid}/attempts")
@Produces({ MediaType.APPLICATION_JSON, MediaType.APPLICATION_XML })
public TaskAttemptsInfo getJobTaskAttempts(@Context HttpServletRequest hsr,
    @PathParam("jobid") String jid, @PathParam("taskid") String tid) {

  init();
  TaskAttemptsInfo attempts = new TaskAttemptsInfo();
  Job job = getJobFromJobIdString(jid, appCtx);
  checkAccess(job, hsr);
  Task task = getTaskFromTaskIdString(tid, job);

  for (TaskAttempt ta : task.getAttempts().values()) {
    if (ta != null) {
      if (task.getType() == TaskType.REDUCE) {
        attempts.add(new ReduceTaskAttemptInfo(ta, task.getType()));
      } else {
        attempts.add(new TaskAttemptInfo(ta, task.getType(), true));
      }
    }
  }
  return attempts;
}
Project: hadoop    File: HsAttemptsPage.java
@Override
protected Collection<TaskAttempt> getTaskAttempts() {
  List<TaskAttempt> fewTaskAttempts = new ArrayList<TaskAttempt>();
  String taskTypeStr = $(TASK_TYPE);
  TaskType taskType = MRApps.taskType(taskTypeStr);
  String attemptStateStr = $(ATTEMPT_STATE);
  TaskAttemptStateUI neededState = MRApps
      .taskAttemptState(attemptStateStr);
  Job j = app.getJob();
  Map<TaskId, Task> tasks = j.getTasks(taskType);
  for (Task task : tasks.values()) {
    Map<TaskAttemptId, TaskAttempt> attempts = task.getAttempts();
    for (TaskAttempt attempt : attempts.values()) {
      if (neededState.correspondsTo(attempt.getState())) {
        fewTaskAttempts.add(attempt);
      }
    }
  }
  return fewTaskAttempts;
}
Project: hadoop    File: TestRMContainerAllocator.java
private ContainerRequestEvent
    createReq(JobId jobId, int taskAttemptId, int memory, String[] hosts,
        boolean earlierFailedAttempt, boolean reduce) {
  TaskId taskId;
  if (reduce) {
    taskId = MRBuilderUtils.newTaskId(jobId, 0, TaskType.REDUCE);
  } else {
    taskId = MRBuilderUtils.newTaskId(jobId, 0, TaskType.MAP);
  }
  TaskAttemptId attemptId = MRBuilderUtils.newTaskAttemptId(taskId,
      taskAttemptId);
  Resource containerNeed = Resource.newInstance(memory, 1);
  if (earlierFailedAttempt) {
    return ContainerRequestEvent
        .createContainerRequestEventForFailedContainer(attemptId,
            containerNeed);
  }
  return new ContainerRequestEvent(attemptId, containerNeed, hosts,
      new String[] { NetworkTopology.DEFAULT_RACK });
}
Project: hadoop    File: TaskAttemptImpl.java
@SuppressWarnings("unchecked")
private void sendLaunchedEvents() {
  JobCounterUpdateEvent jce = new JobCounterUpdateEvent(attemptId.getTaskId()
      .getJobId());
  jce.addCounterUpdate(attemptId.getTaskId().getTaskType() == TaskType.MAP ?
      JobCounter.TOTAL_LAUNCHED_MAPS : JobCounter.TOTAL_LAUNCHED_REDUCES, 1);
  eventHandler.handle(jce);

  LOG.info("TaskAttempt: [" + attemptId
      + "] using containerId: [" + container.getId() + " on NM: ["
      + StringInterner.weakIntern(container.getNodeId().toString()) + "]");
  TaskAttemptStartedEvent tase =
    new TaskAttemptStartedEvent(TypeConverter.fromYarn(attemptId),
        TypeConverter.fromYarn(attemptId.getTaskId().getTaskType()),
        launchTime, trackerName, httpPort, shufflePort, container.getId(),
        locality.toString(), avataar.toString());
  eventHandler.handle(
      new JobHistoryEvent(attemptId.getTaskId().getJobId(), tase));
}
Project: hadoop    File: TaskAttemptImpl.java
@SuppressWarnings("unchecked")
@Override
public void transition(TaskAttemptImpl taskAttempt, TaskAttemptEvent event) {
  // too many fetch failures can only happen for map tasks
  Preconditions
      .checkArgument(taskAttempt.getID().getTaskId().getTaskType() == TaskType.MAP);
  //add to diagnostic
  taskAttempt.addDiagnosticInfo("Too many fetch failures. Failing the attempt");

  if (taskAttempt.getLaunchTime() != 0) {
    taskAttempt.eventHandler
        .handle(createJobCounterUpdateEventTAFailed(taskAttempt, true));
    TaskAttemptUnsuccessfulCompletionEvent tauce =
        createTaskAttemptUnsuccessfulCompletionEvent(taskAttempt,
            TaskAttemptStateInternal.FAILED);
    taskAttempt.eventHandler.handle(new JobHistoryEvent(
        taskAttempt.attemptId.getTaskId().getJobId(), tauce));
  } else {
    LOG.debug("Not generating HistoryFinish event since start event not " +
        "generated for taskAttempt: " + taskAttempt.getID());
  }
  taskAttempt.eventHandler.handle(new TaskTAttemptEvent(
      taskAttempt.attemptId, TaskEventType.T_ATTEMPT_FAILED));
}
Project: hadoop    File: JobImpl.java
private void computeProgress() {
  this.readLock.lock();
  try {
    float mapProgress = 0f;
    float reduceProgress = 0f;
    for (Task task : this.tasks.values()) {
      if (task.getType() == TaskType.MAP) {
        mapProgress += (task.isFinished() ? 1f : task.getProgress());
      } else {
        reduceProgress += (task.isFinished() ? 1f : task.getProgress());
      }
    }
    if (this.numMapTasks != 0) {
      mapProgress = mapProgress / this.numMapTasks;
    }
    if (this.numReduceTasks != 0) {
      reduceProgress = reduceProgress / this.numReduceTasks;
    }
    this.mapProgress = mapProgress;
    this.reduceProgress = reduceProgress;
  } finally {
    this.readLock.unlock();
  }
}
Project: hadoop    File: JobImpl.java
@Override
public Map<TaskId,Task> getTasks(TaskType taskType) {
  Map<TaskId, Task> localTasksCopy = tasks;
  Map<TaskId, Task> result = new HashMap<TaskId, Task>();
  Set<TaskId> tasksOfGivenType = null;
  readLock.lock();
  try {
    if (TaskType.MAP == taskType) {
      tasksOfGivenType = mapTasks;
    } else {
      tasksOfGivenType = reduceTasks;
    }
    for (TaskId taskID : tasksOfGivenType) {
      result.put(taskID, localTasksCopy.get(taskID));
    }
    return result;
  } finally {
    readLock.unlock();
  }
}
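Because the method copies the per-type TaskId set into a fresh HashMap under the read lock, callers can iterate the result without further synchronization. A hedged usage sketch:

// Illustrative caller: counts finished map tasks from the snapshot copy.
Map<TaskId, Task> mapTasks = job.getTasks(TaskType.MAP);
int finished = 0;
for (Task t : mapTasks.values()) {
  if (t.isFinished()) {
    ++finished;
  }
}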
Project: hadoop    File: MRClientService.java
@Override
public GetTaskReportsResponse getTaskReports(
    GetTaskReportsRequest request) throws IOException {
  JobId jobId = request.getJobId();
  TaskType taskType = request.getTaskType();

  GetTaskReportsResponse response = 
    recordFactory.newRecordInstance(GetTaskReportsResponse.class);

  Job job = verifyAndGetJob(jobId, JobACL.VIEW_JOB, true);
  Collection<Task> tasks = job.getTasks(taskType).values();
  LOG.info("Getting task report for " + taskType + "   " + jobId
      + ". Report-size will be " + tasks.size());

  // Take lock to allow only one call, otherwise heap will blow up because
  // of counters in the report when there are multiple callers.
  synchronized (getTaskReportsLock) {
    for (Task task : tasks) {
      response.addTaskReport(task.getReport());
    }
  }

  return response;
}
Project: hadoop    File: RMContainerAllocator.java
boolean remove(TaskAttemptId tId) {
  ContainerId containerId = null;
  if (tId.getTaskId().getTaskType().equals(TaskType.MAP)) {
    containerId = maps.remove(tId).getId();
  } else {
    containerId = reduces.remove(tId).getId();
    if (containerId != null) {
      boolean preempted = preemptionWaitingReduces.remove(tId);
      if (preempted) {
        LOG.info("Reduce preemption successful " + tId);
      }
    }
  }

  if (containerId != null) {
    containerToAttemptMap.remove(containerId);
    return true;
  }
  return false;
}
Project: hadoop    File: TestRuntimeEstimators.java
private float getReduceProgress() {
  Job job = myAppContext.getJob(myAttemptID.getTaskId().getJobId());
  float runtime = getCodeRuntime();

  Collection<Task> allMapTasks = job.getTasks(TaskType.MAP).values();

  int numberMaps = allMapTasks.size();
  int numberDoneMaps = 0;

  for (Task mapTask : allMapTasks) {
    if (mapTask.isFinished()) {
      ++numberDoneMaps;
    }
  }

  if (numberMaps == numberDoneMaps) {
    shuffleCompletedTime = Math.min(shuffleCompletedTime, clock.getTime());

    return Math.min
        ((float) (clock.getTime() - shuffleCompletedTime)
                    / (runtime * 2000.0F) + 0.5F,
         1.0F);
  } else {
    return ((float) numberDoneMaps) / numberMaps * 0.5F;
  }
}
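In other words, the estimator holds simulated reduce progress at doneMaps / maps * 0.5 while maps are still running, then advances from 0.5 toward 1.0 over roughly twice the simulated code runtime. A worked example with illustrative numbers:

// 8 of 10 maps done: progress = (8 / 10) * 0.5 = 0.4
float duringShuffle = (8f / 10f) * 0.5f;
// All maps done, 3000 ms after shuffle completion, runtime = 3 s:
// progress = 3000 / (3 * 2000) + 0.5 = 1.0 (capped at 1.0)
float afterShuffle = Math.min(3000f / (3f * 2000.0f) + 0.5f, 1.0f);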
Project: hadoop    File: TestAMWebServicesAttempts.java
public void verifyAMTaskAttempt(JSONObject info, TaskAttempt att,
    TaskType ttype) throws JSONException {
  if (ttype == TaskType.REDUCE) {
    assertEquals("incorrect number of elements", 17, info.length());
  } else {
    assertEquals("incorrect number of elements", 12, info.length());
  }

  verifyTaskAttemptGeneric(att, ttype, info.getString("id"),
      info.getString("state"), info.getString("type"),
      info.getString("rack"), info.getString("nodeHttpAddress"),
      info.getString("diagnostics"), info.getString("assignedContainerId"),
      info.getLong("startTime"), info.getLong("finishTime"),
      info.getLong("elapsedTime"), (float) info.getDouble("progress"));

  if (ttype == TaskType.REDUCE) {
    verifyReduceTaskAttemptGeneric(att, info.getLong("shuffleFinishTime"),
        info.getLong("mergeFinishTime"), info.getLong("elapsedShuffleTime"),
        info.getLong("elapsedMergeTime"), info.getLong("elapsedReduceTime"));
  }
}
Project: hadoop    File: TestJobImpl.java
private static void completeJobTasks(JobImpl job) {
  // complete the map tasks and the reduce tasks so we start committing
  int numMaps = job.getTotalMaps();
  for (int i = 0; i < numMaps; ++i) {
    job.handle(new JobTaskEvent(
        MRBuilderUtils.newTaskId(job.getID(), 1, TaskType.MAP),
        TaskState.SUCCEEDED));
    Assert.assertEquals(JobState.RUNNING, job.getState());
  }
  int numReduces = job.getTotalReduces();
  for (int i = 0; i < numReduces; ++i) {
    job.handle(new JobTaskEvent(
        MRBuilderUtils.newTaskId(job.getID(), 1, TaskType.REDUCE),
        TaskState.SUCCEEDED));
    Assert.assertEquals(JobState.RUNNING, job.getState());
  }
}
Project: hadoop    File: TestJobHistoryParsing.java
/**
 * Simple test PartialJob
 */
@Test(timeout = 3000)
public void testPartialJob() throws Exception {
  JobId jobId = new JobIdPBImpl();
  jobId.setId(0);
  JobIndexInfo jii = new JobIndexInfo(0L, System.currentTimeMillis(), "user",
      "jobName", jobId, 3, 2, "JobStatus");
  PartialJob test = new PartialJob(jii, jobId);
  assertEquals(1.0f, test.getProgress(), 0.001);
  assertNull(test.getAllCounters());
  assertNull(test.getTasks());
  assertNull(test.getTasks(TaskType.MAP));
  assertNull(test.getTask(new TaskIdPBImpl()));

  assertNull(test.getTaskAttemptCompletionEvents(0, 100));
  assertNull(test.getMapAttemptCompletionEvents(0, 100));
  assertTrue(test.checkAccess(UserGroupInformation.getCurrentUser(), null));
  assertNull(test.getAMInfos());
}
Project: hadoop    File: CompletedJob.java
private void loadAllTasks() {
  if (tasksLoaded.get()) {
    return;
  }
  tasksLock.lock();
  try {
    if (tasksLoaded.get()) {
      return;
    }
    for (Map.Entry<TaskID, TaskInfo> entry : jobInfo.getAllTasks().entrySet()) {
      TaskId yarnTaskID = TypeConverter.toYarn(entry.getKey());
      TaskInfo taskInfo = entry.getValue();
      Task task = new CompletedTask(yarnTaskID, taskInfo);
      tasks.put(yarnTaskID, task);
      if (task.getType() == TaskType.MAP) {
        mapTasks.put(task.getID(), task);
      } else if (task.getType() == TaskType.REDUCE) {
        reduceTasks.put(task.getID(), task);
      }
    }
    tasksLoaded.set(true);
  } finally {
    tasksLock.unlock();
  }
}
Project: hadoop    File: TestHsWebServicesAttempts.java
public void verifyHsTaskAttemptXML(Element element, TaskAttempt att,
    TaskType ttype) {
  verifyTaskAttemptGeneric(att, ttype,
      WebServicesTestUtils.getXmlString(element, "id"),
      WebServicesTestUtils.getXmlString(element, "state"),
      WebServicesTestUtils.getXmlString(element, "type"),
      WebServicesTestUtils.getXmlString(element, "rack"),
      WebServicesTestUtils.getXmlString(element, "nodeHttpAddress"),
      WebServicesTestUtils.getXmlString(element, "diagnostics"),
      WebServicesTestUtils.getXmlString(element, "assignedContainerId"),
      WebServicesTestUtils.getXmlLong(element, "startTime"),
      WebServicesTestUtils.getXmlLong(element, "finishTime"),
      WebServicesTestUtils.getXmlLong(element, "elapsedTime"),
      WebServicesTestUtils.getXmlFloat(element, "progress"));

  if (ttype == TaskType.REDUCE) {
    verifyReduceTaskAttemptGeneric(att,
        WebServicesTestUtils.getXmlLong(element, "shuffleFinishTime"),
        WebServicesTestUtils.getXmlLong(element, "mergeFinishTime"),
        WebServicesTestUtils.getXmlLong(element, "elapsedShuffleTime"),
        WebServicesTestUtils.getXmlLong(element, "elapsedMergeTime"),
        WebServicesTestUtils.getXmlLong(element, "elapsedReduceTime"));
  }
}
Project: hadoop    File: TestBlocks.java
private Task getTask(long timestamp) {

  JobId jobId = new JobIdPBImpl();
  jobId.setId(0);
  jobId.setAppId(ApplicationIdPBImpl.newInstance(timestamp, 1));

  TaskId taskId = new TaskIdPBImpl();
  taskId.setId(0);
  taskId.setTaskType(TaskType.REDUCE);
  taskId.setJobId(jobId);
  Task task = mock(Task.class);
  when(task.getID()).thenReturn(taskId);
  TaskReport report = mock(TaskReport.class);
  when(report.getProgress()).thenReturn(0.7f);
  when(report.getTaskState()).thenReturn(TaskState.SUCCEEDED);
  when(report.getStartTime()).thenReturn(100001L);
  when(report.getFinishTime()).thenReturn(100011L);

  when(task.getReport()).thenReturn(report);
  when(task.getType()).thenReturn(TaskType.REDUCE);
  return task;
}
Project: hadoop    File: TestContainerLauncherImpl.java
public static TaskAttemptId makeTaskAttemptId(long ts, int appId, int taskId, 
    TaskType taskType, int id) {
  ApplicationId aID = ApplicationId.newInstance(ts, appId);
  JobId jID = MRBuilderUtils.newJobId(aID, id);
  TaskId tID = MRBuilderUtils.newTaskId(jID, taskId, taskType);
  return MRBuilderUtils.newTaskAttemptId(tID, id);
}
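A hedged usage sketch; note that this helper reuses its id argument for both the job id and the attempt id, so the values below are illustrative:

TaskAttemptId attemptId =
    makeTaskAttemptId(System.currentTimeMillis(), 1, 0, TaskType.MAP, 0);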
Project: hadoop    File: MockJobs.java
void incr(Task task) {
  TaskType type = task.getType();
  boolean finished = task.isFinished();
  if (type == TaskType.MAP) {
    if (finished) {
      ++completedMaps;
    }
    ++maps;
  } else if (type == TaskType.REDUCE) {
    if (finished) {
      ++completedReduces;
    }
    ++reduces;
  }
}
Project: hadoop    File: StartEndTimesBase.java
@Override
public long thresholdRuntime(TaskId taskID) {
  JobId jobID = taskID.getJobId();
  Job job = context.getJob(jobID);

  TaskType type = taskID.getTaskType();

  DataStatistics statistics
      = dataStatisticsForTask(taskID);

  int completedTasksOfType
      = type == TaskType.MAP
          ? job.getCompletedMaps() : job.getCompletedReduces();

  int totalTasksOfType
      = type == TaskType.MAP
          ? job.getTotalMaps() : job.getTotalReduces();

  if (completedTasksOfType < MINIMUM_COMPLETE_NUMBER_TO_SPECULATE
      || (((float)completedTasksOfType) / totalTasksOfType)
            < MINIMUM_COMPLETE_PROPORTION_TO_SPECULATE ) {
    return Long.MAX_VALUE;
  }

  long result = statistics == null
      ? Long.MAX_VALUE
      : (long)statistics.outlier(slowTaskRelativeTresholds.get(job));
  return result;
}
Project: hadoop    File: CompletedJob.java
@Override
public Map<TaskId, Task> getTasks(TaskType taskType) {
  loadAllTasks();
  if (TaskType.MAP.equals(taskType)) {
    return mapTasks;
  } else { // we have only two types of tasks
    return reduceTasks;
  }
}
Project: hadoop    File: TaskIdPBImpl.java
@Override
public synchronized void setTaskType(TaskType taskType) {
  maybeInitBuilder();
  if (taskType == null) {
    builder.clearTaskType();
    return;
  }
  builder.setTaskType(convertToProtoFormat(taskType));
}
Project: hadoop    File: TaskAttemptImpl.java
private int getCpuRequired(Configuration conf, TaskType taskType) {
  int vcores = 1;
  if (taskType == TaskType.MAP)  {
    vcores =
        conf.getInt(MRJobConfig.MAP_CPU_VCORES,
            MRJobConfig.DEFAULT_MAP_CPU_VCORES);
  } else if (taskType == TaskType.REDUCE) {
    vcores =
        conf.getInt(MRJobConfig.REDUCE_CPU_VCORES,
            MRJobConfig.DEFAULT_REDUCE_CPU_VCORES);
  }

  return vcores;
}
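The vcore counts are read from the standard MRJobConfig keys, defaulting to one vcore per task. A hedged configuration sketch:

// "mapreduce.map.cpu.vcores" / "mapreduce.reduce.cpu.vcores";
// both default to 1 when unset.
Configuration conf = new Configuration();
conf.setInt(MRJobConfig.MAP_CPU_VCORES, 2);
conf.setInt(MRJobConfig.REDUCE_CPU_VCORES, 4);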
Project: hadoop    File: TaskAttemptImpl.java
private int getGpuRequired(Configuration conf, TaskType taskType) {
  int gcores = 0;
  if (taskType == TaskType.MAP)  {
    gcores =
        conf.getInt(MRJobConfig.MAP_GPU_CORES,
            MRJobConfig.DEFAULT_MAP_GPU_CORES);
  } else if (taskType == TaskType.REDUCE) {
    gcores =
        conf.getInt(MRJobConfig.REDUCE_GPU_CORES,
            MRJobConfig.DEFAULT_REDUCE_GPU_CORES);
  }

  return gcores;
}
Project: hadoop    File: TaskAttemptImpl.java
private static void updateMillisCounters(JobCounterUpdateEvent jce,
    TaskAttemptImpl taskAttempt) {
  TaskType taskType = taskAttempt.getID().getTaskId().getTaskType();
  long duration = (taskAttempt.getFinishTime() - taskAttempt.getLaunchTime());
  int mbRequired =
      taskAttempt.getMemoryRequired(taskAttempt.conf, taskType);
  int vcoresRequired = taskAttempt.getCpuRequired(taskAttempt.conf, taskType);
  int gcoresRequired = taskAttempt.getGpuRequired(taskAttempt.conf, taskType);

  int minSlotMemSize = taskAttempt.conf.getInt(
    YarnConfiguration.RM_SCHEDULER_MINIMUM_ALLOCATION_MB,
    YarnConfiguration.DEFAULT_RM_SCHEDULER_MINIMUM_ALLOCATION_MB);

  int simSlotsRequired =
      minSlotMemSize == 0 ? 0 : (int) Math.ceil((float) mbRequired
          / minSlotMemSize);

  if (taskType == TaskType.MAP) {
    jce.addCounterUpdate(JobCounter.SLOTS_MILLIS_MAPS, simSlotsRequired * duration);
    jce.addCounterUpdate(JobCounter.MB_MILLIS_MAPS, duration * mbRequired);
    jce.addCounterUpdate(JobCounter.VCORES_MILLIS_MAPS, duration * vcoresRequired);
    jce.addCounterUpdate(JobCounter.GCORES_MILLIS_MAPS, duration * gcoresRequired);
    jce.addCounterUpdate(JobCounter.MILLIS_MAPS, duration);
  } else {
    jce.addCounterUpdate(JobCounter.SLOTS_MILLIS_REDUCES, simSlotsRequired * duration);
    jce.addCounterUpdate(JobCounter.MB_MILLIS_REDUCES, duration * mbRequired);
    jce.addCounterUpdate(JobCounter.VCORES_MILLIS_REDUCES, duration * vcoresRequired);
    jce.addCounterUpdate(JobCounter.GCORES_MILLIS_REDUCES, duration * gcoresRequired);
    jce.addCounterUpdate(JobCounter.MILLIS_REDUCES, duration);
  }
}
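The simulated slot count is the attempt's memory demand rounded up to whole multiples of the scheduler's minimum allocation. A worked example with illustrative numbers:

// A 30-second map attempt requesting 3072 MB against a 1024 MB minimum
// allocation charges ceil(3072 / 1024) = 3 slots:
int simSlots = (int) Math.ceil((float) 3072 / 1024);  // 3
long slotsMillisMaps = simSlots * 30000L;             // 90000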
Project: hadoop    File: TaskAttemptImpl.java
private static JobCounterUpdateEvent createJobCounterUpdateEventTAFailed(
    TaskAttemptImpl taskAttempt, boolean taskAlreadyCompleted) {
  TaskType taskType = taskAttempt.getID().getTaskId().getTaskType();
  JobCounterUpdateEvent jce = new JobCounterUpdateEvent(taskAttempt.getID().getTaskId().getJobId());

  if (taskType == TaskType.MAP) {
    jce.addCounterUpdate(JobCounter.NUM_FAILED_MAPS, 1);
  } else {
    jce.addCounterUpdate(JobCounter.NUM_FAILED_REDUCES, 1);
  }
  if (!taskAlreadyCompleted) {
    updateMillisCounters(jce, taskAttempt);
  }
  return jce;
}
Project: hadoop    File: TypeConverter.java
public static TaskType
    toYarn(org.apache.hadoop.mapreduce.TaskType taskType) {
  switch (taskType) {
  case MAP:
    return TaskType.MAP;
  case REDUCE:
    return TaskType.REDUCE;
  default:
    throw new YarnRuntimeException("Unrecognized task type: " + taskType);
  }
}
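A hedged usage sketch; only MAP and REDUCE convert, so the setup/cleanup members of the classic enum would throw:

org.apache.hadoop.mapreduce.v2.api.records.TaskType yarnType =
    TypeConverter.toYarn(org.apache.hadoop.mapreduce.TaskType.MAP);
// yarnType == TaskType.MAP; e.g. passing TaskType.JOB_SETUP would raise
// YarnRuntimeException("Unrecognized task type: JOB_SETUP").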
Project: hadoop    File: TaskAttemptImpl.java
@SuppressWarnings("unchecked")
@Override
public TaskAttemptStateInternal transition(TaskAttemptImpl taskAttempt, 
    TaskAttemptEvent event) {
  if(taskAttempt.getID().getTaskId().getTaskType() == TaskType.REDUCE) {
    // after a reduce task has succeeded, its outputs are safe in HDFS.
    // logically such a task should not be killed. we only come here when
    // there is a race condition in the event queue. E.g. some logic sends
    // a kill request to this attempt when the successful completion event
    // for this task is already in the event queue. so the kill event will
    // get executed immediately after the attempt is marked successful and 
    // result in this transition being exercised.
    // ignore this for reduce tasks
    LOG.info("Ignoring killed event for successful reduce task attempt" +
              taskAttempt.getID().toString());
    return TaskAttemptStateInternal.SUCCEEDED;
  }
  if(event instanceof TaskAttemptKillEvent) {
    TaskAttemptKillEvent msgEvent = (TaskAttemptKillEvent) event;
    //add to diagnostic
    taskAttempt.addDiagnosticInfo(msgEvent.getMessage());
  }

  // not setting a finish time since it was set on success
  assert (taskAttempt.getFinishTime() != 0);

  assert (taskAttempt.getLaunchTime() != 0);
  taskAttempt.eventHandler
      .handle(createJobCounterUpdateEventTAKilled(taskAttempt, true));
  TaskAttemptUnsuccessfulCompletionEvent tauce = createTaskAttemptUnsuccessfulCompletionEvent(
      taskAttempt, TaskAttemptStateInternal.KILLED);
  taskAttempt.eventHandler.handle(new JobHistoryEvent(taskAttempt.attemptId
      .getTaskId().getJobId(), tauce));
  taskAttempt.eventHandler.handle(new TaskTAttemptEvent(
      taskAttempt.attemptId, TaskEventType.T_ATTEMPT_KILLED));
  return TaskAttemptStateInternal.KILLED;
}
Project: hadoop    File: MapTaskImpl.java
public MapTaskImpl(JobId jobId, int partition, EventHandler eventHandler,
    Path remoteJobConfFile, JobConf conf,
    TaskSplitMetaInfo taskSplitMetaInfo,
    TaskAttemptListener taskAttemptListener,
    Token<JobTokenIdentifier> jobToken,
    Credentials credentials, Clock clock,
    int appAttemptId, MRAppMetrics metrics, AppContext appContext) {
  super(jobId, TaskType.MAP, partition, eventHandler, remoteJobConfFile,
      conf, taskAttemptListener, jobToken, credentials, clock,
      appAttemptId, metrics, appContext);
  this.taskSplitMetaInfo = taskSplitMetaInfo;
}
Project: hadoop    File: MockJobs.java
public static TaskAttemptReport newTaskAttemptReport(TaskAttemptId id) {
  ApplicationAttemptId appAttemptId = ApplicationAttemptId.newInstance(
      id.getTaskId().getJobId().getAppId(), 0);
  ContainerId containerId = ContainerId.newContainerId(appAttemptId, 0);
  TaskAttemptReport report = Records.newRecord(TaskAttemptReport.class);
  report.setTaskAttemptId(id);
  report
      .setStartTime(System.currentTimeMillis() - (int) (Math.random() * DT));
  report.setFinishTime(System.currentTimeMillis()
      + (int) (Math.random() * DT) + 1);

  if (id.getTaskId().getTaskType() == TaskType.REDUCE) {
    report.setShuffleFinishTime(
        (report.getFinishTime() + report.getStartTime()) / 2);
    report.setSortFinishTime(
        (report.getFinishTime() + report.getShuffleFinishTime()) / 2);
  }

  report.setPhase(PHASES.next());
  report.setTaskAttemptState(TASK_ATTEMPT_STATES.next());
  report.setProgress((float) Math.random());
  report.setCounters(TypeConverter.toYarn(newCounters()));
  report.setContainerId(containerId);
  report.setDiagnosticInfo(DIAGS.next());
  report.setStateString("Moving average " + Math.random());
  return report;
}
Project: hadoop    File: MRApps.java
public static String taskSymbol(TaskType type) {
  switch (type) {
    case MAP:           return "m";
    case REDUCE:        return "r";
  }
  throw new YarnRuntimeException("Unknown task type: "+ type.toString());
}
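taskSymbol is the inverse of MRApps.taskType used by the web services earlier on this page; a hedged round-trip sketch:

String symbol = MRApps.taskSymbol(TaskType.MAP);  // "m"
TaskType type = MRApps.taskType(symbol);          // TaskType.MAP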
Project: hadoop    File: TestRMContainerAllocator.java
private ContainerAllocatorEvent createDeallocateEvent(JobId jobId,
    int taskAttemptId, boolean reduce) {
  TaskId taskId;
  if (reduce) {
    taskId = MRBuilderUtils.newTaskId(jobId, 0, TaskType.REDUCE);
  } else {
    taskId = MRBuilderUtils.newTaskId(jobId, 0, TaskType.MAP);
  }
  TaskAttemptId attemptId =
      MRBuilderUtils.newTaskAttemptId(taskId, taskAttemptId);
  return new ContainerAllocatorEvent(attemptId,
      ContainerAllocator.EventType.CONTAINER_DEALLOCATE);
}
Project: hadoop    File: JobImpl.java
private void taskSucceeded(JobImpl job, Task task) {
  if (task.getType() == TaskType.MAP) {
    job.succeededMapTaskCount++;
  } else {
    job.succeededReduceTaskCount++;
  }
  job.metrics.completedTask(task);
}
Project: hadoop    File: JobImpl.java
private void taskFailed(JobImpl job, Task task) {
  if (task.getType() == TaskType.MAP) {
    job.failedMapTaskCount++;
  } else if (task.getType() == TaskType.REDUCE) {
    job.failedReduceTaskCount++;
  }
  job.addDiagnostic("Task failed " + task.getID());
  job.metrics.failedTask(task);
}
Project: hadoop    File: TestMRApps.java
@Test (timeout = 120000)
public void testTaskAttemptIDtoString() {
  TaskAttemptId taid = RecordFactoryProvider.getRecordFactory(null).newRecordInstance(TaskAttemptId.class);
  taid.setTaskId(RecordFactoryProvider.getRecordFactory(null).newRecordInstance(TaskId.class));
  taid.getTaskId().setTaskType(TaskType.MAP);
  taid.getTaskId().setJobId(RecordFactoryProvider.getRecordFactory(null).newRecordInstance(JobId.class));
  taid.getTaskId().getJobId().setAppId(ApplicationId.newInstance(0, 0));
  assertEquals("attempt_0_0000_m_000000_0", MRApps.toString(taid));
}
Project: hadoop    File: TestRMContainerAllocator.java
private ContainerFailedEvent createFailEvent(JobId jobId, int taskAttemptId,
    String host, boolean reduce) {
  TaskId taskId;
  if (reduce) {
    taskId = MRBuilderUtils.newTaskId(jobId, 0, TaskType.REDUCE);
  } else {
    taskId = MRBuilderUtils.newTaskId(jobId, 0, TaskType.MAP);
  }
  TaskAttemptId attemptId = MRBuilderUtils.newTaskAttemptId(taskId,
      taskAttemptId);
  return new ContainerFailedEvent(attemptId, host);    
}
Project: hadoop    File: TaskImpl.java
@Override
public TaskStateInternal transition(TaskImpl task, TaskEvent event) {
  TaskTAttemptEvent castEvent = (TaskTAttemptEvent) event;
  if (task.getInternalState() == TaskStateInternal.SUCCEEDED &&
      !castEvent.getTaskAttemptID().equals(task.successfulAttempt)) {
    // don't allow a different task attempt to override a previous
    // succeeded state
    task.finishedAttempts.add(castEvent.getTaskAttemptID());
    task.inProgressAttempts.remove(castEvent.getTaskAttemptID());
    return TaskStateInternal.SUCCEEDED;
  }

  // a successful REDUCE task should not be overridden
  //TODO: consider moving it to MapTaskImpl
  if (!TaskType.MAP.equals(task.getType())) {
    LOG.error("Unexpected event for REDUCE task " + event.getType());
    task.internalError(event.getType());
  }

  // tell the job about the rescheduling
  task.eventHandler.handle(
      new JobMapTaskRescheduledEvent(task.taskId));
  // super.transition is mostly coded for the case where an
  //  UNcompleted task failed.  When a COMPLETED task retroactively
  //  fails, we have to let AttemptFailedTransition.transition
  //  believe that there's no redundancy.
  unSucceed(task);
  // fake increase in Uncomplete attempts for super.transition
  task.inProgressAttempts.add(castEvent.getTaskAttemptID());
  return super.transition(task, event);
}