Example source code for the Java class org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptId
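All of the snippets below revolve around the TaskAttemptId record. For orientation, here is a minimal, self-contained sketch of how such an id is built and printed. It uses only the MRBuilderUtils and MRApps helpers that also appear in the snippets below; the demo class itself is hypothetical and not part of Hadoop.

import org.apache.hadoop.mapreduce.v2.api.records.JobId;
import org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptId;
import org.apache.hadoop.mapreduce.v2.api.records.TaskId;
import org.apache.hadoop.mapreduce.v2.api.records.TaskType;
import org.apache.hadoop.mapreduce.v2.util.MRApps;
import org.apache.hadoop.mapreduce.v2.util.MRBuilderUtils;
import org.apache.hadoop.yarn.api.records.ApplicationId;

// Hypothetical demo class, for illustration only.
public class TaskAttemptIdDemo {
  public static void main(String[] args) {
    // Ids nest: ApplicationId -> JobId -> TaskId -> TaskAttemptId
    ApplicationId appId = ApplicationId.newInstance(System.currentTimeMillis(), 1);
    JobId jobId = MRBuilderUtils.newJobId(appId, 1);
    TaskId taskId = MRBuilderUtils.newTaskId(jobId, 0, TaskType.MAP);
    TaskAttemptId attemptId = MRBuilderUtils.newTaskAttemptId(taskId, 0);

    // The accessors and string form used throughout the snippets
    System.out.println(MRApps.toString(attemptId));          // e.g. attempt_<ts>_0001_m_000000_0
    System.out.println(attemptId.getTaskId().getTaskType()); // MAP
    System.out.println(attemptId.getId());                   // attempt number within the task: 0
  }
}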

Project: hadoop    File: CompletedTask.java
private void constructTaskReport() {
  loadAllTaskAttempts();
  this.report = Records.newRecord(TaskReport.class);
  report.setTaskId(taskId);
  long minLaunchTime = Long.MAX_VALUE;
  for(TaskAttempt attempt: attempts.values()) {
    minLaunchTime = Math.min(minLaunchTime, attempt.getLaunchTime());
  }
  minLaunchTime = minLaunchTime == Long.MAX_VALUE ? -1 : minLaunchTime;
  report.setStartTime(minLaunchTime);
  report.setFinishTime(taskInfo.getFinishTime());
  report.setTaskState(getState());
  report.setProgress(getProgress());
  Counters counters = getCounters();
  if (counters == null) {
    counters = EMPTY_COUNTERS;
  }
  report.setCounters(TypeConverter.toYarn(counters));
  if (successfulAttempt != null) {
    report.setSuccessfulAttempt(successfulAttempt);
  }
  report.addAllDiagnostics(reportDiagnostics);
  report.addAllRunningAttempts(
      new ArrayList<TaskAttemptId>(attempts.keySet()));
}
Project: hadoop    File: TestHsWebServicesAttempts.java
public void verifyHsTaskAttemptsXML(NodeList nodes, Task task) {
  assertEquals("incorrect number of elements", 1, nodes.getLength());

  for (TaskAttempt att : task.getAttempts().values()) {
    TaskAttemptId id = att.getID();
    String attid = MRApps.toString(id);
    boolean found = false;
    for (int i = 0; i < nodes.getLength(); i++) {
      Element element = (Element) nodes.item(i);

      if (attid.equals(WebServicesTestUtils.getXmlString(element, "id"))) {
        found = true;
        verifyHsTaskAttemptXML(element, att, task.getType());
      }
    }
    assertTrue("task with id: " + attid + " not in web service output", found);
  }
}
Project: hadoop    File: AttemptsPage.java
@Override
protected Collection<TaskAttempt> getTaskAttempts() {
  List<TaskAttempt> fewTaskAttempts = new ArrayList<TaskAttempt>();
  String taskTypeStr = $(TASK_TYPE);
  TaskType taskType = MRApps.taskType(taskTypeStr);
  String attemptStateStr = $(ATTEMPT_STATE);
  TaskAttemptStateUI neededState = MRApps
      .taskAttemptState(attemptStateStr);
  for (Task task : super.app.getJob().getTasks(taskType).values()) {
    Map<TaskAttemptId, TaskAttempt> attempts = task.getAttempts();
    for (TaskAttempt attempt : attempts.values()) {
      if (neededState.correspondsTo(attempt.getState())) {
        fewTaskAttempts.add(attempt);
      }
    }
  }
  return fewTaskAttempts;
}
Project: hadoop    File: TestFail.java
@Test
// All task attempts are timed out, leading to job failure
public void testTimedOutTask() throws Exception {
  MRApp app = new TimeOutTaskMRApp(1, 0);
  Configuration conf = new Configuration();
  int maxAttempts = 2;
  conf.setInt(MRJobConfig.MAP_MAX_ATTEMPTS, maxAttempts);
  // disable uberization (requires entire job to be reattempted, so max for
  // subtask attempts is overridden to 1)
  conf.setBoolean(MRJobConfig.JOB_UBERTASK_ENABLE, false);
  Job job = app.submit(conf);
  app.waitForState(job, JobState.FAILED);
  Map<TaskId,Task> tasks = job.getTasks();
  Assert.assertEquals("Num tasks is not correct", 1, tasks.size());
  Task task = tasks.values().iterator().next();
  Assert.assertEquals("Task state not correct", TaskState.FAILED,
      task.getReport().getTaskState());
  Map<TaskAttemptId, TaskAttempt> attempts =
      tasks.values().iterator().next().getAttempts();
  Assert.assertEquals("Num attempts is not correct", maxAttempts,
      attempts.size());
  for (TaskAttempt attempt : attempts.values()) {
    Assert.assertEquals("Attempt state not correct", TaskAttemptState.FAILED,
        attempt.getReport().getTaskAttemptState());
  }
}
Project: hadoop    File: JobImpl.java
private void actOnUnusableNode(NodeId nodeId, NodeState nodeState) {
  // rerun previously successful map tasks
  List<TaskAttemptId> taskAttemptIdList = nodesToSucceededTaskAttempts.get(nodeId);
  if(taskAttemptIdList != null) {
    String mesg = "TaskAttempt killed because it ran on unusable node "
        + nodeId;
    for(TaskAttemptId id : taskAttemptIdList) {
      if(TaskType.MAP == id.getTaskId().getTaskType()) {
        // reschedule only map tasks because their outputs may be unusable
        LOG.info(mesg + ". AttemptId:" + id);
        eventHandler.handle(new TaskAttemptKillEvent(id, mesg));
      }
    }
  }
  // currently running task attempts on unusable nodes are handled in
  // RMContainerAllocator
}
Project: hadoop    File: TaskImpl.java
@Override
public void transition(TaskImpl task, TaskEvent event) {
  TaskTAttemptEvent ev = (TaskTAttemptEvent) event;
  // An attempt reports commit pending; decide whether to make it the commitAttempt
  TaskAttemptId attemptID = ev.getTaskAttemptID();
  if (task.commitAttempt == null) {
    // TODO: validate attemptID
    task.commitAttempt = attemptID;
    LOG.info(attemptID + " given a go for committing the task output.");
  } else {
    // Don't think this can be a pluggable decision, so simply raise an
    // event for the TaskAttempt to delete its output.
    LOG.info(task.commitAttempt
        + " already given a go for committing the task output, so killing "
        + attemptID);
    task.eventHandler.handle(new TaskAttemptKillEvent(attemptID,
        SPECULATION + task.commitAttempt + " committed first!"));
  }
}
Project: hadoop    File: TaskImpl.java
@Override
public void transition(TaskImpl task, TaskEvent event) {
  TaskTAttemptEvent taskTAttemptEvent = (TaskTAttemptEvent) event;
  TaskAttemptId taskAttemptId = taskTAttemptEvent.getTaskAttemptID();
  task.handleTaskAttemptCompletion(
      taskAttemptId, 
      TaskAttemptCompletionEventStatus.SUCCEEDED);
  task.finishedAttempts.add(taskAttemptId);
  task.inProgressAttempts.remove(taskAttemptId);
  task.successfulAttempt = taskAttemptId;
  task.sendTaskSucceededEvents();
  for (TaskAttempt attempt : task.attempts.values()) {
    if (attempt.getID() != task.successfulAttempt &&
        // This is okay because it can only talk us out of sending a
        //  TA_KILL message to an attempt that doesn't need one for
        //  other reasons.
        !attempt.isFinished()) {
      LOG.info("Issuing kill to other attempt " + attempt.getID());
      task.eventHandler.handle(new TaskAttemptKillEvent(attempt.getID(),
          SPECULATION + task.successfulAttempt + " succeeded first!"));
    }
  }
  task.finished(TaskStateInternal.SUCCEEDED);
}
Project: hadoop    File: TaskImpl.java
@Override
public void transition(TaskImpl task, TaskEvent event) {
  TaskAttemptId taskAttemptId =
      ((TaskTAttemptEvent) event).getTaskAttemptID();
  task.handleTaskAttemptCompletion(
      taskAttemptId, 
      TaskAttemptCompletionEventStatus.KILLED);
  task.finishedAttempts.add(taskAttemptId);
  task.inProgressAttempts.remove(taskAttemptId);
  if (task.successfulAttempt == null) {
    task.addAndScheduleAttempt(Avataar.VIRGIN);
  }
  if ((task.commitAttempt != null) && (task.commitAttempt == taskAttemptId)) {
    task.commitAttempt = null;
  }
}
Project: hadoop    File: HsAttemptsPage.java
@Override
protected Collection<TaskAttempt> getTaskAttempts() {
  List<TaskAttempt> fewTaskAttempts = new ArrayList<TaskAttempt>();
  String taskTypeStr = $(TASK_TYPE);
  TaskType taskType = MRApps.taskType(taskTypeStr);
  String attemptStateStr = $(ATTEMPT_STATE);
  TaskAttemptStateUI neededState = MRApps
      .taskAttemptState(attemptStateStr);
  Job j = app.getJob();
  Map<TaskId, Task> tasks = j.getTasks(taskType);
  for (Task task : tasks.values()) {
    Map<TaskAttemptId, TaskAttempt> attempts = task.getAttempts();
    for (TaskAttempt attempt : attempts.values()) {
      if (neededState.correspondsTo(attempt.getState())) {
        fewTaskAttempts.add(attempt);
      }
    }
  }
  return fewTaskAttempts;
}
Project: hadoop    File: TestKill.java
@Override
protected void attemptLaunched(TaskAttemptId attemptID) {
  if (attemptID.getTaskId().getId() == 0 && attemptID.getId() == 0) {
    // this blocks the first task's first attempt;
    // the subsequent ones are completed
    try {
      latch.await();
    } catch (InterruptedException e) {
      e.printStackTrace();
    }
  } else {
    getContext().getEventHandler().handle(
        new TaskAttemptEvent(attemptID,
            TaskAttemptEventType.TA_DONE));
  }
}
Project: hadoop    File: MRClientService.java
@SuppressWarnings("unchecked")
@Override
public KillTaskAttemptResponse killTaskAttempt(
    KillTaskAttemptRequest request) throws IOException {
  TaskAttemptId taskAttemptId = request.getTaskAttemptId();
  UserGroupInformation callerUGI = UserGroupInformation.getCurrentUser();
  String message = "Kill task attempt " + taskAttemptId
      + " received from " + callerUGI + " at "
      + Server.getRemoteAddress();
  LOG.info(message);
  verifyAndGetAttempt(taskAttemptId, JobACL.MODIFY_JOB);
  appContext.getEventHandler().handle(
      new TaskAttemptDiagnosticsUpdateEvent(taskAttemptId, message));
  appContext.getEventHandler().handle(
      new TaskAttemptEvent(taskAttemptId, 
          TaskAttemptEventType.TA_KILL));
  KillTaskAttemptResponse response = 
    recordFactory.newRecordInstance(KillTaskAttemptResponse.class);
  return response;
}
Project: hadoop    File: TestAMWebServicesAttempts.java
public void verifyAMTaskAttemptsXML(NodeList nodes, Task task) {
  assertEquals("incorrect number of elements", 1, nodes.getLength());

  for (TaskAttempt att : task.getAttempts().values()) {
    TaskAttemptId id = att.getID();
    String attid = MRApps.toString(id);
    boolean found = false;
    for (int i = 0; i < nodes.getLength(); i++) {
      Element element = (Element) nodes.item(i);

      if (attid.equals(WebServicesTestUtils.getXmlString(element, "id"))) {
        found = true;
        verifyAMTaskAttemptXML(element, att, task.getType());
      }
    }
    assertTrue("task with id: " + attid + " not in web service output", found);
  }
}
Project: hadoop    File: HistoryClientService.java
@Override
public GetTaskAttemptReportResponse getTaskAttemptReport(
    GetTaskAttemptReportRequest request) throws IOException {
  TaskAttemptId taskAttemptId = request.getTaskAttemptId();
  Job job = verifyAndGetJob(taskAttemptId.getTaskId().getJobId(), true);
  GetTaskAttemptReportResponse response =
      recordFactory.newRecordInstance(GetTaskAttemptReportResponse.class);
  response.setTaskAttemptReport(
      job.getTask(taskAttemptId.getTaskId()).getAttempt(taskAttemptId)
          .getReport());
  return response;
}
Project: hadoop    File: TestFetchFailure.java
private void sendFetchFailure(MRApp app, TaskAttempt reduceAttempt, 
    TaskAttempt mapAttempt) {
  app.getContext().getEventHandler().handle(
      new JobTaskAttemptFetchFailureEvent(
          reduceAttempt.getID(), 
          Arrays.asList(new TaskAttemptId[] {mapAttempt.getID()})));
}
Project: hadoop    File: TestJobHistoryEvents.java
private void verifyTask(Task task) {
  Assert.assertEquals("Task state not currect", TaskState.SUCCEEDED,
      task.getState());
  Map<TaskAttemptId, TaskAttempt> attempts = task.getAttempts();
  Assert.assertEquals("No of attempts not correct", 1, attempts.size());
  for (TaskAttempt attempt : attempts.values()) {
    verifyAttempt(attempt);
  }
}
Project: hadoop    File: TestFail.java
@Test
public void testTaskFailWithUnusedContainer() throws Exception {
  MRApp app = new MRAppWithFailingTaskAndUnusedContainer();
  Configuration conf = new Configuration();
  int maxAttempts = 1;
  conf.setInt(MRJobConfig.MAP_MAX_ATTEMPTS, maxAttempts);
  // disable uberization (requires entire job to be reattempted, so max for
  // subtask attempts is overridden to 1)
  conf.setBoolean(MRJobConfig.JOB_UBERTASK_ENABLE, false);
  Job job = app.submit(conf);
  app.waitForState(job, JobState.RUNNING);
  Map<TaskId, Task> tasks = job.getTasks();
  Assert.assertEquals("Num tasks is not correct", 1, tasks.size());
  Task task = tasks.values().iterator().next();
  app.waitForState(task, TaskState.SCHEDULED);
  Map<TaskAttemptId, TaskAttempt> attempts = tasks.values().iterator()
      .next().getAttempts();
  Assert.assertEquals("Num attempts is not correct", maxAttempts, attempts
      .size());
  TaskAttempt attempt = attempts.values().iterator().next();
  app.waitForInternalState((TaskAttemptImpl) attempt,
      TaskAttemptStateInternal.ASSIGNED);
  app.getDispatcher().getEventHandler().handle(
      new TaskAttemptEvent(attempt.getID(),
          TaskAttemptEventType.TA_CONTAINER_COMPLETED));
  app.waitForState(job, JobState.FAILED);
}
Project: hadoop    File: TaskAttemptCompletionEventPBImpl.java
@Override
public TaskAttemptId getAttemptId() {
  TaskAttemptCompletionEventProtoOrBuilder p = viaProto ? proto : builder;
  if (this.taskAttemptId != null) {
    return this.taskAttemptId;
  }
  if (!p.hasAttemptId()) {
    return null;
  }
  this.taskAttemptId = convertFromProtoFormat(p.getAttemptId());
  return this.taskAttemptId;
}
Project: hadoop    File: ExponentiallySmoothedTaskRuntimeEstimator.java
private void incorporateReading(
    TaskAttemptId attemptID, float newProgress, long newTime) {
  //TODO: Refactor this method, it seems more complicated than necessary.
  AtomicReference<EstimateVector> vectorRef = estimates.get(attemptID);

  if (vectorRef == null) {
    estimates.putIfAbsent(attemptID, new AtomicReference<EstimateVector>(null));
    incorporateReading(attemptID, newProgress, newTime);
    return;
  }

  EstimateVector oldVector = vectorRef.get();

  if (oldVector == null) {
    if (vectorRef.compareAndSet(null,
           new EstimateVector(-1.0, 0.0F, Long.MIN_VALUE))) {
      return;
    }

    incorporateReading(attemptID, newProgress, newTime);
    return;
  }

  while (!vectorRef.compareAndSet(
          oldVector, oldVector.incorporate(newProgress, newTime))) {
    oldVector = vectorRef.get();
  }
}
Project: hadoop    File: ExponentiallySmoothedTaskRuntimeEstimator.java
private EstimateVector getEstimateVector(TaskAttemptId attemptID) {
  AtomicReference<EstimateVector> vectorRef = estimates.get(attemptID);

  if (vectorRef == null) {
    return null;
  }

  return vectorRef.get();
}
Project: hadoop    File: ExponentiallySmoothedTaskRuntimeEstimator.java
@Override
public long estimatedRuntime(TaskAttemptId id) {
  Long startTime = startTimes.get(id);

  if (startTime == null) {
    return -1L;
  }

  EstimateVector vector = getEstimateVector(id);

  if (vector == null) {
    return -1L;
  }

  long sunkTime = vector.atTime - startTime;

  double value = vector.value;
  float progress = vector.basedOnProgress;

  if (value == 0) {
    return -1L;
  }

  double rate = smoothedValue == SmoothedValue.RATE ? value : 1.0 / value;

  if (rate == 0.0) {
    return -1L;
  }

  double remainingTime = (1.0 - progress) / rate;

  return sunkTime + (long)remainingTime;
}
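Reading the arithmetic off the method above: the estimate is the time already sunk into the attempt plus the remaining work over the smoothed rate, i.e. sunkTime + (1 - progress) / rate, and -1 serves as the no-estimate sentinel whenever the start time, the estimate vector, or the rate itself is unavailable or zero.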
Project: hadoop    File: TestHsWebServicesAttempts.java
@Test
public void testTaskAttemptId() throws Exception {
  WebResource r = resource();
  Map<JobId, Job> jobsMap = appContext.getAllJobs();

  for (JobId id : jobsMap.keySet()) {
    String jobId = MRApps.toString(id);

    for (Task task : jobsMap.get(id).getTasks().values()) {
      String tid = MRApps.toString(task.getID());

      for (TaskAttempt att : task.getAttempts().values()) {
        TaskAttemptId attemptid = att.getID();
        String attid = MRApps.toString(attemptid);

        ClientResponse response = r.path("ws").path("v1").path("history")
            .path("mapreduce").path("jobs").path(jobId).path("tasks")
            .path(tid).path("attempts").path(attid)
            .accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
        assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
        JSONObject json = response.getEntity(JSONObject.class);
        assertEquals("incorrect number of elements", 1, json.length());
        JSONObject info = json.getJSONObject("taskAttempt");
        verifyHsTaskAttempt(info, att, task.getType());
      }
    }
  }
}
Project: hadoop    File: GetTaskAttemptReportRequestPBImpl.java
@Override
public void setTaskAttemptId(TaskAttemptId taskAttemptId) {
  maybeInitBuilder();
  if (taskAttemptId == null) {
    builder.clearTaskAttemptId();
  }
  this.taskAttemptId = taskAttemptId;
}
Project: hadoop    File: TestFail.java
@Override
protected void attemptLaunched(TaskAttemptId attemptID) {
  if (attemptID.getTaskId().getId() == 0 && attemptID.getId() == 0) {
    // this is the first task's first attempt: send the fail event
    getContext().getEventHandler().handle(
        new TaskAttemptEvent(attemptID, 
            TaskAttemptEventType.TA_FAILMSG));
  } else {
    getContext().getEventHandler().handle(
        new TaskAttemptEvent(attemptID,
            TaskAttemptEventType.TA_DONE));
  }
}
Project: hadoop    File: TaskAttemptRecoverEvent.java
public TaskAttemptRecoverEvent(TaskAttemptId id, TaskAttemptInfo taInfo,
    OutputCommitter committer, boolean recoverOutput) {
  super(id, TaskAttemptEventType.TA_RECOVER);
  this.taInfo = taInfo;
  this.committer = committer;
  this.recoverAttemptOutput = recoverOutput;
}
Project: hadoop    File: TestJobHistoryParsing.java
@SuppressWarnings("unchecked")
@Override
protected void attemptLaunched(TaskAttemptId attemptID) {
  if (attemptID.getTaskId().getId() == 0) {
    getContext().getEventHandler().handle(
        new TaskAttemptEvent(attemptID, TaskAttemptEventType.TA_FAILMSG));
  } else {
    getContext().getEventHandler().handle(
        new TaskAttemptEvent(attemptID, TaskAttemptEventType.TA_DONE));
  }
}
Project: hadoop    File: FailTaskAttemptRequestPBImpl.java
@Override
public void setTaskAttemptId(TaskAttemptId taskAttemptId) {
  maybeInitBuilder();
  if (taskAttemptId == null) {
    builder.clearTaskAttemptId();
  }
  this.taskAttemptId = taskAttemptId;
}
Project: hadoop    File: TestSpeculativeExecutionWithMRApp.java
private TaskAttemptStatus createTaskAttemptStatus(TaskAttemptId id,
    float progress, TaskAttemptState state) {
  TaskAttemptStatus status = new TaskAttemptStatus();
  status.id = id;
  status.progress = progress;
  status.taskState = state;
  return status;
}
Project: hadoop    File: MRApp.java
protected void attemptLaunched(TaskAttemptId attemptID) {
  if (autoComplete) {
    // send the done event
    getContext().getEventHandler().handle(
        new TaskAttemptEvent(attemptID,
            TaskAttemptEventType.TA_DONE));
  }
}
Project: hadoop    File: FailTaskAttemptRequestPBImpl.java
@Override
public TaskAttemptId getTaskAttemptId() {
  FailTaskAttemptRequestProtoOrBuilder p = viaProto ? proto : builder;
  if (this.taskAttemptId != null) {
    return this.taskAttemptId;
  }
  if (!p.hasTaskAttemptId()) {
    return null;
  }
  this.taskAttemptId = convertFromProtoFormat(p.getTaskAttemptId());
  return this.taskAttemptId;
}
Project: hadoop    File: TaskImpl.java
@Override
public TaskAttempt getAttempt(TaskAttemptId attemptID) {
  readLock.lock();
  try {
    return attempts.get(attemptID);
  } finally {
    readLock.unlock();
  }
}
Project: hadoop    File: TestAMWebServicesAttempts.java
@Test
public void testTaskAttemptIdXMLCounters() throws Exception {
  WebResource r = resource();
  Map<JobId, Job> jobsMap = appContext.getAllJobs();
  for (JobId id : jobsMap.keySet()) {
    String jobId = MRApps.toString(id);
    for (Task task : jobsMap.get(id).getTasks().values()) {

      String tid = MRApps.toString(task.getID());
      for (TaskAttempt att : task.getAttempts().values()) {
        TaskAttemptId attemptid = att.getID();
        String attid = MRApps.toString(attemptid);

        ClientResponse response = r.path("ws").path("v1").path("mapreduce")
            .path("jobs").path(jobId).path("tasks").path(tid)
            .path("attempts").path(attid).path("counters")
            .accept(MediaType.APPLICATION_XML).get(ClientResponse.class);

        assertEquals(MediaType.APPLICATION_XML_TYPE, response.getType());
        String xml = response.getEntity(String.class);
        DocumentBuilderFactory dbf = DocumentBuilderFactory.newInstance();
        DocumentBuilder db = dbf.newDocumentBuilder();
        InputSource is = new InputSource();
        is.setCharacterStream(new StringReader(xml));
        Document dom = db.parse(is);
        NodeList nodes = dom.getElementsByTagName("jobTaskAttemptCounters");

        verifyAMTaskCountersXML(nodes, att);
      }
    }
  }
}
Project: hadoop    File: TaskImpl.java
private TaskAttemptImpl addAttempt(Avataar avataar) {
  TaskAttemptImpl attempt = createAttempt();
  attempt.setAvataar(avataar);
  if (LOG.isDebugEnabled()) {
    LOG.debug("Created attempt " + attempt.getID());
  }
  switch (attempts.size()) {
    case 0:
      attempts = Collections.singletonMap(attempt.getID(),
          (TaskAttempt) attempt);
      break;

    case 1:
      Map<TaskAttemptId, TaskAttempt> newAttempts
          = new LinkedHashMap<TaskAttemptId, TaskAttempt>(maxAttempts);
      newAttempts.putAll(attempts);
      attempts = newAttempts;
      attempts.put(attempt.getID(), attempt);
      break;

    default:
      attempts.put(attempt.getID(), attempt);
      break;
  }

  ++nextAttemptNumber;
  return attempt;
}
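Note the lazy map upgrade in the switch above: the first attempt lives in an immutable singleton map, and a LinkedHashMap (pre-sized to maxAttempts) is only allocated once a second, speculative or retry, attempt shows up, which keeps the common one-attempt-per-task case cheap.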
Project: hadoop    File: TestJobHistoryEntities.java
@Test (timeout=10000)
public void testCompletedTaskAttempt() throws Exception {
  HistoryFileInfo info = mock(HistoryFileInfo.class);
  when(info.getConfFile()).thenReturn(fullConfPath);
  completedJob =
    new CompletedJob(conf, jobId, fullHistoryPath, loadTasks, "user",
        info, jobAclsManager);
  TaskId mt1Id = MRBuilderUtils.newTaskId(jobId, 0, TaskType.MAP);
  TaskId rt1Id = MRBuilderUtils.newTaskId(jobId, 0, TaskType.REDUCE);
  TaskAttemptId mta1Id = MRBuilderUtils.newTaskAttemptId(mt1Id, 0);
  TaskAttemptId rta1Id = MRBuilderUtils.newTaskAttemptId(rt1Id, 0);

  Task mt1 = completedJob.getTask(mt1Id);
  Task rt1 = completedJob.getTask(rt1Id);

  TaskAttempt mta1 = mt1.getAttempt(mta1Id);
  assertEquals(TaskAttemptState.SUCCEEDED, mta1.getState());
  assertEquals("localhost:45454", mta1.getAssignedContainerMgrAddress());
  assertEquals("localhost:9999", mta1.getNodeHttpAddress());
  TaskAttemptReport mta1Report = mta1.getReport();
  assertEquals(TaskAttemptState.SUCCEEDED, mta1Report.getTaskAttemptState());
  assertEquals("localhost", mta1Report.getNodeManagerHost());
  assertEquals(45454, mta1Report.getNodeManagerPort());
  assertEquals(9999, mta1Report.getNodeManagerHttpPort());

  TaskAttempt rta1 = rt1.getAttempt(rta1Id);
  assertEquals(TaskAttemptState.SUCCEEDED, rta1.getState());
  assertEquals("localhost:45454", rta1.getAssignedContainerMgrAddress());
  assertEquals("localhost:9999", rta1.getNodeHttpAddress());
  TaskAttemptReport rta1Report = rta1.getReport();
  assertEquals(TaskAttemptState.SUCCEEDED, rta1Report.getTaskAttemptState());
  assertEquals("localhost", rta1Report.getNodeManagerHost());
  assertEquals(45454, rta1Report.getNodeManagerPort());
  assertEquals(9999, rta1Report.getNodeManagerHttpPort());
}
Project: hadoop    File: MockJobs.java
public static TaskAttemptReport newTaskAttemptReport(TaskAttemptId id) {
  ApplicationAttemptId appAttemptId = ApplicationAttemptId.newInstance(
      id.getTaskId().getJobId().getAppId(), 0);
  ContainerId containerId = ContainerId.newContainerId(appAttemptId, 0);
  TaskAttemptReport report = Records.newRecord(TaskAttemptReport.class);
  report.setTaskAttemptId(id);
  report.setStartTime(System.currentTimeMillis() - (int) (Math.random() * DT));
  report.setFinishTime(System.currentTimeMillis()
      + (int) (Math.random() * DT) + 1);

  if (id.getTaskId().getTaskType() == TaskType.REDUCE) {
    report.setShuffleFinishTime(
        (report.getFinishTime() + report.getStartTime()) / 2);
    report.setSortFinishTime(
        (report.getFinishTime() + report.getShuffleFinishTime()) / 2);
  }

  report.setPhase(PHASES.next());
  report.setTaskAttemptState(TASK_ATTEMPT_STATES.next());
  report.setProgress((float) Math.random());
  report.setCounters(TypeConverter.toYarn(newCounters()));
  report.setContainerId(containerId);
  report.setDiagnosticInfo(DIAGS.next());
  report.setStateString("Moving average " + Math.random());
  return report;
}
Project: hadoop    File: TestRuntimeEstimators.java
@Override
public Map<TaskAttemptId, TaskAttempt> getAttempts() {
  Map<TaskAttemptId, TaskAttempt> result
      = new HashMap<TaskAttemptId, TaskAttempt>(attempts.size());
  result.putAll(attempts);
  return result;
}
Project: hadoop    File: TaskImpl.java
@Override
public TaskStateInternal transition(TaskImpl task, TaskEvent event) {
  TaskAttemptId attemptId = null;
  if (event instanceof TaskTAttemptEvent) {
    TaskTAttemptEvent castEvent = (TaskTAttemptEvent) event;
    attemptId = castEvent.getTaskAttemptID(); 
    if (task.getInternalState() == TaskStateInternal.SUCCEEDED &&
        !attemptId.equals(task.successfulAttempt)) {
      // don't allow a different task attempt to override a previous
      // succeeded state
      task.finishedAttempts.add(castEvent.getTaskAttemptID());
      task.inProgressAttempts.remove(castEvent.getTaskAttemptID());
      return TaskStateInternal.SUCCEEDED;
    }
  }

  // a successful REDUCE task should not be overridden
  // TODO: consider moving it to MapTaskImpl
  if (!TaskType.MAP.equals(task.getType())) {
    LOG.error("Unexpected event for REDUCE task " + event.getType());
    task.internalError(event.getType());
  }

  // successful attempt is now killed. reschedule
  // tell the job about the rescheduling
  unSucceed(task);
  task.handleTaskAttemptCompletion(attemptId,
      TaskAttemptCompletionEventStatus.KILLED);
  task.eventHandler.handle(new JobMapTaskRescheduledEvent(task.taskId));
  // typically we are here because this map task was run on a bad node and
  // we want to reschedule it on a different node.
  // Depending on whether there are previous failed attempts or not this
  // can SCHEDULE or RESCHEDULE the container allocate request. If this
  // SCHEDULE's then the dataLocal hosts of this taskAttempt will be used
  // from the map splitInfo. So the bad node might be sent as a location
  // to the RM. But the RM would ignore that just like it would ignore
  // currently pending container requests affinitized to bad nodes.
  task.addAndScheduleAttempt(Avataar.VIRGIN);
  return TaskStateInternal.SCHEDULED;
}
Project: hadoop    File: TestSpeculativeExecution.java
@Override
public long estimatedRuntime(TaskAttemptId id) {
  if ((id.getTaskId().getId() == 0) && (id.getId() == 0)) {
    return SPECULATE_THIS;
  }
  return super.estimatedRuntime(id);
}
Project: hadoop    File: ContainerRemoteLaunchEvent.java
public ContainerRemoteLaunchEvent(TaskAttemptId taskAttemptID,
    ContainerLaunchContext containerLaunchContext,
    Container allocatedContainer, Task remoteTask) {
  super(taskAttemptID, allocatedContainer.getId(), StringInterner
    .weakIntern(allocatedContainer.getNodeId().toString()),
    allocatedContainer.getContainerToken(),
    ContainerLauncher.EventType.CONTAINER_REMOTE_LAUNCH);
  this.allocatedContainer = allocatedContainer;
  this.containerLaunchContext = containerLaunchContext;
  this.task = remoteTask;
}
Project: hadoop    File: ContainerLauncherEvent.java
public ContainerLauncherEvent(TaskAttemptId taskAttemptID, 
    ContainerId containerID,
    String containerMgrAddress,
    Token containerToken,
    ContainerLauncher.EventType type) {
  super(type);
  this.taskAttemptID = taskAttemptID;
  this.containerID = containerID;
  this.containerMgrAddress = containerMgrAddress;
  this.containerToken = containerToken;
}
Project: hadoop    File: ContainerLauncherImpl.java
public Container(TaskAttemptId taId, ContainerId containerID,
    String containerMgrAddress) {
  this.state = ContainerState.PREP;
  this.taskAttemptID = taId;
  this.containerMgrAddress = containerMgrAddress;
  this.containerID = containerID;
}