Example source code for the Java class org.apache.hadoop.mapreduce.v2.app.job.event.JobCounterUpdateEvent

Project: hadoop    File: TaskAttemptImpl.java
@SuppressWarnings("unchecked")
private void sendLaunchedEvents() {
  JobCounterUpdateEvent jce = new JobCounterUpdateEvent(attemptId.getTaskId()
      .getJobId());
  jce.addCounterUpdate(attemptId.getTaskId().getTaskType() == TaskType.MAP ?
      JobCounter.TOTAL_LAUNCHED_MAPS : JobCounter.TOTAL_LAUNCHED_REDUCES, 1);
  eventHandler.handle(jce);

  LOG.info("TaskAttempt: [" + attemptId
      + "] using containerId: [" + container.getId() + " on NM: ["
      + StringInterner.weakIntern(container.getNodeId().toString()) + "]");
  TaskAttemptStartedEvent tase =
    new TaskAttemptStartedEvent(TypeConverter.fromYarn(attemptId),
        TypeConverter.fromYarn(attemptId.getTaskId().getTaskType()),
        launchTime, trackerName, httpPort, shufflePort, container.getId(),
        locality.toString(), avataar.toString());
  eventHandler.handle(
      new JobHistoryEvent(attemptId.getTaskId().getJobId(), tase));
}
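
Every snippet on this page follows the same producer pattern: construct a JobCounterUpdateEvent keyed by the job's JobId, queue one or more counter increments with addCounterUpdate, and hand the event to the dispatcher, which routes it to JobImpl (shown near the end of this page). A minimal standalone sketch of that pattern, using only the API visible in these snippets; jobId, taskType, and eventHandler are placeholders rather than fields of any one class:

JobCounterUpdateEvent jce = new JobCounterUpdateEvent(jobId);
JobCounter launched = (taskType == TaskType.MAP)
    ? JobCounter.TOTAL_LAUNCHED_MAPS
    : JobCounter.TOTAL_LAUNCHED_REDUCES;
jce.addCounterUpdate(launched, 1);  // one increment per launched attempt
eventHandler.handle(jce);           // JobImpl applies the increments asynchronously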
Project: hadoop    File: RMContainerAllocator.java
@SuppressWarnings("unchecked")
private ContainerRequest assignToFailedMap(Container allocated) {
  //try to assign to earlierFailedMaps if present
  ContainerRequest assigned = null;
  while (assigned == null && earlierFailedMaps.size() > 0
      && canAssignMaps()) {
    TaskAttemptId tId = earlierFailedMaps.removeFirst();      
    if (maps.containsKey(tId)) {
      assigned = maps.remove(tId);
      JobCounterUpdateEvent jce =
        new JobCounterUpdateEvent(assigned.attemptID.getTaskId().getJobId());
      jce.addCounterUpdate(JobCounter.OTHER_LOCAL_MAPS, 1);
      eventHandler.handle(jce);
      LOG.info("Assigned from earlierFailedMaps");
      break;
    }
  }
  return assigned;
}
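
The allocator treats earlierFailedMaps as a work list: stale attempt ids (entries no longer present in the maps table) are silently discarded, and a successful reassignment is booked under OTHER_LOCAL_MAPS because locality is not re-evaluated for a retried map. A hedged, self-contained sketch of that dequeue-until-valid loop, with java.util.LinkedList and HashMap standing in for the allocator's fields:

Deque<TaskAttemptId> earlierFailedMaps = new LinkedList<>();
Map<TaskAttemptId, ContainerRequest> maps = new HashMap<>();

ContainerRequest assigned = null;
while (assigned == null && !earlierFailedMaps.isEmpty()) {
  TaskAttemptId tId = earlierFailedMaps.removeFirst();
  ContainerRequest request = maps.remove(tId);  // null if the entry went stale
  if (request != null) {
    assigned = request;
    JobCounterUpdateEvent jce =
        new JobCounterUpdateEvent(request.attemptID.getTaskId().getJobId());
    jce.addCounterUpdate(JobCounter.OTHER_LOCAL_MAPS, 1);
    eventHandler.handle(jce);
  }
}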
Project: aliyun-oss-hadoop-fs    File: TaskAttemptImpl.java
@SuppressWarnings("unchecked")
private void sendLaunchedEvents() {
  JobCounterUpdateEvent jce = new JobCounterUpdateEvent(attemptId.getTaskId()
      .getJobId());
  jce.addCounterUpdate(attemptId.getTaskId().getTaskType() == TaskType.MAP ?
      JobCounter.TOTAL_LAUNCHED_MAPS : JobCounter.TOTAL_LAUNCHED_REDUCES, 1);
  eventHandler.handle(jce);

  LOG.info("TaskAttempt: [" + attemptId
      + "] using containerId: [" + container.getId() + " on NM: ["
      + StringInterner.weakIntern(container.getNodeId().toString()) + "]");
  TaskAttemptStartedEvent tase =
    new TaskAttemptStartedEvent(TypeConverter.fromYarn(attemptId),
        TypeConverter.fromYarn(attemptId.getTaskId().getTaskType()),
        launchTime, trackerName, httpPort, shufflePort, container.getId(),
        locality.toString(), avataar.toString());
  eventHandler.handle(
      new JobHistoryEvent(attemptId.getTaskId().getJobId(), tase));
}
Project: aliyun-oss-hadoop-fs    File: RMContainerAllocator.java
@SuppressWarnings("unchecked")
private ContainerRequest assignToFailedMap(Container allocated) {
  //try to assign to earlierFailedMaps if present
  ContainerRequest assigned = null;
  while (assigned == null && earlierFailedMaps.size() > 0
      && canAssignMaps()) {
    TaskAttemptId tId = earlierFailedMaps.removeFirst();      
    if (maps.containsKey(tId)) {
      assigned = maps.remove(tId);
      JobCounterUpdateEvent jce =
        new JobCounterUpdateEvent(assigned.attemptID.getTaskId().getJobId());
      jce.addCounterUpdate(JobCounter.OTHER_LOCAL_MAPS, 1);
      eventHandler.handle(jce);
      LOG.info("Assigned from earlierFailedMaps");
      break;
    }
  }
  return assigned;
}
Project: big-c    File: TaskAttemptImpl.java
@SuppressWarnings("unchecked")
private void sendLaunchedEvents() {
  JobCounterUpdateEvent jce = new JobCounterUpdateEvent(attemptId.getTaskId()
      .getJobId());
  jce.addCounterUpdate(attemptId.getTaskId().getTaskType() == TaskType.MAP ?
      JobCounter.TOTAL_LAUNCHED_MAPS : JobCounter.TOTAL_LAUNCHED_REDUCES, 1);
  eventHandler.handle(jce);

  LOG.info("TaskAttempt: [" + attemptId
      + "] using containerId: [" + container.getId() + " on NM: ["
      + StringInterner.weakIntern(container.getNodeId().toString()) + "]");
  TaskAttemptStartedEvent tase =
    new TaskAttemptStartedEvent(TypeConverter.fromYarn(attemptId),
        TypeConverter.fromYarn(attemptId.getTaskId().getTaskType()),
        launchTime, trackerName, httpPort, shufflePort, container.getId(),
        locality.toString(), avataar.toString());
  eventHandler.handle(
      new JobHistoryEvent(attemptId.getTaskId().getJobId(), tase));
}
Project: big-c    File: RMContainerAllocator.java
@SuppressWarnings("unchecked")
private ContainerRequest assignToFailedMap(Container allocated) {
  //try to assign to earlierFailedMaps if present
  ContainerRequest assigned = null;
  while (assigned == null && earlierFailedMaps.size() > 0
      && canAssignMaps()) {
    TaskAttemptId tId = earlierFailedMaps.removeFirst();      
    if (maps.containsKey(tId)) {
      assigned = maps.remove(tId);
      JobCounterUpdateEvent jce =
        new JobCounterUpdateEvent(assigned.attemptID.getTaskId().getJobId());
      jce.addCounterUpdate(JobCounter.OTHER_LOCAL_MAPS, 1);
      eventHandler.handle(jce);
      LOG.info("Assigned from earlierFailedMaps");
      break;
    }
  }
  return assigned;
}
Project: hadoop-2.6.0-cdh5.4.3    File: TaskAttemptImpl.java
@SuppressWarnings("unchecked")
private void sendLaunchedEvents() {
  JobCounterUpdateEvent jce = new JobCounterUpdateEvent(attemptId.getTaskId()
      .getJobId());
  jce.addCounterUpdate(attemptId.getTaskId().getTaskType() == TaskType.MAP ?
      JobCounter.TOTAL_LAUNCHED_MAPS : JobCounter.TOTAL_LAUNCHED_REDUCES, 1);
  eventHandler.handle(jce);

  LOG.info("TaskAttempt: [" + attemptId
      + "] using containerId: [" + container.getId() + " on NM: ["
      + StringInterner.weakIntern(container.getNodeId().toString()) + "]");
  TaskAttemptStartedEvent tase =
    new TaskAttemptStartedEvent(TypeConverter.fromYarn(attemptId),
        TypeConverter.fromYarn(attemptId.getTaskId().getTaskType()),
        launchTime, trackerName, httpPort, shufflePort, container.getId(),
        locality.toString(), avataar.toString());
  eventHandler.handle(
      new JobHistoryEvent(attemptId.getTaskId().getJobId(), tase));
}
Project: hadoop-2.6.0-cdh5.4.3    File: RMContainerAllocator.java
@SuppressWarnings("unchecked")
private ContainerRequest assignToFailedMap(Container allocated) {
  //try to assign to earlierFailedMaps if present
  ContainerRequest assigned = null;
  while (assigned == null && earlierFailedMaps.size() > 0) {
    TaskAttemptId tId = earlierFailedMaps.removeFirst();      
    if (maps.containsKey(tId)) {
      assigned = maps.remove(tId);
      JobCounterUpdateEvent jce =
        new JobCounterUpdateEvent(assigned.attemptID.getTaskId().getJobId());
      jce.addCounterUpdate(JobCounter.OTHER_LOCAL_MAPS, 1);
      eventHandler.handle(jce);
      LOG.info("Assigned from earlierFailedMaps");
      break;
    }
  }
  return assigned;
}
Project: hadoop-plus    File: TaskAttemptImpl.java
private static JobCounterUpdateEvent createJobCounterUpdateEventTAFailed(
    TaskAttemptImpl taskAttempt, boolean taskAlreadyCompleted) {
  TaskType taskType = taskAttempt.getID().getTaskId().getTaskType();
  JobCounterUpdateEvent jce = new JobCounterUpdateEvent(taskAttempt.getID().getTaskId().getJobId());

  long slotMillisIncrement = computeSlotMillis(taskAttempt);

  if (taskType == TaskType.MAP) {
    jce.addCounterUpdate(JobCounter.NUM_FAILED_MAPS, 1);
    if(!taskAlreadyCompleted) {
      // don't double count the elapsed time
      jce.addCounterUpdate(JobCounter.SLOTS_MILLIS_MAPS, slotMillisIncrement);
    }
  } else {
    jce.addCounterUpdate(JobCounter.NUM_FAILED_REDUCES, 1);
    if(!taskAlreadyCompleted) {
      // don't double count the elapsed time
      jce.addCounterUpdate(JobCounter.SLOTS_MILLIS_REDUCES, slotMillisIncrement);
    }
  }
  return jce;
}
Project: hadoop-plus    File: TaskAttemptImpl.java
private static JobCounterUpdateEvent createJobCounterUpdateEventTAKilled(
    TaskAttemptImpl taskAttempt, boolean taskAlreadyCompleted) {
  TaskType taskType = taskAttempt.getID().getTaskId().getTaskType();
  JobCounterUpdateEvent jce = new JobCounterUpdateEvent(taskAttempt.getID().getTaskId().getJobId());

  long slotMillisIncrement = computeSlotMillis(taskAttempt);

  if (taskType == TaskType.MAP) {
    jce.addCounterUpdate(JobCounter.NUM_KILLED_MAPS, 1);
    if(!taskAlreadyCompleted) {
      // don't double count the elapsed time
      jce.addCounterUpdate(JobCounter.SLOTS_MILLIS_MAPS, slotMillisIncrement);
    }
  } else {
    jce.addCounterUpdate(JobCounter.NUM_KILLED_REDUCES, 1);
    if(!taskAlreadyCompleted) {
      // don't double count the elapsed time
      jce.addCounterUpdate(JobCounter.SLOTS_MILLIS_REDUCES, slotMillisIncrement);
    }
  }
  return jce;
}
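
The TAFailed and TAKilled helpers above are structurally identical; only the counter family changes. A hedged refactoring sketch that factors out the shared shape; this helper is hypothetical and not defined in any of the listed projects, though it calls only computeSlotMillis and the event API the originals use:

private static JobCounterUpdateEvent createJobCounterUpdateEventTAEnded(
    TaskAttemptImpl taskAttempt, boolean taskAlreadyCompleted,
    JobCounter mapCounter, JobCounter reduceCounter) {
  TaskType taskType = taskAttempt.getID().getTaskId().getTaskType();
  JobCounterUpdateEvent jce =
      new JobCounterUpdateEvent(taskAttempt.getID().getTaskId().getJobId());
  jce.addCounterUpdate(taskType == TaskType.MAP ? mapCounter : reduceCounter, 1);
  if (!taskAlreadyCompleted) {
    // don't double count the elapsed time
    jce.addCounterUpdate(
        taskType == TaskType.MAP
            ? JobCounter.SLOTS_MILLIS_MAPS : JobCounter.SLOTS_MILLIS_REDUCES,
        computeSlotMillis(taskAttempt));
  }
  return jce;
}

The failed variant would then pass NUM_FAILED_MAPS/NUM_FAILED_REDUCES, and the killed variant NUM_KILLED_MAPS/NUM_KILLED_REDUCES.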
Project: hadoop-plus    File: TaskAttemptImpl.java
@SuppressWarnings("unchecked")
private void sendLaunchedEvents() {
  JobCounterUpdateEvent jce = new JobCounterUpdateEvent(attemptId.getTaskId()
      .getJobId());
  jce.addCounterUpdate(attemptId.getTaskId().getTaskType() == TaskType.MAP ?
      JobCounter.TOTAL_LAUNCHED_MAPS : JobCounter.TOTAL_LAUNCHED_REDUCES, 1);
  eventHandler.handle(jce);

  LOG.info("TaskAttempt: [" + attemptId
      + "] using containerId: [" + container.getId() + " on NM: ["
      + StringInterner.weakIntern(container.getNodeId().toString()) + "]");
  TaskAttemptStartedEvent tase =
    new TaskAttemptStartedEvent(TypeConverter.fromYarn(attemptId),
        TypeConverter.fromYarn(attemptId.getTaskId().getTaskType()),
        launchTime, trackerName, httpPort, shufflePort, container.getId(),
        locality.toString(), avataar.toString());
  eventHandler.handle(
      new JobHistoryEvent(attemptId.getTaskId().getJobId(), tase));
}
Project: hadoop-plus    File: RMContainerAllocator.java
@SuppressWarnings("unchecked")
private ContainerRequest assignToFailedMap(Container allocated) {
  //try to assign to earlierFailedMaps if present
  ContainerRequest assigned = null;
  while (assigned == null && earlierFailedMaps.size() > 0) {
    TaskAttemptId tId = earlierFailedMaps.removeFirst();      
    if (maps.containsKey(tId)) {
      assigned = maps.remove(tId);
      JobCounterUpdateEvent jce =
        new JobCounterUpdateEvent(assigned.attemptID.getTaskId().getJobId());
      jce.addCounterUpdate(JobCounter.OTHER_LOCAL_MAPS, 1);
      eventHandler.handle(jce);
      LOG.info("Assigned from earlierFailedMaps");
      break;
    }
  }
  return assigned;
}
Project: FlexMap    File: TaskAttemptImpl.java
@SuppressWarnings("unchecked")
private void sendLaunchedEvents() {
  JobCounterUpdateEvent jce = new JobCounterUpdateEvent(attemptId.getTaskId()
      .getJobId());

  jce.addCounterUpdate(attemptId.getTaskId().getTaskType() == TaskType.MAP ?
      JobCounter.TOTAL_LAUNCHED_MAPS : JobCounter.TOTAL_LAUNCHED_REDUCES, 1);
  eventHandler.handle(jce);

  LOG.info("TaskAttempt: [" + attemptId
      + "] using containerId: [" + container.getId() + " on NM: ["
      + StringInterner.weakIntern(container.getNodeId().toString()) + "]");
  TaskAttemptStartedEvent tase =
    new TaskAttemptStartedEvent(TypeConverter.fromYarn(attemptId),
        TypeConverter.fromYarn(attemptId.getTaskId().getTaskType()),
        launchTime, trackerName, httpPort, shufflePort, container.getId(),
        locality.toString(), avataar.toString());
  eventHandler.handle(
      new JobHistoryEvent(attemptId.getTaskId().getJobId(), tase));
}
Project: FlexMap    File: RMContainerAllocator.java
@SuppressWarnings("unchecked")
private ContainerRequest assignToFailedMap(Container allocated) {
  //try to assign to earlierFailedMaps if present
  ContainerRequest assigned = null;
  while (assigned == null && earlierFailedMaps.size() > 0) {
    TaskAttemptId tId = earlierFailedMaps.removeFirst();      
    if (maps.containsKey(tId)) {
      assigned = maps.remove(tId);
      JobCounterUpdateEvent jce =
        new JobCounterUpdateEvent(assigned.attemptID.getTaskId().getJobId());
      jce.addCounterUpdate(JobCounter.OTHER_LOCAL_MAPS, 1);
      eventHandler.handle(jce);
      LOG.info("Assigned from earlierFailedMaps");
      break;
    }
  }
  return assigned;
}
Project: hops    File: TaskAttemptImpl.java
@SuppressWarnings("unchecked")
private void sendLaunchedEvents() {
  JobCounterUpdateEvent jce = new JobCounterUpdateEvent(attemptId.getTaskId()
      .getJobId());
  jce.addCounterUpdate(attemptId.getTaskId().getTaskType() == TaskType.MAP ?
      JobCounter.TOTAL_LAUNCHED_MAPS : JobCounter.TOTAL_LAUNCHED_REDUCES, 1);
  eventHandler.handle(jce);

  LOG.info("TaskAttempt: [" + attemptId
      + "] using containerId: [" + container.getId() + " on NM: ["
      + StringInterner.weakIntern(container.getNodeId().toString()) + "]");
  TaskAttemptStartedEvent tase =
    new TaskAttemptStartedEvent(TypeConverter.fromYarn(attemptId),
        TypeConverter.fromYarn(attemptId.getTaskId().getTaskType()),
        launchTime, trackerName, httpPort, shufflePort, container.getId(),
        locality.toString(), avataar.toString());
  eventHandler.handle(
      new JobHistoryEvent(attemptId.getTaskId().getJobId(), tase));
}
Project: hops    File: RMContainerAllocator.java
@SuppressWarnings("unchecked")
private ContainerRequest assignToFailedMap(Container allocated) {
  //try to assign to earlierFailedMaps if present
  ContainerRequest assigned = null;
  while (assigned == null && earlierFailedMaps.size() > 0
      && canAssignMaps()) {
    TaskAttemptId tId = earlierFailedMaps.removeFirst();      
    if (maps.containsKey(tId)) {
      assigned = maps.remove(tId);
      JobCounterUpdateEvent jce =
        new JobCounterUpdateEvent(assigned.attemptID.getTaskId().getJobId());
      jce.addCounterUpdate(JobCounter.OTHER_LOCAL_MAPS, 1);
      eventHandler.handle(jce);
      LOG.info("Assigned from earlierFailedMaps");
      break;
    }
  }
  return assigned;
}
Project: hadoop-TCP    File: TaskAttemptImpl.java
private static JobCounterUpdateEvent createJobCounterUpdateEventTAFailed(
    TaskAttemptImpl taskAttempt, boolean taskAlreadyCompleted) {
  TaskType taskType = taskAttempt.getID().getTaskId().getTaskType();
  JobCounterUpdateEvent jce = new JobCounterUpdateEvent(taskAttempt.getID().getTaskId().getJobId());

  long slotMillisIncrement = computeSlotMillis(taskAttempt);

  if (taskType == TaskType.MAP) {
    jce.addCounterUpdate(JobCounter.NUM_FAILED_MAPS, 1);
    if(!taskAlreadyCompleted) {
      // don't double count the elapsed time
      jce.addCounterUpdate(JobCounter.SLOTS_MILLIS_MAPS, slotMillisIncrement);
    }
  } else {
    jce.addCounterUpdate(JobCounter.NUM_FAILED_REDUCES, 1);
    if(!taskAlreadyCompleted) {
      // don't double count the elapsed time
      jce.addCounterUpdate(JobCounter.SLOTS_MILLIS_REDUCES, slotMillisIncrement);
    }
  }
  return jce;
}
Project: hadoop-TCP    File: TaskAttemptImpl.java
private static JobCounterUpdateEvent createJobCounterUpdateEventTAKilled(
    TaskAttemptImpl taskAttempt, boolean taskAlreadyCompleted) {
  TaskType taskType = taskAttempt.getID().getTaskId().getTaskType();
  JobCounterUpdateEvent jce = new JobCounterUpdateEvent(taskAttempt.getID().getTaskId().getJobId());

  long slotMillisIncrement = computeSlotMillis(taskAttempt);

  if (taskType == TaskType.MAP) {
    jce.addCounterUpdate(JobCounter.NUM_KILLED_MAPS, 1);
    if(!taskAlreadyCompleted) {
      // don't double count the elapsed time
      jce.addCounterUpdate(JobCounter.SLOTS_MILLIS_MAPS, slotMillisIncrement);
    }
  } else {
    jce.addCounterUpdate(JobCounter.NUM_KILLED_REDUCES, 1);
    if(!taskAlreadyCompleted) {
      // don't double count the elapsed time
      jce.addCounterUpdate(JobCounter.SLOTS_MILLIS_REDUCES, slotMillisIncrement);
    }
  }
  return jce;
}
Project: hadoop-TCP    File: TaskAttemptImpl.java
@SuppressWarnings("unchecked")
private void sendLaunchedEvents() {
  JobCounterUpdateEvent jce = new JobCounterUpdateEvent(attemptId.getTaskId()
      .getJobId());
  jce.addCounterUpdate(attemptId.getTaskId().getTaskType() == TaskType.MAP ?
      JobCounter.TOTAL_LAUNCHED_MAPS : JobCounter.TOTAL_LAUNCHED_REDUCES, 1);
  eventHandler.handle(jce);

  LOG.info("TaskAttempt: [" + attemptId
      + "] using containerId: [" + container.getId() + " on NM: ["
      + StringInterner.weakIntern(container.getNodeId().toString()) + "]");
  TaskAttemptStartedEvent tase =
    new TaskAttemptStartedEvent(TypeConverter.fromYarn(attemptId),
        TypeConverter.fromYarn(attemptId.getTaskId().getTaskType()),
        launchTime, trackerName, httpPort, shufflePort, container.getId(),
        locality.toString(), avataar.toString());
  eventHandler.handle(
      new JobHistoryEvent(attemptId.getTaskId().getJobId(), tase));
}
Project: hadoop-TCP    File: RMContainerAllocator.java
@SuppressWarnings("unchecked")
private ContainerRequest assignToFailedMap(Container allocated) {
  //try to assign to earlierFailedMaps if present
  ContainerRequest assigned = null;
  while (assigned == null && earlierFailedMaps.size() > 0) {
    TaskAttemptId tId = earlierFailedMaps.removeFirst();      
    if (maps.containsKey(tId)) {
      assigned = maps.remove(tId);
      JobCounterUpdateEvent jce =
        new JobCounterUpdateEvent(assigned.attemptID.getTaskId().getJobId());
      jce.addCounterUpdate(JobCounter.OTHER_LOCAL_MAPS, 1);
      eventHandler.handle(jce);
      LOG.info("Assigned from earlierFailedMaps");
      break;
    }
  }
  return assigned;
}
Project: hardfs    File: TaskAttemptImpl.java
private static JobCounterUpdateEvent createJobCounterUpdateEventTAFailed(
    TaskAttemptImpl taskAttempt, boolean taskAlreadyCompleted) {
  TaskType taskType = taskAttempt.getID().getTaskId().getTaskType();
  JobCounterUpdateEvent jce = new JobCounterUpdateEvent(taskAttempt.getID().getTaskId().getJobId());

  long slotMillisIncrement = computeSlotMillis(taskAttempt);

  if (taskType == TaskType.MAP) {
    jce.addCounterUpdate(JobCounter.NUM_FAILED_MAPS, 1);
    if(!taskAlreadyCompleted) {
      // don't double count the elapsed time
      jce.addCounterUpdate(JobCounter.SLOTS_MILLIS_MAPS, slotMillisIncrement);
    }
  } else {
    jce.addCounterUpdate(JobCounter.NUM_FAILED_REDUCES, 1);
    if(!taskAlreadyCompleted) {
      // don't double count the elapsed time
      jce.addCounterUpdate(JobCounter.SLOTS_MILLIS_REDUCES, slotMillisIncrement);
    }
  }
  return jce;
}
Project: hardfs    File: TaskAttemptImpl.java
private static JobCounterUpdateEvent createJobCounterUpdateEventTAKilled(
    TaskAttemptImpl taskAttempt, boolean taskAlreadyCompleted) {
  TaskType taskType = taskAttempt.getID().getTaskId().getTaskType();
  JobCounterUpdateEvent jce = new JobCounterUpdateEvent(taskAttempt.getID().getTaskId().getJobId());

  long slotMillisIncrement = computeSlotMillis(taskAttempt);

  if (taskType == TaskType.MAP) {
    jce.addCounterUpdate(JobCounter.NUM_KILLED_MAPS, 1);
    if(!taskAlreadyCompleted) {
      // don't double count the elapsed time
      jce.addCounterUpdate(JobCounter.SLOTS_MILLIS_MAPS, slotMillisIncrement);
    }
  } else {
    jce.addCounterUpdate(JobCounter.NUM_KILLED_REDUCES, 1);
    if(!taskAlreadyCompleted) {
      // don't double count the elapsed time
      jce.addCounterUpdate(JobCounter.SLOTS_MILLIS_REDUCES, slotMillisIncrement);
    }
  }
  return jce;
}
Project: hardfs    File: TaskAttemptImpl.java
@SuppressWarnings("unchecked")
private void sendLaunchedEvents() {
  JobCounterUpdateEvent jce = new JobCounterUpdateEvent(attemptId.getTaskId()
      .getJobId());
  jce.addCounterUpdate(attemptId.getTaskId().getTaskType() == TaskType.MAP ?
      JobCounter.TOTAL_LAUNCHED_MAPS : JobCounter.TOTAL_LAUNCHED_REDUCES, 1);
  eventHandler.handle(jce);

  LOG.info("TaskAttempt: [" + attemptId
      + "] using containerId: [" + container.getId() + " on NM: ["
      + StringInterner.weakIntern(container.getNodeId().toString()) + "]");
  TaskAttemptStartedEvent tase =
    new TaskAttemptStartedEvent(TypeConverter.fromYarn(attemptId),
        TypeConverter.fromYarn(attemptId.getTaskId().getTaskType()),
        launchTime, trackerName, httpPort, shufflePort, container.getId(),
        locality.toString(), avataar.toString());
  eventHandler.handle(
      new JobHistoryEvent(attemptId.getTaskId().getJobId(), tase));
}
Project: hardfs    File: RMContainerAllocator.java
@SuppressWarnings("unchecked")
private ContainerRequest assignToFailedMap(Container allocated) {
  //try to assign to earlierFailedMaps if present
  ContainerRequest assigned = null;
  while (assigned == null && earlierFailedMaps.size() > 0) {
    TaskAttemptId tId = earlierFailedMaps.removeFirst();      
    if (maps.containsKey(tId)) {
      assigned = maps.remove(tId);
      JobCounterUpdateEvent jce =
        new JobCounterUpdateEvent(assigned.attemptID.getTaskId().getJobId());
      jce.addCounterUpdate(JobCounter.OTHER_LOCAL_MAPS, 1);
      eventHandler.handle(jce);
      LOG.info("Assigned from earlierFailedMaps");
      break;
    }
  }
  return assigned;
}
Project: hadoop-on-lustre2    File: TaskAttemptImpl.java
@SuppressWarnings("unchecked")
private void sendLaunchedEvents() {
  JobCounterUpdateEvent jce = new JobCounterUpdateEvent(attemptId.getTaskId()
      .getJobId());
  jce.addCounterUpdate(attemptId.getTaskId().getTaskType() == TaskType.MAP ?
      JobCounter.TOTAL_LAUNCHED_MAPS : JobCounter.TOTAL_LAUNCHED_REDUCES, 1);
  eventHandler.handle(jce);

  LOG.info("TaskAttempt: [" + attemptId
      + "] using containerId: [" + container.getId() + " on NM: ["
      + StringInterner.weakIntern(container.getNodeId().toString()) + "]");
  TaskAttemptStartedEvent tase =
    new TaskAttemptStartedEvent(TypeConverter.fromYarn(attemptId),
        TypeConverter.fromYarn(attemptId.getTaskId().getTaskType()),
        launchTime, trackerName, httpPort, shufflePort, container.getId(),
        locality.toString(), avataar.toString());
  eventHandler.handle(
      new JobHistoryEvent(attemptId.getTaskId().getJobId(), tase));
}
Project: hadoop-on-lustre2    File: RMContainerAllocator.java
@SuppressWarnings("unchecked")
private ContainerRequest assignToFailedMap(Container allocated) {
  //try to assign to earlierFailedMaps if present
  ContainerRequest assigned = null;
  while (assigned == null && earlierFailedMaps.size() > 0) {
    TaskAttemptId tId = earlierFailedMaps.removeFirst();      
    if (maps.containsKey(tId)) {
      assigned = maps.remove(tId);
      JobCounterUpdateEvent jce =
        new JobCounterUpdateEvent(assigned.attemptID.getTaskId().getJobId());
      jce.addCounterUpdate(JobCounter.OTHER_LOCAL_MAPS, 1);
      eventHandler.handle(jce);
      LOG.info("Assigned from earlierFailedMaps");
      break;
    }
  }
  return assigned;
}
Project: hadoop    File: LocalContainerAllocator.java
@SuppressWarnings("unchecked")
@Override
public void handle(ContainerAllocatorEvent event) {
  if (event.getType() == ContainerAllocator.EventType.CONTAINER_REQ) {
    LOG.info("Processing the event " + event.toString());
    // Assign the same container ID as the AM
    ContainerId cID =
        ContainerId.newContainerId(getContext().getApplicationAttemptId(),
          this.containerId.getContainerId());
    Container container = recordFactory.newRecordInstance(Container.class);
    container.setId(cID);
    NodeId nodeId = NodeId.newInstance(this.nmHost, this.nmPort);
    container.setNodeId(nodeId);
    container.setContainerToken(null);
    container.setNodeHttpAddress(this.nmHost + ":" + this.nmHttpPort);
    // send the container-assigned event to task attempt

    if (event.getAttemptID().getTaskId().getTaskType() == TaskType.MAP) {
      JobCounterUpdateEvent jce =
          new JobCounterUpdateEvent(event.getAttemptID().getTaskId()
              .getJobId());
      // TODO Setting OTHER_LOCAL_MAPS for now.
      jce.addCounterUpdate(JobCounter.OTHER_LOCAL_MAPS, 1);
      eventHandler.handle(jce);
    }
    eventHandler.handle(new TaskAttemptContainerAssignedEvent(
        event.getAttemptID(), container, applicationACLs));
  }
}
Project: hadoop    File: TaskAttemptImpl.java
private static void updateMillisCounters(JobCounterUpdateEvent jce,
    TaskAttemptImpl taskAttempt) {
  TaskType taskType = taskAttempt.getID().getTaskId().getTaskType();
  long duration = (taskAttempt.getFinishTime() - taskAttempt.getLaunchTime());
  int mbRequired =
      taskAttempt.getMemoryRequired(taskAttempt.conf, taskType);
  int vcoresRequired = taskAttempt.getCpuRequired(taskAttempt.conf, taskType);
  int gcoresRequired = taskAttempt.getGpuRequired(taskAttempt.conf, taskType);

  int minSlotMemSize = taskAttempt.conf.getInt(
    YarnConfiguration.RM_SCHEDULER_MINIMUM_ALLOCATION_MB,
    YarnConfiguration.DEFAULT_RM_SCHEDULER_MINIMUM_ALLOCATION_MB);

  int simSlotsRequired =
      minSlotMemSize == 0 ? 0 : (int) Math.ceil((float) mbRequired
          / minSlotMemSize);

  if (taskType == TaskType.MAP) {
    jce.addCounterUpdate(JobCounter.SLOTS_MILLIS_MAPS, simSlotsRequired * duration);
    jce.addCounterUpdate(JobCounter.MB_MILLIS_MAPS, duration * mbRequired);
    jce.addCounterUpdate(JobCounter.VCORES_MILLIS_MAPS, duration * vcoresRequired);
    jce.addCounterUpdate(JobCounter.GCORES_MILLIS_MAPS, duration * gcoresRequired);
    jce.addCounterUpdate(JobCounter.MILLIS_MAPS, duration);
  } else {
    jce.addCounterUpdate(JobCounter.SLOTS_MILLIS_REDUCES, simSlotsRequired * duration);
    jce.addCounterUpdate(JobCounter.MB_MILLIS_REDUCES, duration * mbRequired);
    jce.addCounterUpdate(JobCounter.VCORES_MILLIS_REDUCES, duration * vcoresRequired);
    jce.addCounterUpdate(JobCounter.GCORES_MILLIS_REDUCES, duration * gcoresRequired);
    jce.addCounterUpdate(JobCounter.MILLIS_REDUCES, duration);
  }
}
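
A worked example of the slot arithmetic above, with assumed numbers rather than values taken from any configuration: a map attempt needing mbRequired = 3072 MB against minSlotMemSize = 1024 MB occupies simSlotsRequired = ceil(3072 / 1024) = 3 simulated slots, so over a 10 000 ms run the event books 30 000 slot-milliseconds and 30 720 000 MB-milliseconds:

long duration = 10_000L;                       // assumed finishTime - launchTime, in ms
int mbRequired = 3072, minSlotMemSize = 1024;  // assumed memory request and scheduler minimum
int simSlotsRequired =
    minSlotMemSize == 0 ? 0 : (int) Math.ceil((float) mbRequired / minSlotMemSize);
System.out.println(simSlotsRequired);             // 3
System.out.println(simSlotsRequired * duration);  // 30000    -> SLOTS_MILLIS_MAPS
System.out.println(duration * mbRequired);        // 30720000 -> MB_MILLIS_MAPS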
Project: hadoop    File: TaskAttemptImpl.java
private static JobCounterUpdateEvent createJobCounterUpdateEventTASucceeded(
    TaskAttemptImpl taskAttempt) {
  TaskId taskId = taskAttempt.attemptId.getTaskId();
  JobCounterUpdateEvent jce = new JobCounterUpdateEvent(taskId.getJobId());
  updateMillisCounters(jce, taskAttempt);
  return jce;
}
Project: hadoop    File: TaskAttemptImpl.java
private static JobCounterUpdateEvent createJobCounterUpdateEventTAFailed(
    TaskAttemptImpl taskAttempt, boolean taskAlreadyCompleted) {
  TaskType taskType = taskAttempt.getID().getTaskId().getTaskType();
  JobCounterUpdateEvent jce = new JobCounterUpdateEvent(taskAttempt.getID().getTaskId().getJobId());

  if (taskType == TaskType.MAP) {
    jce.addCounterUpdate(JobCounter.NUM_FAILED_MAPS, 1);
  } else {
    jce.addCounterUpdate(JobCounter.NUM_FAILED_REDUCES, 1);
  }
  if (!taskAlreadyCompleted) {
    updateMillisCounters(jce, taskAttempt);
  }
  return jce;
}
Project: hadoop    File: TaskAttemptImpl.java
private static JobCounterUpdateEvent createJobCounterUpdateEventTAKilled(
    TaskAttemptImpl taskAttempt, boolean taskAlreadyCompleted) {
  TaskType taskType = taskAttempt.getID().getTaskId().getTaskType();
  JobCounterUpdateEvent jce = new JobCounterUpdateEvent(taskAttempt.getID().getTaskId().getJobId());

  if (taskType == TaskType.MAP) {
    jce.addCounterUpdate(JobCounter.NUM_KILLED_MAPS, 1);
  } else {
    jce.addCounterUpdate(JobCounter.NUM_KILLED_REDUCES, 1);
  }
  if (!taskAlreadyCompleted) {
    updateMillisCounters(jce, taskAttempt);
  }
  return jce;
}
Project: hadoop    File: JobImpl.java
@Override
public void transition(JobImpl job, JobEvent event) {
  JobCounterUpdateEvent jce = (JobCounterUpdateEvent) event;
  for (JobCounterUpdateEvent.CounterIncrementalUpdate ci : jce
      .getCounterUpdates()) {
    job.jobCounters.findCounter(ci.getCounterKey()).increment(
      ci.getIncrementValue());
  }
}
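
JobImpl is the consuming end of the pattern: in its counter-update transition it drains every queued increment and applies it to the job's counters. A hedged end-to-end sketch of the round trip, with the bookkeeping reduced to a print so it stands alone (jobId is a placeholder):

JobCounterUpdateEvent jce = new JobCounterUpdateEvent(jobId);
jce.addCounterUpdate(JobCounter.NUM_FAILED_MAPS, 1);
jce.addCounterUpdate(JobCounter.MILLIS_MAPS, 4200L);
for (JobCounterUpdateEvent.CounterIncrementalUpdate ci : jce.getCounterUpdates()) {
  // JobImpl instead does:
  //   jobCounters.findCounter(ci.getCounterKey()).increment(ci.getIncrementValue());
  System.out.println(ci.getCounterKey() + " += " + ci.getIncrementValue());
}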
Project: aliyun-oss-hadoop-fs    File: LocalContainerAllocator.java
@SuppressWarnings("unchecked")
@Override
public void handle(ContainerAllocatorEvent event) {
  if (event.getType() == ContainerAllocator.EventType.CONTAINER_REQ) {
    LOG.info("Processing the event " + event.toString());
    // Assign the same container ID as the AM
    ContainerId cID =
        ContainerId.newContainerId(getContext().getApplicationAttemptId(),
          this.containerId.getContainerId());
    Container container = recordFactory.newRecordInstance(Container.class);
    container.setId(cID);
    NodeId nodeId = NodeId.newInstance(this.nmHost, this.nmPort);
    container.setNodeId(nodeId);
    container.setContainerToken(null);
    container.setNodeHttpAddress(this.nmHost + ":" + this.nmHttpPort);
    // send the container-assigned event to task attempt

    if (event.getAttemptID().getTaskId().getTaskType() == TaskType.MAP) {
      JobCounterUpdateEvent jce =
          new JobCounterUpdateEvent(event.getAttemptID().getTaskId()
              .getJobId());
      // TODO Setting OTHER_LOCAL_MAPS for now.
      jce.addCounterUpdate(JobCounter.OTHER_LOCAL_MAPS, 1);
      eventHandler.handle(jce);
    }
    eventHandler.handle(new TaskAttemptContainerAssignedEvent(
        event.getAttemptID(), container, applicationACLs));
  }
}
Project: aliyun-oss-hadoop-fs    File: TaskAttemptImpl.java
private static void updateMillisCounters(JobCounterUpdateEvent jce,
    TaskAttemptImpl taskAttempt) {
  TaskType taskType = taskAttempt.getID().getTaskId().getTaskType();
  long duration = (taskAttempt.getFinishTime() - taskAttempt.getLaunchTime());
  int mbRequired =
      taskAttempt.getMemoryRequired(taskAttempt.conf, taskType);
  int vcoresRequired = taskAttempt.getCpuRequired(taskAttempt.conf, taskType);

  int minSlotMemSize = taskAttempt.conf.getInt(
    YarnConfiguration.RM_SCHEDULER_MINIMUM_ALLOCATION_MB,
    YarnConfiguration.DEFAULT_RM_SCHEDULER_MINIMUM_ALLOCATION_MB);

  int simSlotsRequired =
      minSlotMemSize == 0 ? 0 : (int) Math.ceil((float) mbRequired
          / minSlotMemSize);

  if (taskType == TaskType.MAP) {
    jce.addCounterUpdate(JobCounter.SLOTS_MILLIS_MAPS, simSlotsRequired * duration);
    jce.addCounterUpdate(JobCounter.MB_MILLIS_MAPS, duration * mbRequired);
    jce.addCounterUpdate(JobCounter.VCORES_MILLIS_MAPS, duration * vcoresRequired);
    jce.addCounterUpdate(JobCounter.MILLIS_MAPS, duration);
  } else {
    jce.addCounterUpdate(JobCounter.SLOTS_MILLIS_REDUCES, simSlotsRequired * duration);
    jce.addCounterUpdate(JobCounter.MB_MILLIS_REDUCES, duration * mbRequired);
    jce.addCounterUpdate(JobCounter.VCORES_MILLIS_REDUCES, duration * vcoresRequired);
    jce.addCounterUpdate(JobCounter.MILLIS_REDUCES, duration);
  }
}
Project: aliyun-oss-hadoop-fs    File: TaskAttemptImpl.java
private static JobCounterUpdateEvent createJobCounterUpdateEventTASucceeded(
    TaskAttemptImpl taskAttempt) {
  TaskId taskId = taskAttempt.attemptId.getTaskId();
  JobCounterUpdateEvent jce = new JobCounterUpdateEvent(taskId.getJobId());
  updateMillisCounters(jce, taskAttempt);
  return jce;
}
Project: aliyun-oss-hadoop-fs    File: TaskAttemptImpl.java
private static JobCounterUpdateEvent createJobCounterUpdateEventTAFailed(
    TaskAttemptImpl taskAttempt, boolean taskAlreadyCompleted) {
  TaskType taskType = taskAttempt.getID().getTaskId().getTaskType();
  JobCounterUpdateEvent jce = new JobCounterUpdateEvent(taskAttempt.getID().getTaskId().getJobId());

  if (taskType == TaskType.MAP) {
    jce.addCounterUpdate(JobCounter.NUM_FAILED_MAPS, 1);
  } else {
    jce.addCounterUpdate(JobCounter.NUM_FAILED_REDUCES, 1);
  }
  if (!taskAlreadyCompleted) {
    updateMillisCounters(jce, taskAttempt);
  }
  return jce;
}
Project: aliyun-oss-hadoop-fs    File: TaskAttemptImpl.java
private static JobCounterUpdateEvent createJobCounterUpdateEventTAKilled(
    TaskAttemptImpl taskAttempt, boolean taskAlreadyCompleted) {
  TaskType taskType = taskAttempt.getID().getTaskId().getTaskType();
  JobCounterUpdateEvent jce = new JobCounterUpdateEvent(taskAttempt.getID().getTaskId().getJobId());

  if (taskType == TaskType.MAP) {
    jce.addCounterUpdate(JobCounter.NUM_KILLED_MAPS, 1);
  } else {
    jce.addCounterUpdate(JobCounter.NUM_KILLED_REDUCES, 1);
  }
  if (!taskAlreadyCompleted) {
    updateMillisCounters(jce, taskAttempt);
  }
  return jce;
}
Project: aliyun-oss-hadoop-fs    File: JobImpl.java
@Override
public void transition(JobImpl job, JobEvent event) {
  JobCounterUpdateEvent jce = (JobCounterUpdateEvent) event;
  for (JobCounterUpdateEvent.CounterIncrementalUpdate ci : jce
      .getCounterUpdates()) {
    job.jobCounters.findCounter(ci.getCounterKey()).increment(
      ci.getIncrementValue());
  }
}
Project: aliyun-oss-hadoop-fs    File: KillAMPreemptionPolicy.java
@SuppressWarnings("unchecked")
private void killContainer(Context ctxt, PreemptionContainer c){
  ContainerId reqCont = c.getId();
  TaskAttemptId reqTask = ctxt.getTaskAttempt(reqCont);
  LOG.info("Evicting " + reqTask);
  dispatcher.handle(new TaskAttemptEvent(reqTask,
      TaskAttemptEventType.TA_KILL));

  // add preemption to counters
  JobCounterUpdateEvent jce = new JobCounterUpdateEvent(reqTask
      .getTaskId().getJobId());
  jce.addCounterUpdate(JobCounter.TASKS_REQ_PREEMPT, 1);
  dispatcher.handle(jce);
}
Project: aliyun-oss-hadoop-fs    File: CheckpointAMPreemptionPolicy.java
@SuppressWarnings({ "unchecked" })
private void updatePreemptionCounters(TaskAttemptId yarnAttemptID) {
  if (!countedPreemptions.contains(yarnAttemptID)) {
    countedPreemptions.add(yarnAttemptID);
    JobCounterUpdateEvent jce = new JobCounterUpdateEvent(yarnAttemptID
        .getTaskId().getJobId());
    jce.addCounterUpdate(JobCounter.TASKS_REQ_PREEMPT, 1);
    eventHandler.handle(jce);
  }
}
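
Unlike the KillAMPreemptionPolicy snippet above it, CheckpointAMPreemptionPolicy guards the TASKS_REQ_PREEMPT increment with a seen-set, so repeated preemption messages for the same attempt are counted only once. A hedged sketch of that idempotence guard in isolation; countedPreemptions is a plain java.util.HashSet and all names are placeholders:

private final Set<TaskAttemptId> countedPreemptions = new HashSet<>();

private void updatePreemptionCounters(TaskAttemptId yarnAttemptID) {
  if (countedPreemptions.add(yarnAttemptID)) {  // add() returns false for duplicates
    JobCounterUpdateEvent jce =
        new JobCounterUpdateEvent(yarnAttemptID.getTaskId().getJobId());
    jce.addCounterUpdate(JobCounter.TASKS_REQ_PREEMPT, 1);
    eventHandler.handle(jce);
  }
}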