Java class org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptCompletionEvent: usage examples from open-source projects

Project: hadoop (identical in aliyun-oss-hadoop-fs, big-c, hadoop-2.6.0-cdh5.4.3, hadoop-plus, FlexMap, and hops)    File: JobImpl.java
@Override
public TaskAttemptCompletionEvent[] getTaskAttemptCompletionEvents(
    int fromEventId, int maxEvents) {
  TaskAttemptCompletionEvent[] events = EMPTY_TASK_ATTEMPT_COMPLETION_EVENTS;
  readLock.lock();
  try {
    if (taskAttemptCompletionEvents.size() > fromEventId) {
      int actualMax = Math.min(maxEvents,
          (taskAttemptCompletionEvents.size() - fromEventId));
      events = taskAttemptCompletionEvents.subList(fromEventId,
          actualMax + fromEventId).toArray(events);
    }
    return events;
  } finally {
    readLock.unlock();
  }
}
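
The method pages through the job's in-memory completion-event list: fromEventId is the index of the first event to return and maxEvents caps the batch size, so repeated polling can never run past the end of the list. Below is a minimal, self-contained sketch of the same windowing logic using plain strings in place of the Hadoop record type; the class and method names are illustrative, not part of Hadoop.

import java.util.Arrays;
import java.util.List;

public class EventWindowDemo {
  // Mirrors the subList windowing above: returns at most maxEvents entries
  // starting at fromEventId, or an empty array if fromEventId is past the end.
  static String[] window(List<String> events, int fromEventId, int maxEvents) {
    String[] result = new String[0];
    if (events.size() > fromEventId) {
      int actualMax = Math.min(maxEvents, events.size() - fromEventId);
      result = events.subList(fromEventId, fromEventId + actualMax).toArray(result);
    }
    return result;
  }

  public static void main(String[] args) {
    List<String> events = Arrays.asList("e0", "e1", "e2", "e3", "e4");
    System.out.println(Arrays.toString(window(events, 2, 3)));  // [e2, e3, e4]
    System.out.println(Arrays.toString(window(events, 7, 3)));  // []
  }
}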
Project: hadoop (identical in aliyun-oss-hadoop-fs, big-c, hadoop-2.6.0-cdh5.4.3, and hadoop-plus)    File: TaskImpl.java
private void handleTaskAttemptCompletion(TaskAttemptId attemptId,
    TaskAttemptCompletionEventStatus status) {
  TaskAttempt attempt = attempts.get(attemptId);
  //raise the completion event only if the container is assigned
  // to nextAttemptNumber
  if (attempt.getNodeHttpAddress() != null) {
    TaskAttemptCompletionEvent tce = recordFactory
        .newRecordInstance(TaskAttemptCompletionEvent.class);
    tce.setEventId(-1);
    String scheme = (encryptedShuffle) ? "https://" : "http://";
    tce.setMapOutputServerAddress(StringInterner.weakIntern(scheme
       + attempt.getNodeHttpAddress().split(":")[0] + ":"
       + attempt.getShufflePort()));
    tce.setStatus(status);
    tce.setAttemptId(attempt.getID());
    int runTime = 0;
    if (attempt.getFinishTime() != 0 && attempt.getLaunchTime() != 0) {
      runTime = (int) (attempt.getFinishTime() - attempt.getLaunchTime());
    }
    tce.setAttemptRunTime(runTime);

    //raise the event to job so that it adds the completion event to its
    //data structures
    eventHandler.handle(new JobTaskAttemptCompletedEvent(tce));
  }
}
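
The map-output server address is derived from the attempt's node HTTP address: split(":")[0] keeps only the host, the NodeManager's HTTP port is replaced by the shuffle port, and the scheme is https:// only when encrypted shuffle is enabled. A standalone sketch with illustrative values (in TaskImpl these come from the TaskAttempt):

public class ShuffleAddressDemo {
  public static void main(String[] args) {
    // Illustrative values, not taken from a real cluster.
    String nodeHttpAddress = "worker-01.example.com:8042";
    int shufflePort = 13562;
    boolean encryptedShuffle = false;

    String scheme = encryptedShuffle ? "https://" : "http://";
    // Keep the host, drop the HTTP port, append the shuffle port.
    String mapOutputServerAddress =
        scheme + nodeHttpAddress.split(":")[0] + ":" + shufflePort;
    System.out.println(mapOutputServerAddress);  // http://worker-01.example.com:13562
  }
}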
Project: aliyun-oss-hadoop-fs (identical in hadoop, big-c, and hadoop-2.6.0-cdh5.4.3)    File: TestTaskAttemptListenerImpl.java
private static TaskAttemptCompletionEvent createTce(int eventId,
    boolean isMap, TaskAttemptCompletionEventStatus status) {
  JobId jid = MRBuilderUtils.newJobId(12345, 1, 1);
  TaskId tid = MRBuilderUtils.newTaskId(jid, 0,
      isMap ? org.apache.hadoop.mapreduce.v2.api.records.TaskType.MAP
          : org.apache.hadoop.mapreduce.v2.api.records.TaskType.REDUCE);
  TaskAttemptId attemptId = MRBuilderUtils.newTaskAttemptId(tid, 0);
  RecordFactory recordFactory = RecordFactoryProvider.getRecordFactory(null);
  TaskAttemptCompletionEvent tce = recordFactory
      .newRecordInstance(TaskAttemptCompletionEvent.class);
  tce.setEventId(eventId);
  tce.setAttemptId(attemptId);
  tce.setStatus(status);
  return tce;
}
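
A hypothetical call site for this helper, assuming the same test class and the Hadoop test classpath; SUCCEEDED and FAILED are real TaskAttemptCompletionEventStatus constants:

// Build a succeeded map event and a failed reduce event for assertions.
TaskAttemptCompletionEvent succeededMap =
    createTce(0, true, TaskAttemptCompletionEventStatus.SUCCEEDED);
TaskAttemptCompletionEvent failedReduce =
    createTce(1, false, TaskAttemptCompletionEventStatus.FAILED);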
Project: hadoop (identical in aliyun-oss-hadoop-fs and big-c)    File: NotRunningJob.java
@Override
public GetTaskAttemptCompletionEventsResponse getTaskAttemptCompletionEvents(
    GetTaskAttemptCompletionEventsRequest request)
    throws IOException {
  GetTaskAttemptCompletionEventsResponse resp =
    recordFactory.newRecordInstance(GetTaskAttemptCompletionEventsResponse.class);
  resp.addAllCompletionEvents(new ArrayList<TaskAttemptCompletionEvent>());
  return resp;
}
Project: hadoop (identical in aliyun-oss-hadoop-fs and big-c)    File: TypeConverter.java
public static TaskCompletionEvent[] fromYarn(
    TaskAttemptCompletionEvent[] newEvents) {
  TaskCompletionEvent[] oldEvents =
      new TaskCompletionEvent[newEvents.length];
  int i = 0;
  for (TaskAttemptCompletionEvent newEvent : newEvents) {
    oldEvents[i++] = fromYarn(newEvent);
  }
  return oldEvents;
}
Project: hadoop (identical in aliyun-oss-hadoop-fs and big-c)    File: TypeConverter.java
public static TaskCompletionEvent fromYarn(
    TaskAttemptCompletionEvent newEvent) {
  return new TaskCompletionEvent(newEvent.getEventId(),
            fromYarn(newEvent.getAttemptId()), newEvent.getAttemptId().getId(),
            newEvent.getAttemptId().getTaskId().getTaskType().equals(TaskType.MAP),
            fromYarn(newEvent.getStatus()),
            newEvent.getMapOutputServerAddress());
}
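
On Java 8+ the array conversion could equally be written with streams. This is only an equivalent sketch, assuming the single-event fromYarn overload shown above is in scope so the method reference resolves to it:

// Stream-based equivalent of the loop in fromYarn(TaskAttemptCompletionEvent[]);
// a sketch, not Hadoop source.
public static TaskCompletionEvent[] fromYarnViaStream(
    TaskAttemptCompletionEvent[] newEvents) {
  return java.util.Arrays.stream(newEvents)
      .map(TypeConverter::fromYarn)
      .toArray(TaskCompletionEvent[]::new);
}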
Project: hadoop (identical in aliyun-oss-hadoop-fs and big-c)    File: GetTaskAttemptCompletionEventsResponsePBImpl.java
private void initCompletionEvents() {
  if (this.completionEvents != null) {
    return;
  }
  GetTaskAttemptCompletionEventsResponseProtoOrBuilder p = viaProto ? proto : builder;
  List<TaskAttemptCompletionEventProto> list = p.getCompletionEventsList();
  this.completionEvents = new ArrayList<TaskAttemptCompletionEvent>();

  for (TaskAttemptCompletionEventProto c : list) {
    this.completionEvents.add(convertFromProtoFormat(c));
  }
}
Project: hadoop (identical in aliyun-oss-hadoop-fs and big-c)    File: GetTaskAttemptCompletionEventsResponsePBImpl.java
@Override
public void addAllCompletionEvents(final List<TaskAttemptCompletionEvent> completionEvents) {
  if (completionEvents == null)
    return;
  initCompletionEvents();
  this.completionEvents.addAll(completionEvents);
}
Project: hadoop (identical in aliyun-oss-hadoop-fs and big-c)    File: GetTaskAttemptCompletionEventsResponsePBImpl.java
private void addCompletionEventsToProto() {
  maybeInitBuilder();
  builder.clearCompletionEvents();
  if (completionEvents == null)
    return;
  Iterable<TaskAttemptCompletionEventProto> iterable = new Iterable<TaskAttemptCompletionEventProto>() {
    @Override
    public Iterator<TaskAttemptCompletionEventProto> iterator() {
      return new Iterator<TaskAttemptCompletionEventProto>() {

        Iterator<TaskAttemptCompletionEvent> iter = completionEvents.iterator();

        @Override
        public boolean hasNext() {
          return iter.hasNext();
        }

        @Override
        public TaskAttemptCompletionEventProto next() {
          return convertToProtoFormat(iter.next());
        }

        @Override
        public void remove() {
          throw new UnsupportedOperationException();
        }
      };
    }
  };
  builder.addAllCompletionEvents(iterable);
}
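
The anonymous Iterable converts each record to its protobuf form lazily, so no intermediate list is materialized before builder.addAllCompletionEvents consumes it. The same idiom extracted into a small generic utility; the MappedIterable name is illustrative, not from Hadoop:

import java.util.Iterator;
import java.util.function.Function;

final class MappedIterable<F, T> implements Iterable<T> {
  private final Iterable<F> source;
  private final Function<F, T> mapper;

  MappedIterable(Iterable<F> source, Function<F, T> mapper) {
    this.source = source;
    this.mapper = mapper;
  }

  @Override
  public Iterator<T> iterator() {
    // A fresh pass over the source; elements are converted one at a time
    // as the consumer advances, exactly like the anonymous class above.
    final Iterator<F> iter = source.iterator();
    return new Iterator<T>() {
      @Override public boolean hasNext() { return iter.hasNext(); }
      @Override public T next() { return mapper.apply(iter.next()); }
      @Override public void remove() { throw new UnsupportedOperationException(); }
    };
  }
}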
Project: hadoop (identical in aliyun-oss-hadoop-fs and big-c)    File: CompletedJob.java
@Override
public synchronized TaskAttemptCompletionEvent[] getTaskAttemptCompletionEvents(
    int fromEventId, int maxEvents) {
  if (completionEvents == null) {
    constructTaskAttemptCompletionEvents();
  }
  return getAttemptCompletionEvents(completionEvents,
      fromEventId, maxEvents);
}
Project: hadoop (identical in aliyun-oss-hadoop-fs and big-c)    File: CompletedJob.java
private static TaskAttemptCompletionEvent[] getAttemptCompletionEvents(
    List<TaskAttemptCompletionEvent> eventList,
    int startIndex, int maxEvents) {
  TaskAttemptCompletionEvent[] events = new TaskAttemptCompletionEvent[0];
  if (eventList.size() > startIndex) {
    int actualMax = Math.min(maxEvents,
        (eventList.size() - startIndex));
    events = eventList.subList(startIndex, actualMax + startIndex)
        .toArray(events);
  }
  return events;
}