Java class org.apache.hadoop.mapreduce.TaskType code examples

Project: hadoop    File: ReduceAttemptFinishedEvent.java
public void setDatum(Object oDatum) {
  this.datum = (ReduceAttemptFinished)oDatum;
  this.attemptId = TaskAttemptID.forName(datum.attemptId.toString());
  this.taskType = TaskType.valueOf(datum.taskType.toString());
  this.taskStatus = datum.taskStatus.toString();
  this.shuffleFinishTime = datum.shuffleFinishTime;
  this.sortFinishTime = datum.sortFinishTime;
  this.finishTime = datum.finishTime;
  this.hostname = datum.hostname.toString();
  this.rackName = datum.rackname.toString();
  this.port = datum.port;
  this.state = datum.state.toString();
  this.counters = EventReader.fromAvro(datum.counters);
  this.clockSplits = AvroArrayUtils.fromAvro(datum.clockSplits);
  this.cpuUsages = AvroArrayUtils.fromAvro(datum.cpuUsages);
  this.gpuUsages = AvroArrayUtils.fromAvro(datum.gpuUsages);
  this.vMemKbytes = AvroArrayUtils.fromAvro(datum.vMemKbytes);
  this.physMemKbytes = AvroArrayUtils.fromAvro(datum.physMemKbytes);
}
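
Every setDatum variant on this page rebuilds the enum with TaskType.valueOf(datum.taskType.toString()), i.e. the standard enum name round-trip over the Avro Utf8 field. A minimal standalone sketch of that round-trip (plain enum semantics, nothing assumed beyond the TaskType enum itself):

import org.apache.hadoop.mapreduce.TaskType;

public class TaskTypeRoundTrip {
  public static void main(String[] args) {
    TaskType type = TaskType.REDUCE;
    String serialized = type.name();          // "REDUCE", as stored in the Avro record
    TaskType restored = TaskType.valueOf(serialized);
    System.out.println(restored == type);     // true: enum constants are singletons
    // Note: valueOf throws IllegalArgumentException for an unknown name
  }
}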
Project: hadoop    File: TaskAttemptUnsuccessfulCompletionEvent.java
public void setDatum(Object odatum) {
  this.datum =
      (TaskAttemptUnsuccessfulCompletion)odatum;
  this.attemptId =
      TaskAttemptID.forName(datum.attemptId.toString());
  this.taskType =
      TaskType.valueOf(datum.taskType.toString());
  this.finishTime = datum.finishTime;
  this.hostname = datum.hostname.toString();
  this.rackName = datum.rackname.toString();
  this.port = datum.port;
  this.status = datum.status.toString();
  this.error = datum.error.toString();
  this.counters =
      EventReader.fromAvro(datum.counters);
  this.clockSplits =
      AvroArrayUtils.fromAvro(datum.clockSplits);
  this.cpuUsages =
      AvroArrayUtils.fromAvro(datum.cpuUsages);
  this.gpuUsages =
      AvroArrayUtils.fromAvro(datum.gpuUsages);
  this.vMemKbytes =
      AvroArrayUtils.fromAvro(datum.vMemKbytes);
  this.physMemKbytes =
      AvroArrayUtils.fromAvro(datum.physMemKbytes);
}
Project: hadoop    File: MapAttemptFinishedEvent.java
public void setDatum(Object oDatum) {
  this.datum = (MapAttemptFinished)oDatum;
  this.attemptId = TaskAttemptID.forName(datum.attemptId.toString());
  this.taskType = TaskType.valueOf(datum.taskType.toString());
  this.taskStatus = datum.taskStatus.toString();
  this.mapFinishTime = datum.mapFinishTime;
  this.finishTime = datum.finishTime;
  this.hostname = datum.hostname.toString();
  this.rackName = datum.rackname.toString();
  this.port = datum.port;
  this.state = datum.state.toString();
  this.counters = EventReader.fromAvro(datum.counters);
  this.clockSplits = AvroArrayUtils.fromAvro(datum.clockSplits);
  this.cpuUsages = AvroArrayUtils.fromAvro(datum.cpuUsages);
  this.gpuUsages = AvroArrayUtils.fromAvro(datum.gpuUsages);
  this.vMemKbytes = AvroArrayUtils.fromAvro(datum.vMemKbytes);
  this.physMemKbytes = AvroArrayUtils.fromAvro(datum.physMemKbytes);
}
Project: hadoop    File: TaskFailedEvent.java
public void setDatum(Object odatum) {
  this.datum = (TaskFailed)odatum;
  this.id =
      TaskID.forName(datum.taskid.toString());
  this.taskType =
      TaskType.valueOf(datum.taskType.toString());
  this.finishTime = datum.finishTime;
  this.error = datum.error.toString();
  this.failedDueToAttempt =
      datum.failedDueToAttempt == null
      ? null
      : TaskAttemptID.forName(
          datum.failedDueToAttempt.toString());
  this.status = datum.status.toString();
  this.counters =
      EventReader.fromAvro(datum.counters);
}
Project: hadoop    File: TaskAttemptStartedEvent.java
/**
 * Create an event to record the start of an attempt
 * @param attemptId Id of the attempt
 * @param taskType Type of task
 * @param startTime Start time of the attempt
 * @param trackerName Name of the Task Tracker where the attempt is running
 * @param httpPort The port number of the tracker
 * @param shufflePort The shuffle port number of the container
 * @param containerId The containerId for the task attempt.
 * @param locality The locality of the task attempt
 * @param avataar The avataar of the task attempt
 */
public TaskAttemptStartedEvent(TaskAttemptID attemptId,
    TaskType taskType, long startTime, String trackerName,
    int httpPort, int shufflePort, ContainerId containerId,
    String locality, String avataar) {
  datum.attemptId = new Utf8(attemptId.toString());
  datum.taskid = new Utf8(attemptId.getTaskID().toString());
  datum.startTime = startTime;
  datum.taskType = new Utf8(taskType.name());
  datum.trackerName = new Utf8(trackerName);
  datum.httpPort = httpPort;
  datum.shufflePort = shufflePort;
  datum.containerId = new Utf8(containerId.toString());
  if (locality != null) {
    datum.locality = new Utf8(locality);
  }
  if (avataar != null) {
    datum.avataar = new Utf8(avataar);
  }
}
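
A hedged usage sketch for this constructor; the tracker host, ports and container ID string below are hypothetical, ContainerId.fromString is assumed to be available in the Hadoop version at hand, and the locality/avataar strings follow the usual Locality/Avataar enum names:

TaskAttemptID attemptId =
    new TaskAttemptID("201402141554", 1, TaskType.MAP, 0, 0);
ContainerId containerId =
    ContainerId.fromString("container_1392390614553_0001_01_000002");
TaskAttemptStartedEvent event = new TaskAttemptStartedEvent(
    attemptId, TaskType.MAP, System.currentTimeMillis(),
    "tracker-host", 50060, 13562, containerId,
    "NODE_LOCAL", "VIRGIN");   // hypothetical locality and avataar values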
Project: hadoop    File: ZombieJob.java
/**
 * Mask the job ID part in a {@link TaskAttemptID}.
 * 
 * @param attemptId
 *          raw {@link TaskAttemptID} read from trace
 * @return masked {@link TaskAttemptID} with empty {@link JobID}.
 */
private TaskAttemptID maskAttemptID(TaskAttemptID attemptId) {
  JobID jobId = new JobID();
  TaskType taskType = attemptId.getTaskType();
  TaskID taskId = attemptId.getTaskID();
  return new TaskAttemptID(jobId.getJtIdentifier(), jobId.getId(), taskType,
      taskId.getId(), attemptId.getId());
}
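
maskAttemptID is private, so the following standalone sketch simply replays its logic on a hypothetical attempt ID to show the effect of the empty JobID:

TaskAttemptID raw =
    new TaskAttemptID("cluster50", 1, TaskType.MAP, 3, 0);
JobID emptyJob = new JobID();              // empty jtIdentifier, id 0
TaskAttemptID masked = new TaskAttemptID(
    emptyJob.getJtIdentifier(), emptyJob.getId(),
    raw.getTaskType(), raw.getTaskID().getId(), raw.getId());
// masked keeps the task type, task number and attempt number;
// only the job-identifying parts are blanked out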
Project: hadoop    File: SleepJob.java
private TaskAttemptInfo getSuccessfulAttemptInfo(TaskType type, int task) {
  TaskAttemptInfo ret;
  for (int i = 0; true; ++i) {
    // Rumen is expected to synthesize an attempt if one is missing;
    // otherwise this loop would never terminate.
    ret = jobdesc.getTaskAttemptInfo(type, task, i);
    if (ret.getRunState() == TaskStatus.State.SUCCEEDED) {
      break;
    }
  }
  if (ret.getRunState() != TaskStatus.State.SUCCEEDED) {
    LOG.warn("No successful attempts for tasktype " + type + " task " + task);
  }

  return ret;
}
Project: hadoop    File: TestEvents.java
/**
 * test the getters of TaskAttemptFinishedEvent and TaskAttemptFinished
 * 
 * @throws Exception
 */
@Test(timeout = 10000)
public void testTaskAttemptFinishedEvent() throws Exception {

  JobID jid = new JobID("001", 1);
  TaskID tid = new TaskID(jid, TaskType.REDUCE, 2);
  TaskAttemptID taskAttemptId = new TaskAttemptID(tid, 3);
  Counters counters = new Counters();
  TaskAttemptFinishedEvent test = new TaskAttemptFinishedEvent(taskAttemptId,
      TaskType.REDUCE, "TEST", 123L, "RAKNAME", "HOSTNAME", "STATUS",
      counters);
  assertEquals(test.getAttemptId().toString(), taskAttemptId.toString());

  assertEquals(test.getCounters(), counters);
  assertEquals(test.getFinishTime(), 123L);
  assertEquals(test.getHostname(), "HOSTNAME");
  assertEquals(test.getRackName(), "RAKNAME");
  assertEquals(test.getState(), "STATUS");
  assertEquals(test.getTaskId(), tid);
  assertEquals(test.getTaskStatus(), "TEST");
  assertEquals(test.getTaskType(), TaskType.REDUCE);

}
Project: hadoop    File: ClientServiceDelegate.java
public org.apache.hadoop.mapreduce.TaskReport[] getTaskReports(
    JobID oldJobID, TaskType taskType) throws IOException {
  org.apache.hadoop.mapreduce.v2.api.records.JobId jobId =
    TypeConverter.toYarn(oldJobID);
  GetTaskReportsRequest request =
      recordFactory.newRecordInstance(GetTaskReportsRequest.class);
  request.setJobId(jobId);
  request.setTaskType(TypeConverter.toYarn(taskType));

  List<org.apache.hadoop.mapreduce.v2.api.records.TaskReport> taskReports =
    ((GetTaskReportsResponse) invoke("getTaskReports", GetTaskReportsRequest.class,
        request)).getTaskReportList();

  return TypeConverter.fromYarn(taskReports)
      .toArray(new org.apache.hadoop.mapreduce.TaskReport[0]);
}
Project: hadoop    File: TestTaskCommit.java
private Task createDummyTask(TaskType type)
    throws IOException, ClassNotFoundException, InterruptedException {
  JobConf conf = new JobConf();
  conf.setOutputCommitter(CommitterThatAlwaysRequiresCommit.class);
  Path outDir = new Path(rootDir, "output"); 
  FileOutputFormat.setOutputPath(conf, outDir);
  JobID jobId = JobID.forName("job_201002121132_0001");
  Task testTask;
  if (type == TaskType.MAP) {
    testTask = new MapTask();
  } else {
    testTask = new ReduceTask();
  }
  testTask.setConf(conf);
  testTask.initialize(conf, jobId, Reporter.NULL, false);
  return testTask;
}
Project: hadoop    File: TestCompletedTask.java
/**
 * test some methods of CompletedTaskAttempt
 */
@Test (timeout=5000)
public void testCompletedTaskAttempt() {

  TaskAttemptInfo attemptInfo = mock(TaskAttemptInfo.class);
  when(attemptInfo.getRackname()).thenReturn("Rackname");
  when(attemptInfo.getShuffleFinishTime()).thenReturn(11L);
  when(attemptInfo.getSortFinishTime()).thenReturn(12L);
  when(attemptInfo.getShufflePort()).thenReturn(10);

  JobID jobId = new JobID("12345", 0);
  TaskID taskId = new TaskID(jobId, TaskType.REDUCE, 0);
  TaskAttemptID taskAttemptId = new TaskAttemptID(taskId, 0);
  when(attemptInfo.getAttemptId()).thenReturn(taskAttemptId);

  CompletedTaskAttempt taskAttempt = new CompletedTaskAttempt(null, attemptInfo);
  assertEquals("Rackname", taskAttempt.getNodeRackName());
  assertEquals(Phase.CLEANUP, taskAttempt.getPhase());
  assertTrue(taskAttempt.isFinished());
  assertEquals(11L, taskAttempt.getShuffleFinishTime());
  assertEquals(12L, taskAttempt.getSortFinishTime());
  assertEquals(10, taskAttempt.getShufflePort());
}
Project: hadoop    File: TestOldMethodsJobID.java
/**
 * test deprecated methods of TaskCompletionEvent
 */
@SuppressWarnings("deprecation")
@Test (timeout=5000)
public void testTaskCompletionEvent() {
  TaskAttemptID taid = new TaskAttemptID("001", 1, TaskType.REDUCE, 2, 3);
  TaskCompletionEvent template = new TaskCompletionEvent(12, taid, 13, true,
      Status.SUCCEEDED, "httptracker");
  TaskCompletionEvent testEl = TaskCompletionEvent.downgrade(template);
  testEl.setTaskAttemptId(taid);
  testEl.setTaskTrackerHttp("httpTracker");

  testEl.setTaskId("attempt_001_0001_m_000002_04");
  assertEquals("attempt_001_0001_m_000002_4",testEl.getTaskId());

  testEl.setTaskStatus(Status.OBSOLETE);
  assertEquals(Status.OBSOLETE.toString(), testEl.getStatus().toString());

  testEl.setTaskRunTime(20);
  assertEquals(testEl.getTaskRunTime(), 20);
  testEl.setEventId(16);
  assertEquals(testEl.getEventId(), 16);

}
Project: hadoop    File: LocalJobRunner.java
private org.apache.hadoop.mapreduce.OutputCommitter createOutputCommitter(
    boolean newApiCommitter, JobID jobId, Configuration conf) throws Exception {
  org.apache.hadoop.mapreduce.OutputCommitter committer = null;

  LOG.info("OutputCommitter set in config "
      + conf.get("mapred.output.committer.class"));

  if (newApiCommitter) {
    org.apache.hadoop.mapreduce.TaskID taskId =
        new org.apache.hadoop.mapreduce.TaskID(jobId, TaskType.MAP, 0);
    org.apache.hadoop.mapreduce.TaskAttemptID taskAttemptID =
        new org.apache.hadoop.mapreduce.TaskAttemptID(taskId, 0);
    org.apache.hadoop.mapreduce.TaskAttemptContext taskContext = 
        new TaskAttemptContextImpl(conf, taskAttemptID);
    OutputFormat outputFormat =
      ReflectionUtils.newInstance(taskContext.getOutputFormatClass(), conf);
    committer = outputFormat.getOutputCommitter(taskContext);
  } else {
    committer = ReflectionUtils.newInstance(conf.getClass(
        "mapred.output.committer.class", FileOutputCommitter.class,
        org.apache.hadoop.mapred.OutputCommitter.class), conf);
  }
  LOG.info("OutputCommitter is " + committer.getClass().getName());
  return committer;
}
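
How the two branches are selected from configuration, as a hedged sketch; FileOutputCommitter and TextOutputFormat are the stock classes, and the property name is the one the snippet above reads:

// Old API: choose the committer via the config key read above
JobConf oldConf = new JobConf();
oldConf.setOutputCommitter(org.apache.hadoop.mapred.FileOutputCommitter.class);
// equivalent to setting "mapred.output.committer.class" directly

// New API: the committer is obtained from the OutputFormat instead
org.apache.hadoop.mapreduce.Job job =
    org.apache.hadoop.mapreduce.Job.getInstance(new Configuration());
job.setOutputFormatClass(
    org.apache.hadoop.mapreduce.lib.output.TextOutputFormat.class);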
Project: hadoop    File: Merger.java
public static <K extends Object, V extends Object>
RawKeyValueIterator merge(Configuration conf, FileSystem fs,
                          Class<K> keyClass, Class<V> valueClass, 
                          CompressionCodec codec,
                          Path[] inputs, boolean deleteInputs, 
                          int mergeFactor, Path tmpDir,
                          RawComparator<K> comparator, Progressable reporter,
                          Counters.Counter readsCounter,
                          Counters.Counter writesCounter,
                          Progress mergePhase)
throws IOException {
  return 
    new MergeQueue<K, V>(conf, fs, inputs, deleteInputs, codec, comparator, 
                         reporter, null,
                         TaskType.REDUCE).merge(keyClass, valueClass,
                                         mergeFactor, tmpDir,
                                         readsCounter, writesCounter, 
                                         mergePhase);
}
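
A minimal sketch of calling the overload above; the segment paths, counter names and merge factor are hypothetical, and a local file system is assumed. Passing JobConf's raw output-key comparator mirrors how the framework itself invokes this method:

JobConf job = new JobConf();
job.setOutputKeyClass(Text.class);
FileSystem fs = FileSystem.getLocal(job);
Counters counters = new Counters();
RawKeyValueIterator iter = Merger.merge(
    job, fs, Text.class, Text.class,
    null,                                      // no compression codec
    new Path[] { new Path("/tmp/seg-0"), new Path("/tmp/seg-1") },
    false,                                     // keep the input files
    10,                                        // merge factor
    new Path("/tmp/merge-scratch"),
    job.getOutputKeyComparator(),              // raw comparator for Text keys
    Reporter.NULL,
    counters.findCounter("merge", "reads"),    // hypothetical counter names
    counters.findCounter("merge", "writes"),
    new Progress());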
Project: hadoop    File: Merger.java
public static <K extends Object, V extends Object>
RawKeyValueIterator merge(Configuration conf, FileSystem fs,
                          Class<K> keyClass, Class<V> valueClass, 
                          CompressionCodec codec,
                          Path[] inputs, boolean deleteInputs, 
                          int mergeFactor, Path tmpDir,
                          RawComparator<K> comparator,
                          Progressable reporter,
                          Counters.Counter readsCounter,
                          Counters.Counter writesCounter,
                          Counters.Counter mergedMapOutputsCounter,
                          Progress mergePhase)
throws IOException {
  return 
    new MergeQueue<K, V>(conf, fs, inputs, deleteInputs, codec, comparator, 
                         reporter, mergedMapOutputsCounter,
                         TaskType.REDUCE).merge(
                                         keyClass, valueClass,
                                         mergeFactor, tmpDir,
                                         readsCounter, writesCounter,
                                         mergePhase);
}
Project: hadoop    File: Merger.java
public static <K extends Object, V extends Object>
RawKeyValueIterator merge(Configuration conf, FileSystem fs,
                          Class<K> keyClass, Class<V> valueClass,
                          CompressionCodec codec,
                          List<Segment<K, V>> segments,
                          int mergeFactor, Path tmpDir,
                          RawComparator<K> comparator, Progressable reporter,
                          boolean sortSegments,
                          Counters.Counter readsCounter,
                          Counters.Counter writesCounter,
                          Progress mergePhase,
                          TaskType taskType)
    throws IOException {
  return new MergeQueue<K, V>(conf, fs, segments, comparator, reporter,
                         sortSegments, codec,
                         taskType).merge(keyClass, valueClass,
                                             mergeFactor, tmpDir,
                                             readsCounter, writesCounter,
                                             mergePhase);
}
Project: hadoop    File: Merger.java
public static <K extends Object, V extends Object>
  RawKeyValueIterator merge(Configuration conf, FileSystem fs,
                          Class<K> keyClass, Class<V> valueClass,
                          List<Segment<K, V>> segments,
                          int mergeFactor, int inMemSegments, Path tmpDir,
                          RawComparator<K> comparator, Progressable reporter,
                          boolean sortSegments,
                          Counters.Counter readsCounter,
                          Counters.Counter writesCounter,
                          Progress mergePhase)
    throws IOException {
  return new MergeQueue<K, V>(conf, fs, segments, comparator, reporter,
                         sortSegments,
                         TaskType.REDUCE).merge(keyClass, valueClass,
                                             mergeFactor, inMemSegments,
                                             tmpDir,
                                             readsCounter, writesCounter,
                                             mergePhase);
}
Project: hadoop    File: Merger.java
public static <K extends Object, V extends Object>
  RawKeyValueIterator merge(Configuration conf, FileSystem fs,
                          Class<K> keyClass, Class<V> valueClass,
                          CompressionCodec codec,
                          List<Segment<K, V>> segments,
                          int mergeFactor, int inMemSegments, Path tmpDir,
                          RawComparator<K> comparator, Progressable reporter,
                          boolean sortSegments,
                          Counters.Counter readsCounter,
                          Counters.Counter writesCounter,
                          Progress mergePhase)
    throws IOException {
  return new MergeQueue<K, V>(conf, fs, segments, comparator, reporter,
                         sortSegments, codec,
                         TaskType.REDUCE).merge(keyClass, valueClass,
                                             mergeFactor, inMemSegments,
                                             tmpDir,
                                             readsCounter, writesCounter,
                                             mergePhase);
}
Project: hadoop    File: CLI.java
/**
 * Display the information about a job's tasks, of a particular type and
 * in a particular state
 * 
 * @param job the job
 * @param type the type of the task (map/reduce/setup/cleanup)
 * @param state the state of the task 
 * (pending/running/completed/failed/killed)
 */
protected void displayTasks(Job job, String type, String state) 
throws IOException, InterruptedException {
  TaskReport[] reports = job.getTaskReports(TaskType.valueOf(
      org.apache.hadoop.util.StringUtils.toUpperCase(type)));
  for (TaskReport report : reports) {
    TIPStatus status = report.getCurrentStatus();
    if ((state.equalsIgnoreCase("pending") && status ==TIPStatus.PENDING) ||
        (state.equalsIgnoreCase("running") && status ==TIPStatus.RUNNING) ||
        (state.equalsIgnoreCase("completed") && status == TIPStatus.COMPLETE) ||
        (state.equalsIgnoreCase("failed") && status == TIPStatus.FAILED) ||
        (state.equalsIgnoreCase("killed") && status == TIPStatus.KILLED)) {
      printTaskAttempts(report);
    }
  }
}
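
The same type/state filter can be driven directly from the Job API; a hedged sketch with a hypothetical job ID:

Cluster cluster = new Cluster(new Configuration());
Job job = cluster.getJob(JobID.forName("job_1392390614553_0001"));
// getJob returns null if the job is unknown to the cluster
for (TaskReport report : job.getTaskReports(TaskType.MAP)) {
  if (report.getCurrentStatus() == TIPStatus.RUNNING) {
    System.out.println(report.getTaskID());
  }
}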
Project: hadoop    File: DebugJobProducer.java
@SuppressWarnings({ "deprecation", "incomplete-switch" })
@Override
public TaskAttemptInfo getTaskAttemptInfo(
  TaskType taskType, int taskNumber, int taskAttemptNumber) {
  switch (taskType) {
    case MAP:
      return new MapTaskAttemptInfo(
        State.SUCCEEDED, 
        new TaskInfo(
          m_bytesIn[taskNumber], m_recsIn[taskNumber],
          m_bytesOut[taskNumber], m_recsOut[taskNumber], -1),
        100);

    case REDUCE:
      return new ReduceTaskAttemptInfo(
        State.SUCCEEDED, 
        new TaskInfo(
          r_bytesIn[taskNumber], r_recsIn[taskNumber],
          r_bytesOut[taskNumber], r_recsOut[taskNumber], -1),
        100, 100, 100);
  }
  throw new UnsupportedOperationException();
}
Project: hadoop    File: ReduceAttemptFinishedEvent.java
/**
 * Create an event to record completion of a reduce attempt
 * @param id Attempt Id
 * @param taskType Type of task
 * @param taskStatus Status of the task
 * @param shuffleFinishTime Finish time of the shuffle phase
 * @param sortFinishTime Finish time of the sort phase
 * @param finishTime Finish time of the attempt
 * @param hostname Name of the host where the attempt executed
 * @param port RPC port for the tracker host.
 * @param rackName Name of the rack where the attempt executed
 * @param state State of the attempt
 * @param counters Counters for the attempt
 * @param allSplits the "splits", or a pixelated graph of various
 *        measurable worker node state variables against progress.
 *        Currently there are four; wallclock time, CPU time,
 *        virtual memory and physical memory.  
 */
public ReduceAttemptFinishedEvent(
    TaskAttemptID id, TaskType taskType, String taskStatus,
    long shuffleFinishTime, long sortFinishTime, long finishTime,
    String hostname, int port, String rackName, String state,
    Counters counters, int[][] allSplits) {
  this.attemptId = id;
  this.taskType = taskType;
  this.taskStatus = taskStatus;
  this.shuffleFinishTime = shuffleFinishTime;
  this.sortFinishTime = sortFinishTime;
  this.finishTime = finishTime;
  this.hostname = hostname;
  this.rackName = rackName;
  this.port = port;
  this.state = state;
  this.counters = counters;
  this.allSplits = allSplits;
  this.clockSplits = ProgressSplitsBlock.arrayGetWallclockTime(allSplits);
  this.cpuUsages = ProgressSplitsBlock.arrayGetCPUTime(allSplits);
  this.gpuUsages = ProgressSplitsBlock.arrayGetGPUTime(allSplits);
  this.vMemKbytes = ProgressSplitsBlock.arrayGetVMemKbytes(allSplits);
  this.physMemKbytes = ProgressSplitsBlock.arrayGetPhysMemKbytes(allSplits);
}
Project: hadoop    File: TestStreamingStatus.java
/**
 * Run a streaming job with the given script as mapper and validate.
 * Run another streaming job with the given script as reducer and validate.
 *
 * @param isEmptyInput Should the input to the script be empty?
 */
private void testStreamJob(boolean isEmptyInput)
    throws IOException {

    createInputAndScript(isEmptyInput, script);

    // Check if streaming mapper works as expected
    map = scriptFileName;
    reduce = "/bin/cat";
    runStreamJob(TaskType.MAP, isEmptyInput);
    deleteOutDir(fs);

    // Check if streaming reducer works as expected.
    map = "/bin/cat";
    reduce = scriptFileName;
    runStreamJob(TaskType.REDUCE, isEmptyInput);
    clean(fs);
}
Project: hadoop    File: TaskFinishedEvent.java
public void setDatum(Object oDatum) {
  this.datum = (TaskFinished)oDatum;
  this.taskid = TaskID.forName(datum.taskid.toString());
  if (datum.successfulAttemptId != null) {
    this.successfulAttemptId = TaskAttemptID
        .forName(datum.successfulAttemptId.toString());
  }
  this.finishTime = datum.finishTime;
  this.taskType = TaskType.valueOf(datum.taskType.toString());
  this.status = datum.status.toString();
  this.counters = EventReader.fromAvro(datum.counters);
}
Project: hadoop    File: TestStreamingStatus.java
void runStreamJob(TaskType type, boolean isEmptyInput) throws IOException {
  boolean mayExit = false;
  StreamJob job = new StreamJob(genArgs(
      mr.createJobConf().get(JTConfig.JT_IPC_ADDRESS), map, reduce), mayExit);
  int returnValue = job.go();
  assertEquals(0, returnValue);

  // If the input to the reducer is empty, a dummy reporter (which ignores
  // all reporting lines) is set for MRErrorThread in waitOutputThreads(), so
  // expectedCounterValue is 0 for the empty-input-to-reducer case.
  // The reducer's output is also empty in that case.
  int expectedCounterValue = 0;
  if (type == TaskType.MAP || !isEmptyInput) {
    validateTaskStatus(job, type);
    // output is from "print STDOUT" statements in perl script
    validateJobOutput(job.getConf());
    expectedCounterValue = 2;
  }
  validateUserCounter(job, expectedCounterValue);
  validateTaskStderr(job, type);

  deleteOutDir(fs);
}
Project: hadoop    File: TestTaskAttemptListenerImpl.java
private static TaskAttemptCompletionEvent createTce(int eventId,
    boolean isMap, TaskAttemptCompletionEventStatus status) {
  JobId jid = MRBuilderUtils.newJobId(12345, 1, 1);
  TaskId tid = MRBuilderUtils.newTaskId(jid, 0,
      isMap ? org.apache.hadoop.mapreduce.v2.api.records.TaskType.MAP
          : org.apache.hadoop.mapreduce.v2.api.records.TaskType.REDUCE);
  TaskAttemptId attemptId = MRBuilderUtils.newTaskAttemptId(tid, 0);
  RecordFactory recordFactory = RecordFactoryProvider.getRecordFactory(null);
  TaskAttemptCompletionEvent tce = recordFactory
      .newRecordInstance(TaskAttemptCompletionEvent.class);
  tce.setEventId(eventId);
  tce.setAttemptId(attemptId);
  tce.setStatus(status);
  return tce;
}
Project: hadoop    File: ZombieJob.java
private LoggedTaskAttempt getLoggedTaskAttempt(TaskType taskType,
    int taskNumber, int taskAttemptNumber) {
  buildMaps();
  TaskAttemptID id =
      new TaskAttemptID(getMaskedTaskID(taskType, taskNumber),
          taskAttemptNumber);
  return loggedTaskAttemptMap.get(id);
}
Project: hadoop    File: TestOldMethodsJobID.java
/**
 * test deprecated methods of TaskID
 * @throws IOException
 */
@SuppressWarnings("deprecation")
@Test (timeout=5000)
public void testDeprecatedMethods() throws IOException {
  JobID jid = new JobID();
  TaskID test = new TaskID(jid, true, 1);
  assertEquals(test.getTaskType(), TaskType.MAP);
  test = new TaskID(jid, false, 1);
  assertEquals(test.getTaskType(), TaskType.REDUCE);

  test = new TaskID("001", 1, false, 1);
  assertEquals(test.getTaskType(), TaskType.REDUCE);
  test = new TaskID("001", 1, true, 1);
  assertEquals(test.getTaskType(), TaskType.MAP);
  ByteArrayOutputStream out = new ByteArrayOutputStream();

  test.write(new DataOutputStream(out));
  TaskID ti = TaskID.read(new DataInputStream(new ByteArrayInputStream(out
      .toByteArray())));
  assertEquals(ti.toString(), test.toString());
  assertEquals("task_001_0001_m_000002",
      TaskID.getTaskIDsPattern("001", 1, true, 2));
  assertEquals("task_003_0001_m_000004",
      TaskID.getTaskIDsPattern("003", 1, TaskType.MAP, 4));
  assertEquals("003_0001_m_000004",
      TaskID.getTaskIDsPatternWOPrefix("003", 1, TaskType.MAP, 4).toString());

}
Project: hadoop    File: TestEvents.java
/**
 * simple test of TaskUpdatedEvent and TaskUpdated
 * 
 * @throws Exception
 */
@Test(timeout = 10000)
public void testTaskUpdated() throws Exception {
  JobID jid = new JobID("001", 1);
  TaskID tid = new TaskID(jid, TaskType.REDUCE, 2);
  TaskUpdatedEvent test = new TaskUpdatedEvent(tid, 1234L);
  assertEquals(test.getTaskId().toString(), tid.toString());
  assertEquals(test.getFinishTime(), 1234L);

}
Project: hadoop    File: JobBuilder.java
private ParsedTaskAttempt getOrMakeTaskAttempt(TaskType type,
    String taskIDName, String taskAttemptName) {
  ParsedTask task = getOrMakeTask(type, taskIDName, false);
  ParsedTaskAttempt result = attempts.get(taskAttemptName);

  if (result == null && task != null) {
    result = new ParsedTaskAttempt();
    result.setAttemptID(taskAttemptName);
    attempts.put(taskAttemptName, result);
    task.getAttempts().add(result);
  }

  return result;
}
Project: hadoop    File: TestCombineFileInputFormat.java
@Test
public void testRecordReaderInit() throws InterruptedException, IOException {
  // Test that we properly initialize the child recordreader when
  // CombineFileInputFormat and CombineFileRecordReader are used.

  TaskAttemptID taskId = new TaskAttemptID("jt", 0, TaskType.MAP, 0, 0);
  Configuration conf1 = new Configuration();
  conf1.set(DUMMY_KEY, "STATE1");
  TaskAttemptContext context1 = new TaskAttemptContextImpl(conf1, taskId);

  // This will create a CombineFileRecordReader that itself contains a
  // DummyRecordReader.
  InputFormat inputFormat = new ChildRRInputFormat();

  Path [] files = { new Path("file1") };
  long [] lengths = { 1 };

  CombineFileSplit split = new CombineFileSplit(files, lengths);

  RecordReader rr = inputFormat.createRecordReader(split, context1);
  assertTrue("Unexpected RR type!", rr instanceof CombineFileRecordReader);

  // Verify that the initial configuration is the one being used.
  // Right after construction the dummy key should have value "STATE1"
  assertEquals("Invalid initial dummy key value", "STATE1",
    rr.getCurrentKey().toString());

  // Switch the active context for the RecordReader...
  Configuration conf2 = new Configuration();
  conf2.set(DUMMY_KEY, "STATE2");
  TaskAttemptContext context2 = new TaskAttemptContextImpl(conf2, taskId);
  rr.initialize(split, context2);

  // And verify that the new context is updated into the child record reader.
  assertEquals("Invalid secondary dummy key value", "STATE2",
    rr.getCurrentKey().toString());
}
Project: hadoop    File: TestShuffleScheduler.java
@SuppressWarnings("rawtypes")
@Test
public void testTipFailed() throws Exception {
  JobConf job = new JobConf();
  job.setNumMapTasks(2);

  TaskStatus status = new TaskStatus() {
    @Override
    public boolean getIsMap() {
      return false;
    }

    @Override
    public void addFetchFailedMap(TaskAttemptID mapTaskId) {
    }
  };
  Progress progress = new Progress();

  TaskAttemptID reduceId = new TaskAttemptID("314159", 0, TaskType.REDUCE,
      0, 0);
  ShuffleSchedulerImpl scheduler = new ShuffleSchedulerImpl(job, status,
      reduceId, null, progress, null, null, null);

  JobID jobId = new JobID();
  TaskID taskId1 = new TaskID(jobId, TaskType.REDUCE, 1);
  scheduler.tipFailed(taskId1);

  Assert.assertEquals("Progress should be 0.5", 0.5f, progress.getProgress(),
      0.0f);
  Assert.assertFalse(scheduler.waitUntilDone(1));

  TaskID taskId0 = new TaskID(jobId, TaskType.REDUCE, 0);
  scheduler.tipFailed(taskId0);
  Assert.assertEquals("Progress should be 1.0", 1.0f, progress.getProgress(),
      0.0f);
  Assert.assertTrue(scheduler.waitUntilDone(1));
}
Project: hadoop    File: JobClient.java
private TaskReport[] getTaskReports(final JobID jobId, TaskType type)
    throws IOException {
  try {
    Job j = getJobUsingCluster(jobId);
    if(j == null) {
      return EMPTY_TASK_REPORTS;
    }
    return TaskReport.downgradeArray(j.getTaskReports(type));
  } catch (InterruptedException ie) {
    throw new IOException(ie);
  }
}
Project: hadoop    File: TestStreamingStatus.java
void validateTaskStderr(StreamJob job, TaskType type)
    throws IOException {
  TaskAttemptID attemptId =
      new TaskAttemptID(new TaskID(job.jobId_, type, 0), 0);

  String log = MapReduceTestUtil.readTaskLog(TaskLog.LogName.STDERR,
      attemptId, false);

  // trim() is called on expectedStderr here because
  // MapReduceTestUtil.readTaskLog() returns a trimmed String.
  assertTrue(log.equals(expectedStderr.trim()));
}
Project: hadoop    File: TestJobInfo.java
@Test(timeout = 5000)
public void testTaskID() throws IOException, InterruptedException {
  JobID jobid = new JobID("1014873536921", 6);
  TaskID tid = new TaskID(jobid, TaskType.MAP, 0);
  org.apache.hadoop.mapred.TaskID tid1 =
      org.apache.hadoop.mapred.TaskID.downgrade(tid);
  org.apache.hadoop.mapred.TaskReport treport =
      new org.apache.hadoop.mapred.TaskReport(tid1, 0.0f,
        State.FAILED.toString(), null, TIPStatus.FAILED, 100, 100,
        new org.apache.hadoop.mapred.Counters());
  Assert.assertEquals(treport.getTaskId(),
      "task_1014873536921_0006_m_000000");
  Assert.assertEquals(treport.getTaskID().toString(),
    "task_1014873536921_0006_m_000000");
}
Project: hadoop    File: Merger.java
public MergeQueue(Configuration conf, FileSystem fs, 
                  Path[] inputs, boolean deleteInputs, 
                  CompressionCodec codec, RawComparator<K> comparator,
                  Progressable reporter, 
                  Counters.Counter mergedMapOutputsCounter,
                  TaskType taskType) 
throws IOException {
  this.conf = conf;
  this.fs = fs;
  this.codec = codec;
  this.comparator = comparator;
  this.reporter = reporter;

  if (taskType == TaskType.MAP) {
    considerFinalMergeForProgress();
  }

  for (Path file : inputs) {
    LOG.debug("MergeQ: adding: " + file);
    segments.add(new Segment<K, V>(conf, fs, file, codec, !deleteInputs, 
                                   (file.toString().endsWith(
                                       Task.MERGED_OUTPUT_PREFIX) ? 
                                    null : mergedMapOutputsCounter)));
  }

  // Sort segments on file-lengths
  Collections.sort(segments, segmentComparator); 
}
Project: hadoop    File: Merger.java
public MergeQueue(Configuration conf, FileSystem fs, 
    List<Segment<K, V>> segments, RawComparator<K> comparator,
    Progressable reporter, boolean sortSegments, TaskType taskType) {
  this.conf = conf;
  this.fs = fs;
  this.comparator = comparator;
  this.segments = segments;
  this.reporter = reporter;
  if (taskType == TaskType.MAP) {
    considerFinalMergeForProgress();
  }
  if (sortSegments) {
    Collections.sort(segments, segmentComparator);
  }
}
Project: hadoop    File: Merger.java
public MergeQueue(Configuration conf, FileSystem fs,
    List<Segment<K, V>> segments, RawComparator<K> comparator,
    Progressable reporter, boolean sortSegments, CompressionCodec codec,
    TaskType taskType) {
  this(conf, fs, segments, comparator, reporter, sortSegments,
      taskType);
  this.codec = codec;
}
Project: hadoop    File: TaskID.java
@Deprecated
static StringBuilder getTaskIDsPatternWOPrefix(String jtIdentifier
    , Integer jobId, TaskType type, Integer taskId) {
  StringBuilder builder = new StringBuilder();
  builder.append(JobID.getJobIDsPatternWOPrefix(jtIdentifier, jobId))
      .append(SEPARATOR)
      .append(type != null
          ? org.apache.hadoop.mapreduce.TaskID.getRepresentingCharacter(type)
          : org.apache.hadoop.mapreduce.TaskID.getAllTaskTypes())
      .append(SEPARATOR)
      .append(taskId != null ? idFormat.format(taskId) : "[0-9]*");
  return builder;
}
Project: hadoop    File: TaskAttemptFinishedEvent.java
/**
 * Create an event to record successful finishes for setup and cleanup 
 * attempts
 * @param id Attempt ID
 * @param taskType Type of task
 * @param taskStatus Status of task
 * @param finishTime Finish time of the attempt
 * @param rackName Name of the rack where the attempt executed
 * @param hostname Host where the attempt executed
 * @param state State string
 * @param counters Counters for the attempt
 */
public TaskAttemptFinishedEvent(TaskAttemptID id, 
    TaskType taskType, String taskStatus, 
    long finishTime, String rackName,
    String hostname, String state, Counters counters) {
  this.attemptId = id;
  this.taskType = taskType;
  this.taskStatus = taskStatus;
  this.finishTime = finishTime;
  this.rackName = rackName;
  this.hostname = hostname;
  this.state = state;
  this.counters = counters;
}
Project: hadoop    File: TaskAttemptFinishedEvent.java
/** Get the event type */
public EventType getEventType() {
  // Note that the task type can be setup/map/reduce/cleanup but the 
  // attempt-type can only be map/reduce.
  return getTaskId().getTaskType() == TaskType.MAP 
         ? EventType.MAP_ATTEMPT_FINISHED
         : EventType.REDUCE_ATTEMPT_FINISHED;
}
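
A hedged sketch of the comment above: a TASK_CLEANUP attempt of a reduce task still maps to REDUCE_ATTEMPT_FINISHED, because the decision reads the attempt ID's reduce marker, not the passed task type. The ID fields and strings below are hypothetical.

TaskAttemptID cleanupAttempt =
    new TaskAttemptID("jt", 1, TaskType.REDUCE, 4, 0);
TaskAttemptFinishedEvent event = new TaskAttemptFinishedEvent(
    cleanupAttempt, TaskType.TASK_CLEANUP, "SUCCEEDED",
    1234L, "/default-rack", "host", "state", new Counters());
// event.getEventType() == EventType.REDUCE_ATTEMPT_FINISHED,
// driven by the attempt ID's task type rather than TASK_CLEANUP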