Example source code for the Java class org.apache.hadoop.mapreduce.v2.api.records.Phase

Project: hadoop    File: TestFetchFailure.java
private void updateStatus(MRApp app, TaskAttempt attempt, Phase phase) {
  TaskAttemptStatusUpdateEvent.TaskAttemptStatus status = new TaskAttemptStatusUpdateEvent.TaskAttemptStatus();
  status.counters = new Counters();
  status.fetchFailedMaps = new ArrayList<TaskAttemptId>();
  status.id = attempt.getID();
  status.mapFinishTime = 0;
  status.phase = phase;
  status.progress = 0.5f;
  status.shuffleFinishTime = 0;
  status.sortFinishTime = 0;
  status.stateString = "OK";
  status.taskState = attempt.getState();
  TaskAttemptStatusUpdateEvent event = new TaskAttemptStatusUpdateEvent(attempt.getID(),
      status);
  app.getContext().getEventHandler().handle(event);
}
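A helper like this lets a test report a reduce attempt's progress through its phases before injecting fetch failures. Below is a minimal sketch of such a call sequence; it assumes it sits in the same test class as updateStatus() above and that `app` and `reduceAttempt` come from a running MRApp test harness (both assumptions, not shown in the snippet), and the method name simulateReducePhases is made up here.

// Sketch only: relies on the updateStatus() helper above and on an MRApp
// harness supplying `app` and a running reduce TaskAttempt.
private void simulateReducePhases(MRApp app, TaskAttempt reduceAttempt) {
  updateStatus(app, reduceAttempt, Phase.SHUFFLE); // copying map outputs
  updateStatus(app, reduceAttempt, Phase.SORT);    // merging/sorting them
  updateStatus(app, reduceAttempt, Phase.REDUCE);  // running the reducer
}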
Project: hadoop    File: TypeConverter.java
public static Phase toYarn(org.apache.hadoop.mapred.TaskStatus.Phase phase) {
  switch (phase) {
  case STARTING:
    return Phase.STARTING;
  case MAP:
    return Phase.MAP;
  case SHUFFLE:
    return Phase.SHUFFLE;
  case SORT:
    return Phase.SORT;
  case REDUCE:
    return Phase.REDUCE;
  case CLEANUP:
    return Phase.CLEANUP;
  }
  throw new YarnRuntimeException("Unrecognized Phase: " + phase);
}
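toYarn() above only converts from the classic org.apache.hadoop.mapred.TaskStatus.Phase to the YARN-side Phase. If the reverse direction is needed, it is a mirror of the same six constants; the sketch below is illustrative only (the method name fromYarnPhase is hypothetical, not taken from TypeConverter).

// Illustrative reverse mapping over exactly the six cases handled by toYarn().
public static org.apache.hadoop.mapred.TaskStatus.Phase fromYarnPhase(Phase phase) {
  switch (phase) {
  case STARTING:
    return org.apache.hadoop.mapred.TaskStatus.Phase.STARTING;
  case MAP:
    return org.apache.hadoop.mapred.TaskStatus.Phase.MAP;
  case SHUFFLE:
    return org.apache.hadoop.mapred.TaskStatus.Phase.SHUFFLE;
  case SORT:
    return org.apache.hadoop.mapred.TaskStatus.Phase.SORT;
  case REDUCE:
    return org.apache.hadoop.mapred.TaskStatus.Phase.REDUCE;
  case CLEANUP:
    return org.apache.hadoop.mapred.TaskStatus.Phase.CLEANUP;
  default:
    throw new IllegalArgumentException("Unrecognized Phase: " + phase);
  }
}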
Project: hadoop    File: TestCompletedTask.java
/**
 * Test some getter methods of CompletedTaskAttempt against a mocked
 * TaskAttemptInfo.
 */
@Test(timeout = 5000)
public void testCompletedTaskAttempt() {

  TaskAttemptInfo attemptInfo = mock(TaskAttemptInfo.class);
  when(attemptInfo.getRackname()).thenReturn("Rackname");
  when(attemptInfo.getShuffleFinishTime()).thenReturn(11L);
  when(attemptInfo.getSortFinishTime()).thenReturn(12L);
  when(attemptInfo.getShufflePort()).thenReturn(10);

  JobID jobId = new JobID("12345", 0);
  TaskID taskId = new TaskID(jobId, TaskType.REDUCE, 0);
  TaskAttemptID taskAttemptId = new TaskAttemptID(taskId, 0);
  when(attemptInfo.getAttemptId()).thenReturn(taskAttemptId);

  CompletedTaskAttempt taskAttempt = new CompletedTaskAttempt(null, attemptInfo);
  assertEquals("Rackname", taskAttempt.getNodeRackName());
  assertEquals(Phase.CLEANUP, taskAttempt.getPhase());
  assertTrue(taskAttempt.isFinished());
  assertEquals(11L, taskAttempt.getShuffleFinishTime());
  assertEquals(12L, taskAttempt.getSortFinishTime());
  assertEquals(10, taskAttempt.getShufflePort());
}
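The CLEANUP assertion suggests CompletedTaskAttempt reports Phase.CLEANUP for any finished attempt rather than the phase it last ran, so it is not tied to the attempt being a reduce. As a hedged variation (an assumption, not part of the original test), the same mock-only setup should behave identically for a MAP attempt:

// Assumption: a finished MAP attempt is also reported as Phase.CLEANUP.
TaskAttemptInfo mapInfo = mock(TaskAttemptInfo.class);
TaskAttemptID mapAttemptId = new TaskAttemptID(
    new TaskID(new JobID("12345", 0), TaskType.MAP, 0), 0);
when(mapInfo.getAttemptId()).thenReturn(mapAttemptId);

CompletedTaskAttempt mapAttempt = new CompletedTaskAttempt(null, mapInfo);
assertEquals(Phase.CLEANUP, mapAttempt.getPhase());
assertTrue(mapAttempt.isFinished());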
Project: aliyun-oss-hadoop-fs    File: TestFetchFailure.java
private void updateStatus(MRApp app, TaskAttempt attempt, Phase phase) {
  TaskAttemptStatusUpdateEvent.TaskAttemptStatus status = new TaskAttemptStatusUpdateEvent.TaskAttemptStatus();
  status.counters = new Counters();
  status.fetchFailedMaps = new ArrayList<TaskAttemptId>();
  status.id = attempt.getID();
  status.mapFinishTime = 0;
  status.phase = phase;
  status.progress = 0.5f;
  status.shuffleFinishTime = 0;
  status.sortFinishTime = 0;
  status.stateString = "OK";
  status.taskState = attempt.getState();
  TaskAttemptStatusUpdateEvent event = new TaskAttemptStatusUpdateEvent(attempt.getID(),
      status);
  app.getContext().getEventHandler().handle(event);
}
Project: aliyun-oss-hadoop-fs    File: TypeConverter.java
public static Phase toYarn(org.apache.hadoop.mapred.TaskStatus.Phase phase) {
  switch (phase) {
  case STARTING:
    return Phase.STARTING;
  case MAP:
    return Phase.MAP;
  case SHUFFLE:
    return Phase.SHUFFLE;
  case SORT:
    return Phase.SORT;
  case REDUCE:
    return Phase.REDUCE;
  case CLEANUP:
    return Phase.CLEANUP;
  default:
    break;
  }
  throw new YarnRuntimeException("Unrecognized Phase: " + phase);
}
Project: aliyun-oss-hadoop-fs    File: TestCompletedTask.java
/**
 * Test some getter methods of CompletedTaskAttempt against a mocked
 * TaskAttemptInfo.
 */
@Test(timeout = 5000)
public void testCompletedTaskAttempt() {

  TaskAttemptInfo attemptInfo = mock(TaskAttemptInfo.class);
  when(attemptInfo.getRackname()).thenReturn("Rackname");
  when(attemptInfo.getShuffleFinishTime()).thenReturn(11L);
  when(attemptInfo.getSortFinishTime()).thenReturn(12L);
  when(attemptInfo.getShufflePort()).thenReturn(10);

  JobID jobId = new JobID("12345", 0);
  TaskID taskId = new TaskID(jobId, TaskType.REDUCE, 0);
  TaskAttemptID taskAttemptId = new TaskAttemptID(taskId, 0);
  when(attemptInfo.getAttemptId()).thenReturn(taskAttemptId);

  CompletedTaskAttempt taskAttempt = new CompletedTaskAttempt(null, attemptInfo);
  assertEquals("Rackname", taskAttempt.getNodeRackName());
  assertEquals(Phase.CLEANUP, taskAttempt.getPhase());
  assertTrue(taskAttempt.isFinished());
  assertEquals(11L, taskAttempt.getShuffleFinishTime());
  assertEquals(12L, taskAttempt.getSortFinishTime());
  assertEquals(10, taskAttempt.getShufflePort());
}
Project: big-c    File: TestFetchFailure.java
private void updateStatus(MRApp app, TaskAttempt attempt, Phase phase) {
  TaskAttemptStatusUpdateEvent.TaskAttemptStatus status = new TaskAttemptStatusUpdateEvent.TaskAttemptStatus();
  status.counters = new Counters();
  status.fetchFailedMaps = new ArrayList<TaskAttemptId>();
  status.id = attempt.getID();
  status.mapFinishTime = 0;
  status.phase = phase;
  status.progress = 0.5f;
  status.shuffleFinishTime = 0;
  status.sortFinishTime = 0;
  status.stateString = "OK";
  status.taskState = attempt.getState();
  TaskAttemptStatusUpdateEvent event = new TaskAttemptStatusUpdateEvent(attempt.getID(),
      status);
  app.getContext().getEventHandler().handle(event);
}
Project: big-c    File: TypeConverter.java
public static Phase toYarn(org.apache.hadoop.mapred.TaskStatus.Phase phase) {
  switch (phase) {
  case STARTING:
    return Phase.STARTING;
  case MAP:
    return Phase.MAP;
  case SHUFFLE:
    return Phase.SHUFFLE;
  case SORT:
    return Phase.SORT;
  case REDUCE:
    return Phase.REDUCE;
  case CLEANUP:
    return Phase.CLEANUP;
  }
  throw new YarnRuntimeException("Unrecognized Phase: " + phase);
}
Project: big-c    File: TestCompletedTask.java
/**
 * Test some getter methods of CompletedTaskAttempt against a mocked
 * TaskAttemptInfo.
 */
@Test(timeout = 5000)
public void testCompletedTaskAttempt() {

  TaskAttemptInfo attemptInfo = mock(TaskAttemptInfo.class);
  when(attemptInfo.getRackname()).thenReturn("Rackname");
  when(attemptInfo.getShuffleFinishTime()).thenReturn(11L);
  when(attemptInfo.getSortFinishTime()).thenReturn(12L);
  when(attemptInfo.getShufflePort()).thenReturn(10);

  JobID jobId = new JobID("12345", 0);
  TaskID taskId = new TaskID(jobId, TaskType.REDUCE, 0);
  TaskAttemptID taskAttemptId = new TaskAttemptID(taskId, 0);
  when(attemptInfo.getAttemptId()).thenReturn(taskAttemptId);

  CompletedTaskAttempt taskAttempt = new CompletedTaskAttempt(null, attemptInfo);
  assertEquals("Rackname", taskAttempt.getNodeRackName());
  assertEquals(Phase.CLEANUP, taskAttempt.getPhase());
  assertTrue(taskAttempt.isFinished());
  assertEquals(11L, taskAttempt.getShuffleFinishTime());
  assertEquals(12L, taskAttempt.getSortFinishTime());
  assertEquals(10, taskAttempt.getShufflePort());
}
Project: hadoop-2.6.0-cdh5.4.3    File: TestFetchFailure.java
private void updateStatus(MRApp app, TaskAttempt attempt, Phase phase) {
  TaskAttemptStatusUpdateEvent.TaskAttemptStatus status = new TaskAttemptStatusUpdateEvent.TaskAttemptStatus();
  status.counters = new Counters();
  status.fetchFailedMaps = new ArrayList<TaskAttemptId>();
  status.id = attempt.getID();
  status.mapFinishTime = 0;
  status.phase = phase;
  status.progress = 0.5f;
  status.shuffleFinishTime = 0;
  status.sortFinishTime = 0;
  status.stateString = "OK";
  status.taskState = attempt.getState();
  TaskAttemptStatusUpdateEvent event = new TaskAttemptStatusUpdateEvent(attempt.getID(),
      status);
  app.getContext().getEventHandler().handle(event);
}
Project: hadoop-2.6.0-cdh5.4.3    File: TypeConverter.java
public static Phase toYarn(org.apache.hadoop.mapred.TaskStatus.Phase phase) {
  switch (phase) {
  case STARTING:
    return Phase.STARTING;
  case MAP:
    return Phase.MAP;
  case SHUFFLE:
    return Phase.SHUFFLE;
  case SORT:
    return Phase.SORT;
  case REDUCE:
    return Phase.REDUCE;
  case CLEANUP:
    return Phase.CLEANUP;
  }
  throw new YarnRuntimeException("Unrecognized Phase: " + phase);
}
Project: hadoop-2.6.0-cdh5.4.3    File: TestCompletedTask.java
/**
 * Test some getter methods of CompletedTaskAttempt against a mocked
 * TaskAttemptInfo.
 */
@Test(timeout = 5000)
public void testCompletedTaskAttempt() {

  TaskAttemptInfo attemptInfo = mock(TaskAttemptInfo.class);
  when(attemptInfo.getRackname()).thenReturn("Rackname");
  when(attemptInfo.getShuffleFinishTime()).thenReturn(11L);
  when(attemptInfo.getSortFinishTime()).thenReturn(12L);
  when(attemptInfo.getShufflePort()).thenReturn(10);

  JobID jobId = new JobID("12345", 0);
  TaskID taskId = new TaskID(jobId, TaskType.REDUCE, 0);
  TaskAttemptID taskAttemptId = new TaskAttemptID(taskId, 0);
  when(attemptInfo.getAttemptId()).thenReturn(taskAttemptId);

  CompletedTaskAttempt taskAttempt = new CompletedTaskAttempt(null, attemptInfo);
  assertEquals("Rackname", taskAttempt.getNodeRackName());
  assertEquals(Phase.CLEANUP, taskAttempt.getPhase());
  assertTrue(taskAttempt.isFinished());
  assertEquals(11L, taskAttempt.getShuffleFinishTime());
  assertEquals(12L, taskAttempt.getSortFinishTime());
  assertEquals(10, taskAttempt.getShufflePort());
}
Project: hadoop-plus    File: TestFetchFailure.java
private void updateStatus(MRApp app, TaskAttempt attempt, Phase phase) {
  TaskAttemptStatusUpdateEvent.TaskAttemptStatus status = new TaskAttemptStatusUpdateEvent.TaskAttemptStatus();
  status.counters = new Counters();
  status.fetchFailedMaps = new ArrayList<TaskAttemptId>();
  status.id = attempt.getID();
  status.mapFinishTime = 0;
  status.phase = phase;
  status.progress = 0.5f;
  status.shuffleFinishTime = 0;
  status.sortFinishTime = 0;
  status.stateString = "OK";
  status.taskState = attempt.getState();
  TaskAttemptStatusUpdateEvent event = new TaskAttemptStatusUpdateEvent(attempt.getID(),
      status);
  app.getContext().getEventHandler().handle(event);
}
Project: hadoop-plus    File: TypeConverter.java
public static Phase toYarn(org.apache.hadoop.mapred.TaskStatus.Phase phase) {
  switch (phase) {
  case STARTING:
    return Phase.STARTING;
  case MAP:
    return Phase.MAP;
  case SHUFFLE:
    return Phase.SHUFFLE;
  case SORT:
    return Phase.SORT;
  case REDUCE:
    return Phase.REDUCE;
  case CLEANUP:
    return Phase.CLEANUP;
  }
  throw new YarnRuntimeException("Unrecognized Phase: " + phase);
}
Project: hadoop-plus    File: TestCompletedTask.java
/**
 * Test some getter methods of CompletedTaskAttempt against a mocked
 * TaskAttemptInfo.
 */
@Test(timeout = 5000)
public void testCompletedTaskAttempt() {

  TaskAttemptInfo attemptInfo = mock(TaskAttemptInfo.class);
  when(attemptInfo.getRackname()).thenReturn("Rackname");
  when(attemptInfo.getShuffleFinishTime()).thenReturn(11L);
  when(attemptInfo.getSortFinishTime()).thenReturn(12L);
  when(attemptInfo.getShufflePort()).thenReturn(10);

  JobID jobId = new JobID("12345", 0);
  TaskID taskId = new TaskID(jobId, TaskType.REDUCE, 0);
  TaskAttemptID taskAttemptId = new TaskAttemptID(taskId, 0);
  when(attemptInfo.getAttemptId()).thenReturn(taskAttemptId);

  CompletedTaskAttempt taskAttempt = new CompletedTaskAttempt(null, attemptInfo);
  assertEquals("Rackname", taskAttempt.getNodeRackName());
  assertEquals(Phase.CLEANUP, taskAttempt.getPhase());
  assertTrue(taskAttempt.isFinished());
  assertEquals(11L, taskAttempt.getShuffleFinishTime());
  assertEquals(12L, taskAttempt.getSortFinishTime());
  assertEquals(10, taskAttempt.getShufflePort());
}
Project: FlexMap    File: TestFetchFailure.java
private void updateStatus(MRApp app, TaskAttempt attempt, Phase phase) {
  TaskAttemptStatusUpdateEvent.TaskAttemptStatus status = new TaskAttemptStatusUpdateEvent.TaskAttemptStatus();
  status.counters = new Counters();
  status.fetchFailedMaps = new ArrayList<TaskAttemptId>();
  status.id = attempt.getID();
  status.mapFinishTime = 0;
  status.phase = phase;
  status.progress = 0.5f;
  status.shuffleFinishTime = 0;
  status.sortFinishTime = 0;
  status.stateString = "OK";
  status.taskState = attempt.getState();
  TaskAttemptStatusUpdateEvent event = new TaskAttemptStatusUpdateEvent(attempt.getID(),
      status);
  app.getContext().getEventHandler().handle(event);
}
Project: FlexMap    File: TypeConverter.java
public static Phase toYarn(org.apache.hadoop.mapred.TaskStatus.Phase phase) {
  switch (phase) {
  case STARTING:
    return Phase.STARTING;
  case MAP:
    return Phase.MAP;
  case SHUFFLE:
    return Phase.SHUFFLE;
  case SORT:
    return Phase.SORT;
  case REDUCE:
    return Phase.REDUCE;
  case CLEANUP:
    return Phase.CLEANUP;
  }
  throw new YarnRuntimeException("Unrecognized Phase: " + phase);
}
Project: FlexMap    File: TestCompletedTask.java
/**
 * Test some getter methods of CompletedTaskAttempt against a mocked
 * TaskAttemptInfo.
 */
@Test(timeout = 5000)
public void testCompletedTaskAttempt() {

  TaskAttemptInfo attemptInfo = mock(TaskAttemptInfo.class);
  when(attemptInfo.getRackname()).thenReturn("Rackname");
  when(attemptInfo.getShuffleFinishTime()).thenReturn(11L);
  when(attemptInfo.getSortFinishTime()).thenReturn(12L);
  when(attemptInfo.getShufflePort()).thenReturn(10);

  JobID jobId = new JobID("12345", 0);
  TaskID taskId = new TaskID(jobId, TaskType.REDUCE, 0);
  TaskAttemptID taskAttemptId = new TaskAttemptID(taskId, 0);
  when(attemptInfo.getAttemptId()).thenReturn(taskAttemptId);

  CompletedTaskAttempt taskAttempt = new CompletedTaskAttempt(null, attemptInfo);
  assertEquals("Rackname", taskAttempt.getNodeRackName());
  assertEquals(Phase.CLEANUP, taskAttempt.getPhase());
  assertTrue(taskAttempt.isFinished());
  assertEquals(11L, taskAttempt.getShuffleFinishTime());
  assertEquals(12L, taskAttempt.getSortFinishTime());
  assertEquals(10, taskAttempt.getShufflePort());
}
Project: hops    File: TestFetchFailure.java
private void updateStatus(MRApp app, TaskAttempt attempt, Phase phase) {
  TaskAttemptStatusUpdateEvent.TaskAttemptStatus status = new TaskAttemptStatusUpdateEvent.TaskAttemptStatus();
  status.counters = new Counters();
  status.fetchFailedMaps = new ArrayList<TaskAttemptId>();
  status.id = attempt.getID();
  status.mapFinishTime = 0;
  status.phase = phase;
  status.progress = 0.5f;
  status.shuffleFinishTime = 0;
  status.sortFinishTime = 0;
  status.stateString = "OK";
  status.taskState = attempt.getState();
  TaskAttemptStatusUpdateEvent event = new TaskAttemptStatusUpdateEvent(attempt.getID(),
      status);
  app.getContext().getEventHandler().handle(event);
}
Project: hops    File: TypeConverter.java
public static Phase toYarn(org.apache.hadoop.mapred.TaskStatus.Phase phase) {
  switch (phase) {
  case STARTING:
    return Phase.STARTING;
  case MAP:
    return Phase.MAP;
  case SHUFFLE:
    return Phase.SHUFFLE;
  case SORT:
    return Phase.SORT;
  case REDUCE:
    return Phase.REDUCE;
  case CLEANUP:
    return Phase.CLEANUP;
  default:
    break;
  }
  throw new YarnRuntimeException("Unrecognized Phase: " + phase);
}
Project: hops    File: TestCompletedTask.java
/**
 * Test some getter methods of CompletedTaskAttempt against a mocked
 * TaskAttemptInfo.
 */
@Test(timeout = 5000)
public void testCompletedTaskAttempt() {

  TaskAttemptInfo attemptInfo = mock(TaskAttemptInfo.class);
  when(attemptInfo.getRackname()).thenReturn("Rackname");
  when(attemptInfo.getShuffleFinishTime()).thenReturn(11L);
  when(attemptInfo.getSortFinishTime()).thenReturn(12L);
  when(attemptInfo.getShufflePort()).thenReturn(10);

  JobID jobId = new JobID("12345", 0);
  TaskID taskId = new TaskID(jobId, TaskType.REDUCE, 0);
  TaskAttemptID taskAttemptId = new TaskAttemptID(taskId, 0);
  when(attemptInfo.getAttemptId()).thenReturn(taskAttemptId);

  CompletedTaskAttempt taskAttempt = new CompletedTaskAttempt(null, attemptInfo);
  assertEquals("Rackname", taskAttempt.getNodeRackName());
  assertEquals(Phase.CLEANUP, taskAttempt.getPhase());
  assertTrue(taskAttempt.isFinished());
  assertEquals(11L, taskAttempt.getShuffleFinishTime());
  assertEquals(12L, taskAttempt.getSortFinishTime());
  assertEquals(10, taskAttempt.getShufflePort());
}
Project: hadoop-TCP    File: TestFetchFailure.java
private void updateStatus(MRApp app, TaskAttempt attempt, Phase phase) {
  TaskAttemptStatusUpdateEvent.TaskAttemptStatus status = new TaskAttemptStatusUpdateEvent.TaskAttemptStatus();
  status.counters = new Counters();
  status.fetchFailedMaps = new ArrayList<TaskAttemptId>();
  status.id = attempt.getID();
  status.mapFinishTime = 0;
  status.phase = phase;
  status.progress = 0.5f;
  status.shuffleFinishTime = 0;
  status.sortFinishTime = 0;
  status.stateString = "OK";
  status.taskState = attempt.getState();
  TaskAttemptStatusUpdateEvent event = new TaskAttemptStatusUpdateEvent(attempt.getID(),
      status);
  app.getContext().getEventHandler().handle(event);
}
Project: hadoop-TCP    File: TypeConverter.java
public static Phase toYarn(org.apache.hadoop.mapred.TaskStatus.Phase phase) {
  switch (phase) {
  case STARTING:
    return Phase.STARTING;
  case MAP:
    return Phase.MAP;
  case SHUFFLE:
    return Phase.SHUFFLE;
  case SORT:
    return Phase.SORT;
  case REDUCE:
    return Phase.REDUCE;
  case CLEANUP:
    return Phase.CLEANUP;
  }
  throw new YarnRuntimeException("Unrecognized Phase: " + phase);
}
Project: hadoop-TCP    File: TestCompletedTask.java
/**
 * Test some getter methods of CompletedTaskAttempt against a mocked
 * TaskAttemptInfo.
 */
@Test(timeout = 5000)
public void testCompletedTaskAttempt() {

  TaskAttemptInfo attemptInfo = mock(TaskAttemptInfo.class);
  when(attemptInfo.getRackname()).thenReturn("Rackname");
  when(attemptInfo.getShuffleFinishTime()).thenReturn(11L);
  when(attemptInfo.getSortFinishTime()).thenReturn(12L);
  when(attemptInfo.getShufflePort()).thenReturn(10);

  JobID jobId = new JobID("12345", 0);
  TaskID taskId = new TaskID(jobId, TaskType.REDUCE, 0);
  TaskAttemptID taskAttemptId = new TaskAttemptID(taskId, 0);
  when(attemptInfo.getAttemptId()).thenReturn(taskAttemptId);

  CompletedTaskAttempt taskAttempt = new CompletedTaskAttempt(null, attemptInfo);
  assertEquals("Rackname", taskAttempt.getNodeRackName());
  assertEquals(Phase.CLEANUP, taskAttempt.getPhase());
  assertTrue(taskAttempt.isFinished());
  assertEquals(11L, taskAttempt.getShuffleFinishTime());
  assertEquals(12L, taskAttempt.getSortFinishTime());
  assertEquals(10, taskAttempt.getShufflePort());
}
Project: hardfs    File: TestFetchFailure.java
private void updateStatus(MRApp app, TaskAttempt attempt, Phase phase) {
  TaskAttemptStatusUpdateEvent.TaskAttemptStatus status = new TaskAttemptStatusUpdateEvent.TaskAttemptStatus();
  status.counters = new Counters();
  status.fetchFailedMaps = new ArrayList<TaskAttemptId>();
  status.id = attempt.getID();
  status.mapFinishTime = 0;
  status.phase = phase;
  status.progress = 0.5f;
  status.shuffleFinishTime = 0;
  status.sortFinishTime = 0;
  status.stateString = "OK";
  status.taskState = attempt.getState();
  TaskAttemptStatusUpdateEvent event = new TaskAttemptStatusUpdateEvent(attempt.getID(),
      status);
  app.getContext().getEventHandler().handle(event);
}
Project: hardfs    File: TypeConverter.java
public static Phase toYarn(org.apache.hadoop.mapred.TaskStatus.Phase phase) {
  switch (phase) {
  case STARTING:
    return Phase.STARTING;
  case MAP:
    return Phase.MAP;
  case SHUFFLE:
    return Phase.SHUFFLE;
  case SORT:
    return Phase.SORT;
  case REDUCE:
    return Phase.REDUCE;
  case CLEANUP:
    return Phase.CLEANUP;
  }
  throw new YarnRuntimeException("Unrecognized Phase: " + phase);
}
Project: hardfs    File: TestCompletedTask.java
/**
 * Test some getter methods of CompletedTaskAttempt against a mocked
 * TaskAttemptInfo.
 */
@Test(timeout = 5000)
public void testCompletedTaskAttempt() {

  TaskAttemptInfo attemptInfo = mock(TaskAttemptInfo.class);
  when(attemptInfo.getRackname()).thenReturn("Rackname");
  when(attemptInfo.getShuffleFinishTime()).thenReturn(11L);
  when(attemptInfo.getSortFinishTime()).thenReturn(12L);
  when(attemptInfo.getShufflePort()).thenReturn(10);

  JobID jobId = new JobID("12345", 0);
  TaskID taskId = new TaskID(jobId, TaskType.REDUCE, 0);
  TaskAttemptID taskAttemptId = new TaskAttemptID(taskId, 0);
  when(attemptInfo.getAttemptId()).thenReturn(taskAttemptId);

  CompletedTaskAttempt taskAttempt = new CompletedTaskAttempt(null, attemptInfo);
  assertEquals("Rackname", taskAttempt.getNodeRackName());
  assertEquals(Phase.CLEANUP, taskAttempt.getPhase());
  assertTrue(taskAttempt.isFinished());
  assertEquals(11L, taskAttempt.getShuffleFinishTime());
  assertEquals(12L, taskAttempt.getSortFinishTime());
  assertEquals(10, taskAttempt.getShufflePort());
}
Project: hadoop-on-lustre2    File: TestFetchFailure.java
private void updateStatus(MRApp app, TaskAttempt attempt, Phase phase) {
  TaskAttemptStatusUpdateEvent.TaskAttemptStatus status = new TaskAttemptStatusUpdateEvent.TaskAttemptStatus();
  status.counters = new Counters();
  status.fetchFailedMaps = new ArrayList<TaskAttemptId>();
  status.id = attempt.getID();
  status.mapFinishTime = 0;
  status.phase = phase;
  status.progress = 0.5f;
  status.shuffleFinishTime = 0;
  status.sortFinishTime = 0;
  status.stateString = "OK";
  status.taskState = attempt.getState();
  TaskAttemptStatusUpdateEvent event = new TaskAttemptStatusUpdateEvent(attempt.getID(),
      status);
  app.getContext().getEventHandler().handle(event);
}
Project: hadoop-on-lustre2    File: TypeConverter.java
public static Phase toYarn(org.apache.hadoop.mapred.TaskStatus.Phase phase) {
  switch (phase) {
  case STARTING:
    return Phase.STARTING;
  case MAP:
    return Phase.MAP;
  case SHUFFLE:
    return Phase.SHUFFLE;
  case SORT:
    return Phase.SORT;
  case REDUCE:
    return Phase.REDUCE;
  case CLEANUP:
    return Phase.CLEANUP;
  }
  throw new YarnRuntimeException("Unrecognized Phase: " + phase);
}
Project: hadoop-on-lustre2    File: TestCompletedTask.java
/**
 * Test some getter methods of CompletedTaskAttempt against a mocked
 * TaskAttemptInfo.
 */
@Test(timeout = 5000)
public void testCompletedTaskAttempt() {

  TaskAttemptInfo attemptInfo = mock(TaskAttemptInfo.class);
  when(attemptInfo.getRackname()).thenReturn("Rackname");
  when(attemptInfo.getShuffleFinishTime()).thenReturn(11L);
  when(attemptInfo.getSortFinishTime()).thenReturn(12L);
  when(attemptInfo.getShufflePort()).thenReturn(10);

  JobID jobId = new JobID("12345", 0);
  TaskID taskId = new TaskID(jobId, TaskType.REDUCE, 0);
  TaskAttemptID taskAttemptId = new TaskAttemptID(taskId, 0);
  when(attemptInfo.getAttemptId()).thenReturn(taskAttemptId);

  CompletedTaskAttempt taskAttempt = new CompletedTaskAttempt(null, attemptInfo);
  assertEquals("Rackname", taskAttempt.getNodeRackName());
  assertEquals(Phase.CLEANUP, taskAttempt.getPhase());
  assertTrue(taskAttempt.isFinished());
  assertEquals(11L, taskAttempt.getShuffleFinishTime());
  assertEquals(12L, taskAttempt.getSortFinishTime());
  assertEquals(10, taskAttempt.getShufflePort());
}
Project: hadoop    File: TaskAttemptImpl.java
@Override
public Phase getPhase() {
  readLock.lock();
  try {
    return reportedStatus.phase;
  } finally {
    readLock.unlock();
  }
}
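getPhase() only takes the read lock; the reported phase is presumably written elsewhere under the matching write lock. Below is a standalone sketch of that read/write-lock guarding pattern (an illustration only, not TaskAttemptImpl's actual field layout).

import java.util.concurrent.locks.ReentrantReadWriteLock;
import org.apache.hadoop.mapreduce.v2.api.records.Phase;

// Standalone illustration of the lock pattern used by getPhase() above.
public class GuardedPhase {
  private final ReentrantReadWriteLock lock = new ReentrantReadWriteLock();
  private Phase phase = Phase.STARTING;

  public Phase getPhase() {
    lock.readLock().lock();
    try {
      return phase;            // many readers may hold the read lock at once
    } finally {
      lock.readLock().unlock();
    }
  }

  public void setPhase(Phase newPhase) {
    lock.writeLock().lock();
    try {
      phase = newPhase;        // writers get exclusive access
    } finally {
      lock.writeLock().unlock();
    }
  }
}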
Project: hadoop    File: TaskAttemptImpl.java
private void initTaskAttemptStatus(TaskAttemptStatus result) {
  result.progress = 0.0f;
  result.phase = Phase.STARTING;
  result.stateString = "NEW";
  result.taskState = TaskAttemptState.NEW;
  Counters counters = EMPTY_COUNTERS;
  result.counters = counters;
}
Project: hadoop    File: JobImpl.java
@Override
public void transition(JobImpl job, JobEvent event) {
  //get number of shuffling reduces
  int shufflingReduceTasks = 0;
  for (TaskId taskId : job.reduceTasks) {
    Task task = job.tasks.get(taskId);
    if (TaskState.RUNNING.equals(task.getState())) {
      for(TaskAttempt attempt : task.getAttempts().values()) {
        if(attempt.getPhase() == Phase.SHUFFLE) {
          shufflingReduceTasks++;
          break;
        }
      }
    }
  }

  JobTaskAttemptFetchFailureEvent fetchfailureEvent = 
    (JobTaskAttemptFetchFailureEvent) event;
  for (org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptId mapId : 
        fetchfailureEvent.getMaps()) {
    Integer fetchFailures = job.fetchFailuresMapping.get(mapId);
    fetchFailures = (fetchFailures == null) ? 1 : (fetchFailures+1);
    job.fetchFailuresMapping.put(mapId, fetchFailures);

    float failureRate = shufflingReduceTasks == 0 ? 1.0f : 
      (float) fetchFailures / shufflingReduceTasks;
    // declare faulty if fetch-failures >= max-allowed-failures
    if (fetchFailures >= job.getMaxFetchFailuresNotifications()
        && failureRate >= job.getMaxAllowedFetchFailuresFraction()) {
      LOG.info("Too many fetch-failures for output of task attempt: " + 
          mapId + " ... raising fetch failure to map");
      job.eventHandler.handle(new TaskAttemptEvent(mapId, 
          TaskAttemptEventType.TA_TOO_MANY_FETCH_FAILURE));
      job.fetchFailuresMapping.remove(mapId);
    }
  }
}
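The check combines an absolute failure count with a failure rate relative to the number of reduces currently in Phase.SHUFFLE. Below is a self-contained numeric illustration of that arithmetic; the limits of 3 notifications and a 0.5 fraction are assumed values for the example, not read from a real JobImpl.

// Recreates the threshold arithmetic from the transition above with plain numbers.
public class FetchFailureRateDemo {
  public static void main(String[] args) {
    int shufflingReduceTasks = 4; // reduces currently in Phase.SHUFFLE
    int fetchFailures = 3;        // failures reported for one map output
    float failureRate = shufflingReduceTasks == 0
        ? 1.0f : (float) fetchFailures / shufflingReduceTasks;
    boolean declareFaulty = fetchFailures >= 3 && failureRate >= 0.5f;
    // prints: failureRate=0.75 faulty=true
    System.out.println("failureRate=" + failureRate + " faulty=" + declareFaulty);
  }
}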
Project: hadoop    File: TaskAttemptReportPBImpl.java
@Override
public Phase getPhase() {
  TaskAttemptReportProtoOrBuilder p = viaProto ? proto : builder;
  if (!p.hasPhase()) {
    return null;
  }
  return convertFromProtoFormat(p.getPhase());
}
Project: hadoop    File: TaskAttemptReportPBImpl.java
@Override
public void setPhase(Phase phase) {
  maybeInitBuilder();
  if (phase == null) {
    builder.clearPhase();
    return;
  }
  builder.setPhase(convertToProtoFormat(phase));
}
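Together, getPhase() and setPhase() let a Phase value round-trip through the protobuf-backed report, with null mapping to a cleared field. A small sketch follows, assuming the standard YARN Records factory resolves this PBImpl for TaskAttemptReport (an assumption about wiring, not shown above).

import org.apache.hadoop.mapreduce.v2.api.records.Phase;
import org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptReport;
import org.apache.hadoop.yarn.util.Records;

// Sketch: exercises the getPhase()/setPhase() pair shown above.
public class PhaseRoundTrip {
  public static void main(String[] args) {
    TaskAttemptReport report = Records.newRecord(TaskAttemptReport.class);
    report.setPhase(Phase.REDUCE);
    System.out.println(report.getPhase()); // REDUCE, read back from the builder
    report.setPhase(null);                 // clearPhase() on the builder
    System.out.println(report.getPhase()); // null, because hasPhase() is false
  }
}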
Project: aliyun-oss-hadoop-fs    File: TaskAttemptImpl.java
@Override
public Phase getPhase() {
  readLock.lock();
  try {
    return reportedStatus.phase;
  } finally {
    readLock.unlock();
  }
}
Project: aliyun-oss-hadoop-fs    File: TaskAttemptImpl.java
private void initTaskAttemptStatus(TaskAttemptStatus result) {
  result.progress = 0.0f;
  result.phase = Phase.STARTING;
  result.stateString = "NEW";
  result.taskState = TaskAttemptState.NEW;
  Counters counters = EMPTY_COUNTERS;
  result.counters = counters;
}
Project: aliyun-oss-hadoop-fs    File: JobImpl.java
@Override
public void transition(JobImpl job, JobEvent event) {
  //get number of shuffling reduces
  int shufflingReduceTasks = 0;
  for (TaskId taskId : job.reduceTasks) {
    Task task = job.tasks.get(taskId);
    if (TaskState.RUNNING.equals(task.getState())) {
      for(TaskAttempt attempt : task.getAttempts().values()) {
        if(attempt.getPhase() == Phase.SHUFFLE) {
          shufflingReduceTasks++;
          break;
        }
      }
    }
  }

  JobTaskAttemptFetchFailureEvent fetchfailureEvent = 
    (JobTaskAttemptFetchFailureEvent) event;
  for (org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptId mapId : 
        fetchfailureEvent.getMaps()) {
    Integer fetchFailures = job.fetchFailuresMapping.get(mapId);
    fetchFailures = (fetchFailures == null) ? 1 : (fetchFailures+1);
    job.fetchFailuresMapping.put(mapId, fetchFailures);

    float failureRate = shufflingReduceTasks == 0 ? 1.0f : 
      (float) fetchFailures / shufflingReduceTasks;
    // declare faulty if fetch-failures >= max-allowed-failures
    if (fetchFailures >= job.getMaxFetchFailuresNotifications()
        && failureRate >= job.getMaxAllowedFetchFailuresFraction()) {
      LOG.info("Too many fetch-failures for output of task attempt: " + 
          mapId + " ... raising fetch failure to map");
      job.eventHandler.handle(new TaskAttemptTooManyFetchFailureEvent(mapId,
          fetchfailureEvent.getReduce(), fetchfailureEvent.getHost()));
      job.fetchFailuresMapping.remove(mapId);
    }
  }
}
Project: aliyun-oss-hadoop-fs    File: TaskAttemptReportPBImpl.java
@Override
public Phase getPhase() {
  TaskAttemptReportProtoOrBuilder p = viaProto ? proto : builder;
  if (!p.hasPhase()) {
    return null;
  }
  return convertFromProtoFormat(p.getPhase());
}
Project: aliyun-oss-hadoop-fs    File: TaskAttemptReportPBImpl.java
@Override
public void setPhase(Phase phase) {
  maybeInitBuilder();
  if (phase == null) {
    builder.clearPhase();
    return;
  }
  builder.setPhase(convertToProtoFormat(phase));
}