Example source code for the Java class org.apache.hadoop.mapreduce.v2.api.protocolrecords.GetTaskReportsResponse

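The snippets below, collected from several Hadoop forks, show both ends of the GetTaskReports RPC: the MRClientProtocol.getTaskReports() implementations in the application master (MRClientService), the history server (HistoryClientService), and the NotRunningJob stub that returns an empty response; the protobuf adapters (MRClientProtocolPBServiceImpl, MRClientProtocolPBClientImpl); and the client-side ClientServiceDelegate that unwraps the response into TaskReport objects. Application code normally does not build these protocol records directly; the higher-level org.apache.hadoop.mapreduce.Job API drives this path. What follows is a minimal usage sketch, assuming a reachable cluster picked up from the default Configuration; the job id is a hypothetical placeholder.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.mapreduce.Cluster;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.JobID;
import org.apache.hadoop.mapreduce.TaskReport;
import org.apache.hadoop.mapreduce.TaskType;

public class TaskReportExample {
  public static void main(String[] args) throws Exception {
    // Hypothetical job id; replace with an id known to your cluster.
    JobID jobId = JobID.forName("job_1400000000000_0001");

    Cluster cluster = new Cluster(new Configuration());
    try {
      Job job = cluster.getJob(jobId);
      if (job == null) {
        System.err.println("Job not found: " + jobId);
        return;
      }
      // With the YARN runner this call reaches ClientServiceDelegate.getTaskReports(),
      // which sends a GetTaskReportsRequest and converts the GetTaskReportsResponse
      // it gets back into org.apache.hadoop.mapreduce.TaskReport[].
      for (TaskReport report : job.getTaskReports(TaskType.MAP)) {
        System.out.println("progress=" + report.getProgress()
            + " state=" + report.getState());
      }
    } finally {
      cluster.close();
    }
  }
}

With the YARN runner, Job.getTaskReports() ultimately calls the ClientServiceDelegate.getTaskReports() method shown in the ClientServiceDelegate snippets below.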
Project: hadoop    File: MRClientService.java
@Override
public GetTaskReportsResponse getTaskReports(
    GetTaskReportsRequest request) throws IOException {
  JobId jobId = request.getJobId();
  TaskType taskType = request.getTaskType();

  GetTaskReportsResponse response = 
    recordFactory.newRecordInstance(GetTaskReportsResponse.class);

  Job job = verifyAndGetJob(jobId, JobACL.VIEW_JOB, true);
  Collection<Task> tasks = job.getTasks(taskType).values();
  LOG.info("Getting task report for " + taskType + "   " + jobId
      + ". Report-size will be " + tasks.size());

  // Take lock to allow only one call, otherwise heap will blow up because
  // of counters in the report when there are multiple callers.
  synchronized (getTaskReportsLock) {
    for (Task task : tasks) {
      response.addTaskReport(task.getReport());
    }
  }

  return response;
}
Project: hadoop    File: ClientServiceDelegate.java
public org.apache.hadoop.mapreduce.TaskReport[] getTaskReports(JobID oldJobID, TaskType taskType)
     throws IOException{
  org.apache.hadoop.mapreduce.v2.api.records.JobId jobId =
    TypeConverter.toYarn(oldJobID);
  GetTaskReportsRequest request =
      recordFactory.newRecordInstance(GetTaskReportsRequest.class);
  request.setJobId(jobId);
  request.setTaskType(TypeConverter.toYarn(taskType));

  List<org.apache.hadoop.mapreduce.v2.api.records.TaskReport> taskReports =
    ((GetTaskReportsResponse) invoke("getTaskReports", GetTaskReportsRequest.class,
        request)).getTaskReportList();

  return TypeConverter.fromYarn(taskReports)
      .toArray(new org.apache.hadoop.mapreduce.TaskReport[0]);
}
Project: aliyun-oss-hadoop-fs    File: MRClientService.java
@Override
public GetTaskReportsResponse getTaskReports(
    GetTaskReportsRequest request) throws IOException {
  JobId jobId = request.getJobId();
  TaskType taskType = request.getTaskType();

  GetTaskReportsResponse response = 
    recordFactory.newRecordInstance(GetTaskReportsResponse.class);

  Job job = verifyAndGetJob(jobId, JobACL.VIEW_JOB, true);
  Collection<Task> tasks = job.getTasks(taskType).values();
  LOG.info("Getting task report for " + taskType + "   " + jobId
      + ". Report-size will be " + tasks.size());

  // Take lock to allow only one call, otherwise heap will blow up because
  // of counters in the report when there are multiple callers.
  synchronized (getTaskReportsLock) {
    for (Task task : tasks) {
      response.addTaskReport(task.getReport());
    }
  }

  return response;
}
Project: aliyun-oss-hadoop-fs    File: ClientServiceDelegate.java
public org.apache.hadoop.mapreduce.TaskReport[] getTaskReports(JobID oldJobID, TaskType taskType)
     throws IOException{
  org.apache.hadoop.mapreduce.v2.api.records.JobId jobId =
    TypeConverter.toYarn(oldJobID);
  GetTaskReportsRequest request =
      recordFactory.newRecordInstance(GetTaskReportsRequest.class);
  request.setJobId(jobId);
  request.setTaskType(TypeConverter.toYarn(taskType));

  List<org.apache.hadoop.mapreduce.v2.api.records.TaskReport> taskReports =
    ((GetTaskReportsResponse) invoke("getTaskReports", GetTaskReportsRequest.class,
        request)).getTaskReportList();

  return TypeConverter.fromYarn(taskReports)
      .toArray(new org.apache.hadoop.mapreduce.TaskReport[0]);
}
Project: big-c    File: MRClientService.java
@Override
public GetTaskReportsResponse getTaskReports(
    GetTaskReportsRequest request) throws IOException {
  JobId jobId = request.getJobId();
  TaskType taskType = request.getTaskType();

  GetTaskReportsResponse response = 
    recordFactory.newRecordInstance(GetTaskReportsResponse.class);

  Job job = verifyAndGetJob(jobId, JobACL.VIEW_JOB, true);
  Collection<Task> tasks = job.getTasks(taskType).values();
  LOG.info("Getting task report for " + taskType + "   " + jobId
      + ". Report-size will be " + tasks.size());

  // Take lock to allow only one call, otherwise heap will blow up because
  // of counters in the report when there are multiple callers.
  synchronized (getTaskReportsLock) {
    for (Task task : tasks) {
      response.addTaskReport(task.getReport());
    }
  }

  return response;
}
Project: big-c    File: ClientServiceDelegate.java
public org.apache.hadoop.mapreduce.TaskReport[] getTaskReports(JobID oldJobID, TaskType taskType)
     throws IOException{
  org.apache.hadoop.mapreduce.v2.api.records.JobId jobId =
    TypeConverter.toYarn(oldJobID);
  GetTaskReportsRequest request =
      recordFactory.newRecordInstance(GetTaskReportsRequest.class);
  request.setJobId(jobId);
  request.setTaskType(TypeConverter.toYarn(taskType));

  List<org.apache.hadoop.mapreduce.v2.api.records.TaskReport> taskReports =
    ((GetTaskReportsResponse) invoke("getTaskReports", GetTaskReportsRequest.class,
        request)).getTaskReportList();

  return TypeConverter.fromYarn(taskReports)
      .toArray(new org.apache.hadoop.mapreduce.TaskReport[0]);
}
Project: hadoop-2.6.0-cdh5.4.3    File: MRClientService.java
@Override
public GetTaskReportsResponse getTaskReports(
    GetTaskReportsRequest request) throws IOException {
  JobId jobId = request.getJobId();
  TaskType taskType = request.getTaskType();

  GetTaskReportsResponse response = 
    recordFactory.newRecordInstance(GetTaskReportsResponse.class);

  Job job = verifyAndGetJob(jobId, JobACL.VIEW_JOB);
  Collection<Task> tasks = job.getTasks(taskType).values();
  LOG.info("Getting task report for " + taskType + "   " + jobId
      + ". Report-size will be " + tasks.size());

  // Take lock to allow only one call, otherwise heap will blow up because
  // of counters in the report when there are multiple callers.
  synchronized (getTaskReportsLock) {
    for (Task task : tasks) {
      response.addTaskReport(task.getReport());
    }
  }

  return response;
}
Project: hadoop-2.6.0-cdh5.4.3    File: ClientServiceDelegate.java
public org.apache.hadoop.mapreduce.TaskReport[] getTaskReports(JobID oldJobID, TaskType taskType)
     throws IOException{
  org.apache.hadoop.mapreduce.v2.api.records.JobId jobId =
    TypeConverter.toYarn(oldJobID);
  GetTaskReportsRequest request =
      recordFactory.newRecordInstance(GetTaskReportsRequest.class);
  request.setJobId(jobId);
  request.setTaskType(TypeConverter.toYarn(taskType));

  List<org.apache.hadoop.mapreduce.v2.api.records.TaskReport> taskReports =
    ((GetTaskReportsResponse) invoke("getTaskReports", GetTaskReportsRequest.class,
        request)).getTaskReportList();

  return TypeConverter.fromYarn(taskReports)
      .toArray(new org.apache.hadoop.mapreduce.TaskReport[0]);
}
Project: hadoop-plus    File: MRClientService.java
@Override
public GetTaskReportsResponse getTaskReports(
    GetTaskReportsRequest request) throws IOException {
  JobId jobId = request.getJobId();
  TaskType taskType = request.getTaskType();

  GetTaskReportsResponse response = 
    recordFactory.newRecordInstance(GetTaskReportsResponse.class);

  Job job = verifyAndGetJob(jobId, false);
  Collection<Task> tasks = job.getTasks(taskType).values();
  LOG.info("Getting task report for " + taskType + "   " + jobId
      + ". Report-size will be " + tasks.size());

  // Take lock to allow only one call, otherwise heap will blow up because
  // of counters in the report when there are multiple callers.
  synchronized (getTaskReportsLock) {
    for (Task task : tasks) {
      response.addTaskReport(task.getReport());
    }
  }

  return response;
}
Project: hadoop-plus    File: ClientServiceDelegate.java
public org.apache.hadoop.mapreduce.TaskReport[] getTaskReports(JobID oldJobID, TaskType taskType)
     throws IOException{
  org.apache.hadoop.mapreduce.v2.api.records.JobId jobId =
    TypeConverter.toYarn(oldJobID);
  GetTaskReportsRequest request =
      recordFactory.newRecordInstance(GetTaskReportsRequest.class);
  request.setJobId(jobId);
  request.setTaskType(TypeConverter.toYarn(taskType));

  List<org.apache.hadoop.mapreduce.v2.api.records.TaskReport> taskReports =
    ((GetTaskReportsResponse) invoke("getTaskReports", GetTaskReportsRequest.class,
        request)).getTaskReportList();

  return TypeConverter.fromYarn(taskReports)
      .toArray(new org.apache.hadoop.mapreduce.TaskReport[0]);
}
Project: FlexMap    File: MRClientService.java
@Override
public GetTaskReportsResponse getTaskReports(
    GetTaskReportsRequest request) throws IOException {
  JobId jobId = request.getJobId();
  TaskType taskType = request.getTaskType();

  GetTaskReportsResponse response = 
    recordFactory.newRecordInstance(GetTaskReportsResponse.class);

  Job job = verifyAndGetJob(jobId, JobACL.VIEW_JOB);
  Collection<Task> tasks = job.getTasks(taskType).values();
  LOG.info("Getting task report for " + taskType + "   " + jobId
      + ". Report-size will be " + tasks.size());

  // Take lock to allow only one call, otherwise heap will blow up because
  // of counters in the report when there are multiple callers.
  synchronized (getTaskReportsLock) {
    for (Task task : tasks) {
      response.addTaskReport(task.getReport());
    }
  }

  return response;
}
Project: FlexMap    File: ClientServiceDelegate.java
public org.apache.hadoop.mapreduce.TaskReport[] getTaskReports(JobID oldJobID, TaskType taskType)
     throws IOException{
  org.apache.hadoop.mapreduce.v2.api.records.JobId jobId =
    TypeConverter.toYarn(oldJobID);
  GetTaskReportsRequest request =
      recordFactory.newRecordInstance(GetTaskReportsRequest.class);
  request.setJobId(jobId);
  request.setTaskType(TypeConverter.toYarn(taskType));

  List<org.apache.hadoop.mapreduce.v2.api.records.TaskReport> taskReports =
    ((GetTaskReportsResponse) invoke("getTaskReports", GetTaskReportsRequest.class,
        request)).getTaskReportList();

  return TypeConverter.fromYarn(taskReports)
      .toArray(new org.apache.hadoop.mapreduce.TaskReport[0]);
}
Project: hops    File: MRClientService.java
@Override
public GetTaskReportsResponse getTaskReports(
    GetTaskReportsRequest request) throws IOException {
  JobId jobId = request.getJobId();
  TaskType taskType = request.getTaskType();

  GetTaskReportsResponse response = 
    recordFactory.newRecordInstance(GetTaskReportsResponse.class);

  Job job = verifyAndGetJob(jobId, JobACL.VIEW_JOB, true);
  Collection<Task> tasks = job.getTasks(taskType).values();
  LOG.info("Getting task report for " + taskType + "   " + jobId
      + ". Report-size will be " + tasks.size());

  // Take lock to allow only one call, otherwise heap will blow up because
  // of counters in the report when there are multiple callers.
  synchronized (getTaskReportsLock) {
    for (Task task : tasks) {
      response.addTaskReport(task.getReport());
    }
  }

  return response;
}
Project: hops    File: ClientServiceDelegate.java
public org.apache.hadoop.mapreduce.TaskReport[] getTaskReports(JobID oldJobID, TaskType taskType)
     throws IOException{
  org.apache.hadoop.mapreduce.v2.api.records.JobId jobId =
    TypeConverter.toYarn(oldJobID);
  GetTaskReportsRequest request =
      recordFactory.newRecordInstance(GetTaskReportsRequest.class);
  request.setJobId(jobId);
  request.setTaskType(TypeConverter.toYarn(taskType));

  List<org.apache.hadoop.mapreduce.v2.api.records.TaskReport> taskReports =
    ((GetTaskReportsResponse) invoke("getTaskReports", GetTaskReportsRequest.class,
        request)).getTaskReportList();

  return TypeConverter.fromYarn(taskReports)
      .toArray(new org.apache.hadoop.mapreduce.TaskReport[0]);
}
Project: hadoop-TCP    File: MRClientService.java
@Override
public GetTaskReportsResponse getTaskReports(
    GetTaskReportsRequest request) throws IOException {
  JobId jobId = request.getJobId();
  TaskType taskType = request.getTaskType();

  GetTaskReportsResponse response = 
    recordFactory.newRecordInstance(GetTaskReportsResponse.class);

  Job job = verifyAndGetJob(jobId, JobACL.VIEW_JOB);
  Collection<Task> tasks = job.getTasks(taskType).values();
  LOG.info("Getting task report for " + taskType + "   " + jobId
      + ". Report-size will be " + tasks.size());

  // Take lock to allow only one call, otherwise heap will blow up because
  // of counters in the report when there are multiple callers.
  synchronized (getTaskReportsLock) {
    for (Task task : tasks) {
      response.addTaskReport(task.getReport());
    }
  }

  return response;
}
Project: hadoop-TCP    File: ClientServiceDelegate.java
public org.apache.hadoop.mapreduce.TaskReport[] getTaskReports(JobID oldJobID, TaskType taskType)
     throws IOException{
  org.apache.hadoop.mapreduce.v2.api.records.JobId jobId =
    TypeConverter.toYarn(oldJobID);
  GetTaskReportsRequest request =
      recordFactory.newRecordInstance(GetTaskReportsRequest.class);
  request.setJobId(jobId);
  request.setTaskType(TypeConverter.toYarn(taskType));

  List<org.apache.hadoop.mapreduce.v2.api.records.TaskReport> taskReports =
    ((GetTaskReportsResponse) invoke("getTaskReports", GetTaskReportsRequest.class,
        request)).getTaskReportList();

  return TypeConverter.fromYarn(taskReports)
      .toArray(new org.apache.hadoop.mapreduce.TaskReport[0]);
}
Project: hardfs    File: MRClientService.java
@Override
public GetTaskReportsResponse getTaskReports(
    GetTaskReportsRequest request) throws IOException {
  JobId jobId = request.getJobId();
  TaskType taskType = request.getTaskType();

  GetTaskReportsResponse response = 
    recordFactory.newRecordInstance(GetTaskReportsResponse.class);

  Job job = verifyAndGetJob(jobId, JobACL.VIEW_JOB);
  Collection<Task> tasks = job.getTasks(taskType).values();
  LOG.info("Getting task report for " + taskType + "   " + jobId
      + ". Report-size will be " + tasks.size());

  // Take lock to allow only one call, otherwise heap will blow up because
  // of counters in the report when there are multiple callers.
  synchronized (getTaskReportsLock) {
    for (Task task : tasks) {
      response.addTaskReport(task.getReport());
    }
  }

  return response;
}
Project: hardfs    File: ClientServiceDelegate.java
public org.apache.hadoop.mapreduce.TaskReport[] getTaskReports(JobID oldJobID, TaskType taskType)
     throws IOException{
  org.apache.hadoop.mapreduce.v2.api.records.JobId jobId =
    TypeConverter.toYarn(oldJobID);
  GetTaskReportsRequest request =
      recordFactory.newRecordInstance(GetTaskReportsRequest.class);
  request.setJobId(jobId);
  request.setTaskType(TypeConverter.toYarn(taskType));

  List<org.apache.hadoop.mapreduce.v2.api.records.TaskReport> taskReports =
    ((GetTaskReportsResponse) invoke("getTaskReports", GetTaskReportsRequest.class,
        request)).getTaskReportList();

  return TypeConverter.fromYarn(taskReports)
      .toArray(new org.apache.hadoop.mapreduce.TaskReport[0]);
}
Project: hadoop-on-lustre2    File: MRClientService.java
@Override
public GetTaskReportsResponse getTaskReports(
    GetTaskReportsRequest request) throws IOException {
  JobId jobId = request.getJobId();
  TaskType taskType = request.getTaskType();

  GetTaskReportsResponse response = 
    recordFactory.newRecordInstance(GetTaskReportsResponse.class);

  Job job = verifyAndGetJob(jobId, JobACL.VIEW_JOB);
  Collection<Task> tasks = job.getTasks(taskType).values();
  LOG.info("Getting task report for " + taskType + "   " + jobId
      + ". Report-size will be " + tasks.size());

  // Take lock to allow only one call, otherwise heap will blow up because
  // of counters in the report when there are multiple callers.
  synchronized (getTaskReportsLock) {
    for (Task task : tasks) {
      response.addTaskReport(task.getReport());
    }
  }

  return response;
}
Project: hadoop-on-lustre2    File: ClientServiceDelegate.java
public org.apache.hadoop.mapreduce.TaskReport[] getTaskReports(JobID oldJobID, TaskType taskType)
     throws IOException{
  org.apache.hadoop.mapreduce.v2.api.records.JobId jobId =
    TypeConverter.toYarn(oldJobID);
  GetTaskReportsRequest request =
      recordFactory.newRecordInstance(GetTaskReportsRequest.class);
  request.setJobId(jobId);
  request.setTaskType(TypeConverter.toYarn(taskType));

  List<org.apache.hadoop.mapreduce.v2.api.records.TaskReport> taskReports =
    ((GetTaskReportsResponse) invoke("getTaskReports", GetTaskReportsRequest.class,
        request)).getTaskReportList();

  return TypeConverter.fromYarn(taskReports)
      .toArray(new org.apache.hadoop.mapreduce.TaskReport[0]);
}
Project: hadoop    File: NotRunningJob.java
@Override
public GetTaskReportsResponse getTaskReports(GetTaskReportsRequest request)
    throws IOException {
  GetTaskReportsResponse resp =
    recordFactory.newRecordInstance(GetTaskReportsResponse.class);
  resp.addAllTaskReports(new ArrayList<TaskReport>());
  return resp;
}
Project: hadoop    File: MRClientProtocolPBServiceImpl.java
@Override
public GetTaskReportsResponseProto getTaskReports(RpcController controller,
    GetTaskReportsRequestProto proto) throws ServiceException {
  GetTaskReportsRequest request = new GetTaskReportsRequestPBImpl(proto);
  try {
    GetTaskReportsResponse response = real.getTaskReports(request);
    return ((GetTaskReportsResponsePBImpl)response).getProto();
  } catch (IOException e) {
    throw new ServiceException(e);
  }
}
Project: hadoop    File: MRClientProtocolPBClientImpl.java
@Override
public GetTaskReportsResponse getTaskReports(GetTaskReportsRequest request)
    throws IOException {
  GetTaskReportsRequestProto requestProto = ((GetTaskReportsRequestPBImpl)request).getProto();
  try {
    return new GetTaskReportsResponsePBImpl(proxy.getTaskReports(null, requestProto));
  } catch (ServiceException e) {
    throw unwrapAndThrowException(e);
  }
}
Project: hadoop    File: HistoryClientService.java
@Override
public GetTaskReportsResponse getTaskReports(GetTaskReportsRequest request)
    throws IOException {
  JobId jobId = request.getJobId();
  TaskType taskType = request.getTaskType();

  GetTaskReportsResponse response = recordFactory.newRecordInstance(GetTaskReportsResponse.class);
  Job job = verifyAndGetJob(jobId, true);
  Collection<Task> tasks = job.getTasks(taskType).values();
  for (Task task : tasks) {
    response.addTaskReport(task.getReport());
  }
  return response;
}
Project: aliyun-oss-hadoop-fs    File: NotRunningJob.java
@Override
public GetTaskReportsResponse getTaskReports(GetTaskReportsRequest request)
    throws IOException {
  GetTaskReportsResponse resp =
    recordFactory.newRecordInstance(GetTaskReportsResponse.class);
  resp.addAllTaskReports(new ArrayList<TaskReport>());
  return resp;
}
Project: aliyun-oss-hadoop-fs    File: MRClientProtocolPBServiceImpl.java
@Override
public GetTaskReportsResponseProto getTaskReports(RpcController controller,
    GetTaskReportsRequestProto proto) throws ServiceException {
  GetTaskReportsRequest request = new GetTaskReportsRequestPBImpl(proto);
  try {
    GetTaskReportsResponse response = real.getTaskReports(request);
    return ((GetTaskReportsResponsePBImpl)response).getProto();
  } catch (IOException e) {
    throw new ServiceException(e);
  }
}
Project: aliyun-oss-hadoop-fs    File: MRClientProtocolPBClientImpl.java
@Override
public GetTaskReportsResponse getTaskReports(GetTaskReportsRequest request)
    throws IOException {
  GetTaskReportsRequestProto requestProto = ((GetTaskReportsRequestPBImpl)request).getProto();
  try {
    return new GetTaskReportsResponsePBImpl(proxy.getTaskReports(null, requestProto));
  } catch (ServiceException e) {
    throw unwrapAndThrowException(e);
  }
}
Project: aliyun-oss-hadoop-fs    File: HistoryClientService.java
@Override
public GetTaskReportsResponse getTaskReports(GetTaskReportsRequest request)
    throws IOException {
  JobId jobId = request.getJobId();
  TaskType taskType = request.getTaskType();

  GetTaskReportsResponse response = recordFactory.newRecordInstance(GetTaskReportsResponse.class);
  Job job = verifyAndGetJob(jobId, true);
  Collection<Task> tasks = job.getTasks(taskType).values();
  for (Task task : tasks) {
    response.addTaskReport(task.getReport());
  }
  return response;
}
Project: big-c    File: NotRunningJob.java
@Override
public GetTaskReportsResponse getTaskReports(GetTaskReportsRequest request)
    throws IOException {
  GetTaskReportsResponse resp =
    recordFactory.newRecordInstance(GetTaskReportsResponse.class);
  resp.addAllTaskReports(new ArrayList<TaskReport>());
  return resp;
}
Project: big-c    File: MRClientProtocolPBServiceImpl.java
@Override
public GetTaskReportsResponseProto getTaskReports(RpcController controller,
    GetTaskReportsRequestProto proto) throws ServiceException {
  GetTaskReportsRequest request = new GetTaskReportsRequestPBImpl(proto);
  try {
    GetTaskReportsResponse response = real.getTaskReports(request);
    return ((GetTaskReportsResponsePBImpl)response).getProto();
  } catch (IOException e) {
    throw new ServiceException(e);
  }
}
Project: big-c    File: MRClientProtocolPBClientImpl.java
@Override
public GetTaskReportsResponse getTaskReports(GetTaskReportsRequest request)
    throws IOException {
  GetTaskReportsRequestProto requestProto = ((GetTaskReportsRequestPBImpl)request).getProto();
  try {
    return new GetTaskReportsResponsePBImpl(proxy.getTaskReports(null, requestProto));
  } catch (ServiceException e) {
    throw unwrapAndThrowException(e);
  }
}
Project: big-c    File: HistoryClientService.java
@Override
public GetTaskReportsResponse getTaskReports(GetTaskReportsRequest request)
    throws IOException {
  JobId jobId = request.getJobId();
  TaskType taskType = request.getTaskType();

  GetTaskReportsResponse response = recordFactory.newRecordInstance(GetTaskReportsResponse.class);
  Job job = verifyAndGetJob(jobId, true);
  Collection<Task> tasks = job.getTasks(taskType).values();
  for (Task task : tasks) {
    response.addTaskReport(task.getReport());
  }
  return response;
}
Project: hadoop-2.6.0-cdh5.4.3    File: NotRunningJob.java
@Override
public GetTaskReportsResponse getTaskReports(GetTaskReportsRequest request)
    throws IOException {
  GetTaskReportsResponse resp =
    recordFactory.newRecordInstance(GetTaskReportsResponse.class);
  resp.addAllTaskReports(new ArrayList<TaskReport>());
  return resp;
}
Project: hadoop-2.6.0-cdh5.4.3    File: MRClientProtocolPBServiceImpl.java
@Override
public GetTaskReportsResponseProto getTaskReports(RpcController controller,
    GetTaskReportsRequestProto proto) throws ServiceException {
  GetTaskReportsRequest request = new GetTaskReportsRequestPBImpl(proto);
  try {
    GetTaskReportsResponse response = real.getTaskReports(request);
    return ((GetTaskReportsResponsePBImpl)response).getProto();
  } catch (IOException e) {
    throw new ServiceException(e);
  }
}
Project: hadoop-2.6.0-cdh5.4.3    File: MRClientProtocolPBClientImpl.java
@Override
public GetTaskReportsResponse getTaskReports(GetTaskReportsRequest request)
    throws IOException {
  GetTaskReportsRequestProto requestProto = ((GetTaskReportsRequestPBImpl)request).getProto();
  try {
    return new GetTaskReportsResponsePBImpl(proxy.getTaskReports(null, requestProto));
  } catch (ServiceException e) {
    throw unwrapAndThrowException(e);
  }
}
Project: hadoop-2.6.0-cdh5.4.3    File: HistoryClientService.java
@Override
public GetTaskReportsResponse getTaskReports(GetTaskReportsRequest request)
    throws IOException {
  JobId jobId = request.getJobId();
  TaskType taskType = request.getTaskType();

  GetTaskReportsResponse response = recordFactory.newRecordInstance(GetTaskReportsResponse.class);
  Job job = verifyAndGetJob(jobId);
  Collection<Task> tasks = job.getTasks(taskType).values();
  for (Task task : tasks) {
    response.addTaskReport(task.getReport());
  }
  return response;
}
Project: hadoop-plus    File: NotRunningJob.java
@Override
public GetTaskReportsResponse getTaskReports(GetTaskReportsRequest request)
    throws IOException {
  GetTaskReportsResponse resp =
    recordFactory.newRecordInstance(GetTaskReportsResponse.class);
  resp.addAllTaskReports(new ArrayList<TaskReport>());
  return resp;
}
Project: hadoop-plus    File: MRClientProtocolPBServiceImpl.java
@Override
public GetTaskReportsResponseProto getTaskReports(RpcController controller,
    GetTaskReportsRequestProto proto) throws ServiceException {
  GetTaskReportsRequest request = new GetTaskReportsRequestPBImpl(proto);
  try {
    GetTaskReportsResponse response = real.getTaskReports(request);
    return ((GetTaskReportsResponsePBImpl)response).getProto();
  } catch (IOException e) {
    throw new ServiceException(e);
  }
}
Project: hadoop-plus    File: MRClientProtocolPBClientImpl.java
@Override
public GetTaskReportsResponse getTaskReports(GetTaskReportsRequest request)
    throws IOException {
  GetTaskReportsRequestProto requestProto = ((GetTaskReportsRequestPBImpl)request).getProto();
  try {
    return new GetTaskReportsResponsePBImpl(proxy.getTaskReports(null, requestProto));
  } catch (ServiceException e) {
    throw unwrapAndThrowException(e);
  }
}
Project: hadoop-plus    File: HistoryClientService.java
@Override
public GetTaskReportsResponse getTaskReports(GetTaskReportsRequest request)
    throws IOException {
  JobId jobId = request.getJobId();
  TaskType taskType = request.getTaskType();

  GetTaskReportsResponse response = recordFactory.newRecordInstance(GetTaskReportsResponse.class);
  Job job = verifyAndGetJob(jobId);
  Collection<Task> tasks = job.getTasks(taskType).values();
  for (Task task : tasks) {
    response.addTaskReport(task.getReport());
  }
  return response;
}