Example source code for the Java class org.apache.hadoop.mapreduce.v2.LogParams

Project: incubator-tez    File: YARNRunner.java
@Override
public LogParams getLogFileParams(JobID jobID, TaskAttemptID taskAttemptID)
    throws IOException {
  try {
    return clientCache.getClient(jobID).getLogFilePath(jobID, taskAttemptID);
  } catch (YarnException e) {
    throw new IOException(e);
  }
}
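The Tez runners wrap the checked YarnException from the MR client call in an IOException so the ClientProtocol signature is preserved. For orientation, here is a minimal sketch of a caller that reaches such an implementation through the public Cluster API; the job and attempt IDs are hypothetical placeholders, not values from any of the projects on this page:

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.mapreduce.Cluster;
import org.apache.hadoop.mapreduce.JobID;
import org.apache.hadoop.mapreduce.TaskAttemptID;
import org.apache.hadoop.mapreduce.v2.LogParams;

public class LogParamsLookup {
  public static void main(String[] args) throws Exception {
    // Hypothetical IDs; substitute real ones from your cluster.
    JobID jobId = JobID.forName("job_1400000000000_0001");
    TaskAttemptID attemptId =
        TaskAttemptID.forName("attempt_1400000000000_0001_m_000000_0");

    Cluster cluster = new Cluster(new Configuration());
    try {
      // Cluster delegates to the active ClientProtocol implementation,
      // e.g. the YARNRunner#getLogFileParams shown above.
      LogParams params = cluster.getLogParams(jobId, attemptId);
      System.out.println("application: " + params.getApplicationId());
      System.out.println("container:   " + params.getContainerId());
      System.out.println("node:        " + params.getNodeId());
      System.out.println("owner:       " + params.getOwner());
    } catch (IOException e) {
      // Wrapped YarnExceptions (Tez) and "job still running" errors land here.
      e.printStackTrace();
    } finally {
      cluster.close();
    }
  }
}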
Project: tez    File: YARNRunner.java
@Override
public LogParams getLogFileParams(JobID jobID, TaskAttemptID taskAttemptID)
    throws IOException {
  try {
    return clientCache.getClient(jobID).getLogFilePath(jobID, taskAttemptID);
  } catch (YarnException e) {
    throw new IOException(e);
  }
}
Project: big_data    File: YARNRunner.java
@Override
public LogParams getLogFileParams(JobID jobID, TaskAttemptID taskAttemptID) throws IOException {
    return clientCache.getClient(jobID).getLogFilePath(jobID, taskAttemptID);
}
Project: hadoop    File: ClientServiceDelegate.java
public LogParams getLogFilePath(JobID oldJobID, TaskAttemptID oldTaskAttemptID)
    throws IOException {
  org.apache.hadoop.mapreduce.v2.api.records.JobId jobId =
      TypeConverter.toYarn(oldJobID);
  GetJobReportRequest request =
      recordFactory.newRecordInstance(GetJobReportRequest.class);
  request.setJobId(jobId);

  JobReport report =
      ((GetJobReportResponse) invoke("getJobReport",
          GetJobReportRequest.class, request)).getJobReport();
  if (EnumSet.of(JobState.SUCCEEDED, JobState.FAILED, JobState.KILLED,
      JobState.ERROR).contains(report.getJobState())) {
    if (oldTaskAttemptID != null) {
      GetTaskAttemptReportRequest taRequest =
          recordFactory.newRecordInstance(GetTaskAttemptReportRequest.class);
      taRequest.setTaskAttemptId(TypeConverter.toYarn(oldTaskAttemptID));
      TaskAttemptReport taReport =
          ((GetTaskAttemptReportResponse) invoke("getTaskAttemptReport",
              GetTaskAttemptReportRequest.class, taRequest))
              .getTaskAttemptReport();
      if (taReport.getContainerId() == null
          || taReport.getNodeManagerHost() == null) {
        throw new IOException("Unable to get log information for task: "
            + oldTaskAttemptID);
      }
      return new LogParams(
          taReport.getContainerId().toString(),
          taReport.getContainerId().getApplicationAttemptId()
              .getApplicationId().toString(),
          NodeId.newInstance(taReport.getNodeManagerHost(),
              taReport.getNodeManagerPort()).toString(), report.getUser());
    } else {
      if (report.getAMInfos() == null || report.getAMInfos().size() == 0) {
        throw new IOException("Unable to get log information for job: "
            + oldJobID);
      }
      AMInfo amInfo = report.getAMInfos().get(report.getAMInfos().size() - 1);
      return new LogParams(
          amInfo.getContainerId().toString(),
          amInfo.getAppAttemptId().getApplicationId().toString(),
          NodeId.newInstance(amInfo.getNodeManagerHost(),
              amInfo.getNodeManagerPort()).toString(), report.getUser());
    }
  } else {
    throw new IOException("Cannot get log path for a in-progress job");
  }
}
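The delegate resolves log locations only for completed jobs: given a task attempt ID it takes the container and node from the TaskAttemptReport, otherwise it falls back to the container of the most recent AM attempt. The four strings packed into LogParams (container ID, application ID, node ID, owner) are what the YARN log-aggregation helpers consume. Below is a minimal sketch of that consumption, modeled on the mapred CLI's -logs handling; it assumes the Hadoop 2.x LogCLIHelpers signature and that log aggregation is enabled:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.mapreduce.v2.LogParams;
import org.apache.hadoop.yarn.logaggregation.LogCLIHelpers;

public final class DumpLogs {
  // Print the aggregated logs for the container described by params.
  // Assumes log aggregation is enabled and the Hadoop 2.x helper signature;
  // returns the helper's exit code (0 on success).
  static int dump(Configuration conf, LogParams params) throws Exception {
    LogCLIHelpers helper = new LogCLIHelpers();
    helper.setConf(conf);
    return helper.dumpAContainersLogs(
        params.getApplicationId(),  // e.g. application_1400000000000_0001
        params.getContainerId(),    // e.g. container_1400000000000_0001_01_000002
        params.getNodeId(),         // e.g. worker-1:45454
        params.getOwner());         // user that ran the job
  }
}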
Project: hadoop    File: YARNRunner.java
@Override
public LogParams getLogFileParams(JobID jobID, TaskAttemptID taskAttemptID)
    throws IOException {
  return clientCache.getClient(jobID).getLogFilePath(jobID, taskAttemptID);
}
Project: hadoop    File: LocalJobRunner.java
@Override
public LogParams getLogFileParams(org.apache.hadoop.mapreduce.JobID jobID,
    org.apache.hadoop.mapreduce.TaskAttemptID taskAttemptID)
    throws IOException, InterruptedException {
  throw new UnsupportedOperationException("Not supported");
}
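LocalJobRunner has no YARN containers to point at, so the operation is simply unsupported. Code that must also run in local mode therefore has to treat log lookup as optional; a small hypothetical guard (tryGetLogParams is not part of Hadoop):

import java.io.IOException;

import org.apache.hadoop.mapreduce.Cluster;
import org.apache.hadoop.mapreduce.JobID;
import org.apache.hadoop.mapreduce.v2.LogParams;

public final class LogParamsSupport {
  // Hypothetical helper: returns null instead of failing when the active
  // runner (such as LocalJobRunner above) cannot provide log locations.
  public static LogParams tryGetLogParams(Cluster cluster, JobID jobId) {
    try {
      return cluster.getLogParams(jobId, null); // attempt ID is optional
    } catch (UnsupportedOperationException | IOException e) {
      return null; // local mode, job still in progress, or RPC failure
    } catch (InterruptedException e) {
      Thread.currentThread().interrupt();
      return null;
    }
  }
}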
Project: aliyun-oss-hadoop-fs    File: ClientServiceDelegate.java
public LogParams getLogFilePath(JobID oldJobID, TaskAttemptID oldTaskAttemptID)
    throws IOException {
  org.apache.hadoop.mapreduce.v2.api.records.JobId jobId =
      TypeConverter.toYarn(oldJobID);
  GetJobReportRequest request =
      recordFactory.newRecordInstance(GetJobReportRequest.class);
  request.setJobId(jobId);

  JobReport report =
      ((GetJobReportResponse) invoke("getJobReport",
          GetJobReportRequest.class, request)).getJobReport();
  if (EnumSet.of(JobState.SUCCEEDED, JobState.FAILED, JobState.KILLED,
      JobState.ERROR).contains(report.getJobState())) {
    if (oldTaskAttemptID != null) {
      GetTaskAttemptReportRequest taRequest =
          recordFactory.newRecordInstance(GetTaskAttemptReportRequest.class);
      taRequest.setTaskAttemptId(TypeConverter.toYarn(oldTaskAttemptID));
      TaskAttemptReport taReport =
          ((GetTaskAttemptReportResponse) invoke("getTaskAttemptReport",
              GetTaskAttemptReportRequest.class, taRequest))
              .getTaskAttemptReport();
      if (taReport.getContainerId() == null
          || taReport.getNodeManagerHost() == null) {
        throw new IOException("Unable to get log information for task: "
            + oldTaskAttemptID);
      }
      return new LogParams(
          taReport.getContainerId().toString(),
          taReport.getContainerId().getApplicationAttemptId()
              .getApplicationId().toString(),
          NodeId.newInstance(taReport.getNodeManagerHost(),
              taReport.getNodeManagerPort()).toString(), report.getUser());
    } else {
      if (report.getAMInfos() == null || report.getAMInfos().size() == 0) {
        throw new IOException("Unable to get log information for job: "
            + oldJobID);
      }
      AMInfo amInfo = report.getAMInfos().get(report.getAMInfos().size() - 1);
      return new LogParams(
          amInfo.getContainerId().toString(),
          amInfo.getAppAttemptId().getApplicationId().toString(),
          NodeId.newInstance(amInfo.getNodeManagerHost(),
              amInfo.getNodeManagerPort()).toString(), report.getUser());
    }
  } else {
    throw new IOException("Cannot get log path for a in-progress job");
  }
}
Project: aliyun-oss-hadoop-fs    File: YARNRunner.java
@Override
public LogParams getLogFileParams(JobID jobID, TaskAttemptID taskAttemptID)
    throws IOException {
  return clientCache.getClient(jobID).getLogFilePath(jobID, taskAttemptID);
}
Project: aliyun-oss-hadoop-fs    File: LocalJobRunner.java
@Override
public LogParams getLogFileParams(org.apache.hadoop.mapreduce.JobID jobID,
    org.apache.hadoop.mapreduce.TaskAttemptID taskAttemptID)
    throws IOException, InterruptedException {
  throw new UnsupportedOperationException("Not supported");
}
Project: big-c    File: ClientServiceDelegate.java
public LogParams getLogFilePath(JobID oldJobID, TaskAttemptID oldTaskAttemptID)
    throws IOException {
  org.apache.hadoop.mapreduce.v2.api.records.JobId jobId =
      TypeConverter.toYarn(oldJobID);
  GetJobReportRequest request =
      recordFactory.newRecordInstance(GetJobReportRequest.class);
  request.setJobId(jobId);

  JobReport report =
      ((GetJobReportResponse) invoke("getJobReport",
          GetJobReportRequest.class, request)).getJobReport();
  if (EnumSet.of(JobState.SUCCEEDED, JobState.FAILED, JobState.KILLED,
      JobState.ERROR).contains(report.getJobState())) {
    if (oldTaskAttemptID != null) {
      GetTaskAttemptReportRequest taRequest =
          recordFactory.newRecordInstance(GetTaskAttemptReportRequest.class);
      taRequest.setTaskAttemptId(TypeConverter.toYarn(oldTaskAttemptID));
      TaskAttemptReport taReport =
          ((GetTaskAttemptReportResponse) invoke("getTaskAttemptReport",
              GetTaskAttemptReportRequest.class, taRequest))
              .getTaskAttemptReport();
      if (taReport.getContainerId() == null
          || taReport.getNodeManagerHost() == null) {
        throw new IOException("Unable to get log information for task: "
            + oldTaskAttemptID);
      }
      return new LogParams(
          taReport.getContainerId().toString(),
          taReport.getContainerId().getApplicationAttemptId()
              .getApplicationId().toString(),
          NodeId.newInstance(taReport.getNodeManagerHost(),
              taReport.getNodeManagerPort()).toString(), report.getUser());
    } else {
      if (report.getAMInfos() == null || report.getAMInfos().size() == 0) {
        throw new IOException("Unable to get log information for job: "
            + oldJobID);
      }
      AMInfo amInfo = report.getAMInfos().get(report.getAMInfos().size() - 1);
      return new LogParams(
          amInfo.getContainerId().toString(),
          amInfo.getAppAttemptId().getApplicationId().toString(),
          NodeId.newInstance(amInfo.getNodeManagerHost(),
              amInfo.getNodeManagerPort()).toString(), report.getUser());
    }
  } else {
    throw new IOException("Cannot get log path for a in-progress job");
  }
}
Project: big-c    File: YARNRunner.java
@Override
public LogParams getLogFileParams(JobID jobID, TaskAttemptID taskAttemptID)
    throws IOException {
  return clientCache.getClient(jobID).getLogFilePath(jobID, taskAttemptID);
}
Project: big-c    File: LocalJobRunner.java
@Override
public LogParams getLogFileParams(org.apache.hadoop.mapreduce.JobID jobID,
    org.apache.hadoop.mapreduce.TaskAttemptID taskAttemptID)
    throws IOException, InterruptedException {
  throw new UnsupportedOperationException("Not supported");
}
Project: hadoop-2.6.0-cdh5.4.3    File: ClientServiceDelegate.java
public LogParams getLogFilePath(JobID oldJobID, TaskAttemptID oldTaskAttemptID)
    throws IOException {
  org.apache.hadoop.mapreduce.v2.api.records.JobId jobId =
      TypeConverter.toYarn(oldJobID);
  GetJobReportRequest request =
      recordFactory.newRecordInstance(GetJobReportRequest.class);
  request.setJobId(jobId);

  JobReport report =
      ((GetJobReportResponse) invoke("getJobReport",
          GetJobReportRequest.class, request)).getJobReport();
  if (EnumSet.of(JobState.SUCCEEDED, JobState.FAILED, JobState.KILLED,
      JobState.ERROR).contains(report.getJobState())) {
    if (oldTaskAttemptID != null) {
      GetTaskAttemptReportRequest taRequest =
          recordFactory.newRecordInstance(GetTaskAttemptReportRequest.class);
      taRequest.setTaskAttemptId(TypeConverter.toYarn(oldTaskAttemptID));
      TaskAttemptReport taReport =
          ((GetTaskAttemptReportResponse) invoke("getTaskAttemptReport",
              GetTaskAttemptReportRequest.class, taRequest))
              .getTaskAttemptReport();
      if (taReport.getContainerId() == null
          || taReport.getNodeManagerHost() == null) {
        throw new IOException("Unable to get log information for task: "
            + oldTaskAttemptID);
      }
      return new LogParams(
          taReport.getContainerId().toString(),
          taReport.getContainerId().getApplicationAttemptId()
              .getApplicationId().toString(),
          NodeId.newInstance(taReport.getNodeManagerHost(),
              taReport.getNodeManagerPort()).toString(), report.getUser());
    } else {
      if (report.getAMInfos() == null || report.getAMInfos().size() == 0) {
        throw new IOException("Unable to get log information for job: "
            + oldJobID);
      }
      AMInfo amInfo = report.getAMInfos().get(report.getAMInfos().size() - 1);
      return new LogParams(
          amInfo.getContainerId().toString(),
          amInfo.getAppAttemptId().getApplicationId().toString(),
          NodeId.newInstance(amInfo.getNodeManagerHost(),
              amInfo.getNodeManagerPort()).toString(), report.getUser());
    }
  } else {
    throw new IOException("Cannot get log path for a in-progress job");
  }
}
Project: hadoop-2.6.0-cdh5.4.3    File: YARNRunner.java
@Override
public LogParams getLogFileParams(JobID jobID, TaskAttemptID taskAttemptID)
    throws IOException {
  return clientCache.getClient(jobID).getLogFilePath(jobID, taskAttemptID);
}
Project: hadoop-2.6.0-cdh5.4.3    File: LocalJobRunner.java
@Override
public LogParams getLogFileParams(org.apache.hadoop.mapreduce.JobID jobID,
    org.apache.hadoop.mapreduce.TaskAttemptID taskAttemptID)
    throws IOException, InterruptedException {
  throw new UnsupportedOperationException("Not supported");
}
Project: hadoop-plus    File: ClientServiceDelegate.java
public LogParams getLogFilePath(JobID oldJobID, TaskAttemptID oldTaskAttemptID)
    throws IOException {
  org.apache.hadoop.mapreduce.v2.api.records.JobId jobId =
      TypeConverter.toYarn(oldJobID);
  GetJobReportRequest request =
      recordFactory.newRecordInstance(GetJobReportRequest.class);
  request.setJobId(jobId);

  JobReport report =
      ((GetJobReportResponse) invoke("getJobReport",
          GetJobReportRequest.class, request)).getJobReport();
  if (EnumSet.of(JobState.SUCCEEDED, JobState.FAILED, JobState.KILLED,
      JobState.ERROR).contains(report.getJobState())) {
    if (oldTaskAttemptID != null) {
      GetTaskAttemptReportRequest taRequest =
          recordFactory.newRecordInstance(GetTaskAttemptReportRequest.class);
      taRequest.setTaskAttemptId(TypeConverter.toYarn(oldTaskAttemptID));
      TaskAttemptReport taReport =
          ((GetTaskAttemptReportResponse) invoke("getTaskAttemptReport",
              GetTaskAttemptReportRequest.class, taRequest))
              .getTaskAttemptReport();
      if (taReport.getContainerId() == null
          || taReport.getNodeManagerHost() == null) {
        throw new IOException("Unable to get log information for task: "
            + oldTaskAttemptID);
      }
      return new LogParams(
          taReport.getContainerId().toString(),
          taReport.getContainerId().getApplicationAttemptId()
              .getApplicationId().toString(),
          NodeId.newInstance(taReport.getNodeManagerHost(),
              taReport.getNodeManagerPort()).toString(), report.getUser());
    } else {
      if (report.getAMInfos() == null || report.getAMInfos().size() == 0) {
        throw new IOException("Unable to get log information for job: "
            + oldJobID);
      }
      AMInfo amInfo = report.getAMInfos().get(report.getAMInfos().size() - 1);
      return new LogParams(
          amInfo.getContainerId().toString(),
          amInfo.getAppAttemptId().getApplicationId().toString(),
          NodeId.newInstance(amInfo.getNodeManagerHost(),
              amInfo.getNodeManagerPort()).toString(), report.getUser());
    }
  } else {
    throw new IOException("Cannot get log path for a in-progress job");
  }
}
Project: hadoop-plus    File: YARNRunner.java
@Override
public LogParams getLogFileParams(JobID jobID, TaskAttemptID taskAttemptID)
    throws IOException {
  return clientCache.getClient(jobID).getLogFilePath(jobID, taskAttemptID);
}
Project: hadoop-plus    File: LocalJobRunner.java
@Override
public LogParams getLogFileParams(org.apache.hadoop.mapreduce.JobID jobID,
    org.apache.hadoop.mapreduce.TaskAttemptID taskAttemptID)
    throws IOException, InterruptedException {
  throw new UnsupportedOperationException("Not supported");
}
Project: FlexMap    File: ClientServiceDelegate.java
public LogParams getLogFilePath(JobID oldJobID, TaskAttemptID oldTaskAttemptID)
    throws IOException {
  org.apache.hadoop.mapreduce.v2.api.records.JobId jobId =
      TypeConverter.toYarn(oldJobID);
  GetJobReportRequest request =
      recordFactory.newRecordInstance(GetJobReportRequest.class);
  request.setJobId(jobId);

  JobReport report =
      ((GetJobReportResponse) invoke("getJobReport",
          GetJobReportRequest.class, request)).getJobReport();
  if (EnumSet.of(JobState.SUCCEEDED, JobState.FAILED, JobState.KILLED,
      JobState.ERROR).contains(report.getJobState())) {
    if (oldTaskAttemptID != null) {
      GetTaskAttemptReportRequest taRequest =
          recordFactory.newRecordInstance(GetTaskAttemptReportRequest.class);
      taRequest.setTaskAttemptId(TypeConverter.toYarn(oldTaskAttemptID));
      TaskAttemptReport taReport =
          ((GetTaskAttemptReportResponse) invoke("getTaskAttemptReport",
              GetTaskAttemptReportRequest.class, taRequest))
              .getTaskAttemptReport();
      if (taReport.getContainerId() == null
          || taReport.getNodeManagerHost() == null) {
        throw new IOException("Unable to get log information for task: "
            + oldTaskAttemptID);
      }
      return new LogParams(
          taReport.getContainerId().toString(),
          taReport.getContainerId().getApplicationAttemptId()
              .getApplicationId().toString(),
          NodeId.newInstance(taReport.getNodeManagerHost(),
              taReport.getNodeManagerPort()).toString(), report.getUser());
    } else {
      if (report.getAMInfos() == null || report.getAMInfos().size() == 0) {
        throw new IOException("Unable to get log information for job: "
            + oldJobID);
      }
      AMInfo amInfo = report.getAMInfos().get(report.getAMInfos().size() - 1);
      return new LogParams(
          amInfo.getContainerId().toString(),
          amInfo.getAppAttemptId().getApplicationId().toString(),
          NodeId.newInstance(amInfo.getNodeManagerHost(),
              amInfo.getNodeManagerPort()).toString(), report.getUser());
    }
  } else {
    throw new IOException("Cannot get log path for a in-progress job");
  }
}
Project: FlexMap    File: YARNRunner.java
@Override
public LogParams getLogFileParams(JobID jobID, TaskAttemptID taskAttemptID)
    throws IOException {
  return clientCache.getClient(jobID).getLogFilePath(jobID, taskAttemptID);
}
Project: FlexMap    File: LocalJobRunner.java
@Override
public LogParams getLogFileParams(org.apache.hadoop.mapreduce.JobID jobID,
    org.apache.hadoop.mapreduce.TaskAttemptID taskAttemptID)
    throws IOException, InterruptedException {
  throw new UnsupportedOperationException("Not supported");
}
Project: ignite    File: HadoopClientProtocol.java
/** {@inheritDoc} */
@Override public LogParams getLogFileParams(JobID jobID, TaskAttemptID taskAttemptID) throws IOException,
    InterruptedException {
    return null;
}
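Ignite's stub returns null rather than throwing, so portable callers cannot assume a non-null result across runners. A tiny hypothetical formatter showing the required null check:

import org.apache.hadoop.mapreduce.v2.LogParams;

public final class LogParamsFormat {
  // Minimal sketch: tolerate protocol implementations (such as the Ignite
  // stub above) that return null rather than throwing.
  public static String describe(LogParams params) {
    if (params == null) {
      return "log location unavailable for this runner";
    }
    return params.getContainerId() + " on " + params.getNodeId()
        + " (app " + params.getApplicationId()
        + ", owner " + params.getOwner() + ")";
  }
}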
Project: hServer    File: HServerClientProtocol.java
@Override
public LogParams getLogFileParams(JobID jobID, TaskAttemptID taskAttemptID) throws IOException, InterruptedException {
    return backupRunner.getLogFileParams(jobID, taskAttemptID);
}
Project: hops    File: ClientServiceDelegate.java
public LogParams getLogFilePath(JobID oldJobID, TaskAttemptID oldTaskAttemptID)
    throws IOException {
  org.apache.hadoop.mapreduce.v2.api.records.JobId jobId =
      TypeConverter.toYarn(oldJobID);
  GetJobReportRequest request =
      recordFactory.newRecordInstance(GetJobReportRequest.class);
  request.setJobId(jobId);

  JobReport report =
      ((GetJobReportResponse) invoke("getJobReport",
          GetJobReportRequest.class, request)).getJobReport();
  if (EnumSet.of(JobState.SUCCEEDED, JobState.FAILED, JobState.KILLED,
      JobState.ERROR).contains(report.getJobState())) {
    if (oldTaskAttemptID != null) {
      GetTaskAttemptReportRequest taRequest =
          recordFactory.newRecordInstance(GetTaskAttemptReportRequest.class);
      taRequest.setTaskAttemptId(TypeConverter.toYarn(oldTaskAttemptID));
      TaskAttemptReport taReport =
          ((GetTaskAttemptReportResponse) invoke("getTaskAttemptReport",
              GetTaskAttemptReportRequest.class, taRequest))
              .getTaskAttemptReport();
      if (taReport.getContainerId() == null
          || taReport.getNodeManagerHost() == null) {
        throw new IOException("Unable to get log information for task: "
            + oldTaskAttemptID);
      }
      return new LogParams(
          taReport.getContainerId().toString(),
          taReport.getContainerId().getApplicationAttemptId()
              .getApplicationId().toString(),
          NodeId.newInstance(taReport.getNodeManagerHost(),
              taReport.getNodeManagerPort()).toString(), report.getUser());
    } else {
      if (report.getAMInfos() == null || report.getAMInfos().size() == 0) {
        throw new IOException("Unable to get log information for job: "
            + oldJobID);
      }
      AMInfo amInfo = report.getAMInfos().get(report.getAMInfos().size() - 1);
      return new LogParams(
          amInfo.getContainerId().toString(),
          amInfo.getAppAttemptId().getApplicationId().toString(),
          NodeId.newInstance(amInfo.getNodeManagerHost(),
              amInfo.getNodeManagerPort()).toString(), report.getUser());
    }
  } else {
    throw new IOException("Cannot get log path for a in-progress job");
  }
}
Project: hops    File: YARNRunner.java
@Override
public LogParams getLogFileParams(JobID jobID, TaskAttemptID taskAttemptID)
    throws IOException {
  return clientCache.getClient(jobID).getLogFilePath(jobID, taskAttemptID);
}
Project: hops    File: LocalJobRunner.java
@Override
public LogParams getLogFileParams(org.apache.hadoop.mapreduce.JobID jobID,
    org.apache.hadoop.mapreduce.TaskAttemptID taskAttemptID)
    throws IOException, InterruptedException {
  throw new UnsupportedOperationException("Not supported");
}
Project: hadoop-TCP    File: ClientServiceDelegate.java
public LogParams getLogFilePath(JobID oldJobID, TaskAttemptID oldTaskAttemptID)
    throws IOException {
  org.apache.hadoop.mapreduce.v2.api.records.JobId jobId =
      TypeConverter.toYarn(oldJobID);
  GetJobReportRequest request =
      recordFactory.newRecordInstance(GetJobReportRequest.class);
  request.setJobId(jobId);

  JobReport report =
      ((GetJobReportResponse) invoke("getJobReport",
          GetJobReportRequest.class, request)).getJobReport();
  if (EnumSet.of(JobState.SUCCEEDED, JobState.FAILED, JobState.KILLED,
      JobState.ERROR).contains(report.getJobState())) {
    if (oldTaskAttemptID != null) {
      GetTaskAttemptReportRequest taRequest =
          recordFactory.newRecordInstance(GetTaskAttemptReportRequest.class);
      taRequest.setTaskAttemptId(TypeConverter.toYarn(oldTaskAttemptID));
      TaskAttemptReport taReport =
          ((GetTaskAttemptReportResponse) invoke("getTaskAttemptReport",
              GetTaskAttemptReportRequest.class, taRequest))
              .getTaskAttemptReport();
      if (taReport.getContainerId() == null
          || taReport.getNodeManagerHost() == null) {
        throw new IOException("Unable to get log information for task: "
            + oldTaskAttemptID);
      }
      return new LogParams(
          taReport.getContainerId().toString(),
          taReport.getContainerId().getApplicationAttemptId()
              .getApplicationId().toString(),
          NodeId.newInstance(taReport.getNodeManagerHost(),
              taReport.getNodeManagerPort()).toString(), report.getUser());
    } else {
      if (report.getAMInfos() == null || report.getAMInfos().size() == 0) {
        throw new IOException("Unable to get log information for job: "
            + oldJobID);
      }
      AMInfo amInfo = report.getAMInfos().get(report.getAMInfos().size() - 1);
      return new LogParams(
          amInfo.getContainerId().toString(),
          amInfo.getAppAttemptId().getApplicationId().toString(),
          NodeId.newInstance(amInfo.getNodeManagerHost(),
              amInfo.getNodeManagerPort()).toString(), report.getUser());
    }
  } else {
    throw new IOException("Cannot get log path for a in-progress job");
  }
}
Project: hadoop-TCP    File: YARNRunner.java
@Override
public LogParams getLogFileParams(JobID jobID, TaskAttemptID taskAttemptID)
    throws IOException {
  return clientCache.getClient(jobID).getLogFilePath(jobID, taskAttemptID);
}
Project: hadoop-TCP    File: LocalJobRunner.java
@Override
public LogParams getLogFileParams(org.apache.hadoop.mapreduce.JobID jobID,
    org.apache.hadoop.mapreduce.TaskAttemptID taskAttemptID)
    throws IOException, InterruptedException {
  throw new UnsupportedOperationException("Not supported");
}
Project: hardfs    File: ClientServiceDelegate.java
public LogParams getLogFilePath(JobID oldJobID, TaskAttemptID oldTaskAttemptID)
    throws IOException {
  org.apache.hadoop.mapreduce.v2.api.records.JobId jobId =
      TypeConverter.toYarn(oldJobID);
  GetJobReportRequest request =
      recordFactory.newRecordInstance(GetJobReportRequest.class);
  request.setJobId(jobId);

  JobReport report =
      ((GetJobReportResponse) invoke("getJobReport",
          GetJobReportRequest.class, request)).getJobReport();
  if (EnumSet.of(JobState.SUCCEEDED, JobState.FAILED, JobState.KILLED,
      JobState.ERROR).contains(report.getJobState())) {
    if (oldTaskAttemptID != null) {
      GetTaskAttemptReportRequest taRequest =
          recordFactory.newRecordInstance(GetTaskAttemptReportRequest.class);
      taRequest.setTaskAttemptId(TypeConverter.toYarn(oldTaskAttemptID));
      TaskAttemptReport taReport =
          ((GetTaskAttemptReportResponse) invoke("getTaskAttemptReport",
              GetTaskAttemptReportRequest.class, taRequest))
              .getTaskAttemptReport();
      if (taReport.getContainerId() == null
          || taReport.getNodeManagerHost() == null) {
        throw new IOException("Unable to get log information for task: "
            + oldTaskAttemptID);
      }
      return new LogParams(
          taReport.getContainerId().toString(),
          taReport.getContainerId().getApplicationAttemptId()
              .getApplicationId().toString(),
          NodeId.newInstance(taReport.getNodeManagerHost(),
              taReport.getNodeManagerPort()).toString(), report.getUser());
    } else {
      if (report.getAMInfos() == null || report.getAMInfos().size() == 0) {
        throw new IOException("Unable to get log information for job: "
            + oldJobID);
      }
      AMInfo amInfo = report.getAMInfos().get(report.getAMInfos().size() - 1);
      return new LogParams(
          amInfo.getContainerId().toString(),
          amInfo.getAppAttemptId().getApplicationId().toString(),
          NodeId.newInstance(amInfo.getNodeManagerHost(),
              amInfo.getNodeManagerPort()).toString(), report.getUser());
    }
  } else {
    throw new IOException("Cannot get log path for a in-progress job");
  }
}
Project: hardfs    File: YARNRunner.java
@Override
public LogParams getLogFileParams(JobID jobID, TaskAttemptID taskAttemptID)
    throws IOException {
  return clientCache.getClient(jobID).getLogFilePath(jobID, taskAttemptID);
}
Project: hardfs    File: LocalJobRunner.java
@Override
public LogParams getLogFileParams(org.apache.hadoop.mapreduce.JobID jobID,
    org.apache.hadoop.mapreduce.TaskAttemptID taskAttemptID)
    throws IOException, InterruptedException {
  throw new UnsupportedOperationException("Not supported");
}
Project: hadoop-on-lustre2    File: ClientServiceDelegate.java
public LogParams getLogFilePath(JobID oldJobID, TaskAttemptID oldTaskAttemptID)
    throws IOException {
  org.apache.hadoop.mapreduce.v2.api.records.JobId jobId =
      TypeConverter.toYarn(oldJobID);
  GetJobReportRequest request =
      recordFactory.newRecordInstance(GetJobReportRequest.class);
  request.setJobId(jobId);

  JobReport report =
      ((GetJobReportResponse) invoke("getJobReport",
          GetJobReportRequest.class, request)).getJobReport();
  if (EnumSet.of(JobState.SUCCEEDED, JobState.FAILED, JobState.KILLED,
      JobState.ERROR).contains(report.getJobState())) {
    if (oldTaskAttemptID != null) {
      GetTaskAttemptReportRequest taRequest =
          recordFactory.newRecordInstance(GetTaskAttemptReportRequest.class);
      taRequest.setTaskAttemptId(TypeConverter.toYarn(oldTaskAttemptID));
      TaskAttemptReport taReport =
          ((GetTaskAttemptReportResponse) invoke("getTaskAttemptReport",
              GetTaskAttemptReportRequest.class, taRequest))
              .getTaskAttemptReport();
      if (taReport.getContainerId() == null
          || taReport.getNodeManagerHost() == null) {
        throw new IOException("Unable to get log information for task: "
            + oldTaskAttemptID);
      }
      return new LogParams(
          taReport.getContainerId().toString(),
          taReport.getContainerId().getApplicationAttemptId()
              .getApplicationId().toString(),
          NodeId.newInstance(taReport.getNodeManagerHost(),
              taReport.getNodeManagerPort()).toString(), report.getUser());
    } else {
      if (report.getAMInfos() == null || report.getAMInfos().size() == 0) {
        throw new IOException("Unable to get log information for job: "
            + oldJobID);
      }
      AMInfo amInfo = report.getAMInfos().get(report.getAMInfos().size() - 1);
      return new LogParams(
          amInfo.getContainerId().toString(),
          amInfo.getAppAttemptId().getApplicationId().toString(),
          NodeId.newInstance(amInfo.getNodeManagerHost(),
              amInfo.getNodeManagerPort()).toString(), report.getUser());
    }
  } else {
    throw new IOException("Cannot get log path for a in-progress job");
  }
}
Project: hadoop-on-lustre2    File: YARNRunner.java
@Override
public LogParams getLogFileParams(JobID jobID, TaskAttemptID taskAttemptID)
    throws IOException {
  return clientCache.getClient(jobID).getLogFilePath(jobID, taskAttemptID);
}
Project: hadoop-on-lustre2    File: LocalJobRunner.java
@Override
public LogParams getLogFileParams(org.apache.hadoop.mapreduce.JobID jobID,
    org.apache.hadoop.mapreduce.TaskAttemptID taskAttemptID)
    throws IOException, InterruptedException {
  throw new UnsupportedOperationException("Not supported");
}
Project: incubator-tez    File: ClientServiceDelegate.java
public LogParams getLogFilePath(JobID oldJobID,
    TaskAttemptID oldTaskAttemptID)
    throws YarnException, IOException {
  // FIXME logs for an attempt?
  throw new UnsupportedOperationException();
}
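The incubator-tez delegate leaves per-attempt log lookup unimplemented (note the FIXME), and because UnsupportedOperationException is unchecked it sails past the YarnException handler in the YARNRunner snippet at the top of this page. If a caller wants a single checked failure mode, a hypothetical normalizing wrapper (not part of Tez or Hadoop) could look like:

import java.io.IOException;

import org.apache.hadoop.mapreduce.JobID;
import org.apache.hadoop.mapreduce.TaskAttemptID;
import org.apache.hadoop.mapreduce.v2.LogParams;

public final class SafeLogLookup {
  // Hypothetical functional handle over any getLogFileParams-style call.
  public interface Lookup {
    LogParams get(JobID jobId, TaskAttemptID attemptId) throws Exception;
  }

  // Re-throw IOExceptions as-is; fold everything else (including the
  // unchecked UnsupportedOperationException thrown above) into IOException.
  public static LogParams lookup(Lookup impl, JobID jobId,
      TaskAttemptID attemptId) throws IOException {
    try {
      return impl.get(jobId, attemptId);
    } catch (IOException e) {
      throw e;
    } catch (Exception e) {
      throw new IOException("Log lookup failed for " + jobId, e);
    }
  }
}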