Java class org.apache.hadoop.mapred.TaskLog example source code

Project: hadoop    File: MRAppMaster.java
public MRAppMaster(ApplicationAttemptId applicationAttemptId,
    ContainerId containerId, String nmHost, int nmPort, int nmHttpPort,
    Clock clock, long appSubmitTime) {
  super(MRAppMaster.class.getName());
  this.clock = clock;
  this.startTime = clock.getTime();
  this.appSubmitTime = appSubmitTime;
  this.appAttemptID = applicationAttemptId;
  this.containerID = containerId;
  this.nmHost = nmHost;
  this.nmPort = nmPort;
  this.nmHttpPort = nmHttpPort;
  this.metrics = MRAppMetrics.create();
  logSyncer = TaskLog.createLogSyncer();
  LOG.info("Created MRAppMaster for application " + applicationAttemptId);
}
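The createLogSyncer() call above returns a daemon thread that periodically flushes the task-log appenders. A minimal sketch of the matching shutdown path, modeled on what MRAppMaster does when the service stops (this override is illustrative, not copied from any of the projects listed here):

@Override
protected void serviceStop() throws Exception {
  super.serviceStop();
  // Flush any buffered log output and stop the syncer thread before
  // the AM container exits; otherwise the tail of the log can be lost.
  TaskLog.syncLogsShutdown(logSyncer);
}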
Project: aliyun-oss-hadoop-fs    File: MRAppMaster.java
public MRAppMaster(ApplicationAttemptId applicationAttemptId,
    ContainerId containerId, String nmHost, int nmPort, int nmHttpPort,
    Clock clock, long appSubmitTime) {
  super(MRAppMaster.class.getName());
  this.clock = clock;
  this.startTime = clock.getTime();
  this.appSubmitTime = appSubmitTime;
  this.appAttemptID = applicationAttemptId;
  this.containerID = containerId;
  this.nmHost = nmHost;
  this.nmPort = nmPort;
  this.nmHttpPort = nmHttpPort;
  this.metrics = MRAppMetrics.create();
  logSyncer = TaskLog.createLogSyncer();
  LOG.info("Created MRAppMaster for application " + applicationAttemptId);
}
Project: big-c    File: MRAppMaster.java
public MRAppMaster(ApplicationAttemptId applicationAttemptId,
    ContainerId containerId, String nmHost, int nmPort, int nmHttpPort,
    Clock clock, long appSubmitTime) {
  super(MRAppMaster.class.getName());
  this.clock = clock;
  this.startTime = clock.getTime();
  this.appSubmitTime = appSubmitTime;
  this.appAttemptID = applicationAttemptId;
  this.containerID = containerId;
  this.nmHost = nmHost;
  this.nmPort = nmPort;
  this.nmHttpPort = nmHttpPort;
  this.metrics = MRAppMetrics.create();
  logSyncer = TaskLog.createLogSyncer();
  LOG.info("Created MRAppMaster for application " + applicationAttemptId);
}
Project: hadoop-2.6.0-cdh5.4.3    File: MRAppMaster.java
public MRAppMaster(ApplicationAttemptId applicationAttemptId,
    ContainerId containerId, String nmHost, int nmPort, int nmHttpPort,
    Clock clock, long appSubmitTime) {
  super(MRAppMaster.class.getName());
  this.clock = clock;
  this.startTime = clock.getTime();
  this.appSubmitTime = appSubmitTime;
  this.appAttemptID = applicationAttemptId;
  this.containerID = containerId;
  this.nmHost = nmHost;
  this.nmPort = nmPort;
  this.nmHttpPort = nmHttpPort;
  this.metrics = MRAppMetrics.create();
  logSyncer = TaskLog.createLogSyncer();
  LOG.info("Created MRAppMaster for application " + applicationAttemptId);
}
Project: hadoop-2.6.0-cdh5.4.3    File: TaskLogsTruncater.java
/**
 * Copy the original index-file details into the updated map, unchanged,
 * for logs that are tracked by index files.
 *
 * @param lInfo JVM info holding all task attempts that ran in this JVM
 * @param taskLogFileDetails the original log-file details, keyed by task
 * @param updatedTaskLogFileDetails map to receive the unchanged details
 * @param logName the log type being processed
 */
private void copyOriginalIndexFileInfo(JVMInfo lInfo,
    Map<Task, Map<LogName, LogFileDetail>> taskLogFileDetails,
    Map<Task, Map<LogName, LogFileDetail>> updatedTaskLogFileDetails,
    LogName logName) {
  if (TaskLog.LOGS_TRACKED_BY_INDEX_FILES.contains(logName)) {
    for (Task task : lInfo.getAllAttempts()) {
      if (!updatedTaskLogFileDetails.containsKey(task)) {
        updatedTaskLogFileDetails.put(task,
            new HashMap<LogName, LogFileDetail>());
      }
      updatedTaskLogFileDetails.get(task).put(logName,
          taskLogFileDetails.get(task).get(logName));
    }
  }
}
Project: hadoop-2.6.0-cdh5.4.3    File: MRAsyncDiskService.java
/**
 * Move all files/directories inside volume into TOBEDELETED, and then
 * delete them.  The TOBEDELETED directory itself is ignored.
 */
public void cleanupAllVolumes() throws IOException {
  for (int v = 0; v < volumes.length; v++) {
    // List all files inside the volumes
    FileStatus[] files = null;
    try {
      files = localFileSystem.listStatus(new Path(volumes[v]),
          TaskLog.USERLOGS_PATH_FILTER);
    } catch (Exception e) {
      // Ignore exceptions in listStatus
      // We tolerate missing volumes.
    }
    if (files != null) {
      for (int f = 0; f < files.length; f++) {
        // Get the file name - the last component of the Path
        String entryName = files[f].getPath().getName();
        // Do not delete the current TOBEDELETED
        if (!TOBEDELETED.equals(entryName)) {
          moveAndDeleteRelativePath(volumes[v], entryName);
        }
      }
    }
  }
}
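TaskLog.USERLOGS_PATH_FILTER, used above, is an ordinary PathFilter, so the same listing can be reproduced standalone. A minimal sketch, with a placeholder volume path in place of a real mapred.local.dir entry:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.LocalFileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.mapred.TaskLog;

public class ListVolumeEntries {
  public static void main(String[] args) throws Exception {
    LocalFileSystem lfs = FileSystem.getLocal(new Configuration());
    // Apply the same path filter cleanupAllVolumes() uses when
    // scanning a volume; "/tmp/mapred/local" is a placeholder.
    FileStatus[] entries = lfs.listStatus(
        new Path("/tmp/mapred/local"), TaskLog.USERLOGS_PATH_FILTER);
    for (FileStatus entry : entries) {
      System.out.println(entry.getPath().getName());
    }
  }
}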
Project: hadoop-EAR    File: TaskLogsMonitor.java
/**
 * Restore the original index-file details into the updated map,
 * for logs that are tracked by index files.
 *
 * @param lInfo per-JVM info holding all task attempts that ran in this JVM
 * @param taskLogFileDetails the original log-file details, keyed by task
 * @param updatedTaskLogFileDetails map to receive the original details
 * @param logName the log type being processed
 */
private void revertIndexFileInfo(PerJVMInfo lInfo,
    Map<Task, Map<LogName, LogFileDetail>> taskLogFileDetails,
    Map<Task, Map<LogName, LogFileDetail>> updatedTaskLogFileDetails,
    LogName logName) {
  if (TaskLog.LOGS_TRACKED_BY_INDEX_FILES.contains(logName)) {
    for (Task task : lInfo.allAttempts) {
      if (!updatedTaskLogFileDetails.containsKey(task)) {
        updatedTaskLogFileDetails.put(task,
            new HashMap<LogName, LogFileDetail>());
      }
      updatedTaskLogFileDetails.get(task).put(logName,
          taskLogFileDetails.get(task).get(logName));
    }
  }
}
Project: FlexMap    File: MRAppMaster.java
public MRAppMaster(ApplicationAttemptId applicationAttemptId,
    ContainerId containerId, String nmHost, int nmPort, int nmHttpPort,
    Clock clock, long appSubmitTime) {
  super(MRAppMaster.class.getName());
  this.clock = clock;
  this.startTime = clock.getTime();
  this.appSubmitTime = appSubmitTime;
  this.appAttemptID = applicationAttemptId;
  this.containerID = containerId;
  this.nmHost = nmHost;
  this.nmPort = nmPort;
  this.nmHttpPort = nmHttpPort;
  this.metrics = MRAppMetrics.create();
  logSyncer = TaskLog.createLogSyncer();
  LOG.info("Created MRAppMaster for application " + applicationAttemptId);
}
Project: hops    File: MRAppMaster.java
public MRAppMaster(ApplicationAttemptId applicationAttemptId,
    ContainerId containerId, String nmHost, int nmPort, int nmHttpPort,
    Clock clock, long appSubmitTime) {
  super(MRAppMaster.class.getName());
  this.clock = clock;
  this.startTime = clock.getTime();
  this.appSubmitTime = appSubmitTime;
  this.appAttemptID = applicationAttemptId;
  this.containerID = containerId;
  this.nmHost = nmHost;
  this.nmPort = nmPort;
  this.nmHttpPort = nmHttpPort;
  this.metrics = MRAppMetrics.create();
  logSyncer = TaskLog.createLogSyncer();
  LOG.info("Created MRAppMaster for application " + applicationAttemptId);
}
Project: hadoop-on-lustre    File: TaskLogsTruncater.java
/**
 * Copy the original index-file details into the updated map, unchanged,
 * for logs that are tracked by index files.
 *
 * @param lInfo JVM info holding all task attempts that ran in this JVM
 * @param taskLogFileDetails the original log-file details, keyed by task
 * @param updatedTaskLogFileDetails map to receive the unchanged details
 * @param logName the log type being processed
 */
private void copyOriginalIndexFileInfo(JVMInfo lInfo,
    Map<Task, Map<LogName, LogFileDetail>> taskLogFileDetails,
    Map<Task, Map<LogName, LogFileDetail>> updatedTaskLogFileDetails,
    LogName logName) {
  if (TaskLog.LOGS_TRACKED_BY_INDEX_FILES.contains(logName)) {
    for (Task task : lInfo.getAllAttempts()) {
      if (!updatedTaskLogFileDetails.containsKey(task)) {
        updatedTaskLogFileDetails.put(task,
            new HashMap<LogName, LogFileDetail>());
      }
      updatedTaskLogFileDetails.get(task).put(logName,
          taskLogFileDetails.get(task).get(logName));
    }
  }
}
Project: hadoop-on-lustre2    File: MRAppMaster.java
public MRAppMaster(ApplicationAttemptId applicationAttemptId,
    ContainerId containerId, String nmHost, int nmPort, int nmHttpPort,
    Clock clock, long appSubmitTime, int maxAppAttempts) {
  super(MRAppMaster.class.getName());
  this.clock = clock;
  this.startTime = clock.getTime();
  this.appSubmitTime = appSubmitTime;
  this.appAttemptID = applicationAttemptId;
  this.containerID = containerId;
  this.nmHost = nmHost;
  this.nmPort = nmPort;
  this.nmHttpPort = nmHttpPort;
  this.metrics = MRAppMetrics.create();
  this.maxAppAttempts = maxAppAttempts;
  logSyncer = TaskLog.createLogSyncer();
  LOG.info("Created MRAppMaster for application " + applicationAttemptId);
}
Project: RDFS    File: TaskLogsMonitor.java
/**
 * Restore the original index-file details into the updated map,
 * for logs that are tracked by index files.
 *
 * @param lInfo per-JVM info holding all task attempts that ran in this JVM
 * @param taskLogFileDetails the original log-file details, keyed by task
 * @param updatedTaskLogFileDetails map to receive the original details
 * @param logName the log type being processed
 */
private void revertIndexFileInfo(PerJVMInfo lInfo,
    Map<Task, Map<LogName, LogFileDetail>> taskLogFileDetails,
    Map<Task, Map<LogName, LogFileDetail>> updatedTaskLogFileDetails,
    LogName logName) {
  if (TaskLog.LOGS_TRACKED_BY_INDEX_FILES.contains(logName)) {
    for (Task task : lInfo.allAttempts) {
      if (!updatedTaskLogFileDetails.containsKey(task)) {
        updatedTaskLogFileDetails.put(task,
            new HashMap<LogName, LogFileDetail>());
      }
      updatedTaskLogFileDetails.get(task).put(logName,
          taskLogFileDetails.get(task).get(logName));
    }
  }
}
Project: hanoi-hadoop-2.0.0-cdh    File: TaskLogsTruncater.java
/**
 * Copy the original index-file details into the updated map, unchanged,
 * for logs that are tracked by index files.
 *
 * @param lInfo JVM info holding all task attempts that ran in this JVM
 * @param taskLogFileDetails the original log-file details, keyed by task
 * @param updatedTaskLogFileDetails map to receive the unchanged details
 * @param logName the log type being processed
 */
private void copyOriginalIndexFileInfo(JVMInfo lInfo,
    Map<Task, Map<LogName, LogFileDetail>> taskLogFileDetails,
    Map<Task, Map<LogName, LogFileDetail>> updatedTaskLogFileDetails,
    LogName logName) {
  if (TaskLog.LOGS_TRACKED_BY_INDEX_FILES.contains(logName)) {
    for (Task task : lInfo.getAllAttempts()) {
      if (!updatedTaskLogFileDetails.containsKey(task)) {
        updatedTaskLogFileDetails.put(task,
            new HashMap<LogName, LogFileDetail>());
      }
      updatedTaskLogFileDetails.get(task).put(logName,
          taskLogFileDetails.get(task).get(logName));
    }
  }
}
Project: hanoi-hadoop-2.0.0-cdh    File: MRAsyncDiskService.java
/**
 * Move all files/directories inside volume into TOBEDELETED, and then
 * delete them.  The TOBEDELETED directory itself is ignored.
 */
public void cleanupAllVolumes() throws IOException {
  for (int v = 0; v < volumes.length; v++) {
    // List all files inside the volumes
    FileStatus[] files = null;
    try {
      files = localFileSystem.listStatus(new Path(volumes[v]),
          TaskLog.USERLOGS_PATH_FILTER);
    } catch (Exception e) {
      // Ignore exceptions in listStatus
      // We tolerate missing volumes.
    }
    if (files != null) {
      for (int f = 0; f < files.length; f++) {
        // Get the file name - the last component of the Path
        String entryName = files[f].getPath().getName();
        // Do not delete the current TOBEDELETED
        if (!TOBEDELETED.equals(entryName)) {
          moveAndDeleteRelativePath(volumes[v], entryName);
        }
      }
    }
  }
}
Project: mammoth    File: TaskLogsTruncater.java
/**
 * Copy the original index-file details into the updated map, unchanged,
 * for logs that are tracked by index files.
 *
 * @param lInfo JVM info holding all task attempts that ran in this JVM
 * @param taskLogFileDetails the original log-file details, keyed by task
 * @param updatedTaskLogFileDetails map to receive the unchanged details
 * @param logName the log type being processed
 */
private void copyOriginalIndexFileInfo(JVMInfo lInfo,
    Map<Task, Map<LogName, LogFileDetail>> taskLogFileDetails,
    Map<Task, Map<LogName, LogFileDetail>> updatedTaskLogFileDetails,
    LogName logName) {
  if (TaskLog.LOGS_TRACKED_BY_INDEX_FILES.contains(logName)) {
    for (Task task : lInfo.getAllAttempts()) {
      if (!updatedTaskLogFileDetails.containsKey(task)) {
        updatedTaskLogFileDetails.put(task,
            new HashMap<LogName, LogFileDetail>());
      }
      updatedTaskLogFileDetails.get(task).put(logName,
          taskLogFileDetails.get(task).get(logName));
    }
  }
}
Project: hortonworks-extension    File: TaskLogsTruncater.java
/**
 * Copy the original index-file details into the updated map, unchanged,
 * for logs that are tracked by index files.
 *
 * @param lInfo JVM info holding all task attempts that ran in this JVM
 * @param taskLogFileDetails the original log-file details, keyed by task
 * @param updatedTaskLogFileDetails map to receive the unchanged details
 * @param logName the log type being processed
 */
private void copyOriginalIndexFileInfo(JVMInfo lInfo,
    Map<Task, Map<LogName, LogFileDetail>> taskLogFileDetails,
    Map<Task, Map<LogName, LogFileDetail>> updatedTaskLogFileDetails,
    LogName logName) {
  if (TaskLog.LOGS_TRACKED_BY_INDEX_FILES.contains(logName)) {
    for (Task task : lInfo.getAllAttempts()) {
      if (!updatedTaskLogFileDetails.containsKey(task)) {
        updatedTaskLogFileDetails.put(task,
            new HashMap<LogName, LogFileDetail>());
      }
      updatedTaskLogFileDetails.get(task).put(logName,
          taskLogFileDetails.get(task).get(logName));
    }
  }
}
Project: angel    File: WorkerJVM.java
/**
 * Create the worker attempt JVM launch command.
 * @param conf application configuration
 * @param appid application id
 * @param workerAttemptId worker attempt id
 * @return the full command line used to launch the worker JVM
 */
public static List<String> getVMCommand(Configuration conf, ApplicationId appid, WorkerAttemptId workerAttemptId) {
  Vector<String> vargs = new Vector<String>(8);

  vargs.add(Environment.JAVA_HOME.$() + "/bin/java");

  String javaOpts = getChildJavaOpts(conf, appid, workerAttemptId);
  LOG.debug("javaOpts=" + javaOpts);

  String[] javaOptsSplit = javaOpts.split(" ");
  for (int i = 0; i < javaOptsSplit.length; i++) {
    vargs.add(javaOptsSplit[i]);
  }

  Path childTmpDir = new Path(Environment.PWD.$(), YarnConfiguration.DEFAULT_CONTAINER_TEMP_DIR);
  vargs.add("-Djava.io.tmpdir=" + childTmpDir);

  // Setup the log4j prop
  long logSize = 0;
  setupLog4jProperties(conf, vargs, logSize);

  // Add main class and its arguments
  String workerClassName =
      conf.get(AngelConf.ANGEL_WORKER_CLASS,
          AngelConf.DEFAULT_ANGEL_WORKER_CLASS);
  vargs.add(workerClassName);

  vargs.add("1>" + getTaskLogFile(TaskLog.LogName.STDOUT));
  vargs.add("2>" + getTaskLogFile(TaskLog.LogName.STDERR));

  // Final command
  StringBuilder mergedCommand = new StringBuilder();
  for (CharSequence str : vargs) {
    mergedCommand.append(str).append(" ");
  }
  Vector<String> vargsFinal = new Vector<String>(1);
  vargsFinal.add(mergedCommand.toString());
  return vargsFinal;
}
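getVMCommand() returns a single merged shell command. A typical consumer hands that list to YARN when building the worker's launch context; the helper below is an illustrative sketch using the standard YARN records API (org.apache.hadoop.yarn.api.records.ContainerLaunchContext and org.apache.hadoop.yarn.util.Records), not code from the angel project:

private static ContainerLaunchContext buildWorkerLaunchContext(
    Configuration conf, ApplicationId appid, WorkerAttemptId workerAttemptId) {
  // The command list is exactly what getVMCommand() produced above;
  // YARN runs it inside the allocated container.
  List<String> commands = getVMCommand(conf, appid, workerAttemptId);
  ContainerLaunchContext ctx = Records.newRecord(ContainerLaunchContext.class);
  ctx.setCommands(commands);
  return ctx;
}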
Project: hadoop    File: TestPipeApplication.java
/**
 * Clean up stdout and stderr logs left over from previous runs.
 */
private void initStdOut(JobConf configuration) {
  TaskAttemptID taskId = TaskAttemptID.forName(configuration
          .get(MRJobConfig.TASK_ATTEMPT_ID));
  File stdOut = TaskLog.getTaskLogFile(taskId, false, TaskLog.LogName.STDOUT);
  File stdErr = TaskLog.getTaskLogFile(taskId, false, TaskLog.LogName.STDERR);
  // prepare folder
  if (!stdOut.getParentFile().exists()) {
    stdOut.getParentFile().mkdirs();
  } else { // schedule stale logs for deletion on JVM exit
    stdOut.deleteOnExit();
    stdErr.deleteOnExit();
  }
}
Project: hadoop    File: TestPipeApplication.java
private String readStdOut(JobConf conf) throws Exception {
  TaskAttemptID taskId = TaskAttemptID.forName(conf
          .get(MRJobConfig.TASK_ATTEMPT_ID));
  File stdOut = TaskLog.getTaskLogFile(taskId, false, TaskLog.LogName.STDOUT);
  return readFile(stdOut);
}
Project: hadoop    File: MapReduceTestUtil.java
/**
 * Reads a task log and returns it as a trimmed string.
 * 
 * @param filter
 *          Task log filter; can be STDOUT, STDERR, SYSLOG, DEBUGOUT, PROFILE
 * @param taskId
 *          The task id for which the log has to be collected
 * @param isCleanup
 *          whether the task is a cleanup attempt or not.
 * @return task log as string
 * @throws IOException
 */
public static String readTaskLog(TaskLog.LogName filter,
    org.apache.hadoop.mapred.TaskAttemptID taskId, boolean isCleanup)
    throws IOException {
  // buffer to accumulate the task log
  StringBuffer result = new StringBuffer();
  int res;

  // open the task log as an input stream
  InputStream taskLogReader = new TaskLog.Reader(taskId, filter, 0, -1,
      isCleanup);
  // build the log string from the stream, appending only the bytes
  // actually read on each pass
  byte[] b = new byte[65536];
  while (true) {
    res = taskLogReader.read(b);
    if (res > 0) {
      result.append(new String(b, 0, res));
    } else {
      break;
    }
  }
  taskLogReader.close();

  // trim the string and return it
  String str = result.toString();
  str = str.trim();
  return str;
}
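A typical call site for readTaskLog(), with a placeholder attempt id string standing in for a real attempt of your job:

org.apache.hadoop.mapred.TaskAttemptID attemptId =
    org.apache.hadoop.mapred.TaskAttemptID.forName(
        "attempt_200707121733_0003_m_000005_0");
String stdout = MapReduceTestUtil.readTaskLog(
    TaskLog.LogName.STDOUT, attemptId, false);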
Project: hadoop    File: TestStreamingStatus.java
void validateTaskStderr(StreamJob job, TaskType type)
    throws IOException {
  TaskAttemptID attemptId =
      new TaskAttemptID(new TaskID(job.jobId_, type, 0), 0);

  String log = MapReduceTestUtil.readTaskLog(TaskLog.LogName.STDERR,
      attemptId, false);

  // trim() is called on expectedStderr here because
  // MapReduceTestUtil.readTaskLog() returns a trimmed String.
  assertTrue(log.equals(expectedStderr.trim()));
}
Project: aliyun-oss-hadoop-fs    File: TestPipeApplication.java
/**
 * Clean up stdout and stderr logs left over from previous runs.
 */
private void initStdOut(JobConf configuration) {
  TaskAttemptID taskId = TaskAttemptID.forName(configuration
          .get(MRJobConfig.TASK_ATTEMPT_ID));
  File stdOut = TaskLog.getTaskLogFile(taskId, false, TaskLog.LogName.STDOUT);
  File stdErr = TaskLog.getTaskLogFile(taskId, false, TaskLog.LogName.STDERR);
  // prepare folder
  if (!stdOut.getParentFile().exists()) {
    stdOut.getParentFile().mkdirs();
  } else { // schedule stale logs for deletion on JVM exit
    stdOut.deleteOnExit();
    stdErr.deleteOnExit();
  }
}
Project: aliyun-oss-hadoop-fs    File: TestPipeApplication.java
private String readStdOut(JobConf conf) throws Exception {
  TaskAttemptID taskId = TaskAttemptID.forName(conf
          .get(MRJobConfig.TASK_ATTEMPT_ID));
  File stdOut = TaskLog.getTaskLogFile(taskId, false, TaskLog.LogName.STDOUT);
  return readFile(stdOut);
}
Project: aliyun-oss-hadoop-fs    File: MapReduceTestUtil.java
/**
 * Reads a task log and returns it as a trimmed string.
 * 
 * @param filter
 *          Task log filter; can be STDOUT, STDERR, SYSLOG, DEBUGOUT, PROFILE
 * @param taskId
 *          The task id for which the log has to be collected
 * @param isCleanup
 *          whether the task is a cleanup attempt or not.
 * @return task log as string
 * @throws IOException
 */
public static String readTaskLog(TaskLog.LogName filter,
    org.apache.hadoop.mapred.TaskAttemptID taskId, boolean isCleanup)
    throws IOException {
  // buffer to accumulate the task log
  StringBuffer result = new StringBuffer();
  int res;

  // open the task log as an input stream
  InputStream taskLogReader = new TaskLog.Reader(taskId, filter, 0, -1,
      isCleanup);
  // build the log string from the stream, appending only the bytes
  // actually read on each pass
  byte[] b = new byte[65536];
  while (true) {
    res = taskLogReader.read(b);
    if (res > 0) {
      result.append(new String(b, 0, res));
    } else {
      break;
    }
  }
  taskLogReader.close();

  // trim the string and return it
  String str = result.toString();
  str = str.trim();
  return str;
}
Project: aliyun-oss-hadoop-fs    File: TestStreamingStatus.java
void validateTaskStderr(StreamJob job, TaskType type)
    throws IOException {
  TaskAttemptID attemptId =
      new TaskAttemptID(new TaskID(job.jobId_, type, 0), 0);

  String log = MapReduceTestUtil.readTaskLog(TaskLog.LogName.STDERR,
      attemptId, false);

  // trim() is called on expectedStderr here because
  // MapReduceTestUtil.readTaskLog() returns a trimmed String.
  assertTrue(log.equals(expectedStderr.trim()));
}
Project: big-c    File: TestPipeApplication.java
/**
 * Clean up stdout and stderr logs left over from previous runs.
 */
private void initStdOut(JobConf configuration) {
  TaskAttemptID taskId = TaskAttemptID.forName(configuration
          .get(MRJobConfig.TASK_ATTEMPT_ID));
  File stdOut = TaskLog.getTaskLogFile(taskId, false, TaskLog.LogName.STDOUT);
  File stdErr = TaskLog.getTaskLogFile(taskId, false, TaskLog.LogName.STDERR);
  // prepare folder
  if (!stdOut.getParentFile().exists()) {
    stdOut.getParentFile().mkdirs();
  } else { // schedule stale logs for deletion on JVM exit
    stdOut.deleteOnExit();
    stdErr.deleteOnExit();
  }
}
Project: big-c    File: TestPipeApplication.java
private String readStdOut(JobConf conf) throws Exception {
  TaskAttemptID taskId = TaskAttemptID.forName(conf
          .get(MRJobConfig.TASK_ATTEMPT_ID));
  File stdOut = TaskLog.getTaskLogFile(taskId, false, TaskLog.LogName.STDOUT);
  return readFile(stdOut);
}
Project: big-c    File: MapReduceTestUtil.java
/**
 * Reads a task log and returns it as a trimmed string.
 * 
 * @param filter
 *          Task log filter; can be STDOUT, STDERR, SYSLOG, DEBUGOUT, PROFILE
 * @param taskId
 *          The task id for which the log has to be collected
 * @param isCleanup
 *          whether the task is a cleanup attempt or not.
 * @return task log as string
 * @throws IOException
 */
public static String readTaskLog(TaskLog.LogName filter,
    org.apache.hadoop.mapred.TaskAttemptID taskId, boolean isCleanup)
    throws IOException {
  // buffer to accumulate the task log
  StringBuffer result = new StringBuffer();
  int res;

  // open the task log as an input stream
  InputStream taskLogReader = new TaskLog.Reader(taskId, filter, 0, -1,
      isCleanup);
  // build the log string from the stream, appending only the bytes
  // actually read on each pass
  byte[] b = new byte[65536];
  while (true) {
    res = taskLogReader.read(b);
    if (res > 0) {
      result.append(new String(b, 0, res));
    } else {
      break;
    }
  }
  taskLogReader.close();

  // trim the string and return it
  String str = result.toString();
  str = str.trim();
  return str;
}
Project: big-c    File: TestStreamingStatus.java
void validateTaskStderr(StreamJob job, TaskType type)
    throws IOException {
  TaskAttemptID attemptId =
      new TaskAttemptID(new TaskID(job.jobId_, type, 0), 0);

  String log = MapReduceTestUtil.readTaskLog(TaskLog.LogName.STDERR,
      attemptId, false);

  // trim() is called on expectedStderr here because
  // MapReduceTestUtil.readTaskLog() returns a trimmed String.
  assertTrue(log.equals(expectedStderr.trim()));
}
Project: hadoop-2.6.0-cdh5.4.3    File: TestPipeApplication.java
/**
 * Clean up stdout and stderr logs left over from previous runs.
 */
private void initStdOut(JobConf configuration) {
  TaskAttemptID taskId = TaskAttemptID.forName(configuration
          .get(MRJobConfig.TASK_ATTEMPT_ID));
  File stdOut = TaskLog.getTaskLogFile(taskId, false, TaskLog.LogName.STDOUT);
  File stdErr = TaskLog.getTaskLogFile(taskId, false, TaskLog.LogName.STDERR);
  // prepare folder
  if (!stdOut.getParentFile().exists()) {
    stdOut.getParentFile().mkdirs();
  } else { // schedule stale logs for deletion on JVM exit
    stdOut.deleteOnExit();
    stdErr.deleteOnExit();
  }
}
Project: hadoop-2.6.0-cdh5.4.3    File: TestPipeApplication.java
private String readStdOut(JobConf conf) throws Exception {
  TaskAttemptID taskId = TaskAttemptID.forName(conf
          .get(MRJobConfig.TASK_ATTEMPT_ID));
  File stdOut = TaskLog.getTaskLogFile(taskId, false, TaskLog.LogName.STDOUT);
  return readFile(stdOut);
}
Project: hadoop-2.6.0-cdh5.4.3    File: MapReduceTestUtil.java
/**
 * Reads a task log and returns it as a trimmed string.
 * 
 * @param filter
 *          Task log filter; can be STDOUT, STDERR, SYSLOG, DEBUGOUT, PROFILE
 * @param taskId
 *          The task id for which the log has to be collected
 * @param isCleanup
 *          whether the task is a cleanup attempt or not.
 * @return task log as string
 * @throws IOException
 */
public static String readTaskLog(TaskLog.LogName filter,
    org.apache.hadoop.mapred.TaskAttemptID taskId, boolean isCleanup)
    throws IOException {
  // buffer to accumulate the task log
  StringBuffer result = new StringBuffer();
  int res;

  // open the task log as an input stream
  InputStream taskLogReader = new TaskLog.Reader(taskId, filter, 0, -1,
      isCleanup);
  // build the log string from the stream, appending only the bytes
  // actually read on each pass
  byte[] b = new byte[65536];
  while (true) {
    res = taskLogReader.read(b);
    if (res > 0) {
      result.append(new String(b, 0, res));
    } else {
      break;
    }
  }
  taskLogReader.close();

  // trim the string and return it
  String str = result.toString();
  str = str.trim();
  return str;
}
Project: hadoop-2.6.0-cdh5.4.3    File: TestStreamingStatus.java
void validateTaskStderr(StreamJob job, TaskType type)
    throws IOException {
  TaskAttemptID attemptId =
      new TaskAttemptID(new TaskID(job.jobId_, type, 0), 0);

  String log = MapReduceTestUtil.readTaskLog(TaskLog.LogName.STDERR,
      attemptId, false);

  // trim() is called on expectedStderr here because
  // MapReduceTestUtil.readTaskLog() returns a trimmed String.
  assertTrue(log.equals(expectedStderr.trim()));
}
Project: hadoop-2.6.0-cdh5.4.3    File: TaskLogsTruncater.java
/**
 * Get the log-file details for all of the attempts passed in.
 * @param allAttempts the attempts we are interested in
 * 
 * @return a map of task to the log-file detail
 * @throws IOException
 */
private Map<Task, Map<LogName, LogFileDetail>> getAllLogsFileDetails(
    final List<Task> allAttempts) throws IOException {
  Map<Task, Map<LogName, LogFileDetail>> taskLogFileDetails =
      new HashMap<Task, Map<LogName, LogFileDetail>>();
  for (Task task : allAttempts) {
    Map<LogName, LogFileDetail> allLogsFileDetails;
    allLogsFileDetails =
        TaskLog.getAllLogsFileDetails(task.getTaskID(), task.isTaskCleanupTask());
    taskLogFileDetails.put(task, allLogsFileDetails);
  }
  return taskLogFileDetails;
}
Project: hadoop-2.6.0-cdh5.4.3    File: TaskLogsTruncater.java
/**
 * Called once log truncation is done. Syncs the index files to reflect the
 * truncated sizes.
 * 
 * @param location the attempt log directory
 * @param updatedTaskLogFileDetails
 */
private void updateIndicesAfterLogTruncation(String location,
    Map<Task, Map<LogName, LogFileDetail>> updatedTaskLogFileDetails) {
  for (Entry<Task, Map<LogName, LogFileDetail>> entry : 
                              updatedTaskLogFileDetails.entrySet()) {
    Task task = entry.getKey();
    Map<LogName, LogFileDetail> logFileDetails = entry.getValue();
    Map<LogName, Long[]> logLengths = new HashMap<LogName, Long[]>();
    // set current and previous lengths
    for (LogName logName : TaskLog.LOGS_TRACKED_BY_INDEX_FILES) {
      logLengths.put(logName, new Long[] { Long.valueOf(0L),
          Long.valueOf(0L) });
      LogFileDetail lfd = logFileDetails.get(logName);
      if (lfd != null) {
        // Set previous lengths
        logLengths.get(logName)[0] = Long.valueOf(lfd.start);
        // Set current lengths
        logLengths.get(logName)[1] = Long.valueOf(lfd.start + lfd.length);
      }
    }
    try {
      TaskLog.writeToIndexFile(location, task.getTaskID(),
          task.isTaskCleanupTask(), logLengths);
    } catch (IOException ioe) {
      LOG.warn("Exception encountered while updating index file of task "
          + task.getTaskID()
          + ". Ignoring and continuing with other tasks.", ioe);
    }
  }
}
Project: hadoop-2.6.0-cdh5.4.3    File: TaskLogsTruncater.java
public static void main(String args[]) throws IOException {
  isTruncaterJvm = true;

  String taskRanFile = args[0];
  Configuration conf = new Configuration();

  //read the Task objects from the file
  LocalFileSystem lfs = FileSystem.getLocal(conf);
  FSDataInputStream din = lfs.open(new Path(taskRanFile));

  int numTasksRan = din.readInt();
  List<Task> taskAttemptsRan = new ArrayList<Task>();
  for (int i = 0; i < numTasksRan; i++) {
    Task t;
    if (din.readBoolean()) {
      t = new MapTask(); 
    } else {
      t = new ReduceTask();
    }
    t.readFields(din);
    taskAttemptsRan.add(t);
  }
  Task firstTask = taskAttemptsRan.get(0);
  TaskLogsTruncater trunc = new TaskLogsTruncater(conf);

  trunc.truncateLogs(new JVMInfo(
          TaskLog.getAttemptDir(firstTask.getTaskID(), 
                                firstTask.isTaskCleanupTask()),
                    taskAttemptsRan));
  System.exit(0);
}
Project: hadoop-2.6.0-cdh5.4.3    File: Localizer.java
/**
 * Create the job log directory and set appropriate permissions on it.
 * 
 * @param jobId the job whose user-log directory should be created
 * @throws IOException if the directory cannot be created
 */
public void initializeJobLogDir(JobID jobId) throws IOException {
  Path jobUserLogDir = new Path(TaskLog.getJobDir(jobId).getCanonicalPath());
  if (!fs.mkdirs(jobUserLogDir)) {
    throw new IOException("Could not create job user log directory: " +
                          jobUserLogDir);
  }
  fs.setPermission(jobUserLogDir, new FsPermission((short)0700));
}
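The mkdirs-then-setPermission pair above can also be written with the static FileSystem.mkdirs(FileSystem, Path, FsPermission) helper, which creates the directory and then sets the permission explicitly so the process umask does not narrow it. A minimal equivalent sketch:

Path jobUserLogDir = new Path(TaskLog.getJobDir(jobId).getCanonicalPath());
// Create the directory and force 0700 on it; fail the same way the
// hand-rolled version above does.
if (!FileSystem.mkdirs(fs, jobUserLogDir, new FsPermission((short) 0700))) {
  throw new IOException("Could not create job user log directory: " +
                        jobUserLogDir);
}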
Project: hadoop-EAR    File: TaskLogsMonitor.java
/**
 * Get the log-file details for all of the attempts passed in.
 * 
 * @param allAttempts the attempts we are interested in
 * @return a map of task to the log-file detail
 * @throws IOException
 */
private Map<Task, Map<LogName, LogFileDetail>> getAllLogsFileDetails(
    final List<Task> allAttempts) throws IOException {
  Map<Task, Map<LogName, LogFileDetail>> taskLogFileDetails =
      new HashMap<Task, Map<LogName, LogFileDetail>>();
  for (Task task : allAttempts) {
    Map<LogName, LogFileDetail> allLogsFileDetails;
    allLogsFileDetails =
        TaskLog.getAllLogsFileDetails(task.getTaskID(),
            task.isTaskCleanupTask());
    taskLogFileDetails.put(task, allLogsFileDetails);
  }
  return taskLogFileDetails;
}
Project: hadoop-EAR    File: TaskLogsMonitor.java
/**
 * Called once log truncation is done. Syncs the index files to reflect the
 * truncated sizes.
 * 
 * @param firstAttempt the first attempt that ran in the JVM
 * @param updatedTaskLogFileDetails
 */
private void updateIndicesAfterLogTruncation(TaskAttemptID firstAttempt,
    Map<Task, Map<LogName, LogFileDetail>> updatedTaskLogFileDetails) {
  for (Entry<Task, Map<LogName, LogFileDetail>> entry : 
                              updatedTaskLogFileDetails.entrySet()) {
    Task task = entry.getKey();
    Map<LogName, LogFileDetail> logFileDetails = entry.getValue();
    Map<LogName, Long[]> logLengths = new HashMap<LogName, Long[]>();
    // set current and previous lengths
    for (LogName logName : TaskLog.LOGS_TRACKED_BY_INDEX_FILES) {
      logLengths.put(logName, new Long[] { Long.valueOf(0L),
          Long.valueOf(0L) });
      LogFileDetail lfd = logFileDetails.get(logName);
      if (lfd != null) {
        // Set previous lengths
        logLengths.get(logName)[0] = Long.valueOf(lfd.start);
        // Set current lengths
        logLengths.get(logName)[1] = Long.valueOf(lfd.start + lfd.length);
      }
    }
    try {
      TaskLog.writeToIndexFile(firstAttempt, task.getTaskID(),
          task.isTaskCleanupTask(), logLengths);
    } catch (IOException ioe) {
      LOG.warn("Exception in updateIndicesAfterLogTruncation : "
          + StringUtils.stringifyException(ioe));
      LOG.warn("Exception encountered while updating index file of task "
          + task.getTaskID()
          + ". Ignoring and continuing with other tasks.");
    }
  }
}