Java class org.apache.hadoop.mapreduce.server.tasktracker.JVMInfo usage examples (source code)
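
The snippets below are collected from several Hadoop 1.x / CDH-era forks and show how JVMInfo is constructed and consumed by the TaskTracker's user-log machinery. As far as these snippets reveal, the class is a simple value holder: a constructor taking the attempt log directory and the list of task attempts that ran in one child JVM, plus the getters getLogLocation() and getAllAttempts(). The following minimal sketch of that surface is not taken from any of the projects below; it is placed in org.apache.hadoop.mapred only because the callers shown on this page (JvmManager, TaskLogsTruncater, DefaultTaskController) live there and Task/TaskLog are visible from that package.

package org.apache.hadoop.mapred;

import java.io.File;
import java.util.List;

import org.apache.hadoop.mapreduce.server.tasktracker.JVMInfo;

/** Minimal sketch of the JVMInfo surface exercised by the snippets below. */
class JvmInfoSketch {

  /** Wrap the attempts that shared one child JVM together with their log dir. */
  static JVMInfo toJvmInfo(List<Task> attempts) {
    Task firstTask = attempts.get(0);
    File logDir = TaskLog.getAttemptDir(firstTask.getTaskID(),
                                        firstTask.isTaskCleanupTask());
    return new JVMInfo(logDir, attempts);
  }

  /** Read back what the holder exposes. */
  static void describe(JVMInfo info) {
    File logDir = info.getLogLocation();       // attempt log directory
    int attempts = 0;
    for (Task task : info.getAllAttempts()) {  // attempts that ran in this JVM
      attempts++;
    }
    System.out.println(logDir + " holds logs for " + attempts + " attempt(s)");
  }
}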

Project: hadoop-2.6.0-cdh5.4.3    File: TaskLogsTruncater.java
/**
 * @param lInfo
 * @param taskLogFileDetails
 * @param updatedTaskLogFileDetails
 * @param logName
 */
private void copyOriginalIndexFileInfo(JVMInfo lInfo,
    Map<Task, Map<LogName, LogFileDetail>> taskLogFileDetails,
    Map<Task, Map<LogName, LogFileDetail>> updatedTaskLogFileDetails,
    LogName logName) {
  if (TaskLog.LOGS_TRACKED_BY_INDEX_FILES.contains(logName)) {
    for (Task task : lInfo.getAllAttempts()) {
      if (!updatedTaskLogFileDetails.containsKey(task)) {
        updatedTaskLogFileDetails.put(task,
            new HashMap<LogName, LogFileDetail>());
      }
      updatedTaskLogFileDetails.get(task).put(logName,
          taskLogFileDetails.get(task).get(logName));
    }
  }
}
Project: hadoop-2.6.0-cdh5.4.3    File: TaskLogsTruncater.java
/**
 * Check if truncation of logs is needed for the given jvmInfo. If all the
 * tasks that ran in a JVM are within the log-limits, then truncation is not
 * needed. Otherwise it is needed.
 * 
 * @param lInfo
 * @param taskLogFileDetails
 * @param logName
 * @return true if truncation is needed, false otherwise
 */
private boolean isTruncationNeeded(JVMInfo lInfo,
    Map<Task, Map<LogName, LogFileDetail>> taskLogFileDetails,
    LogName logName) {
  boolean truncationNeeded = false;
  LogFileDetail logFileDetail = null;
  for (Task task : lInfo.getAllAttempts()) {
    long taskRetainSize =
        (task.isMapTask() ? mapRetainSize : reduceRetainSize);
    Map<LogName, LogFileDetail> allLogsFileDetails =
        taskLogFileDetails.get(task);
    logFileDetail = allLogsFileDetails.get(logName);
    if (taskRetainSize > MINIMUM_RETAIN_SIZE_FOR_TRUNCATION
        && logFileDetail.length > taskRetainSize) {
      truncationNeeded = true;
      break;
    }
  }
  return truncationNeeded;
}
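
Both helpers above are private pieces of TaskLogsTruncater. The outline below is a simplified, hypothetical sketch of how a truncation pass could combine them; the real truncateLogs also rewrites the log files and their index entries, which is omitted here, so treat this only as a reading aid and not as the actual implementation.

private void truncationPassSketch(JVMInfo lInfo,
    Map<Task, Map<LogName, LogFileDetail>> taskLogFileDetails) {
  // Hypothetical driver, not the actual truncateLogs body.
  Map<Task, Map<LogName, LogFileDetail>> updatedTaskLogFileDetails =
      new HashMap<Task, Map<LogName, LogFileDetail>>();
  for (LogName logName : LogName.values()) {
    if (!isTruncationNeeded(lInfo, taskLogFileDetails, logName)) {
      // Nothing exceeds its retain-size: keep the original index entries.
      copyOriginalIndexFileInfo(lInfo, taskLogFileDetails,
          updatedTaskLogFileDetails, logName);
      continue;
    }
    // ... truncate the per-attempt regions of this log and record the new
    //     offsets/lengths in updatedTaskLogFileDetails (omitted in this sketch) ...
  }
  // ... finally rewrite the index files from updatedTaskLogFileDetails (omitted) ...
}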
Project: hadoop-on-lustre    File: TaskLogsTruncater.java
/**
 * @param lInfo
 * @param taskLogFileDetails
 * @param updatedTaskLogFileDetails
 * @param logName
 */
private void copyOriginalIndexFileInfo(JVMInfo lInfo,
    Map<Task, Map<LogName, LogFileDetail>> taskLogFileDetails,
    Map<Task, Map<LogName, LogFileDetail>> updatedTaskLogFileDetails,
    LogName logName) {
  if (TaskLog.LOGS_TRACKED_BY_INDEX_FILES.contains(logName)) {
    for (Task task : lInfo.getAllAttempts()) {
      if (!updatedTaskLogFileDetails.containsKey(task)) {
        updatedTaskLogFileDetails.put(task,
            new HashMap<LogName, LogFileDetail>());
      }
      updatedTaskLogFileDetails.get(task).put(logName,
          taskLogFileDetails.get(task).get(logName));
    }
  }
}
Project: hadoop-on-lustre    File: TaskLogsTruncater.java
/**
 * Check if truncation of logs is needed for the given jvmInfo. If all the
 * tasks that ran in a JVM are within the log-limits, then truncation is not
 * needed. Otherwise it is needed.
 * 
 * @param lInfo
 * @param taskLogFileDetails
 * @param logName
 * @return true if truncation is needed, false otherwise
 */
private boolean isTruncationNeeded(JVMInfo lInfo,
    Map<Task, Map<LogName, LogFileDetail>> taskLogFileDetails,
    LogName logName) {
  boolean truncationNeeded = false;
  LogFileDetail logFileDetail = null;
  for (Task task : lInfo.getAllAttempts()) {
    long taskRetainSize =
        (task.isMapTask() ? mapRetainSize : reduceRetainSize);
    Map<LogName, LogFileDetail> allLogsFileDetails =
        taskLogFileDetails.get(task);
    logFileDetail = allLogsFileDetails.get(logName);
    if (taskRetainSize > MINIMUM_RETAIN_SIZE_FOR_TRUNCATION
        && logFileDetail.length > taskRetainSize) {
      truncationNeeded = true;
      break;
    }
  }
  return truncationNeeded;
}
Project: hanoi-hadoop-2.0.0-cdh    File: TaskLogsTruncater.java
/**
 * @param lInfo
 * @param taskLogFileDetails
 * @param updatedTaskLogFileDetails
 * @param logName
 */
private void copyOriginalIndexFileInfo(JVMInfo lInfo,
    Map<Task, Map<LogName, LogFileDetail>> taskLogFileDetails,
    Map<Task, Map<LogName, LogFileDetail>> updatedTaskLogFileDetails,
    LogName logName) {
  if (TaskLog.LOGS_TRACKED_BY_INDEX_FILES.contains(logName)) {
    for (Task task : lInfo.getAllAttempts()) {
      if (!updatedTaskLogFileDetails.containsKey(task)) {
        updatedTaskLogFileDetails.put(task,
            new HashMap<LogName, LogFileDetail>());
      }
      updatedTaskLogFileDetails.get(task).put(logName,
          taskLogFileDetails.get(task).get(logName));
    }
  }
}
Project: hanoi-hadoop-2.0.0-cdh    File: TaskLogsTruncater.java
/**
 * Check if truncation of logs is needed for the given jvmInfo. If all the
 * tasks that ran in a JVM are within the log-limits, then truncation is not
 * needed. Otherwise it is needed.
 * 
 * @param lInfo
 * @param taskLogFileDetails
 * @param logName
 * @return true if truncation is needed, false otherwise
 */
private boolean isTruncationNeeded(JVMInfo lInfo,
    Map<Task, Map<LogName, LogFileDetail>> taskLogFileDetails,
    LogName logName) {
  boolean truncationNeeded = false;
  LogFileDetail logFileDetail = null;
  for (Task task : lInfo.getAllAttempts()) {
    long taskRetainSize =
        (task.isMapTask() ? mapRetainSize : reduceRetainSize);
    Map<LogName, LogFileDetail> allLogsFileDetails =
        taskLogFileDetails.get(task);
    logFileDetail = allLogsFileDetails.get(logName);
    if (taskRetainSize > MINIMUM_RETAIN_SIZE_FOR_TRUNCATION
        && logFileDetail.length > taskRetainSize) {
      truncationNeeded = true;
      break;
    }
  }
  return truncationNeeded;
}
Project: mammoth    File: TaskLogsTruncater.java
/**
 * @param lInfo
 * @param taskLogFileDetails
 * @param updatedTaskLogFileDetails
 * @param logName
 */
private void copyOriginalIndexFileInfo(JVMInfo lInfo,
    Map<Task, Map<LogName, LogFileDetail>> taskLogFileDetails,
    Map<Task, Map<LogName, LogFileDetail>> updatedTaskLogFileDetails,
    LogName logName) {
  if (TaskLog.LOGS_TRACKED_BY_INDEX_FILES.contains(logName)) {
    for (Task task : lInfo.getAllAttempts()) {
      if (!updatedTaskLogFileDetails.containsKey(task)) {
        updatedTaskLogFileDetails.put(task,
            new HashMap<LogName, LogFileDetail>());
      }
      updatedTaskLogFileDetails.get(task).put(logName,
          taskLogFileDetails.get(task).get(logName));
    }
  }
}
Project: mammoth    File: TaskLogsTruncater.java
/**
 * Check if truncation of logs is needed for the given jvmInfo. If all the
 * tasks that ran in a JVM are within the log-limits, then truncation is not
 * needed. Otherwise it is needed.
 * 
 * @param lInfo
 * @param taskLogFileDetails
 * @param logName
 * @return true if truncation is needed, false otherwise
 */
private boolean isTruncationNeeded(JVMInfo lInfo,
    Map<Task, Map<LogName, LogFileDetail>> taskLogFileDetails,
    LogName logName) {
  boolean truncationNeeded = false;
  LogFileDetail logFileDetail = null;
  for (Task task : lInfo.getAllAttempts()) {
    long taskRetainSize =
        (task.isMapTask() ? mapRetainSize : reduceRetainSize);
    Map<LogName, LogFileDetail> allLogsFileDetails =
        taskLogFileDetails.get(task);
    logFileDetail = allLogsFileDetails.get(logName);
    if (taskRetainSize > MINIMUM_RETAIN_SIZE_FOR_TRUNCATION
        && logFileDetail.length > taskRetainSize) {
      truncationNeeded = true;
      break;
    }
  }
  return truncationNeeded;
}
Project: hortonworks-extension    File: TaskLogsTruncater.java
/**
 * @param lInfo
 * @param taskLogFileDetails
 * @param updatedTaskLogFileDetails
 * @param logName
 */
private void copyOriginalIndexFileInfo(JVMInfo lInfo,
    Map<Task, Map<LogName, LogFileDetail>> taskLogFileDetails,
    Map<Task, Map<LogName, LogFileDetail>> updatedTaskLogFileDetails,
    LogName logName) {
  if (TaskLog.LOGS_TRACKED_BY_INDEX_FILES.contains(logName)) {
    for (Task task : lInfo.getAllAttempts()) {
      if (!updatedTaskLogFileDetails.containsKey(task)) {
        updatedTaskLogFileDetails.put(task,
            new HashMap<LogName, LogFileDetail>());
      }
      updatedTaskLogFileDetails.get(task).put(logName,
          taskLogFileDetails.get(task).get(logName));
    }
  }
}
Project: hortonworks-extension    File: TaskLogsTruncater.java
/**
 * Check if truncation of logs is needed for the given jvmInfo. If all the
 * tasks that ran in a JVM are within the log-limits, then truncation is not
 * needed. Otherwise it is needed.
 * 
 * @param lInfo
 * @param taskLogFileDetails
 * @param logName
 * @return true if truncation is needed, false otherwise
 */
private boolean isTruncationNeeded(JVMInfo lInfo,
    Map<Task, Map<LogName, LogFileDetail>> taskLogFileDetails,
    LogName logName) {
  boolean truncationNeeded = false;
  LogFileDetail logFileDetail = null;
  for (Task task : lInfo.getAllAttempts()) {
    long taskRetainSize =
        (task.isMapTask() ? mapRetainSize : reduceRetainSize);
    Map<LogName, LogFileDetail> allLogsFileDetails =
        taskLogFileDetails.get(task);
    logFileDetail = allLogsFileDetails.get(logName);
    if (taskRetainSize > MINIMUM_RETAIN_SIZE_FOR_TRUNCATION
        && logFileDetail.length > taskRetainSize) {
      truncationNeeded = true;
      break;
    }
  }
  return truncationNeeded;
}
Project: hadoop-2.6.0-cdh5.4.3    File: JvmManager.java
private void jvmFinished() {
  JvmFinishedEvent jfe = new JvmFinishedEvent(new JVMInfo(
      TaskLog.getAttemptDir(firstTask.getTaskID(), 
                            firstTask.isTaskCleanupTask()),
      tasksGiven));
  tracker.getUserLogManager().addLogEvent(jfe);
}
Project: hadoop-2.6.0-cdh5.4.3    File: TaskLogsTruncater.java
/**
 * Check the log file sizes generated by the attempts that ran in a
 * particular JVM
 * @param lInfo
 * @return is truncation required?
 * @throws IOException
 */
public boolean shouldTruncateLogs(JVMInfo lInfo) throws IOException {
  // Read the log-file details for all the attempts that ran in this JVM
  Map<Task, Map<LogName, LogFileDetail>> taskLogFileDetails;
  try {
    taskLogFileDetails = getAllLogsFileDetails(lInfo.getAllAttempts());
  } catch (IOException e) {
    LOG.warn(
        "Exception in truncateLogs while getting allLogsFileDetails()."
            + " Ignoring the truncation of logs of this process.", e);
    return false;
  }

  File attemptLogDir = lInfo.getLogLocation();

  for (LogName logName : LogName.values()) {

    File logFile = new File(attemptLogDir, logName.toString());

    if (logFile.exists()) {
      if(!isTruncationNeeded(lInfo, taskLogFileDetails, logName)) {
        LOG.debug("Truncation is not needed for "
            + logFile.getAbsolutePath());
      } else return true;
    }
  }
  return false;
}
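
shouldTruncateLogs is the cheap pre-check: it only inspects the recorded log-file lengths and never touches the file contents. Below is a sketch of a caller that uses it to skip unnecessary truncation work; conf and attempts are placeholders assumed to be supplied by the surrounding TaskTracker code and are not part of the snippets on this page.

// Sketch of a caller; `conf` and `attempts` are assumed to be in scope.
Task firstTask = attempts.get(0);
JVMInfo jvmInfo = new JVMInfo(
    TaskLog.getAttemptDir(firstTask.getTaskID(),
                          firstTask.isTaskCleanupTask()),
    attempts);
TaskLogsTruncater truncater = new TaskLogsTruncater(conf);
if (truncater.shouldTruncateLogs(jvmInfo)) {
  truncater.truncateLogs(jvmInfo);  // only pay for truncation when needed
}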
Project: hadoop-2.6.0-cdh5.4.3    File: TaskLogsTruncater.java
public static void main(String args[]) throws IOException {
  isTruncaterJvm = true;

  String taskRanFile = args[0];
  Configuration conf = new Configuration();

  //read the Task objects from the file
  LocalFileSystem lfs = FileSystem.getLocal(conf);
  FSDataInputStream din = lfs.open(new Path(taskRanFile));

  int numTasksRan = din.readInt();
  List<Task> taskAttemptsRan = new ArrayList<Task>();
  for (int i = 0; i < numTasksRan; i++) {
    Task t;
    if (din.readBoolean()) {
      t = new MapTask(); 
    } else {
      t = new ReduceTask();
    }
    t.readFields(din);
    taskAttemptsRan.add(t);
  }
  Task firstTask = taskAttemptsRan.get(0);
  TaskLogsTruncater trunc = new TaskLogsTruncater(conf);

  trunc.truncateLogs(new JVMInfo(
          TaskLog.getAttemptDir(firstTask.getTaskID(), 
                                firstTask.isTaskCleanupTask()),
                    taskAttemptsRan));
  System.exit(0);
}
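
The layout of the taskRanFile that main() consumes is implied entirely by the reads above: an int count, then for each attempt a boolean (map vs. reduce) followed by the task's Writable fields. The sketch below shows the matching writer side; the TaskTracker code that actually produces this file is not shown on this page, so this is only an inference from the read path.

static void writeTaskRanFile(Configuration conf, String taskRanFile,
    List<Task> taskAttemptsRan) throws IOException {
  LocalFileSystem lfs = FileSystem.getLocal(conf);
  FSDataOutputStream dout = lfs.create(new Path(taskRanFile));
  try {
    dout.writeInt(taskAttemptsRan.size());
    for (Task t : taskAttemptsRan) {
      dout.writeBoolean(t.isMapTask()); // mirrors din.readBoolean() above
      t.write(dout);                    // Task implements Writable
    }
  } finally {
    dout.close();
  }
}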
Project: hadoop-2.6.0-cdh5.4.3    File: DefaultTaskController.java
@Override
public void truncateLogsAsUser(String user, List<Task> allAttempts)
  throws IOException {
  Task firstTask = allAttempts.get(0);
  TaskLogsTruncater trunc = new TaskLogsTruncater(getConf());

  trunc.truncateLogs(new JVMInfo(
          TaskLog.getAttemptDir(firstTask.getTaskID(), 
                                firstTask.isTaskCleanupTask()),
                     allAttempts));
}
Project: hadoop-on-lustre    File: JvmManager.java
private void jvmFinished() {
  JvmFinishedEvent jfe = new JvmFinishedEvent(new JVMInfo(
      TaskLog.getAttemptDir(firstTask.getTaskID(), 
                            firstTask.isTaskCleanupTask()),
      tasksGiven));
  tracker.getUserLogManager().addLogEvent(jfe);
}
Project: hadoop-on-lustre    File: TaskLogsTruncater.java
/**
 * Check the log file sizes generated by the attempts that ran in a
 * particular JVM
 * @param lInfo
 * @return is truncation required?
 * @throws IOException
 */
public boolean shouldTruncateLogs(JVMInfo lInfo) throws IOException {
  // Read the log-file details for all the attempts that ran in this JVM
  Map<Task, Map<LogName, LogFileDetail>> taskLogFileDetails;
  try {
    taskLogFileDetails = getAllLogsFileDetails(lInfo.getAllAttempts());
  } catch (IOException e) {
    LOG.warn(
        "Exception in truncateLogs while getting allLogsFileDetails()."
            + " Ignoring the truncation of logs of this process.", e);
    return false;
  }

  File attemptLogDir = lInfo.getLogLocation();

  for (LogName logName : LogName.values()) {

    File logFile = new File(attemptLogDir, logName.toString());

    if (logFile.exists()) {
      if(!isTruncationNeeded(lInfo, taskLogFileDetails, logName)) {
        LOG.debug("Truncation is not needed for "
            + logFile.getAbsolutePath());
      } else return true;
    }
  }
  return false;
}
Project: hadoop-on-lustre    File: TaskLogsTruncater.java
public static void main(String args[]) throws IOException {
  String taskRanFile = args[0];
  Configuration conf = new Configuration();

  //read the Task objects from the file
  LocalFileSystem lfs = FileSystem.getLocal(conf);
  FSDataInputStream din = lfs.open(new Path(taskRanFile));

  int numTasksRan = din.readInt();
  List<Task> taskAttemptsRan = new ArrayList<Task>();
  for (int i = 0; i < numTasksRan; i++) {
    Task t;
    if (din.readBoolean()) {
      t = new MapTask(); 
    } else {
      t = new ReduceTask();
    }
    t.readFields(din);
    taskAttemptsRan.add(t);
  }
  Task firstTask = taskAttemptsRan.get(0);
  TaskLogsTruncater trunc = new TaskLogsTruncater(conf);

  trunc.truncateLogs(new JVMInfo(
          TaskLog.getAttemptDir(firstTask.getTaskID(), 
                                firstTask.isTaskCleanupTask()),
                    taskAttemptsRan));
  System.exit(0);
}
Project: hadoop-on-lustre    File: DefaultTaskController.java
@Override
public void truncateLogsAsUser(String user, List<Task> allAttempts)
  throws IOException {
  Task firstTask = allAttempts.get(0);
  TaskLogsTruncater trunc = new TaskLogsTruncater(getConf());

  trunc.truncateLogs(new JVMInfo(
          TaskLog.getAttemptDir(firstTask.getTaskID(), 
                                firstTask.isTaskCleanupTask()),
                     allAttempts));
}
Project: hanoi-hadoop-2.0.0-cdh    File: JvmManager.java
private void jvmFinished() {
  JvmFinishedEvent jfe = new JvmFinishedEvent(new JVMInfo(
      TaskLog.getAttemptDir(firstTask.getTaskID(), 
                            firstTask.isTaskCleanupTask()),
      tasksGiven));
  tracker.getUserLogManager().addLogEvent(jfe);
}
Project: hanoi-hadoop-2.0.0-cdh    File: TaskLogsTruncater.java
/**
 * Check the log file sizes generated by the attempts that ran in a
 * particular JVM
 * @param lInfo
 * @return is truncation required?
 * @throws IOException
 */
public boolean shouldTruncateLogs(JVMInfo lInfo) throws IOException {
  // Read the log-file details for all the attempts that ran in this JVM
  Map<Task, Map<LogName, LogFileDetail>> taskLogFileDetails;
  try {
    taskLogFileDetails = getAllLogsFileDetails(lInfo.getAllAttempts());
  } catch (IOException e) {
    LOG.warn(
        "Exception in truncateLogs while getting allLogsFileDetails()."
            + " Ignoring the truncation of logs of this process.", e);
    return false;
  }

  File attemptLogDir = lInfo.getLogLocation();

  for (LogName logName : LogName.values()) {

    File logFile = new File(attemptLogDir, logName.toString());

    if (logFile.exists()) {
      if(!isTruncationNeeded(lInfo, taskLogFileDetails, logName)) {
        LOG.debug("Truncation is not needed for "
            + logFile.getAbsolutePath());
      } else return true;
    }
  }
  return false;
}
Project: hanoi-hadoop-2.0.0-cdh    File: TaskLogsTruncater.java
public static void main(String args[]) throws IOException {
  isTruncaterJvm = true;

  String taskRanFile = args[0];
  Configuration conf = new Configuration();

  //read the Task objects from the file
  LocalFileSystem lfs = FileSystem.getLocal(conf);
  FSDataInputStream din = lfs.open(new Path(taskRanFile));

  int numTasksRan = din.readInt();
  List<Task> taskAttemptsRan = new ArrayList<Task>();
  for (int i = 0; i < numTasksRan; i++) {
    Task t;
    if (din.readBoolean()) {
      t = new MapTask(); 
    } else {
      t = new ReduceTask();
    }
    t.readFields(din);
    taskAttemptsRan.add(t);
  }
  Task firstTask = taskAttemptsRan.get(0);
  TaskLogsTruncater trunc = new TaskLogsTruncater(conf);

  trunc.truncateLogs(new JVMInfo(
          TaskLog.getAttemptDir(firstTask.getTaskID(), 
                                firstTask.isTaskCleanupTask()),
                    taskAttemptsRan));
  System.exit(0);
}
Project: hanoi-hadoop-2.0.0-cdh    File: DefaultTaskController.java
@Override
public void truncateLogsAsUser(String user, List<Task> allAttempts)
  throws IOException {
  Task firstTask = allAttempts.get(0);
  TaskLogsTruncater trunc = new TaskLogsTruncater(getConf());

  trunc.truncateLogs(new JVMInfo(
          TaskLog.getAttemptDir(firstTask.getTaskID(), 
                                firstTask.isTaskCleanupTask()),
                     allAttempts));
}
Project: mammoth    File: JvmManager.java
private void jvmFinished() {
  JvmFinishedEvent jfe = new JvmFinishedEvent(new JVMInfo(
      TaskLog.getAttemptDir(firstTask.getTaskID(), 
                            firstTask.isTaskCleanupTask()),
      tasksGiven));
  tracker.getUserLogManager().addLogEvent(jfe);
}
Project: mammoth    File: TaskLogsTruncater.java
/**
 * Check the log file sizes generated by the attempts that ran in a
 * particular JVM
 * @param lInfo
 * @return is truncation required?
 * @throws IOException
 */
public boolean shouldTruncateLogs(JVMInfo lInfo) throws IOException {
  // Read the log-file details for all the attempts that ran in this JVM
  Map<Task, Map<LogName, LogFileDetail>> taskLogFileDetails;
  try {
    taskLogFileDetails = getAllLogsFileDetails(lInfo.getAllAttempts());
  } catch (IOException e) {
    LOG.warn(
        "Exception in truncateLogs while getting allLogsFileDetails()."
            + " Ignoring the truncation of logs of this process.", e);
    return false;
  }

  File attemptLogDir = lInfo.getLogLocation();

  for (LogName logName : LogName.values()) {

    File logFile = new File(attemptLogDir, logName.toString());

    if (logFile.exists()) {
      if(!isTruncationNeeded(lInfo, taskLogFileDetails, logName)) {
        LOG.debug("Truncation is not needed for "
            + logFile.getAbsolutePath());
      } else return true;
    }
  }
  return false;
}
Project: mammoth    File: TaskLogsTruncater.java
public static void main(String args[]) throws IOException {
  String taskRanFile = args[0];
  Configuration conf = new Configuration();

  //read the Task objects from the file
  LocalFileSystem lfs = FileSystem.getLocal(conf);
  FSDataInputStream din = lfs.open(new Path(taskRanFile));

  int numTasksRan = din.readInt();
  List<Task> taskAttemptsRan = new ArrayList<Task>();
  for (int i = 0; i < numTasksRan; i++) {
    Task t;
    if (din.readBoolean()) {
      t = new MapTask(); 
    } else {
      t = new ReduceTask();
    }
    t.readFields(din);
    taskAttemptsRan.add(t);
  }
  Task firstTask = taskAttemptsRan.get(0);
  TaskLogsTruncater trunc = new TaskLogsTruncater(conf);

  trunc.truncateLogs(new JVMInfo(
          TaskLog.getAttemptDir(firstTask.getTaskID(), 
                                firstTask.isTaskCleanupTask()),
                    taskAttemptsRan));
  LOG.info("System.exit(0) TaskLogsTruncater");
  System.exit(0);
}
Project: hortonworks-extension    File: JvmManager.java
private void jvmFinished() {
  JvmFinishedEvent jfe = new JvmFinishedEvent(new JVMInfo(
      TaskLog.getAttemptDir(firstTask.getTaskID(), 
                            firstTask.isTaskCleanupTask()),
      tasksGiven));
  tracker.getUserLogManager().addLogEvent(jfe);
}
Project: hortonworks-extension    File: TaskLogsTruncater.java
/**
 * Check the log file sizes generated by the attempts that ran in a
 * particular JVM
 * @param lInfo
 * @return is truncation required?
 * @throws IOException
 */
public boolean shouldTruncateLogs(JVMInfo lInfo) throws IOException {
  // Read the log-file details for all the attempts that ran in this JVM
  Map<Task, Map<LogName, LogFileDetail>> taskLogFileDetails;
  try {
    taskLogFileDetails = getAllLogsFileDetails(lInfo.getAllAttempts());
  } catch (IOException e) {
    LOG.warn(
        "Exception in truncateLogs while getting allLogsFileDetails()."
            + " Ignoring the truncation of logs of this process.", e);
    return false;
  }

  File attemptLogDir = lInfo.getLogLocation();

  for (LogName logName : LogName.values()) {

    File logFile = new File(attemptLogDir, logName.toString());

    if (logFile.exists()) {
      if(!isTruncationNeeded(lInfo, taskLogFileDetails, logName)) {
        LOG.debug("Truncation is not needed for "
            + logFile.getAbsolutePath());
      } else return true;
    }
  }
  return false;
}
Project: hortonworks-extension    File: TaskLogsTruncater.java
public static void main(String args[]) throws IOException {
  String taskRanFile = args[0];
  Configuration conf = new Configuration();

  //read the Task objects from the file
  LocalFileSystem lfs = FileSystem.getLocal(conf);
  FSDataInputStream din = lfs.open(new Path(taskRanFile));

  int numTasksRan = din.readInt();
  List<Task> taskAttemptsRan = new ArrayList<Task>();
  for (int i = 0; i < numTasksRan; i++) {
    Task t;
    if (din.readBoolean()) {
      t = new MapTask(); 
    } else {
      t = new ReduceTask();
    }
    t.readFields(din);
    taskAttemptsRan.add(t);
  }
  Task firstTask = taskAttemptsRan.get(0);
  TaskLogsTruncater trunc = new TaskLogsTruncater(conf);

  trunc.truncateLogs(new JVMInfo(
          TaskLog.getAttemptDir(firstTask.getTaskID(), 
                                firstTask.isTaskCleanupTask()),
                    taskAttemptsRan));
  System.exit(0);
}
Project: hortonworks-extension    File: DefaultTaskController.java
@Override
public void truncateLogsAsUser(String user, List<Task> allAttempts)
  throws IOException {
  Task firstTask = allAttempts.get(0);
  TaskLogsTruncater trunc = new TaskLogsTruncater(getConf());

  trunc.truncateLogs(new JVMInfo(
          TaskLog.getAttemptDir(firstTask.getTaskID(), 
                                firstTask.isTaskCleanupTask()),
                     allAttempts));
}
Project: hadoop-2.6.0-cdh5.4.3    File: TestTaskLogsTruncater.java
/**
 * Test the truncation of log-file.
 * 
 * It writes two log files and truncates one but not the other.
 * 
 * @throws IOException
 */
@Test
public void testLogTruncation() throws IOException {
  Configuration conf = setRetainSizes(1000L, 1000L);
  TaskLogsTruncater trunc = new TaskLogsTruncater(conf);

  TaskID baseId = new TaskID();
  int taskcount = 0;

  TaskAttemptID attemptID = new TaskAttemptID(baseId, taskcount++);
  Task task = new MapTask(null, attemptID, 0, new JobSplit.TaskSplitIndex(), 
                          0);

  // Let the tasks write logs more than retain-size
  writeRandomBytes(attemptID, attemptID, LogName.SYSLOG, 1500);
  writeRandomBytes(attemptID, attemptID, LogName.STDERR, 500);

  File attemptDir = TaskLog.getAttemptDir(attemptID, false);
  assertTrue(attemptDir + " doesn't exist!", attemptDir.exists());

  // Finish the task and the JVM too.
  JVMInfo jvmInfo = new JVMInfo(attemptDir, Arrays.asList(task));
  trunc.truncateLogs(jvmInfo);

  // The log-file should now be truncated.
  assertTrue(attemptDir.exists());

  Map<LogName, Long> logLengths = getAllLogsFileLengths(attemptID, false);
  File logFile = TaskLog.getTaskLogFile(attemptID, false, LogName.SYSLOG);
  assertEquals(1000 + truncatedMsgSize, logFile.length());
  // The index file should also be proper.
  assertEquals(1000 + truncatedMsgSize, logLengths.get(LogName.SYSLOG)
      .longValue());
  String syslog = TestMiniMRMapRedDebugScript.readTaskLog(LogName.SYSLOG,
      attemptID, false);
  assertTrue(syslog.startsWith(TaskLogsTruncater.TRUNCATED_MSG));
  logFile = TaskLog.getTaskLogFile(attemptID, false, LogName.STDERR);
  assertEquals(500, logFile.length());
  // The index file should also be proper.
  assertEquals(500, logLengths.get(LogName.STDERR).longValue());
  String stderr = TestMiniMRMapRedDebugScript.readTaskLog(LogName.STDERR,
      attemptID, false);
  assertFalse(stderr.startsWith(TaskLogsTruncater.TRUNCATED_MSG));

  // truncate once again
  trunc.truncateLogs(jvmInfo);
  logLengths = getAllLogsFileLengths(attemptID, false);
  logFile = TaskLog.getTaskLogFile(attemptID, false, LogName.SYSLOG);
  assertEquals(1000 + truncatedMsgSize, logFile.length());
  // The index file should also be proper.
  assertEquals(1000 + truncatedMsgSize, logLengths.get(LogName.SYSLOG)
      .longValue());
  logFile = TaskLog.getTaskLogFile(attemptID, false, LogName.STDERR);
  assertEquals(500, logFile.length());
  // The index file should also be proper.
  assertEquals(500, logLengths.get(LogName.STDERR).longValue());
}
Project: hadoop-on-lustre    File: TestTaskLogsTruncater.java
/**
 * Test the truncation of log-file.
 * 
 * It writes two log files and truncates one but not the other.
 * 
 * @throws IOException
 */
@Test
public void testLogTruncation() throws IOException {
  Configuration conf = setRetainSizes(1000L, 1000L);
  TaskLogsTruncater trunc = new TaskLogsTruncater(conf);

  TaskID baseId = new TaskID();
  int taskcount = 0;

  TaskAttemptID attemptID = new TaskAttemptID(baseId, taskcount++);
  Task task = new MapTask(null, attemptID, 0, new JobSplit.TaskSplitIndex(), 
                          0);

  // Let the tasks write logs more than retain-size
  writeRandomBytes(attemptID, attemptID, LogName.SYSLOG, 1500);
  writeRandomBytes(attemptID, attemptID, LogName.STDERR, 500);

  File attemptDir = TaskLog.getAttemptDir(attemptID, false);
  assertTrue(attemptDir + " doesn't exist!", attemptDir.exists());

  // Finish the task and the JVM too.
  JVMInfo jvmInfo = new JVMInfo(attemptDir, Arrays.asList(task));
  trunc.truncateLogs(jvmInfo);

  // The log-file should now be truncated.
  assertTrue(attemptDir.exists());

  Map<LogName, Long> logLengths = getAllLogsFileLengths(attemptID, false);
  File logFile = TaskLog.getTaskLogFile(attemptID, false, LogName.SYSLOG);
  assertEquals(1000 + truncatedMsgSize, logFile.length());
  // The index file should also be proper.
  assertEquals(1000 + truncatedMsgSize, logLengths.get(LogName.SYSLOG)
      .longValue());
  String syslog = TestMiniMRMapRedDebugScript.readTaskLog(LogName.SYSLOG,
      attemptID, false);
  assertTrue(syslog.startsWith(TaskLogsTruncater.TRUNCATED_MSG));
  logFile = TaskLog.getTaskLogFile(attemptID, false, LogName.STDERR);
  assertEquals(500, logFile.length());
  // The index file should also be proper.
  assertEquals(500, logLengths.get(LogName.STDERR).longValue());
  String stderr = TestMiniMRMapRedDebugScript.readTaskLog(LogName.STDERR,
      attemptID, false);
  assertFalse(stderr.startsWith(TaskLogsTruncater.TRUNCATED_MSG));

  // truncate once again
  trunc.truncateLogs(jvmInfo);
  logLengths = getAllLogsFileLengths(attemptID, false);
  logFile = TaskLog.getTaskLogFile(attemptID, false, LogName.SYSLOG);
  assertEquals(1000 + truncatedMsgSize, logFile.length());
  // The index file should also be proper.
  assertEquals(1000 + truncatedMsgSize, logLengths.get(LogName.SYSLOG)
      .longValue());
  logFile = TaskLog.getTaskLogFile(attemptID, false, LogName.STDERR);
  assertEquals(500, logFile.length());
  // The index file should also be proper.
  assertEquals(500, logLengths.get(LogName.STDERR).longValue());
}
Project: hanoi-hadoop-2.0.0-cdh    File: TestTaskLogsTruncater.java
/**
 * Test the truncation of log-file.
 * 
 * It writes two log files and truncates one but not the other.
 * 
 * @throws IOException
 */
@Test
public void testLogTruncation() throws IOException {
  Configuration conf = setRetainSizes(1000L, 1000L);
  TaskLogsTruncater trunc = new TaskLogsTruncater(conf);

  TaskID baseId = new TaskID();
  int taskcount = 0;

  TaskAttemptID attemptID = new TaskAttemptID(baseId, taskcount++);
  Task task = new MapTask(null, attemptID, 0, new JobSplit.TaskSplitIndex(), 
                          0);

  // Let the tasks write logs more than retain-size
  writeRandomBytes(attemptID, attemptID, LogName.SYSLOG, 1500);
  writeRandomBytes(attemptID, attemptID, LogName.STDERR, 500);

  File attemptDir = TaskLog.getAttemptDir(attemptID, false);
  assertTrue(attemptDir + " doesn't exist!", attemptDir.exists());

  // Finish the task and the JVM too.
  JVMInfo jvmInfo = new JVMInfo(attemptDir, Arrays.asList(task));
  trunc.truncateLogs(jvmInfo);

  // The log-file should now be truncated.
  assertTrue(attemptDir.exists());

  Map<LogName, Long> logLengths = getAllLogsFileLengths(attemptID, false);
  File logFile = TaskLog.getTaskLogFile(attemptID, false, LogName.SYSLOG);
  assertEquals(1000 + truncatedMsgSize, logFile.length());
  // The index file should also be proper.
  assertEquals(1000 + truncatedMsgSize, logLengths.get(LogName.SYSLOG)
      .longValue());
  String syslog = TestMiniMRMapRedDebugScript.readTaskLog(LogName.SYSLOG,
      attemptID, false);
  assertTrue(syslog.startsWith(TaskLogsTruncater.TRUNCATED_MSG));
  logFile = TaskLog.getTaskLogFile(attemptID, false, LogName.STDERR);
  assertEquals(500, logFile.length());
  // The index file should also be proper.
  assertEquals(500, logLengths.get(LogName.STDERR).longValue());
  String stderr = TestMiniMRMapRedDebugScript.readTaskLog(LogName.STDERR,
      attemptID, false);
  assertFalse(stderr.startsWith(TaskLogsTruncater.TRUNCATED_MSG));

  // truncate once again
  trunc.truncateLogs(jvmInfo);
  logLengths = getAllLogsFileLengths(attemptID, false);
  logFile = TaskLog.getTaskLogFile(attemptID, false, LogName.SYSLOG);
  assertEquals(1000 + truncatedMsgSize, logFile.length());
  // The index file should also be proper.
  assertEquals(1000 + truncatedMsgSize, logLengths.get(LogName.SYSLOG)
      .longValue());
  logFile = TaskLog.getTaskLogFile(attemptID, false, LogName.STDERR);
  assertEquals(500, logFile.length());
  // The index file should also be proper.
  assertEquals(500, logLengths.get(LogName.STDERR).longValue());
}
Project: hortonworks-extension    File: TestTaskLogsTruncater.java
/**
 * Test the truncation of log-file.
 * 
 * It writes two log files and truncates one but not the other.
 * 
 * @throws IOException
 */
@Test
public void testLogTruncation() throws IOException {
  Configuration conf = setRetainSizes(1000L, 1000L);
  TaskLogsTruncater trunc = new TaskLogsTruncater(conf);

  TaskID baseId = new TaskID();
  int taskcount = 0;

  TaskAttemptID attemptID = new TaskAttemptID(baseId, taskcount++);
  Task task = new MapTask(null, attemptID, 0, new JobSplit.TaskSplitIndex(), 
                          0);

  // Let the tasks write logs more than retain-size
  writeRandomBytes(attemptID, attemptID, LogName.SYSLOG, 1500);
  writeRandomBytes(attemptID, attemptID, LogName.STDERR, 500);

  File attemptDir = TaskLog.getAttemptDir(attemptID, false);
  assertTrue(attemptDir + " doesn't exist!", attemptDir.exists());

  // Finish the task and the JVM too.
  JVMInfo jvmInfo = new JVMInfo(attemptDir, Arrays.asList(task));
  trunc.truncateLogs(jvmInfo);

  // The log-file should now be truncated.
  assertTrue(attemptDir.exists());

  Map<LogName, Long> logLengths = getAllLogsFileLengths(attemptID, false);
  File logFile = TaskLog.getTaskLogFile(attemptID, false, LogName.SYSLOG);
  assertEquals(1000 + truncatedMsgSize, logFile.length());
  // The index file should also be proper.
  assertEquals(1000 + truncatedMsgSize, logLengths.get(LogName.SYSLOG)
      .longValue());
  String syslog = TestMiniMRMapRedDebugScript.readTaskLog(LogName.SYSLOG,
      attemptID, false);
  assertTrue(syslog.startsWith(TaskLogsTruncater.TRUNCATED_MSG));
  logFile = TaskLog.getTaskLogFile(attemptID, false, LogName.STDERR);
  assertEquals(500, logFile.length());
  // The index file should also be proper.
  assertEquals(500, logLengths.get(LogName.STDERR).longValue());
  String stderr = TestMiniMRMapRedDebugScript.readTaskLog(LogName.STDERR,
      attemptID, false);
  assertFalse(stderr.startsWith(TaskLogsTruncater.TRUNCATED_MSG));

  // truncate once again
  trunc.truncateLogs(jvmInfo);
  logLengths = getAllLogsFileLengths(attemptID, false);
  logFile = TaskLog.getTaskLogFile(attemptID, false, LogName.SYSLOG);
  assertEquals(1000 + truncatedMsgSize, logFile.length());
  // The index file should also be proper.
  assertEquals(1000 + truncatedMsgSize, logLengths.get(LogName.SYSLOG)
      .longValue());
  logFile = TaskLog.getTaskLogFile(attemptID, false, LogName.STDERR);
  assertEquals(500, logFile.length());
  // The index file should also be proper.
  assertEquals(500, logLengths.get(LogName.STDERR).longValue());
}