Example source code for the Java class org.apache.hadoop.mapreduce.lib.input.FileInputFormatCounter

Project: hadoop    File: TestJobCounters.java
private void validateFileCounters(Counters counter, long fileBytesRead,
    long fileBytesWritten, long mapOutputBytes,
    long mapOutputMaterializedBytes) {
  assertTrue(counter.findCounter(FileInputFormatCounter.BYTES_READ)
      .getValue() != 0);
  assertEquals(fileBytesRead,
      counter.findCounter(FileInputFormatCounter.BYTES_READ).getValue());

  assertTrue(counter.findCounter(FileOutputFormatCounter.BYTES_WRITTEN)
      .getValue() != 0);

  if (mapOutputBytes >= 0) {
    assertTrue(counter.findCounter(TaskCounter.MAP_OUTPUT_BYTES).getValue() != 0);
  }
  if (mapOutputMaterializedBytes >= 0) {
    assertTrue(counter.findCounter(TaskCounter.MAP_OUTPUT_MATERIALIZED_BYTES)
        .getValue() != 0);
  }
}
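
The validateFileCounters test above asserts against the aggregated counters of a finished job. For orientation, here is a minimal client-side sketch (not taken from any of the listed projects) that reads the same FileInputFormatCounter.BYTES_READ value from a completed org.apache.hadoop.mapreduce.Job; the job parameter and the surrounding class are assumptions for illustration only.

import org.apache.hadoop.mapreduce.Counters;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormatCounter;

public class FileInputBytesReport {
  // Assumes "job" has already completed successfully.
  public static void printBytesRead(Job job) throws Exception {
    Counters counters = job.getCounters();
    long bytesRead = counters
        .findCounter(FileInputFormatCounter.BYTES_READ) // same enum-based lookup as in the tests
        .getValue();
    System.out.println("FileInputFormatCounter.BYTES_READ = " + bytesRead);
  }
}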
Project: hadoop    File: MapTask.java
TrackedRecordReader(TaskReporter reporter, JobConf job) 
  throws IOException{
  inputRecordCounter = reporter.getCounter(TaskCounter.MAP_INPUT_RECORDS);
  fileInputByteCounter = reporter.getCounter(FileInputFormatCounter.BYTES_READ);
  this.reporter = reporter;

  List<Statistics> matchedStats = null;
  if (this.reporter.getInputSplit() instanceof FileSplit) {
    matchedStats = getFsStatistics(((FileSplit) this.reporter
        .getInputSplit()).getPath(), job);
  }
  fsStats = matchedStats;

  bytesInPrev = getInputBytes(fsStats);
  rawIn = job.getInputFormat().getRecordReader(reporter.getInputSplit(),
      job, reporter);
  bytesInCurr = getInputBytes(fsStats);
  fileInputByteCounter.increment(bytesInCurr - bytesInPrev);
}
Project: hadoop    File: MapTask.java
NewTrackingRecordReader(org.apache.hadoop.mapreduce.InputSplit split,
    org.apache.hadoop.mapreduce.InputFormat<K, V> inputFormat,
    TaskReporter reporter,
    org.apache.hadoop.mapreduce.TaskAttemptContext taskContext)
    throws InterruptedException, IOException {
  this.reporter = reporter;
  this.inputRecordCounter = reporter
      .getCounter(TaskCounter.MAP_INPUT_RECORDS);
  this.fileInputByteCounter = reporter
      .getCounter(FileInputFormatCounter.BYTES_READ);

  List <Statistics> matchedStats = null;
  if (split instanceof org.apache.hadoop.mapreduce.lib.input.FileSplit) {
    matchedStats = getFsStatistics(((org.apache.hadoop.mapreduce.lib.input.FileSplit) split)
        .getPath(), taskContext.getConfiguration());
  }
  fsStats = matchedStats;

  long bytesInPrev = getInputBytes(fsStats);
  this.real = inputFormat.createRecordReader(split, taskContext);
  long bytesInCurr = getInputBytes(fsStats);
  fileInputByteCounter.increment(bytesInCurr - bytesInPrev);
}
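
Both TrackedRecordReader (old mapred API) and NewTrackingRecordReader (new mapreduce API) use the same accounting pattern: snapshot the FileSystem.Statistics matched to the split's filesystem, open the underlying record reader, and credit the byte delta to FileInputFormatCounter.BYTES_READ. The getFsStatistics and getInputBytes helpers are defined elsewhere in MapTask; the following is only a rough, hedged sketch of what the byte-summing helper can look like, and the real implementation may differ in detail.

import java.util.List;
import org.apache.hadoop.fs.FileSystem.Statistics;

// Hedged sketch of a getInputBytes-style helper: sum the bytes read across the
// matched FileSystem statistics, treating "no matched statistics" as zero,
// which mirrors how the constructors above handle non-file splits.
class InputByteAccounting {
  static long getInputBytes(List<Statistics> stats) {
    if (stats == null) {
      return 0;
    }
    long bytesRead = 0;
    for (Statistics stat : stats) {
      bytesRead += stat.getBytesRead();
    }
    return bytesRead;
  }
}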
Project: aliyun-oss-hadoop-fs    File: TestJobCounters.java
private void validateFileCounters(Counters counter, long fileBytesRead,
    long fileBytesWritten, long mapOutputBytes,
    long mapOutputMaterializedBytes) {
  assertTrue(counter.findCounter(FileInputFormatCounter.BYTES_READ)
      .getValue() != 0);
  assertEquals(fileBytesRead,
      counter.findCounter(FileInputFormatCounter.BYTES_READ).getValue());

  assertTrue(counter.findCounter(FileOutputFormatCounter.BYTES_WRITTEN)
      .getValue() != 0);

  if (mapOutputBytes >= 0) {
    assertTrue(counter.findCounter(TaskCounter.MAP_OUTPUT_BYTES).getValue() != 0);
  }
  if (mapOutputMaterializedBytes >= 0) {
    assertTrue(counter.findCounter(TaskCounter.MAP_OUTPUT_MATERIALIZED_BYTES)
        .getValue() != 0);
  }
}
Project: aliyun-oss-hadoop-fs    File: MapTask.java
TrackedRecordReader(TaskReporter reporter, JobConf job) 
  throws IOException{
  inputRecordCounter = reporter.getCounter(TaskCounter.MAP_INPUT_RECORDS);
  fileInputByteCounter = reporter.getCounter(FileInputFormatCounter.BYTES_READ);
  this.reporter = reporter;

  List<Statistics> matchedStats = null;
  if (this.reporter.getInputSplit() instanceof FileSplit) {
    matchedStats = getFsStatistics(((FileSplit) this.reporter
        .getInputSplit()).getPath(), job);
  }
  fsStats = matchedStats;

  bytesInPrev = getInputBytes(fsStats);
  rawIn = job.getInputFormat().getRecordReader(reporter.getInputSplit(),
      job, reporter);
  bytesInCurr = getInputBytes(fsStats);
  fileInputByteCounter.increment(bytesInCurr - bytesInPrev);
}
Project: aliyun-oss-hadoop-fs    File: MapTask.java
NewTrackingRecordReader(org.apache.hadoop.mapreduce.InputSplit split,
    org.apache.hadoop.mapreduce.InputFormat<K, V> inputFormat,
    TaskReporter reporter,
    org.apache.hadoop.mapreduce.TaskAttemptContext taskContext)
    throws InterruptedException, IOException {
  this.reporter = reporter;
  this.inputRecordCounter = reporter
      .getCounter(TaskCounter.MAP_INPUT_RECORDS);
  this.fileInputByteCounter = reporter
      .getCounter(FileInputFormatCounter.BYTES_READ);

  List <Statistics> matchedStats = null;
  if (split instanceof org.apache.hadoop.mapreduce.lib.input.FileSplit) {
    matchedStats = getFsStatistics(((org.apache.hadoop.mapreduce.lib.input.FileSplit) split)
        .getPath(), taskContext.getConfiguration());
  }
  fsStats = matchedStats;

  long bytesInPrev = getInputBytes(fsStats);
  this.real = inputFormat.createRecordReader(split, taskContext);
  long bytesInCurr = getInputBytes(fsStats);
  fileInputByteCounter.increment(bytesInCurr - bytesInPrev);
}
Project: big-c    File: TestJobCounters.java
private void validateFileCounters(Counters counter, long fileBytesRead,
    long fileBytesWritten, long mapOutputBytes,
    long mapOutputMaterializedBytes) {
  assertTrue(counter.findCounter(FileInputFormatCounter.BYTES_READ)
      .getValue() != 0);
  assertEquals(fileBytesRead,
      counter.findCounter(FileInputFormatCounter.BYTES_READ).getValue());

  assertTrue(counter.findCounter(FileOutputFormatCounter.BYTES_WRITTEN)
      .getValue() != 0);

  if (mapOutputBytes >= 0) {
    assertTrue(counter.findCounter(TaskCounter.MAP_OUTPUT_BYTES).getValue() != 0);
  }
  if (mapOutputMaterializedBytes >= 0) {
    assertTrue(counter.findCounter(TaskCounter.MAP_OUTPUT_MATERIALIZED_BYTES)
        .getValue() != 0);
  }
}
Project: big-c    File: MapTask.java
TrackedRecordReader(TaskReporter reporter, JobConf job) 
  throws IOException{
  inputRecordCounter = reporter.getCounter(TaskCounter.MAP_INPUT_RECORDS);
  fileInputByteCounter = reporter.getCounter(FileInputFormatCounter.BYTES_READ);
  this.reporter = reporter;

  List<Statistics> matchedStats = null;
  if (this.reporter.getInputSplit() instanceof FileSplit) {
    matchedStats = getFsStatistics(((FileSplit) this.reporter
        .getInputSplit()).getPath(), job);
  }
  fsStats = matchedStats;

  bytesInPrev = getInputBytes(fsStats);
  rawIn = job.getInputFormat().getRecordReader(reporter.getInputSplit(),
      job, reporter);
  bytesInCurr = getInputBytes(fsStats);
  fileInputByteCounter.increment(bytesInCurr - bytesInPrev);
}
Project: big-c    File: MapTask.java
NewTrackingRecordReader(org.apache.hadoop.mapreduce.InputSplit split,
    org.apache.hadoop.mapreduce.InputFormat<K, V> inputFormat,
    TaskReporter reporter,
    org.apache.hadoop.mapreduce.TaskAttemptContext taskContext)
    throws InterruptedException, IOException {
  this.reporter = reporter;
  this.inputRecordCounter = reporter
      .getCounter(TaskCounter.MAP_INPUT_RECORDS);
  this.fileInputByteCounter = reporter
      .getCounter(FileInputFormatCounter.BYTES_READ);

  List <Statistics> matchedStats = null;
  if (split instanceof org.apache.hadoop.mapreduce.lib.input.FileSplit) {
    matchedStats = getFsStatistics(((org.apache.hadoop.mapreduce.lib.input.FileSplit) split)
        .getPath(), taskContext.getConfiguration());
  }
  fsStats = matchedStats;

  long bytesInPrev = getInputBytes(fsStats);
  this.real = inputFormat.createRecordReader(split, taskContext);
  long bytesInCurr = getInputBytes(fsStats);
  fileInputByteCounter.increment(bytesInCurr - bytesInPrev);
}
Project: hadoop-2.6.0-cdh5.4.3    File: TestJobCounters.java
private void validateFileCounters(Counters counter, long fileBytesRead,
    long fileBytesWritten, long mapOutputBytes,
    long mapOutputMaterializedBytes) {
  assertTrue(counter.findCounter(FileInputFormatCounter.BYTES_READ)
      .getValue() != 0);
  assertEquals(fileBytesRead,
      counter.findCounter(FileInputFormatCounter.BYTES_READ).getValue());

  assertTrue(counter.findCounter(FileOutputFormatCounter.BYTES_WRITTEN)
      .getValue() != 0);

  if (mapOutputBytes >= 0) {
    assertTrue(counter.findCounter(TaskCounter.MAP_OUTPUT_BYTES).getValue() != 0);
  }
  if (mapOutputMaterializedBytes >= 0) {
    assertTrue(counter.findCounter(TaskCounter.MAP_OUTPUT_MATERIALIZED_BYTES)
        .getValue() != 0);
  }
}
Project: hadoop-2.6.0-cdh5.4.3    File: MapTask.java
TrackedRecordReader(TaskReporter reporter, JobConf job) 
  throws IOException{
  inputRecordCounter = reporter.getCounter(TaskCounter.MAP_INPUT_RECORDS);
  fileInputByteCounter = reporter.getCounter(FileInputFormatCounter.BYTES_READ);
  this.reporter = reporter;

  List<Statistics> matchedStats = null;
  if (this.reporter.getInputSplit() instanceof FileSplit) {
    matchedStats = getFsStatistics(((FileSplit) this.reporter
        .getInputSplit()).getPath(), job);
  }
  fsStats = matchedStats;

  bytesInPrev = getInputBytes(fsStats);
  rawIn = job.getInputFormat().getRecordReader(reporter.getInputSplit(),
      job, reporter);
  bytesInCurr = getInputBytes(fsStats);
  fileInputByteCounter.increment(bytesInCurr - bytesInPrev);
}
Project: hadoop-2.6.0-cdh5.4.3    File: MapTask.java
NewTrackingRecordReader(org.apache.hadoop.mapreduce.InputSplit split,
    org.apache.hadoop.mapreduce.InputFormat<K, V> inputFormat,
    TaskReporter reporter,
    org.apache.hadoop.mapreduce.TaskAttemptContext taskContext)
    throws InterruptedException, IOException {
  this.reporter = reporter;
  this.inputRecordCounter = reporter
      .getCounter(TaskCounter.MAP_INPUT_RECORDS);
  this.fileInputByteCounter = reporter
      .getCounter(FileInputFormatCounter.BYTES_READ);

  List <Statistics> matchedStats = null;
  if (split instanceof org.apache.hadoop.mapreduce.lib.input.FileSplit) {
    matchedStats = getFsStatistics(((org.apache.hadoop.mapreduce.lib.input.FileSplit) split)
        .getPath(), taskContext.getConfiguration());
  }
  fsStats = matchedStats;

  long bytesInPrev = getInputBytes(fsStats);
  this.real = inputFormat.createRecordReader(split, taskContext);
  long bytesInCurr = getInputBytes(fsStats);
  fileInputByteCounter.increment(bytesInCurr - bytesInPrev);
}
Project: hadoop-plus    File: TestJobCounters.java
private void validateFileCounters(Counters counter, long fileBytesRead,
    long fileBytesWritten, long mapOutputBytes,
    long mapOutputMaterializedBytes) {
  assertTrue(counter.findCounter(FileInputFormatCounter.BYTES_READ)
      .getValue() != 0);
  assertEquals(fileBytesRead,
      counter.findCounter(FileInputFormatCounter.BYTES_READ).getValue());

  assertTrue(counter.findCounter(FileOutputFormatCounter.BYTES_WRITTEN)
      .getValue() != 0);

  if (mapOutputBytes >= 0) {
    assertTrue(counter.findCounter(TaskCounter.MAP_OUTPUT_BYTES).getValue() != 0);
  }
  if (mapOutputMaterializedBytes >= 0) {
    assertTrue(counter.findCounter(TaskCounter.MAP_OUTPUT_MATERIALIZED_BYTES)
        .getValue() != 0);
  }
}
Project: hadoop-plus    File: MapTask.java
TrackedRecordReader(TaskReporter reporter, JobConf job) 
  throws IOException{
  inputRecordCounter = reporter.getCounter(TaskCounter.MAP_INPUT_RECORDS);
  fileInputByteCounter = reporter.getCounter(FileInputFormatCounter.BYTES_READ);
  this.reporter = reporter;

  List<Statistics> matchedStats = null;
  if (this.reporter.getInputSplit() instanceof FileSplit) {
    matchedStats = getFsStatistics(((FileSplit) this.reporter
        .getInputSplit()).getPath(), job);
  }
  fsStats = matchedStats;

  bytesInPrev = getInputBytes(fsStats);
  rawIn = job.getInputFormat().getRecordReader(reporter.getInputSplit(),
      job, reporter);
  bytesInCurr = getInputBytes(fsStats);
  fileInputByteCounter.increment(bytesInCurr - bytesInPrev);
}
Project: hadoop-plus    File: MapTask.java
NewTrackingRecordReader(org.apache.hadoop.mapreduce.InputSplit split,
    org.apache.hadoop.mapreduce.InputFormat<K, V> inputFormat,
    TaskReporter reporter,
    org.apache.hadoop.mapreduce.TaskAttemptContext taskContext)
    throws InterruptedException, IOException {
  this.reporter = reporter;
  this.inputRecordCounter = reporter
      .getCounter(TaskCounter.MAP_INPUT_RECORDS);
  this.fileInputByteCounter = reporter
      .getCounter(FileInputFormatCounter.BYTES_READ);

  List <Statistics> matchedStats = null;
  if (split instanceof org.apache.hadoop.mapreduce.lib.input.FileSplit) {
    matchedStats = getFsStatistics(((org.apache.hadoop.mapreduce.lib.input.FileSplit) split)
        .getPath(), taskContext.getConfiguration());
  }
  fsStats = matchedStats;

  long bytesInPrev = getInputBytes(fsStats);
  this.real = inputFormat.createRecordReader(split, taskContext);
  long bytesInCurr = getInputBytes(fsStats);
  fileInputByteCounter.increment(bytesInCurr - bytesInPrev);
}
Project: FlexMap    File: TestJobCounters.java
private void validateFileCounters(Counters counter, long fileBytesRead,
    long fileBytesWritten, long mapOutputBytes,
    long mapOutputMaterializedBytes) {
  assertTrue(counter.findCounter(FileInputFormatCounter.BYTES_READ)
      .getValue() != 0);
  assertEquals(fileBytesRead,
      counter.findCounter(FileInputFormatCounter.BYTES_READ).getValue());

  assertTrue(counter.findCounter(FileOutputFormatCounter.BYTES_WRITTEN)
      .getValue() != 0);

  if (mapOutputBytes >= 0) {
    assertTrue(counter.findCounter(TaskCounter.MAP_OUTPUT_BYTES).getValue() != 0);
  }
  if (mapOutputMaterializedBytes >= 0) {
    assertTrue(counter.findCounter(TaskCounter.MAP_OUTPUT_MATERIALIZED_BYTES)
        .getValue() != 0);
  }
}
Project: FlexMap    File: MultiMapTask.java
NewMultiTrackingRecordReader(org.apache.hadoop.mapreduce.InputSplit splits[],
    org.apache.hadoop.mapreduce.InputFormat<K, V> inputFormat,
    TaskReporter reporter,
    org.apache.hadoop.mapreduce.TaskAttemptContext taskContext)
    throws InterruptedException, IOException {
  this.reporter = reporter;
  this.inputRecordCounter = reporter
      .getCounter(TaskCounter.MAP_INPUT_RECORDS);
  this.fileInputByteCounter = reporter
      .getCounter(FileInputFormatCounter.BYTES_READ);
  this.context=taskContext;
  this.splits=splits;
  this.inputFormat=inputFormat;
  this.splitsLength=0;       

}
Project: FlexMap    File: MapTask.java
TrackedRecordReader(TaskReporter reporter, JobConf job) 
  throws IOException{
  inputRecordCounter = reporter.getCounter(TaskCounter.MAP_INPUT_RECORDS);
  fileInputByteCounter = reporter.getCounter(FileInputFormatCounter.BYTES_READ);
  this.reporter = reporter;

  List<Statistics> matchedStats = null;
  if (this.reporter.getInputSplit() instanceof FileSplit) {
    matchedStats = getFsStatistics(((FileSplit) this.reporter
        .getInputSplit()).getPath(), job);
  }
  fsStats = matchedStats;

  bytesInPrev = getInputBytes(fsStats);
  rawIn = job.getInputFormat().getRecordReader(reporter.getInputSplit(),
      job, reporter);
  bytesInCurr = getInputBytes(fsStats);
  fileInputByteCounter.increment(bytesInCurr - bytesInPrev);
}
Project: FlexMap    File: MapTask.java
NewTrackingRecordReader(org.apache.hadoop.mapreduce.InputSplit split,
    org.apache.hadoop.mapreduce.InputFormat<K, V> inputFormat,
    TaskReporter reporter,
    org.apache.hadoop.mapreduce.TaskAttemptContext taskContext)
    throws InterruptedException, IOException {
  this.reporter = reporter;
  this.inputRecordCounter = reporter
      .getCounter(TaskCounter.MAP_INPUT_RECORDS);
  this.fileInputByteCounter = reporter
      .getCounter(FileInputFormatCounter.BYTES_READ);

  List <Statistics> matchedStats = null;
  if (split instanceof org.apache.hadoop.mapreduce.lib.input.FileSplit) {
    matchedStats = getFsStatistics(((org.apache.hadoop.mapreduce.lib.input.FileSplit) split)
        .getPath(), taskContext.getConfiguration());
  }
  fsStats = matchedStats;

  long bytesInPrev = getInputBytes(fsStats);
  this.real = inputFormat.createRecordReader(split, taskContext);
  long bytesInCurr = getInputBytes(fsStats);
  fileInputByteCounter.increment(bytesInCurr - bytesInPrev);
}
Project: hops    File: TestJobCounters.java
private void validateFileCounters(Counters counter, long fileBytesRead,
    long fileBytesWritten, long mapOutputBytes,
    long mapOutputMaterializedBytes) {
  assertTrue(counter.findCounter(FileInputFormatCounter.BYTES_READ)
      .getValue() != 0);
  assertEquals(fileBytesRead,
      counter.findCounter(FileInputFormatCounter.BYTES_READ).getValue());

  assertTrue(counter.findCounter(FileOutputFormatCounter.BYTES_WRITTEN)
      .getValue() != 0);

  if (mapOutputBytes >= 0) {
    assertTrue(counter.findCounter(TaskCounter.MAP_OUTPUT_BYTES).getValue() != 0);
  }
  if (mapOutputMaterializedBytes >= 0) {
    assertTrue(counter.findCounter(TaskCounter.MAP_OUTPUT_MATERIALIZED_BYTES)
        .getValue() != 0);
  }
}
Project: hops    File: MapTask.java
TrackedRecordReader(TaskReporter reporter, JobConf job) 
  throws IOException{
  inputRecordCounter = reporter.getCounter(TaskCounter.MAP_INPUT_RECORDS);
  fileInputByteCounter = reporter.getCounter(FileInputFormatCounter.BYTES_READ);
  this.reporter = reporter;

  List<Statistics> matchedStats = null;
  if (this.reporter.getInputSplit() instanceof FileSplit) {
    matchedStats = getFsStatistics(((FileSplit) this.reporter
        .getInputSplit()).getPath(), job);
  }
  fsStats = matchedStats;

  bytesInPrev = getInputBytes(fsStats);
  rawIn = job.getInputFormat().getRecordReader(reporter.getInputSplit(),
      job, reporter);
  bytesInCurr = getInputBytes(fsStats);
  fileInputByteCounter.increment(bytesInCurr - bytesInPrev);
}
Project: hops    File: MapTask.java
NewTrackingRecordReader(org.apache.hadoop.mapreduce.InputSplit split,
    org.apache.hadoop.mapreduce.InputFormat<K, V> inputFormat,
    TaskReporter reporter,
    org.apache.hadoop.mapreduce.TaskAttemptContext taskContext)
    throws InterruptedException, IOException {
  this.reporter = reporter;
  this.inputRecordCounter = reporter
      .getCounter(TaskCounter.MAP_INPUT_RECORDS);
  this.fileInputByteCounter = reporter
      .getCounter(FileInputFormatCounter.BYTES_READ);

  List <Statistics> matchedStats = null;
  if (split instanceof org.apache.hadoop.mapreduce.lib.input.FileSplit) {
    matchedStats = getFsStatistics(((org.apache.hadoop.mapreduce.lib.input.FileSplit) split)
        .getPath(), taskContext.getConfiguration());
  }
  fsStats = matchedStats;

  long bytesInPrev = getInputBytes(fsStats);
  this.real = inputFormat.createRecordReader(split, taskContext);
  long bytesInCurr = getInputBytes(fsStats);
  fileInputByteCounter.increment(bytesInCurr - bytesInPrev);
}
Project: hadoop-TCP    File: TestJobCounters.java
private void validateFileCounters(Counters counter, long fileBytesRead,
    long fileBytesWritten, long mapOutputBytes,
    long mapOutputMaterializedBytes) {
  assertTrue(counter.findCounter(FileInputFormatCounter.BYTES_READ)
      .getValue() != 0);
  assertEquals(fileBytesRead,
      counter.findCounter(FileInputFormatCounter.BYTES_READ).getValue());

  assertTrue(counter.findCounter(FileOutputFormatCounter.BYTES_WRITTEN)
      .getValue() != 0);

  if (mapOutputBytes >= 0) {
    assertTrue(counter.findCounter(TaskCounter.MAP_OUTPUT_BYTES).getValue() != 0);
  }
  if (mapOutputMaterializedBytes >= 0) {
    assertTrue(counter.findCounter(TaskCounter.MAP_OUTPUT_MATERIALIZED_BYTES)
        .getValue() != 0);
  }
}
Project: hadoop-TCP    File: MapTask.java
TrackedRecordReader(TaskReporter reporter, JobConf job) 
  throws IOException{
  inputRecordCounter = reporter.getCounter(TaskCounter.MAP_INPUT_RECORDS);
  fileInputByteCounter = reporter.getCounter(FileInputFormatCounter.BYTES_READ);
  this.reporter = reporter;

  List<Statistics> matchedStats = null;
  if (this.reporter.getInputSplit() instanceof FileSplit) {
    matchedStats = getFsStatistics(((FileSplit) this.reporter
        .getInputSplit()).getPath(), job);
  }
  fsStats = matchedStats;

  bytesInPrev = getInputBytes(fsStats);
  rawIn = job.getInputFormat().getRecordReader(reporter.getInputSplit(),
      job, reporter);
  bytesInCurr = getInputBytes(fsStats);
  fileInputByteCounter.increment(bytesInCurr - bytesInPrev);
}
Project: hadoop-TCP    File: MapTask.java
NewTrackingRecordReader(org.apache.hadoop.mapreduce.InputSplit split,
    org.apache.hadoop.mapreduce.InputFormat<K, V> inputFormat,
    TaskReporter reporter,
    org.apache.hadoop.mapreduce.TaskAttemptContext taskContext)
    throws InterruptedException, IOException {
  this.reporter = reporter;
  this.inputRecordCounter = reporter
      .getCounter(TaskCounter.MAP_INPUT_RECORDS);
  this.fileInputByteCounter = reporter
      .getCounter(FileInputFormatCounter.BYTES_READ);

  List <Statistics> matchedStats = null;
  if (split instanceof org.apache.hadoop.mapreduce.lib.input.FileSplit) {
    matchedStats = getFsStatistics(((org.apache.hadoop.mapreduce.lib.input.FileSplit) split)
        .getPath(), taskContext.getConfiguration());
  }
  fsStats = matchedStats;

  long bytesInPrev = getInputBytes(fsStats);
  this.real = inputFormat.createRecordReader(split, taskContext);
  long bytesInCurr = getInputBytes(fsStats);
  fileInputByteCounter.increment(bytesInCurr - bytesInPrev);
}
Project: hardfs    File: TestJobCounters.java
private void validateFileCounters(Counters counter, long fileBytesRead,
    long fileBytesWritten, long mapOutputBytes,
    long mapOutputMaterializedBytes) {
  assertTrue(counter.findCounter(FileInputFormatCounter.BYTES_READ)
      .getValue() != 0);
  assertEquals(fileBytesRead,
      counter.findCounter(FileInputFormatCounter.BYTES_READ).getValue());

  assertTrue(counter.findCounter(FileOutputFormatCounter.BYTES_WRITTEN)
      .getValue() != 0);

  if (mapOutputBytes >= 0) {
    assertTrue(counter.findCounter(TaskCounter.MAP_OUTPUT_BYTES).getValue() != 0);
  }
  if (mapOutputMaterializedBytes >= 0) {
    assertTrue(counter.findCounter(TaskCounter.MAP_OUTPUT_MATERIALIZED_BYTES)
        .getValue() != 0);
  }
}
Project: hardfs    File: MapTask.java
TrackedRecordReader(TaskReporter reporter, JobConf job) 
  throws IOException{
  inputRecordCounter = reporter.getCounter(TaskCounter.MAP_INPUT_RECORDS);
  fileInputByteCounter = reporter.getCounter(FileInputFormatCounter.BYTES_READ);
  this.reporter = reporter;

  List<Statistics> matchedStats = null;
  if (this.reporter.getInputSplit() instanceof FileSplit) {
    matchedStats = getFsStatistics(((FileSplit) this.reporter
        .getInputSplit()).getPath(), job);
  }
  fsStats = matchedStats;

  bytesInPrev = getInputBytes(fsStats);
  rawIn = job.getInputFormat().getRecordReader(reporter.getInputSplit(),
      job, reporter);
  bytesInCurr = getInputBytes(fsStats);
  fileInputByteCounter.increment(bytesInCurr - bytesInPrev);
}
Project: hardfs    File: MapTask.java
NewTrackingRecordReader(org.apache.hadoop.mapreduce.InputSplit split,
    org.apache.hadoop.mapreduce.InputFormat<K, V> inputFormat,
    TaskReporter reporter,
    org.apache.hadoop.mapreduce.TaskAttemptContext taskContext)
    throws InterruptedException, IOException {
  this.reporter = reporter;
  this.inputRecordCounter = reporter
      .getCounter(TaskCounter.MAP_INPUT_RECORDS);
  this.fileInputByteCounter = reporter
      .getCounter(FileInputFormatCounter.BYTES_READ);

  List <Statistics> matchedStats = null;
  if (split instanceof org.apache.hadoop.mapreduce.lib.input.FileSplit) {
    matchedStats = getFsStatistics(((org.apache.hadoop.mapreduce.lib.input.FileSplit) split)
        .getPath(), taskContext.getConfiguration());
  }
  fsStats = matchedStats;

  long bytesInPrev = getInputBytes(fsStats);
  this.real = inputFormat.createRecordReader(split, taskContext);
  long bytesInCurr = getInputBytes(fsStats);
  fileInputByteCounter.increment(bytesInCurr - bytesInPrev);
}
Project: hadoop-on-lustre2    File: TestJobCounters.java
private void validateFileCounters(Counters counter, long fileBytesRead,
    long fileBytesWritten, long mapOutputBytes,
    long mapOutputMaterializedBytes) {
  assertTrue(counter.findCounter(FileInputFormatCounter.BYTES_READ)
      .getValue() != 0);
  assertEquals(fileBytesRead,
      counter.findCounter(FileInputFormatCounter.BYTES_READ).getValue());

  assertTrue(counter.findCounter(FileOutputFormatCounter.BYTES_WRITTEN)
      .getValue() != 0);

  if (mapOutputBytes >= 0) {
    assertTrue(counter.findCounter(TaskCounter.MAP_OUTPUT_BYTES).getValue() != 0);
  }
  if (mapOutputMaterializedBytes >= 0) {
    assertTrue(counter.findCounter(TaskCounter.MAP_OUTPUT_MATERIALIZED_BYTES)
        .getValue() != 0);
  }
}
Project: hadoop-on-lustre2    File: MapTask.java
TrackedRecordReader(TaskReporter reporter, JobConf job) 
  throws IOException{
  inputRecordCounter = reporter.getCounter(TaskCounter.MAP_INPUT_RECORDS);
  fileInputByteCounter = reporter.getCounter(FileInputFormatCounter.BYTES_READ);
  this.reporter = reporter;

  List<Statistics> matchedStats = null;
  if (this.reporter.getInputSplit() instanceof FileSplit) {
    matchedStats = getFsStatistics(((FileSplit) this.reporter
        .getInputSplit()).getPath(), job);
  }
  fsStats = matchedStats;

  bytesInPrev = getInputBytes(fsStats);
  rawIn = job.getInputFormat().getRecordReader(reporter.getInputSplit(),
      job, reporter);
  bytesInCurr = getInputBytes(fsStats);
  fileInputByteCounter.increment(bytesInCurr - bytesInPrev);
}
Project: hadoop-on-lustre2    File: MapTask.java
NewTrackingRecordReader(org.apache.hadoop.mapreduce.InputSplit split,
    org.apache.hadoop.mapreduce.InputFormat<K, V> inputFormat,
    TaskReporter reporter,
    org.apache.hadoop.mapreduce.TaskAttemptContext taskContext)
    throws InterruptedException, IOException {
  this.reporter = reporter;
  this.inputRecordCounter = reporter
      .getCounter(TaskCounter.MAP_INPUT_RECORDS);
  this.fileInputByteCounter = reporter
      .getCounter(FileInputFormatCounter.BYTES_READ);

  List <Statistics> matchedStats = null;
  if (split instanceof org.apache.hadoop.mapreduce.lib.input.FileSplit) {
    matchedStats = getFsStatistics(((org.apache.hadoop.mapreduce.lib.input.FileSplit) split)
        .getPath(), taskContext.getConfiguration());
  }
  fsStats = matchedStats;

  long bytesInPrev = getInputBytes(fsStats);
  this.real = inputFormat.createRecordReader(split, taskContext);
  long bytesInCurr = getInputBytes(fsStats);
  fileInputByteCounter.increment(bytesInCurr - bytesInPrev);
}
Project: hadoop    File: Counters.java
@SuppressWarnings({ "deprecation" })
private static void initDepricatedMap() {
  depricatedCounterMap.put(FileInputFormat.Counter.class.getName(),
    FileInputFormatCounter.class.getName());
  depricatedCounterMap.put(FileOutputFormat.Counter.class.getName(),
    FileOutputFormatCounter.class.getName());
  depricatedCounterMap.put(
    org.apache.hadoop.mapreduce.lib.input.FileInputFormat.Counter.class
      .getName(), FileInputFormatCounter.class.getName());
  depricatedCounterMap.put(
    org.apache.hadoop.mapreduce.lib.output.FileOutputFormat.Counter.class
      .getName(), FileOutputFormatCounter.class.getName());
}
Project: hadoop    File: Counters.java
public synchronized Counter findCounter(String group, String name) {
  if (name.equals("MAP_INPUT_BYTES")) {
    LOG.warn("Counter name MAP_INPUT_BYTES is deprecated. " +
             "Use FileInputFormatCounters as group name and " +
             " BYTES_READ as counter name instead");
    return findCounter(FileInputFormatCounter.BYTES_READ);
  }
  String newGroupKey = getNewGroupKey(group);
  if (newGroupKey != null) {
    group = newGroupKey;
  }
  return getGroup(group).getCounterForName(name);
}
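
As the warning above indicates, any lookup that still uses the deprecated counter name MAP_INPUT_BYTES is redirected to FileInputFormatCounter.BYTES_READ, regardless of the group name passed in. A small hedged usage sketch, where counters is an assumed org.apache.hadoop.mapred.Counters instance and the group string is a placeholder:

// Both lookups resolve to the same underlying counter; the group argument is
// effectively ignored once the deprecated name MAP_INPUT_BYTES is seen.
Counters.Counter viaOldName =
    counters.findCounter("AnyLegacyGroup", "MAP_INPUT_BYTES");
Counters.Counter viaEnum =
    counters.findCounter(FileInputFormatCounter.BYTES_READ);
assert viaOldName.getValue() == viaEnum.getValue();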
Project: aliyun-oss-hadoop-fs    File: StatusReportChecker.java
protected void initUsedCounters() {
  reporter.getCounter(TaskCounter.MAP_INPUT_RECORDS);
  reporter.getCounter(TaskCounter.MAP_OUTPUT_RECORDS);
  reporter.getCounter(FileInputFormatCounter.BYTES_READ);
  reporter.getCounter(TaskCounter.MAP_OUTPUT_BYTES);
  reporter.getCounter(TaskCounter.MAP_OUTPUT_MATERIALIZED_BYTES);
  reporter.getCounter(TaskCounter.COMBINE_INPUT_RECORDS);
  reporter.getCounter(TaskCounter.COMBINE_OUTPUT_RECORDS);
  reporter.getCounter(TaskCounter.SPILLED_RECORDS);
}
Project: aliyun-oss-hadoop-fs    File: Counters.java
@SuppressWarnings({ "deprecation" })
private static void initDepricatedMap() {
  depricatedCounterMap.put(FileInputFormat.Counter.class.getName(),
    FileInputFormatCounter.class.getName());
  depricatedCounterMap.put(FileOutputFormat.Counter.class.getName(),
    FileOutputFormatCounter.class.getName());
  depricatedCounterMap.put(
    org.apache.hadoop.mapreduce.lib.input.FileInputFormat.Counter.class
      .getName(), FileInputFormatCounter.class.getName());
  depricatedCounterMap.put(
    org.apache.hadoop.mapreduce.lib.output.FileOutputFormat.Counter.class
      .getName(), FileOutputFormatCounter.class.getName());
}
Project: aliyun-oss-hadoop-fs    File: Counters.java
public synchronized Counter findCounter(String group, String name) {
  if (name.equals("MAP_INPUT_BYTES")) {
    LOG.warn("Counter name MAP_INPUT_BYTES is deprecated. " +
             "Use FileInputFormatCounters as group name and " +
             " BYTES_READ as counter name instead");
    return findCounter(FileInputFormatCounter.BYTES_READ);
  }
  String newGroupKey = getNewGroupKey(group);
  if (newGroupKey != null) {
    group = newGroupKey;
  }
  return getGroup(group).getCounterForName(name);
}
Project: big-c    File: Counters.java
@SuppressWarnings({ "deprecation" })
private static void initDepricatedMap() {
  depricatedCounterMap.put(FileInputFormat.Counter.class.getName(),
    FileInputFormatCounter.class.getName());
  depricatedCounterMap.put(FileOutputFormat.Counter.class.getName(),
    FileOutputFormatCounter.class.getName());
  depricatedCounterMap.put(
    org.apache.hadoop.mapreduce.lib.input.FileInputFormat.Counter.class
      .getName(), FileInputFormatCounter.class.getName());
  depricatedCounterMap.put(
    org.apache.hadoop.mapreduce.lib.output.FileOutputFormat.Counter.class
      .getName(), FileOutputFormatCounter.class.getName());
}
Project: big-c    File: Counters.java
public synchronized Counter findCounter(String group, String name) {
  if (name.equals("MAP_INPUT_BYTES")) {
    LOG.warn("Counter name MAP_INPUT_BYTES is deprecated. " +
             "Use FileInputFormatCounters as group name and " +
             " BYTES_READ as counter name instead");
    return findCounter(FileInputFormatCounter.BYTES_READ);
  }
  String newGroupKey = getNewGroupKey(group);
  if (newGroupKey != null) {
    group = newGroupKey;
  }
  return getGroup(group).getCounterForName(name);
}
Project: hadoop-2.6.0-cdh5.4.3    File: StatusReportChecker.java
protected void initUsedCounters() {
  reporter.getCounter(TaskCounter.MAP_INPUT_RECORDS);
  reporter.getCounter(TaskCounter.MAP_OUTPUT_RECORDS);
  reporter.getCounter(FileInputFormatCounter.BYTES_READ);
  reporter.getCounter(TaskCounter.MAP_OUTPUT_BYTES);
  reporter.getCounter(TaskCounter.MAP_OUTPUT_MATERIALIZED_BYTES);
  reporter.getCounter(TaskCounter.COMBINE_INPUT_RECORDS);
  reporter.getCounter(TaskCounter.COMBINE_OUTPUT_RECORDS);
  reporter.getCounter(TaskCounter.SPILLED_RECORDS);
}
Project: hadoop-2.6.0-cdh5.4.3    File: Counters.java
@SuppressWarnings({ "deprecation" })
private static void initDepricatedMap() {
  depricatedCounterMap.put(FileInputFormat.Counter.class.getName(),
    FileInputFormatCounter.class.getName());
  depricatedCounterMap.put(FileOutputFormat.Counter.class.getName(),
    FileOutputFormatCounter.class.getName());
  depricatedCounterMap.put(
    org.apache.hadoop.mapreduce.lib.input.FileInputFormat.Counter.class
      .getName(), FileInputFormatCounter.class.getName());
  depricatedCounterMap.put(
    org.apache.hadoop.mapreduce.lib.output.FileOutputFormat.Counter.class
      .getName(), FileOutputFormatCounter.class.getName());
}