Java class org.apache.hadoop.mapreduce.lib.input.CombineFileRecordReader — example usage snippets

项目:book-hadoop-hacks    文件:CustomCombineFileInputFormat.java   
/**
 * Creates a {@link CombineFileRecordReader} that reads each file of the
 * combined split with a {@link CombineFileLineRecordReader}.
 *
 * @param split   the input split; must be a {@link CombineFileSplit}
 * @param context the task attempt context
 * @return a record reader over every file in the combined split
 * @throws IOException if the record reader cannot be created
 */
@Override
public RecordReader<Text, Text> createRecordReader(InputSplit split,
        TaskAttemptContext context) throws IOException {
    // Propagate IOException instead of swallowing it and returning null:
    // a null reader would surface later as an opaque NullPointerException
    // in the task, hiding the real cause. (Also removed the unused
    // Configuration local the original fetched and never read.)
    return new CombineFileRecordReader<Text, Text>(
            (CombineFileSplit) split, context,
            CombineFileLineRecordReader.class);
}
项目:mara    文件:CombineAvroKeyFileInputFormat.java   
/** Reads each file of the combined split with a DelegatingAvroRecordReader. */
@Override
@SuppressWarnings({ "unchecked", "rawtypes" })
public RecordReader<AvroKey<T>, NullWritable> createRecordReader(InputSplit split,
        TaskAttemptContext context) throws IOException {
    // Raw construction: the delegate's generics cannot be expressed on a class literal.
    final CombineFileSplit combinedSplit = (CombineFileSplit) split;
    return new CombineFileRecordReader(combinedSplit, context, DelegatingAvroRecordReader.class);
}
项目:DISH    文件:BinaryFileInputFormat.java   
/**
 * Return a CombineFileRecordReader that reads each file in the split with a
 * BinaryFileRecordReader.
 *
 * @param split   the input split; must be a {@link CombineFileSplit}
 * @param context the task attempt context
 * @return a reader over every file in the combined split
 * @throws IOException if the reader cannot be constructed
 * @throws IllegalArgumentException if {@code split} is not a CombineFileSplit
 */
@Override
public CombineFileRecordReader<Text, BytesWritable> createRecordReader(InputSplit split, TaskAttemptContext context) throws IOException {

    // Fixed misleading message: the actual requirement is on the split type
    // ("Input must be a Directory!" described neither the check nor the remedy).
    if (!(split instanceof CombineFileSplit))
        throw new IllegalArgumentException("split must be a CombineFileSplit, got "
                + (split == null ? "null" : split.getClass().getName()));

    return new CombineFileRecordReader<Text, BytesWritable>((CombineFileSplit) split, context, BinaryFileRecordReader.class);
}
项目:incubator-datafu    文件:CombinedAvroKeyInputFormat.java   
/**
 * Builds a CombineFileRecordReader whose per-file delegate is a
 * CombinedAvroKeyRecordReader; warns when no reader schema is configured.
 */
@SuppressWarnings("unchecked")
@Override
public RecordReader<AvroKey<T>, NullWritable> createRecordReader(InputSplit inputSplit,
                                                                 TaskAttemptContext context) throws IOException
{
  Schema readerSchema = AvroJob.getInputKeySchema(context.getConfiguration());
  if (readerSchema == null) {
    LOG.warn("Reader schema was not set. Use AvroJob.setInputKeySchema() if desired.");
    LOG.info("Using a reader schema equal to the writer schema.");
  }

  // Unchecked cast: the reader's type parameters cannot appear on a class literal.
  Class<? extends RecordReader<AvroKey<T>, NullWritable>> readerClass =
      (Class<? extends RecordReader<AvroKey<T>, NullWritable>>) (Class<?>) CombinedAvroKeyRecordReader.class;
  return new CombineFileRecordReader<AvroKey<T>, NullWritable>((CombineFileSplit) inputSplit, context, readerClass);
}
项目:multiple-dimension-spread    文件:MDSCombineSpreadInputFormat.java   
/** Each file of the combined split is read by an MDSCombineSpreadReader. */
@Override
public RecordReader<NullWritable,Spread> createRecordReader( final InputSplit split , final TaskAttemptContext context)throws IOException{
  final CombineFileSplit combinedSplit = (CombineFileSplit)split;
  return new CombineFileRecordReader<NullWritable,Spread>( combinedSplit , context , MDSCombineSpreadReader.class );
}
项目:hadoop    文件:MultiFileWordCount.java   
/** Wraps every file of the combined split in a CombineFileLineRecordReader. */
public RecordReader<WordOffset,Text> createRecordReader(InputSplit split,
    TaskAttemptContext context) throws IOException {
  final CombineFileSplit combinedSplit = (CombineFileSplit) split;
  return new CombineFileRecordReader<WordOffset, Text>(combinedSplit, context,
      CombineFileLineRecordReader.class);
}
项目:aliyun-oss-hadoop-fs    文件:MultiFileWordCount.java   
/** Reads each file in the combined split with a CombineFileLineRecordReader. */
public RecordReader<WordOffset,Text> createRecordReader(InputSplit split,
    TaskAttemptContext context) throws IOException {
  CombineFileRecordReader<WordOffset, Text> reader =
      new CombineFileRecordReader<WordOffset, Text>((CombineFileSplit) split,
          context, CombineFileLineRecordReader.class);
  return reader;
}
项目:big-c    文件:MultiFileWordCount.java   
/** Wraps every file of the combined split in a CombineFileLineRecordReader. */
public RecordReader<WordOffset,Text> createRecordReader(InputSplit split,
    TaskAttemptContext context) throws IOException {
  final CombineFileSplit combinedSplit = (CombineFileSplit) split;
  return new CombineFileRecordReader<WordOffset, Text>(combinedSplit, context,
      CombineFileLineRecordReader.class);
}
项目:hadoop-2.6.0-cdh5.4.3    文件:MultiFileWordCount.java   
/** Reads each file in the combined split with a CombineFileLineRecordReader. */
public RecordReader<WordOffset,Text> createRecordReader(InputSplit split,
    TaskAttemptContext context) throws IOException {
  CombineFileRecordReader<WordOffset, Text> reader =
      new CombineFileRecordReader<WordOffset, Text>((CombineFileSplit) split,
          context, CombineFileLineRecordReader.class);
  return reader;
}
项目:hadoop-2.6.0-cdh5.4.3    文件:MultiFileWordCount.java   
/** Wraps every file of the combined split in a CombineFileLineRecordReader. */
public RecordReader<WordOffset,Text> createRecordReader(InputSplit split,
    TaskAttemptContext context) throws IOException {
  final CombineFileSplit combinedSplit = (CombineFileSplit) split;
  return new CombineFileRecordReader<WordOffset, Text>(combinedSplit, context,
      CombineFileLineRecordReader.class);
}
项目:Gobblin    文件:AvroKeyRecursiveCombineFileInputFormat.java   
/** Each file of the combined split is read by an AvroKeyCombineFileRecordReader. */
@Override
public RecordReader<AvroKey<GenericRecord>, NullWritable> createRecordReader(InputSplit split, TaskAttemptContext cx)
    throws IOException {
  final CombineFileSplit combinedSplit = (CombineFileSplit) split;
  return new CombineFileRecordReader<AvroKey<GenericRecord>, NullWritable>(combinedSplit, cx,
      AvroKeyCombineFileRecordReader.class);
}
项目:hadoop-plus    文件:MultiFileWordCount.java   
/** Reads each file in the combined split with a CombineFileLineRecordReader. */
public RecordReader<WordOffset,Text> createRecordReader(InputSplit split,
    TaskAttemptContext context) throws IOException {
  CombineFileRecordReader<WordOffset, Text> reader =
      new CombineFileRecordReader<WordOffset, Text>((CombineFileSplit) split,
          context, CombineFileLineRecordReader.class);
  return reader;
}
项目:mara    文件:CombineTextFileInputFormat.java   
/** Per-file records of the combined split come from a FileLineWritableRecordReader. */
@Override
public RecordReader<FileLineWritable, Text> createRecordReader(
        InputSplit split, TaskAttemptContext context) throws IOException {
    CombineFileRecordReader<FileLineWritable, Text> reader =
            new CombineFileRecordReader<FileLineWritable, Text>(
                    (CombineFileSplit) split, context, FileLineWritableRecordReader.class);
    return reader;
}
项目:FlexMap    文件:MultiFileWordCount.java   
/** Wraps every file of the combined split in a CombineFileLineRecordReader. */
public RecordReader<WordOffset,Text> createRecordReader(InputSplit split,
    TaskAttemptContext context) throws IOException {
  final CombineFileSplit combinedSplit = (CombineFileSplit) split;
  return new CombineFileRecordReader<WordOffset, Text>(combinedSplit, context,
      CombineFileLineRecordReader.class);
}
项目:hope-tactical-equipment    文件:TotalTextInputFormat.java   
/**
 * Returns a CombineFileRecordReader for the given combined split; the class
 * passed as the third constructor argument is the per-file reader that
 * CombineFileRecordReader instantiates for each file in the split.
 *
 * NOTE(review): CombineFileRecordReader.class is passed here as its own
 * per-file delegate. The delegate is constructed reflectively with a
 * (CombineFileSplit, TaskAttemptContext, Integer) constructor, which
 * CombineFileRecordReader itself does not declare — so this looks like it
 * would fail at runtime; presumably a dedicated per-file reader (e.g. a line
 * record reader) was intended. TODO confirm against the original project.
 * The raw-typed construction also produces an unchecked warning.
 */
@Override
public RecordReader<Text, Text> createRecordReader(InputSplit split, TaskAttemptContext context) throws IOException {
    CombineFileRecordReader<Text, Text> recordReader = new CombineFileRecordReader((CombineFileSplit) split, context, CombineFileRecordReader.class);
    return recordReader;
}
项目:cloudera-framework    文件:RecordTextCombineInputFormat.java   
/** Each file of the combined split is read by a RecordReaderText instance. */
@Override
public RecordReader<RecordKey, Text> createRecordReader(InputSplit split, TaskAttemptContext context) throws IOException {
  final CombineFileSplit combinedSplit = (CombineFileSplit) split;
  return new CombineFileRecordReader<RecordKey, Text>(combinedSplit, context, RecordReaderText.class);
}
项目:hops    文件:MultiFileWordCount.java   
/** Reads each file in the combined split with a CombineFileLineRecordReader. */
public RecordReader<WordOffset,Text> createRecordReader(InputSplit split,
    TaskAttemptContext context) throws IOException {
  CombineFileRecordReader<WordOffset, Text> reader =
      new CombineFileRecordReader<WordOffset, Text>((CombineFileSplit) split,
          context, CombineFileLineRecordReader.class);
  return reader;
}
项目:openimaj    文件:CombineSequenceFileInputFormat.java   
/** Each file of the combined split is read by a CombineSequenceFileRecordReader. */
@SuppressWarnings({ "unchecked", "rawtypes" })
@Override
public RecordReader<K, V> createRecordReader(InputSplit split, TaskAttemptContext context) throws IOException {
    // Raw construction: the delegate's generics cannot appear on a class literal.
    final CombineFileSplit combinedSplit = (CombineFileSplit) split;
    return new CombineFileRecordReader(combinedSplit, context, CombineSequenceFileRecordReader.class);
}
项目:glusterfs-hadoop-examples    文件:MultiFileWordCount.java   
/** Wraps every file of the combined split in a CombineFileLineRecordReader. */
public RecordReader<WordOffset,Text> createRecordReader(InputSplit split,
    TaskAttemptContext context) throws IOException {
  final CombineFileSplit combinedSplit = (CombineFileSplit) split;
  return new CombineFileRecordReader<WordOffset, Text>(combinedSplit, context,
      CombineFileLineRecordReader.class);
}
项目:incubator-gobblin    文件:AvroKeyRecursiveCombineFileInputFormat.java   
/** Each file of the combined split is read by an AvroKeyCombineFileRecordReader. */
@Override
public RecordReader<AvroKey<GenericRecord>, NullWritable> createRecordReader(InputSplit split, TaskAttemptContext cx)
    throws IOException {
  final CombineFileSplit combinedSplit = (CombineFileSplit) split;
  return new CombineFileRecordReader<AvroKey<GenericRecord>, NullWritable>(combinedSplit, cx,
      AvroKeyCombineFileRecordReader.class);
}
项目:hadoop-TCP    文件:MultiFileWordCount.java   
/** Reads each file in the combined split with a CombineFileLineRecordReader. */
public RecordReader<WordOffset,Text> createRecordReader(InputSplit split,
    TaskAttemptContext context) throws IOException {
  CombineFileRecordReader<WordOffset, Text> reader =
      new CombineFileRecordReader<WordOffset, Text>((CombineFileSplit) split,
          context, CombineFileLineRecordReader.class);
  return reader;
}
项目:hardfs    文件:MultiFileWordCount.java   
/** Wraps every file of the combined split in a CombineFileLineRecordReader. */
public RecordReader<WordOffset,Text> createRecordReader(InputSplit split,
    TaskAttemptContext context) throws IOException {
  final CombineFileSplit combinedSplit = (CombineFileSplit) split;
  return new CombineFileRecordReader<WordOffset, Text>(combinedSplit, context,
      CombineFileLineRecordReader.class);
}
项目:hadoop-on-lustre2    文件:MultiFileWordCount.java   
/** Reads each file in the combined split with a CombineFileLineRecordReader. */
public RecordReader<WordOffset,Text> createRecordReader(InputSplit split,
    TaskAttemptContext context) throws IOException {
  CombineFileRecordReader<WordOffset, Text> reader =
      new CombineFileRecordReader<WordOffset, Text>((CombineFileSplit) split,
          context, CombineFileLineRecordReader.class);
  return reader;
}
项目:incubator-blur    文件:CsvBlurDriver.java   
/** Each sequence file in the split is wrapped by a SequenceFileRecordReaderWrapper. */
@Override
public RecordReader<Writable, Text> createRecordReader(InputSplit split, TaskAttemptContext context)
    throws IOException {
  CombineFileRecordReader<Writable, Text> reader = new CombineFileRecordReader<Writable, Text>(
      (CombineFileSplit) split, context, SequenceFileRecordReaderWrapper.class);
  return reader;
}
项目:Hadoop-CombineFileInputFormat    文件:CFInputFormat.java   
/** Each file of the combined split is read by a CFRecordReader. */
public RecordReader<FileLineWritable, Text> createRecordReader(InputSplit split, TaskAttemptContext context) throws IOException{
  final CombineFileSplit combinedSplit = (CombineFileSplit) split;
  return new CombineFileRecordReader<FileLineWritable, Text>(combinedSplit, context, CFRecordReader.class);
}
项目:Hadoop-Patent-Data    文件:MultiFileWordCount.java   
/** Wraps every file of the combined split in a CombineFileLineRecordReader. */
public RecordReader<WordOffset,Text> createRecordReader(InputSplit split,
    TaskAttemptContext context) throws IOException {
  final CombineFileSplit combinedSplit = (CombineFileSplit) split;
  return new CombineFileRecordReader<WordOffset, Text>(combinedSplit, context,
      CombineFileLineRecordReader.class);
}
项目:hanoi-hadoop-2.0.0-cdh    文件:MultiFileWordCount.java   
/** Reads each file in the combined split with a CombineFileLineRecordReader. */
public RecordReader<WordOffset,Text> createRecordReader(InputSplit split,
    TaskAttemptContext context) throws IOException {
  CombineFileRecordReader<WordOffset, Text> reader =
      new CombineFileRecordReader<WordOffset, Text>((CombineFileSplit) split,
          context, CombineFileLineRecordReader.class);
  return reader;
}
项目:hanoi-hadoop-2.0.0-cdh    文件:MultiFileWordCount.java   
/** Wraps every file of the combined split in a CombineFileLineRecordReader. */
public RecordReader<WordOffset,Text> createRecordReader(InputSplit split,
    TaskAttemptContext context) throws IOException {
  final CombineFileSplit combinedSplit = (CombineFileSplit) split;
  return new CombineFileRecordReader<WordOffset, Text>(combinedSplit, context,
      CombineFileLineRecordReader.class);
}
项目:mapreduce-fork    文件:MultiFileWordCount.java   
/** Reads each file in the combined split with a CombineFileLineRecordReader. */
public RecordReader<WordOffset,Text> createRecordReader(InputSplit split,
    TaskAttemptContext context) throws IOException {
  CombineFileRecordReader<WordOffset, Text> reader =
      new CombineFileRecordReader<WordOffset, Text>((CombineFileSplit) split,
          context, CombineFileLineRecordReader.class);
  return reader;
}
项目:polar-bear    文件:CombinedTextInputFormat.java   
/** Per-file lines of the combined split are produced by MyLineRecordReader. */
@Override
public RecordReader<LongWritable, Text> createRecordReader(org.apache.hadoop.mapreduce.InputSplit inputSplit, org.apache.hadoop.mapreduce.TaskAttemptContext taskAttemptContext) throws IOException {
  final CombineFileSplit combinedSplit = (CombineFileSplit) inputSplit;
  return new CombineFileRecordReader<LongWritable, Text>(combinedSplit, taskAttemptContext, MyLineRecordReader.class);
}
项目:bigdata_pattern    文件:MyMultiFileInputFormat.java   
/** Each file of the combined split is read with a CombineFileLineRecordReader. */
public RecordReader<MultiFileInputWritableComparable,Text>
    createRecordReader(InputSplit split,TaskAttemptContext context) throws IOException
{
  final CombineFileSplit combinedSplit = (CombineFileSplit) split;
  return new CombineFileRecordReader<MultiFileInputWritableComparable, Text>(
      combinedSplit, context, CombineFileLineRecordReader.class);
}
项目:HadoopLung    文件:MultipleFilesInputFormat.java   
/**
 * Creates a CombineFileRecordReader to read each file assigned to this
 * InputSplit. Unlike ordinary InputSplits, the split must be a
 * CombineFileSplit and is therefore expected to reference multiple files.
 *
 * @param split
 *            the InputSplit to read; must be a CombineFileSplit, otherwise
 *            an IllegalArgumentException is thrown
 * @param context
 *            the context for this task
 * @return a CombineFileRecordReader that processes each file in the split
 *         with a WholeFileRecordReader
 * @throws IOException
 *             if there is an error
 */
@Override
public RecordReader<NullWritable, BytesWritable> createRecordReader(
        InputSplit split, TaskAttemptContext context) throws IOException {

    // Positive guard with early return instead of negated check.
    if (split instanceof CombineFileSplit) {
        return new CombineFileRecordReader<NullWritable, BytesWritable>(
                (CombineFileSplit) split, context,
                MultipleFilesRecordReader.class);
    }
    throw new IllegalArgumentException("split must be a CombineFileSplit");
}