/**
 * Creates a {@link CombineFileRecordReader} that reads each file of the
 * split with a {@link CombineFileLineRecordReader}.
 *
 * @param split   the split to read; must be a {@link CombineFileSplit}
 * @param context the task context
 * @return a reader over every file in the split
 * @throws IOException if the reader cannot be constructed
 */
@Override
public RecordReader<Text, Text> createRecordReader(InputSplit split, TaskAttemptContext context)
        throws IOException {
    // Propagate construction failures instead of printing the stack trace and
    // returning null, which would NPE later inside the framework. The parent
    // InputFormat method already declares IOException, so rethrowing is safe.
    return new CombineFileRecordReader<Text, Text>(
            (CombineFileSplit) split, context, CombineFileLineRecordReader.class);
}
/**
 * Creates a combine-file reader that delegates each file of the split to a
 * {@link DelegatingAvroRecordReader}.
 *
 * @param split   the split to read; must be a {@link CombineFileSplit}
 * @param context the task context
 * @return a reader over every file in the split
 * @throws IOException if the reader cannot be constructed
 */
@Override
@SuppressWarnings("unchecked")
public RecordReader<AvroKey<T>, NullWritable> createRecordReader(InputSplit split,
        TaskAttemptContext context) throws IOException {
    // A class literal cannot carry the generic parameters the constructor
    // expects, so launder it through Object and cast. This removes the
    // raw-typed constructor call; only the "unchecked" warning remains.
    Object readerClass = DelegatingAvroRecordReader.class;
    return new CombineFileRecordReader<AvroKey<T>, NullWritable>(
            (CombineFileSplit) split, context,
            (Class<? extends RecordReader<AvroKey<T>, NullWritable>>) readerClass);
}
/**
 * Returns a {@link CombineFileRecordReader} backed by
 * {@link BinaryFileRecordReader} instances, one per file in the split.
 *
 * @param split   the split to read; must be a {@link CombineFileSplit}
 * @param context the task context
 * @return a reader over every file in the split
 * @throws IOException if the reader cannot be constructed
 */
@Override
public CombineFileRecordReader<Text, BytesWritable> createRecordReader(InputSplit split,
        TaskAttemptContext context) throws IOException {
    if (split instanceof CombineFileSplit) {
        return new CombineFileRecordReader<Text, BytesWritable>(
                (CombineFileSplit) split, context, BinaryFileRecordReader.class);
    }
    throw new IllegalArgumentException("Input must be a Directory!");
}
/**
 * Creates a reader over all Avro files in the combined split, delegating each
 * file to a {@link CombinedAvroKeyRecordReader}. Warns when no reader schema
 * has been configured and falls back to each file's writer schema.
 *
 * @param inputSplit the split to read; must be a {@link CombineFileSplit}
 * @param context    the task context
 * @return a reader over every file in the split
 * @throws IOException if the reader cannot be constructed
 */
@SuppressWarnings("unchecked")
@Override
public RecordReader<AvroKey<T>, NullWritable> createRecordReader(InputSplit inputSplit,
        TaskAttemptContext context) throws IOException {
    if (AvroJob.getInputKeySchema(context.getConfiguration()) == null) {
        LOG.warn("Reader schema was not set. Use AvroJob.setInputKeySchema() if desired.");
        LOG.info("Using a reader schema equal to the writer schema.");
    }
    // Launder the class literal through Object so it can be cast to the
    // generically-typed Class the constructor expects.
    Object readerClass = CombinedAvroKeyRecordReader.class;
    return new CombineFileRecordReader<AvroKey<T>, NullWritable>(
            (CombineFileSplit) inputSplit, context,
            (Class<? extends RecordReader<AvroKey<T>, NullWritable>>) readerClass);
}
/**
 * Returns a {@link CombineFileRecordReader} that processes each file of the
 * split with an {@link MDSCombineSpreadReader}.
 *
 * @param split   the split to read; must be a {@link CombineFileSplit}
 * @param context the task context
 * @return a reader over every file in the split
 * @throws IOException if the reader cannot be constructed
 */
@Override
public RecordReader<NullWritable, Spread> createRecordReader(final InputSplit split,
        final TaskAttemptContext context) throws IOException {
    final CombineFileSplit combineSplit = (CombineFileSplit) split;
    return new CombineFileRecordReader<NullWritable, Spread>(
            combineSplit, context, MDSCombineSpreadReader.class);
}
/**
 * Creates a {@link CombineFileRecordReader} that reads each file of the
 * split with a {@link CombineFileLineRecordReader}, keyed by
 * {@link WordOffset}.
 *
 * @param split   the split to read; must be a {@link CombineFileSplit}
 * @param context the task context
 * @return a reader over every file in the split
 * @throws IOException if the reader cannot be constructed
 */
@Override
public RecordReader<WordOffset, Text> createRecordReader(InputSplit split,
        TaskAttemptContext context) throws IOException {
    return new CombineFileRecordReader<WordOffset, Text>(
            (CombineFileSplit) split, context, CombineFileLineRecordReader.class);
}
/**
 * Creates a combine-file reader whose per-file work is delegated to
 * {@link AvroKeyCombineFileRecordReader}.
 *
 * @param split the split to read; must be a {@link CombineFileSplit}
 * @param cx    the task context
 * @return a reader over every file in the split
 * @throws IOException if the reader cannot be constructed
 */
@Override
public RecordReader<AvroKey<GenericRecord>, NullWritable> createRecordReader(InputSplit split,
        TaskAttemptContext cx) throws IOException {
    CombineFileSplit combineSplit = (CombineFileSplit) split;
    return new CombineFileRecordReader<AvroKey<GenericRecord>, NullWritable>(
            combineSplit, cx, AvroKeyCombineFileRecordReader.class);
}
/**
 * Creates a {@link CombineFileRecordReader} that delegates each file of the
 * split to a {@link FileLineWritableRecordReader}.
 *
 * @param split   the split to read; must be a {@link CombineFileSplit}
 * @param context the task context
 * @return a reader over every file in the split
 * @throws IOException if the reader cannot be constructed
 */
@Override
public RecordReader<FileLineWritable, Text> createRecordReader(InputSplit split,
        TaskAttemptContext context) throws IOException {
    CombineFileSplit combined = (CombineFileSplit) split;
    return new CombineFileRecordReader<FileLineWritable, Text>(
            combined, context, FileLineWritableRecordReader.class);
}
/**
 * Returns a CombineFileRecordReader object; the per-file RecordReader class
 * is supplied through the CombineFileRecordReader constructor.
 *
 * NOTE(review): the delegate class passed here is CombineFileRecordReader
 * itself. The wrapper instantiates its delegate via a
 * (CombineFileSplit, TaskAttemptContext, Integer) constructor, which
 * CombineFileRecordReader does not provide — so this looks like it would
 * fail at runtime. Presumably a per-file reader (e.g. a line reader) was
 * intended; verify against the job that uses this InputFormat. The raw-typed
 * constructor call below also hides this mismatch from the compiler.
 */
@Override
public RecordReader<Text, Text> createRecordReader(InputSplit split, TaskAttemptContext context) throws IOException {
    CombineFileRecordReader<Text, Text> recordReader = new CombineFileRecordReader((CombineFileSplit) split, context, CombineFileRecordReader.class);
    return recordReader;
}
/**
 * Creates a {@link CombineFileRecordReader} that reads every file of the
 * split with a {@link RecordReaderText}.
 *
 * @param split   the split to read; must be a {@link CombineFileSplit}
 * @param context the task context
 * @return a reader over every file in the split
 * @throws IOException if the reader cannot be constructed
 */
@Override
public RecordReader<RecordKey, Text> createRecordReader(InputSplit split,
        TaskAttemptContext context) throws IOException {
    CombineFileSplit combineSplit = (CombineFileSplit) split;
    return new CombineFileRecordReader<RecordKey, Text>(
            combineSplit, context, RecordReaderText.class);
}
/**
 * Creates a combine-file reader whose per-file records are produced by
 * {@link CombineSequenceFileRecordReader}.
 *
 * @param split   the split to read; must be a {@link CombineFileSplit}
 * @param context the task context
 * @return a reader over every file in the split
 * @throws IOException if the reader cannot be constructed
 */
@SuppressWarnings("unchecked")
@Override
public RecordReader<K, V> createRecordReader(InputSplit split, TaskAttemptContext context)
        throws IOException {
    // The class literal cannot be parameterized with K and V, so launder it
    // through Object and cast. This removes the raw-typed constructor call;
    // only the "unchecked" warning remains.
    Object readerClass = CombineSequenceFileRecordReader.class;
    return new CombineFileRecordReader<K, V>(
            (CombineFileSplit) split, context,
            (Class<? extends RecordReader<K, V>>) readerClass);
}
/**
 * Returns a combine-file reader backed by an
 * {@link AvroKeyCombineFileRecordReader} for each file of the split.
 *
 * @param split the split to read; must be a {@link CombineFileSplit}
 * @param cx    the task context
 * @return a reader over every file in the split
 * @throws IOException if the reader cannot be constructed
 */
@Override
public RecordReader<AvroKey<GenericRecord>, NullWritable> createRecordReader(InputSplit split,
        TaskAttemptContext cx) throws IOException {
    return new CombineFileRecordReader<AvroKey<GenericRecord>, NullWritable>(
            (CombineFileSplit) split, cx, AvroKeyCombineFileRecordReader.class);
}
/**
 * Creates a {@link CombineFileRecordReader} that wraps each underlying file
 * in a {@link SequenceFileRecordReaderWrapper}.
 *
 * @param split   the split to read; must be a {@link CombineFileSplit}
 * @param context the task context
 * @return a reader over every file in the split
 * @throws IOException if the reader cannot be constructed
 */
@Override
public RecordReader<Writable, Text> createRecordReader(InputSplit split,
        TaskAttemptContext context) throws IOException {
    CombineFileSplit combined = (CombineFileSplit) split;
    return new CombineFileRecordReader<Writable, Text>(
            combined, context, SequenceFileRecordReaderWrapper.class);
}
/**
 * Creates a {@link CombineFileRecordReader} that reads each file of the
 * split with a {@link CFRecordReader}.
 *
 * @param split   the split to read; must be a {@link CombineFileSplit}
 * @param context the task context
 * @return a reader over every file in the split
 * @throws IOException if the reader cannot be constructed
 */
@Override
public RecordReader<FileLineWritable, Text> createRecordReader(InputSplit split,
        TaskAttemptContext context) throws IOException {
    return new CombineFileRecordReader<FileLineWritable, Text>(
            (CombineFileSplit) split, context, CFRecordReader.class);
}
/**
 * Creates a {@link CombineFileRecordReader} that delegates each file of the
 * split to a {@link MyLineRecordReader}.
 *
 * @param inputSplit         the split to read; must be a {@link CombineFileSplit}
 * @param taskAttemptContext the task context
 * @return a reader over every file in the split
 * @throws IOException if the reader cannot be constructed
 */
@Override
public RecordReader<LongWritable, Text> createRecordReader(
        org.apache.hadoop.mapreduce.InputSplit inputSplit,
        org.apache.hadoop.mapreduce.TaskAttemptContext taskAttemptContext) throws IOException {
    CombineFileSplit combineSplit = (CombineFileSplit) inputSplit;
    return new CombineFileRecordReader<LongWritable, Text>(
            combineSplit, taskAttemptContext, MyLineRecordReader.class);
}
/**
 * Creates a {@link CombineFileRecordReader} that reads each file of the
 * split with a {@link CombineFileLineRecordReader}, keyed by
 * {@link MultiFileInputWritableComparable}.
 *
 * @param split   the split to read; must be a {@link CombineFileSplit}
 * @param context the task context
 * @return a reader over every file in the split
 * @throws IOException if the reader cannot be constructed
 */
@Override
public RecordReader<MultiFileInputWritableComparable, Text> createRecordReader(InputSplit split,
        TaskAttemptContext context) throws IOException {
    return new CombineFileRecordReader<MultiFileInputWritableComparable, Text>(
            (CombineFileSplit) split, context, CombineFileLineRecordReader.class);
}
/**
 * Creates a {@link CombineFileRecordReader} to read each file assigned to
 * this split. Unlike ordinary splits, {@code split} must be a
 * {@link CombineFileSplit} and therefore may name several files, each of
 * which is read with a {@link MultipleFilesRecordReader}.
 *
 * @param split   the split to read; throws {@link IllegalArgumentException}
 *                if this is not a {@link CombineFileSplit}
 * @param context the context for this task
 * @return a reader that processes each file in {@code split}
 * @throws IOException if there is an error
 */
@Override
public RecordReader<NullWritable, BytesWritable> createRecordReader(InputSplit split,
        TaskAttemptContext context) throws IOException {
    if (!(split instanceof CombineFileSplit)) {
        throw new IllegalArgumentException("split must be a CombineFileSplit");
    }
    CombineFileSplit combined = (CombineFileSplit) split;
    return new CombineFileRecordReader<NullWritable, BytesWritable>(
            combined, context, MultipleFilesRecordReader.class);
}