Example source code for the Java class org.apache.hadoop.mapreduce.lib.input.SplitLineReader
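All of the snippets below wrap an input stream in a SplitLineReader (or its compressed variant) together with a record delimiter. As a quick orientation, here is a minimal, self-contained usage sketch assuming a plain uncompressed text file; the file path and the null delimiter (which falls back to the default line endings) are illustrative choices, not taken from any of the listed projects. Note that SplitLineReader is treated as an internal Hadoop class, so application code normally reaches it only indirectly through LineRecordReader.

Sketch: SplitLineReaderSketch.java (illustrative, not from any of the projects listed here)

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.lib.input.SplitLineReader;

public class SplitLineReaderSketch {
  public static void main(String[] args) throws IOException {
    Configuration conf = new Configuration();
    Path file = new Path(args[0]);              // path to an uncompressed text file (hypothetical)
    FileSystem fs = file.getFileSystem(conf);
    try (FSDataInputStream in = fs.open(file)) {
      // A null delimiter means the default line endings (\n, \r, \r\n) are used,
      // mirroring the recordDelimiter argument in the constructors below.
      SplitLineReader reader = new SplitLineReader(in, conf, null);
      Text line = new Text();
      long pos = 0;
      int bytesRead;
      // readLine returns the number of bytes consumed (0 at end of stream).
      while ((bytesRead = reader.readLine(line, Integer.MAX_VALUE, Integer.MAX_VALUE)) > 0) {
        pos += bytesRead;                       // running byte offset, like this.pos below
        System.out.println(pos + "\t" + line);
      }
      reader.close();
    }
  }
}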

Project: hadoop    File: LineRecordReader.java
public LineRecordReader(InputStream in, long offset, long endOffset,
    int maxLineLength, byte[] recordDelimiter) {
  this.maxLineLength = maxLineLength;
  this.in = new SplitLineReader(in, recordDelimiter);
  this.start = offset;
  this.pos = offset;
  this.end = endOffset;    
  filePosition = null;
}
Project: hadoop    File: LineRecordReader.java
public LineRecordReader(InputStream in, long offset, long endOffset, 
                        Configuration job, byte[] recordDelimiter)
  throws IOException{
  this.maxLineLength = job.getInt(org.apache.hadoop.mapreduce.lib.input.
    LineRecordReader.MAX_LINE_LENGTH, Integer.MAX_VALUE);
  this.in = new SplitLineReader(in, job, recordDelimiter);
  this.start = offset;
  this.pos = offset;
  this.end = endOffset;    
  filePosition = null;
}
Project: spark-util    File: ErrorHandlingLineRecordReader.java
public void initialize(InputSplit genericSplit, TaskAttemptContext context) {
    try {
        FileSplit split = (FileSplit) genericSplit;
        Configuration job = context.getConfiguration();
        this.maxLineLength = job.getInt("mapreduce.input.linerecordreader.line.maxlength",
            Integer.MAX_VALUE);
        this.start = split.getStart();
        this.end = this.start + split.getLength();
        Path file = split.getPath();
        FileSystem fs = file.getFileSystem(job);
        this.fileIn = fs.open(file);
        CompressionCodec codec = new CompressionCodecFactory(job).getCodec(file);
        if (null != codec) {
            this.isCompressedInput = true;
            this.decompressor = CodecPool.getDecompressor(codec);
            if (codec instanceof SplittableCompressionCodec) {
                // Splittable codec: let the codec adjust the split boundaries and
                // track the read position through the compressed stream.
                SplitCompressionInputStream cIn =
                    ((SplittableCompressionCodec) codec).createInputStream(
                        this.fileIn, this.decompressor, this.start, this.end,
                        SplittableCompressionCodec.READ_MODE.BYBLOCK);
                this.in = new CompressedSplitLineReader(cIn, job, this.recordDelimiterBytes);
                this.start = cIn.getAdjustedStart();
                this.end = cIn.getAdjustedEnd();
                this.filePosition = cIn;
            } else {
                // Non-splittable codec: the whole file is a single split, read from the top.
                this.in = new SplitLineReader(codec.createInputStream(this.fileIn, this.decompressor),
                    job, this.recordDelimiterBytes);
                this.filePosition = this.fileIn;
            }
        } else {
            // Uncompressed input: seek straight to the start of this split.
            this.fileIn.seek(this.start);
            this.in = new SplitLineReader(this.fileIn, job, this.recordDelimiterBytes);
            this.filePosition = this.fileIn;
        }

        // If the split does not begin at the start of the file, the first (partial)
        // line belongs to the previous split, so skip it.
        if (this.start != 0L) {
            this.start += (long) this.in.readLine(new Text(), 0, this.maxBytesToConsume(this.start));
        }

        this.pos = this.start;
    } catch (Exception ex) {
        LOG.warn("Exception occurred during initialization {}", ex, ex);
    }
}
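For context, the read loop that usually sits on top of a reader initialized like this follows the pattern of Hadoop's own LineRecordReader.nextKeyValue(). The sketch below is an illustrative reconstruction of that pattern, not code from the spark-util project; it reuses the fields set up above (in, pos, end, maxLineLength) plus a LongWritable key, a Text value, and the getFilePosition()/maxBytesToConsume() helpers from the same class. The needAdditionalRecordAfterSplit() hook is what distinguishes SplitLineReader from a plain LineReader: it allows one extra record to be read when a delimiter straddles the split boundary.

// Illustrative sketch of the nextKeyValue() pattern driving the reader above
// (adapted from Hadoop's LineRecordReader; not spark-util code).
public boolean nextKeyValue() throws IOException {
    if (key == null) {
        key = new LongWritable();
    }
    key.set(pos);
    if (value == null) {
        value = new Text();
    }
    int newSize = 0;
    // Read while still inside the split; needAdditionalRecordAfterSplit() permits
    // one extra record when the delimiter crosses the split boundary.
    while (getFilePosition() <= end || in.needAdditionalRecordAfterSplit()) {
        newSize = in.readLine(value, maxLineLength, maxBytesToConsume(pos));
        pos += newSize;
        if (newSize == 0 || newSize < maxLineLength) {
            break;
        }
        // Line longer than maxLineLength: skip it and keep looking for one that fits.
        LOG.info("Skipped line of size " + newSize + " at pos " + (pos - newSize));
    }
    if (newSize == 0) {
        key = null;
        value = null;
        return false;
    }
    return true;
}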
Project: aliyun-oss-hadoop-fs    File: LineRecordReader.java
public LineRecordReader(InputStream in, long offset, long endOffset,
    int maxLineLength, byte[] recordDelimiter) {
  this.maxLineLength = maxLineLength;
  this.in = new SplitLineReader(in, recordDelimiter);
  this.start = offset;
  this.pos = offset;
  this.end = endOffset;    
  filePosition = null;
}
Project: aliyun-oss-hadoop-fs    File: LineRecordReader.java
public LineRecordReader(InputStream in, long offset, long endOffset, 
                        Configuration job, byte[] recordDelimiter)
  throws IOException{
  this.maxLineLength = job.getInt(org.apache.hadoop.mapreduce.lib.input.
    LineRecordReader.MAX_LINE_LENGTH, Integer.MAX_VALUE);
  this.in = new SplitLineReader(in, job, recordDelimiter);
  this.start = offset;
  this.pos = offset;
  this.end = endOffset;    
  filePosition = null;
}
Project: big-c    File: LineRecordReader.java
public LineRecordReader(InputStream in, long offset, long endOffset,
    int maxLineLength, byte[] recordDelimiter) {
  this.maxLineLength = maxLineLength;
  this.in = new SplitLineReader(in, recordDelimiter);
  this.start = offset;
  this.pos = offset;
  this.end = endOffset;    
  filePosition = null;
}
Project: big-c    File: LineRecordReader.java
public LineRecordReader(InputStream in, long offset, long endOffset, 
                        Configuration job, byte[] recordDelimiter)
  throws IOException{
  this.maxLineLength = job.getInt(org.apache.hadoop.mapreduce.lib.input.
    LineRecordReader.MAX_LINE_LENGTH, Integer.MAX_VALUE);
  this.in = new SplitLineReader(in, job, recordDelimiter);
  this.start = offset;
  this.pos = offset;
  this.end = endOffset;    
  filePosition = null;
}
Project: hadoop-2.6.0-cdh5.4.3    File: LineRecordReader.java
public LineRecordReader(InputStream in, long offset, long endOffset,
    int maxLineLength, byte[] recordDelimiter) {
  this.maxLineLength = maxLineLength;
  this.in = new SplitLineReader(in, recordDelimiter);
  this.start = offset;
  this.pos = offset;
  this.end = endOffset;    
  filePosition = null;
}
Project: hadoop-2.6.0-cdh5.4.3    File: LineRecordReader.java
public LineRecordReader(InputStream in, long offset, long endOffset, 
                        Configuration job, byte[] recordDelimiter)
  throws IOException{
  this.maxLineLength = job.getInt(org.apache.hadoop.mapreduce.lib.input.
    LineRecordReader.MAX_LINE_LENGTH, Integer.MAX_VALUE);
  this.in = new SplitLineReader(in, job, recordDelimiter);
  this.start = offset;
  this.pos = offset;
  this.end = endOffset;    
  filePosition = null;
}
Project: hadoop-2.6.0-cdh5.4.3    File: LineRecordReader.java
public LineRecordReader(InputStream in, long offset, long endOffset,
    int maxLineLength, byte[] recordDelimiter) {
  this.maxLineLength = maxLineLength;
  this.in = new SplitLineReader(in, recordDelimiter);
  this.start = offset;
  this.pos = offset;
  this.end = endOffset;
  this.filePosition = null;
}
Project: hadoop-2.6.0-cdh5.4.3    File: LineRecordReader.java
public LineRecordReader(InputStream in, long offset, long endOffset, 
    Configuration job, byte[] recordDelimiter)
  throws IOException{
  this.maxLineLength = job.getInt("mapred.linerecordreader.maxlength",
                                  Integer.MAX_VALUE);
  this.in = new SplitLineReader(in, job, recordDelimiter);
  this.start = offset;
  this.pos = offset;
  this.end = endOffset;    
  this.filePosition = null;
}
Project: FlexMap    File: LineRecordReader.java
public LineRecordReader(InputStream in, long offset, long endOffset,
    int maxLineLength, byte[] recordDelimiter) {
  this.maxLineLength = maxLineLength;
  this.in = new SplitLineReader(in, recordDelimiter);
  this.start = offset;
  this.pos = offset;
  this.end = endOffset;    
  filePosition = null;
}
Project: FlexMap    File: LineRecordReader.java
public LineRecordReader(InputStream in, long offset, long endOffset, 
                        Configuration job, byte[] recordDelimiter)
  throws IOException{
  this.maxLineLength = job.getInt(org.apache.hadoop.mapreduce.lib.input.
    LineRecordReader.MAX_LINE_LENGTH, Integer.MAX_VALUE);
  this.in = new SplitLineReader(in, job, recordDelimiter);
  this.start = offset;
  this.pos = offset;
  this.end = endOffset;    
  filePosition = null;
}
Project: hops    File: LineRecordReader.java
public LineRecordReader(InputStream in, long offset, long endOffset,
    int maxLineLength, byte[] recordDelimiter) {
  this.maxLineLength = maxLineLength;
  this.in = new SplitLineReader(in, recordDelimiter);
  this.start = offset;
  this.pos = offset;
  this.end = endOffset;    
  filePosition = null;
}
Project: hops    File: LineRecordReader.java
public LineRecordReader(InputStream in, long offset, long endOffset, 
                        Configuration job, byte[] recordDelimiter)
  throws IOException{
  this.maxLineLength = job.getInt(org.apache.hadoop.mapreduce.lib.input.
    LineRecordReader.MAX_LINE_LENGTH, Integer.MAX_VALUE);
  this.in = new SplitLineReader(in, job, recordDelimiter);
  this.start = offset;
  this.pos = offset;
  this.end = endOffset;    
  filePosition = null;
}
Project: hadoop-on-lustre2    File: LineRecordReader.java
public LineRecordReader(InputStream in, long offset, long endOffset,
    int maxLineLength, byte[] recordDelimiter) {
  this.maxLineLength = maxLineLength;
  this.in = new SplitLineReader(in, recordDelimiter);
  this.start = offset;
  this.pos = offset;
  this.end = endOffset;    
  filePosition = null;
}
Project: hadoop-on-lustre2    File: LineRecordReader.java
public LineRecordReader(InputStream in, long offset, long endOffset, 
                        Configuration job, byte[] recordDelimiter)
  throws IOException{
  this.maxLineLength = job.getInt(org.apache.hadoop.mapreduce.lib.input.
    LineRecordReader.MAX_LINE_LENGTH, Integer.MAX_VALUE);
  this.in = new SplitLineReader(in, job, recordDelimiter);
  this.start = offset;
  this.pos = offset;
  this.end = endOffset;    
  filePosition = null;
}