Example source code for the Java class org.apache.hadoop.io.BoundedByteArrayOutputStream

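The project snippets below show org.apache.hadoop.io.BoundedByteArrayOutputStream only in context. As a quick orientation, here is a minimal, self-contained sketch of the small API surface those snippets rely on (the capacity constructor, write, size(), getLimit(), getBuffer() and reset()). The values in the comments are what Hadoop's implementation is expected to produce; this sketch is not taken from any of the projects listed here.

import java.io.IOException;
import org.apache.hadoop.io.BoundedByteArrayOutputStream;

public class BoundedBufferDemo {
  public static void main(String[] args) throws IOException {
    // A stream backed by a fixed 16-byte array; writes may not exceed that bound.
    BoundedByteArrayOutputStream out = new BoundedByteArrayOutputStream(16);

    out.write(new byte[] { 1, 2, 3 });
    System.out.println(out.size());      // 3  -- bytes written so far
    System.out.println(out.getLimit());  // 16 -- upper bound of the buffer

    byte[] backing = out.getBuffer();    // the shared backing array, not a copy
    System.out.println(backing.length);  // 16

    out.reset();                         // rewind so the same backing array can be reused
    System.out.println(out.size());      // 0
  }
}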
Project: hadoop    File: InMemoryMapOutput.java
public InMemoryMapOutput(Configuration conf, TaskAttemptID mapId,
                         MergeManagerImpl<K, V> merger,
                         int size, CompressionCodec codec,
                         boolean primaryMapOutput) {
  super(mapId, (long)size, primaryMapOutput);
  this.conf = conf;
  this.merger = merger;
  this.codec = codec;
  byteStream = new BoundedByteArrayOutputStream(size);
  memory = byteStream.getBuffer();
  if (codec != null) {
    decompressor = CodecPool.getDecompressor(codec);
  } else {
    decompressor = null;
  }
}
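In this constructor the merger reserves the whole map-output segment up front: the BoundedByteArrayOutputStream allocates a size-byte array, and getBuffer() hands that backing array to the caller as memory, so fetched shuffle bytes can be copied straight into it without further allocation. The fetch code itself is not shown on this page, so the following fill loop is only an illustrative sketch of how such a pre-allocated buffer is typically filled from an InputStream, not code from any of these projects.

import java.io.EOFException;
import java.io.IOException;
import java.io.InputStream;

public final class BufferFill {
  // Copy exactly memory.length bytes from the stream into the pre-allocated array.
  static void fill(InputStream shuffle, byte[] memory) throws IOException {
    int off = 0;
    while (off < memory.length) {
      int n = shuffle.read(memory, off, memory.length - off);
      if (n < 0) {
        throw new EOFException("stream ended after " + off + " of " + memory.length + " bytes");
      }
      off += n;
    }
  }
}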
Project: big-c    File: InMemoryMapOutput.java
public InMemoryMapOutput(Configuration conf, TaskAttemptID mapId,
                         MergeManagerImpl<K, V> merger,
                         int size, CompressionCodec codec,
                         boolean primaryMapOutput) {
  super(mapId, (long)size, primaryMapOutput);
  this.conf = conf;
  this.merger = merger;
  this.codec = codec;
  byteStream = new BoundedByteArrayOutputStream(size);
  memory = byteStream.getBuffer();
  if (codec != null) {
    decompressor = CodecPool.getDecompressor(codec);
  } else {
    decompressor = null;
  }
}
Project: hadoop-2.6.0-cdh5.4.3    File: InMemoryMapOutput.java
public InMemoryMapOutput(Configuration conf, TaskAttemptID mapId,
                         MergeManagerImpl<K, V> merger,
                         int size, CompressionCodec codec,
                         boolean primaryMapOutput) {
  super(mapId, (long)size, primaryMapOutput);
  this.conf = conf;
  this.merger = merger;
  this.codec = codec;
  byteStream = new BoundedByteArrayOutputStream(size);
  memory = byteStream.getBuffer();
  if (codec != null) {
    decompressor = CodecPool.getDecompressor(codec);
  } else {
    decompressor = null;
  }
}
Project: hadoop-plus    File: InMemoryMapOutput.java
public InMemoryMapOutput(Configuration conf, TaskAttemptID mapId,
                         MergeManagerImpl<K, V> merger,
                         int size, CompressionCodec codec,
                         boolean primaryMapOutput) {
  super(mapId, (long)size, primaryMapOutput);
  this.conf = conf;
  this.merger = merger;
  this.codec = codec;
  byteStream = new BoundedByteArrayOutputStream(size);
  memory = byteStream.getBuffer();
  if (codec != null) {
    decompressor = CodecPool.getDecompressor(codec);
  } else {
    decompressor = null;
  }
}
Project: FlexMap    File: InMemoryMapOutput.java
public InMemoryMapOutput(Configuration conf, TaskAttemptID mapId,
                         MergeManagerImpl<K, V> merger,
                         int size, CompressionCodec codec,
                         boolean primaryMapOutput) {
  super(mapId, (long)size, primaryMapOutput);
  this.conf = conf;
  this.merger = merger;
  this.codec = codec;
  byteStream = new BoundedByteArrayOutputStream(size);
  memory = byteStream.getBuffer();
  if (codec != null) {
    decompressor = CodecPool.getDecompressor(codec);
  } else {
    decompressor = null;
  }
}
Project: hadoop-TCP    File: InMemoryMapOutput.java
public InMemoryMapOutput(Configuration conf, TaskAttemptID mapId,
                         MergeManagerImpl<K, V> merger,
                         int size, CompressionCodec codec,
                         boolean primaryMapOutput) {
  super(mapId, (long)size, primaryMapOutput);
  this.conf = conf;
  this.merger = merger;
  this.codec = codec;
  byteStream = new BoundedByteArrayOutputStream(size);
  memory = byteStream.getBuffer();
  if (codec != null) {
    decompressor = CodecPool.getDecompressor(codec);
  } else {
    decompressor = null;
  }
}
Project: hardfs    File: InMemoryMapOutput.java
public InMemoryMapOutput(Configuration conf, TaskAttemptID mapId,
                         MergeManagerImpl<K, V> merger,
                         int size, CompressionCodec codec,
                         boolean primaryMapOutput) {
  super(mapId, (long)size, primaryMapOutput);
  this.conf = conf;
  this.merger = merger;
  this.codec = codec;
  byteStream = new BoundedByteArrayOutputStream(size);
  memory = byteStream.getBuffer();
  if (codec != null) {
    decompressor = CodecPool.getDecompressor(codec);
  } else {
    decompressor = null;
  }
}
Project: hadoop-on-lustre2    File: InMemoryMapOutput.java
public InMemoryMapOutput(Configuration conf, TaskAttemptID mapId,
                         MergeManagerImpl<K, V> merger,
                         int size, CompressionCodec codec,
                         boolean primaryMapOutput) {
  super(mapId, (long)size, primaryMapOutput);
  this.conf = conf;
  this.merger = merger;
  this.codec = codec;
  byteStream = new BoundedByteArrayOutputStream(size);
  memory = byteStream.getBuffer();
  if (codec != null) {
    decompressor = CodecPool.getDecompressor(codec);
  } else {
    decompressor = null;
  }
}
Project: hadoop-on-lustre2    File: InMemoryLinkMapOutput.java
public InMemoryLinkMapOutput(JobConf conf, TaskAttemptID mapId,
        MergeManagerImpl<K, V> merger, int size, CompressionCodec codec,
        boolean primaryMapOutput) {
    super(conf, mapId, merger, size, codec, primaryMapOutput);
    // super(mapId, (long) size, primaryMapOutput);
    this.conf = conf;
    this.merger = merger;
    this.codec = codec;
    byteStream = new BoundedByteArrayOutputStream(size);
    memory = byteStream.getBuffer();
    if (codec != null) {
        decompressor = CodecPool.getDecompressor(codec);
    } else {
        decompressor = null;
    }
}
Project: incubator-tez    File: MapOutput.java
MapOutput(InputAttemptIdentifier attemptIdentifier, MergeManager merger, int size, 
          boolean primaryMapOutput) {
  this.id = ID.incrementAndGet();
  this.attemptIdentifier = attemptIdentifier;
  this.merger = merger;

  type = Type.MEMORY;
  byteStream = new BoundedByteArrayOutputStream(size);
  memory = byteStream.getBuffer();

  this.size = size;

  localFS = null;
  disk = null;
  outputPath = null;
  tmpOutputPath = null;

  this.primaryMapOutput = primaryMapOutput;
}
Project: mapreduce-fork    File: MapOutput.java
MapOutput(TaskAttemptID mapId, MergeManager<K,V> merger, int size, 
          boolean primaryMapOutput) {
  this.id = ID.incrementAndGet();
  this.mapId = mapId;
  this.merger = merger;

  type = Type.MEMORY;
  byteStream = new BoundedByteArrayOutputStream(size);
  memory = byteStream.getBuffer();

  this.size = size;

  localFS = null;
  disk = null;
  outputPath = null;
  tmpOutputPath = null;

  this.primaryMapOutput = primaryMapOutput;
}
Project: tez    File: TestTezMerger.java
private List<TezMerger.Segment> createInMemorySegments(int segmentCount, int keysPerSegment)
    throws IOException {
  List<TezMerger.Segment> segmentList = Lists.newLinkedList();
  Random rnd = new Random();
  DataInputBuffer key = new DataInputBuffer();
  DataInputBuffer value = new DataInputBuffer();
  for (int i = 0; i < segmentCount; i++) {
    BoundedByteArrayOutputStream stream = new BoundedByteArrayOutputStream(10000);
    InMemoryWriter writer = new InMemoryWriter(stream);

    for (int j = 0; j < keysPerSegment; j++) {
      populateData(new IntWritable(rnd.nextInt()), new LongWritable(rnd.nextLong()), key, value);
      writer.append(key, value);
    }
    writer.close();
    InMemoryReader reader = new InMemoryReader(merger, null, stream.getBuffer(), 0, stream.getLimit());

    segmentList.add(new TezMerger.Segment(reader, null));
  }
  return segmentList;
}
Project: hadoop    File: TestMergeManager.java
private void fillOutput(InMemoryMapOutput<Text, Text> output) throws IOException {
  BoundedByteArrayOutputStream stream = output.getArrayStream();
  int count = stream.getLimit();
  for (int i=0; i < count; ++i) {
    stream.write(i);
  }
}
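This test helper writes exactly getLimit() bytes, i.e. it fills the bounded buffer to capacity; a further write would exceed the bound (in Hadoop's implementation an attempt to write past the limit fails with an IOException). Below is a standalone version of the same loop, included only to make that boundary explicit; it is not part of the test above.

import java.io.IOException;
import org.apache.hadoop.io.BoundedByteArrayOutputStream;

public class FillToLimit {
  public static void main(String[] args) throws IOException {
    BoundedByteArrayOutputStream stream = new BoundedByteArrayOutputStream(8);
    for (int i = 0; i < stream.getLimit(); i++) {
      stream.write(i);   // fills all 8 bytes, exactly like fillOutput above
    }
    System.out.println(stream.size() + " of " + stream.getLimit() + " bytes used");
    // stream.write(0);  // one more write would exceed the bound and fail
  }
}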
Project: aliyun-oss-hadoop-fs    File: InMemoryMapOutput.java
public InMemoryMapOutput(Configuration conf, TaskAttemptID mapId,
                         MergeManagerImpl<K, V> merger,
                         int size, CompressionCodec codec,
                         boolean primaryMapOutput) {
  super(conf, merger, mapId, (long)size, primaryMapOutput);
  this.codec = codec;
  byteStream = new BoundedByteArrayOutputStream(size);
  memory = byteStream.getBuffer();
  if (codec != null) {
    decompressor = CodecPool.getDecompressor(codec);
  } else {
    decompressor = null;
  }
}
Project: aliyun-oss-hadoop-fs    File: TestMergeManager.java
private void fillOutput(InMemoryMapOutput<Text, Text> output) throws IOException {
  BoundedByteArrayOutputStream stream = output.getArrayStream();
  int count = stream.getLimit();
  for (int i=0; i < count; ++i) {
    stream.write(i);
  }
}
Project: big-c    File: TestMergeManager.java
private void fillOutput(InMemoryMapOutput<Text, Text> output) throws IOException {
  BoundedByteArrayOutputStream stream = output.getArrayStream();
  int count = stream.getLimit();
  for (int i=0; i < count; ++i) {
    stream.write(i);
  }
}
Project: hadoop-2.6.0-cdh5.4.3    File: TestMergeManager.java
private void fillOutput(InMemoryMapOutput<Text, Text> output) throws IOException {
  BoundedByteArrayOutputStream stream = output.getArrayStream();
  int count = stream.getLimit();
  for (int i=0; i < count; ++i) {
    stream.write(i);
  }
}
Project: hadoop-plus    File: TestMergeManager.java
private void fillOutput(InMemoryMapOutput<Text, Text> output) throws IOException {
  BoundedByteArrayOutputStream stream = output.getArrayStream();
  int count = stream.getLimit();
  for (int i=0; i < count; ++i) {
    stream.write(i);
  }
}
Project: FlexMap    File: TestMergeManager.java
private void fillOutput(InMemoryMapOutput<Text, Text> output) throws IOException {
  BoundedByteArrayOutputStream stream = output.getArrayStream();
  int count = stream.getLimit();
  for (int i=0; i < count; ++i) {
    stream.write(i);
  }
}
Project: hops    File: InMemoryMapOutput.java
public InMemoryMapOutput(Configuration conf, TaskAttemptID mapId,
                         MergeManagerImpl<K, V> merger,
                         int size, CompressionCodec codec,
                         boolean primaryMapOutput) {
  super(conf, merger, mapId, (long)size, primaryMapOutput);
  this.codec = codec;
  byteStream = new BoundedByteArrayOutputStream(size);
  memory = byteStream.getBuffer();
  if (codec != null) {
    decompressor = CodecPool.getDecompressor(codec);
  } else {
    decompressor = null;
  }
}
Project: hops    File: TestMergeManager.java
private void fillOutput(InMemoryMapOutput<Text, Text> output) throws IOException {
  BoundedByteArrayOutputStream stream = output.getArrayStream();
  int count = stream.getLimit();
  for (int i=0; i < count; ++i) {
    stream.write(i);
  }
}
Project: hadoop-TCP    File: TestMergeManager.java
private void fillOutput(InMemoryMapOutput<Text, Text> output) throws IOException {
  BoundedByteArrayOutputStream stream = output.getArrayStream();
  int count = stream.getLimit();
  for (int i=0; i < count; ++i) {
    stream.write(i);
  }
}
Project: hardfs    File: TestMergeManager.java
private void fillOutput(InMemoryMapOutput<Text, Text> output) throws IOException {
  BoundedByteArrayOutputStream stream = output.getArrayStream();
  int count = stream.getLimit();
  for (int i=0; i < count; ++i) {
    stream.write(i);
  }
}
Project: hadoop-on-lustre2    File: TestMergeManager.java
private void fillOutput(InMemoryMapOutput<Text, Text> output) throws IOException {
  BoundedByteArrayOutputStream stream = output.getArrayStream();
  int count = stream.getLimit();
  for (int i=0; i < count; ++i) {
    stream.write(i);
  }
}
Project: incubator-tez    File: TestIFile.java
@Test
//Test InMemoryWriter
public void testInMemoryWriter() throws IOException {
  InMemoryWriter writer = null;
  BoundedByteArrayOutputStream bout = new BoundedByteArrayOutputStream(1024 * 1024);

  List<KVPair> data = KVDataGen.generateTestData(true, 0);

  //No RLE, No RepeatKeys, no compression
  writer = new InMemoryWriter(bout);
  writeTestFileUsingDataBuffer(writer, false, false, data, null);
  readUsingInMemoryReader(bout.getBuffer(), data);

  //No RLE, RepeatKeys, no compression
  bout.reset();
  writer = new InMemoryWriter(bout);
  writeTestFileUsingDataBuffer(writer, false, true, data, null);
  readUsingInMemoryReader(bout.getBuffer(), data);

  //RLE, No RepeatKeys, no compression
  bout.reset();
  writer = new InMemoryWriter(bout);
  writeTestFileUsingDataBuffer(writer, true, false, data, null);
  readUsingInMemoryReader(bout.getBuffer(), data);

  //RLE, RepeatKeys, no compression
  bout.reset();
  writer = new InMemoryWriter(bout);
  writeTestFileUsingDataBuffer(writer, true, true, data, null);
  readUsingInMemoryReader(bout.getBuffer(), data);
}
Project: tez    File: TestIFile.java
@Test(timeout = 5000)
//Test InMemoryWriter
public void testInMemoryWriter() throws IOException {
  InMemoryWriter writer = null;
  BoundedByteArrayOutputStream bout = new BoundedByteArrayOutputStream(1024 * 1024);

  List<KVPair> data = KVDataGen.generateTestData(true, 10);

  //No RLE, No RepeatKeys, no compression
  writer = new InMemoryWriter(bout);
  writeTestFileUsingDataBuffer(writer, false, data);
  readUsingInMemoryReader(bout.getBuffer(), data);

  //No RLE, RepeatKeys, no compression
  bout.reset();
  writer = new InMemoryWriter(bout);
  writeTestFileUsingDataBuffer(writer, true, data);
  readUsingInMemoryReader(bout.getBuffer(), data);

  //RLE, No RepeatKeys, no compression
  bout.reset();
  writer = new InMemoryWriter(bout, true);
  writeTestFileUsingDataBuffer(writer, false, data);
  readUsingInMemoryReader(bout.getBuffer(), data);

  //RLE, RepeatKeys, no compression
  bout.reset();
  writer = new InMemoryWriter(bout, true);
  writeTestFileUsingDataBuffer(writer, true, data);
  readUsingInMemoryReader(bout.getBuffer(), data);
}
Project: hadoop-oss    File: TFile.java
@Override
public void close() throws IOException {
  if (closed == true) {
    return;
  }

  try {
    ++errorCount;
    byte[] key = currentKeyBufferOS.getBuffer();
    int len = currentKeyBufferOS.size();
    /**
     * verify length.
     */
    if (expectedLength >= 0 && expectedLength != len) {
      throw new IOException("Incorrect key length: expected="
          + expectedLength + " actual=" + len);
    }

    Utils.writeVInt(blkAppender, len);
    blkAppender.write(key, 0, len);
    if (tfileIndex.getFirstKey() == null) {
      tfileIndex.setFirstKey(key, 0, len);
    }

    if (tfileMeta.isSorted() && tfileMeta.getRecordCount()>0) {
      byte[] lastKey = lastKeyBufferOS.getBuffer();
      int lastLen = lastKeyBufferOS.size();
      if (tfileMeta.getComparator().compare(key, 0, len, lastKey, 0,
          lastLen) < 0) {
        throw new IOException("Keys are not added in sorted order");
      }
    }

    BoundedByteArrayOutputStream tmp = currentKeyBufferOS;
    currentKeyBufferOS = lastKeyBufferOS;
    lastKeyBufferOS = tmp;
    --errorCount;
  } finally {
    closed = true;
    state = State.END_KEY;
  }
}
Project: hadoop    File: InMemoryWriter.java
public InMemoryWriter(BoundedByteArrayOutputStream arrayStream) {
  super(null);
  this.out = 
    new DataOutputStream(new IFileOutputStream(arrayStream));
}
Project: hadoop    File: InMemoryMapOutput.java
public BoundedByteArrayOutputStream getArrayStream() {
  return byteStream;
}
Project: hadoop    File: TFile.java
@Override
public void close() throws IOException {
  if (closed == true) {
    return;
  }

  try {
    ++errorCount;
    byte[] key = currentKeyBufferOS.getBuffer();
    int len = currentKeyBufferOS.size();
    /**
     * verify length.
     */
    if (expectedLength >= 0 && expectedLength != len) {
      throw new IOException("Incorrect key length: expected="
          + expectedLength + " actual=" + len);
    }

    Utils.writeVInt(blkAppender, len);
    blkAppender.write(key, 0, len);
    if (tfileIndex.getFirstKey() == null) {
      tfileIndex.setFirstKey(key, 0, len);
    }

    if (tfileMeta.isSorted() && tfileMeta.getRecordCount()>0) {
      byte[] lastKey = lastKeyBufferOS.getBuffer();
      int lastLen = lastKeyBufferOS.size();
      if (tfileMeta.getComparator().compare(key, 0, len, lastKey, 0,
          lastLen) < 0) {
        throw new IOException("Keys are not added in sorted order");
      }
    }

    BoundedByteArrayOutputStream tmp = currentKeyBufferOS;
    currentKeyBufferOS = lastKeyBufferOS;
    lastKeyBufferOS = tmp;
    --errorCount;
  } finally {
    closed = true;
    state = State.END_KEY;
  }
}
Project: aliyun-oss-hadoop-fs    File: InMemoryWriter.java
public InMemoryWriter(BoundedByteArrayOutputStream arrayStream) {
  super(null);
  this.out = 
    new DataOutputStream(new IFileOutputStream(arrayStream));
}
Project: aliyun-oss-hadoop-fs    File: InMemoryMapOutput.java
public BoundedByteArrayOutputStream getArrayStream() {
  return byteStream;
}
Project: aliyun-oss-hadoop-fs    File: TFile.java
@Override
public void close() throws IOException {
  if (closed == true) {
    return;
  }

  try {
    ++errorCount;
    byte[] key = currentKeyBufferOS.getBuffer();
    int len = currentKeyBufferOS.size();
    /**
     * verify length.
     */
    if (expectedLength >= 0 && expectedLength != len) {
      throw new IOException("Incorrect key length: expected="
          + expectedLength + " actual=" + len);
    }

    Utils.writeVInt(blkAppender, len);
    blkAppender.write(key, 0, len);
    if (tfileIndex.getFirstKey() == null) {
      tfileIndex.setFirstKey(key, 0, len);
    }

    if (tfileMeta.isSorted() && tfileMeta.getRecordCount()>0) {
      byte[] lastKey = lastKeyBufferOS.getBuffer();
      int lastLen = lastKeyBufferOS.size();
      if (tfileMeta.getComparator().compare(key, 0, len, lastKey, 0,
          lastLen) < 0) {
        throw new IOException("Keys are not added in sorted order");
      }
    }

    BoundedByteArrayOutputStream tmp = currentKeyBufferOS;
    currentKeyBufferOS = lastKeyBufferOS;
    lastKeyBufferOS = tmp;
    --errorCount;
  } finally {
    closed = true;
    state = State.END_KEY;
  }
}
Project: big-c    File: InMemoryWriter.java
public InMemoryWriter(BoundedByteArrayOutputStream arrayStream) {
  super(null);
  this.out = 
    new DataOutputStream(new IFileOutputStream(arrayStream));
}
Project: big-c    File: InMemoryMapOutput.java
public BoundedByteArrayOutputStream getArrayStream() {
  return byteStream;
}
Project: big-c    File: TFile.java
@Override
public void close() throws IOException {
  if (closed == true) {
    return;
  }

  try {
    ++errorCount;
    byte[] key = currentKeyBufferOS.getBuffer();
    int len = currentKeyBufferOS.size();
    /**
     * verify length.
     */
    if (expectedLength >= 0 && expectedLength != len) {
      throw new IOException("Incorrect key length: expected="
          + expectedLength + " actual=" + len);
    }

    Utils.writeVInt(blkAppender, len);
    blkAppender.write(key, 0, len);
    if (tfileIndex.getFirstKey() == null) {
      tfileIndex.setFirstKey(key, 0, len);
    }

    if (tfileMeta.isSorted() && tfileMeta.getRecordCount()>0) {
      byte[] lastKey = lastKeyBufferOS.getBuffer();
      int lastLen = lastKeyBufferOS.size();
      if (tfileMeta.getComparator().compare(key, 0, len, lastKey, 0,
          lastLen) < 0) {
        throw new IOException("Keys are not added in sorted order");
      }
    }

    BoundedByteArrayOutputStream tmp = currentKeyBufferOS;
    currentKeyBufferOS = lastKeyBufferOS;
    lastKeyBufferOS = tmp;
    --errorCount;
  } finally {
    closed = true;
    state = State.END_KEY;
  }
}
Project: hadoop-2.6.0-cdh5.4.3    File: InMemoryWriter.java
public InMemoryWriter(BoundedByteArrayOutputStream arrayStream) {
  super(null);
  this.out = 
    new DataOutputStream(new IFileOutputStream(arrayStream));
}
Project: hadoop-2.6.0-cdh5.4.3    File: InMemoryMapOutput.java
public BoundedByteArrayOutputStream getArrayStream() {
  return byteStream;
}
Project: hadoop-2.6.0-cdh5.4.3    File: TFile.java
@Override
public void close() throws IOException {
  if (closed == true) {
    return;
  }

  try {
    ++errorCount;
    byte[] key = currentKeyBufferOS.getBuffer();
    int len = currentKeyBufferOS.size();
    /**
     * verify length.
     */
    if (expectedLength >= 0 && expectedLength != len) {
      throw new IOException("Incorrect key length: expected="
          + expectedLength + " actual=" + len);
    }

    Utils.writeVInt(blkAppender, len);
    blkAppender.write(key, 0, len);
    if (tfileIndex.getFirstKey() == null) {
      tfileIndex.setFirstKey(key, 0, len);
    }

    if (tfileMeta.isSorted() && tfileMeta.getRecordCount()>0) {
      byte[] lastKey = lastKeyBufferOS.getBuffer();
      int lastLen = lastKeyBufferOS.size();
      if (tfileMeta.getComparator().compare(key, 0, len, lastKey, 0,
          lastLen) < 0) {
        throw new IOException("Keys are not added in sorted order");
      }
    }

    BoundedByteArrayOutputStream tmp = currentKeyBufferOS;
    currentKeyBufferOS = lastKeyBufferOS;
    lastKeyBufferOS = tmp;
    --errorCount;
  } finally {
    closed = true;
    state = State.END_KEY;
  }
}
Project: apex-malhar    File: DTFile.java
@Override
public void close() throws IOException {
  if (closed == true) {
    return;
  }

  try {
    ++errorCount;
    byte[] key = currentKeyBufferOS.getBuffer();
    int len = currentKeyBufferOS.size();
    /**
     * verify length.
     */
    if (expectedLength >= 0 && expectedLength != len) {
      throw new IOException("Incorrect key length: expected="
          + expectedLength + " actual=" + len);
    }

    Utils.writeVInt(blkAppender, len);
    blkAppender.write(key, 0, len);
    if (tfileIndex.getFirstKey() == null) {
      tfileIndex.setFirstKey(key, 0, len);
    }

    if (tfileMeta.isSorted() && tfileMeta.getRecordCount()>0) {
      byte[] lastKey = lastKeyBufferOS.getBuffer();
      int lastLen = lastKeyBufferOS.size();
      if (tfileMeta.getComparator().compare(key, 0, len, lastKey, 0,
          lastLen) < 0) {
        throw new IOException("Keys are not added in sorted order");
      }
    }

    BoundedByteArrayOutputStream tmp = currentKeyBufferOS;
    currentKeyBufferOS = lastKeyBufferOS;
    lastKeyBufferOS = tmp;
    --errorCount;
  } finally {
    closed = true;
    state = State.END_KEY;
  }
}