Java 类org.apache.hadoop.io.file.tfile.Compression.Algorithm 实例源码

项目:hadoop-oss    文件:BCFile.java   
/**
 * Write-side state for a single compressed block.
 *
 * @param compressionAlgo
 *          The compression algorithm to be used for compression.
 * @param fsOut
 *          Underlying output stream; its current position marks the start
 *          offset of this block.
 * @param fsOutputBuffer
 *          Reusable scratch buffer backing the buffered output stream;
 *          resized to the configured FS output buffer size.
 * @param conf
 *          Configuration used to size the output buffer.
 * @throws IOException
 *           If the compression stream cannot be created; the compressor is
 *           returned to its pool before the exception is rethrown.
 */
public WBlockState(Algorithm compressionAlgo, FSDataOutputStream fsOut,
    BytesWritable fsOutputBuffer, Configuration conf) throws IOException {
  this.compressAlgo = compressionAlgo;
  this.fsOut = fsOut;
  this.posStart = fsOut.getPos();

  fsOutputBuffer.setCapacity(TFile.getFSOutputBufferSize(conf));

  this.fsBufferedOutput =
      new SimpleBufferedOutputStream(this.fsOut, fsOutputBuffer.getBytes());
  this.compressor = compressAlgo.getCompressor();

  try {
    this.out =
        compressAlgo.createCompressionStream(fsBufferedOutput,
            compressor, 0);
  } catch (IOException e) {
    // Avoid leaking the pooled compressor when stream creation fails.
    compressAlgo.returnCompressor(compressor);
    throw e;
  }
}
项目:hadoop-oss    文件:BCFile.java   
/**
 * Creates a {@link BlockAppender} for writing a named meta block.
 *
 * @param name
 *          Unique name of the meta block.
 * @param compressAlgo
 *          Compression algorithm to apply to the block contents.
 * @return An appender the caller uses to write (and must close) the block.
 * @throws IOException
 *           If the write-side block state cannot be created.
 * @throws MetaBlockAlreadyExists
 *           If a meta block with the same name was already written.
 * @throws IllegalStateException
 *           If another block is still open.
 */
private BlockAppender prepareMetaBlock(String name, Algorithm compressAlgo)
    throws IOException, MetaBlockAlreadyExists {
  // Only one block may be in progress at a time.
  if (blkInProgress) {
    throw new IllegalStateException(
        "Cannot create Meta Block until previous block is closed.");
  }

  if (metaIndex.getMetaByName(name) != null) {
    throw new MetaBlockAlreadyExists("name=" + name);
  }

  MetaBlockRegister mbr = new MetaBlockRegister(name, compressAlgo);
  WBlockState wbs =
      new WBlockState(compressAlgo, out, fsOutputBuffer, conf);
  BlockAppender ba = new BlockAppender(mbr, wbs);
  blkInProgress = true;
  metaBlkSeen = true;
  return ba;
}
项目:hadoop-oss    文件:BCFile.java   
/**
 * Read-side state for one compressed block: layers a decompression stream
 * over the block's byte range within the underlying file.
 *
 * @param compressionAlgo algorithm the block was compressed with
 * @param fsin input stream over the whole container file
 * @param region offset and compressed size of this block within the file
 * @param conf configuration used to size the input buffer
 * @throws IOException if the decompression stream cannot be created; the
 *           decompressor is returned to its pool before rethrowing
 */
public RBlockState(Algorithm compressionAlgo, FSDataInputStream fsin,
    BlockRegion region, Configuration conf) throws IOException {
  this.compressAlgo = compressionAlgo;
  this.region = region;
  this.decompressor = compressionAlgo.getDecompressor();

  try {
    // Restrict reads to this block's range, then decompress on top of it.
    BoundedRangeFileInputStream bounded =
        new BoundedRangeFileInputStream(fsin, region.getOffset(),
            region.getCompressedSize());
    this.in =
        compressAlgo.createDecompressionStream(bounded, decompressor,
            TFile.getFSInputBufferSize(conf));
  } catch (IOException e) {
    // Do not leak the pooled decompressor if construction fails.
    compressAlgo.returnDecompressor(decompressor);
    throw e;
  }
}
项目:hadoop    文件:BCFile.java   
/**
 * Write-side state for a single compressed block.
 *
 * @param compressionAlgo
 *          The compression algorithm to be used for compression.
 * @param fsOut
 *          Underlying output stream; its current position marks the start
 *          offset of this block.
 * @param fsOutputBuffer
 *          Reusable scratch buffer backing the buffered output stream;
 *          resized to the configured FS output buffer size.
 * @param conf
 *          Configuration used to size the output buffer.
 * @throws IOException
 *           If the compression stream cannot be created; the compressor is
 *           returned to its pool before the exception is rethrown.
 */
public WBlockState(Algorithm compressionAlgo, FSDataOutputStream fsOut,
    BytesWritable fsOutputBuffer, Configuration conf) throws IOException {
  this.compressAlgo = compressionAlgo;
  this.fsOut = fsOut;
  this.posStart = fsOut.getPos();

  fsOutputBuffer.setCapacity(TFile.getFSOutputBufferSize(conf));

  this.fsBufferedOutput =
      new SimpleBufferedOutputStream(this.fsOut, fsOutputBuffer.getBytes());
  this.compressor = compressAlgo.getCompressor();

  try {
    this.out =
        compressAlgo.createCompressionStream(fsBufferedOutput,
            compressor, 0);
  } catch (IOException e) {
    // Avoid leaking the pooled compressor when stream creation fails.
    compressAlgo.returnCompressor(compressor);
    throw e;
  }
}
项目:hadoop    文件:BCFile.java   
/**
 * Creates a {@link BlockAppender} for writing a named meta block.
 *
 * @param name
 *          Unique name of the meta block.
 * @param compressAlgo
 *          Compression algorithm to apply to the block contents.
 * @return An appender the caller uses to write (and must close) the block.
 * @throws IOException
 *           If the write-side block state cannot be created.
 * @throws MetaBlockAlreadyExists
 *           If a meta block with the same name was already written.
 * @throws IllegalStateException
 *           If another block is still open.
 */
private BlockAppender prepareMetaBlock(String name, Algorithm compressAlgo)
    throws IOException, MetaBlockAlreadyExists {
  // Only one block may be in progress at a time.
  if (blkInProgress) {
    throw new IllegalStateException(
        "Cannot create Meta Block until previous block is closed.");
  }

  if (metaIndex.getMetaByName(name) != null) {
    throw new MetaBlockAlreadyExists("name=" + name);
  }

  MetaBlockRegister mbr = new MetaBlockRegister(name, compressAlgo);
  WBlockState wbs =
      new WBlockState(compressAlgo, out, fsOutputBuffer, conf);
  BlockAppender ba = new BlockAppender(mbr, wbs);
  blkInProgress = true;
  metaBlkSeen = true;
  return ba;
}
项目:hadoop    文件:BCFile.java   
/**
 * Read-side state for one compressed block: layers a decompression stream
 * over the block's byte range within the underlying file.
 *
 * @param compressionAlgo algorithm the block was compressed with
 * @param fsin input stream over the whole container file
 * @param region offset and compressed size of this block within the file
 * @param conf configuration used to size the input buffer
 * @throws IOException if the decompression stream cannot be created; the
 *           decompressor is returned to its pool before rethrowing
 */
public RBlockState(Algorithm compressionAlgo, FSDataInputStream fsin,
    BlockRegion region, Configuration conf) throws IOException {
  this.compressAlgo = compressionAlgo;
  this.region = region;
  this.decompressor = compressionAlgo.getDecompressor();

  try {
    // Restrict reads to this block's range, then decompress on top of it.
    BoundedRangeFileInputStream bounded =
        new BoundedRangeFileInputStream(fsin, region.getOffset(),
            region.getCompressedSize());
    this.in =
        compressAlgo.createDecompressionStream(bounded, decompressor,
            TFile.getFSInputBufferSize(conf));
  } catch (IOException e) {
    // Do not leak the pooled decompressor if construction fails.
    compressAlgo.returnDecompressor(decompressor);
    throw e;
  }
}
项目:aliyun-oss-hadoop-fs    文件:BCFile.java   
/**
 * Write-side state for a single compressed block.
 *
 * @param compressionAlgo
 *          The compression algorithm to be used for compression.
 * @param fsOut
 *          Underlying output stream; its current position marks the start
 *          offset of this block.
 * @param fsOutputBuffer
 *          Reusable scratch buffer backing the buffered output stream;
 *          resized to the configured FS output buffer size.
 * @param conf
 *          Configuration used to size the output buffer.
 * @throws IOException
 *           If the compression stream cannot be created; the compressor is
 *           returned to its pool before the exception is rethrown.
 */
public WBlockState(Algorithm compressionAlgo, FSDataOutputStream fsOut,
    BytesWritable fsOutputBuffer, Configuration conf) throws IOException {
  this.compressAlgo = compressionAlgo;
  this.fsOut = fsOut;
  this.posStart = fsOut.getPos();

  fsOutputBuffer.setCapacity(TFile.getFSOutputBufferSize(conf));

  this.fsBufferedOutput =
      new SimpleBufferedOutputStream(this.fsOut, fsOutputBuffer.getBytes());
  this.compressor = compressAlgo.getCompressor();

  try {
    this.out =
        compressAlgo.createCompressionStream(fsBufferedOutput,
            compressor, 0);
  } catch (IOException e) {
    // Avoid leaking the pooled compressor when stream creation fails.
    compressAlgo.returnCompressor(compressor);
    throw e;
  }
}
项目:aliyun-oss-hadoop-fs    文件:BCFile.java   
/**
 * Creates a {@link BlockAppender} for writing a named meta block.
 *
 * @param name
 *          Unique name of the meta block.
 * @param compressAlgo
 *          Compression algorithm to apply to the block contents.
 * @return An appender the caller uses to write (and must close) the block.
 * @throws IOException
 *           If the write-side block state cannot be created.
 * @throws MetaBlockAlreadyExists
 *           If a meta block with the same name was already written.
 * @throws IllegalStateException
 *           If another block is still open.
 */
private BlockAppender prepareMetaBlock(String name, Algorithm compressAlgo)
    throws IOException, MetaBlockAlreadyExists {
  // Only one block may be in progress at a time.
  if (blkInProgress) {
    throw new IllegalStateException(
        "Cannot create Meta Block until previous block is closed.");
  }

  if (metaIndex.getMetaByName(name) != null) {
    throw new MetaBlockAlreadyExists("name=" + name);
  }

  MetaBlockRegister mbr = new MetaBlockRegister(name, compressAlgo);
  WBlockState wbs =
      new WBlockState(compressAlgo, out, fsOutputBuffer, conf);
  BlockAppender ba = new BlockAppender(mbr, wbs);
  blkInProgress = true;
  metaBlkSeen = true;
  return ba;
}
项目:aliyun-oss-hadoop-fs    文件:BCFile.java   
/**
 * Read-side state for one compressed block: layers a decompression stream
 * over the block's byte range within the underlying file.
 *
 * @param compressionAlgo algorithm the block was compressed with
 * @param fsin input stream over the whole container file
 * @param region offset and compressed size of this block within the file
 * @param conf configuration used to size the input buffer
 * @throws IOException if the decompression stream cannot be created; the
 *           decompressor is returned to its pool before rethrowing
 */
public RBlockState(Algorithm compressionAlgo, FSDataInputStream fsin,
    BlockRegion region, Configuration conf) throws IOException {
  this.compressAlgo = compressionAlgo;
  this.region = region;
  this.decompressor = compressionAlgo.getDecompressor();

  try {
    // Restrict reads to this block's range, then decompress on top of it.
    BoundedRangeFileInputStream bounded =
        new BoundedRangeFileInputStream(fsin, region.getOffset(),
            region.getCompressedSize());
    this.in =
        compressAlgo.createDecompressionStream(bounded, decompressor,
            TFile.getFSInputBufferSize(conf));
  } catch (IOException e) {
    // Do not leak the pooled decompressor if construction fails.
    compressAlgo.returnDecompressor(decompressor);
    throw e;
  }
}
项目:big-c    文件:BCFile.java   
/**
 * Write-side state for a single compressed block.
 *
 * @param compressionAlgo
 *          The compression algorithm to be used for compression.
 * @param fsOut
 *          Underlying output stream; its current position marks the start
 *          offset of this block.
 * @param fsOutputBuffer
 *          Reusable scratch buffer backing the buffered output stream;
 *          resized to the configured FS output buffer size.
 * @param conf
 *          Configuration used to size the output buffer.
 * @throws IOException
 *           If the compression stream cannot be created; the compressor is
 *           returned to its pool before the exception is rethrown.
 */
public WBlockState(Algorithm compressionAlgo, FSDataOutputStream fsOut,
    BytesWritable fsOutputBuffer, Configuration conf) throws IOException {
  this.compressAlgo = compressionAlgo;
  this.fsOut = fsOut;
  this.posStart = fsOut.getPos();

  fsOutputBuffer.setCapacity(TFile.getFSOutputBufferSize(conf));

  this.fsBufferedOutput =
      new SimpleBufferedOutputStream(this.fsOut, fsOutputBuffer.getBytes());
  this.compressor = compressAlgo.getCompressor();

  try {
    this.out =
        compressAlgo.createCompressionStream(fsBufferedOutput,
            compressor, 0);
  } catch (IOException e) {
    // Avoid leaking the pooled compressor when stream creation fails.
    compressAlgo.returnCompressor(compressor);
    throw e;
  }
}
项目:big-c    文件:BCFile.java   
/**
 * Creates a {@link BlockAppender} for writing a named meta block.
 *
 * @param name
 *          Unique name of the meta block.
 * @param compressAlgo
 *          Compression algorithm to apply to the block contents.
 * @return An appender the caller uses to write (and must close) the block.
 * @throws IOException
 *           If the write-side block state cannot be created.
 * @throws MetaBlockAlreadyExists
 *           If a meta block with the same name was already written.
 * @throws IllegalStateException
 *           If another block is still open.
 */
private BlockAppender prepareMetaBlock(String name, Algorithm compressAlgo)
    throws IOException, MetaBlockAlreadyExists {
  // Only one block may be in progress at a time.
  if (blkInProgress) {
    throw new IllegalStateException(
        "Cannot create Meta Block until previous block is closed.");
  }

  if (metaIndex.getMetaByName(name) != null) {
    throw new MetaBlockAlreadyExists("name=" + name);
  }

  MetaBlockRegister mbr = new MetaBlockRegister(name, compressAlgo);
  WBlockState wbs =
      new WBlockState(compressAlgo, out, fsOutputBuffer, conf);
  BlockAppender ba = new BlockAppender(mbr, wbs);
  blkInProgress = true;
  metaBlkSeen = true;
  return ba;
}
项目:big-c    文件:BCFile.java   
/**
 * Read-side state for one compressed block: layers a decompression stream
 * over the block's byte range within the underlying file.
 *
 * @param compressionAlgo algorithm the block was compressed with
 * @param fsin input stream over the whole container file
 * @param region offset and compressed size of this block within the file
 * @param conf configuration used to size the input buffer
 * @throws IOException if the decompression stream cannot be created; the
 *           decompressor is returned to its pool before rethrowing
 */
public RBlockState(Algorithm compressionAlgo, FSDataInputStream fsin,
    BlockRegion region, Configuration conf) throws IOException {
  this.compressAlgo = compressionAlgo;
  this.region = region;
  this.decompressor = compressionAlgo.getDecompressor();

  try {
    // Restrict reads to this block's range, then decompress on top of it.
    BoundedRangeFileInputStream bounded =
        new BoundedRangeFileInputStream(fsin, region.getOffset(),
            region.getCompressedSize());
    this.in =
        compressAlgo.createDecompressionStream(bounded, decompressor,
            TFile.getFSInputBufferSize(conf));
  } catch (IOException e) {
    // Do not leak the pooled decompressor if construction fails.
    compressAlgo.returnDecompressor(decompressor);
    throw e;
  }
}
项目:hadoop-2.6.0-cdh5.4.3    文件:BCFile.java   
/**
 * Write-side state for a single compressed block.
 *
 * @param compressionAlgo
 *          The compression algorithm to be used for compression.
 * @param fsOut
 *          Underlying output stream; its current position marks the start
 *          offset of this block.
 * @param fsOutputBuffer
 *          Reusable scratch buffer backing the buffered output stream;
 *          resized to the configured FS output buffer size.
 * @param conf
 *          Configuration used to size the output buffer.
 * @throws IOException
 *           If the compression stream cannot be created; the compressor is
 *           returned to its pool before the exception is rethrown.
 */
public WBlockState(Algorithm compressionAlgo, FSDataOutputStream fsOut,
    BytesWritable fsOutputBuffer, Configuration conf) throws IOException {
  this.compressAlgo = compressionAlgo;
  this.fsOut = fsOut;
  this.posStart = fsOut.getPos();

  fsOutputBuffer.setCapacity(TFile.getFSOutputBufferSize(conf));

  this.fsBufferedOutput =
      new SimpleBufferedOutputStream(this.fsOut, fsOutputBuffer.getBytes());
  this.compressor = compressAlgo.getCompressor();

  try {
    this.out =
        compressAlgo.createCompressionStream(fsBufferedOutput,
            compressor, 0);
  } catch (IOException e) {
    // Avoid leaking the pooled compressor when stream creation fails.
    compressAlgo.returnCompressor(compressor);
    throw e;
  }
}
项目:hadoop-2.6.0-cdh5.4.3    文件:BCFile.java   
/**
 * Creates a {@link BlockAppender} for writing a named meta block.
 *
 * @param name
 *          Unique name of the meta block.
 * @param compressAlgo
 *          Compression algorithm to apply to the block contents.
 * @return An appender the caller uses to write (and must close) the block.
 * @throws IOException
 *           If the write-side block state cannot be created.
 * @throws MetaBlockAlreadyExists
 *           If a meta block with the same name was already written.
 * @throws IllegalStateException
 *           If another block is still open.
 */
private BlockAppender prepareMetaBlock(String name, Algorithm compressAlgo)
    throws IOException, MetaBlockAlreadyExists {
  // Only one block may be in progress at a time.
  if (blkInProgress) {
    throw new IllegalStateException(
        "Cannot create Meta Block until previous block is closed.");
  }

  if (metaIndex.getMetaByName(name) != null) {
    throw new MetaBlockAlreadyExists("name=" + name);
  }

  MetaBlockRegister mbr = new MetaBlockRegister(name, compressAlgo);
  WBlockState wbs =
      new WBlockState(compressAlgo, out, fsOutputBuffer, conf);
  BlockAppender ba = new BlockAppender(mbr, wbs);
  blkInProgress = true;
  metaBlkSeen = true;
  return ba;
}
项目:hadoop-2.6.0-cdh5.4.3    文件:BCFile.java   
/**
 * Read-side state for one compressed block: layers a decompression stream
 * over the block's byte range within the underlying file.
 *
 * @param compressionAlgo algorithm the block was compressed with
 * @param fsin input stream over the whole container file
 * @param region offset and compressed size of this block within the file
 * @param conf configuration used to size the input buffer
 * @throws IOException if the decompression stream cannot be created; the
 *           decompressor is returned to its pool before rethrowing
 */
public RBlockState(Algorithm compressionAlgo, FSDataInputStream fsin,
    BlockRegion region, Configuration conf) throws IOException {
  this.compressAlgo = compressionAlgo;
  this.region = region;
  this.decompressor = compressionAlgo.getDecompressor();

  try {
    // Restrict reads to this block's range, then decompress on top of it.
    BoundedRangeFileInputStream bounded =
        new BoundedRangeFileInputStream(fsin, region.getOffset(),
            region.getCompressedSize());
    this.in =
        compressAlgo.createDecompressionStream(bounded, decompressor,
            TFile.getFSInputBufferSize(conf));
  } catch (IOException e) {
    // Do not leak the pooled decompressor if construction fails.
    compressAlgo.returnDecompressor(decompressor);
    throw e;
  }
}
项目:hadoop-EAR    文件:BCFile.java   
/**
 * Write-side state for a single compressed block.
 *
 * @param compressionAlgo
 *          The compression algorithm to be used for compression.
 * @param fsOut
 *          Underlying output stream; its current position marks the start
 *          offset of this block.
 * @param fsOutputBuffer
 *          Reusable scratch buffer backing the buffered output stream;
 *          resized to the configured FS output buffer size.
 * @param conf
 *          Configuration used to size the output buffer.
 * @throws IOException
 *           If the compression stream cannot be created; the compressor is
 *           returned to its pool before the exception is rethrown.
 */
public WBlockState(Algorithm compressionAlgo, FSDataOutputStream fsOut,
    BytesWritable fsOutputBuffer, Configuration conf) throws IOException {
  this.compressAlgo = compressionAlgo;
  this.fsOut = fsOut;
  this.posStart = fsOut.getPos();

  fsOutputBuffer.setCapacity(TFile.getFSOutputBufferSize(conf));

  this.fsBufferedOutput =
      new SimpleBufferedOutputStream(this.fsOut, fsOutputBuffer.get());
  this.compressor = compressAlgo.getCompressor();

  try {
    this.out =
        compressAlgo.createCompressionStream(fsBufferedOutput,
            compressor, 0);
  } catch (IOException e) {
    // Avoid leaking the pooled compressor when stream creation fails.
    compressAlgo.returnCompressor(compressor);
    throw e;
  }
}
项目:hadoop-EAR    文件:BCFile.java   
/**
 * Creates a {@link BlockAppender} for writing a named meta block.
 *
 * @param name
 *          Unique name of the meta block.
 * @param compressAlgo
 *          Compression algorithm to apply to the block contents.
 * @return An appender the caller uses to write (and must close) the block.
 * @throws IOException
 *           If the write-side block state cannot be created.
 * @throws MetaBlockAlreadyExists
 *           If a meta block with the same name was already written.
 * @throws IllegalStateException
 *           If another block is still open.
 */
private BlockAppender prepareMetaBlock(String name, Algorithm compressAlgo)
    throws IOException, MetaBlockAlreadyExists {
  // Only one block may be in progress at a time.
  if (blkInProgress) {
    throw new IllegalStateException(
        "Cannot create Meta Block until previous block is closed.");
  }

  if (metaIndex.getMetaByName(name) != null) {
    throw new MetaBlockAlreadyExists("name=" + name);
  }

  MetaBlockRegister mbr = new MetaBlockRegister(name, compressAlgo);
  WBlockState wbs =
      new WBlockState(compressAlgo, out, fsOutputBuffer, conf);
  BlockAppender ba = new BlockAppender(mbr, wbs);
  blkInProgress = true;
  metaBlkSeen = true;
  return ba;
}
项目:hadoop-EAR    文件:BCFile.java   
/**
 * Read-side state for one compressed block: layers a decompression stream
 * over the block's byte range within the underlying file.
 *
 * @param compressionAlgo algorithm the block was compressed with
 * @param fsin input stream over the whole container file
 * @param region offset and compressed size of this block within the file
 * @param conf configuration used to size the input buffer
 * @throws IOException if the decompression stream cannot be created; the
 *           decompressor is returned to its pool before rethrowing
 */
public RBlockState(Algorithm compressionAlgo, FSDataInputStream fsin,
    BlockRegion region, Configuration conf) throws IOException {
  this.compressAlgo = compressionAlgo;
  this.region = region;
  this.decompressor = compressionAlgo.getDecompressor();

  try {
    // Restrict reads to this block's range, then decompress on top of it.
    BoundedRangeFileInputStream bounded =
        new BoundedRangeFileInputStream(fsin, region.getOffset(),
            region.getCompressedSize());
    this.in =
        compressAlgo.createDecompressionStream(bounded, decompressor,
            TFile.getFSInputBufferSize(conf));
  } catch (IOException e) {
    // Do not leak the pooled decompressor if construction fails.
    compressAlgo.returnDecompressor(decompressor);
    throw e;
  }
}
项目:apex-malhar    文件:DTBCFile.java   
/**
 * Write-side state for a single compressed block.
 *
 * @param compressionAlgo
 *          The compression algorithm to be used for compression.
 * @param fsOut
 *          Underlying output stream; its current position marks the start
 *          offset of this block.
 * @param fsOutputBuffer
 *          Reusable scratch buffer backing the buffered output stream;
 *          resized to the configured FS output buffer size.
 * @param conf
 *          Configuration used to size the output buffer.
 * @throws IOException
 *           If the compression stream cannot be created; the compressor is
 *           returned to its pool before the exception is rethrown.
 */
public WBlockState(Algorithm compressionAlgo, FSDataOutputStream fsOut,
    BytesWritable fsOutputBuffer, Configuration conf) throws IOException {
  this.compressAlgo = compressionAlgo;
  this.fsOut = fsOut;
  this.posStart = fsOut.getPos();

  fsOutputBuffer.setCapacity(DTFile.getFSOutputBufferSize(conf));

  this.fsBufferedOutput =
      new SimpleBufferedOutputStream(this.fsOut, fsOutputBuffer.getBytes());
  this.compressor = compressAlgo.getCompressor();

  try {
    this.out =
        compressAlgo.createCompressionStream(fsBufferedOutput,
            compressor, 0);
  } catch (IOException e) {
    // Avoid leaking the pooled compressor when stream creation fails.
    compressAlgo.returnCompressor(compressor);
    throw e;
  }
}
项目:apex-malhar    文件:DTBCFile.java   
/**
 * Creates a {@link BlockAppender} for writing a named meta block.
 *
 * @param name
 *          Unique name of the meta block.
 * @param compressAlgo
 *          Compression algorithm to apply to the block contents.
 * @return An appender the caller uses to write (and must close) the block.
 * @throws IOException
 *           If the write-side block state cannot be created.
 * @throws MetaBlockAlreadyExists
 *           If a meta block with the same name was already written.
 * @throws IllegalStateException
 *           If another block is still open.
 */
private BlockAppender prepareMetaBlock(String name, Algorithm compressAlgo)
    throws IOException, MetaBlockAlreadyExists {
  // Only one block may be in progress at a time.
  if (blkInProgress) {
    throw new IllegalStateException(
        "Cannot create Meta Block until previous block is closed.");
  }

  if (metaIndex.getMetaByName(name) != null) {
    throw new MetaBlockAlreadyExists("name=" + name);
  }

  MetaBlockRegister mbr = new MetaBlockRegister(name, compressAlgo);
  WBlockState wbs =
      new WBlockState(compressAlgo, out, fsOutputBuffer, conf);
  BlockAppender ba = new BlockAppender(mbr, wbs);
  blkInProgress = true;
  metaBlkSeen = true;
  return ba;
}
项目:apex-malhar    文件:DTBCFile.java   
/**
 * Read-side state that eagerly decompresses the whole block into the
 * reader's byte-array cache, so later reads are served from memory.
 *
 * @param compressionAlgo algorithm the block was compressed with
 * @param fsin input stream over the whole container file
 * @param region offset and compressed size of this block within the file
 * @param conf configuration used to size the read buffer
 * @param r reader whose {@code baos} is reused as the decompression target
 * @throws IOException if the block cannot be decompressed
 */
public RBlockState(Algorithm compressionAlgo, FSDataInputStream fsin, BlockRegion region, Configuration conf, Reader r) throws IOException
{
  this.compressAlgo = compressionAlgo;
  Decompressor decompressor = compressionAlgo.getDecompressor();
  this.region = region;
  try {
    InputStream in = compressAlgo.createDecompressionStream(new BoundedRangeFileInputStream(fsin, region.getOffset(), region.getCompressedSize()), decompressor, DTFile.getFSInputBufferSize(conf));
    try {
      r.baos.reset();
      byte[] buf = new byte[DTFile.getFSInputBufferSize(conf)];
      int l;
      // Drain the stream completely into the reader's buffer.
      while ((l = in.read(buf)) >= 0) {
        if (l > 0) {
          r.baos.write(buf, 0, l);
        }
      }
    } finally {
      // Previously the stream was never closed on the success path.
      in.close();
    }
    // keep decompressed data into cache
    byte[] blockData = r.baos.toByteArray();
    rbain = new ReusableByteArrayInputStream(blockData);
  } finally {
    // The block is fully cached; always return the pooled decompressor
    // (previously it was returned only on IOException and leaked on success).
    compressAlgo.returnDecompressor(decompressor);
  }
}
项目:hadoop-plus    文件:BCFile.java   
/**
 * Write-side state for a single compressed block.
 *
 * @param compressionAlgo
 *          The compression algorithm to be used for compression.
 * @param fsOut
 *          Underlying output stream; its current position marks the start
 *          offset of this block.
 * @param fsOutputBuffer
 *          Reusable scratch buffer backing the buffered output stream;
 *          resized to the configured FS output buffer size.
 * @param conf
 *          Configuration used to size the output buffer.
 * @throws IOException
 *           If the compression stream cannot be created; the compressor is
 *           returned to its pool before the exception is rethrown.
 */
public WBlockState(Algorithm compressionAlgo, FSDataOutputStream fsOut,
    BytesWritable fsOutputBuffer, Configuration conf) throws IOException {
  this.compressAlgo = compressionAlgo;
  this.fsOut = fsOut;
  this.posStart = fsOut.getPos();

  fsOutputBuffer.setCapacity(TFile.getFSOutputBufferSize(conf));

  this.fsBufferedOutput =
      new SimpleBufferedOutputStream(this.fsOut, fsOutputBuffer.getBytes());
  this.compressor = compressAlgo.getCompressor();

  try {
    this.out =
        compressAlgo.createCompressionStream(fsBufferedOutput,
            compressor, 0);
  } catch (IOException e) {
    // Avoid leaking the pooled compressor when stream creation fails.
    compressAlgo.returnCompressor(compressor);
    throw e;
  }
}
项目:hadoop-plus    文件:BCFile.java   
/**
 * Creates a {@link BlockAppender} for writing a named meta block.
 *
 * @param name
 *          Unique name of the meta block.
 * @param compressAlgo
 *          Compression algorithm to apply to the block contents.
 * @return An appender the caller uses to write (and must close) the block.
 * @throws IOException
 *           If the write-side block state cannot be created.
 * @throws MetaBlockAlreadyExists
 *           If a meta block with the same name was already written.
 * @throws IllegalStateException
 *           If another block is still open.
 */
private BlockAppender prepareMetaBlock(String name, Algorithm compressAlgo)
    throws IOException, MetaBlockAlreadyExists {
  // Only one block may be in progress at a time.
  if (blkInProgress) {
    throw new IllegalStateException(
        "Cannot create Meta Block until previous block is closed.");
  }

  if (metaIndex.getMetaByName(name) != null) {
    throw new MetaBlockAlreadyExists("name=" + name);
  }

  MetaBlockRegister mbr = new MetaBlockRegister(name, compressAlgo);
  WBlockState wbs =
      new WBlockState(compressAlgo, out, fsOutputBuffer, conf);
  BlockAppender ba = new BlockAppender(mbr, wbs);
  blkInProgress = true;
  metaBlkSeen = true;
  return ba;
}
项目:hadoop-plus    文件:BCFile.java   
/**
 * Read-side state for one compressed block: layers a decompression stream
 * over the block's byte range within the underlying file.
 *
 * @param compressionAlgo algorithm the block was compressed with
 * @param fsin input stream over the whole container file
 * @param region offset and compressed size of this block within the file
 * @param conf configuration used to size the input buffer
 * @throws IOException if the decompression stream cannot be created; the
 *           decompressor is returned to its pool before rethrowing
 */
public RBlockState(Algorithm compressionAlgo, FSDataInputStream fsin,
    BlockRegion region, Configuration conf) throws IOException {
  this.compressAlgo = compressionAlgo;
  this.region = region;
  this.decompressor = compressionAlgo.getDecompressor();

  try {
    // Restrict reads to this block's range, then decompress on top of it.
    BoundedRangeFileInputStream bounded =
        new BoundedRangeFileInputStream(fsin, region.getOffset(),
            region.getCompressedSize());
    this.in =
        compressAlgo.createDecompressionStream(bounded, decompressor,
            TFile.getFSInputBufferSize(conf));
  } catch (IOException e) {
    // Do not leak the pooled decompressor if construction fails.
    compressAlgo.returnDecompressor(decompressor);
    throw e;
  }
}
项目:hops    文件:BCFile.java   
/**
 * Write-side state for a single compressed block.
 *
 * @param compressionAlgo
 *          The compression algorithm to be used for compression.
 * @param fsOut
 *          Underlying output stream; its current position marks the start
 *          offset of this block.
 * @param fsOutputBuffer
 *          Reusable scratch buffer backing the buffered output stream;
 *          resized to the configured FS output buffer size.
 * @param conf
 *          Configuration used to size the output buffer.
 * @throws IOException
 *           If the compression stream cannot be created; the compressor is
 *           returned to its pool before the exception is rethrown.
 */
public WBlockState(Algorithm compressionAlgo, FSDataOutputStream fsOut,
    BytesWritable fsOutputBuffer, Configuration conf) throws IOException {
  this.compressAlgo = compressionAlgo;
  this.fsOut = fsOut;
  this.posStart = fsOut.getPos();

  fsOutputBuffer.setCapacity(TFile.getFSOutputBufferSize(conf));

  this.fsBufferedOutput =
      new SimpleBufferedOutputStream(this.fsOut, fsOutputBuffer.getBytes());
  this.compressor = compressAlgo.getCompressor();

  try {
    this.out =
        compressAlgo.createCompressionStream(fsBufferedOutput,
            compressor, 0);
  } catch (IOException e) {
    // Avoid leaking the pooled compressor when stream creation fails.
    compressAlgo.returnCompressor(compressor);
    throw e;
  }
}
项目:hops    文件:BCFile.java   
/**
 * Creates a {@link BlockAppender} for writing a named meta block.
 *
 * @param name
 *          Unique name of the meta block.
 * @param compressAlgo
 *          Compression algorithm to apply to the block contents.
 * @return An appender the caller uses to write (and must close) the block.
 * @throws IOException
 *           If the write-side block state cannot be created.
 * @throws MetaBlockAlreadyExists
 *           If a meta block with the same name was already written.
 * @throws IllegalStateException
 *           If another block is still open.
 */
private BlockAppender prepareMetaBlock(String name, Algorithm compressAlgo)
    throws IOException, MetaBlockAlreadyExists {
  // Only one block may be in progress at a time.
  if (blkInProgress) {
    throw new IllegalStateException(
        "Cannot create Meta Block until previous block is closed.");
  }

  if (metaIndex.getMetaByName(name) != null) {
    throw new MetaBlockAlreadyExists("name=" + name);
  }

  MetaBlockRegister mbr = new MetaBlockRegister(name, compressAlgo);
  WBlockState wbs =
      new WBlockState(compressAlgo, out, fsOutputBuffer, conf);
  BlockAppender ba = new BlockAppender(mbr, wbs);
  blkInProgress = true;
  metaBlkSeen = true;
  return ba;
}
项目:hops    文件:BCFile.java   
/**
 * Read-side state for one compressed block: layers a decompression stream
 * over the block's byte range within the underlying file.
 *
 * @param compressionAlgo algorithm the block was compressed with
 * @param fsin input stream over the whole container file
 * @param region offset and compressed size of this block within the file
 * @param conf configuration used to size the input buffer
 * @throws IOException if the decompression stream cannot be created; the
 *           decompressor is returned to its pool before rethrowing
 */
public RBlockState(Algorithm compressionAlgo, FSDataInputStream fsin,
    BlockRegion region, Configuration conf) throws IOException {
  this.compressAlgo = compressionAlgo;
  this.region = region;
  this.decompressor = compressionAlgo.getDecompressor();

  try {
    // Restrict reads to this block's range, then decompress on top of it.
    BoundedRangeFileInputStream bounded =
        new BoundedRangeFileInputStream(fsin, region.getOffset(),
            region.getCompressedSize());
    this.in =
        compressAlgo.createDecompressionStream(bounded, decompressor,
            TFile.getFSInputBufferSize(conf));
  } catch (IOException e) {
    // Do not leak the pooled decompressor if construction fails.
    compressAlgo.returnDecompressor(decompressor);
    throw e;
  }
}
项目:hadoop-TCP    文件:BCFile.java   
/**
 * Write-side state for a single compressed block.
 *
 * @param compressionAlgo
 *          The compression algorithm to be used for compression.
 * @param fsOut
 *          Underlying output stream; its current position marks the start
 *          offset of this block.
 * @param fsOutputBuffer
 *          Reusable scratch buffer backing the buffered output stream;
 *          resized to the configured FS output buffer size.
 * @param conf
 *          Configuration used to size the output buffer.
 * @throws IOException
 *           If the compression stream cannot be created; the compressor is
 *           returned to its pool before the exception is rethrown.
 */
public WBlockState(Algorithm compressionAlgo, FSDataOutputStream fsOut,
    BytesWritable fsOutputBuffer, Configuration conf) throws IOException {
  this.compressAlgo = compressionAlgo;
  this.fsOut = fsOut;
  this.posStart = fsOut.getPos();

  fsOutputBuffer.setCapacity(TFile.getFSOutputBufferSize(conf));

  this.fsBufferedOutput =
      new SimpleBufferedOutputStream(this.fsOut, fsOutputBuffer.getBytes());
  this.compressor = compressAlgo.getCompressor();

  try {
    this.out =
        compressAlgo.createCompressionStream(fsBufferedOutput,
            compressor, 0);
  } catch (IOException e) {
    // Avoid leaking the pooled compressor when stream creation fails.
    compressAlgo.returnCompressor(compressor);
    throw e;
  }
}
项目:hadoop-TCP    文件:BCFile.java   
/**
 * Creates a {@link BlockAppender} for writing a named meta block.
 *
 * @param name
 *          Unique name of the meta block.
 * @param compressAlgo
 *          Compression algorithm to apply to the block contents.
 * @return An appender the caller uses to write (and must close) the block.
 * @throws IOException
 *           If the write-side block state cannot be created.
 * @throws MetaBlockAlreadyExists
 *           If a meta block with the same name was already written.
 * @throws IllegalStateException
 *           If another block is still open.
 */
private BlockAppender prepareMetaBlock(String name, Algorithm compressAlgo)
    throws IOException, MetaBlockAlreadyExists {
  // Only one block may be in progress at a time.
  if (blkInProgress) {
    throw new IllegalStateException(
        "Cannot create Meta Block until previous block is closed.");
  }

  if (metaIndex.getMetaByName(name) != null) {
    throw new MetaBlockAlreadyExists("name=" + name);
  }

  MetaBlockRegister mbr = new MetaBlockRegister(name, compressAlgo);
  WBlockState wbs =
      new WBlockState(compressAlgo, out, fsOutputBuffer, conf);
  BlockAppender ba = new BlockAppender(mbr, wbs);
  blkInProgress = true;
  metaBlkSeen = true;
  return ba;
}
项目:hadoop-TCP    文件:BCFile.java   
/**
 * Read-side state for one compressed block: layers a decompression stream
 * over the block's byte range within the underlying file.
 *
 * @param compressionAlgo algorithm the block was compressed with
 * @param fsin input stream over the whole container file
 * @param region offset and compressed size of this block within the file
 * @param conf configuration used to size the input buffer
 * @throws IOException if the decompression stream cannot be created; the
 *           decompressor is returned to its pool before rethrowing
 */
public RBlockState(Algorithm compressionAlgo, FSDataInputStream fsin,
    BlockRegion region, Configuration conf) throws IOException {
  this.compressAlgo = compressionAlgo;
  this.region = region;
  this.decompressor = compressionAlgo.getDecompressor();

  try {
    // Restrict reads to this block's range, then decompress on top of it.
    BoundedRangeFileInputStream bounded =
        new BoundedRangeFileInputStream(fsin, region.getOffset(),
            region.getCompressedSize());
    this.in =
        compressAlgo.createDecompressionStream(bounded, decompressor,
            TFile.getFSInputBufferSize(conf));
  } catch (IOException e) {
    // Do not leak the pooled decompressor if construction fails.
    compressAlgo.returnDecompressor(decompressor);
    throw e;
  }
}
项目:hadoop-on-lustre    文件:BCFile.java   
/**
 * Write-side state for a single compressed block.
 *
 * @param compressionAlgo
 *          The compression algorithm to be used for compression.
 * @param fsOut
 *          Underlying output stream; its current position marks the start
 *          offset of this block.
 * @param fsOutputBuffer
 *          Reusable scratch buffer backing the buffered output stream;
 *          resized to the configured FS output buffer size.
 * @param conf
 *          Configuration used to size the output buffer.
 * @throws IOException
 *           If the compression stream cannot be created; the compressor is
 *           returned to its pool before the exception is rethrown.
 */
public WBlockState(Algorithm compressionAlgo, FSDataOutputStream fsOut,
    BytesWritable fsOutputBuffer, Configuration conf) throws IOException {
  this.compressAlgo = compressionAlgo;
  this.fsOut = fsOut;
  this.posStart = fsOut.getPos();

  fsOutputBuffer.setCapacity(TFile.getFSOutputBufferSize(conf));

  this.fsBufferedOutput =
      new SimpleBufferedOutputStream(this.fsOut, fsOutputBuffer.get());
  this.compressor = compressAlgo.getCompressor();

  try {
    this.out =
        compressAlgo.createCompressionStream(fsBufferedOutput,
            compressor, 0);
  } catch (IOException e) {
    // Avoid leaking the pooled compressor when stream creation fails.
    compressAlgo.returnCompressor(compressor);
    throw e;
  }
}
项目:hadoop-on-lustre    文件:BCFile.java   
/**
 * Creates a {@link BlockAppender} for writing a named meta block.
 *
 * @param name
 *          Unique name of the meta block.
 * @param compressAlgo
 *          Compression algorithm to apply to the block contents.
 * @return An appender the caller uses to write (and must close) the block.
 * @throws IOException
 *           If the write-side block state cannot be created.
 * @throws MetaBlockAlreadyExists
 *           If a meta block with the same name was already written.
 * @throws IllegalStateException
 *           If another block is still open.
 */
private BlockAppender prepareMetaBlock(String name, Algorithm compressAlgo)
    throws IOException, MetaBlockAlreadyExists {
  // Only one block may be in progress at a time.
  if (blkInProgress) {
    throw new IllegalStateException(
        "Cannot create Meta Block until previous block is closed.");
  }

  if (metaIndex.getMetaByName(name) != null) {
    throw new MetaBlockAlreadyExists("name=" + name);
  }

  MetaBlockRegister mbr = new MetaBlockRegister(name, compressAlgo);
  WBlockState wbs =
      new WBlockState(compressAlgo, out, fsOutputBuffer, conf);
  BlockAppender ba = new BlockAppender(mbr, wbs);
  blkInProgress = true;
  metaBlkSeen = true;
  return ba;
}
项目:hadoop-on-lustre    文件:BCFile.java   
/**
 * Read-side state for one compressed block: layers a decompression stream
 * over the block's byte range within the underlying file.
 *
 * @param compressionAlgo algorithm the block was compressed with
 * @param fsin input stream over the whole container file
 * @param region offset and compressed size of this block within the file
 * @param conf configuration used to size the input buffer
 * @throws IOException if the decompression stream cannot be created; the
 *           decompressor is returned to its pool before rethrowing
 */
public RBlockState(Algorithm compressionAlgo, FSDataInputStream fsin,
    BlockRegion region, Configuration conf) throws IOException {
  this.compressAlgo = compressionAlgo;
  this.region = region;
  this.decompressor = compressionAlgo.getDecompressor();

  try {
    // Restrict reads to this block's range, then decompress on top of it.
    BoundedRangeFileInputStream bounded =
        new BoundedRangeFileInputStream(fsin, region.getOffset(),
            region.getCompressedSize());
    this.in =
        compressAlgo.createDecompressionStream(bounded, decompressor,
            TFile.getFSInputBufferSize(conf));
  } catch (IOException e) {
    // Do not leak the pooled decompressor if construction fails.
    compressAlgo.returnDecompressor(decompressor);
    throw e;
  }
}
项目:hardfs    文件:BCFile.java   
/**
 * Write-side state for a single compressed block.
 *
 * @param compressionAlgo
 *          The compression algorithm to be used for compression.
 * @param fsOut
 *          Underlying output stream; its current position marks the start
 *          offset of this block.
 * @param fsOutputBuffer
 *          Reusable scratch buffer backing the buffered output stream;
 *          resized to the configured FS output buffer size.
 * @param conf
 *          Configuration used to size the output buffer.
 * @throws IOException
 *           If the compression stream cannot be created; the compressor is
 *           returned to its pool before the exception is rethrown.
 */
public WBlockState(Algorithm compressionAlgo, FSDataOutputStream fsOut,
    BytesWritable fsOutputBuffer, Configuration conf) throws IOException {
  this.compressAlgo = compressionAlgo;
  this.fsOut = fsOut;
  this.posStart = fsOut.getPos();

  fsOutputBuffer.setCapacity(TFile.getFSOutputBufferSize(conf));

  this.fsBufferedOutput =
      new SimpleBufferedOutputStream(this.fsOut, fsOutputBuffer.getBytes());
  this.compressor = compressAlgo.getCompressor();

  try {
    this.out =
        compressAlgo.createCompressionStream(fsBufferedOutput,
            compressor, 0);
  } catch (IOException e) {
    // Avoid leaking the pooled compressor when stream creation fails.
    compressAlgo.returnCompressor(compressor);
    throw e;
  }
}
项目:hardfs    文件:BCFile.java   
/**
 * Creates a {@link BlockAppender} for writing a named meta block.
 *
 * @param name
 *          Unique name of the meta block.
 * @param compressAlgo
 *          Compression algorithm to apply to the block contents.
 * @return An appender the caller uses to write (and must close) the block.
 * @throws IOException
 *           If the write-side block state cannot be created.
 * @throws MetaBlockAlreadyExists
 *           If a meta block with the same name was already written.
 * @throws IllegalStateException
 *           If another block is still open.
 */
private BlockAppender prepareMetaBlock(String name, Algorithm compressAlgo)
    throws IOException, MetaBlockAlreadyExists {
  // Only one block may be in progress at a time.
  if (blkInProgress) {
    throw new IllegalStateException(
        "Cannot create Meta Block until previous block is closed.");
  }

  if (metaIndex.getMetaByName(name) != null) {
    throw new MetaBlockAlreadyExists("name=" + name);
  }

  MetaBlockRegister mbr = new MetaBlockRegister(name, compressAlgo);
  WBlockState wbs =
      new WBlockState(compressAlgo, out, fsOutputBuffer, conf);
  BlockAppender ba = new BlockAppender(mbr, wbs);
  blkInProgress = true;
  metaBlkSeen = true;
  return ba;
}
项目:hardfs    文件:BCFile.java   
/**
 * Read-side state for one compressed block: layers a decompression stream
 * over the block's byte range within the underlying file.
 *
 * @param compressionAlgo algorithm the block was compressed with
 * @param fsin input stream over the whole container file
 * @param region offset and compressed size of this block within the file
 * @param conf configuration used to size the input buffer
 * @throws IOException if the decompression stream cannot be created; the
 *           decompressor is returned to its pool before rethrowing
 */
public RBlockState(Algorithm compressionAlgo, FSDataInputStream fsin,
    BlockRegion region, Configuration conf) throws IOException {
  this.compressAlgo = compressionAlgo;
  this.region = region;
  this.decompressor = compressionAlgo.getDecompressor();

  try {
    // Restrict reads to this block's range, then decompress on top of it.
    BoundedRangeFileInputStream bounded =
        new BoundedRangeFileInputStream(fsin, region.getOffset(),
            region.getCompressedSize());
    this.in =
        compressAlgo.createDecompressionStream(bounded, decompressor,
            TFile.getFSInputBufferSize(conf));
  } catch (IOException e) {
    // Do not leak the pooled decompressor if construction fails.
    compressAlgo.returnDecompressor(decompressor);
    throw e;
  }
}
项目:hadoop-on-lustre2    文件:BCFile.java   
/**
 * Write-side state for a single compressed block.
 *
 * @param compressionAlgo
 *          The compression algorithm to be used for compression.
 * @param fsOut
 *          Underlying output stream; its current position marks the start
 *          offset of this block.
 * @param fsOutputBuffer
 *          Reusable scratch buffer backing the buffered output stream;
 *          resized to the configured FS output buffer size.
 * @param conf
 *          Configuration used to size the output buffer.
 * @throws IOException
 *           If the compression stream cannot be created; the compressor is
 *           returned to its pool before the exception is rethrown.
 */
public WBlockState(Algorithm compressionAlgo, FSDataOutputStream fsOut,
    BytesWritable fsOutputBuffer, Configuration conf) throws IOException {
  this.compressAlgo = compressionAlgo;
  this.fsOut = fsOut;
  this.posStart = fsOut.getPos();

  fsOutputBuffer.setCapacity(TFile.getFSOutputBufferSize(conf));

  this.fsBufferedOutput =
      new SimpleBufferedOutputStream(this.fsOut, fsOutputBuffer.getBytes());
  this.compressor = compressAlgo.getCompressor();

  try {
    this.out =
        compressAlgo.createCompressionStream(fsBufferedOutput,
            compressor, 0);
  } catch (IOException e) {
    // Avoid leaking the pooled compressor when stream creation fails.
    compressAlgo.returnCompressor(compressor);
    throw e;
  }
}
项目:hadoop-on-lustre2    文件:BCFile.java   
/**
 * Creates a {@link BlockAppender} for writing a named meta block.
 *
 * @param name
 *          Unique name of the meta block.
 * @param compressAlgo
 *          Compression algorithm to apply to the block contents.
 * @return An appender the caller uses to write (and must close) the block.
 * @throws IOException
 *           If the write-side block state cannot be created.
 * @throws MetaBlockAlreadyExists
 *           If a meta block with the same name was already written.
 * @throws IllegalStateException
 *           If another block is still open.
 */
private BlockAppender prepareMetaBlock(String name, Algorithm compressAlgo)
    throws IOException, MetaBlockAlreadyExists {
  // Only one block may be in progress at a time.
  if (blkInProgress) {
    throw new IllegalStateException(
        "Cannot create Meta Block until previous block is closed.");
  }

  if (metaIndex.getMetaByName(name) != null) {
    throw new MetaBlockAlreadyExists("name=" + name);
  }

  MetaBlockRegister mbr = new MetaBlockRegister(name, compressAlgo);
  WBlockState wbs =
      new WBlockState(compressAlgo, out, fsOutputBuffer, conf);
  BlockAppender ba = new BlockAppender(mbr, wbs);
  blkInProgress = true;
  metaBlkSeen = true;
  return ba;
}
项目:hadoop-on-lustre2    文件:BCFile.java   
/**
 * Read-side state for one compressed block: layers a decompression stream
 * over the block's byte range within the underlying file.
 *
 * @param compressionAlgo algorithm the block was compressed with
 * @param fsin input stream over the whole container file
 * @param region offset and compressed size of this block within the file
 * @param conf configuration used to size the input buffer
 * @throws IOException if the decompression stream cannot be created; the
 *           decompressor is returned to its pool before rethrowing
 */
public RBlockState(Algorithm compressionAlgo, FSDataInputStream fsin,
    BlockRegion region, Configuration conf) throws IOException {
  this.compressAlgo = compressionAlgo;
  this.region = region;
  this.decompressor = compressionAlgo.getDecompressor();

  try {
    // Restrict reads to this block's range, then decompress on top of it.
    BoundedRangeFileInputStream bounded =
        new BoundedRangeFileInputStream(fsin, region.getOffset(),
            region.getCompressedSize());
    this.in =
        compressAlgo.createDecompressionStream(bounded, decompressor,
            TFile.getFSInputBufferSize(conf));
  } catch (IOException e) {
    // Do not leak the pooled decompressor if construction fails.
    compressAlgo.returnDecompressor(decompressor);
    throw e;
  }
}
项目:RDFS    文件:BCFile.java   
/**
 * Write-side state for a single compressed block.
 *
 * @param compressionAlgo
 *          The compression algorithm to be used for compression.
 * @param fsOut
 *          Underlying output stream; its current position marks the start
 *          offset of this block.
 * @param fsOutputBuffer
 *          Reusable scratch buffer backing the buffered output stream;
 *          resized to the configured FS output buffer size.
 * @param conf
 *          Configuration used to size the output buffer.
 * @throws IOException
 *           If the compression stream cannot be created; the compressor is
 *           returned to its pool before the exception is rethrown.
 */
public WBlockState(Algorithm compressionAlgo, FSDataOutputStream fsOut,
    BytesWritable fsOutputBuffer, Configuration conf) throws IOException {
  this.compressAlgo = compressionAlgo;
  this.fsOut = fsOut;
  this.posStart = fsOut.getPos();

  fsOutputBuffer.setCapacity(TFile.getFSOutputBufferSize(conf));

  this.fsBufferedOutput =
      new SimpleBufferedOutputStream(this.fsOut, fsOutputBuffer.get());
  this.compressor = compressAlgo.getCompressor();

  try {
    this.out =
        compressAlgo.createCompressionStream(fsBufferedOutput,
            compressor, 0);
  } catch (IOException e) {
    // Avoid leaking the pooled compressor when stream creation fails.
    compressAlgo.returnCompressor(compressor);
    throw e;
  }
}