Java 类org.apache.hadoop.io.compress.zlib.ZlibCompressor.CompressionLevel 实例源码

项目:hadoop-oss    文件:TestZlibCompressorDecompressor.java   
@Test
public void testCompressorDecompressorWithExeedBufferLimit() throws Exception {
  // Round-trip 100 KiB of generated data through a zlib compressor/decompressor
  // pair whose direct buffers are exactly the payload size, exercising the
  // buffer-limit boundary under several compress/decompress strategies.
  final int byteSize = 100 * 1024;
  byte[] rawData = generate(byteSize);
  // Let any failure propagate: JUnit then reports the full stack trace,
  // instead of a fail(...) message that discards it.
  CompressDecompressTester.of(rawData)
      .withCompressDecompressPair(
          new ZlibCompressor(
              org.apache.hadoop.io.compress.zlib.ZlibCompressor.CompressionLevel.BEST_COMPRESSION,
              CompressionStrategy.DEFAULT_STRATEGY,
              org.apache.hadoop.io.compress.zlib.ZlibCompressor.CompressionHeader.DEFAULT_HEADER,
              byteSize),
          new ZlibDecompressor(
              org.apache.hadoop.io.compress.zlib.ZlibDecompressor.CompressionHeader.DEFAULT_HEADER,
              byteSize))
      .withTestCases(ImmutableSet.of(
          CompressionTestStrategy.COMPRESS_DECOMPRESS_SINGLE_BLOCK,
          CompressionTestStrategy.COMPRESS_DECOMPRESS_BLOCK,
          CompressionTestStrategy.COMPRESS_DECOMPRESS_ERRORS,
          CompressionTestStrategy.COMPRESS_DECOMPRESS_WITH_EMPTY_STREAM))
      .test();
}
项目:hadoop-oss    文件:TestZlibCompressorDecompressor.java   
@Test
public void testZlibFactory() {
  // Verify ZlibFactory's configuration round-trip: defaults are reported
  // first, then explicitly set level/strategy values must be read back.
  Configuration cfg = new Configuration();

  assertTrue("testZlibFactory compression level error !!!",
      CompressionLevel.DEFAULT_COMPRESSION == ZlibFactory
          .getCompressionLevel(cfg));

  assertTrue("testZlibFactory compression strategy error !!!",
      CompressionStrategy.DEFAULT_STRATEGY == ZlibFactory
          .getCompressionStrategy(cfg));

  ZlibFactory.setCompressionLevel(cfg, CompressionLevel.BEST_COMPRESSION);
  // This asserts the compression LEVEL; the message previously said
  // "strategy" (copy-paste error) and would mislead on failure.
  assertTrue("testZlibFactory compression level error !!!",
      CompressionLevel.BEST_COMPRESSION == ZlibFactory
          .getCompressionLevel(cfg));

  ZlibFactory.setCompressionStrategy(cfg, CompressionStrategy.FILTERED);
  assertTrue("testZlibFactory compression strategy error !!!",
      CompressionStrategy.FILTERED == ZlibFactory.getCompressionStrategy(cfg));
}
项目:hadoop    文件:TestZlibCompressorDecompressor.java   
@Test
public void testCompressorDecompressorWithExeedBufferLimit() throws Exception {
  // Round-trip 100 KiB of generated data through a zlib compressor/decompressor
  // pair whose direct buffers are exactly the payload size, exercising the
  // buffer-limit boundary under several compress/decompress strategies.
  final int byteSize = 100 * 1024;
  byte[] rawData = generate(byteSize);
  // Let any failure propagate: JUnit then reports the full stack trace,
  // instead of a fail(...) message that discards it.
  CompressDecompressTester.of(rawData)
      .withCompressDecompressPair(
          new ZlibCompressor(
              org.apache.hadoop.io.compress.zlib.ZlibCompressor.CompressionLevel.BEST_COMPRESSION,
              CompressionStrategy.DEFAULT_STRATEGY,
              org.apache.hadoop.io.compress.zlib.ZlibCompressor.CompressionHeader.DEFAULT_HEADER,
              byteSize),
          new ZlibDecompressor(
              org.apache.hadoop.io.compress.zlib.ZlibDecompressor.CompressionHeader.DEFAULT_HEADER,
              byteSize))
      .withTestCases(ImmutableSet.of(
          CompressionTestStrategy.COMPRESS_DECOMPRESS_SINGLE_BLOCK,
          CompressionTestStrategy.COMPRESS_DECOMPRESS_BLOCK,
          CompressionTestStrategy.COMPRESS_DECOMPRESS_ERRORS,
          CompressionTestStrategy.COMPRESS_DECOMPRESS_WITH_EMPTY_STREAM))
      .test();
}
项目:hadoop    文件:TestZlibCompressorDecompressor.java   
@Test
public void testZlibFactory() {
  // Verify ZlibFactory's configuration round-trip: defaults are reported
  // first, then explicitly set level/strategy values must be read back.
  Configuration cfg = new Configuration();

  assertTrue("testZlibFactory compression level error !!!",
      CompressionLevel.DEFAULT_COMPRESSION == ZlibFactory
          .getCompressionLevel(cfg));

  assertTrue("testZlibFactory compression strategy error !!!",
      CompressionStrategy.DEFAULT_STRATEGY == ZlibFactory
          .getCompressionStrategy(cfg));

  ZlibFactory.setCompressionLevel(cfg, CompressionLevel.BEST_COMPRESSION);
  // This asserts the compression LEVEL; the message previously said
  // "strategy" (copy-paste error) and would mislead on failure.
  assertTrue("testZlibFactory compression level error !!!",
      CompressionLevel.BEST_COMPRESSION == ZlibFactory
          .getCompressionLevel(cfg));

  ZlibFactory.setCompressionStrategy(cfg, CompressionStrategy.FILTERED);
  assertTrue("testZlibFactory compression strategy error !!!",
      CompressionStrategy.FILTERED == ZlibFactory.getCompressionStrategy(cfg));
}
项目:aliyun-oss-hadoop-fs    文件:TestZlibCompressorDecompressor.java   
@Test
public void testCompressorDecompressorWithExeedBufferLimit() throws Exception {
  // Round-trip 100 KiB of generated data through a zlib compressor/decompressor
  // pair whose direct buffers are exactly the payload size, exercising the
  // buffer-limit boundary under several compress/decompress strategies.
  final int byteSize = 100 * 1024;
  byte[] rawData = generate(byteSize);
  // Let any failure propagate: JUnit then reports the full stack trace,
  // instead of a fail(...) message that discards it.
  CompressDecompressTester.of(rawData)
      .withCompressDecompressPair(
          new ZlibCompressor(
              org.apache.hadoop.io.compress.zlib.ZlibCompressor.CompressionLevel.BEST_COMPRESSION,
              CompressionStrategy.DEFAULT_STRATEGY,
              org.apache.hadoop.io.compress.zlib.ZlibCompressor.CompressionHeader.DEFAULT_HEADER,
              byteSize),
          new ZlibDecompressor(
              org.apache.hadoop.io.compress.zlib.ZlibDecompressor.CompressionHeader.DEFAULT_HEADER,
              byteSize))
      .withTestCases(ImmutableSet.of(
          CompressionTestStrategy.COMPRESS_DECOMPRESS_SINGLE_BLOCK,
          CompressionTestStrategy.COMPRESS_DECOMPRESS_BLOCK,
          CompressionTestStrategy.COMPRESS_DECOMPRESS_ERRORS,
          CompressionTestStrategy.COMPRESS_DECOMPRESS_WITH_EMPTY_STREAM))
      .test();
}
项目:aliyun-oss-hadoop-fs    文件:TestZlibCompressorDecompressor.java   
@Test
public void testZlibFactory() {
  // Verify ZlibFactory's configuration round-trip: defaults are reported
  // first, then explicitly set level/strategy values must be read back.
  Configuration cfg = new Configuration();

  assertTrue("testZlibFactory compression level error !!!",
      CompressionLevel.DEFAULT_COMPRESSION == ZlibFactory
          .getCompressionLevel(cfg));

  assertTrue("testZlibFactory compression strategy error !!!",
      CompressionStrategy.DEFAULT_STRATEGY == ZlibFactory
          .getCompressionStrategy(cfg));

  ZlibFactory.setCompressionLevel(cfg, CompressionLevel.BEST_COMPRESSION);
  // This asserts the compression LEVEL; the message previously said
  // "strategy" (copy-paste error) and would mislead on failure.
  assertTrue("testZlibFactory compression level error !!!",
      CompressionLevel.BEST_COMPRESSION == ZlibFactory
          .getCompressionLevel(cfg));

  ZlibFactory.setCompressionStrategy(cfg, CompressionStrategy.FILTERED);
  assertTrue("testZlibFactory compression strategy error !!!",
      CompressionStrategy.FILTERED == ZlibFactory.getCompressionStrategy(cfg));
}
项目:big-c    文件:TestZlibCompressorDecompressor.java   
@Test
public void testCompressorDecompressorWithExeedBufferLimit() throws Exception {
  // Round-trip 100 KiB of generated data through a zlib compressor/decompressor
  // pair whose direct buffers are exactly the payload size, exercising the
  // buffer-limit boundary under several compress/decompress strategies.
  final int byteSize = 100 * 1024;
  byte[] rawData = generate(byteSize);
  // Let any failure propagate: JUnit then reports the full stack trace,
  // instead of a fail(...) message that discards it.
  CompressDecompressTester.of(rawData)
      .withCompressDecompressPair(
          new ZlibCompressor(
              org.apache.hadoop.io.compress.zlib.ZlibCompressor.CompressionLevel.BEST_COMPRESSION,
              CompressionStrategy.DEFAULT_STRATEGY,
              org.apache.hadoop.io.compress.zlib.ZlibCompressor.CompressionHeader.DEFAULT_HEADER,
              byteSize),
          new ZlibDecompressor(
              org.apache.hadoop.io.compress.zlib.ZlibDecompressor.CompressionHeader.DEFAULT_HEADER,
              byteSize))
      .withTestCases(ImmutableSet.of(
          CompressionTestStrategy.COMPRESS_DECOMPRESS_SINGLE_BLOCK,
          CompressionTestStrategy.COMPRESS_DECOMPRESS_BLOCK,
          CompressionTestStrategy.COMPRESS_DECOMPRESS_ERRORS,
          CompressionTestStrategy.COMPRESS_DECOMPRESS_WITH_EMPTY_STREAM))
      .test();
}
项目:big-c    文件:TestZlibCompressorDecompressor.java   
@Test
public void testZlibFactory() {
  // Verify ZlibFactory's configuration round-trip: defaults are reported
  // first, then explicitly set level/strategy values must be read back.
  Configuration cfg = new Configuration();

  assertTrue("testZlibFactory compression level error !!!",
      CompressionLevel.DEFAULT_COMPRESSION == ZlibFactory
          .getCompressionLevel(cfg));

  assertTrue("testZlibFactory compression strategy error !!!",
      CompressionStrategy.DEFAULT_STRATEGY == ZlibFactory
          .getCompressionStrategy(cfg));

  ZlibFactory.setCompressionLevel(cfg, CompressionLevel.BEST_COMPRESSION);
  // This asserts the compression LEVEL; the message previously said
  // "strategy" (copy-paste error) and would mislead on failure.
  assertTrue("testZlibFactory compression level error !!!",
      CompressionLevel.BEST_COMPRESSION == ZlibFactory
          .getCompressionLevel(cfg));

  ZlibFactory.setCompressionStrategy(cfg, CompressionStrategy.FILTERED);
  assertTrue("testZlibFactory compression strategy error !!!",
      CompressionStrategy.FILTERED == ZlibFactory.getCompressionStrategy(cfg));
}
项目:hadoop-2.6.0-cdh5.4.3    文件:TestZlibCompressorDecompressor.java   
@Test
public void testCompressorDecompressorWithExeedBufferLimit() throws Exception {
  // Round-trip 100 KiB of generated data through a zlib compressor/decompressor
  // pair whose direct buffers are exactly the payload size, exercising the
  // buffer-limit boundary under several compress/decompress strategies.
  final int byteSize = 100 * 1024;
  byte[] rawData = generate(byteSize);
  // Let any failure propagate: JUnit then reports the full stack trace,
  // instead of a fail(...) message that discards it.
  CompressDecompressTester.of(rawData)
      .withCompressDecompressPair(
          new ZlibCompressor(
              org.apache.hadoop.io.compress.zlib.ZlibCompressor.CompressionLevel.BEST_COMPRESSION,
              CompressionStrategy.DEFAULT_STRATEGY,
              org.apache.hadoop.io.compress.zlib.ZlibCompressor.CompressionHeader.DEFAULT_HEADER,
              byteSize),
          new ZlibDecompressor(
              org.apache.hadoop.io.compress.zlib.ZlibDecompressor.CompressionHeader.DEFAULT_HEADER,
              byteSize))
      .withTestCases(ImmutableSet.of(
          CompressionTestStrategy.COMPRESS_DECOMPRESS_SINGLE_BLOCK,
          CompressionTestStrategy.COMPRESS_DECOMPRESS_BLOCK,
          CompressionTestStrategy.COMPRESS_DECOMPRESS_ERRORS,
          CompressionTestStrategy.COMPRESS_DECOMPRESS_WITH_EMPTY_STREAM))
      .test();
}
项目:hadoop-2.6.0-cdh5.4.3    文件:TestZlibCompressorDecompressor.java   
@Test
public void testZlibFactory() {
  // Verify ZlibFactory's configuration round-trip: defaults are reported
  // first, then explicitly set level/strategy values must be read back.
  Configuration cfg = new Configuration();

  assertTrue("testZlibFactory compression level error !!!",
      CompressionLevel.DEFAULT_COMPRESSION == ZlibFactory
          .getCompressionLevel(cfg));

  assertTrue("testZlibFactory compression strategy error !!!",
      CompressionStrategy.DEFAULT_STRATEGY == ZlibFactory
          .getCompressionStrategy(cfg));

  ZlibFactory.setCompressionLevel(cfg, CompressionLevel.BEST_COMPRESSION);
  // This asserts the compression LEVEL; the message previously said
  // "strategy" (copy-paste error) and would mislead on failure.
  assertTrue("testZlibFactory compression level error !!!",
      CompressionLevel.BEST_COMPRESSION == ZlibFactory
          .getCompressionLevel(cfg));

  ZlibFactory.setCompressionStrategy(cfg, CompressionStrategy.FILTERED);
  assertTrue("testZlibFactory compression strategy error !!!",
      CompressionStrategy.FILTERED == ZlibFactory.getCompressionStrategy(cfg));
}
项目:hadoop-plus    文件:TestZlibCompressorDecompressor.java   
@Test
public void testCompressorDecompressorWithExeedBufferLimit() throws Exception {
  // Round-trip 100 KiB of generated data through a zlib compressor/decompressor
  // pair whose direct buffers are exactly the payload size, exercising the
  // buffer-limit boundary under several compress/decompress strategies.
  final int byteSize = 100 * 1024;
  byte[] rawData = generate(byteSize);
  // Let any failure propagate: JUnit then reports the full stack trace,
  // instead of a fail(...) message that discards it.
  CompressDecompressTester.of(rawData)
      .withCompressDecompressPair(
          new ZlibCompressor(
              org.apache.hadoop.io.compress.zlib.ZlibCompressor.CompressionLevel.BEST_COMPRESSION,
              CompressionStrategy.DEFAULT_STRATEGY,
              org.apache.hadoop.io.compress.zlib.ZlibCompressor.CompressionHeader.DEFAULT_HEADER,
              byteSize),
          new ZlibDecompressor(
              org.apache.hadoop.io.compress.zlib.ZlibDecompressor.CompressionHeader.DEFAULT_HEADER,
              byteSize))
      .withTestCases(ImmutableSet.of(
          CompressionTestStrategy.COMPRESS_DECOMPRESS_SINGLE_BLOCK,
          CompressionTestStrategy.COMPRESS_DECOMPRESS_BLOCK,
          CompressionTestStrategy.COMPRESS_DECOMPRESS_ERRORS,
          CompressionTestStrategy.COMPRESS_DECOMPRESS_WITH_EMPTY_STREAM))
      .test();
}
项目:hadoop-plus    文件:TestZlibCompressorDecompressor.java   
@Test
public void testZlibFactory() {
  // Verify ZlibFactory's configuration round-trip: defaults are reported
  // first, then explicitly set level/strategy values must be read back.
  Configuration cfg = new Configuration();

  assertTrue("testZlibFactory compression level error !!!",
      CompressionLevel.DEFAULT_COMPRESSION == ZlibFactory
          .getCompressionLevel(cfg));

  assertTrue("testZlibFactory compression strategy error !!!",
      CompressionStrategy.DEFAULT_STRATEGY == ZlibFactory
          .getCompressionStrategy(cfg));

  ZlibFactory.setCompressionLevel(cfg, CompressionLevel.BEST_COMPRESSION);
  // This asserts the compression LEVEL; the message previously said
  // "strategy" (copy-paste error) and would mislead on failure.
  assertTrue("testZlibFactory compression level error !!!",
      CompressionLevel.BEST_COMPRESSION == ZlibFactory
          .getCompressionLevel(cfg));

  ZlibFactory.setCompressionStrategy(cfg, CompressionStrategy.FILTERED);
  assertTrue("testZlibFactory compression strategy error !!!",
      CompressionStrategy.FILTERED == ZlibFactory.getCompressionStrategy(cfg));
}
项目:hops    文件:TestZlibCompressorDecompressor.java   
@Test
public void testCompressorDecompressorWithExeedBufferLimit() throws Exception {
  // Round-trip 100 KiB of generated data through a zlib compressor/decompressor
  // pair whose direct buffers are exactly the payload size, exercising the
  // buffer-limit boundary under several compress/decompress strategies.
  final int byteSize = 100 * 1024;
  byte[] rawData = generate(byteSize);
  // Let any failure propagate: JUnit then reports the full stack trace,
  // instead of a fail(...) message that discards it.
  CompressDecompressTester.of(rawData)
      .withCompressDecompressPair(
          new ZlibCompressor(
              org.apache.hadoop.io.compress.zlib.ZlibCompressor.CompressionLevel.BEST_COMPRESSION,
              CompressionStrategy.DEFAULT_STRATEGY,
              org.apache.hadoop.io.compress.zlib.ZlibCompressor.CompressionHeader.DEFAULT_HEADER,
              byteSize),
          new ZlibDecompressor(
              org.apache.hadoop.io.compress.zlib.ZlibDecompressor.CompressionHeader.DEFAULT_HEADER,
              byteSize))
      .withTestCases(ImmutableSet.of(
          CompressionTestStrategy.COMPRESS_DECOMPRESS_SINGLE_BLOCK,
          CompressionTestStrategy.COMPRESS_DECOMPRESS_BLOCK,
          CompressionTestStrategy.COMPRESS_DECOMPRESS_ERRORS,
          CompressionTestStrategy.COMPRESS_DECOMPRESS_WITH_EMPTY_STREAM))
      .test();
}
项目:hops    文件:TestZlibCompressorDecompressor.java   
@Test
public void testZlibFactory() {
  // Verify ZlibFactory's configuration round-trip: defaults are reported
  // first, then explicitly set level/strategy values must be read back.
  Configuration cfg = new Configuration();

  assertTrue("testZlibFactory compression level error !!!",
      CompressionLevel.DEFAULT_COMPRESSION == ZlibFactory
          .getCompressionLevel(cfg));

  assertTrue("testZlibFactory compression strategy error !!!",
      CompressionStrategy.DEFAULT_STRATEGY == ZlibFactory
          .getCompressionStrategy(cfg));

  ZlibFactory.setCompressionLevel(cfg, CompressionLevel.BEST_COMPRESSION);
  // This asserts the compression LEVEL; the message previously said
  // "strategy" (copy-paste error) and would mislead on failure.
  assertTrue("testZlibFactory compression level error !!!",
      CompressionLevel.BEST_COMPRESSION == ZlibFactory
          .getCompressionLevel(cfg));

  ZlibFactory.setCompressionStrategy(cfg, CompressionStrategy.FILTERED);
  assertTrue("testZlibFactory compression strategy error !!!",
      CompressionStrategy.FILTERED == ZlibFactory.getCompressionStrategy(cfg));
}
项目:hadoop-TCP    文件:TestZlibCompressorDecompressor.java   
@Test
public void testCompressorDecompressorWithExeedBufferLimit() throws Exception {
  // Round-trip 100 KiB of generated data through a zlib compressor/decompressor
  // pair whose direct buffers are exactly the payload size, exercising the
  // buffer-limit boundary under several compress/decompress strategies.
  final int byteSize = 100 * 1024;
  byte[] rawData = generate(byteSize);
  // Let any failure propagate: JUnit then reports the full stack trace,
  // instead of a fail(...) message that discards it.
  CompressDecompressTester.of(rawData)
      .withCompressDecompressPair(
          new ZlibCompressor(
              org.apache.hadoop.io.compress.zlib.ZlibCompressor.CompressionLevel.BEST_COMPRESSION,
              CompressionStrategy.DEFAULT_STRATEGY,
              org.apache.hadoop.io.compress.zlib.ZlibCompressor.CompressionHeader.DEFAULT_HEADER,
              byteSize),
          new ZlibDecompressor(
              org.apache.hadoop.io.compress.zlib.ZlibDecompressor.CompressionHeader.DEFAULT_HEADER,
              byteSize))
      .withTestCases(ImmutableSet.of(
          CompressionTestStrategy.COMPRESS_DECOMPRESS_SINGLE_BLOCK,
          CompressionTestStrategy.COMPRESS_DECOMPRESS_BLOCK,
          CompressionTestStrategy.COMPRESS_DECOMPRESS_ERRORS,
          CompressionTestStrategy.COMPRESS_DECOMPRESS_WITH_EMPTY_STREAM))
      .test();
}
项目:hadoop-TCP    文件:TestZlibCompressorDecompressor.java   
@Test
public void testZlibFactory() {
  // Verify ZlibFactory's configuration round-trip: defaults are reported
  // first, then explicitly set level/strategy values must be read back.
  Configuration cfg = new Configuration();

  assertTrue("testZlibFactory compression level error !!!",
      CompressionLevel.DEFAULT_COMPRESSION == ZlibFactory
          .getCompressionLevel(cfg));

  assertTrue("testZlibFactory compression strategy error !!!",
      CompressionStrategy.DEFAULT_STRATEGY == ZlibFactory
          .getCompressionStrategy(cfg));

  ZlibFactory.setCompressionLevel(cfg, CompressionLevel.BEST_COMPRESSION);
  // This asserts the compression LEVEL; the message previously said
  // "strategy" (copy-paste error) and would mislead on failure.
  assertTrue("testZlibFactory compression level error !!!",
      CompressionLevel.BEST_COMPRESSION == ZlibFactory
          .getCompressionLevel(cfg));

  ZlibFactory.setCompressionStrategy(cfg, CompressionStrategy.FILTERED);
  assertTrue("testZlibFactory compression strategy error !!!",
      CompressionStrategy.FILTERED == ZlibFactory.getCompressionStrategy(cfg));
}
项目:hardfs    文件:TestZlibCompressorDecompressor.java   
@Test
public void testCompressorDecompressorWithExeedBufferLimit() throws Exception {
  // Round-trip 100 KiB of generated data through a zlib compressor/decompressor
  // pair whose direct buffers are exactly the payload size, exercising the
  // buffer-limit boundary under several compress/decompress strategies.
  final int byteSize = 100 * 1024;
  byte[] rawData = generate(byteSize);
  // Let any failure propagate: JUnit then reports the full stack trace,
  // instead of a fail(...) message that discards it.
  CompressDecompressTester.of(rawData)
      .withCompressDecompressPair(
          new ZlibCompressor(
              org.apache.hadoop.io.compress.zlib.ZlibCompressor.CompressionLevel.BEST_COMPRESSION,
              CompressionStrategy.DEFAULT_STRATEGY,
              org.apache.hadoop.io.compress.zlib.ZlibCompressor.CompressionHeader.DEFAULT_HEADER,
              byteSize),
          new ZlibDecompressor(
              org.apache.hadoop.io.compress.zlib.ZlibDecompressor.CompressionHeader.DEFAULT_HEADER,
              byteSize))
      .withTestCases(ImmutableSet.of(
          CompressionTestStrategy.COMPRESS_DECOMPRESS_SINGLE_BLOCK,
          CompressionTestStrategy.COMPRESS_DECOMPRESS_BLOCK,
          CompressionTestStrategy.COMPRESS_DECOMPRESS_ERRORS,
          CompressionTestStrategy.COMPRESS_DECOMPRESS_WITH_EMPTY_STREAM))
      .test();
}
项目:hardfs    文件:TestZlibCompressorDecompressor.java   
@Test
public void testZlibFactory() {
  // Verify ZlibFactory's configuration round-trip: defaults are reported
  // first, then explicitly set level/strategy values must be read back.
  Configuration cfg = new Configuration();

  assertTrue("testZlibFactory compression level error !!!",
      CompressionLevel.DEFAULT_COMPRESSION == ZlibFactory
          .getCompressionLevel(cfg));

  assertTrue("testZlibFactory compression strategy error !!!",
      CompressionStrategy.DEFAULT_STRATEGY == ZlibFactory
          .getCompressionStrategy(cfg));

  ZlibFactory.setCompressionLevel(cfg, CompressionLevel.BEST_COMPRESSION);
  // This asserts the compression LEVEL; the message previously said
  // "strategy" (copy-paste error) and would mislead on failure.
  assertTrue("testZlibFactory compression level error !!!",
      CompressionLevel.BEST_COMPRESSION == ZlibFactory
          .getCompressionLevel(cfg));

  ZlibFactory.setCompressionStrategy(cfg, CompressionStrategy.FILTERED);
  assertTrue("testZlibFactory compression strategy error !!!",
      CompressionStrategy.FILTERED == ZlibFactory.getCompressionStrategy(cfg));
}
项目:hadoop-on-lustre2    文件:TestZlibCompressorDecompressor.java   
@Test
public void testCompressorDecompressorWithExeedBufferLimit() throws Exception {
  // Round-trip 100 KiB of generated data through a zlib compressor/decompressor
  // pair whose direct buffers are exactly the payload size, exercising the
  // buffer-limit boundary under several compress/decompress strategies.
  final int byteSize = 100 * 1024;
  byte[] rawData = generate(byteSize);
  // Let any failure propagate: JUnit then reports the full stack trace,
  // instead of a fail(...) message that discards it.
  CompressDecompressTester.of(rawData)
      .withCompressDecompressPair(
          new ZlibCompressor(
              org.apache.hadoop.io.compress.zlib.ZlibCompressor.CompressionLevel.BEST_COMPRESSION,
              CompressionStrategy.DEFAULT_STRATEGY,
              org.apache.hadoop.io.compress.zlib.ZlibCompressor.CompressionHeader.DEFAULT_HEADER,
              byteSize),
          new ZlibDecompressor(
              org.apache.hadoop.io.compress.zlib.ZlibDecompressor.CompressionHeader.DEFAULT_HEADER,
              byteSize))
      .withTestCases(ImmutableSet.of(
          CompressionTestStrategy.COMPRESS_DECOMPRESS_SINGLE_BLOCK,
          CompressionTestStrategy.COMPRESS_DECOMPRESS_BLOCK,
          CompressionTestStrategy.COMPRESS_DECOMPRESS_ERRORS,
          CompressionTestStrategy.COMPRESS_DECOMPRESS_WITH_EMPTY_STREAM))
      .test();
}
项目:hadoop-on-lustre2    文件:TestZlibCompressorDecompressor.java   
@Test
public void testZlibFactory() {
  // Verify ZlibFactory's configuration round-trip: defaults are reported
  // first, then explicitly set level/strategy values must be read back.
  Configuration cfg = new Configuration();

  assertTrue("testZlibFactory compression level error !!!",
      CompressionLevel.DEFAULT_COMPRESSION == ZlibFactory
          .getCompressionLevel(cfg));

  assertTrue("testZlibFactory compression strategy error !!!",
      CompressionStrategy.DEFAULT_STRATEGY == ZlibFactory
          .getCompressionStrategy(cfg));

  ZlibFactory.setCompressionLevel(cfg, CompressionLevel.BEST_COMPRESSION);
  // This asserts the compression LEVEL; the message previously said
  // "strategy" (copy-paste error) and would mislead on failure.
  assertTrue("testZlibFactory compression level error !!!",
      CompressionLevel.BEST_COMPRESSION == ZlibFactory
          .getCompressionLevel(cfg));

  ZlibFactory.setCompressionStrategy(cfg, CompressionStrategy.FILTERED);
  assertTrue("testZlibFactory compression strategy error !!!",
      CompressionStrategy.FILTERED == ZlibFactory.getCompressionStrategy(cfg));
}
项目:hadoop-oss    文件:TestCodec.java   
@Test
public void testGzipCodecWithParam() throws IOException {
  // Run the gzip codec round-trip with explicitly tuned zlib parameters
  // (best compression level, Huffman-only strategy).
  Configuration gzipConf = new Configuration(this.conf);
  ZlibFactory.setCompressionLevel(gzipConf, CompressionLevel.BEST_COMPRESSION);
  ZlibFactory.setCompressionStrategy(gzipConf,
      CompressionStrategy.HUFFMAN_ONLY);
  // Empty payload first, then `count` records.
  codecTest(gzipConf, seed, 0, "org.apache.hadoop.io.compress.GzipCodec");
  codecTest(gzipConf, seed, count, "org.apache.hadoop.io.compress.GzipCodec");
}
项目:hadoop-oss    文件:TestCodec.java   
// Verifies that a pooled gzip compressor honors a NEW compression level on
// reinit: a compressor created at BEST_COMPRESSION is returned to the pool,
// the config is flipped to NO_COMPRESSION, and the same cached instance must
// apply the new level when handed out again.
private static void gzipReinitTest(Configuration conf, CompressionCodec codec)
    throws IOException {
  // Add codec to cache
  ZlibFactory.setCompressionLevel(conf, CompressionLevel.BEST_COMPRESSION);
  ZlibFactory.setCompressionStrategy(conf,
      CompressionStrategy.DEFAULT_STRATEGY);
  Compressor c1 = CodecPool.getCompressor(codec);
  CodecPool.returnCompressor(c1);
  // reset compressor's compression level to perform no compression
  ZlibFactory.setCompressionLevel(conf, CompressionLevel.NO_COMPRESSION);
  Compressor c2 = CodecPool.getCompressor(codec, conf);
  // ensure same compressor placed earlier
  assertTrue("Got mismatched ZlibCompressor", c1 == c2);
  ByteArrayOutputStream bos = new ByteArrayOutputStream();
  CompressionOutputStream cos = null;
  // write trivially compressable data: 32 KiB of one repeated byte
  byte[] b = new byte[1 << 15];
  Arrays.fill(b, (byte) 43);
  try {
    cos = codec.createOutputStream(bos, c2);
    cos.write(b);
  } finally {
    if (cos != null) {
      cos.close();
    }
    // Return the compressor to the pool even if the write failed.
    CodecPool.returnCompressor(c2);
  }
  byte[] outbytes = bos.toByteArray();
  // verify data were not compressed: NO_COMPRESSION adds framing overhead,
  // so the output can only be >= the input length
  assertTrue("Compressed bytes contrary to configuration",
             outbytes.length >= b.length);
}
项目:hadoop-oss    文件:TestCodec.java   
// Creates a compressor with NO_COMPRESSION and verifies the output is not
// compressed by comparing its size with the original input's.
private static void codecTestWithNOCompression (Configuration conf,
                    String codecClass) throws IOException {
  CompressionCodec codec = null;
  ZlibFactory.setCompressionLevel(conf, CompressionLevel.NO_COMPRESSION);
  try {
    codec = (CompressionCodec)
      ReflectionUtils.newInstance(conf.getClassByName(codecClass), conf);
  } catch (ClassNotFoundException cnfe) {
    // Chain the cause instead of discarding it, so the failing class name
    // and original stack trace survive.
    throw new IOException("Illegal codec!", cnfe);
  }
  Compressor c = codec.createCompressor();
  ByteArrayOutputStream bos = new ByteArrayOutputStream();
  CompressionOutputStream cos = null;
  // write trivially compressable data: 32 KiB of one repeated byte
  byte[] b = new byte[1 << 15];
  Arrays.fill(b, (byte) 43);
  try {
    cos = codec.createOutputStream(bos, c);
    cos.write(b);
  } finally {
    if (cos != null) {
      cos.close();
    }
  }
  byte[] outbytes = bos.toByteArray();
  // verify data were not compressed: NO_COMPRESSION still adds framing
  // overhead, so the output can only be >= the input length
  assertTrue("Compressed bytes contrary to configuration(NO_COMPRESSION)",
             outbytes.length >= b.length);
}
项目:hadoop-oss    文件:TestCompressionStreamReuse.java   
@Test
public void testGzipCompressStreamReuseWithParam() throws IOException {
  // Exercise gzip output-stream reuse (resetState) under non-default zlib
  // parameters: best compression level, Huffman-only strategy.
  Configuration streamConf = new Configuration(this.conf);
  ZlibFactory.setCompressionLevel(streamConf,
      CompressionLevel.BEST_COMPRESSION);
  ZlibFactory.setCompressionStrategy(streamConf,
      CompressionStrategy.HUFFMAN_ONLY);
  resetStateTest(streamConf, seed, count,
      "org.apache.hadoop.io.compress.GzipCodec");
}
项目:hadoop    文件:TestCodec.java   
@Test
public void testGzipCodecWithParam() throws IOException {
  // Run the gzip codec round-trip with explicitly tuned zlib parameters
  // (best compression level, Huffman-only strategy).
  Configuration gzipConf = new Configuration(this.conf);
  ZlibFactory.setCompressionLevel(gzipConf, CompressionLevel.BEST_COMPRESSION);
  ZlibFactory.setCompressionStrategy(gzipConf,
      CompressionStrategy.HUFFMAN_ONLY);
  // Empty payload first, then `count` records.
  codecTest(gzipConf, seed, 0, "org.apache.hadoop.io.compress.GzipCodec");
  codecTest(gzipConf, seed, count, "org.apache.hadoop.io.compress.GzipCodec");
}
项目:hadoop    文件:TestCodec.java   
// Verifies that a pooled gzip compressor honors a NEW compression level on
// reinit: a compressor created at BEST_COMPRESSION is returned to the pool,
// the config is flipped to NO_COMPRESSION, and the same cached instance must
// apply the new level when handed out again.
private static void gzipReinitTest(Configuration conf, CompressionCodec codec)
    throws IOException {
  // Add codec to cache
  ZlibFactory.setCompressionLevel(conf, CompressionLevel.BEST_COMPRESSION);
  ZlibFactory.setCompressionStrategy(conf,
      CompressionStrategy.DEFAULT_STRATEGY);
  Compressor c1 = CodecPool.getCompressor(codec);
  CodecPool.returnCompressor(c1);
  // reset compressor's compression level to perform no compression
  ZlibFactory.setCompressionLevel(conf, CompressionLevel.NO_COMPRESSION);
  Compressor c2 = CodecPool.getCompressor(codec, conf);
  // ensure same compressor placed earlier
  assertTrue("Got mismatched ZlibCompressor", c1 == c2);
  ByteArrayOutputStream bos = new ByteArrayOutputStream();
  CompressionOutputStream cos = null;
  // write trivially compressable data: 32 KiB of one repeated byte
  byte[] b = new byte[1 << 15];
  Arrays.fill(b, (byte) 43);
  try {
    cos = codec.createOutputStream(bos, c2);
    cos.write(b);
  } finally {
    if (cos != null) {
      cos.close();
    }
    // Return the compressor to the pool even if the write failed.
    CodecPool.returnCompressor(c2);
  }
  byte[] outbytes = bos.toByteArray();
  // verify data were not compressed: NO_COMPRESSION adds framing overhead,
  // so the output can only be >= the input length
  assertTrue("Compressed bytes contrary to configuration",
             outbytes.length >= b.length);
}
项目:hadoop    文件:TestCodec.java   
// Creates a compressor with NO_COMPRESSION and verifies the output is not
// compressed by comparing its size with the original input's.
private static void codecTestWithNOCompression (Configuration conf,
                    String codecClass) throws IOException {
  CompressionCodec codec = null;
  ZlibFactory.setCompressionLevel(conf, CompressionLevel.NO_COMPRESSION);
  try {
    codec = (CompressionCodec)
      ReflectionUtils.newInstance(conf.getClassByName(codecClass), conf);
  } catch (ClassNotFoundException cnfe) {
    // Chain the cause instead of discarding it, so the failing class name
    // and original stack trace survive.
    throw new IOException("Illegal codec!", cnfe);
  }
  Compressor c = codec.createCompressor();
  ByteArrayOutputStream bos = new ByteArrayOutputStream();
  CompressionOutputStream cos = null;
  // write trivially compressable data: 32 KiB of one repeated byte
  byte[] b = new byte[1 << 15];
  Arrays.fill(b, (byte) 43);
  try {
    cos = codec.createOutputStream(bos, c);
    cos.write(b);
  } finally {
    if (cos != null) {
      cos.close();
    }
  }
  byte[] outbytes = bos.toByteArray();
  // verify data were not compressed: NO_COMPRESSION still adds framing
  // overhead, so the output can only be >= the input length
  assertTrue("Compressed bytes contrary to configuration(NO_COMPRESSION)",
             outbytes.length >= b.length);
}
项目:aliyun-oss-hadoop-fs    文件:TestCodec.java   
@Test
public void testGzipCodecWithParam() throws IOException {
  // Run the gzip codec round-trip with explicitly tuned zlib parameters
  // (best compression level, Huffman-only strategy).
  Configuration gzipConf = new Configuration(this.conf);
  ZlibFactory.setCompressionLevel(gzipConf, CompressionLevel.BEST_COMPRESSION);
  ZlibFactory.setCompressionStrategy(gzipConf,
      CompressionStrategy.HUFFMAN_ONLY);
  // Empty payload first, then `count` records.
  codecTest(gzipConf, seed, 0, "org.apache.hadoop.io.compress.GzipCodec");
  codecTest(gzipConf, seed, count, "org.apache.hadoop.io.compress.GzipCodec");
}
项目:aliyun-oss-hadoop-fs    文件:TestCodec.java   
// Verifies that a pooled gzip compressor honors a NEW compression level on
// reinit: a compressor created at BEST_COMPRESSION is returned to the pool,
// the config is flipped to NO_COMPRESSION, and the same cached instance must
// apply the new level when handed out again.
private static void gzipReinitTest(Configuration conf, CompressionCodec codec)
    throws IOException {
  // Add codec to cache
  ZlibFactory.setCompressionLevel(conf, CompressionLevel.BEST_COMPRESSION);
  ZlibFactory.setCompressionStrategy(conf,
      CompressionStrategy.DEFAULT_STRATEGY);
  Compressor c1 = CodecPool.getCompressor(codec);
  CodecPool.returnCompressor(c1);
  // reset compressor's compression level to perform no compression
  ZlibFactory.setCompressionLevel(conf, CompressionLevel.NO_COMPRESSION);
  Compressor c2 = CodecPool.getCompressor(codec, conf);
  // ensure same compressor placed earlier
  assertTrue("Got mismatched ZlibCompressor", c1 == c2);
  ByteArrayOutputStream bos = new ByteArrayOutputStream();
  CompressionOutputStream cos = null;
  // write trivially compressable data: 32 KiB of one repeated byte
  byte[] b = new byte[1 << 15];
  Arrays.fill(b, (byte) 43);
  try {
    cos = codec.createOutputStream(bos, c2);
    cos.write(b);
  } finally {
    if (cos != null) {
      cos.close();
    }
    // Return the compressor to the pool even if the write failed.
    CodecPool.returnCompressor(c2);
  }
  byte[] outbytes = bos.toByteArray();
  // verify data were not compressed: NO_COMPRESSION adds framing overhead,
  // so the output can only be >= the input length
  assertTrue("Compressed bytes contrary to configuration",
             outbytes.length >= b.length);
}
项目:aliyun-oss-hadoop-fs    文件:TestCodec.java   
// Creates a compressor with NO_COMPRESSION and verifies the output is not
// compressed by comparing its size with the original input's.
private static void codecTestWithNOCompression (Configuration conf,
                    String codecClass) throws IOException {
  CompressionCodec codec = null;
  ZlibFactory.setCompressionLevel(conf, CompressionLevel.NO_COMPRESSION);
  try {
    codec = (CompressionCodec)
      ReflectionUtils.newInstance(conf.getClassByName(codecClass), conf);
  } catch (ClassNotFoundException cnfe) {
    // Chain the cause instead of discarding it, so the failing class name
    // and original stack trace survive.
    throw new IOException("Illegal codec!", cnfe);
  }
  Compressor c = codec.createCompressor();
  ByteArrayOutputStream bos = new ByteArrayOutputStream();
  CompressionOutputStream cos = null;
  // write trivially compressable data: 32 KiB of one repeated byte
  byte[] b = new byte[1 << 15];
  Arrays.fill(b, (byte) 43);
  try {
    cos = codec.createOutputStream(bos, c);
    cos.write(b);
  } finally {
    if (cos != null) {
      cos.close();
    }
  }
  byte[] outbytes = bos.toByteArray();
  // verify data were not compressed: NO_COMPRESSION still adds framing
  // overhead, so the output can only be >= the input length
  assertTrue("Compressed bytes contrary to configuration(NO_COMPRESSION)",
             outbytes.length >= b.length);
}
项目:aliyun-oss-hadoop-fs    文件:TestCompressionStreamReuse.java   
@Test
public void testGzipCompressStreamReuseWithParam() throws IOException {
  // Exercise gzip output-stream reuse (resetState) under non-default zlib
  // parameters: best compression level, Huffman-only strategy.
  Configuration streamConf = new Configuration(this.conf);
  ZlibFactory.setCompressionLevel(streamConf,
      CompressionLevel.BEST_COMPRESSION);
  ZlibFactory.setCompressionStrategy(streamConf,
      CompressionStrategy.HUFFMAN_ONLY);
  resetStateTest(streamConf, seed, count,
      "org.apache.hadoop.io.compress.GzipCodec");
}
项目:big-c    文件:TestCodec.java   
@Test
public void testGzipCodecWithParam() throws IOException {
  // Run the gzip codec round-trip with explicitly tuned zlib parameters
  // (best compression level, Huffman-only strategy).
  Configuration gzipConf = new Configuration(this.conf);
  ZlibFactory.setCompressionLevel(gzipConf, CompressionLevel.BEST_COMPRESSION);
  ZlibFactory.setCompressionStrategy(gzipConf,
      CompressionStrategy.HUFFMAN_ONLY);
  // Empty payload first, then `count` records.
  codecTest(gzipConf, seed, 0, "org.apache.hadoop.io.compress.GzipCodec");
  codecTest(gzipConf, seed, count, "org.apache.hadoop.io.compress.GzipCodec");
}
项目:big-c    文件:TestCodec.java   
// Verifies that a pooled gzip compressor honors a NEW compression level on
// reinit: a compressor created at BEST_COMPRESSION is returned to the pool,
// the config is flipped to NO_COMPRESSION, and the same cached instance must
// apply the new level when handed out again.
private static void gzipReinitTest(Configuration conf, CompressionCodec codec)
    throws IOException {
  // Add codec to cache
  ZlibFactory.setCompressionLevel(conf, CompressionLevel.BEST_COMPRESSION);
  ZlibFactory.setCompressionStrategy(conf,
      CompressionStrategy.DEFAULT_STRATEGY);
  Compressor c1 = CodecPool.getCompressor(codec);
  CodecPool.returnCompressor(c1);
  // reset compressor's compression level to perform no compression
  ZlibFactory.setCompressionLevel(conf, CompressionLevel.NO_COMPRESSION);
  Compressor c2 = CodecPool.getCompressor(codec, conf);
  // ensure same compressor placed earlier
  assertTrue("Got mismatched ZlibCompressor", c1 == c2);
  ByteArrayOutputStream bos = new ByteArrayOutputStream();
  CompressionOutputStream cos = null;
  // write trivially compressable data: 32 KiB of one repeated byte
  byte[] b = new byte[1 << 15];
  Arrays.fill(b, (byte) 43);
  try {
    cos = codec.createOutputStream(bos, c2);
    cos.write(b);
  } finally {
    if (cos != null) {
      cos.close();
    }
    // Return the compressor to the pool even if the write failed.
    CodecPool.returnCompressor(c2);
  }
  byte[] outbytes = bos.toByteArray();
  // verify data were not compressed: NO_COMPRESSION adds framing overhead,
  // so the output can only be >= the input length
  assertTrue("Compressed bytes contrary to configuration",
             outbytes.length >= b.length);
}
项目:big-c    文件:TestCodec.java   
/**
 * Creates a compressor configured with NO_COMPRESSION and verifies that the
 * output is not smaller than the input, i.e. no compression actually
 * happened.
 *
 * @param conf       configuration carrying the zlib compression level
 * @param codecClass fully-qualified name of the codec class under test
 * @throws IOException if the codec class cannot be loaded or the stream fails
 */
private static void codecTestWithNOCompression (Configuration conf,
                    String codecClass) throws IOException {
  CompressionCodec codec = null;
  ZlibFactory.setCompressionLevel(conf, CompressionLevel.NO_COMPRESSION);
  try {
    codec = (CompressionCodec)
      ReflectionUtils.newInstance(conf.getClassByName(codecClass), conf);
  } catch (ClassNotFoundException cnfe) {
    // Preserve the original exception as the cause instead of dropping it.
    throw new IOException("Illegal codec!", cnfe);
  }
  Compressor c = codec.createCompressor();
  ByteArrayOutputStream bos = new ByteArrayOutputStream();
  // Trivially compressible data: identical bytes would shrink under any
  // real compression level, so any shrinkage signals misconfiguration.
  byte[] b = new byte[1 << 15];
  Arrays.fill(b, (byte) 43);
  // try-with-resources closes the stream (flushing framing bytes) even if
  // write() throws.
  try (CompressionOutputStream cos = codec.createOutputStream(bos, c)) {
    cos.write(b);
  }
  byte[] outbytes = bos.toByteArray();
  // With NO_COMPRESSION the codec framing adds overhead, so the output
  // must be at least as long as the input.
  assertTrue("Compressed bytes contrary to configuration(NO_COMPRESSION)",
             outbytes.length >= b.length);
}
Project: hadoop-2.6.0-cdh5.4.3    File: TestCodec.java
/**
 * Exercises the gzip codec with an explicitly tuned zlib configuration:
 * BEST_COMPRESSION level combined with the HUFFMAN_ONLY strategy. Runs the
 * shared codec round trip once with no records and once with {@code count}
 * records.
 *
 * @throws IOException if the codec round trip fails
 */
@Test
public void testGzipCodecWithParam() throws IOException {
  Configuration tuned = new Configuration(this.conf);
  ZlibFactory.setCompressionLevel(tuned, CompressionLevel.BEST_COMPRESSION);
  ZlibFactory.setCompressionStrategy(tuned, CompressionStrategy.HUFFMAN_ONLY);
  final String codecName = "org.apache.hadoop.io.compress.GzipCodec";
  // Empty-input edge case.
  codecTest(tuned, seed, 0, codecName);
  // Regular populated case.
  codecTest(tuned, seed, count, codecName);
}
Project: hadoop-2.6.0-cdh5.4.3    File: TestCodec.java
/**
 * Verifies that a pooled gzip compressor is reinitialized from new
 * configuration when fetched again from the {@link CodecPool}: the level is
 * switched from BEST_COMPRESSION to NO_COMPRESSION and the output is checked
 * to be at least as large as the input.
 *
 * @param conf  configuration used to (re)initialize the pooled compressor
 * @param codec gzip codec whose compressor is cached and reused
 * @throws IOException if writing the compressed stream fails
 */
private static void gzipReinitTest(Configuration conf, CompressionCodec codec)
    throws IOException {
  // Prime the pool: fetch a compressor at BEST_COMPRESSION and return it.
  ZlibFactory.setCompressionLevel(conf, CompressionLevel.BEST_COMPRESSION);
  ZlibFactory.setCompressionStrategy(conf,
      CompressionStrategy.DEFAULT_STRATEGY);
  Compressor c1 = CodecPool.getCompressor(codec);
  CodecPool.returnCompressor(c1);
  // Reconfigure to NO_COMPRESSION; the pool must hand back the same
  // instance, reinitialized with the new level.
  ZlibFactory.setCompressionLevel(conf, CompressionLevel.NO_COMPRESSION);
  Compressor c2 = CodecPool.getCompressor(codec, conf);
  // ensure the same compressor placed earlier is returned
  assertTrue("Got mismatched ZlibCompressor", c1 == c2);
  ByteArrayOutputStream bos = new ByteArrayOutputStream();
  // Trivially compressible data: a run of identical bytes would shrink
  // dramatically under any real compression level.
  byte[] b = new byte[1 << 15];
  Arrays.fill(b, (byte) 43);
  try {
    // try-with-resources guarantees the stream is closed (flushing the
    // gzip trailer) even if write() throws.
    try (CompressionOutputStream cos = codec.createOutputStream(bos, c2)) {
      cos.write(b);
    }
  } finally {
    // Always hand the compressor back to the pool, even on failure.
    CodecPool.returnCompressor(c2);
  }
  byte[] outbytes = bos.toByteArray();
  // With NO_COMPRESSION the gzip framing adds overhead, so the output
  // must be at least as long as the input.
  assertTrue("Compressed bytes contrary to configuration",
             outbytes.length >= b.length);
}
Project: hadoop-2.6.0-cdh5.4.3    File: TestCodec.java
/**
 * Creates a compressor configured with NO_COMPRESSION and verifies that the
 * output is not smaller than the input, i.e. no compression actually
 * happened.
 *
 * @param conf       configuration carrying the zlib compression level
 * @param codecClass fully-qualified name of the codec class under test
 * @throws IOException if the codec class cannot be loaded or the stream fails
 */
private static void codecTestWithNOCompression (Configuration conf,
                    String codecClass) throws IOException {
  CompressionCodec codec = null;
  ZlibFactory.setCompressionLevel(conf, CompressionLevel.NO_COMPRESSION);
  try {
    codec = (CompressionCodec)
      ReflectionUtils.newInstance(conf.getClassByName(codecClass), conf);
  } catch (ClassNotFoundException cnfe) {
    // Preserve the original exception as the cause instead of dropping it.
    throw new IOException("Illegal codec!", cnfe);
  }
  Compressor c = codec.createCompressor();
  ByteArrayOutputStream bos = new ByteArrayOutputStream();
  // Trivially compressible data: identical bytes would shrink under any
  // real compression level, so any shrinkage signals misconfiguration.
  byte[] b = new byte[1 << 15];
  Arrays.fill(b, (byte) 43);
  // try-with-resources closes the stream (flushing framing bytes) even if
  // write() throws.
  try (CompressionOutputStream cos = codec.createOutputStream(bos, c)) {
    cos.write(b);
  }
  byte[] outbytes = bos.toByteArray();
  // With NO_COMPRESSION the codec framing adds overhead, so the output
  // must be at least as long as the input.
  assertTrue("Compressed bytes contrary to configuration(NO_COMPRESSION)",
             outbytes.length >= b.length);
}
Project: hadoop-plus    File: TestCodec.java
/**
 * Runs the gzip codec round-trip test with a non-default zlib setup
 * (BEST_COMPRESSION level, HUFFMAN_ONLY strategy) against both an empty
 * (0 records) and a populated ({@code count} records) data set.
 *
 * @throws IOException if the codec round trip fails
 */
@Test
public void testGzipCodecWithParam() throws IOException {
  final String gzipCodecClass = "org.apache.hadoop.io.compress.GzipCodec";
  Configuration gzipConf = new Configuration(this.conf);
  ZlibFactory.setCompressionLevel(gzipConf, CompressionLevel.BEST_COMPRESSION);
  ZlibFactory.setCompressionStrategy(gzipConf, CompressionStrategy.HUFFMAN_ONLY);
  // Cover the empty-input edge case as well as the regular case.
  for (int records : new int[] { 0, count }) {
    codecTest(gzipConf, seed, records, gzipCodecClass);
  }
}
Project: hadoop-plus    File: TestCodec.java
/**
 * Verifies that a pooled gzip compressor is reinitialized from new
 * configuration when fetched again from the {@link CodecPool}: the level is
 * switched from BEST_COMPRESSION to NO_COMPRESSION and the output is checked
 * to be at least as large as the input.
 *
 * @param conf  configuration used to (re)initialize the pooled compressor
 * @param codec gzip codec whose compressor is cached and reused
 * @throws IOException if writing the compressed stream fails
 */
private static void gzipReinitTest(Configuration conf, CompressionCodec codec)
    throws IOException {
  // Prime the pool: fetch a compressor at BEST_COMPRESSION and return it.
  ZlibFactory.setCompressionLevel(conf, CompressionLevel.BEST_COMPRESSION);
  ZlibFactory.setCompressionStrategy(conf,
      CompressionStrategy.DEFAULT_STRATEGY);
  Compressor c1 = CodecPool.getCompressor(codec);
  CodecPool.returnCompressor(c1);
  // Reconfigure to NO_COMPRESSION; the pool must hand back the same
  // instance, reinitialized with the new level.
  ZlibFactory.setCompressionLevel(conf, CompressionLevel.NO_COMPRESSION);
  Compressor c2 = CodecPool.getCompressor(codec, conf);
  // ensure the same compressor placed earlier is returned
  assertTrue("Got mismatched ZlibCompressor", c1 == c2);
  ByteArrayOutputStream bos = new ByteArrayOutputStream();
  // Trivially compressible data: a run of identical bytes would shrink
  // dramatically under any real compression level.
  byte[] b = new byte[1 << 15];
  Arrays.fill(b, (byte) 43);
  try {
    // try-with-resources guarantees the stream is closed (flushing the
    // gzip trailer) even if write() throws.
    try (CompressionOutputStream cos = codec.createOutputStream(bos, c2)) {
      cos.write(b);
    }
  } finally {
    // Always hand the compressor back to the pool, even on failure.
    CodecPool.returnCompressor(c2);
  }
  byte[] outbytes = bos.toByteArray();
  // With NO_COMPRESSION the gzip framing adds overhead, so the output
  // must be at least as long as the input.
  assertTrue("Compressed bytes contrary to configuration",
             outbytes.length >= b.length);
}
Project: hadoop-plus    File: TestCodec.java
/**
 * Creates a compressor configured with NO_COMPRESSION and verifies that the
 * output is not smaller than the input, i.e. no compression actually
 * happened.
 *
 * @param conf       configuration carrying the zlib compression level
 * @param codecClass fully-qualified name of the codec class under test
 * @throws IOException if the codec class cannot be loaded or the stream fails
 */
private static void codecTestWithNOCompression (Configuration conf,
                    String codecClass) throws IOException {
  CompressionCodec codec = null;
  ZlibFactory.setCompressionLevel(conf, CompressionLevel.NO_COMPRESSION);
  try {
    codec = (CompressionCodec)
      ReflectionUtils.newInstance(conf.getClassByName(codecClass), conf);
  } catch (ClassNotFoundException cnfe) {
    // Preserve the original exception as the cause instead of dropping it.
    throw new IOException("Illegal codec!", cnfe);
  }
  Compressor c = codec.createCompressor();
  ByteArrayOutputStream bos = new ByteArrayOutputStream();
  // Trivially compressible data: identical bytes would shrink under any
  // real compression level, so any shrinkage signals misconfiguration.
  byte[] b = new byte[1 << 15];
  Arrays.fill(b, (byte) 43);
  // try-with-resources closes the stream (flushing framing bytes) even if
  // write() throws.
  try (CompressionOutputStream cos = codec.createOutputStream(bos, c)) {
    cos.write(b);
  }
  byte[] outbytes = bos.toByteArray();
  // With NO_COMPRESSION the codec framing adds overhead, so the output
  // must be at least as long as the input.
  assertTrue("Compressed bytes contrary to configuration(NO_COMPRESSION)",
             outbytes.length >= b.length);
}