Example source code for the Java class org.apache.hadoop.io.compress.CompressDecompressTester

Project: hadoop-oss    File: TestZlibCompressorDecompressor.java
@Test
public void testZlibCompressorDecompressor() {
  try {
    int SIZE = 44 * 1024;
    byte[] rawData = generate(SIZE);

    CompressDecompressTester.of(rawData)
        .withCompressDecompressPair(new ZlibCompressor(), new ZlibDecompressor())
        .withTestCases(ImmutableSet.of(
            CompressionTestStrategy.COMPRESS_DECOMPRESS_SINGLE_BLOCK,
            CompressionTestStrategy.COMPRESS_DECOMPRESS_BLOCK,
            CompressionTestStrategy.COMPRESS_DECOMPRESS_ERRORS,
            CompressionTestStrategy.COMPRESS_DECOMPRESS_WITH_EMPTY_STREAM))
        .test();
  } catch (Exception ex) {
    fail("testCompressorDecompressor error !!!" + ex);
  }
}
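
Both tests call a generate helper that the excerpt does not show; in the Hadoop test class it simply fills a byte array with small pseudo-random values so the input is compressible. A minimal sketch of such a helper (the fixed seed and the 16-value alphabet are illustrative assumptions, not taken from any particular project):

private static final Random RANDOM = new Random(12345L); // requires java.util.Random

// Fill a buffer with a narrow range of byte values; a small alphabet
// keeps the data compressible, which the round-trip strategies rely on.
private static byte[] generate(int size) {
  byte[] data = new byte[size];
  for (int i = 0; i < size; i++) {
    data[i] = (byte) RANDOM.nextInt(16);
  }
  return data;
}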
Project: hadoop-oss    File: TestZlibCompressorDecompressor.java
@Test
public void testCompressorDecompressorWithExeedBufferLimit() {
  int BYTE_SIZE = 100 * 1024;
  byte[] rawData = generate(BYTE_SIZE);
  try {
    CompressDecompressTester.of(rawData)
        .withCompressDecompressPair(
            new ZlibCompressor(
                org.apache.hadoop.io.compress.zlib.ZlibCompressor.CompressionLevel.BEST_COMPRESSION,
                CompressionStrategy.DEFAULT_STRATEGY,
                org.apache.hadoop.io.compress.zlib.ZlibCompressor.CompressionHeader.DEFAULT_HEADER,
                BYTE_SIZE),
            new ZlibDecompressor(
                org.apache.hadoop.io.compress.zlib.ZlibDecompressor.CompressionHeader.DEFAULT_HEADER,
                BYTE_SIZE))
        .withTestCases(ImmutableSet.of(
            CompressionTestStrategy.COMPRESS_DECOMPRESS_SINGLE_BLOCK,
            CompressionTestStrategy.COMPRESS_DECOMPRESS_BLOCK,
            CompressionTestStrategy.COMPRESS_DECOMPRESS_ERRORS,
            CompressionTestStrategy.COMPRESS_DECOMPRESS_WITH_EMPTY_STREAM))
        .test();
  } catch (Exception ex) {
    fail("testCompressorDecompressorWithExeedBufferLimit error !!!" + ex);
  } 
}
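
ZlibCompressor and ZlibDecompressor are JNI wrappers around native zlib, so these tests only do real work when the native Hadoop library is loaded; CompressDecompressTester quietly drops a compressor/decompressor pair whose native codec is unavailable, which is why the tests still pass on a machine without it. To make such a test skip visibly instead of passing trivially, a guard along these lines should work (a sketch assuming JUnit 4's Assume and the standard ZlibFactory.isNativeZlibLoaded check):

@Before
public void nativeZlibMustBeLoaded() {
  // Mark the test as skipped, not passed, when native zlib is absent.
  // Requires: org.junit.Before, org.junit.Assume,
  // org.apache.hadoop.conf.Configuration,
  // org.apache.hadoop.io.compress.zlib.ZlibFactory.
  Assume.assumeTrue(ZlibFactory.isNativeZlibLoaded(new Configuration()));
}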
Project: hadoop    File: TestZlibCompressorDecompressor.java
Project: aliyun-oss-hadoop-fs    File: TestZlibCompressorDecompressor.java
Project: big-c    File: TestZlibCompressorDecompressor.java
Project: hadoop-2.6.0-cdh5.4.3    File: TestZlibCompressorDecompressor.java
Project: hadoop-plus    File: TestZlibCompressorDecompressor.java
Project: hops    File: TestZlibCompressorDecompressor.java
Project: hadoop-TCP    File: TestZlibCompressorDecompressor.java
Project: hardfs    File: TestZlibCompressorDecompressor.java
Project: hadoop-on-lustre2    File: TestZlibCompressorDecompressor.java
Each of these projects carries the same two methods, testZlibCompressorDecompressor and testCompressorDecompressorWithExeedBufferLimit, verbatim; the code is identical to the hadoop-oss version shown above.
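
All of the snippets above are method bodies lifted from a larger test class, so they compile only with that class's imports. A plausible import list for both methods (JUnit 4, Guava, and the Hadoop classes involved; individual projects may differ slightly):

import static org.junit.Assert.fail;

import com.google.common.collect.ImmutableSet;
import org.apache.hadoop.io.compress.CompressDecompressTester;
import org.apache.hadoop.io.compress.CompressDecompressTester.CompressionTestStrategy;
import org.apache.hadoop.io.compress.zlib.ZlibCompressor;
import org.apache.hadoop.io.compress.zlib.ZlibCompressor.CompressionStrategy;
import org.apache.hadoop.io.compress.zlib.ZlibDecompressor;
import org.junit.Test;

The fully qualified CompressionLevel and CompressionHeader references in the second test need no extra imports, and the bare CompressionStrategy resolves to the nested enum imported from ZlibCompressor.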