Java usage examples for the class org.apache.hadoop.io.compress.CompressDecompressTester.CompressionTestStrategy
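The excerpts below are collected from a number of Hadoop forks and all follow the same pattern: build a CompressDecompressTester over a buffer of generated bytes, register one or more compressor/decompressor pairs, pick a set of CompressionTestStrategy values, and call test(). CompressDecompressTester lives in the hadoop-common test sources, so running any of them requires the hadoop-common test jar, Guava, and JUnit 4 on the classpath. As a minimal self-contained sketch, the class below shows the imports the excerpts assume plus a hypothetical stand-in for the generate() helper that every excerpt calls but none of them defines (the low-valued random filler is an assumption, chosen to keep the data compressible):

import java.util.Random;

import com.google.common.collect.ImmutableSet;

import org.apache.hadoop.io.compress.CompressDecompressTester;
import org.apache.hadoop.io.compress.CompressDecompressTester.CompressionTestStrategy;
import org.apache.hadoop.io.compress.zlib.BuiltInZlibDeflater;
import org.apache.hadoop.io.compress.zlib.BuiltInZlibInflater;

public class CompressionTestStrategyExample {
  private static final Random rnd = new Random(12345L);

  // Hypothetical equivalent of the generate() helper used by the excerpts:
  // low-valued random bytes keep the buffer compressible.
  public static byte[] generate(int size) {
    byte[] data = new byte[size];
    for (int i = 0; i < size; i++) {
      data[i] = (byte) rnd.nextInt(16);
    }
    return data;
  }

  public static void main(String[] args) throws Exception {
    byte[] rawData = generate(64 * 1024);
    // The pure-Java zlib pair is used here so the sketch runs without native codecs.
    CompressDecompressTester.of(rawData)
        .withCompressDecompressPair(new BuiltInZlibDeflater(), new BuiltInZlibInflater())
        .withTestCases(ImmutableSet.of(
            CompressionTestStrategy.COMPRESS_DECOMPRESS_SINGLE_BLOCK,
            CompressionTestStrategy.COMPRESS_DECOMPRESS_BLOCK,
            CompressionTestStrategy.COMPRESS_DECOMPRESS_ERRORS,
            CompressionTestStrategy.COMPRESS_DECOMPRESS_WITH_EMPTY_STREAM))
        .test();
  }
}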

Project: hadoop-oss    File: TestCompressorDecompressor.java
@Test
public void testCompressorDecompressor() {
  // 44 KB of generated data is enough to exercise all of the strategies below
  int SIZE = 44 * 1024;

  byte[] rawData = generate(SIZE);
  try {
    CompressDecompressTester.of(rawData)
        .withCompressDecompressPair(new SnappyCompressor(), new SnappyDecompressor())
        .withCompressDecompressPair(new Lz4Compressor(), new Lz4Decompressor())
        .withCompressDecompressPair(new BuiltInZlibDeflater(), new BuiltInZlibInflater())
        .withTestCases(ImmutableSet.of(CompressionTestStrategy.COMPRESS_DECOMPRESS_SINGLE_BLOCK,
                    CompressionTestStrategy.COMPRESS_DECOMPRESS_BLOCK,
                    CompressionTestStrategy.COMPRESS_DECOMPRESS_ERRORS,
                    CompressionTestStrategy.COMPRESS_DECOMPRESS_WITH_EMPTY_STREAM))
        .test();

  } catch (Exception ex) {
    GenericTestUtils.assertExceptionContains(
        "testCompressorDecompressor error !!!", ex);
  }
}
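Note that this fork reports failures with GenericTestUtils.assertExceptionContains, asserting on the exception message, whereas most of the forks below call fail() with the same message string; the compression round-trip being exercised is identical in both variants.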
Project: hadoop-oss    File: TestCompressorDecompressor.java
@Test
public void testCompressorDecompressorWithExeedBufferLimit() {
  int BYTE_SIZE = 100 * 1024;
  byte[] rawData = generate(BYTE_SIZE);
  try {
    CompressDecompressTester.of(rawData)
        .withCompressDecompressPair(
            new SnappyCompressor(BYTE_SIZE + BYTE_SIZE / 2),
            new SnappyDecompressor(BYTE_SIZE + BYTE_SIZE / 2))
        .withCompressDecompressPair(new Lz4Compressor(BYTE_SIZE),
            new Lz4Decompressor(BYTE_SIZE))
        .withTestCases(ImmutableSet.of(CompressionTestStrategy.COMPRESS_DECOMPRESS_SINGLE_BLOCK,
                    CompressionTestStrategy.COMPRESS_DECOMPRESS_BLOCK,
                    CompressionTestStrategy.COMPRESS_DECOMPRESS_ERRORS,
                    CompressionTestStrategy.COMPRESS_DECOMPRESS_WITH_EMPTY_STREAM))
        .test();

  } catch (Exception ex) {
    GenericTestUtils.assertExceptionContains(
        "testCompressorDecompressorWithExeedBufferLimit error !!!", ex);
  }
}
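The "ExeedBufferLimit" variants push 100 KB of data through the same strategies, which is more than the compressors' default direct-buffer size (64 KB in stock Hadoop), so each pair is constructed with an explicit buffer size: 1.5x the payload for the Snappy pair and exactly the payload for the Lz4 pair.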
Project: hadoop-oss    File: TestZlibCompressorDecompressor.java
@Test
public void testZlibCompressorDecompressor() {
  try {
    int SIZE = 44 * 1024;
    byte[] rawData = generate(SIZE);

    CompressDecompressTester.of(rawData)
        .withCompressDecompressPair(new ZlibCompressor(), new ZlibDecompressor())
        .withTestCases(ImmutableSet.of(
            CompressionTestStrategy.COMPRESS_DECOMPRESS_SINGLE_BLOCK,
            CompressionTestStrategy.COMPRESS_DECOMPRESS_BLOCK,
            CompressionTestStrategy.COMPRESS_DECOMPRESS_ERRORS,
            CompressionTestStrategy.COMPRESS_DECOMPRESS_WITH_EMPTY_STREAM))
        .test();
  } catch (Exception ex) {
    fail("testCompressorDecompressor error !!!" + ex);
  }
}
Project: hadoop-oss    File: TestZlibCompressorDecompressor.java
@Test
public void testCompressorDecompressorWithExeedBufferLimit() {
  int BYTE_SIZE = 100 * 1024;
  byte[] rawData = generate(BYTE_SIZE);
  try {
    CompressDecompressTester.of(rawData)
        .withCompressDecompressPair(
            new ZlibCompressor(
                org.apache.hadoop.io.compress.zlib.ZlibCompressor.CompressionLevel.BEST_COMPRESSION,
                CompressionStrategy.DEFAULT_STRATEGY,
                org.apache.hadoop.io.compress.zlib.ZlibCompressor.CompressionHeader.DEFAULT_HEADER,
                BYTE_SIZE),
            new ZlibDecompressor(
                org.apache.hadoop.io.compress.zlib.ZlibDecompressor.CompressionHeader.DEFAULT_HEADER,
                BYTE_SIZE))
        .withTestCases(ImmutableSet.of(
            CompressionTestStrategy.COMPRESS_DECOMPRESS_SINGLE_BLOCK,
            CompressionTestStrategy.COMPRESS_DECOMPRESS_BLOCK,
            CompressionTestStrategy.COMPRESS_DECOMPRESS_ERRORS,
            CompressionTestStrategy.COMPRESS_DECOMPRESS_WITH_EMPTY_STREAM))
        .test();
  } catch (Exception ex) {
    fail("testCompressorDecompressorWithExeedBufferLimit error !!!" + ex);
  } 
}
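The zlib variant configures the pair explicitly: BEST_COMPRESSION level, DEFAULT_STRATEGY, and DEFAULT_HEADER (a standard zlib header on both the compressor and the decompressor), again with a buffer sized to the 100 KB payload. The unqualified CompressionStrategy is presumably imported from org.apache.hadoop.io.compress.zlib.ZlibCompressor, while the level and header enums are written fully qualified.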
Project: hadoop    File: TestCompressorDecompressor.java
@Test
public void testCompressorDecompressor() {
  // 44 KB of generated data is enough to exercise all of the strategies below
  int SIZE = 44 * 1024;

  byte[] rawData = generate(SIZE);
  try {
    CompressDecompressTester.of(rawData)
        .withCompressDecompressPair(new SnappyCompressor(), new SnappyDecompressor())
        .withCompressDecompressPair(new Lz4Compressor(), new Lz4Decompressor())
        .withCompressDecompressPair(new BuiltInZlibDeflater(), new BuiltInZlibInflater())
        .withTestCases(ImmutableSet.of(CompressionTestStrategy.COMPRESS_DECOMPRESS_SINGLE_BLOCK,
                    CompressionTestStrategy.COMPRESS_DECOMPRESS_BLOCK,
                    CompressionTestStrategy.COMPRESS_DECOMPRESS_ERRORS,
                    CompressionTestStrategy.COMPRESS_DECOMPRESS_WITH_EMPTY_STREAM))
        .test();

  } catch (Exception ex) {
    fail("testCompressorDecompressor error !!!" + ex);
  }
}
Project: hadoop    File: TestCompressorDecompressor.java
@Test
public void testCompressorDecompressorWithExeedBufferLimit() {
  int BYTE_SIZE = 100 * 1024;
  byte[] rawData = generate(BYTE_SIZE);
  try {
    CompressDecompressTester.of(rawData)
        .withCompressDecompressPair(
            new SnappyCompressor(BYTE_SIZE + BYTE_SIZE / 2),
            new SnappyDecompressor(BYTE_SIZE + BYTE_SIZE / 2))
        .withCompressDecompressPair(new Lz4Compressor(BYTE_SIZE),
            new Lz4Decompressor(BYTE_SIZE))
        .withTestCases(ImmutableSet.of(CompressionTestStrategy.COMPRESS_DECOMPRESS_SINGLE_BLOCK,
                    CompressionTestStrategy.COMPRESS_DECOMPRESS_BLOCK,
                    CompressionTestStrategy.COMPRESS_DECOMPRESS_ERRORS,
                    CompressionTestStrategy.COMPRESS_DECOMPRESS_WITH_EMPTY_STREAM))
        .test();

  } catch (Exception ex) {
    fail("testCompressorDecompressorWithExeedBufferLimit error !!!" + ex);
  }
}
Project: hadoop    File: TestZlibCompressorDecompressor.java
@Test
public void testZlibCompressorDecompressor() {
  try {
    int SIZE = 44 * 1024;
    byte[] rawData = generate(SIZE);

    CompressDecompressTester.of(rawData)
        .withCompressDecompressPair(new ZlibCompressor(), new ZlibDecompressor())
        .withTestCases(ImmutableSet.of(
            CompressionTestStrategy.COMPRESS_DECOMPRESS_SINGLE_BLOCK,
            CompressionTestStrategy.COMPRESS_DECOMPRESS_BLOCK,
            CompressionTestStrategy.COMPRESS_DECOMPRESS_ERRORS,
            CompressionTestStrategy.COMPRESS_DECOMPRESS_WITH_EMPTY_STREAM))
        .test();
  } catch (Exception ex) {
    fail("testCompressorDecompressor error !!!" + ex);
  }
}
Project: hadoop    File: TestZlibCompressorDecompressor.java
@Test
public void testCompressorDecompressorWithExeedBufferLimit() {
  int BYTE_SIZE = 100 * 1024;
  byte[] rawData = generate(BYTE_SIZE);
  try {
    CompressDecompressTester.of(rawData)
        .withCompressDecompressPair(
            new ZlibCompressor(
                org.apache.hadoop.io.compress.zlib.ZlibCompressor.CompressionLevel.BEST_COMPRESSION,
                CompressionStrategy.DEFAULT_STRATEGY,
                org.apache.hadoop.io.compress.zlib.ZlibCompressor.CompressionHeader.DEFAULT_HEADER,
                BYTE_SIZE),
            new ZlibDecompressor(
                org.apache.hadoop.io.compress.zlib.ZlibDecompressor.CompressionHeader.DEFAULT_HEADER,
                BYTE_SIZE))
        .withTestCases(ImmutableSet.of(
            CompressionTestStrategy.COMPRESS_DECOMPRESS_SINGLE_BLOCK,
            CompressionTestStrategy.COMPRESS_DECOMPRESS_BLOCK,
            CompressionTestStrategy.COMPRESS_DECOMPRESS_ERRORS,
            CompressionTestStrategy.COMPRESS_DECOMPRESS_WITH_EMPTY_STREAM))
        .test();
  } catch (Exception ex) {
    fail("testCompressorDecompressorWithExeedBufferLimit error !!!" + ex);
  } 
}
Projects: aliyun-oss-hadoop-fs, big-c, hadoop-2.6.0-cdh5.4.3, hadoop-plus, hops, hadoop-TCP, hardfs, hadoop-on-lustre2    Files: TestCompressorDecompressor.java, TestZlibCompressorDecompressor.java
The remaining forks repeat the four test methods above verbatim. aliyun-oss-hadoop-fs, big-c, hadoop-2.6.0-cdh5.4.3, hadoop-plus, hadoop-TCP, hardfs, and hadoop-on-lustre2 match the hadoop listings exactly (fail()-based error reporting throughout), while hops matches the hadoop-oss variant of TestCompressorDecompressor (GenericTestUtils.assertExceptionContains) and shares the same TestZlibCompressorDecompressor methods as all the other forks.