Java 类org.apache.hadoop.io.compress.zlib.BuiltInGzipDecompressor 实例源码

项目:hadoop-oss    文件:TestCodec.java   
@Test
public void testGzipCompatibility() throws IOException {
  // Verify Hadoop's pure-Java gzip decompressor can read a stream
  // produced by the JDK's reference java.util.zip.GZIPOutputStream.
  Random r = new Random();
  long seed = r.nextLong();
  r.setSeed(seed);
  // Log the seed so a failing random run can be reproduced.
  LOG.info("seed: " + seed);

  // Compress a random payload of up to 128 KiB with the JDK encoder.
  DataOutputBuffer dflbuf = new DataOutputBuffer();
  byte[] b = new byte[r.nextInt(128 * 1024 + 1)];
  r.nextBytes(b);
  // try-with-resources flushes the gzip trailer and avoids leaking the
  // stream if write() throws.
  try (GZIPOutputStream gzout = new GZIPOutputStream(dflbuf)) {
    gzout.write(b);
  }

  DataInputBuffer gzbuf = new DataInputBuffer();
  gzbuf.reset(dflbuf.getData(), dflbuf.getLength());

  Configuration conf = new Configuration();
  // don't use native libs
  ZlibFactory.setNativeZlibLoaded(false);
  CompressionCodec codec = ReflectionUtils.newInstance(GzipCodec.class, conf);
  Decompressor decom = codec.createDecompressor();
  assertNotNull(decom);
  // Without native libs the codec must fall back to the built-in
  // pure-Java gzip decompressor.
  assertEquals(BuiltInGzipDecompressor.class, decom.getClass());

  // Decompress through the codec and check the payload round-trips.
  dflbuf.reset();
  try (InputStream gzin = codec.createInputStream(gzbuf, decom)) {
    IOUtils.copyBytes(gzin, dflbuf, 4096);
  }
  final byte[] dflchk = Arrays.copyOf(dflbuf.getData(), dflbuf.getLength());
  assertArrayEquals(b, dflchk);
}
项目:hadoop-oss    文件:TestCodec.java   
@Test
public void testBuiltInGzipConcat() throws IOException {
  // Run the concatenated-gzip test with native zlib disabled so the
  // pure-Java BuiltInGzipDecompressor is exercised.
  Configuration config = new Configuration();
  ZlibFactory.setNativeZlibLoaded(false);
  GzipConcatTest(config, BuiltInGzipDecompressor.class);
}
项目:hadoop    文件:TestCodec.java   
@Test
public void testGzipCompatibility() throws IOException {
  // Verify Hadoop's pure-Java gzip decompressor can read a stream
  // produced by the JDK's reference java.util.zip.GZIPOutputStream.
  Random r = new Random();
  long seed = r.nextLong();
  r.setSeed(seed);
  // Log the seed so a failing random run can be reproduced.
  LOG.info("seed: " + seed);

  // Compress a random payload of up to 128 KiB with the JDK encoder.
  DataOutputBuffer dflbuf = new DataOutputBuffer();
  byte[] b = new byte[r.nextInt(128 * 1024 + 1)];
  r.nextBytes(b);
  // try-with-resources flushes the gzip trailer and avoids leaking the
  // stream if write() throws.
  try (GZIPOutputStream gzout = new GZIPOutputStream(dflbuf)) {
    gzout.write(b);
  }

  DataInputBuffer gzbuf = new DataInputBuffer();
  gzbuf.reset(dflbuf.getData(), dflbuf.getLength());

  Configuration conf = new Configuration();
  conf.setBoolean(CommonConfigurationKeys.IO_NATIVE_LIB_AVAILABLE_KEY, false);
  CompressionCodec codec = ReflectionUtils.newInstance(GzipCodec.class, conf);
  Decompressor decom = codec.createDecompressor();
  assertNotNull(decom);
  // Without native libs the codec must fall back to the built-in
  // pure-Java gzip decompressor.
  assertEquals(BuiltInGzipDecompressor.class, decom.getClass());

  // Decompress through the codec and check the payload round-trips.
  dflbuf.reset();
  try (InputStream gzin = codec.createInputStream(gzbuf, decom)) {
    IOUtils.copyBytes(gzin, dflbuf, 4096);
  }
  final byte[] dflchk = Arrays.copyOf(dflbuf.getData(), dflbuf.getLength());
  assertArrayEquals(b, dflchk);
}
项目:aliyun-oss-hadoop-fs    文件:TestCodec.java   
@Test
public void testGzipCompatibility() throws IOException {
  // Verify Hadoop's pure-Java gzip decompressor can read a stream
  // produced by the JDK's reference java.util.zip.GZIPOutputStream.
  Random r = new Random();
  long seed = r.nextLong();
  r.setSeed(seed);
  // Log the seed so a failing random run can be reproduced.
  LOG.info("seed: " + seed);

  // Compress a random payload of up to 128 KiB with the JDK encoder.
  DataOutputBuffer dflbuf = new DataOutputBuffer();
  byte[] b = new byte[r.nextInt(128 * 1024 + 1)];
  r.nextBytes(b);
  // try-with-resources flushes the gzip trailer and avoids leaking the
  // stream if write() throws.
  try (GZIPOutputStream gzout = new GZIPOutputStream(dflbuf)) {
    gzout.write(b);
  }

  DataInputBuffer gzbuf = new DataInputBuffer();
  gzbuf.reset(dflbuf.getData(), dflbuf.getLength());

  Configuration conf = new Configuration();
  // don't use native libs
  ZlibFactory.setNativeZlibLoaded(false);
  CompressionCodec codec = ReflectionUtils.newInstance(GzipCodec.class, conf);
  Decompressor decom = codec.createDecompressor();
  assertNotNull(decom);
  // Without native libs the codec must fall back to the built-in
  // pure-Java gzip decompressor.
  assertEquals(BuiltInGzipDecompressor.class, decom.getClass());

  // Decompress through the codec and check the payload round-trips.
  dflbuf.reset();
  try (InputStream gzin = codec.createInputStream(gzbuf, decom)) {
    IOUtils.copyBytes(gzin, dflbuf, 4096);
  }
  final byte[] dflchk = Arrays.copyOf(dflbuf.getData(), dflbuf.getLength());
  assertArrayEquals(b, dflchk);
}
项目:aliyun-oss-hadoop-fs    文件:TestCodec.java   
@Test
public void testBuiltInGzipConcat() throws IOException {
  // Run the concatenated-gzip test with native zlib disabled so the
  // pure-Java BuiltInGzipDecompressor is exercised.
  Configuration config = new Configuration();
  ZlibFactory.setNativeZlibLoaded(false);
  GzipConcatTest(config, BuiltInGzipDecompressor.class);
}
项目:big-c    文件:TestCodec.java   
@Test
public void testGzipCompatibility() throws IOException {
  // Verify Hadoop's pure-Java gzip decompressor can read a stream
  // produced by the JDK's reference java.util.zip.GZIPOutputStream.
  Random r = new Random();
  long seed = r.nextLong();
  r.setSeed(seed);
  // Log the seed so a failing random run can be reproduced.
  LOG.info("seed: " + seed);

  // Compress a random payload of up to 128 KiB with the JDK encoder.
  DataOutputBuffer dflbuf = new DataOutputBuffer();
  byte[] b = new byte[r.nextInt(128 * 1024 + 1)];
  r.nextBytes(b);
  // try-with-resources flushes the gzip trailer and avoids leaking the
  // stream if write() throws.
  try (GZIPOutputStream gzout = new GZIPOutputStream(dflbuf)) {
    gzout.write(b);
  }

  DataInputBuffer gzbuf = new DataInputBuffer();
  gzbuf.reset(dflbuf.getData(), dflbuf.getLength());

  Configuration conf = new Configuration();
  conf.setBoolean(CommonConfigurationKeys.IO_NATIVE_LIB_AVAILABLE_KEY, false);
  CompressionCodec codec = ReflectionUtils.newInstance(GzipCodec.class, conf);
  Decompressor decom = codec.createDecompressor();
  assertNotNull(decom);
  // Without native libs the codec must fall back to the built-in
  // pure-Java gzip decompressor.
  assertEquals(BuiltInGzipDecompressor.class, decom.getClass());

  // Decompress through the codec and check the payload round-trips.
  dflbuf.reset();
  try (InputStream gzin = codec.createInputStream(gzbuf, decom)) {
    IOUtils.copyBytes(gzin, dflbuf, 4096);
  }
  final byte[] dflchk = Arrays.copyOf(dflbuf.getData(), dflbuf.getLength());
  assertArrayEquals(b, dflchk);
}
项目:hadoop-2.6.0-cdh5.4.3    文件:TestCodec.java   
@Test
public void testGzipCompatibility() throws IOException {
  // Verify Hadoop's pure-Java gzip decompressor can read a stream
  // produced by the JDK's reference java.util.zip.GZIPOutputStream.
  Random r = new Random();
  long seed = r.nextLong();
  r.setSeed(seed);
  // Log the seed so a failing random run can be reproduced.
  LOG.info("seed: " + seed);

  // Compress a random payload of up to 128 KiB with the JDK encoder.
  DataOutputBuffer dflbuf = new DataOutputBuffer();
  byte[] b = new byte[r.nextInt(128 * 1024 + 1)];
  r.nextBytes(b);
  // try-with-resources flushes the gzip trailer and avoids leaking the
  // stream if write() throws.
  try (GZIPOutputStream gzout = new GZIPOutputStream(dflbuf)) {
    gzout.write(b);
  }

  DataInputBuffer gzbuf = new DataInputBuffer();
  gzbuf.reset(dflbuf.getData(), dflbuf.getLength());

  Configuration conf = new Configuration();
  conf.setBoolean(CommonConfigurationKeys.IO_NATIVE_LIB_AVAILABLE_KEY, false);
  CompressionCodec codec = ReflectionUtils.newInstance(GzipCodec.class, conf);
  Decompressor decom = codec.createDecompressor();
  assertNotNull(decom);
  // Without native libs the codec must fall back to the built-in
  // pure-Java gzip decompressor.
  assertEquals(BuiltInGzipDecompressor.class, decom.getClass());

  // Decompress through the codec and check the payload round-trips.
  dflbuf.reset();
  try (InputStream gzin = codec.createInputStream(gzbuf, decom)) {
    IOUtils.copyBytes(gzin, dflbuf, 4096);
  }
  final byte[] dflchk = Arrays.copyOf(dflbuf.getData(), dflbuf.getLength());
  assertArrayEquals(b, dflchk);
}
项目:hadoop-plus    文件:TestCodec.java   
@Test
public void testGzipCompatibility() throws IOException {
  // Verify Hadoop's pure-Java gzip decompressor can read a stream
  // produced by the JDK's reference java.util.zip.GZIPOutputStream.
  Random r = new Random();
  long seed = r.nextLong();
  r.setSeed(seed);
  // Log the seed so a failing random run can be reproduced.
  LOG.info("seed: " + seed);

  // Compress a random payload of up to 128 KiB with the JDK encoder.
  DataOutputBuffer dflbuf = new DataOutputBuffer();
  byte[] b = new byte[r.nextInt(128 * 1024 + 1)];
  r.nextBytes(b);
  // try-with-resources flushes the gzip trailer and avoids leaking the
  // stream if write() throws.
  try (GZIPOutputStream gzout = new GZIPOutputStream(dflbuf)) {
    gzout.write(b);
  }

  DataInputBuffer gzbuf = new DataInputBuffer();
  gzbuf.reset(dflbuf.getData(), dflbuf.getLength());

  Configuration conf = new Configuration();
  conf.setBoolean(CommonConfigurationKeys.IO_NATIVE_LIB_AVAILABLE_KEY, false);
  CompressionCodec codec = ReflectionUtils.newInstance(GzipCodec.class, conf);
  Decompressor decom = codec.createDecompressor();
  assertNotNull(decom);
  // Without native libs the codec must fall back to the built-in
  // pure-Java gzip decompressor.
  assertEquals(BuiltInGzipDecompressor.class, decom.getClass());

  // Decompress through the codec and check the payload round-trips.
  dflbuf.reset();
  try (InputStream gzin = codec.createInputStream(gzbuf, decom)) {
    IOUtils.copyBytes(gzin, dflbuf, 4096);
  }
  final byte[] dflchk = Arrays.copyOf(dflbuf.getData(), dflbuf.getLength());
  assertArrayEquals(b, dflchk);
}
项目:hops    文件:TestCodec.java   
@Test
public void testGzipCompatibility() throws IOException {
  // Verify Hadoop's pure-Java gzip decompressor can read a stream
  // produced by the JDK's reference java.util.zip.GZIPOutputStream.
  Random r = new Random();
  long seed = r.nextLong();
  r.setSeed(seed);
  // Log the seed so a failing random run can be reproduced.
  LOG.info("seed: " + seed);

  // Compress a random payload of up to 128 KiB with the JDK encoder.
  DataOutputBuffer dflbuf = new DataOutputBuffer();
  byte[] b = new byte[r.nextInt(128 * 1024 + 1)];
  r.nextBytes(b);
  // try-with-resources flushes the gzip trailer and avoids leaking the
  // stream if write() throws.
  try (GZIPOutputStream gzout = new GZIPOutputStream(dflbuf)) {
    gzout.write(b);
  }

  DataInputBuffer gzbuf = new DataInputBuffer();
  gzbuf.reset(dflbuf.getData(), dflbuf.getLength());

  Configuration conf = new Configuration();
  conf.setBoolean(CommonConfigurationKeys.IO_NATIVE_LIB_AVAILABLE_KEY, false);
  CompressionCodec codec = ReflectionUtils.newInstance(GzipCodec.class, conf);
  Decompressor decom = codec.createDecompressor();
  assertNotNull(decom);
  // Without native libs the codec must fall back to the built-in
  // pure-Java gzip decompressor.
  assertEquals(BuiltInGzipDecompressor.class, decom.getClass());

  // Decompress through the codec and check the payload round-trips.
  dflbuf.reset();
  try (InputStream gzin = codec.createInputStream(gzbuf, decom)) {
    IOUtils.copyBytes(gzin, dflbuf, 4096);
  }
  final byte[] dflchk = Arrays.copyOf(dflbuf.getData(), dflbuf.getLength());
  assertArrayEquals(b, dflchk);
}
项目:hadoop-TCP    文件:TestCodec.java   
@Test
public void testGzipCompatibility() throws IOException {
  // Verify Hadoop's pure-Java gzip decompressor can read a stream
  // produced by the JDK's reference java.util.zip.GZIPOutputStream.
  Random r = new Random();
  long seed = r.nextLong();
  r.setSeed(seed);
  // Log the seed so a failing random run can be reproduced.
  LOG.info("seed: " + seed);

  // Compress a random payload of up to 128 KiB with the JDK encoder.
  DataOutputBuffer dflbuf = new DataOutputBuffer();
  byte[] b = new byte[r.nextInt(128 * 1024 + 1)];
  r.nextBytes(b);
  // try-with-resources flushes the gzip trailer and avoids leaking the
  // stream if write() throws.
  try (GZIPOutputStream gzout = new GZIPOutputStream(dflbuf)) {
    gzout.write(b);
  }

  DataInputBuffer gzbuf = new DataInputBuffer();
  gzbuf.reset(dflbuf.getData(), dflbuf.getLength());

  Configuration conf = new Configuration();
  conf.setBoolean(CommonConfigurationKeys.IO_NATIVE_LIB_AVAILABLE_KEY, false);
  CompressionCodec codec = ReflectionUtils.newInstance(GzipCodec.class, conf);
  Decompressor decom = codec.createDecompressor();
  assertNotNull(decom);
  // Without native libs the codec must fall back to the built-in
  // pure-Java gzip decompressor.
  assertEquals(BuiltInGzipDecompressor.class, decom.getClass());

  // Decompress through the codec and check the payload round-trips.
  dflbuf.reset();
  try (InputStream gzin = codec.createInputStream(gzbuf, decom)) {
    IOUtils.copyBytes(gzin, dflbuf, 4096);
  }
  final byte[] dflchk = Arrays.copyOf(dflbuf.getData(), dflbuf.getLength());
  assertArrayEquals(b, dflchk);
}
项目:hadoop-on-lustre    文件:TestCodec.java   
public void testGzipCompatibility() throws IOException {
  // Verify Hadoop's pure-Java gzip decompressor can read a stream
  // produced by the JDK's reference java.util.zip.GZIPOutputStream.
  Random r = new Random();
  long seed = r.nextLong();
  r.setSeed(seed);
  // Log the seed so a failing random run can be reproduced.
  LOG.info("seed: " + seed);

  // Compress a random payload of up to 128 KiB with the JDK encoder.
  DataOutputBuffer dflbuf = new DataOutputBuffer();
  byte[] b = new byte[r.nextInt(128 * 1024 + 1)];
  r.nextBytes(b);
  GZIPOutputStream gzout = new GZIPOutputStream(dflbuf);
  try {
    gzout.write(b);
  } finally {
    // Always close: flushes the gzip trailer and avoids leaking the
    // stream if write() throws.
    gzout.close();
  }

  DataInputBuffer gzbuf = new DataInputBuffer();
  gzbuf.reset(dflbuf.getData(), dflbuf.getLength());

  Configuration conf = new Configuration();
  conf.setBoolean("hadoop.native.lib", false);
  CompressionCodec codec = ReflectionUtils.newInstance(GzipCodec.class, conf);
  Decompressor decom = codec.createDecompressor();
  assertNotNull(decom);
  // Without native libs the codec must fall back to the built-in
  // pure-Java gzip decompressor.
  assertEquals(BuiltInGzipDecompressor.class, decom.getClass());

  // Decompress through the codec and check the payload round-trips.
  dflbuf.reset();
  InputStream gzin = codec.createInputStream(gzbuf, decom);
  try {
    IOUtils.copyBytes(gzin, dflbuf, 4096);
  } finally {
    gzin.close();
  }
  final byte[] dflchk = Arrays.copyOf(dflbuf.getData(), dflbuf.getLength());
  assertTrue(java.util.Arrays.equals(b, dflchk));
}
项目:hardfs    文件:TestCodec.java   
@Test
public void testGzipCompatibility() throws IOException {
  // Verify Hadoop's pure-Java gzip decompressor can read a stream
  // produced by the JDK's reference java.util.zip.GZIPOutputStream.
  Random r = new Random();
  long seed = r.nextLong();
  r.setSeed(seed);
  // Log the seed so a failing random run can be reproduced.
  LOG.info("seed: " + seed);

  // Compress a random payload of up to 128 KiB with the JDK encoder.
  DataOutputBuffer dflbuf = new DataOutputBuffer();
  byte[] b = new byte[r.nextInt(128 * 1024 + 1)];
  r.nextBytes(b);
  // try-with-resources flushes the gzip trailer and avoids leaking the
  // stream if write() throws.
  try (GZIPOutputStream gzout = new GZIPOutputStream(dflbuf)) {
    gzout.write(b);
  }

  DataInputBuffer gzbuf = new DataInputBuffer();
  gzbuf.reset(dflbuf.getData(), dflbuf.getLength());

  Configuration conf = new Configuration();
  conf.setBoolean(CommonConfigurationKeys.IO_NATIVE_LIB_AVAILABLE_KEY, false);
  CompressionCodec codec = ReflectionUtils.newInstance(GzipCodec.class, conf);
  Decompressor decom = codec.createDecompressor();
  assertNotNull(decom);
  // Without native libs the codec must fall back to the built-in
  // pure-Java gzip decompressor.
  assertEquals(BuiltInGzipDecompressor.class, decom.getClass());

  // Decompress through the codec and check the payload round-trips.
  dflbuf.reset();
  try (InputStream gzin = codec.createInputStream(gzbuf, decom)) {
    IOUtils.copyBytes(gzin, dflbuf, 4096);
  }
  final byte[] dflchk = Arrays.copyOf(dflbuf.getData(), dflbuf.getLength());
  assertArrayEquals(b, dflchk);
}
项目:hadoop-on-lustre2    文件:TestCodec.java   
@Test
public void testGzipCompatibility() throws IOException {
  // Verify Hadoop's pure-Java gzip decompressor can read a stream
  // produced by the JDK's reference java.util.zip.GZIPOutputStream.
  Random r = new Random();
  long seed = r.nextLong();
  r.setSeed(seed);
  // Log the seed so a failing random run can be reproduced.
  LOG.info("seed: " + seed);

  // Compress a random payload of up to 128 KiB with the JDK encoder.
  DataOutputBuffer dflbuf = new DataOutputBuffer();
  byte[] b = new byte[r.nextInt(128 * 1024 + 1)];
  r.nextBytes(b);
  // try-with-resources flushes the gzip trailer and avoids leaking the
  // stream if write() throws.
  try (GZIPOutputStream gzout = new GZIPOutputStream(dflbuf)) {
    gzout.write(b);
  }

  DataInputBuffer gzbuf = new DataInputBuffer();
  gzbuf.reset(dflbuf.getData(), dflbuf.getLength());

  Configuration conf = new Configuration();
  conf.setBoolean(CommonConfigurationKeys.IO_NATIVE_LIB_AVAILABLE_KEY, false);
  CompressionCodec codec = ReflectionUtils.newInstance(GzipCodec.class, conf);
  Decompressor decom = codec.createDecompressor();
  assertNotNull(decom);
  // Without native libs the codec must fall back to the built-in
  // pure-Java gzip decompressor.
  assertEquals(BuiltInGzipDecompressor.class, decom.getClass());

  // Decompress through the codec and check the payload round-trips.
  dflbuf.reset();
  try (InputStream gzin = codec.createInputStream(gzbuf, decom)) {
    IOUtils.copyBytes(gzin, dflbuf, 4096);
  }
  final byte[] dflchk = Arrays.copyOf(dflbuf.getData(), dflbuf.getLength());
  assertArrayEquals(b, dflchk);
}
项目:hortonworks-extension    文件:TestCodec.java   
public void testGzipCompatibility() throws IOException {
  // Verify Hadoop's pure-Java gzip decompressor can read a stream
  // produced by the JDK's reference java.util.zip.GZIPOutputStream.
  Random r = new Random();
  long seed = r.nextLong();
  r.setSeed(seed);
  // Log the seed so a failing random run can be reproduced.
  LOG.info("seed: " + seed);

  // Compress a random payload of up to 128 KiB with the JDK encoder.
  DataOutputBuffer dflbuf = new DataOutputBuffer();
  byte[] b = new byte[r.nextInt(128 * 1024 + 1)];
  r.nextBytes(b);
  GZIPOutputStream gzout = new GZIPOutputStream(dflbuf);
  try {
    gzout.write(b);
  } finally {
    // Always close: flushes the gzip trailer and avoids leaking the
    // stream if write() throws.
    gzout.close();
  }

  DataInputBuffer gzbuf = new DataInputBuffer();
  gzbuf.reset(dflbuf.getData(), dflbuf.getLength());

  Configuration conf = new Configuration();
  conf.setBoolean("hadoop.native.lib", false);
  CompressionCodec codec = ReflectionUtils.newInstance(GzipCodec.class, conf);
  Decompressor decom = codec.createDecompressor();
  assertNotNull(decom);
  // Without native libs the codec must fall back to the built-in
  // pure-Java gzip decompressor.
  assertEquals(BuiltInGzipDecompressor.class, decom.getClass());

  // Decompress through the codec and check the payload round-trips.
  dflbuf.reset();
  InputStream gzin = codec.createInputStream(gzbuf, decom);
  try {
    IOUtils.copyBytes(gzin, dflbuf, 4096);
  } finally {
    gzin.close();
  }
  final byte[] dflchk = Arrays.copyOf(dflbuf.getData(), dflbuf.getLength());
  assertTrue(java.util.Arrays.equals(b, dflchk));
}
项目:hortonworks-extension    文件:TestCodec.java   
public void testGzipCompatibility() throws IOException {
  // Verify Hadoop's pure-Java gzip decompressor can read a stream
  // produced by the JDK's reference java.util.zip.GZIPOutputStream.
  Random r = new Random();
  long seed = r.nextLong();
  r.setSeed(seed);
  // Log the seed so a failing random run can be reproduced.
  LOG.info("seed: " + seed);

  // Compress a random payload of up to 128 KiB with the JDK encoder.
  DataOutputBuffer dflbuf = new DataOutputBuffer();
  byte[] b = new byte[r.nextInt(128 * 1024 + 1)];
  r.nextBytes(b);
  GZIPOutputStream gzout = new GZIPOutputStream(dflbuf);
  try {
    gzout.write(b);
  } finally {
    // Always close: flushes the gzip trailer and avoids leaking the
    // stream if write() throws.
    gzout.close();
  }

  DataInputBuffer gzbuf = new DataInputBuffer();
  gzbuf.reset(dflbuf.getData(), dflbuf.getLength());

  Configuration conf = new Configuration();
  conf.setBoolean("hadoop.native.lib", false);
  CompressionCodec codec = ReflectionUtils.newInstance(GzipCodec.class, conf);
  Decompressor decom = codec.createDecompressor();
  assertNotNull(decom);
  // Without native libs the codec must fall back to the built-in
  // pure-Java gzip decompressor.
  assertEquals(BuiltInGzipDecompressor.class, decom.getClass());

  // Decompress through the codec and check the payload round-trips.
  dflbuf.reset();
  InputStream gzin = codec.createInputStream(gzbuf, decom);
  try {
    IOUtils.copyBytes(gzin, dflbuf, 4096);
  } finally {
    gzin.close();
  }
  final byte[] dflchk = Arrays.copyOf(dflbuf.getData(), dflbuf.getLength());
  assertTrue(java.util.Arrays.equals(b, dflchk));
}
项目:hadoop-oss    文件:TestCodec.java   
@Test
public void testCodecPoolAndGzipDecompressor() {
  // BuiltInZlibInflater should not be used as the GzipCodec decompressor.
  // Assert that this is the case.

  // Don't use native libs for this test.
  Configuration conf = new Configuration();
  ZlibFactory.setNativeZlibLoaded(false);
  assertFalse("ZlibFactory is using native libs against request",
              ZlibFactory.isNativeZlibLoaded(conf));

  // This should give us a BuiltInZlibInflater.
  Decompressor zlibDecompressor = ZlibFactory.getZlibDecompressor(conf);
  assertNotNull("zlibDecompressor is null!", zlibDecompressor);
  assertTrue("ZlibFactory returned unexpected inflator",
      zlibDecompressor instanceof BuiltInZlibInflater);

  // The codec factory must map the .gz extension to GzipCodec, whose
  // createOutputStream() just wraps the existing stream in a
  // java.util.zip.GZIPOutputStream.
  CompressionCodecFactory ccf = new CompressionCodecFactory(conf);
  CompressionCodec codec = ccf.getCodec(new Path("foo.gz"));
  assertTrue("Codec for .gz file is not GzipCodec",
      codec instanceof GzipCodec);

  // Neither the codec nor the pool may hand back a null decompressor.
  assertNotNull("Got null codecDecompressor", codec.createDecompressor());
  Decompressor poolDecompressor = CodecPool.getDecompressor(codec);
  assertNotNull("Got null poolDecompressor", poolDecompressor);

  // Return both decompressors, then ask for another: the pool must hand
  // out a fresh pure-Java gzip decompressor each time, but must reuse a
  // native one.
  CodecPool.returnDecompressor(zlibDecompressor);
  CodecPool.returnDecompressor(poolDecompressor);
  Decompressor poolDecompressor2 = CodecPool.getDecompressor(codec);
  if (poolDecompressor.getClass() == BuiltInGzipDecompressor.class) {
    assertNotSame("Reused java gzip decompressor in pool",
        poolDecompressor, poolDecompressor2);
  } else {
    assertSame("Did not reuse native gzip decompressor in pool",
        poolDecompressor, poolDecompressor2);
  }
}
项目:hadoop    文件:TestCodec.java   
@Test
public void testBuiltInGzipConcat() throws IOException {
  // Disable native libraries so the pure-Java BuiltInGzipDecompressor
  // handles the concatenated gzip streams.
  Configuration config = new Configuration();
  config.setBoolean(CommonConfigurationKeys.IO_NATIVE_LIB_AVAILABLE_KEY, false);
  GzipConcatTest(config, BuiltInGzipDecompressor.class);
}
项目:hadoop    文件:TestCodec.java   
public void testCodecPoolAndGzipDecompressor() {
  // BuiltInZlibInflater should not be used as the GzipCodec decompressor.
  // Assert that this is the case.

  // Don't use native libs for this test.
  Configuration conf = new Configuration();
  conf.setBoolean("hadoop.native.lib", false);
  assertFalse("ZlibFactory is using native libs against request",
              ZlibFactory.isNativeZlibLoaded(conf));

  // This should give us a BuiltInZlibInflater.
  Decompressor zlibDecompressor = ZlibFactory.getZlibDecompressor(conf);
  assertNotNull("zlibDecompressor is null!", zlibDecompressor);
  assertTrue("ZlibFactory returned unexpected inflator",
      zlibDecompressor instanceof BuiltInZlibInflater);

  // The codec factory must map the .gz extension to GzipCodec, whose
  // createOutputStream() just wraps the existing stream in a
  // java.util.zip.GZIPOutputStream.
  CompressionCodecFactory ccf = new CompressionCodecFactory(conf);
  CompressionCodec codec = ccf.getCodec(new Path("foo.gz"));
  assertTrue("Codec for .gz file is not GzipCodec",
      codec instanceof GzipCodec);

  // Neither the codec nor the pool may hand back a null decompressor.
  assertNotNull("Got null codecDecompressor", codec.createDecompressor());
  Decompressor poolDecompressor = CodecPool.getDecompressor(codec);
  assertNotNull("Got null poolDecompressor", poolDecompressor);

  // Return both decompressors, then ask for another: the pool must hand
  // out a fresh pure-Java gzip decompressor each time, but must reuse a
  // native one.
  CodecPool.returnDecompressor(zlibDecompressor);
  CodecPool.returnDecompressor(poolDecompressor);
  Decompressor poolDecompressor2 = CodecPool.getDecompressor(codec);
  if (poolDecompressor.getClass() == BuiltInGzipDecompressor.class) {
    assertNotSame("Reused java gzip decompressor in pool",
        poolDecompressor, poolDecompressor2);
  } else {
    assertSame("Did not reuse native gzip decompressor in pool",
        poolDecompressor, poolDecompressor2);
  }
}
项目:aliyun-oss-hadoop-fs    文件:TestCodec.java   
@Test
public void testCodecPoolAndGzipDecompressor() {
  // BuiltInZlibInflater should not be used as the GzipCodec decompressor.
  // Assert that this is the case.

  // Don't use native libs for this test.
  Configuration conf = new Configuration();
  ZlibFactory.setNativeZlibLoaded(false);
  assertFalse("ZlibFactory is using native libs against request",
              ZlibFactory.isNativeZlibLoaded(conf));

  // This should give us a BuiltInZlibInflater.
  Decompressor zlibDecompressor = ZlibFactory.getZlibDecompressor(conf);
  assertNotNull("zlibDecompressor is null!", zlibDecompressor);
  assertTrue("ZlibFactory returned unexpected inflator",
      zlibDecompressor instanceof BuiltInZlibInflater);

  // The codec factory must map the .gz extension to GzipCodec, whose
  // createOutputStream() just wraps the existing stream in a
  // java.util.zip.GZIPOutputStream.
  CompressionCodecFactory ccf = new CompressionCodecFactory(conf);
  CompressionCodec codec = ccf.getCodec(new Path("foo.gz"));
  assertTrue("Codec for .gz file is not GzipCodec",
      codec instanceof GzipCodec);

  // Neither the codec nor the pool may hand back a null decompressor.
  assertNotNull("Got null codecDecompressor", codec.createDecompressor());
  Decompressor poolDecompressor = CodecPool.getDecompressor(codec);
  assertNotNull("Got null poolDecompressor", poolDecompressor);

  // Return both decompressors, then ask for another: the pool must hand
  // out a fresh pure-Java gzip decompressor each time, but must reuse a
  // native one.
  CodecPool.returnDecompressor(zlibDecompressor);
  CodecPool.returnDecompressor(poolDecompressor);
  Decompressor poolDecompressor2 = CodecPool.getDecompressor(codec);
  if (poolDecompressor.getClass() == BuiltInGzipDecompressor.class) {
    assertNotSame("Reused java gzip decompressor in pool",
        poolDecompressor, poolDecompressor2);
  } else {
    assertSame("Did not reuse native gzip decompressor in pool",
        poolDecompressor, poolDecompressor2);
  }
}
项目:big-c    文件:TestCodec.java   
@Test
public void testBuiltInGzipConcat() throws IOException {
  // Disable native libraries so the pure-Java BuiltInGzipDecompressor
  // handles the concatenated gzip streams.
  Configuration config = new Configuration();
  config.setBoolean(CommonConfigurationKeys.IO_NATIVE_LIB_AVAILABLE_KEY, false);
  GzipConcatTest(config, BuiltInGzipDecompressor.class);
}
项目:big-c    文件:TestCodec.java   
public void testCodecPoolAndGzipDecompressor() {
  // BuiltInZlibInflater should not be used as the GzipCodec decompressor.
  // Assert that this is the case.

  // Don't use native libs for this test.
  Configuration conf = new Configuration();
  conf.setBoolean("hadoop.native.lib", false);
  assertFalse("ZlibFactory is using native libs against request",
              ZlibFactory.isNativeZlibLoaded(conf));

  // This should give us a BuiltInZlibInflater.
  Decompressor zlibDecompressor = ZlibFactory.getZlibDecompressor(conf);
  assertNotNull("zlibDecompressor is null!", zlibDecompressor);
  assertTrue("ZlibFactory returned unexpected inflator",
      zlibDecompressor instanceof BuiltInZlibInflater);

  // The codec factory must map the .gz extension to GzipCodec, whose
  // createOutputStream() just wraps the existing stream in a
  // java.util.zip.GZIPOutputStream.
  CompressionCodecFactory ccf = new CompressionCodecFactory(conf);
  CompressionCodec codec = ccf.getCodec(new Path("foo.gz"));
  assertTrue("Codec for .gz file is not GzipCodec",
      codec instanceof GzipCodec);

  // Neither the codec nor the pool may hand back a null decompressor.
  assertNotNull("Got null codecDecompressor", codec.createDecompressor());
  Decompressor poolDecompressor = CodecPool.getDecompressor(codec);
  assertNotNull("Got null poolDecompressor", poolDecompressor);

  // Return both decompressors, then ask for another: the pool must hand
  // out a fresh pure-Java gzip decompressor each time, but must reuse a
  // native one.
  CodecPool.returnDecompressor(zlibDecompressor);
  CodecPool.returnDecompressor(poolDecompressor);
  Decompressor poolDecompressor2 = CodecPool.getDecompressor(codec);
  if (poolDecompressor.getClass() == BuiltInGzipDecompressor.class) {
    assertNotSame("Reused java gzip decompressor in pool",
        poolDecompressor, poolDecompressor2);
  } else {
    assertSame("Did not reuse native gzip decompressor in pool",
        poolDecompressor, poolDecompressor2);
  }
}
项目:hadoop-2.6.0-cdh5.4.3    文件:TestCodec.java   
@Test
public void testBuiltInGzipConcat() throws IOException {
  // Disable native libraries so the pure-Java BuiltInGzipDecompressor
  // handles the concatenated gzip streams.
  Configuration config = new Configuration();
  config.setBoolean(CommonConfigurationKeys.IO_NATIVE_LIB_AVAILABLE_KEY, false);
  GzipConcatTest(config, BuiltInGzipDecompressor.class);
}
项目:hadoop-2.6.0-cdh5.4.3    文件:TestCodec.java   
public void testCodecPoolAndGzipDecompressor() {
  // BuiltInZlibInflater should not be used as the GzipCodec decompressor.
  // Assert that this is the case.

  // Don't use native libs for this test.
  Configuration conf = new Configuration();
  conf.setBoolean("hadoop.native.lib", false);
  assertFalse("ZlibFactory is using native libs against request",
              ZlibFactory.isNativeZlibLoaded(conf));

  // This should give us a BuiltInZlibInflater.
  Decompressor zlibDecompressor = ZlibFactory.getZlibDecompressor(conf);
  assertNotNull("zlibDecompressor is null!", zlibDecompressor);
  assertTrue("ZlibFactory returned unexpected inflator",
      zlibDecompressor instanceof BuiltInZlibInflater);

  // The codec factory must map the .gz extension to GzipCodec, whose
  // createOutputStream() just wraps the existing stream in a
  // java.util.zip.GZIPOutputStream.
  CompressionCodecFactory ccf = new CompressionCodecFactory(conf);
  CompressionCodec codec = ccf.getCodec(new Path("foo.gz"));
  assertTrue("Codec for .gz file is not GzipCodec",
      codec instanceof GzipCodec);

  // Neither the codec nor the pool may hand back a null decompressor.
  assertNotNull("Got null codecDecompressor", codec.createDecompressor());
  Decompressor poolDecompressor = CodecPool.getDecompressor(codec);
  assertNotNull("Got null poolDecompressor", poolDecompressor);

  // Return both decompressors, then ask for another: the pool must hand
  // out a fresh pure-Java gzip decompressor each time, but must reuse a
  // native one.
  CodecPool.returnDecompressor(zlibDecompressor);
  CodecPool.returnDecompressor(poolDecompressor);
  Decompressor poolDecompressor2 = CodecPool.getDecompressor(codec);
  if (poolDecompressor.getClass() == BuiltInGzipDecompressor.class) {
    assertNotSame("Reused java gzip decompressor in pool",
        poolDecompressor, poolDecompressor2);
  } else {
    assertSame("Did not reuse native gzip decompressor in pool",
        poolDecompressor, poolDecompressor2);
  }
}
项目:hadoop-plus    文件:TestCodec.java   
@Test
public void testBuiltInGzipConcat() throws IOException {
  // Disable native libraries so the pure-Java BuiltInGzipDecompressor
  // handles the concatenated gzip streams.
  Configuration config = new Configuration();
  config.setBoolean(CommonConfigurationKeys.IO_NATIVE_LIB_AVAILABLE_KEY, false);
  GzipConcatTest(config, BuiltInGzipDecompressor.class);
}
项目:hadoop-plus    文件:TestCodec.java   
public void testCodecPoolAndGzipDecompressor() {
  // BuiltInZlibInflater should not be used as the GzipCodec decompressor.
  // Assert that this is the case.

  // Don't use native libs for this test.
  Configuration conf = new Configuration();
  conf.setBoolean("hadoop.native.lib", false);
  assertFalse("ZlibFactory is using native libs against request",
              ZlibFactory.isNativeZlibLoaded(conf));

  // This should give us a BuiltInZlibInflater.
  Decompressor zlibDecompressor = ZlibFactory.getZlibDecompressor(conf);
  assertNotNull("zlibDecompressor is null!", zlibDecompressor);
  assertTrue("ZlibFactory returned unexpected inflator",
      zlibDecompressor instanceof BuiltInZlibInflater);

  // The codec factory must map the .gz extension to GzipCodec, whose
  // createOutputStream() just wraps the existing stream in a
  // java.util.zip.GZIPOutputStream.
  CompressionCodecFactory ccf = new CompressionCodecFactory(conf);
  CompressionCodec codec = ccf.getCodec(new Path("foo.gz"));
  assertTrue("Codec for .gz file is not GzipCodec",
      codec instanceof GzipCodec);

  // Neither the codec nor the pool may hand back a null decompressor.
  assertNotNull("Got null codecDecompressor", codec.createDecompressor());
  Decompressor poolDecompressor = CodecPool.getDecompressor(codec);
  assertNotNull("Got null poolDecompressor", poolDecompressor);

  // Return both decompressors, then ask for another: the pool must hand
  // out a fresh pure-Java gzip decompressor each time, but must reuse a
  // native one.
  CodecPool.returnDecompressor(zlibDecompressor);
  CodecPool.returnDecompressor(poolDecompressor);
  Decompressor poolDecompressor2 = CodecPool.getDecompressor(codec);
  if (poolDecompressor.getClass() == BuiltInGzipDecompressor.class) {
    assertNotSame("Reused java gzip decompressor in pool",
        poolDecompressor, poolDecompressor2);
  } else {
    assertSame("Did not reuse native gzip decompressor in pool",
        poolDecompressor, poolDecompressor2);
  }
}
项目:hops    文件:TestCodec.java   
@Test
public void testBuiltInGzipConcat() throws IOException {
  // Disable native libraries so the pure-Java BuiltInGzipDecompressor
  // handles the concatenated gzip streams.
  Configuration config = new Configuration();
  config.setBoolean(CommonConfigurationKeys.IO_NATIVE_LIB_AVAILABLE_KEY, false);
  GzipConcatTest(config, BuiltInGzipDecompressor.class);
}
项目:hops    文件:TestCodec.java   
@Test
public void testCodecPoolAndGzipDecompressor() {
  // BuiltInZlibInflater should not be used as the GzipCodec decompressor.
  // Assert that this is the case.

  // Don't use native libs for this test.
  Configuration conf = new Configuration();
  conf.setBoolean("hadoop.native.lib", false);
  assertFalse("ZlibFactory is using native libs against request",
              ZlibFactory.isNativeZlibLoaded(conf));

  // This should give us a BuiltInZlibInflater.
  Decompressor zlibDecompressor = ZlibFactory.getZlibDecompressor(conf);
  assertNotNull("zlibDecompressor is null!", zlibDecompressor);
  assertTrue("ZlibFactory returned unexpected inflator",
      zlibDecompressor instanceof BuiltInZlibInflater);

  // The codec factory must map the .gz extension to GzipCodec, whose
  // createOutputStream() just wraps the existing stream in a
  // java.util.zip.GZIPOutputStream.
  CompressionCodecFactory ccf = new CompressionCodecFactory(conf);
  CompressionCodec codec = ccf.getCodec(new Path("foo.gz"));
  assertTrue("Codec for .gz file is not GzipCodec",
      codec instanceof GzipCodec);

  // Neither the codec nor the pool may hand back a null decompressor.
  assertNotNull("Got null codecDecompressor", codec.createDecompressor());
  Decompressor poolDecompressor = CodecPool.getDecompressor(codec);
  assertNotNull("Got null poolDecompressor", poolDecompressor);

  // Return both decompressors, then ask for another: the pool must hand
  // out a fresh pure-Java gzip decompressor each time, but must reuse a
  // native one.
  CodecPool.returnDecompressor(zlibDecompressor);
  CodecPool.returnDecompressor(poolDecompressor);
  Decompressor poolDecompressor2 = CodecPool.getDecompressor(codec);
  if (poolDecompressor.getClass() == BuiltInGzipDecompressor.class) {
    assertNotSame("Reused java gzip decompressor in pool",
        poolDecompressor, poolDecompressor2);
  } else {
    assertSame("Did not reuse native gzip decompressor in pool",
        poolDecompressor, poolDecompressor2);
  }
}
项目:hadoop-TCP    文件:TestCodec.java   
@Test
public void testBuiltInGzipConcat() throws IOException {
  // Disable native libraries so the pure-Java BuiltInGzipDecompressor
  // handles the concatenated gzip streams.
  Configuration config = new Configuration();
  config.setBoolean(CommonConfigurationKeys.IO_NATIVE_LIB_AVAILABLE_KEY, false);
  GzipConcatTest(config, BuiltInGzipDecompressor.class);
}
项目:hadoop-TCP    文件:TestCodec.java   
public void testCodecPoolAndGzipDecompressor() {
  // BuiltInZlibInflater should not be used as the GzipCodec decompressor.
  // Assert that this is the case.

  // Don't use native libs for this test.
  Configuration conf = new Configuration();
  conf.setBoolean("hadoop.native.lib", false);
  assertFalse("ZlibFactory is using native libs against request",
              ZlibFactory.isNativeZlibLoaded(conf));

  // This should give us a BuiltInZlibInflater.
  Decompressor zlibDecompressor = ZlibFactory.getZlibDecompressor(conf);
  assertNotNull("zlibDecompressor is null!", zlibDecompressor);
  assertTrue("ZlibFactory returned unexpected inflator",
      zlibDecompressor instanceof BuiltInZlibInflater);

  // The codec factory must map the .gz extension to GzipCodec, whose
  // createOutputStream() just wraps the existing stream in a
  // java.util.zip.GZIPOutputStream.
  CompressionCodecFactory ccf = new CompressionCodecFactory(conf);
  CompressionCodec codec = ccf.getCodec(new Path("foo.gz"));
  assertTrue("Codec for .gz file is not GzipCodec",
      codec instanceof GzipCodec);

  // Neither the codec nor the pool may hand back a null decompressor.
  assertNotNull("Got null codecDecompressor", codec.createDecompressor());
  Decompressor poolDecompressor = CodecPool.getDecompressor(codec);
  assertNotNull("Got null poolDecompressor", poolDecompressor);

  // Return both decompressors, then ask for another: the pool must hand
  // out a fresh pure-Java gzip decompressor each time, but must reuse a
  // native one.
  CodecPool.returnDecompressor(zlibDecompressor);
  CodecPool.returnDecompressor(poolDecompressor);
  Decompressor poolDecompressor2 = CodecPool.getDecompressor(codec);
  if (poolDecompressor.getClass() == BuiltInGzipDecompressor.class) {
    assertNotSame("Reused java gzip decompressor in pool",
        poolDecompressor, poolDecompressor2);
  } else {
    assertSame("Did not reuse native gzip decompressor in pool",
        poolDecompressor, poolDecompressor2);
  }
}
项目:hadoop-on-lustre    文件:TestCodec.java   
public void testBuiltInGzipConcat() throws IOException {
  // Run the concatenated-gzip test against the pure-Java decompressor
  // by explicitly disabling the native zlib path.
  final Configuration config = new Configuration();
  config.setBoolean("hadoop.native.lib", false);
  GzipConcatTest(config, BuiltInGzipDecompressor.class);
}
Project: hadoop-on-lustre    File: TestCodec.java   
public void testCodecPoolAndGzipDecompressor() {
  // BuiltInZlibInflater should not be used as the GzipCodec decompressor.
  // Assert that this is the case.

  // Don't use native libs for this test.
  Configuration conf = new Configuration();
  conf.setBoolean("hadoop.native.lib", false);
  assertFalse("ZlibFactory is using native libs against request",
              ZlibFactory.isNativeZlibLoaded(conf));

  // This should give us a BuiltInZlibInflater.
  Decompressor zlibDecompressor = ZlibFactory.getZlibDecompressor(conf);
  assertNotNull("zlibDecompressor is null!", zlibDecompressor);
  assertTrue("ZlibFactory returned unexpected inflator",
      zlibDecompressor instanceof BuiltInZlibInflater);

  // GzipCodec's createOutputStream() just wraps the existing stream in a
  // java.util.zip.GZIPOutputStream; its decompressor must therefore not
  // be the plain zlib inflater obtained above.
  CompressionCodecFactory ccf = new CompressionCodecFactory(conf);
  CompressionCodec codec = ccf.getCodec(new Path("foo.gz"));
  assertTrue("Codec for .gz file is not GzipCodec",
      codec instanceof GzipCodec);

  // Make sure the codec itself doesn't hand out a null decompressor.
  Decompressor codecDecompressor = codec.createDecompressor();
  assertNotNull("Got null codecDecompressor", codecDecompressor);

  // Asking the CodecPool for a decompressor for GzipCodec
  // should not return null either.
  Decompressor poolDecompressor = CodecPool.getDecompressor(codec);
  assertNotNull("Got null poolDecompressor", poolDecompressor);

  // Return a couple decompressors to the pool, then fetch again: the
  // pure-Java gzip decompressor must NOT be handed back as the same
  // instance, while the native one is expected to be recycled.
  CodecPool.returnDecompressor(zlibDecompressor);
  CodecPool.returnDecompressor(poolDecompressor);
  Decompressor poolDecompressor2 = CodecPool.getDecompressor(codec);
  if (poolDecompressor.getClass() == BuiltInGzipDecompressor.class) {
    assertNotSame("Reused java gzip decompressor in pool",
        poolDecompressor, poolDecompressor2);
  } else {
    assertSame("Did not reuse native gzip decompressor in pool",
        poolDecompressor, poolDecompressor2);
  }
  // Give the second decompressor back so the pool is left balanced.
  CodecPool.returnDecompressor(poolDecompressor2);
}
Project: hardfs    File: TestCodec.java   
@Test
public void testBuiltInGzipConcat() throws IOException {
  // Disable native libraries so the pure-Java gzip decompressor is
  // exercised by the concatenated-stream test.
  final Configuration config = new Configuration();
  config.setBoolean(CommonConfigurationKeys.IO_NATIVE_LIB_AVAILABLE_KEY, false);
  GzipConcatTest(config, BuiltInGzipDecompressor.class);
}
Project: hardfs    File: TestCodec.java   
public void testCodecPoolAndGzipDecompressor() {
  // BuiltInZlibInflater should not be used as the GzipCodec decompressor.
  // Assert that this is the case.

  // Don't use native libs for this test.
  Configuration conf = new Configuration();
  conf.setBoolean("hadoop.native.lib", false);
  assertFalse("ZlibFactory is using native libs against request",
              ZlibFactory.isNativeZlibLoaded(conf));

  // This should give us a BuiltInZlibInflater.
  Decompressor zlibDecompressor = ZlibFactory.getZlibDecompressor(conf);
  assertNotNull("zlibDecompressor is null!", zlibDecompressor);
  assertTrue("ZlibFactory returned unexpected inflator",
      zlibDecompressor instanceof BuiltInZlibInflater);

  // GzipCodec's createOutputStream() just wraps the existing stream in a
  // java.util.zip.GZIPOutputStream; its decompressor must therefore not
  // be the plain zlib inflater obtained above.
  CompressionCodecFactory ccf = new CompressionCodecFactory(conf);
  CompressionCodec codec = ccf.getCodec(new Path("foo.gz"));
  assertTrue("Codec for .gz file is not GzipCodec",
      codec instanceof GzipCodec);

  // Make sure the codec itself doesn't hand out a null decompressor.
  Decompressor codecDecompressor = codec.createDecompressor();
  assertNotNull("Got null codecDecompressor", codecDecompressor);

  // Asking the CodecPool for a decompressor for GzipCodec
  // should not return null either.
  Decompressor poolDecompressor = CodecPool.getDecompressor(codec);
  assertNotNull("Got null poolDecompressor", poolDecompressor);

  // Return a couple decompressors to the pool, then fetch again: the
  // pure-Java gzip decompressor must NOT be handed back as the same
  // instance, while the native one is expected to be recycled.
  CodecPool.returnDecompressor(zlibDecompressor);
  CodecPool.returnDecompressor(poolDecompressor);
  Decompressor poolDecompressor2 = CodecPool.getDecompressor(codec);
  if (poolDecompressor.getClass() == BuiltInGzipDecompressor.class) {
    assertNotSame("Reused java gzip decompressor in pool",
        poolDecompressor, poolDecompressor2);
  } else {
    assertSame("Did not reuse native gzip decompressor in pool",
        poolDecompressor, poolDecompressor2);
  }
  // Give the second decompressor back so the pool is left balanced.
  CodecPool.returnDecompressor(poolDecompressor2);
}
Project: hadoop-on-lustre2    File: TestCodec.java   
@Test
public void testBuiltInGzipConcat() throws IOException {
  // Disable native libraries so the pure-Java gzip decompressor is
  // exercised by the concatenated-stream test.
  final Configuration config = new Configuration();
  config.setBoolean(CommonConfigurationKeys.IO_NATIVE_LIB_AVAILABLE_KEY, false);
  GzipConcatTest(config, BuiltInGzipDecompressor.class);
}
Project: hadoop-on-lustre2    File: TestCodec.java   
public void testCodecPoolAndGzipDecompressor() {
  // BuiltInZlibInflater should not be used as the GzipCodec decompressor.
  // Assert that this is the case.

  // Don't use native libs for this test.
  Configuration conf = new Configuration();
  conf.setBoolean("hadoop.native.lib", false);
  assertFalse("ZlibFactory is using native libs against request",
              ZlibFactory.isNativeZlibLoaded(conf));

  // This should give us a BuiltInZlibInflater.
  Decompressor zlibDecompressor = ZlibFactory.getZlibDecompressor(conf);
  assertNotNull("zlibDecompressor is null!", zlibDecompressor);
  assertTrue("ZlibFactory returned unexpected inflator",
      zlibDecompressor instanceof BuiltInZlibInflater);

  // GzipCodec's createOutputStream() just wraps the existing stream in a
  // java.util.zip.GZIPOutputStream; its decompressor must therefore not
  // be the plain zlib inflater obtained above.
  CompressionCodecFactory ccf = new CompressionCodecFactory(conf);
  CompressionCodec codec = ccf.getCodec(new Path("foo.gz"));
  assertTrue("Codec for .gz file is not GzipCodec",
      codec instanceof GzipCodec);

  // Make sure the codec itself doesn't hand out a null decompressor.
  Decompressor codecDecompressor = codec.createDecompressor();
  assertNotNull("Got null codecDecompressor", codecDecompressor);

  // Asking the CodecPool for a decompressor for GzipCodec
  // should not return null either.
  Decompressor poolDecompressor = CodecPool.getDecompressor(codec);
  assertNotNull("Got null poolDecompressor", poolDecompressor);

  // Return a couple decompressors to the pool, then fetch again: the
  // pure-Java gzip decompressor must NOT be handed back as the same
  // instance, while the native one is expected to be recycled.
  CodecPool.returnDecompressor(zlibDecompressor);
  CodecPool.returnDecompressor(poolDecompressor);
  Decompressor poolDecompressor2 = CodecPool.getDecompressor(codec);
  if (poolDecompressor.getClass() == BuiltInGzipDecompressor.class) {
    assertNotSame("Reused java gzip decompressor in pool",
        poolDecompressor, poolDecompressor2);
  } else {
    assertSame("Did not reuse native gzip decompressor in pool",
        poolDecompressor, poolDecompressor2);
  }
  // Give the second decompressor back so the pool is left balanced.
  CodecPool.returnDecompressor(poolDecompressor2);
}
Project: hortonworks-extension    File: TestCodec.java   
public void testBuiltInGzipConcat() throws IOException {
  // Run the concatenated-gzip test against the pure-Java decompressor
  // by explicitly disabling the native zlib path.
  final Configuration config = new Configuration();
  config.setBoolean("hadoop.native.lib", false);
  GzipConcatTest(config, BuiltInGzipDecompressor.class);
}
Project: hortonworks-extension    File: TestCodec.java   
public void testCodecPoolAndGzipDecompressor() {
  // BuiltInZlibInflater should not be used as the GzipCodec decompressor.
  // Assert that this is the case.

  // Don't use native libs for this test.
  Configuration conf = new Configuration();
  conf.setBoolean("hadoop.native.lib", false);
  assertFalse("ZlibFactory is using native libs against request",
              ZlibFactory.isNativeZlibLoaded(conf));

  // This should give us a BuiltInZlibInflater.
  Decompressor zlibDecompressor = ZlibFactory.getZlibDecompressor(conf);
  assertNotNull("zlibDecompressor is null!", zlibDecompressor);
  assertTrue("ZlibFactory returned unexpected inflator",
      zlibDecompressor instanceof BuiltInZlibInflater);

  // GzipCodec's createOutputStream() just wraps the existing stream in a
  // java.util.zip.GZIPOutputStream; its decompressor must therefore not
  // be the plain zlib inflater obtained above.
  CompressionCodecFactory ccf = new CompressionCodecFactory(conf);
  CompressionCodec codec = ccf.getCodec(new Path("foo.gz"));
  assertTrue("Codec for .gz file is not GzipCodec",
      codec instanceof GzipCodec);

  // Make sure the codec itself doesn't hand out a null decompressor.
  Decompressor codecDecompressor = codec.createDecompressor();
  assertNotNull("Got null codecDecompressor", codecDecompressor);

  // Asking the CodecPool for a decompressor for GzipCodec
  // should not return null either.
  Decompressor poolDecompressor = CodecPool.getDecompressor(codec);
  assertNotNull("Got null poolDecompressor", poolDecompressor);

  // Return a couple decompressors to the pool, then fetch again: the
  // pure-Java gzip decompressor must NOT be handed back as the same
  // instance, while the native one is expected to be recycled.
  CodecPool.returnDecompressor(zlibDecompressor);
  CodecPool.returnDecompressor(poolDecompressor);
  Decompressor poolDecompressor2 = CodecPool.getDecompressor(codec);
  if (poolDecompressor.getClass() == BuiltInGzipDecompressor.class) {
    assertNotSame("Reused java gzip decompressor in pool",
        poolDecompressor, poolDecompressor2);
  } else {
    assertSame("Did not reuse native gzip decompressor in pool",
        poolDecompressor, poolDecompressor2);
  }
  // Give the second decompressor back so the pool is left balanced.
  CodecPool.returnDecompressor(poolDecompressor2);
}
Project: hortonworks-extension    File: TestCodec.java   
public void testBuiltInGzipConcat() throws IOException {
  // Run the concatenated-gzip test against the pure-Java decompressor
  // by explicitly disabling the native zlib path.
  final Configuration config = new Configuration();
  config.setBoolean("hadoop.native.lib", false);
  GzipConcatTest(config, BuiltInGzipDecompressor.class);
}
Project: hortonworks-extension    File: TestCodec.java   
public void testCodecPoolAndGzipDecompressor() {
  // BuiltInZlibInflater should not be used as the GzipCodec decompressor.
  // Assert that this is the case.

  // Don't use native libs for this test.
  Configuration conf = new Configuration();
  conf.setBoolean("hadoop.native.lib", false);
  assertFalse("ZlibFactory is using native libs against request",
              ZlibFactory.isNativeZlibLoaded(conf));

  // This should give us a BuiltInZlibInflater.
  Decompressor zlibDecompressor = ZlibFactory.getZlibDecompressor(conf);
  assertNotNull("zlibDecompressor is null!", zlibDecompressor);
  assertTrue("ZlibFactory returned unexpected inflator",
      zlibDecompressor instanceof BuiltInZlibInflater);

  // GzipCodec's createOutputStream() just wraps the existing stream in a
  // java.util.zip.GZIPOutputStream; its decompressor must therefore not
  // be the plain zlib inflater obtained above.
  CompressionCodecFactory ccf = new CompressionCodecFactory(conf);
  CompressionCodec codec = ccf.getCodec(new Path("foo.gz"));
  assertTrue("Codec for .gz file is not GzipCodec",
      codec instanceof GzipCodec);

  // Make sure the codec itself doesn't hand out a null decompressor.
  Decompressor codecDecompressor = codec.createDecompressor();
  assertNotNull("Got null codecDecompressor", codecDecompressor);

  // Asking the CodecPool for a decompressor for GzipCodec
  // should not return null either.
  Decompressor poolDecompressor = CodecPool.getDecompressor(codec);
  assertNotNull("Got null poolDecompressor", poolDecompressor);

  // Return a couple decompressors to the pool, then fetch again: the
  // pure-Java gzip decompressor must NOT be handed back as the same
  // instance, while the native one is expected to be recycled.
  CodecPool.returnDecompressor(zlibDecompressor);
  CodecPool.returnDecompressor(poolDecompressor);
  Decompressor poolDecompressor2 = CodecPool.getDecompressor(codec);
  if (poolDecompressor.getClass() == BuiltInGzipDecompressor.class) {
    assertNotSame("Reused java gzip decompressor in pool",
        poolDecompressor, poolDecompressor2);
  } else {
    assertSame("Did not reuse native gzip decompressor in pool",
        poolDecompressor, poolDecompressor2);
  }
  // Give the second decompressor back so the pool is left balanced.
  CodecPool.returnDecompressor(poolDecompressor2);
}