Example source code for the Java class org.apache.hadoop.hbase.io.compress.Compression.Algorithm

Project: ditb    File: HBaseTestingUtility.java
/**
 * Creates a pre-split table for load testing. If the table already exists,
 * logs a warning and continues.
 * @return the number of regions the table was split into
 */
public static int createPreSplitLoadTestTable(Configuration conf,
    TableName tableName, byte[][] columnFamilies, Algorithm compression,
    DataBlockEncoding dataBlockEncoding, int numRegionsPerServer, int regionReplication,
    Durability durability)
        throws IOException {
  HTableDescriptor desc = new HTableDescriptor(tableName);
  desc.setDurability(durability);
  desc.setRegionReplication(regionReplication);
  HColumnDescriptor[] hcds = new HColumnDescriptor[columnFamilies.length];
  for (int i = 0; i < columnFamilies.length; i++) {
    HColumnDescriptor hcd = new HColumnDescriptor(columnFamilies[i]);
    hcd.setDataBlockEncoding(dataBlockEncoding);
    hcd.setCompressionType(compression);
    hcds[i] = hcd;
  }
  return createPreSplitLoadTestTable(conf, desc, hcds, numRegionsPerServer);
}
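For reference, a minimal usage sketch of the helper above; the table name, family name, and parameter values are illustrative placeholders, and `util` is assumed to be an existing HBaseTestingUtility instance backed by a running (mini-)cluster.

// Hypothetical usage: pre-split a load-test table with GZ compression and
// FAST_DIFF block encoding. All names and counts below are placeholders.
Configuration conf = util.getConfiguration();
int regions = HBaseTestingUtility.createPreSplitLoadTestTable(conf,
    TableName.valueOf("loadtest"),
    new byte[][] { Bytes.toBytes("cf") },
    Compression.Algorithm.GZ,
    DataBlockEncoding.FAST_DIFF,
    5,                          // numRegionsPerServer
    1,                          // regionReplication
    Durability.USE_DEFAULT);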
Project: ditb    File: TestHFileOutputFormat2.java
/**
 * Test for {@link HFileOutputFormat2#configureCompression(org.apache.hadoop.hbase.client.Table,
 * Configuration)} and {@link HFileOutputFormat2#createFamilyCompressionMap
 * (Configuration)}.
 * Tests that the compression map is correctly serialized into
 * and deserialized from configuration
 *
 * @throws IOException
 */
@Ignore("Goes zombie too frequently; needs work. See HBASE-14563") @Test
public void testSerializeDeserializeFamilyCompressionMap() throws IOException {
  for (int numCfs = 0; numCfs <= 3; numCfs++) {
    Configuration conf = new Configuration(this.util.getConfiguration());
    Map<String, Compression.Algorithm> familyToCompression =
        getMockColumnFamiliesForCompression(numCfs);
    Table table = Mockito.mock(HTable.class);
    setupMockColumnFamiliesForCompression(table, familyToCompression);
    HFileOutputFormat2.configureCompression(conf, table.getTableDescriptor());

    // read back family specific compression setting from the configuration
    Map<byte[], Algorithm> retrievedFamilyToCompressionMap = HFileOutputFormat2
        .createFamilyCompressionMap(conf);

    // test that we have a value for all column families that matches with the
    // used mock values
    for (Entry<String, Algorithm> entry : familyToCompression.entrySet()) {
      assertEquals("Compression configuration incorrect for column family:"
          + entry.getKey(), entry.getValue(),
          retrievedFamilyToCompressionMap.get(entry.getKey().getBytes()));
    }
  }
}
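The round trip exercised by this test starts from compression settings declared per column family: configureCompression serializes the family-to-codec mapping into the job Configuration, and createFamilyCompressionMap reads it back. A brief sketch of how such settings are declared on a descriptor; the table and family names are placeholders.

// Sketch: declaring per-family compression on a table descriptor.
HTableDescriptor desc = new HTableDescriptor(TableName.valueOf("example"));
desc.addFamily(new HColumnDescriptor("a").setCompressionType(Compression.Algorithm.SNAPPY));
desc.addFamily(new HColumnDescriptor("b").setCompressionType(Compression.Algorithm.GZ));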
Project: ditb    File: TestHFileOutputFormat.java
/**
 * Test for {@link HFileOutputFormat#configureCompression(org.apache.hadoop.hbase.client.Table,
 * Configuration)} and {@link HFileOutputFormat#createFamilyCompressionMap
 * (Configuration)}.
 * Tests that the compression map is correctly serialized into
 * and deserialized from configuration
 *
 * @throws IOException
 */
@Test
public void testSerializeDeserializeFamilyCompressionMap() throws IOException {
  for (int numCfs = 0; numCfs <= 3; numCfs++) {
    Configuration conf = new Configuration(this.util.getConfiguration());
    Map<String, Compression.Algorithm> familyToCompression =
        getMockColumnFamiliesForCompression(numCfs);
    Table table = Mockito.mock(HTable.class);
    setupMockColumnFamiliesForCompression(table, familyToCompression);
    HFileOutputFormat.configureCompression(table, conf);

    // read back family specific compression setting from the configuration
    Map<byte[], Algorithm> retrievedFamilyToCompressionMap = HFileOutputFormat
        .createFamilyCompressionMap(conf);

    // test that we have a value for all column families that matches with the
    // used mock values
    for (Entry<String, Algorithm> entry : familyToCompression.entrySet()) {
      assertEquals("Compression configuration incorrect for column family:"
          + entry.getKey(), entry.getValue(),
          retrievedFamilyToCompressionMap.get(entry.getKey().getBytes()));
    }
  }
}
Project: ditb    File: TestHFileBlock.java
static HFileBlock.Writer createTestV2Block(Compression.Algorithm algo,
    boolean includesMemstoreTS, boolean includesTag) throws IOException {
  final BlockType blockType = BlockType.DATA;
  HFileContext meta = new HFileContextBuilder()
                      .withCompression(algo)
                      .withIncludesMvcc(includesMemstoreTS)
                      .withIncludesTags(includesTag)
                      .withBytesPerCheckSum(HFile.DEFAULT_BYTES_PER_CHECKSUM)
                      .build();
  HFileBlock.Writer hbw = new HFileBlock.Writer(null, meta);
  DataOutputStream dos = hbw.startWriting(blockType);
  writeTestBlockContents(dos);
  dos.flush();
  hbw.ensureBlockReady();
  assertEquals(1000 * 4, hbw.getUncompressedSizeWithoutHeader());
  hbw.release();
  return hbw;
}
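A short usage sketch for the block-writer helper above, called from within the same test class; NONE and GZ are the pure-Java codecs, so they work without native compression libraries (other algorithms may require them).

// Illustrative calls: build test v2 blocks with and without compression.
HFileBlock.Writer plain = createTestV2Block(Compression.Algorithm.NONE, true, false);
HFileBlock.Writer gzipped = createTestV2Block(Compression.Algorithm.GZ, true, false);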
Project: ditb    File: TestHFileBlock.java
static void assertBuffersEqual(ByteBuffer expectedBuffer,
    ByteBuffer actualBuffer, Compression.Algorithm compression,
    DataBlockEncoding encoding, boolean pread) {
  if (!actualBuffer.equals(expectedBuffer)) {
    int prefix = 0;
    int minLimit = Math.min(expectedBuffer.limit(), actualBuffer.limit());
    while (prefix < minLimit &&
        expectedBuffer.get(prefix) == actualBuffer.get(prefix)) {
      prefix++;
    }

    fail(String.format(
        "Content mismatch for %s, commonPrefix %d, expected %s, got %s",
        buildMessageDetails(compression, encoding, pread), prefix,
        nextBytesToStr(expectedBuffer, prefix),
        nextBytesToStr(actualBuffer, prefix)));
  }
}
Project: ditb    File: TestHFileDataBlockEncoder.java
private HFileBlock getSampleHFileBlock(List<KeyValue> kvs, boolean useTag) {
  ByteBuffer keyValues = RedundantKVGenerator.convertKvToByteBuffer(kvs, includesMemstoreTS);
  int size = keyValues.limit();
  ByteBuffer buf = ByteBuffer.allocate(size + HConstants.HFILEBLOCK_HEADER_SIZE);
  buf.position(HConstants.HFILEBLOCK_HEADER_SIZE);
  keyValues.rewind();
  buf.put(keyValues);
  HFileContext meta = new HFileContextBuilder()
                      .withIncludesMvcc(includesMemstoreTS)
                      .withIncludesTags(useTag)
                      .withHBaseCheckSum(true)
                      .withCompression(Algorithm.NONE)
                      .withBlockSize(0)
                      .withChecksumType(ChecksumType.NULL)
                      .build();
  HFileBlock b = new HFileBlock(BlockType.DATA, size, size, -1, buf,
      HFileBlock.FILL_HEADER, 0, 0, meta);
  return b;
}
Project: ditb    File: TestPrefixTreeEncoding.java
@Test
public void testSeekWithRandomData() throws Exception {
  PrefixTreeCodec encoder = new PrefixTreeCodec();
  ByteArrayOutputStream baosInMemory = new ByteArrayOutputStream();
  DataOutputStream userDataStream = new DataOutputStream(baosInMemory);
  int batchId = numBatchesWritten++;
  HFileContext meta = new HFileContextBuilder()
                      .withHBaseCheckSum(false)
                      .withIncludesMvcc(false)
                      .withIncludesTags(includesTag)
                      .withCompression(Algorithm.NONE)
                      .build();
  HFileBlockEncodingContext blkEncodingCtx = new HFileBlockDefaultEncodingContext(
      DataBlockEncoding.PREFIX_TREE, new byte[0], meta);
  generateRandomTestData(kvset, batchId, includesTag, encoder, blkEncodingCtx, userDataStream);
  EncodedSeeker seeker = encoder.createSeeker(KeyValue.COMPARATOR,
      encoder.newDataBlockDecodingContext(meta));
  byte[] onDiskBytes = baosInMemory.toByteArray();
  ByteBuffer readBuffer = ByteBuffer.wrap(onDiskBytes, DataBlockEncoding.ID_SIZE,
      onDiskBytes.length - DataBlockEncoding.ID_SIZE);
  verifySeeking(seeker, readBuffer, batchId);
}
Project: ditb    File: TestPrefixTreeEncoding.java
@Test
public void testSeekWithFixedData() throws Exception {
  PrefixTreeCodec encoder = new PrefixTreeCodec();
  int batchId = numBatchesWritten++;
  HFileContext meta = new HFileContextBuilder()
                      .withHBaseCheckSum(false)
                      .withIncludesMvcc(false)
                      .withIncludesTags(includesTag)
                      .withCompression(Algorithm.NONE)
                      .build();
  HFileBlockEncodingContext blkEncodingCtx = new HFileBlockDefaultEncodingContext(
      DataBlockEncoding.PREFIX_TREE, new byte[0], meta);
  ByteArrayOutputStream baosInMemory = new ByteArrayOutputStream();
  DataOutputStream userDataStream = new DataOutputStream(baosInMemory);
  generateFixedTestData(kvset, batchId, includesTag, encoder, blkEncodingCtx, userDataStream);
  EncodedSeeker seeker = encoder.createSeeker(KeyValue.COMPARATOR,
      encoder.newDataBlockDecodingContext(meta));
  byte[] onDiskBytes = baosInMemory.toByteArray();
  ByteBuffer readBuffer = ByteBuffer.wrap(onDiskBytes, DataBlockEncoding.ID_SIZE,
      onDiskBytes.length - DataBlockEncoding.ID_SIZE);
  verifySeeking(seeker, readBuffer, batchId);
}
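These two tests exercise PREFIX_TREE encoding at the block level; on a real table the same encoding is normally enabled per column family. A minimal sketch, with a placeholder family name:

// Sketch: enabling PREFIX_TREE data block encoding on a column family.
HColumnDescriptor cf = new HColumnDescriptor("d");
cf.setDataBlockEncoding(DataBlockEncoding.PREFIX_TREE);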
Project: ditb    File: EncodedDataBlock.java
/**
 * Find the size of the compressed data, assuming the buffer will be compressed
 * using the given algorithm.
 * @param algo compression algorithm
 * @param compressor compressor already requested from codec
 * @param inputBuffer Array to be compressed.
 * @param offset Offset to beginning of the data.
 * @param length Length to be compressed.
 * @return Size of compressed data in bytes.
 * @throws IOException
 */
public static int getCompressedSize(Algorithm algo, Compressor compressor,
    byte[] inputBuffer, int offset, int length) throws IOException {
  DataOutputStream compressedStream = new DataOutputStream(
      new IOUtils.NullOutputStream());
  if (compressor != null) {
    compressor.reset();
  }
  OutputStream compressingStream = null;

  try {
    compressingStream = algo.createCompressionStream(
        compressedStream, compressor, 0);

    compressingStream.write(inputBuffer, offset, length);
    compressingStream.flush();

    return compressedStream.size();
  } finally {
    if (compressingStream != null) compressingStream.close();
  }
}
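A usage sketch for getCompressedSize; it assumes the GZ codec is available and borrows a compressor from Compression.Algorithm's pool (getCompressor/returnCompressor). The payload is a placeholder.

// Measure how well a payload compresses with GZ, borrowing and returning
// a pooled compressor.
byte[] data = Bytes.toBytes("a fairly repetitive payload, a fairly repetitive payload");
Compression.Algorithm algo = Compression.Algorithm.GZ;
Compressor compressor = algo.getCompressor();
try {
  int compressedSize = EncodedDataBlock.getCompressedSize(algo, compressor, data, 0, data.length);
  System.out.println("compressed to " + compressedSize + " bytes");
} finally {
  algo.returnCompressor(compressor);
}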
Project: antsdb    File: Helper.java
public static void createTable(Connection conn, String namespace, String tableName, Algorithm compressionType) {
    // Check whether table already exists
    if (Helper.existsTable(conn, namespace, tableName)) {
        Helper.dropTable(conn, namespace, tableName);
    }
    if (!Helper.existsTable(conn, namespace, tableName)) {
        // Create table
        try (Admin admin = conn.getAdmin()) {
            HTableDescriptor table = new HTableDescriptor(TableName.valueOf(namespace, tableName));
            table.addFamily(new HColumnDescriptor(SYS_COLUMN_FAMILY).setCompressionType(compressionType));
            table.addFamily(new HColumnDescriptor(DATA_COLUMN_FAMILY).setCompressionType(compressionType));
            _log.debug("creating table {}", table.toString());
            admin.createTable(table);
        } catch (Exception ex) {
            throw new OrcaHBaseException(ex, "Failed to create table - " + tableName);
        }
    }
}
Project: antsdb    File: Helper.java
public static void truncateTable(Connection connection, String namespace, String tableName) {
    try {
        TableName table = TableName.valueOf(namespace, tableName);

        // get compression type
        Table htable = connection.getTable(table);
        HTableDescriptor tableDesc = htable.getTableDescriptor();
        HColumnDescriptor[] families = tableDesc.getColumnFamilies();
        Algorithm compressionType = families[0].getCompression();

        // drop table
        dropTable(connection, namespace, tableName);

        // create table
        createTable(connection, namespace, tableName, compressionType);

    } catch (Exception ex) {
        throw new OrcaHBaseException("Failed to truncate table - " + tableName, ex);
    }
}
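A combined usage sketch for the two helpers above; the namespace and table names are placeholders, and conn is assumed to be an open HBase Connection (e.g. obtained via ConnectionFactory.createConnection).

// Create a table whose families use GZ compression, then truncate it;
// truncateTable re-creates it with the compression type it had before.
Helper.createTable(conn, "test_ns", "test_table", Algorithm.GZ);
Helper.truncateTable(conn, "test_ns", "test_table");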
Project: pbase    File: TestHFileOutputFormat2.java
/**
 * Test for {@link HFileOutputFormat2#configureCompression(org.apache.hadoop.hbase.client.Table,
 * Configuration)} and {@link HFileOutputFormat2#createFamilyCompressionMap
 * (Configuration)}.
 * Tests that the compression map is correctly serialized into
 * and deserialized from configuration
 *
 * @throws IOException
 */
@Test
public void testSerializeDeserializeFamilyCompressionMap() throws IOException {
  for (int numCfs = 0; numCfs <= 3; numCfs++) {
    Configuration conf = new Configuration(this.util.getConfiguration());
    Map<String, Compression.Algorithm> familyToCompression =
        getMockColumnFamiliesForCompression(numCfs);
    Table table = Mockito.mock(HTable.class);
    setupMockColumnFamiliesForCompression(table, familyToCompression);
    HFileOutputFormat2.configureCompression(table, conf);

    // read back family specific compression setting from the configuration
    Map<byte[], Algorithm> retrievedFamilyToCompressionMap = HFileOutputFormat2
        .createFamilyCompressionMap(conf);

    // test that we have a value for all column families that matches with the
    // used mock values
    for (Entry<String, Algorithm> entry : familyToCompression.entrySet()) {
      assertEquals("Compression configuration incorrect for column family:"
          + entry.getKey(), entry.getValue(),
          retrievedFamilyToCompressionMap.get(entry.getKey().getBytes()));
    }
  }
}
Project: pbase    File: TestHFileOutputFormat.java
/**
 * Test for {@link HFileOutputFormat#configureCompression(org.apache.hadoop.hbase.client.Table,
 * Configuration)} and {@link HFileOutputFormat#createFamilyCompressionMap
 * (Configuration)}.
 * Tests that the compression map is correctly serialized into
 * and deserialized from configuration
 *
 * @throws IOException
 */
@Test
public void testSerializeDeserializeFamilyCompressionMap() throws IOException {
  for (int numCfs = 0; numCfs <= 3; numCfs++) {
    Configuration conf = new Configuration(this.util.getConfiguration());
    Map<String, Compression.Algorithm> familyToCompression =
        getMockColumnFamiliesForCompression(numCfs);
    Table table = Mockito.mock(HTable.class);
    setupMockColumnFamiliesForCompression(table, familyToCompression);
    HFileOutputFormat.configureCompression(table, conf);

    // read back family specific compression setting from the configuration
    Map<byte[], Algorithm> retrievedFamilyToCompressionMap = HFileOutputFormat
        .createFamilyCompressionMap(conf);

    // test that we have a value for all column families that matches with the
    // used mock values
    for (Entry<String, Algorithm> entry : familyToCompression.entrySet()) {
      assertEquals("Compression configuration incorrect for column family:"
          + entry.getKey(), entry.getValue(),
          retrievedFamilyToCompressionMap.get(entry.getKey().getBytes()));
    }
  }
}
Project: pbase    File: TestHFileBlock.java
static HFileBlock.Writer createTestV2Block(Compression.Algorithm algo,
    boolean includesMemstoreTS, boolean includesTag) throws IOException {
  final BlockType blockType = BlockType.DATA;
  HFileContext meta = new HFileContextBuilder()
                      .withCompression(algo)
                      .withIncludesMvcc(includesMemstoreTS)
                      .withIncludesTags(includesTag)
                      .withBytesPerCheckSum(HFile.DEFAULT_BYTES_PER_CHECKSUM)
                      .build();
  HFileBlock.Writer hbw = new HFileBlock.Writer(null, meta);
  DataOutputStream dos = hbw.startWriting(blockType);
  writeTestBlockContents(dos);
  dos.flush();
  hbw.ensureBlockReady();
  assertEquals(1000 * 4, hbw.getUncompressedSizeWithoutHeader());
  hbw.release();
  return hbw;
}
Project: pbase    File: TestHFileBlock.java
static void assertBuffersEqual(ByteBuffer expectedBuffer,
    ByteBuffer actualBuffer, Compression.Algorithm compression,
    DataBlockEncoding encoding, boolean pread) {
  if (!actualBuffer.equals(expectedBuffer)) {
    int prefix = 0;
    int minLimit = Math.min(expectedBuffer.limit(), actualBuffer.limit());
    while (prefix < minLimit &&
        expectedBuffer.get(prefix) == actualBuffer.get(prefix)) {
      prefix++;
    }

    fail(String.format(
        "Content mismatch for %s, commonPrefix %d, expected %s, got %s",
        buildMessageDetails(compression, encoding, pread), prefix,
        nextBytesToStr(expectedBuffer, prefix),
        nextBytesToStr(actualBuffer, prefix)));
  }
}
Project: pbase    File: TestHFileDataBlockEncoder.java
private HFileBlock getSampleHFileBlock(List<KeyValue> kvs, boolean useTag) {
  ByteBuffer keyValues = RedundantKVGenerator.convertKvToByteBuffer(kvs, includesMemstoreTS);
  int size = keyValues.limit();
  ByteBuffer buf = ByteBuffer.allocate(size + HConstants.HFILEBLOCK_HEADER_SIZE);
  buf.position(HConstants.HFILEBLOCK_HEADER_SIZE);
  keyValues.rewind();
  buf.put(keyValues);
  HFileContext meta = new HFileContextBuilder()
                      .withIncludesMvcc(includesMemstoreTS)
                      .withIncludesTags(useTag)
                      .withHBaseCheckSum(true)
                      .withCompression(Algorithm.NONE)
                      .withBlockSize(0)
                      .withChecksumType(ChecksumType.NULL)
                      .build();
  HFileBlock b = new HFileBlock(BlockType.DATA, size, size, -1, buf,
      HFileBlock.FILL_HEADER, 0, 0, meta);
  return b;
}
Project: pbase    File: TestPrefixTreeEncoding.java
@Test
public void testSeekWithRandomData() throws Exception {
  PrefixTreeCodec encoder = new PrefixTreeCodec();
  ByteArrayOutputStream baosInMemory = new ByteArrayOutputStream();
  DataOutputStream userDataStream = new DataOutputStream(baosInMemory);
  int batchId = numBatchesWritten++;
  HFileContext meta = new HFileContextBuilder()
                      .withHBaseCheckSum(false)
                      .withIncludesMvcc(false)
                      .withIncludesTags(includesTag)
                      .withCompression(Algorithm.NONE)
                      .build();
  HFileBlockEncodingContext blkEncodingCtx = new HFileBlockDefaultEncodingContext(
      DataBlockEncoding.PREFIX_TREE, new byte[0], meta);
  generateRandomTestData(kvset, batchId, includesTag, encoder, blkEncodingCtx, userDataStream);
  EncodedSeeker seeker = encoder.createSeeker(KeyValue.COMPARATOR,
      encoder.newDataBlockDecodingContext(meta));
  byte[] onDiskBytes = baosInMemory.toByteArray();
  ByteBuffer readBuffer = ByteBuffer.wrap(onDiskBytes, DataBlockEncoding.ID_SIZE,
      onDiskBytes.length - DataBlockEncoding.ID_SIZE);
  verifySeeking(seeker, readBuffer, batchId);
}
Project: pbase    File: TestPrefixTreeEncoding.java
@Test
public void testSeekWithFixedData() throws Exception {
  PrefixTreeCodec encoder = new PrefixTreeCodec();
  int batchId = numBatchesWritten++;
  HFileContext meta = new HFileContextBuilder()
                      .withHBaseCheckSum(false)
                      .withIncludesMvcc(false)
                      .withIncludesTags(includesTag)
                      .withCompression(Algorithm.NONE)
                      .build();
  HFileBlockEncodingContext blkEncodingCtx = new HFileBlockDefaultEncodingContext(
      DataBlockEncoding.PREFIX_TREE, new byte[0], meta);
  ByteArrayOutputStream baosInMemory = new ByteArrayOutputStream();
  DataOutputStream userDataStream = new DataOutputStream(baosInMemory);
  generateFixedTestData(kvset, batchId, includesTag, encoder, blkEncodingCtx, userDataStream);
  EncodedSeeker seeker = encoder.createSeeker(KeyValue.COMPARATOR,
      encoder.newDataBlockDecodingContext(meta));
  byte[] onDiskBytes = baosInMemory.toByteArray();
  ByteBuffer readBuffer = ByteBuffer.wrap(onDiskBytes, DataBlockEncoding.ID_SIZE,
      onDiskBytes.length - DataBlockEncoding.ID_SIZE);
  verifySeeking(seeker, readBuffer, batchId);
}
Project: pbase    File: EncodedDataBlock.java
/**
 * Find the size of the compressed data, assuming the buffer will be compressed
 * using the given algorithm.
 * @param algo compression algorithm
 * @param compressor compressor already requested from codec
 * @param inputBuffer Array to be compressed.
 * @param offset Offset to beginning of the data.
 * @param length Length to be compressed.
 * @return Size of compressed data in bytes.
 * @throws IOException
 */
public static int getCompressedSize(Algorithm algo, Compressor compressor,
    byte[] inputBuffer, int offset, int length) throws IOException {
  DataOutputStream compressedStream = new DataOutputStream(
      new IOUtils.NullOutputStream());
  if (compressor != null) {
    compressor.reset();
  }
  OutputStream compressingStream = null;

  try {
    compressingStream = algo.createCompressionStream(
        compressedStream, compressor, 0);

    compressingStream.write(inputBuffer, offset, length);
    compressingStream.flush();

    return compressedStream.size();
  } finally {
    if (compressingStream != null) compressingStream.close();
  }
}
Project: near-image-replica-detection    File: HBaseUtils.java
public static void createTable(HBaseAdmin admin, String tableName, String[] families,
        boolean dataBlockEncoding, boolean compression, int remaining) throws IOException {
    HTableDescriptor table = new HTableDescriptor(TableName.valueOf(tableName));
    for (String f : families) {
        HColumnDescriptor colDesc = new HColumnDescriptor(f);
        // Compression
        if (compression) {
            colDesc.setCompressionType(Algorithm.SNAPPY);
        }

        // Alive time
        if (remaining > 0) {
            colDesc.setTimeToLive(remaining);
        }

        // Block encoding
        if (dataBlockEncoding) {
            colDesc.setDataBlockEncoding(DataBlockEncoding.FAST_DIFF);
        }

        table.addFamily(colDesc);
    }
    admin.createTable(table);
}
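A usage sketch for this helper with placeholder names; it assumes an HBaseAdmin connected to the cluster and, when compression is enabled, that the SNAPPY codec is available on the region servers.

// Create a table with two families, FAST_DIFF encoding, SNAPPY compression,
// and no TTL. All names are placeholders.
String[] families = new String[] { "meta", "sig" };
HBaseUtils.createTable(admin, "replicas", families,
    true,   // enable FAST_DIFF data block encoding
    true,   // enable SNAPPY compression
    0);     // remaining <= 0 means no TTL is set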
Project: HIndex    File: TestHFileOutputFormat2.java
/**
 * Test for {@link HFileOutputFormat2#configureCompression(HTable,
 * Configuration)} and {@link HFileOutputFormat2#createFamilyCompressionMap
 * (Configuration)}.
 * Tests that the compression map is correctly serialized into
 * and deserialized from configuration
 *
 * @throws IOException
 */
@Test
public void testSerializeDeserializeFamilyCompressionMap() throws IOException {
  for (int numCfs = 0; numCfs <= 3; numCfs++) {
    Configuration conf = new Configuration(this.util.getConfiguration());
    Map<String, Compression.Algorithm> familyToCompression =
        getMockColumnFamiliesForCompression(numCfs);
    HTable table = Mockito.mock(HTable.class);
    setupMockColumnFamiliesForCompression(table, familyToCompression);
    HFileOutputFormat2.configureCompression(table, conf);

    // read back family specific compression setting from the configuration
    Map<byte[], Algorithm> retrievedFamilyToCompressionMap = HFileOutputFormat2
        .createFamilyCompressionMap(conf);

    // test that we have a value for all column families that matches with the
    // used mock values
    for (Entry<String, Algorithm> entry : familyToCompression.entrySet()) {
      assertEquals("Compression configuration incorrect for column family:"
          + entry.getKey(), entry.getValue(),
          retrievedFamilyToCompressionMap.get(entry.getKey().getBytes()));
    }
  }
}
Project: HIndex    File: TestHFileOutputFormat.java
/**
 * Test for {@link HFileOutputFormat#configureCompression(HTable,
 * Configuration)} and {@link HFileOutputFormat#createFamilyCompressionMap
 * (Configuration)}.
 * Tests that the compression map is correctly serialized into
 * and deserialized from configuration
 *
 * @throws IOException
 */
@Test
public void testSerializeDeserializeFamilyCompressionMap() throws IOException {
  for (int numCfs = 0; numCfs <= 3; numCfs++) {
    Configuration conf = new Configuration(this.util.getConfiguration());
    Map<String, Compression.Algorithm> familyToCompression =
        getMockColumnFamiliesForCompression(numCfs);
    HTable table = Mockito.mock(HTable.class);
    setupMockColumnFamiliesForCompression(table, familyToCompression);
    HFileOutputFormat.configureCompression(table, conf);

    // read back family specific compression setting from the configuration
    Map<byte[], Algorithm> retrievedFamilyToCompressionMap = HFileOutputFormat
        .createFamilyCompressionMap(conf);

    // test that we have a value for all column families that matches with the
    // used mock values
    for (Entry<String, Algorithm> entry : familyToCompression.entrySet()) {
      assertEquals("Compression configuration incorrect for column family:"
          + entry.getKey(), entry.getValue(),
          retrievedFamilyToCompressionMap.get(entry.getKey().getBytes()));
    }
  }
}
Project: HIndex    File: TestHFileBlock.java
static HFileBlock.Writer createTestV2Block(Compression.Algorithm algo,
    boolean includesMemstoreTS, boolean includesTag) throws IOException {
  final BlockType blockType = BlockType.DATA;
  HFileContext meta = new HFileContextBuilder()
                      .withCompression(algo)
                      .withIncludesMvcc(includesMemstoreTS)
                      .withIncludesTags(includesTag)
                      .withBytesPerCheckSum(HFile.DEFAULT_BYTES_PER_CHECKSUM)
                      .withChecksumType(HFile.DEFAULT_CHECKSUM_TYPE)
                      .build();
  HFileBlock.Writer hbw = new HFileBlock.Writer(null, meta);
  DataOutputStream dos = hbw.startWriting(blockType);
  writeTestBlockContents(dos);
  dos.flush();
  byte[] headerAndData = hbw.getHeaderAndDataForTest();
  assertEquals(1000 * 4, hbw.getUncompressedSizeWithoutHeader());
  hbw.release();
  return hbw;
}
Project: HIndex    File: TestHFileBlock.java
static void assertBuffersEqual(ByteBuffer expectedBuffer,
    ByteBuffer actualBuffer, Compression.Algorithm compression,
    DataBlockEncoding encoding, boolean pread) {
  if (!actualBuffer.equals(expectedBuffer)) {
    int prefix = 0;
    int minLimit = Math.min(expectedBuffer.limit(), actualBuffer.limit());
    while (prefix < minLimit &&
        expectedBuffer.get(prefix) == actualBuffer.get(prefix)) {
      prefix++;
    }

    fail(String.format(
        "Content mismath for compression %s, encoding %s, " +
        "pread %s, commonPrefix %d, expected %s, got %s",
        compression, encoding, pread, prefix,
        nextBytesToStr(expectedBuffer, prefix),
        nextBytesToStr(actualBuffer, prefix)));
  }
}
Project: HIndex    File: TestHFileDataBlockEncoder.java
private HFileBlock getSampleHFileBlock(boolean useTag) {
  ByteBuffer keyValues = RedundantKVGenerator.convertKvToByteBuffer(
      generator.generateTestKeyValues(60, useTag), includesMemstoreTS);
  int size = keyValues.limit();
  ByteBuffer buf = ByteBuffer.allocate(size + HConstants.HFILEBLOCK_HEADER_SIZE);
  buf.position(HConstants.HFILEBLOCK_HEADER_SIZE);
  keyValues.rewind();
  buf.put(keyValues);
  HFileContext meta = new HFileContextBuilder()
                      .withIncludesMvcc(includesMemstoreTS)
                      .withIncludesTags(useTag)
                      .withHBaseCheckSum(true)
                      .withCompression(Algorithm.NONE)
                      .withBlockSize(0)
                      .withChecksumType(ChecksumType.NULL)
                      .build();
  HFileBlock b = new HFileBlock(BlockType.DATA, size, size, -1, buf,
      HFileBlock.FILL_HEADER, 0, 0, meta);
  return b;
}
Project: HIndex    File: TestPrefixTreeEncoding.java
@Test
public void testSeekWithRandomData() throws Exception {
  PrefixTreeCodec encoder = new PrefixTreeCodec();
  int batchId = numBatchesWritten++;
  ByteBuffer dataBuffer = generateRandomTestData(kvset, batchId, includesTag);
  HFileContext meta = new HFileContextBuilder()
                      .withHBaseCheckSum(false)
                      .withIncludesMvcc(false)
                      .withIncludesTags(includesTag)
                      .withCompression(Algorithm.NONE)
                      .build();
  HFileBlockEncodingContext blkEncodingCtx = new HFileBlockDefaultEncodingContext(
      DataBlockEncoding.PREFIX_TREE, new byte[0], meta);
  encoder.encodeKeyValues(dataBuffer, blkEncodingCtx);
  EncodedSeeker seeker = encoder.createSeeker(KeyValue.COMPARATOR,
      encoder.newDataBlockDecodingContext(meta));
  byte[] onDiskBytes = blkEncodingCtx.getOnDiskBytesWithHeader();
  ByteBuffer readBuffer = ByteBuffer.wrap(onDiskBytes, DataBlockEncoding.ID_SIZE,
      onDiskBytes.length - DataBlockEncoding.ID_SIZE);
  verifySeeking(seeker, readBuffer, batchId);
}
Project: HIndex    File: TestPrefixTreeEncoding.java
@Test
public void testSeekWithFixedData() throws Exception {
  PrefixTreeCodec encoder = new PrefixTreeCodec();
  int batchId = numBatchesWritten++;
  ByteBuffer dataBuffer = generateFixedTestData(kvset, batchId, includesTag);
  HFileContext meta = new HFileContextBuilder()
                      .withHBaseCheckSum(false)
                      .withIncludesMvcc(false)
                      .withIncludesTags(includesTag)
                      .withCompression(Algorithm.NONE)
                      .build();
  HFileBlockEncodingContext blkEncodingCtx = new HFileBlockDefaultEncodingContext(
      DataBlockEncoding.PREFIX_TREE, new byte[0], meta);
  encoder.encodeKeyValues(dataBuffer, blkEncodingCtx);
  EncodedSeeker seeker = encoder.createSeeker(KeyValue.COMPARATOR,
      encoder.newDataBlockDecodingContext(meta));
  byte[] onDiskBytes = blkEncodingCtx.getOnDiskBytesWithHeader();
  ByteBuffer readBuffer = ByteBuffer.wrap(onDiskBytes, DataBlockEncoding.ID_SIZE,
      onDiskBytes.length - DataBlockEncoding.ID_SIZE);
  verifySeeking(seeker, readBuffer, batchId);
}
Project: HIndex    File: EncodedDataBlock.java
/**
 * Find the size of the compressed data, assuming the buffer will be compressed
 * using the given algorithm.
 * @param algo compression algorithm
 * @param compressor compressor already requested from codec
 * @param inputBuffer Array to be compressed.
 * @param offset Offset to beginning of the data.
 * @param length Length to be compressed.
 * @return Size of compressed data in bytes.
 * @throws IOException
 */
public static int getCompressedSize(Algorithm algo, Compressor compressor,
    byte[] inputBuffer, int offset, int length) throws IOException {
  DataOutputStream compressedStream = new DataOutputStream(
      new IOUtils.NullOutputStream());
  if (compressor != null) {
    compressor.reset();
  }
  OutputStream compressingStream = null;

  try {
    compressingStream = algo.createCompressionStream(
        compressedStream, compressor, 0);

    compressingStream.write(inputBuffer, offset, length);
    compressingStream.flush();

    return compressedStream.size();
  } finally {
    if (compressingStream != null) compressingStream.close();
  }
}
Project: hbase    File: HBaseTestingUtility.java
/**
 * Creates a pre-split table for load testing. If the table already exists,
 * logs a warning and continues.
 * @return the number of regions the table was split into
 */
public static int createPreSplitLoadTestTable(Configuration conf,
    TableName tableName, byte[][] columnFamilies, Algorithm compression,
    DataBlockEncoding dataBlockEncoding, int numRegionsPerServer, int regionReplication,
    Durability durability)
        throws IOException {
  HTableDescriptor desc = new HTableDescriptor(tableName);
  desc.setDurability(durability);
  desc.setRegionReplication(regionReplication);
  HColumnDescriptor[] hcds = new HColumnDescriptor[columnFamilies.length];
  for (int i = 0; i < columnFamilies.length; i++) {
    HColumnDescriptor hcd = new HColumnDescriptor(columnFamilies[i]);
    hcd.setDataBlockEncoding(dataBlockEncoding);
    hcd.setCompressionType(compression);
    hcds[i] = hcd;
  }
  return createPreSplitLoadTestTable(conf, desc, hcds, numRegionsPerServer);
}
Project: hbase    File: TestHFileBlock.java
static HFileBlock.Writer createTestV2Block(Compression.Algorithm algo,
    boolean includesMemstoreTS, boolean includesTag) throws IOException {
  final BlockType blockType = BlockType.DATA;
  HFileContext meta = new HFileContextBuilder()
                      .withCompression(algo)
                      .withIncludesMvcc(includesMemstoreTS)
                      .withIncludesTags(includesTag)
                      .withBytesPerCheckSum(HFile.DEFAULT_BYTES_PER_CHECKSUM)
                      .build();
  HFileBlock.Writer hbw = new HFileBlock.Writer(null, meta);
  DataOutputStream dos = hbw.startWriting(blockType);
  writeTestBlockContents(dos);
  dos.flush();
  hbw.ensureBlockReady();
  assertEquals(1000 * 4, hbw.getUncompressedSizeWithoutHeader());
  hbw.release();
  return hbw;
}