Java class org.apache.hadoop.hbase.io.hfile.HFileBlock example source code

Project: LCIndex-HBase-0.94.16    File: HFileDataBlockEncoderImpl.java
@Override
public HFileBlock diskToCacheFormat(HFileBlock block, boolean isCompaction) {
  if (block.getBlockType() == BlockType.DATA) {
    if (!useEncodedScanner(isCompaction)) {
      // Unencoded block, and we don't want to encode in cache.
      return block;
    }
    // Encode the unencoded block with the in-cache encoding.
    return encodeDataBlock(block, inCache, block.doesIncludeMemstoreTS());
  }

  if (block.getBlockType() == BlockType.ENCODED_DATA) {
    if (block.getDataBlockEncodingId() == onDisk.getId()) {
      // The block is already in the desired in-cache encoding.
      return block;
    }
    // We don't want to re-encode a block in a different encoding. The HFile
    // reader should have been instantiated in such a way that we would not
    // have to do this.
    throw new AssertionError("Expected on-disk data block encoding " +
        onDisk + ", got " + block.getDataBlockEncoding());
  }
  return block;
}
Project: LCIndex-HBase-0.94.16    File: HFileDataBlockEncoderImpl.java
private HFileBlock encodeDataBlock(HFileBlock block,
    DataBlockEncoding algo, boolean includesMemstoreTS) {
  ByteBuffer compressedBuffer = encodeBufferToHFileBlockBuffer(
      block.getBufferWithoutHeader(), algo, includesMemstoreTS,
      block.getDummyHeaderForVersion());
  int sizeWithoutHeader = compressedBuffer.limit() - block.headerSize();
  HFileBlock encodedBlock = new HFileBlock(BlockType.ENCODED_DATA,
      block.getOnDiskSizeWithoutHeader(),
      sizeWithoutHeader, block.getPrevBlockOffset(),
      compressedBuffer, HFileBlock.FILL_HEADER, block.getOffset(),
      includesMemstoreTS, block.getMinorVersion(),
      block.getBytesPerChecksum(), block.getChecksumType(),
      block.getOnDiskDataSizeWithHeader());
  block.passSchemaMetricsTo(encodedBlock);
  return encodedBlock;
}
Project: IRIndex    File: HFileDataBlockEncoderImpl.java
@Override
public HFileBlock diskToCacheFormat(HFileBlock block, boolean isCompaction) {
  if (block.getBlockType() == BlockType.DATA) {
    if (!useEncodedScanner(isCompaction)) {
      // Unencoded block, and we don't want to encode in cache.
      return block;
    }
    // Encode the unencoded block with the in-cache encoding.
    return encodeDataBlock(block, inCache, block.doesIncludeMemstoreTS());
  }

  if (block.getBlockType() == BlockType.ENCODED_DATA) {
    if (block.getDataBlockEncodingId() == onDisk.getId()) {
      // The block is already in the desired in-cache encoding.
      return block;
    }
    // We don't want to re-encode a block in a different encoding. The HFile
    // reader should have been instantiated in such a way that we would not
    // have to do this.
    throw new AssertionError("Expected on-disk data block encoding " +
        onDisk + ", got " + block.getDataBlockEncoding());
  }
  return block;
}
Project: IRIndex    File: HFileDataBlockEncoderImpl.java
private HFileBlock encodeDataBlock(HFileBlock block,
    DataBlockEncoding algo, boolean includesMemstoreTS) {
  ByteBuffer compressedBuffer = encodeBufferToHFileBlockBuffer(
      block.getBufferWithoutHeader(), algo, includesMemstoreTS,
      block.getDummyHeaderForVersion());
  int sizeWithoutHeader = compressedBuffer.limit() - block.headerSize();
  HFileBlock encodedBlock = new HFileBlock(BlockType.ENCODED_DATA,
      block.getOnDiskSizeWithoutHeader(),
      sizeWithoutHeader, block.getPrevBlockOffset(),
      compressedBuffer, HFileBlock.FILL_HEADER, block.getOffset(),
      includesMemstoreTS, block.getMinorVersion(),
      block.getBytesPerChecksum(), block.getChecksumType(),
      block.getOnDiskDataSizeWithHeader());
  block.passSchemaMetricsTo(encodedBlock);
  return encodedBlock;
}
Project: bigbase    File: CacheableSerializer.java
@SuppressWarnings("unchecked")
public static void setHFileDeserializer() {
  Field field = getProtectedField(HFileBlock.class, "blockDeserializer");
  if (field == null) {
    LOG.error("Could not get access to HFileBlock.blockDeserializer");
    return;
  }

  try {
    CacheableDeserializer<Cacheable> serde =
        (CacheableDeserializer<Cacheable>) field.get(null);
    if (serde != null) {
      deserializer.set(serde);
    } else {
      LOG.warn("HFileBlock.blockDeserializer is null");
    }
  } catch (Exception e) {
    LOG.warn("unable to read HFileBlock.blockDeserializer");
  }
}
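The getProtectedField(...) helper called above is not shown on this page. Here is a minimal sketch of what such a helper is assumed to look like, using plain java.lang.reflect; this is an illustration, not the bigbase implementation, and the Holder class exists only for the demo.

import java.lang.reflect.Field;

public final class ReflectionDemo {
  static class Holder {
    protected static String secret = "hello";
  }

  // Hypothetical stand-in for the getProtectedField(...) helper used above:
  // look up a declared (possibly non-public) field and make it accessible.
  static Field getProtectedField(Class<?> clazz, String fieldName) {
    try {
      Field field = clazz.getDeclaredField(fieldName);
      field.setAccessible(true);
      return field;
    } catch (NoSuchFieldException | SecurityException e) {
      return null; // caller logs and bails out, as setHFileDeserializer() does
    }
  }

  public static void main(String[] args) throws Exception {
    Field f = getProtectedField(Holder.class, "secret");
    System.out.println(f.get(null)); // static field, so a null receiver; prints "hello"
  }
}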
Project: HBase-Research    File: HFileDataBlockEncoderImpl.java
@Override
public HFileBlock diskToCacheFormat(HFileBlock block, boolean isCompaction) {
  if (block.getBlockType() == BlockType.DATA) {
    if (!useEncodedScanner(isCompaction)) {
      // Unencoded block, and we don't want to encode in cache.
      return block;
    }
    // Encode the unencoded block with the in-cache encoding.
    return encodeDataBlock(block, inCache, block.doesIncludeMemstoreTS());
  }

  if (block.getBlockType() == BlockType.ENCODED_DATA) {
    if (block.getDataBlockEncodingId() == onDisk.getId()) {
      // The block is already in the desired in-cache encoding.
      return block;
    }
    // We don't want to re-encode a block in a different encoding. The HFile
    // reader should have been instantiated in such a way that we would not
    // have to do this.
    throw new AssertionError("Expected on-disk data block encoding " +
        onDisk + ", got " + block.getDataBlockEncoding());
  }
  return block;
}
Project: HBase-Research    File: HFileDataBlockEncoderImpl.java
private HFileBlock encodeDataBlock(HFileBlock block,
    DataBlockEncoding algo, boolean includesMemstoreTS) {
  ByteBuffer compressedBuffer = encodeBufferToHFileBlockBuffer(
      block.getBufferWithoutHeader(), algo, includesMemstoreTS,
      block.getDummyHeaderForVersion());
  int sizeWithoutHeader = compressedBuffer.limit() - block.headerSize();
  HFileBlock encodedBlock = new HFileBlock(BlockType.ENCODED_DATA,
      block.getOnDiskSizeWithoutHeader(),
      sizeWithoutHeader, block.getPrevBlockOffset(),
      compressedBuffer, HFileBlock.FILL_HEADER, block.getOffset(),
      includesMemstoreTS, block.getMinorVersion(),
      block.getBytesPerChecksum(), block.getChecksumType(),
      block.getOnDiskDataSizeWithHeader());
  block.passSchemaMetricsTo(encodedBlock);
  return encodedBlock;
}
Project: hbase-0.94.8-qod    File: HFileDataBlockEncoderImpl.java
@Override
public HFileBlock diskToCacheFormat(HFileBlock block, boolean isCompaction) {
  if (block.getBlockType() == BlockType.DATA) {
    if (!useEncodedScanner(isCompaction)) {
      // Unencoded block, and we don't want to encode in cache.
      return block;
    }
    // Encode the unencoded block with the in-cache encoding.
    return encodeDataBlock(block, inCache, block.doesIncludeMemstoreTS());
  }

  if (block.getBlockType() == BlockType.ENCODED_DATA) {
    if (block.getDataBlockEncodingId() == onDisk.getId()) {
      // The block is already in the desired in-cache encoding.
      return block;
    }
    // We don't want to re-encode a block in a different encoding. The HFile
    // reader should have been instantiated in such a way that we would not
    // have to do this.
    throw new AssertionError("Expected on-disk data block encoding " +
        onDisk + ", got " + block.getDataBlockEncoding());
  }
  return block;
}
Project: hbase-0.94.8-qod    File: HFileDataBlockEncoderImpl.java
private HFileBlock encodeDataBlock(HFileBlock block,
    DataBlockEncoding algo, boolean includesMemstoreTS) {
  ByteBuffer compressedBuffer = encodeBufferToHFileBlockBuffer(
      block.getBufferWithoutHeader(), algo, includesMemstoreTS,
      block.getDummyHeaderForVersion());
  int sizeWithoutHeader = compressedBuffer.limit() - block.headerSize();
  HFileBlock encodedBlock = new HFileBlock(BlockType.ENCODED_DATA,
      block.getOnDiskSizeWithoutHeader(),
      sizeWithoutHeader, block.getPrevBlockOffset(),
      compressedBuffer, HFileBlock.FILL_HEADER, block.getOffset(),
      includesMemstoreTS, block.getMinorVersion(),
      block.getBytesPerChecksum(), block.getChecksumType(),
      block.getOnDiskDataSizeWithHeader());
  block.passSchemaMetricsTo(encodedBlock);
  return encodedBlock;
}
Project: hindex    File: HFileDataBlockEncoderImpl.java
@Override
public HFileBlock diskToCacheFormat(HFileBlock block, boolean isCompaction) {
  if (block.getBlockType() == BlockType.DATA) {
    if (!useEncodedScanner(isCompaction)) {
      // Unencoded block, and we don't want to encode in cache.
      return block;
    }
    // Encode the unencoded block with the in-cache encoding.
    return encodeDataBlock(block, inCache, block.doesIncludeMemstoreTS());
  }

  if (block.getBlockType() == BlockType.ENCODED_DATA) {
    if (block.getDataBlockEncodingId() == onDisk.getId()) {
      // The block is already in the desired in-cache encoding.
      return block;
    }
    // We don't want to re-encode a block in a different encoding. The HFile
    // reader should have been instantiated in such a way that we would not
    // have to do this.
    throw new AssertionError("Expected on-disk data block encoding " +
        onDisk + ", got " + block.getDataBlockEncoding());
  }
  return block;
}
Project: hindex    File: HFileDataBlockEncoderImpl.java
private HFileBlock encodeDataBlock(HFileBlock block,
    DataBlockEncoding algo, boolean includesMemstoreTS) {
  ByteBuffer compressedBuffer = encodeBufferToHFileBlockBuffer(
      block.getBufferWithoutHeader(), algo, includesMemstoreTS,
      block.getDummyHeaderForVersion());
  int sizeWithoutHeader = compressedBuffer.limit() - block.headerSize();
  HFileBlock encodedBlock = new HFileBlock(BlockType.ENCODED_DATA,
      block.getOnDiskSizeWithoutHeader(),
      sizeWithoutHeader, block.getPrevBlockOffset(),
      compressedBuffer, HFileBlock.FILL_HEADER, block.getOffset(),
      includesMemstoreTS, block.getMinorVersion(),
      block.getBytesPerChecksum(), block.getChecksumType(),
      block.getOnDiskDataSizeWithHeader());
  block.passSchemaMetricsTo(encodedBlock);
  return encodedBlock;
}
Project: ditb    File: CompoundBloomFilter.java
@Override
public boolean contains(byte[] key, int keyOffset, int keyLength,
    ByteBuffer bloom) {
  // We try to store the result in this variable so we can update stats for
  // testing, but when an error happens, we log a message and return.
  boolean result;

  int block = index.rootBlockContainingKey(key, keyOffset,
      keyLength);
  if (block < 0) {
    result = false; // This key is not in the file.
  } else {
    HFileBlock bloomBlock;
    try {
      // We cache the block and use a positional read.
      bloomBlock = reader.readBlock(index.getRootBlockOffset(block),
          index.getRootBlockDataSize(block), true, true, false, true,
          BlockType.BLOOM_CHUNK, null);
    } catch (IOException ex) {
      // The Bloom filter is broken, turn it off.
      throw new IllegalArgumentException(
          "Failed to load Bloom block for key "
              + Bytes.toStringBinary(key, keyOffset, keyLength), ex);
    }

    ByteBuffer bloomBuf = bloomBlock.getBufferReadOnly();
    result = ByteBloomFilter.contains(key, keyOffset, keyLength,
        bloomBuf, bloomBlock.headerSize(),
        bloomBlock.getUncompressedSizeWithoutHeader(), hash, hashCount);
  }

  if (numQueriesPerChunk != null && block >= 0) {
    // Update statistics. Only used in unit tests.
    ++numQueriesPerChunk[block];
    if (result)
      ++numPositivesPerChunk[block];
  }

  return result;
}
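The snippet above delegates the actual membership test to ByteBloomFilter.contains, passing the Bloom chunk buffer, the offset past the block header, and the chunk length. As a rough illustration of what such a test does under the hood, here is a self-contained toy Bloom filter over a byte[] bitmap; the hash function below is a stand-in and not the hashing HBase uses.

// Simplified illustration of a Bloom-filter membership test over a byte[] bitmap.
public final class SimpleBloom {
  private final byte[] bits;
  private final int hashCount;

  SimpleBloom(int sizeInBytes, int hashCount) {
    this.bits = new byte[sizeInBytes];
    this.hashCount = hashCount;
  }

  // Toy hash: one bit position per seed. Not the HBase hash.
  private int bitIndex(byte[] key, int seed) {
    int h = seed * 0x9E3779B1;
    for (byte b : key) {
      h = h * 31 + b;
    }
    return Math.floorMod(h, bits.length * 8);
  }

  void add(byte[] key) {
    for (int i = 0; i < hashCount; i++) {
      int idx = bitIndex(key, i);
      bits[idx >> 3] |= (1 << (idx & 7));
    }
  }

  boolean contains(byte[] key) {
    for (int i = 0; i < hashCount; i++) {
      int idx = bitIndex(key, i);
      if ((bits[idx >> 3] & (1 << (idx & 7))) == 0) {
        return false; // definitely absent
      }
    }
    return true; // possibly present
  }

  public static void main(String[] args) {
    SimpleBloom bloom = new SimpleBloom(1024, 3);
    bloom.add("row-42".getBytes());
    System.out.println(bloom.contains("row-42".getBytes())); // true
    System.out.println(bloom.contains("row-43".getBytes())); // almost certainly false
  }
}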
Project: LCIndex-HBase-0.94.16    File: CompoundBloomFilter.java
@Override
public boolean contains(byte[] key, int keyOffset, int keyLength,
    ByteBuffer bloom) {
  // We try to store the result in this variable so we can update stats for
  // testing, but when an error happens, we log a message and return.
  boolean result;

  int block = index.rootBlockContainingKey(key, keyOffset, keyLength);
  if (block < 0) {
    result = false; // This key is not in the file.
  } else {
    HFileBlock bloomBlock;
    try {
      // We cache the block and use a positional read.
      bloomBlock = reader.readBlock(index.getRootBlockOffset(block),
          index.getRootBlockDataSize(block), true, true, false,
          BlockType.BLOOM_CHUNK);
    } catch (IOException ex) {
      // The Bloom filter is broken, turn it off.
      throw new IllegalArgumentException(
          "Failed to load Bloom block for key "
              + Bytes.toStringBinary(key, keyOffset, keyLength), ex);
    }

    ByteBuffer bloomBuf = bloomBlock.getBufferReadOnly();
    result = ByteBloomFilter.contains(key, keyOffset, keyLength,
        bloomBuf.array(), bloomBuf.arrayOffset() + bloomBlock.headerSize(),
        bloomBlock.getUncompressedSizeWithoutHeader(), hash, hashCount);
  }

  if (numQueriesPerChunk != null && block >= 0) {
    // Update statistics. Only used in unit tests.
    ++numQueriesPerChunk[block];
    if (result)
      ++numPositivesPerChunk[block];
  }

  return result;
}
Project: pbase    File: CompoundBloomFilter.java
@Override
public boolean contains(byte[] key, int keyOffset, int keyLength,
    ByteBuffer bloom) {
  // We try to store the result in this variable so we can update stats for
  // testing, but when an error happens, we log a message and return.
  boolean result;

  int block = index.rootBlockContainingKey(key, keyOffset,
      keyLength);
  if (block < 0) {
    result = false; // This key is not in the file.
  } else {
    HFileBlock bloomBlock;
    try {
      // We cache the block and use a positional read.
      bloomBlock = reader.readBlock(index.getRootBlockOffset(block),
          index.getRootBlockDataSize(block), true, true, false, true,
          BlockType.BLOOM_CHUNK, null);
    } catch (IOException ex) {
      // The Bloom filter is broken, turn it off.
      throw new IllegalArgumentException(
          "Failed to load Bloom block for key "
              + Bytes.toStringBinary(key, keyOffset, keyLength), ex);
    }

    ByteBuffer bloomBuf = bloomBlock.getBufferReadOnly();
    result = ByteBloomFilter.contains(key, keyOffset, keyLength,
        bloomBuf, bloomBlock.headerSize(),
        bloomBlock.getUncompressedSizeWithoutHeader(), hash, hashCount);
  }

  if (numQueriesPerChunk != null && block >= 0) {
    // Update statistics. Only used in unit tests.
    ++numQueriesPerChunk[block];
    if (result)
      ++numPositivesPerChunk[block];
  }

  return result;
}
Project: HIndex    File: CompoundBloomFilter.java
@Override
public boolean contains(byte[] key, int keyOffset, int keyLength,
    ByteBuffer bloom) {
  // We try to store the result in this variable so we can update stats for
  // testing, but when an error happens, we log a message and return.
  boolean result;

  int block = index.rootBlockContainingKey(key, keyOffset, keyLength);
  if (block < 0) {
    result = false; // This key is not in the file.
  } else {
    HFileBlock bloomBlock;
    try {
      // We cache the block and use a positional read.
      bloomBlock = reader.readBlock(index.getRootBlockOffset(block),
          index.getRootBlockDataSize(block), true, true, false, true,
          BlockType.BLOOM_CHUNK);
    } catch (IOException ex) {
      // The Bloom filter is broken, turn it off.
      throw new IllegalArgumentException(
          "Failed to load Bloom block for key "
              + Bytes.toStringBinary(key, keyOffset, keyLength), ex);
    }

    ByteBuffer bloomBuf = bloomBlock.getBufferReadOnly();
    result = ByteBloomFilter.contains(key, keyOffset, keyLength,
        bloomBuf.array(), bloomBuf.arrayOffset() + bloomBlock.headerSize(),
        bloomBlock.getUncompressedSizeWithoutHeader(), hash, hashCount);
  }

  if (numQueriesPerChunk != null && block >= 0) {
    // Update statistics. Only used in unit tests.
    ++numQueriesPerChunk[block];
    if (result)
      ++numPositivesPerChunk[block];
  }

  return result;
}
Project: IRIndex    File: CompoundBloomFilter.java
@Override
public boolean contains(byte[] key, int keyOffset, int keyLength,
    ByteBuffer bloom) {
  // We try to store the result in this variable so we can update stats for
  // testing, but when an error happens, we log a message and return.
  boolean result;

  int block = index.rootBlockContainingKey(key, keyOffset, keyLength);
  if (block < 0) {
    result = false; // This key is not in the file.
  } else {
    HFileBlock bloomBlock;
    try {
      // We cache the block and use a positional read.
      bloomBlock = reader.readBlock(index.getRootBlockOffset(block),
          index.getRootBlockDataSize(block), true, true, false,
          BlockType.BLOOM_CHUNK);
    } catch (IOException ex) {
      // The Bloom filter is broken, turn it off.
      throw new IllegalArgumentException(
          "Failed to load Bloom block for key "
              + Bytes.toStringBinary(key, keyOffset, keyLength), ex);
    }

    ByteBuffer bloomBuf = bloomBlock.getBufferReadOnly();
    result = ByteBloomFilter.contains(key, keyOffset, keyLength,
        bloomBuf.array(), bloomBuf.arrayOffset() + bloomBlock.headerSize(),
        bloomBlock.getUncompressedSizeWithoutHeader(), hash, hashCount);
  }

  if (numQueriesPerChunk != null && block >= 0) {
    // Update statistics. Only used in unit tests.
    ++numQueriesPerChunk[block];
    if (result)
      ++numPositivesPerChunk[block];
  }

  return result;
}
Project: hbase    File: BucketCache.java
public BucketEntry writeToCache(final IOEngine ioEngine,
    final BucketAllocator bucketAllocator,
    final UniqueIndexMap<Integer> deserialiserMap,
    final LongAdder realCacheSize) throws CacheFullException, IOException,
    BucketAllocatorException {
  int len = data.getSerializedLength();
  // This cacheable thing can't be serialized
  if (len == 0) return null;
  long offset = bucketAllocator.allocateBlock(len);
  BucketEntry bucketEntry = new BucketEntry(offset, len, accessCounter, inMemory);
  bucketEntry.setDeserialiserReference(data.getDeserializer(), deserialiserMap);
  try {
    if (data instanceof HFileBlock) {
      // If an instance of HFileBlock, save on some allocations.
      HFileBlock block = (HFileBlock)data;
      ByteBuff sliceBuf = block.getBufferReadOnly();
      ByteBuffer metadata = block.getMetaData();
      if (LOG.isTraceEnabled()) {
        LOG.trace("Write offset=" + offset + ", len=" + len);
      }
      ioEngine.write(sliceBuf, offset);
      ioEngine.write(metadata, offset + len - metadata.limit());
    } else {
      ByteBuffer bb = ByteBuffer.allocate(len);
      data.serialize(bb);
      ioEngine.write(bb, offset);
    }
  } catch (IOException ioe) {
    // free it in bucket allocator
    bucketAllocator.freeBlock(offset);
    throw ioe;
  }

  realCacheSize.add(len);
  return bucketEntry;
}
Project: hbase    File: TestCacheOnWriteInSchema.java
private void readStoreFile(Path path) throws IOException {
  CacheConfig cacheConf = store.getCacheConfig();
  BlockCache cache = cacheConf.getBlockCache();
  HStoreFile sf = new HStoreFile(fs, path, conf, cacheConf, BloomType.ROWCOL, true);
  sf.initReader();
  HFile.Reader reader = sf.getReader().getHFileReader();
  try {
    // Open a scanner with (on read) caching disabled
    HFileScanner scanner = reader.getScanner(false, false);
    assertTrue(testDescription, scanner.seekTo());
    // Cribbed from io.hfile.TestCacheOnWrite
    long offset = 0;
    while (offset < reader.getTrailer().getLoadOnOpenDataOffset()) {
      // Flags: don't cache the block, use pread, this is not a compaction.
      // Also, pass null for expected block type to avoid checking it.
      HFileBlock block = reader.readBlock(offset, -1, false, true,
        false, true, null, DataBlockEncoding.NONE);
      BlockCacheKey blockCacheKey = new BlockCacheKey(reader.getName(),
        offset);
      boolean isCached = cache.getBlock(blockCacheKey, true, false, true) != null;
      boolean shouldBeCached = cowType.shouldBeCached(block.getBlockType());
      if (shouldBeCached != isCached) {
        throw new AssertionError(
          "shouldBeCached: " + shouldBeCached+ "\n" +
          "isCached: " + isCached + "\n" +
          "Test description: " + testDescription + "\n" +
          "block: " + block + "\n" +
          "blockCacheKey: " + blockCacheKey);
      }
      offset += block.getOnDiskSizeWithHeader();
    }
  } finally {
    reader.close();
  }
}
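The test walks the file by starting at offset 0 and advancing by each block's on-disk size until it reaches the load-on-open section. The same walk over a stream of length-prefixed records, as a standalone illustration; the [4-byte length][payload] record layout here is hypothetical, chosen only to make the loop runnable.

import java.nio.ByteBuffer;

public final class BlockWalkDemo {
  // Walk a buffer of records laid out as [4-byte length][payload], advancing by
  // each record's total size the way readStoreFile() above advances by
  // block.getOnDiskSizeWithHeader().
  static int countRecords(ByteBuffer file) {
    int count = 0;
    int offset = 0;
    while (offset < file.limit()) {
      int payloadLen = file.getInt(offset);
      offset += 4 + payloadLen; // header + payload = size "with header"
      count++;
    }
    return count;
  }

  public static void main(String[] args) {
    ByteBuffer file = ByteBuffer.allocate(64);
    for (int i = 0; i < 3; i++) {
      byte[] payload = ("block-" + i).getBytes();
      file.putInt(payload.length).put(payload);
    }
    file.flip();
    System.out.println(countRecords(file)); // 3
  }
}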
Project: RStore    File: CompoundBloomFilter.java
@Override
public boolean contains(byte[] key, int keyOffset, int keyLength,
    ByteBuffer bloom) {
  // We try to store the result in this variable so we can update stats for
  // testing, but when an error happens, we log a message and return.
  boolean result;

  int block = index.rootBlockContainingKey(key, keyOffset, keyLength);
  if (block < 0) {
    result = false; // This key is not in the file.
  } else {
    HFileBlock bloomBlock;
    try {
      // We cache the block and use a positional read.
      bloomBlock = reader.readBlock(index.getRootBlockOffset(block),
          index.getRootBlockDataSize(block), true, true, false);
    } catch (IOException ex) {
      // The Bloom filter is broken, turn it off.
      throw new IllegalArgumentException(
          "Failed to load Bloom block for key "
              + Bytes.toStringBinary(key, keyOffset, keyLength), ex);
    }

    ByteBuffer bloomBuf = bloomBlock.getBufferReadOnly();
    result = ByteBloomFilter.contains(key, keyOffset, keyLength,
        bloomBuf.array(), bloomBuf.arrayOffset() + HFileBlock.HEADER_SIZE,
        bloomBlock.getUncompressedSizeWithoutHeader(), hash, hashCount);
  }

  if (numQueriesPerChunk != null && block >= 0) {
    // Update statistics. Only used in unit tests.
    ++numQueriesPerChunk[block];
    if (result)
      ++numPositivesPerChunk[block];
  }

  return result;
}
Project: PyroDB    File: CompoundBloomFilter.java
@Override
public boolean contains(byte[] key, int keyOffset, int keyLength,
    ByteBuffer bloom) {
  // We try to store the result in this variable so we can update stats for
  // testing, but when an error happens, we log a message and return.
  boolean result;

  int block = index.rootBlockContainingKey(key, keyOffset,
      keyLength);
  if (block < 0) {
    result = false; // This key is not in the file.
  } else {
    HFileBlock bloomBlock;
    try {
      // We cache the block and use a positional read.
      bloomBlock = reader.readBlock(index.getRootBlockOffset(block),
          index.getRootBlockDataSize(block), true, true, false, true,
          BlockType.BLOOM_CHUNK, null);
    } catch (IOException ex) {
      // The Bloom filter is broken, turn it off.
      throw new IllegalArgumentException(
          "Failed to load Bloom block for key "
              + Bytes.toStringBinary(key, keyOffset, keyLength), ex);
    }

    ByteBuffer bloomBuf = bloomBlock.getBufferReadOnly();
    result = ByteBloomFilter.contains(key, keyOffset, keyLength,
        bloomBuf.array(), bloomBuf.arrayOffset() + bloomBlock.headerSize(),
        bloomBlock.getUncompressedSizeWithoutHeader(), hash, hashCount);
  }

  if (numQueriesPerChunk != null && block >= 0) {
    // Update statistics. Only used in unit tests.
    ++numQueriesPerChunk[block];
    if (result)
      ++numPositivesPerChunk[block];
  }

  return result;
}
Project: PyroDB    File: PFileReader.java
@Override
protected ByteBuffer getFirstKeyInBlock(HFileBlock curBlock) {
  ByteBuffer buffer = curBlock.getBufferWithoutHeader();
  buffer.rewind();
  byte pNum = buffer.get();
  //LOG.info("Shen Li: PFileScanner.getFirstKeyInBlock called, pNum is " + pNum);
  buffer.position(buffer.position() + 
      (pNum + 1) * PKeyValue.POINTER_SIZE);
  int klen = buffer.getInt();
  buffer.getInt();
  ByteBuffer keyBuff = buffer.slice();
  keyBuff.limit(klen);
  keyBuff.rewind();
  return keyBuff;
}
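getFirstKeyInBlock skips the pointer table at the start of the block, reads the key length (and skips the value length), and returns a zero-copy slice of the key bytes. The position()/slice()/limit() sequence is easy to get wrong, so here is a standalone example of the same pattern on a plain ByteBuffer; the length-prefixed layout is assumed purely for illustration and is not the PFile format.

import java.nio.ByteBuffer;

public final class SliceDemo {
  // Slice a length-prefixed field out of a larger buffer without copying it,
  // mirroring the position()/slice()/limit() pattern used above.
  static ByteBuffer sliceLengthPrefixed(ByteBuffer buf, int fieldOffset) {
    buf.position(fieldOffset);
    int len = buf.getInt();          // 4-byte length prefix
    ByteBuffer field = buf.slice();  // zero-copy view starting at current position
    field.limit(len);                // restrict the view to the field bytes
    return field;
  }

  public static void main(String[] args) {
    ByteBuffer buf = ByteBuffer.allocate(32);
    byte[] key = "rowkey".getBytes();
    buf.putInt(key.length).put(key).flip();
    ByteBuffer view = sliceLengthPrefixed(buf, 0);
    byte[] out = new byte[view.remaining()];
    view.get(out);
    System.out.println(new String(out)); // prints "rowkey"
  }
}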
Project: c5    File: CompoundBloomFilter.java
@Override
public boolean contains(byte[] key, int keyOffset, int keyLength,
    ByteBuffer bloom) {
  // We try to store the result in this variable so we can update stats for
  // testing, but when an error happens, we log a message and return.
  boolean result;

  int block = index.rootBlockContainingKey(key, keyOffset, keyLength);
  if (block < 0) {
    result = false; // This key is not in the file.
  } else {
    HFileBlock bloomBlock;
    try {
      // We cache the block and use a positional read.
      bloomBlock = reader.readBlock(index.getRootBlockOffset(block),
          index.getRootBlockDataSize(block), true, true, false,
          BlockType.BLOOM_CHUNK);
    } catch (IOException ex) {
      // The Bloom filter is broken, turn it off.
      throw new IllegalArgumentException(
          "Failed to load Bloom block for key "
              + Bytes.toStringBinary(key, keyOffset, keyLength), ex);
    }

    ByteBuffer bloomBuf = bloomBlock.getBufferReadOnly();
    result = ByteBloomFilter.contains(key, keyOffset, keyLength,
        bloomBuf.array(), bloomBuf.arrayOffset() + bloomBlock.headerSize(),
        bloomBlock.getUncompressedSizeWithoutHeader(), hash, hashCount);
  }

  if (numQueriesPerChunk != null && block >= 0) {
    // Update statistics. Only used in unit tests.
    ++numQueriesPerChunk[block];
    if (result)
      ++numPositivesPerChunk[block];
  }

  return result;
}
Project: HBase-Research    File: CompoundBloomFilter.java
@Override
public boolean contains(byte[] key, int keyOffset, int keyLength,
    ByteBuffer bloom) {
  // We try to store the result in this variable so we can update stats for
  // testing, but when an error happens, we log a message and return.
  boolean result;

  int block = index.rootBlockContainingKey(key, keyOffset, keyLength);
  if (block < 0) {
    result = false; // This key is not in the file.
  } else {
    HFileBlock bloomBlock;
    try {
      // We cache the block and use a positional read.
      bloomBlock = reader.readBlock(index.getRootBlockOffset(block),
          index.getRootBlockDataSize(block), true, true, false,
          BlockType.BLOOM_CHUNK);
    } catch (IOException ex) {
      // The Bloom filter is broken, turn it off.
      throw new IllegalArgumentException(
          "Failed to load Bloom block for key "
              + Bytes.toStringBinary(key, keyOffset, keyLength), ex);
    }

    ByteBuffer bloomBuf = bloomBlock.getBufferReadOnly();
    result = ByteBloomFilter.contains(key, keyOffset, keyLength,
        bloomBuf.array(), bloomBuf.arrayOffset() + bloomBlock.headerSize(),
        bloomBlock.getUncompressedSizeWithoutHeader(), hash, hashCount);
  }

  if (numQueriesPerChunk != null && block >= 0) {
    // Update statistics. Only used in unit tests.
    ++numQueriesPerChunk[block];
    if (result)
      ++numPositivesPerChunk[block];
  }

  return result;
}
Project: hbase-0.94.8-qod    File: CompoundBloomFilter.java
@Override
public boolean contains(byte[] key, int keyOffset, int keyLength,
    ByteBuffer bloom) {
  // We try to store the result in this variable so we can update stats for
  // testing, but when an error happens, we log a message and return.
  boolean result;

  int block = index.rootBlockContainingKey(key, keyOffset, keyLength);
  if (block < 0) {
    result = false; // This key is not in the file.
  } else {
    HFileBlock bloomBlock;
    try {
      // We cache the block and use a positional read.
      bloomBlock = reader.readBlock(index.getRootBlockOffset(block),
          index.getRootBlockDataSize(block), true, true, false,
          BlockType.BLOOM_CHUNK);
    } catch (IOException ex) {
      // The Bloom filter is broken, turn it off.
      throw new IllegalArgumentException(
          "Failed to load Bloom block for key "
              + Bytes.toStringBinary(key, keyOffset, keyLength), ex);
    }

    ByteBuffer bloomBuf = bloomBlock.getBufferReadOnly();
    result = ByteBloomFilter.contains(key, keyOffset, keyLength,
        bloomBuf.array(), bloomBuf.arrayOffset() + bloomBlock.headerSize(),
        bloomBlock.getUncompressedSizeWithoutHeader(), hash, hashCount);
  }

  if (numQueriesPerChunk != null && block >= 0) {
    // Update statistics. Only used in unit tests.
    ++numQueriesPerChunk[block];
    if (result)
      ++numPositivesPerChunk[block];
  }

  return result;
}
Project: DominoHBase    File: CompoundBloomFilter.java
@Override
public boolean contains(byte[] key, int keyOffset, int keyLength,
    ByteBuffer bloom) {
  // We try to store the result in this variable so we can update stats for
  // testing, but when an error happens, we log a message and return.
  boolean result;

  int block = index.rootBlockContainingKey(key, keyOffset, keyLength);
  if (block < 0) {
    result = false; // This key is not in the file.
  } else {
    HFileBlock bloomBlock;
    try {
      // We cache the block and use a positional read.
      bloomBlock = reader.readBlock(index.getRootBlockOffset(block),
          index.getRootBlockDataSize(block), true, true, false,
          BlockType.BLOOM_CHUNK);
    } catch (IOException ex) {
      // The Bloom filter is broken, turn it off.
      throw new IllegalArgumentException(
          "Failed to load Bloom block for key "
              + Bytes.toStringBinary(key, keyOffset, keyLength), ex);
    }

    ByteBuffer bloomBuf = bloomBlock.getBufferReadOnly();
    result = ByteBloomFilter.contains(key, keyOffset, keyLength,
        bloomBuf.array(), bloomBuf.arrayOffset() + bloomBlock.headerSize(),
        bloomBlock.getUncompressedSizeWithoutHeader(), hash, hashCount);
  }

  if (numQueriesPerChunk != null && block >= 0) {
    // Update statistics. Only used in unit tests.
    ++numQueriesPerChunk[block];
    if (result)
      ++numPositivesPerChunk[block];
  }

  return result;
}
Project: DominoHBase    File: EncodedDataBlock.java
/**
 * Create a buffer which will be encoded using dataBlockEncoder.
 * @param dataBlockEncoder Algorithm used for compression.
 * @param includesMemstoreTS whether each KeyValue carries a memstore timestamp
 * @param encoding encoding type used
 * @param rawKVs raw KeyValue bytes to encode
 */
public EncodedDataBlock(DataBlockEncoder dataBlockEncoder,
    boolean includesMemstoreTS, DataBlockEncoding encoding, byte[] rawKVs) {
  Preconditions.checkNotNull(encoding,
      "Cannot create encoded data block with null encoder");
  this.dataBlockEncoder = dataBlockEncoder;
  encodingCtx =
      dataBlockEncoder.newDataBlockEncodingContext(Compression.Algorithm.NONE,
          encoding, HFileBlock.DUMMY_HEADER);
  this.rawKVs = rawKVs;
}
Project: DominoHBase    File: TestDataBlockEncoders.java
private HFileBlockEncodingContext getEncodingContext(
    Compression.Algorithm algo, DataBlockEncoding encoding) {
  DataBlockEncoder encoder = encoding.getEncoder();
  if (encoder != null) {
    return encoder.newDataBlockEncodingContext(algo, encoding,
        HFileBlock.DUMMY_HEADER);
  } else {
    return new HFileBlockDefaultEncodingContext(algo, encoding, HFileBlock.DUMMY_HEADER);
  }
}
Project: spliceengine    File: HRegionUtil.java
private static int addStoreFileCutpoints(List<byte[]> cutpoints, HFile.Reader fileReader, long storeFileInBytes, int carry, Pair<byte[], byte[]> range, int splitBlockSize) throws IOException {
    HFileBlockIndex.BlockIndexReader indexReader = fileReader.getDataBlockIndexReader();
    int size = indexReader.getRootBlockCount();
    int levels = fileReader.getTrailer().getNumDataIndexLevels();
    if (levels == 1) {
        int incrementalSize = (int) (size > 0 ? storeFileInBytes / (float) size : storeFileInBytes);
        int sizeCounter = 0;
        for (int i = 0; i < size; ++i) {
            if (sizeCounter >= splitBlockSize) {
                sizeCounter = 0;
                KeyValue tentative = KeyValue.createKeyValueFromKey(indexReader.getRootBlockKey(i));
                if (CellUtils.isKeyValueInRange(tentative, range)) {
                    cutpoints.add(tentative.getRow());
                }
            }
            sizeCounter += incrementalSize;
        }
        return sizeCounter;
    } else {
        for (int i = 0; i < size; ++i) {
            HFileBlock block = fileReader.readBlock(
                    indexReader.getRootBlockOffset(i),
                    indexReader.getRootBlockDataSize(i),
                    true, true, false, true,
                    levels == 2 ? BlockType.LEAF_INDEX : BlockType.INTERMEDIATE_INDEX,
                    fileReader.getDataBlockEncoding());
            carry = addIndexCutpoints(fileReader, block.getBufferWithoutHeader(), levels - 1,  cutpoints, storeFileInBytes / size, carry, range, splitBlockSize);
        }
        return carry;
    }
}
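In the single-level branch above, each root index entry is assumed to cover roughly storeFileInBytes / size bytes, and a cutpoint is emitted whenever the running counter crosses splitBlockSize. A simplified standalone version of that accumulation follows; it is a hypothetical helper that drops the multi-level recursion, range filtering, and carry threading of the real method.

import java.util.ArrayList;
import java.util.List;

public final class CutpointDemo {
  // Emit a cutpoint roughly every splitBlockSize bytes, assuming every index
  // entry accounts for the same incremental amount of data.
  static List<byte[]> cutpoints(List<byte[]> rootKeys, long storeFileBytes,
                                int splitBlockSize) {
    List<byte[]> cutpoints = new ArrayList<>();
    int size = rootKeys.size();
    int incrementalSize =
        (int) (size > 0 ? storeFileBytes / (float) size : storeFileBytes);
    int sizeCounter = 0;
    for (byte[] key : rootKeys) {
      if (sizeCounter >= splitBlockSize) {
        sizeCounter = 0;
        cutpoints.add(key);
      }
      sizeCounter += incrementalSize;
    }
    return cutpoints;
  }

  public static void main(String[] args) {
    List<byte[]> keys = new ArrayList<>();
    for (int i = 0; i < 10; i++) {
      keys.add(("row-" + i).getBytes());
    }
    // 10 entries covering ~1000 bytes, split roughly every 300 bytes.
    System.out.println(cutpoints(keys, 1000, 300).size()); // 3
  }
}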
Project: hindex    File: CompoundBloomFilter.java
@Override
public boolean contains(byte[] key, int keyOffset, int keyLength,
    ByteBuffer bloom) {
  // We try to store the result in this variable so we can update stats for
  // testing, but when an error happens, we log a message and return.
  boolean result;

  int block = index.rootBlockContainingKey(key, keyOffset, keyLength);
  if (block < 0) {
    result = false; // This key is not in the file.
  } else {
    HFileBlock bloomBlock;
    try {
      // We cache the block and use a positional read.
      bloomBlock = reader.readBlock(index.getRootBlockOffset(block),
          index.getRootBlockDataSize(block), true, true, false,
          BlockType.BLOOM_CHUNK);
    } catch (IOException ex) {
      // The Bloom filter is broken, turn it off.
      throw new IllegalArgumentException(
          "Failed to load Bloom block for key "
              + Bytes.toStringBinary(key, keyOffset, keyLength), ex);
    }

    ByteBuffer bloomBuf = bloomBlock.getBufferReadOnly();
    result = ByteBloomFilter.contains(key, keyOffset, keyLength,
        bloomBuf.array(), bloomBuf.arrayOffset() + bloomBlock.headerSize(),
        bloomBlock.getUncompressedSizeWithoutHeader(), hash, hashCount);
  }

  if (numQueriesPerChunk != null && block >= 0) {
    // Update statistics. Only used in unit tests.
    ++numQueriesPerChunk[block];
    if (result)
      ++numPositivesPerChunk[block];
  }

  return result;
}
Project: ditb    File: DataBlockEncodingTool.java
/**
 * Verify if all data block encoders are working properly.
 *
 * @param scanner Of file which was compressed.
 * @param kvLimit Maximal count of KeyValue which will be processed.
 * @return true if all data block encoders compressed/decompressed correctly.
 * @throws IOException thrown if scanner is invalid
 */
public boolean verifyCodecs(final KeyValueScanner scanner, final int kvLimit)
    throws IOException {
  KeyValue currentKv;

  scanner.seek(KeyValue.LOWESTKEY);
  List<Iterator<Cell>> codecIterators =
      new ArrayList<Iterator<Cell>>();
  for(EncodedDataBlock codec : codecs) {
    codecIterators.add(codec.getIterator(HFileBlock.headerSize(useHBaseChecksum)));
  }

  int j = 0;
  while ((currentKv = KeyValueUtil.ensureKeyValue(scanner.next())) != null && j < kvLimit) {
    // Iterates through key/value pairs
    ++j;
    for (Iterator<Cell> it : codecIterators) {
      Cell c = it.next();
      KeyValue codecKv = KeyValueUtil.ensureKeyValue(c);
      if (codecKv == null || 0 != Bytes.compareTo(
          codecKv.getBuffer(), codecKv.getOffset(), codecKv.getLength(),
          currentKv.getBuffer(), currentKv.getOffset(),
          currentKv.getLength())) {
        if (codecKv == null) {
          LOG.error("There is a bug in codec " + it +
              " it returned null KeyValue,");
        } else {
          int prefix = 0;
          int limitLength = 2 * Bytes.SIZEOF_INT +
              Math.min(codecKv.getLength(), currentKv.getLength());
          while (prefix < limitLength &&
              codecKv.getBuffer()[prefix + codecKv.getOffset()] ==
              currentKv.getBuffer()[prefix + currentKv.getOffset()]) {
            prefix++;
          }

          LOG.error("There is bug in codec " + it.toString() +
              "\n on element " + j +
              "\n codecKv.getKeyLength() " + codecKv.getKeyLength() +
              "\n codecKv.getValueLength() " + codecKv.getValueLength() +
              "\n codecKv.getLength() " + codecKv.getLength() +
              "\n currentKv.getKeyLength() " + currentKv.getKeyLength() +
              "\n currentKv.getValueLength() " + currentKv.getValueLength() +
              "\n codecKv.getLength() " + currentKv.getLength() +
              "\n currentKV rowLength " + currentKv.getRowLength() +
              " familyName " + currentKv.getFamilyLength() +
              " qualifier " + currentKv.getQualifierLength() +
              "\n prefix " + prefix +
              "\n codecKv   '" + Bytes.toStringBinary(codecKv.getBuffer(),
                  codecKv.getOffset(), prefix) + "' diff '" +
                  Bytes.toStringBinary(codecKv.getBuffer(),
                      codecKv.getOffset() + prefix, codecKv.getLength() -
                      prefix) + "'" +
              "\n currentKv '" + Bytes.toStringBinary(
                 currentKv.getBuffer(),
                 currentKv.getOffset(), prefix) + "' diff '" +
                 Bytes.toStringBinary(currentKv.getBuffer(),
                     currentKv.getOffset() + prefix, currentKv.getLength() -
                     prefix) + "'"
              );
        }
        return false;
      }
    }
  }

  LOG.info("Verification was successful!");

  return true;
}
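When a mismatch is found, the tool reports the length of the common prefix between the re-encoded and the original KeyValue bytes so it is clear where they first diverge. A simplified standalone version of that prefix computation is sketched below; unlike the loop above, it omits the extra 2 * Bytes.SIZEOF_INT slack the tool adds to its limit.

public final class CommonPrefix {
  // Length of the common byte prefix of two byte ranges.
  static int commonPrefix(byte[] a, int aOff, int aLen,
                          byte[] b, int bOff, int bLen) {
    int limit = Math.min(aLen, bLen);
    int prefix = 0;
    while (prefix < limit && a[aOff + prefix] == b[bOff + prefix]) {
      prefix++;
    }
    return prefix;
  }

  public static void main(String[] args) {
    byte[] x = "row-1/family:qual/1".getBytes();
    byte[] y = "row-1/family:qual/2".getBytes();
    System.out.println(commonPrefix(x, 0, x.length, y, 0, y.length)); // 18
  }
}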
Project: ditb    File: TestCacheOnWriteInSchema.java
private void readStoreFile(Path path) throws IOException {
  CacheConfig cacheConf = store.getCacheConfig();
  BlockCache cache = cacheConf.getBlockCache();
  StoreFile sf = new StoreFile(fs, path, conf, cacheConf,
    BloomType.ROWCOL);
  HFileReaderV2 reader = (HFileReaderV2) sf.createReader().getHFileReader();
  try {
    // Open a scanner with (on read) caching disabled
    HFileScanner scanner = reader.getScanner(false, false);
    assertTrue(testDescription, scanner.seekTo());
    // Cribbed from io.hfile.TestCacheOnWrite
    long offset = 0;
    HFileBlock prevBlock = null;
    while (offset < reader.getTrailer().getLoadOnOpenDataOffset()) {
      long onDiskSize = -1;
      if (prevBlock != null) {
        onDiskSize = prevBlock.getNextBlockOnDiskSizeWithHeader();
      }
      // Flags: don't cache the block, use pread, this is not a compaction.
      // Also, pass null for expected block type to avoid checking it.
      HFileBlock block = reader.readBlock(offset, onDiskSize, false, true,
        false, true, null, DataBlockEncoding.NONE);
      BlockCacheKey blockCacheKey = new BlockCacheKey(reader.getName(),
        offset);
      boolean isCached = cache.getBlock(blockCacheKey, true, false, true) != null;
      boolean shouldBeCached = cowType.shouldBeCached(block.getBlockType());
      if (shouldBeCached != isCached) {
        throw new AssertionError(
          "shouldBeCached: " + shouldBeCached+ "\n" +
          "isCached: " + isCached + "\n" +
          "Test description: " + testDescription + "\n" +
          "block: " + block + "\n" +
          "blockCacheKey: " + blockCacheKey);
      }
      prevBlock = block;
      offset += block.getOnDiskSizeWithHeader();
    }
  } finally {
    reader.close();
  }
}
Project: LCIndex-HBase-0.94.16    File: TestCacheOnWriteInSchema.java
private void readStoreFile(Path path) throws IOException {
  CacheConfig cacheConf = store.getCacheConfig(); 
  BlockCache cache = cacheConf.getBlockCache();
  StoreFile sf = new StoreFile(fs, path, conf, cacheConf,
      BloomType.ROWCOL, null);
  store.passSchemaMetricsTo(sf);
  HFileReaderV2 reader = (HFileReaderV2) sf.createReader().getHFileReader();
  try {
    // Open a scanner with (on read) caching disabled
    HFileScanner scanner = reader.getScanner(false, false);
    assertTrue(testDescription, scanner.seekTo());
    // Cribbed from io.hfile.TestCacheOnWrite
    long offset = 0;
    HFileBlock prevBlock = null;
    while (offset < reader.getTrailer().getLoadOnOpenDataOffset()) {
      long onDiskSize = -1;
      if (prevBlock != null) {
        onDiskSize = prevBlock.getNextBlockOnDiskSizeWithHeader();
      }
      // Flags: don't cache the block, use pread, this is not a compaction.
      // Also, pass null for expected block type to avoid checking it.
      HFileBlock block = reader.readBlock(offset, onDiskSize, false, true,
        false, null);
      BlockCacheKey blockCacheKey = new BlockCacheKey(reader.getName(),
        offset);
      boolean isCached = cache.getBlock(blockCacheKey, true, false) != null;
      boolean shouldBeCached = cowType.shouldBeCached(block.getBlockType());
      if (shouldBeCached != isCached) {
        throw new AssertionError(
          "shouldBeCached: " + shouldBeCached+ "\n" +
          "isCached: " + isCached + "\n" +
          "Test description: " + testDescription + "\n" +
          "block: " + block + "\n" +
          "blockCacheKey: " + blockCacheKey);
      }
      prevBlock = block;
      offset += block.getOnDiskSizeWithHeader();
    }
  } finally {
    reader.close();
  }
}
Project: pbase    File: DataBlockEncodingTool.java
/**
 * Verify if all data block encoders are working properly.
 *
 * @param scanner Of file which was compressed.
 * @param kvLimit Maximal count of KeyValue which will be processed.
 * @return true if all data block encoders compressed/decompressed correctly.
 * @throws IOException thrown if scanner is invalid
 */
public boolean verifyCodecs(final KeyValueScanner scanner, final int kvLimit)
    throws IOException {
  KeyValue currentKv;

  scanner.seek(KeyValue.LOWESTKEY);
  List<Iterator<Cell>> codecIterators =
      new ArrayList<Iterator<Cell>>();
  for(EncodedDataBlock codec : codecs) {
    codecIterators.add(codec.getIterator(HFileBlock.headerSize(useHBaseChecksum)));
  }

  int j = 0;
  while ((currentKv = KeyValueUtil.ensureKeyValue(scanner.next())) != null && j < kvLimit) {
    // Iterates through key/value pairs
    ++j;
    for (Iterator<Cell> it : codecIterators) {
      Cell c = it.next();
      KeyValue codecKv = KeyValueUtil.ensureKeyValue(c);
      if (codecKv == null || 0 != Bytes.compareTo(
          codecKv.getBuffer(), codecKv.getOffset(), codecKv.getLength(),
          currentKv.getBuffer(), currentKv.getOffset(),
          currentKv.getLength())) {
        if (codecKv == null) {
          LOG.error("There is a bug in codec " + it +
              " it returned null KeyValue,");
        } else {
          int prefix = 0;
          int limitLength = 2 * Bytes.SIZEOF_INT +
              Math.min(codecKv.getLength(), currentKv.getLength());
          while (prefix < limitLength &&
              codecKv.getBuffer()[prefix + codecKv.getOffset()] ==
              currentKv.getBuffer()[prefix + currentKv.getOffset()]) {
            prefix++;
          }

          LOG.error("There is bug in codec " + it.toString() +
              "\n on element " + j +
              "\n codecKv.getKeyLength() " + codecKv.getKeyLength() +
              "\n codecKv.getValueLength() " + codecKv.getValueLength() +
              "\n codecKv.getLength() " + codecKv.getLength() +
              "\n currentKv.getKeyLength() " + currentKv.getKeyLength() +
              "\n currentKv.getValueLength() " + currentKv.getValueLength() +
              "\n codecKv.getLength() " + currentKv.getLength() +
              "\n currentKV rowLength " + currentKv.getRowLength() +
              " familyName " + currentKv.getFamilyLength() +
              " qualifier " + currentKv.getQualifierLength() +
              "\n prefix " + prefix +
              "\n codecKv   '" + Bytes.toStringBinary(codecKv.getBuffer(),
                  codecKv.getOffset(), prefix) + "' diff '" +
                  Bytes.toStringBinary(codecKv.getBuffer(),
                      codecKv.getOffset() + prefix, codecKv.getLength() -
                      prefix) + "'" +
              "\n currentKv '" + Bytes.toStringBinary(
                 currentKv.getBuffer(),
                 currentKv.getOffset(), prefix) + "' diff '" +
                 Bytes.toStringBinary(currentKv.getBuffer(),
                     currentKv.getOffset() + prefix, currentKv.getLength() -
                     prefix) + "'"
              );
        }
        return false;
      }
    }
  }

  LOG.info("Verification was successful!");

  return true;
}
Project: pbase    File: TestCacheOnWriteInSchema.java
private void readStoreFile(Path path) throws IOException {
  CacheConfig cacheConf = store.getCacheConfig();
  BlockCache cache = cacheConf.getBlockCache();
  StoreFile sf = new StoreFile(fs, path, conf, cacheConf,
    BloomType.ROWCOL);
  HFileReaderV2 reader = (HFileReaderV2) sf.createReader().getHFileReader();
  try {
    // Open a scanner with (on read) caching disabled
    HFileScanner scanner = reader.getScanner(false, false);
    assertTrue(testDescription, scanner.seekTo());
    // Cribbed from io.hfile.TestCacheOnWrite
    long offset = 0;
    HFileBlock prevBlock = null;
    while (offset < reader.getTrailer().getLoadOnOpenDataOffset()) {
      long onDiskSize = -1;
      if (prevBlock != null) {
        onDiskSize = prevBlock.getNextBlockOnDiskSizeWithHeader();
      }
      // Flags: don't cache the block, use pread, this is not a compaction.
      // Also, pass null for expected block type to avoid checking it.
      HFileBlock block = reader.readBlock(offset, onDiskSize, false, true,
        false, true, null, DataBlockEncoding.NONE);
      BlockCacheKey blockCacheKey = new BlockCacheKey(reader.getName(),
        offset);
      boolean isCached = cache.getBlock(blockCacheKey, true, false, true) != null;
      boolean shouldBeCached = cowType.shouldBeCached(block.getBlockType());
      if (shouldBeCached != isCached) {
        throw new AssertionError(
          "shouldBeCached: " + shouldBeCached+ "\n" +
          "isCached: " + isCached + "\n" +
          "Test description: " + testDescription + "\n" +
          "block: " + block + "\n" +
          "blockCacheKey: " + blockCacheKey);
      }
      prevBlock = block;
      offset += block.getOnDiskSizeWithHeader();
    }
  } finally {
    reader.close();
  }
}
Project: HIndex    File: DataBlockEncodingTool.java
/**
 * Verify if all data block encoders are working properly.
 *
 * @param scanner Of file which was compressed.
 * @param kvLimit Maximal count of KeyValue which will be processed.
 * @return true if all data block encoders compressed/decompressed correctly.
 * @throws IOException thrown if scanner is invalid
 */
public boolean verifyCodecs(final KeyValueScanner scanner, final int kvLimit)
    throws IOException {
  KeyValue currentKv;

  scanner.seek(KeyValue.LOWESTKEY);
  List<Iterator<Cell>> codecIterators =
      new ArrayList<Iterator<Cell>>();
  for(EncodedDataBlock codec : codecs) {
    codecIterators.add(codec.getIterator(HFileBlock.headerSize(useHBaseChecksum)));
  }

  int j = 0;
  while ((currentKv = scanner.next()) != null && j < kvLimit) {
    // Iterates through key/value pairs
    ++j;
    for (Iterator<Cell> it : codecIterators) {
      Cell c = it.next();
      KeyValue codecKv = KeyValueUtil.ensureKeyValue(c);
      if (codecKv == null || 0 != Bytes.compareTo(
          codecKv.getBuffer(), codecKv.getOffset(), codecKv.getLength(),
          currentKv.getBuffer(), currentKv.getOffset(),
          currentKv.getLength())) {
        if (codecKv == null) {
          LOG.error("There is a bug in codec " + it +
              " it returned null KeyValue,");
        } else {
          int prefix = 0;
          int limitLength = 2 * Bytes.SIZEOF_INT +
              Math.min(codecKv.getLength(), currentKv.getLength());
          while (prefix < limitLength &&
              codecKv.getBuffer()[prefix + codecKv.getOffset()] ==
              currentKv.getBuffer()[prefix + currentKv.getOffset()]) {
            prefix++;
          }

          LOG.error("There is bug in codec " + it.toString() +
              "\n on element " + j +
              "\n codecKv.getKeyLength() " + codecKv.getKeyLength() +
              "\n codecKv.getValueLength() " + codecKv.getValueLength() +
              "\n codecKv.getLength() " + codecKv.getLength() +
              "\n currentKv.getKeyLength() " + currentKv.getKeyLength() +
              "\n currentKv.getValueLength() " + currentKv.getValueLength() +
              "\n codecKv.getLength() " + currentKv.getLength() +
              "\n currentKV rowLength " + currentKv.getRowLength() +
              " familyName " + currentKv.getFamilyLength() +
              " qualifier " + currentKv.getQualifierLength() +
              "\n prefix " + prefix +
              "\n codecKv   '" + Bytes.toStringBinary(codecKv.getBuffer(),
                  codecKv.getOffset(), prefix) + "' diff '" +
                  Bytes.toStringBinary(codecKv.getBuffer(),
                      codecKv.getOffset() + prefix, codecKv.getLength() -
                      prefix) + "'" +
              "\n currentKv '" + Bytes.toStringBinary(
                 currentKv.getBuffer(),
                 currentKv.getOffset(), prefix) + "' diff '" +
                 Bytes.toStringBinary(currentKv.getBuffer(),
                     currentKv.getOffset() + prefix, currentKv.getLength() -
                     prefix) + "'"
              );
        }
        return false;
      }
    }
  }

  LOG.info("Verification was successful!");

  return true;
}
Project: HIndex    File: TestCacheOnWriteInSchema.java
private void readStoreFile(Path path) throws IOException {
  CacheConfig cacheConf = store.getCacheConfig();
  BlockCache cache = cacheConf.getBlockCache();
  StoreFile sf = new StoreFile(fs, path, conf, cacheConf,
    BloomType.ROWCOL);
  HFileReaderV2 reader = (HFileReaderV2) sf.createReader().getHFileReader();
  try {
    // Open a scanner with (on read) caching disabled
    HFileScanner scanner = reader.getScanner(false, false);
    assertTrue(testDescription, scanner.seekTo());
    // Cribbed from io.hfile.TestCacheOnWrite
    long offset = 0;
    HFileBlock prevBlock = null;
    while (offset < reader.getTrailer().getLoadOnOpenDataOffset()) {
      long onDiskSize = -1;
      if (prevBlock != null) {
        onDiskSize = prevBlock.getNextBlockOnDiskSizeWithHeader();
      }
      // Flags: don't cache the block, use pread, this is not a compaction.
      // Also, pass null for expected block type to avoid checking it.
      HFileBlock block = reader.readBlock(offset, onDiskSize, false, true,
        false, true, null);
      BlockCacheKey blockCacheKey = new BlockCacheKey(reader.getName(),
        offset);
      boolean isCached = cache.getBlock(blockCacheKey, true, false, true) != null;
      boolean shouldBeCached = cowType.shouldBeCached(block.getBlockType());
      if (shouldBeCached != isCached) {
        throw new AssertionError(
          "shouldBeCached: " + shouldBeCached+ "\n" +
          "isCached: " + isCached + "\n" +
          "Test description: " + testDescription + "\n" +
          "block: " + block + "\n" +
          "blockCacheKey: " + blockCacheKey);
      }
      prevBlock = block;
      offset += block.getOnDiskSizeWithHeader();
    }
  } finally {
    reader.close();
  }
}
Project: IRIndex    File: TestCacheOnWriteInSchema.java
private void readStoreFile(Path path) throws IOException {
  CacheConfig cacheConf = store.getCacheConfig(); 
  BlockCache cache = cacheConf.getBlockCache();
  StoreFile sf = new StoreFile(fs, path, conf, cacheConf,
      BloomType.ROWCOL, null);
  store.passSchemaMetricsTo(sf);
  HFileReaderV2 reader = (HFileReaderV2) sf.createReader().getHFileReader();
  try {
    // Open a scanner with (on read) caching disabled
    HFileScanner scanner = reader.getScanner(false, false);
    assertTrue(testDescription, scanner.seekTo());
    // Cribbed from io.hfile.TestCacheOnWrite
    long offset = 0;
    HFileBlock prevBlock = null;
    while (offset < reader.getTrailer().getLoadOnOpenDataOffset()) {
      long onDiskSize = -1;
      if (prevBlock != null) {
        onDiskSize = prevBlock.getNextBlockOnDiskSizeWithHeader();
      }
      // Flags: don't cache the block, use pread, this is not a compaction.
      // Also, pass null for expected block type to avoid checking it.
      HFileBlock block = reader.readBlock(offset, onDiskSize, false, true,
        false, null);
      BlockCacheKey blockCacheKey = new BlockCacheKey(reader.getName(),
        offset);
      boolean isCached = cache.getBlock(blockCacheKey, true, false) != null;
      boolean shouldBeCached = cowType.shouldBeCached(block.getBlockType());
      if (shouldBeCached != isCached) {
        throw new AssertionError(
          "shouldBeCached: " + shouldBeCached+ "\n" +
          "isCached: " + isCached + "\n" +
          "Test description: " + testDescription + "\n" +
          "block: " + block + "\n" +
          "blockCacheKey: " + blockCacheKey);
      }
      prevBlock = block;
      offset += block.getOnDiskSizeWithHeader();
    }
  } finally {
    reader.close();
  }
}