Example source code for the Java class org.apache.hadoop.hbase.regionserver.metrics.SchemaMetrics.BlockMetricType
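All of the examples below follow the same pattern: take a snapshot of the SchemaMetrics counters, perform some block reads, diff the snapshots, and look up a per-category counter by the name built from a BlockCategory and a BlockMetricType. The following minimal sketch distills that pattern; it is an editorial illustration rather than code from any of the listed projects, the table and column-family names are hypothetical, and the BlockCategory import assumes HBase 0.94's org.apache.hadoop.hbase.io.hfile.BlockType.BlockCategory.

import java.util.Map;

import org.apache.hadoop.hbase.io.hfile.BlockType.BlockCategory;
import org.apache.hadoop.hbase.regionserver.metrics.SchemaMetrics;
import org.apache.hadoop.hbase.regionserver.metrics.SchemaMetrics.BlockMetricType;

public class BlockReadCountSketch {
  // Hypothetical table and column family names, used only for illustration.
  private static final String TABLE = "testTable";
  private static final String FAMILY = "cf";

  /** Returns how many DATA blocks were read outside of compactions since {@code before}. */
  public static long dataBlocksReadSince(Map<String, Long> before) {
    // Diff the current global metrics snapshot against the earlier one.
    Map<String, Long> diff =
        SchemaMetrics.diffMetrics(before, SchemaMetrics.getMetricsSnapshot());
    // Per-table, per-family metrics object used to build the metric name.
    SchemaMetrics schemaMetrics = SchemaMetrics.getInstance(TABLE, FAMILY);
    // READ_COUNT for DATA blocks; the boolean flags whether the read happened during a compaction.
    return SchemaMetrics.getLong(diff,
        schemaMetrics.getBlockMetricName(BlockCategory.DATA, false, BlockMetricType.READ_COUNT));
  }
}

Typical usage mirrors the test helpers below: snapshot with SchemaMetrics.getMetricsSnapshot() before the scan, run the scan, then assert on the returned delta.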

Project: LCIndex-HBase-0.94.16    File: TestBlocksScanned.java
private void verifyDataAndIndexBlockRead(Map<String, Long> previousMetricSnapshot,
    SchemaMetrics schemaMetrics, long expectDataBlockRead, long expectedIndexBlockRead){
  Map<String, Long> currentMetricsSnapshot = SchemaMetrics.getMetricsSnapshot();
  Map<String, Long> diffs =
    SchemaMetrics.diffMetrics(previousMetricSnapshot, currentMetricsSnapshot);

  long dataBlockRead = SchemaMetrics.getLong(diffs,
      schemaMetrics.getBlockMetricName(BlockCategory.DATA, false, BlockMetricType.READ_COUNT));
  long indexBlockRead = SchemaMetrics.getLong(diffs,
      schemaMetrics.getBlockMetricName(BlockCategory.INDEX, false, BlockMetricType.READ_COUNT));

  Assert.assertEquals(expectDataBlockRead, dataBlockRead);
  Assert.assertEquals(expectedIndexBlockRead, indexBlockRead);
}
Project: IRIndex    File: TestBlocksScanned.java
private void verifyDataAndIndexBlockRead(Map<String, Long> previousMetricSnapshot,
    SchemaMetrics schemaMetrics, long expectDataBlockRead, long expectedIndexBlockRead){
  Map<String, Long> currentMetricsSnapshot = SchemaMetrics.getMetricsSnapshot();
  Map<String, Long> diffs =
    SchemaMetrics.diffMetrics(previousMetricSnapshot, currentMetricsSnapshot);

  long dataBlockRead = SchemaMetrics.getLong(diffs,
      schemaMetrics.getBlockMetricName(BlockCategory.DATA, false, BlockMetricType.READ_COUNT));
  long indexBlockRead = SchemaMetrics.getLong(diffs,
      schemaMetrics.getBlockMetricName(BlockCategory.INDEX, false, BlockMetricType.READ_COUNT));

  Assert.assertEquals(expectDataBlockRead, dataBlockRead);
  Assert.assertEquals(expectedIndexBlockRead, indexBlockRead);
}
Project: HBase-Research    File: TestBlocksScanned.java
private void verifyDataAndIndexBlockRead(Map<String, Long> previousMetricSnapshot,
    SchemaMetrics schemaMetrics, long expectDataBlockRead, long expectedIndexBlockRead){
  Map<String, Long> currentMetricsSnapshot = SchemaMetrics.getMetricsSnapshot();
  Map<String, Long> diffs =
    SchemaMetrics.diffMetrics(previousMetricSnapshot, currentMetricsSnapshot);

  long dataBlockRead = SchemaMetrics.getLong(diffs,
      schemaMetrics.getBlockMetricName(BlockCategory.DATA, false, BlockMetricType.READ_COUNT));
  long indexBlockRead = SchemaMetrics.getLong(diffs,
      schemaMetrics.getBlockMetricName(BlockCategory.INDEX, false, BlockMetricType.READ_COUNT));

  Assert.assertEquals(expectDataBlockRead, dataBlockRead);
  Assert.assertEquals(expectedIndexBlockRead, indexBlockRead);
}
Project: hbase-0.94.8-qod    File: TestBlocksScanned.java
private void verifyDataAndIndexBlockRead(Map<String, Long> previousMetricSnapshot,
    SchemaMetrics schemaMetrics, long expectDataBlockRead, long expectedIndexBlockRead){
  Map<String, Long> currentMetricsSnapshot = SchemaMetrics.getMetricsSnapshot();
  Map<String, Long> diffs =
    SchemaMetrics.diffMetrics(previousMetricSnapshot, currentMetricsSnapshot);

  long dataBlockRead = SchemaMetrics.getLong(diffs,
      schemaMetrics.getBlockMetricName(BlockCategory.DATA, false, BlockMetricType.READ_COUNT));
  long indexBlockRead = SchemaMetrics.getLong(diffs,
      schemaMetrics.getBlockMetricName(BlockCategory.INDEX, false, BlockMetricType.READ_COUNT));

  Assert.assertEquals(expectDataBlockRead, dataBlockRead);
  Assert.assertEquals(expectedIndexBlockRead, indexBlockRead);
}
Project: hindex    File: TestBlocksScanned.java
private void verifyDataAndIndexBlockRead(Map<String, Long> previousMetricSnapshot,
    SchemaMetrics schemaMetrics, long expectDataBlockRead, long expectedIndexBlockRead){
  Map<String, Long> currentMetricsSnapshot = SchemaMetrics.getMetricsSnapshot();
  Map<String, Long> diffs =
    SchemaMetrics.diffMetrics(previousMetricSnapshot, currentMetricsSnapshot);

  long dataBlockRead = SchemaMetrics.getLong(diffs,
      schemaMetrics.getBlockMetricName(BlockCategory.DATA, false, BlockMetricType.READ_COUNT));
  long indexBlockRead = SchemaMetrics.getLong(diffs,
      schemaMetrics.getBlockMetricName(BlockCategory.INDEX, false, BlockMetricType.READ_COUNT));

  Assert.assertEquals(expectDataBlockRead, dataBlockRead);
  Assert.assertEquals(expectedIndexBlockRead, indexBlockRead);
}
Project: LCIndex-HBase-0.94.16    File: TestScannerSelectionUsingTTL.java
@Test
public void testScannerSelection() throws IOException {
  Configuration conf = TEST_UTIL.getConfiguration();
  conf.setBoolean("hbase.store.delete.expired.storefile", false);
  HColumnDescriptor hcd =
    new HColumnDescriptor(FAMILY_BYTES)
        .setMaxVersions(Integer.MAX_VALUE)
        .setTimeToLive(TTL_SECONDS);
  HTableDescriptor htd = new HTableDescriptor(TABLE);
  htd.addFamily(hcd);
  HRegionInfo info = new HRegionInfo(Bytes.toBytes(TABLE));
  HRegion region =
      HRegion.createHRegion(info, TEST_UTIL.getClusterTestDir(),
          conf, htd);

  for (int iFile = 0; iFile < totalNumFiles; ++iFile) {
    if (iFile == NUM_EXPIRED_FILES) {
      Threads.sleepWithoutInterrupt(TTL_MS);
    }

    for (int iRow = 0; iRow < NUM_ROWS; ++iRow) {
      Put put = new Put(Bytes.toBytes("row" + iRow));
      for (int iCol = 0; iCol < NUM_COLS_PER_ROW; ++iCol) {
        put.add(FAMILY_BYTES, Bytes.toBytes("col" + iCol),
            Bytes.toBytes("value" + iFile + "_" + iRow + "_" + iCol));
      }
      region.put(put);
    }
    region.flushcache();
  }

  Scan scan = new Scan();
  scan.setMaxVersions(Integer.MAX_VALUE);
  CacheConfig cacheConf = new CacheConfig(conf);
  LruBlockCache cache = (LruBlockCache) cacheConf.getBlockCache();
  cache.clearCache();
  InternalScanner scanner = region.getScanner(scan);
  List<KeyValue> results = new ArrayList<KeyValue>();
  final int expectedKVsPerRow = numFreshFiles * NUM_COLS_PER_ROW;
  int numReturnedRows = 0;
  LOG.info("Scanning the entire table");
  while (scanner.next(results) || results.size() > 0) {
    assertEquals(expectedKVsPerRow, results.size());
    ++numReturnedRows;
    results.clear();
  }
  assertEquals(NUM_ROWS, numReturnedRows);
  Set<String> accessedFiles = cache.getCachedFileNamesForTest();
  LOG.debug("Files accessed during scan: " + accessedFiles);

  Map<String, Long> metricsBeforeCompaction =
    SchemaMetrics.getMetricsSnapshot();

  // Exercise both compaction codepaths.
  if (explicitCompaction) {
    region.getStore(FAMILY_BYTES).compactRecentForTesting(totalNumFiles);
  } else {
    region.compactStores();
  }

  SchemaMetrics.validateMetricChanges(metricsBeforeCompaction);
  Map<String, Long> compactionMetrics =
      SchemaMetrics.diffMetrics(metricsBeforeCompaction,
          SchemaMetrics.getMetricsSnapshot());
  long compactionDataBlocksRead = SchemaMetrics.getLong(
      compactionMetrics,
      SchemaMetrics.getInstance(TABLE, FAMILY).getBlockMetricName(
          BlockCategory.DATA, true, BlockMetricType.READ_COUNT));
  assertEquals("Invalid number of blocks accessed during compaction. " +
      "We only expect non-expired files to be accessed.",
      numFreshFiles, compactionDataBlocksRead);
  region.close();
}
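Note that the compaction check in the test above passes true as the second argument to getBlockMetricName, selecting the counter for data blocks read during a compaction; the scan-oriented examples pass false (the non-compaction case, which the constant SchemaMetrics.NO_COMPACTION appears to name in the getMetricName helpers further down) to count regular reads.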
Project: LCIndex-HBase-0.94.16    File: TestScannerSelectionUsingKeyRange.java
@Test
public void testScannerSelection() throws IOException {
  Configuration conf = TEST_UTIL.getConfiguration();
  conf.setInt("hbase.hstore.compactionThreshold", 10000);
  HColumnDescriptor hcd = new HColumnDescriptor(FAMILY_BYTES).setBlockCacheEnabled(true)
      .setBloomFilterType(bloomType);
  HTableDescriptor htd = new HTableDescriptor(TABLE);
  htd.addFamily(hcd);
  HRegionInfo info = new HRegionInfo(Bytes.toBytes(TABLE));
  HRegion region = HRegion.createHRegion(info, TEST_UTIL.getClusterTestDir(), conf, htd);

  for (int iFile = 0; iFile < NUM_FILES; ++iFile) {
    for (int iRow = 0; iRow < NUM_ROWS; ++iRow) {
      Put put = new Put(Bytes.toBytes("row" + iRow));
      for (int iCol = 0; iCol < NUM_COLS_PER_ROW; ++iCol) {
        put.add(FAMILY_BYTES, Bytes.toBytes("col" + iCol),
            Bytes.toBytes("value" + iFile + "_" + iRow + "_" + iCol));
      }
      region.put(put);
    }
    region.flushcache();
  }

  Scan scan = new Scan(Bytes.toBytes("aaa"), Bytes.toBytes("aaz"));
  CacheConfig cacheConf = new CacheConfig(conf);
  LruBlockCache cache = (LruBlockCache) cacheConf.getBlockCache();
  cache.clearCache();
  Map<String, Long> metricsBefore = SchemaMetrics.getMetricsSnapshot();
  SchemaMetrics.validateMetricChanges(metricsBefore);
  InternalScanner scanner = region.getScanner(scan);
  List<KeyValue> results = new ArrayList<KeyValue>();
  while (scanner.next(results)) {
  }
  scanner.close();
  assertEquals(0, results.size());
  Set<String> accessedFiles = cache.getCachedFileNamesForTest();
  assertEquals(accessedFiles.size(), 0);
  //assertEquals(cache.getBlockCount(), 0);
  Map<String, Long> diffMetrics = SchemaMetrics.diffMetrics(metricsBefore,
    SchemaMetrics.getMetricsSnapshot());
  SchemaMetrics schemaMetrics = SchemaMetrics.getInstance(TABLE, FAMILY);
  long dataBlockRead = SchemaMetrics.getLong(diffMetrics,
    schemaMetrics.getBlockMetricName(BlockCategory.DATA, false, BlockMetricType.READ_COUNT));
  assertEquals(dataBlockRead, 0);
  region.close();
}
Project: LCIndex-HBase-0.94.16    File: TestForceCacheImportantBlocks.java
private String getMetricName(SchemaMetrics metrics, BlockCategory category) {
  String hitsMetricName =
      metrics.getBlockMetricName(category, SchemaMetrics.NO_COMPACTION,
          BlockMetricType.CACHE_HIT);
  return hitsMetricName;
}
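The getMetricName helper above only builds the name of the cache-hit counter for a block category; a snapshot diff can then be queried for that name in the same way as for READ_COUNT. The sketch below shows one way to do that; it is an editorial illustration rather than project code, and the table and family arguments are whatever names the metrics were registered under.

import java.util.Map;

import org.apache.hadoop.hbase.io.hfile.BlockType.BlockCategory;
import org.apache.hadoop.hbase.regionserver.metrics.SchemaMetrics;
import org.apache.hadoop.hbase.regionserver.metrics.SchemaMetrics.BlockMetricType;

public class CacheHitCountSketch {
  /** Returns the DATA-block cache hits (non-compaction reads) accumulated since {@code before}. */
  public static long dataBlockCacheHitsSince(Map<String, Long> before,
      String table, String family) {
    Map<String, Long> diff =
        SchemaMetrics.diffMetrics(before, SchemaMetrics.getMetricsSnapshot());
    SchemaMetrics schemaMetrics = SchemaMetrics.getInstance(table, family);
    // NO_COMPACTION selects the counter for reads done outside of compactions.
    return SchemaMetrics.getLong(diff,
        schemaMetrics.getBlockMetricName(BlockCategory.DATA, SchemaMetrics.NO_COMPACTION,
            BlockMetricType.CACHE_HIT));
  }
}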
Project: IRIndex    File: TestScannerSelectionUsingTTL.java
@Test
public void testScannerSelection() throws IOException {
  Configuration conf = TEST_UTIL.getConfiguration();
  conf.setBoolean("hbase.store.delete.expired.storefile", false);
  HColumnDescriptor hcd =
    new HColumnDescriptor(FAMILY_BYTES)
        .setMaxVersions(Integer.MAX_VALUE)
        .setTimeToLive(TTL_SECONDS);
  HTableDescriptor htd = new HTableDescriptor(TABLE);
  htd.addFamily(hcd);
  HRegionInfo info = new HRegionInfo(Bytes.toBytes(TABLE));
  HRegion region =
      HRegion.createHRegion(info, TEST_UTIL.getClusterTestDir(),
          conf, htd);

  for (int iFile = 0; iFile < totalNumFiles; ++iFile) {
    if (iFile == NUM_EXPIRED_FILES) {
      Threads.sleepWithoutInterrupt(TTL_MS);
    }

    for (int iRow = 0; iRow < NUM_ROWS; ++iRow) {
      Put put = new Put(Bytes.toBytes("row" + iRow));
      for (int iCol = 0; iCol < NUM_COLS_PER_ROW; ++iCol) {
        put.add(FAMILY_BYTES, Bytes.toBytes("col" + iCol),
            Bytes.toBytes("value" + iFile + "_" + iRow + "_" + iCol));
      }
      region.put(put);
    }
    region.flushcache();
  }

  Scan scan = new Scan();
  scan.setMaxVersions(Integer.MAX_VALUE);
  CacheConfig cacheConf = new CacheConfig(conf);
  LruBlockCache cache = (LruBlockCache) cacheConf.getBlockCache();
  cache.clearCache();
  InternalScanner scanner = region.getScanner(scan);
  List<KeyValue> results = new ArrayList<KeyValue>();
  final int expectedKVsPerRow = numFreshFiles * NUM_COLS_PER_ROW;
  int numReturnedRows = 0;
  LOG.info("Scanning the entire table");
  while (scanner.next(results) || results.size() > 0) {
    assertEquals(expectedKVsPerRow, results.size());
    ++numReturnedRows;
    results.clear();
  }
  assertEquals(NUM_ROWS, numReturnedRows);
  Set<String> accessedFiles = cache.getCachedFileNamesForTest();
  LOG.debug("Files accessed during scan: " + accessedFiles);

  Map<String, Long> metricsBeforeCompaction =
    SchemaMetrics.getMetricsSnapshot();

  // Exercise both compaction codepaths.
  if (explicitCompaction) {
    region.getStore(FAMILY_BYTES).compactRecentForTesting(totalNumFiles);
  } else {
    region.compactStores();
  }

  SchemaMetrics.validateMetricChanges(metricsBeforeCompaction);
  Map<String, Long> compactionMetrics =
      SchemaMetrics.diffMetrics(metricsBeforeCompaction,
          SchemaMetrics.getMetricsSnapshot());
  long compactionDataBlocksRead = SchemaMetrics.getLong(
      compactionMetrics,
      SchemaMetrics.getInstance(TABLE, FAMILY).getBlockMetricName(
          BlockCategory.DATA, true, BlockMetricType.READ_COUNT));
  assertEquals("Invalid number of blocks accessed during compaction. " +
      "We only expect non-expired files to be accessed.",
      numFreshFiles, compactionDataBlocksRead);
  region.close();
}
Project: IRIndex    File: TestScannerSelectionUsingKeyRange.java
@Test
public void testScannerSelection() throws IOException {
  Configuration conf = TEST_UTIL.getConfiguration();
  conf.setInt("hbase.hstore.compactionThreshold", 10000);
  HColumnDescriptor hcd = new HColumnDescriptor(FAMILY_BYTES).setBlockCacheEnabled(true)
      .setBloomFilterType(bloomType);
  HTableDescriptor htd = new HTableDescriptor(TABLE);
  htd.addFamily(hcd);
  HRegionInfo info = new HRegionInfo(Bytes.toBytes(TABLE));
  HRegion region = HRegion.createHRegion(info, TEST_UTIL.getClusterTestDir(), conf, htd);

  for (int iFile = 0; iFile < NUM_FILES; ++iFile) {
    for (int iRow = 0; iRow < NUM_ROWS; ++iRow) {
      Put put = new Put(Bytes.toBytes("row" + iRow));
      for (int iCol = 0; iCol < NUM_COLS_PER_ROW; ++iCol) {
        put.add(FAMILY_BYTES, Bytes.toBytes("col" + iCol),
            Bytes.toBytes("value" + iFile + "_" + iRow + "_" + iCol));
      }
      region.put(put);
    }
    region.flushcache();
  }

  Scan scan = new Scan(Bytes.toBytes("aaa"), Bytes.toBytes("aaz"));
  CacheConfig cacheConf = new CacheConfig(conf);
  LruBlockCache cache = (LruBlockCache) cacheConf.getBlockCache();
  cache.clearCache();
  Map<String, Long> metricsBefore = SchemaMetrics.getMetricsSnapshot();
  SchemaMetrics.validateMetricChanges(metricsBefore);
  InternalScanner scanner = region.getScanner(scan);
  List<KeyValue> results = new ArrayList<KeyValue>();
  while (scanner.next(results)) {
  }
  scanner.close();
  assertEquals(0, results.size());
  Set<String> accessedFiles = cache.getCachedFileNamesForTest();
  assertEquals(accessedFiles.size(), 0);
  //assertEquals(cache.getBlockCount(), 0);
  Map<String, Long> diffMetrics = SchemaMetrics.diffMetrics(metricsBefore,
    SchemaMetrics.getMetricsSnapshot());
  SchemaMetrics schemaMetrics = SchemaMetrics.getInstance(TABLE, FAMILY);
  long dataBlockRead = SchemaMetrics.getLong(diffMetrics,
    schemaMetrics.getBlockMetricName(BlockCategory.DATA, false, BlockMetricType.READ_COUNT));
  assertEquals(dataBlockRead, 0);
  region.close();
}
Project: IRIndex    File: TestForceCacheImportantBlocks.java
private String getMetricName(SchemaMetrics metrics, BlockCategory category) {
  String hitsMetricName =
      metrics.getBlockMetricName(category, SchemaMetrics.NO_COMPACTION,
          BlockMetricType.CACHE_HIT);
  return hitsMetricName;
}
Project: HBase-Research    File: TestScannerSelectionUsingTTL.java
@Test
public void testScannerSelection() throws IOException {
  Configuration conf = TEST_UTIL.getConfiguration();
  conf.setBoolean("hbase.store.delete.expired.storefile", false);
  HColumnDescriptor hcd =
    new HColumnDescriptor(FAMILY_BYTES)
        .setMaxVersions(Integer.MAX_VALUE)
        .setTimeToLive(TTL_SECONDS);
  HTableDescriptor htd = new HTableDescriptor(TABLE);
  htd.addFamily(hcd);
  HRegionInfo info = new HRegionInfo(Bytes.toBytes(TABLE));
  HRegion region =
      HRegion.createHRegion(info, TEST_UTIL.getClusterTestDir(),
          conf, htd);

  for (int iFile = 0; iFile < totalNumFiles; ++iFile) {
    if (iFile == NUM_EXPIRED_FILES) {
      Threads.sleepWithoutInterrupt(TTL_MS);
    }

    for (int iRow = 0; iRow < NUM_ROWS; ++iRow) {
      Put put = new Put(Bytes.toBytes("row" + iRow));
      for (int iCol = 0; iCol < NUM_COLS_PER_ROW; ++iCol) {
        put.add(FAMILY_BYTES, Bytes.toBytes("col" + iCol),
            Bytes.toBytes("value" + iFile + "_" + iRow + "_" + iCol));
      }
      region.put(put);
    }
    region.flushcache();
  }

  Scan scan = new Scan();
  scan.setMaxVersions(Integer.MAX_VALUE);
  CacheConfig cacheConf = new CacheConfig(conf);
  LruBlockCache cache = (LruBlockCache) cacheConf.getBlockCache();
  cache.clearCache();
  InternalScanner scanner = region.getScanner(scan);
  List<KeyValue> results = new ArrayList<KeyValue>();
  final int expectedKVsPerRow = numFreshFiles * NUM_COLS_PER_ROW;
  int numReturnedRows = 0;
  LOG.info("Scanning the entire table");
  while (scanner.next(results) || results.size() > 0) {
    assertEquals(expectedKVsPerRow, results.size());
    ++numReturnedRows;
    results.clear();
  }
  assertEquals(NUM_ROWS, numReturnedRows);
  Set<String> accessedFiles = cache.getCachedFileNamesForTest();
  LOG.debug("Files accessed during scan: " + accessedFiles);

  Map<String, Long> metricsBeforeCompaction =
    SchemaMetrics.getMetricsSnapshot();

  // Exercise both compaction codepaths.
  if (explicitCompaction) {
    region.getStore(FAMILY_BYTES).compactRecentForTesting(totalNumFiles);
  } else {
    region.compactStores();
  }

  SchemaMetrics.validateMetricChanges(metricsBeforeCompaction);
  Map<String, Long> compactionMetrics =
      SchemaMetrics.diffMetrics(metricsBeforeCompaction,
          SchemaMetrics.getMetricsSnapshot());
  long compactionDataBlocksRead = SchemaMetrics.getLong(
      compactionMetrics,
      SchemaMetrics.getInstance(TABLE, FAMILY).getBlockMetricName(
          BlockCategory.DATA, true, BlockMetricType.READ_COUNT));
  assertEquals("Invalid number of blocks accessed during compaction. " +
      "We only expect non-expired files to be accessed.",
      numFreshFiles, compactionDataBlocksRead);
  region.close();
}
Project: HBase-Research    File: TestScannerSelectionUsingKeyRange.java
@Test
public void testScannerSelection() throws IOException {
  Configuration conf = TEST_UTIL.getConfiguration();
  conf.setInt("hbase.hstore.compactionThreshold", 10000);
  HColumnDescriptor hcd = new HColumnDescriptor(FAMILY_BYTES).setBlockCacheEnabled(true)
      .setBloomFilterType(bloomType);
  HTableDescriptor htd = new HTableDescriptor(TABLE);
  htd.addFamily(hcd);
  HRegionInfo info = new HRegionInfo(Bytes.toBytes(TABLE));
  HRegion region = HRegion.createHRegion(info, TEST_UTIL.getClusterTestDir(), conf, htd);

  for (int iFile = 0; iFile < NUM_FILES; ++iFile) {
    for (int iRow = 0; iRow < NUM_ROWS; ++iRow) {
      Put put = new Put(Bytes.toBytes("row" + iRow));
      for (int iCol = 0; iCol < NUM_COLS_PER_ROW; ++iCol) {
        put.add(FAMILY_BYTES, Bytes.toBytes("col" + iCol),
            Bytes.toBytes("value" + iFile + "_" + iRow + "_" + iCol));
      }
      region.put(put);
    }
    region.flushcache();
  }

  Scan scan = new Scan(Bytes.toBytes("aaa"), Bytes.toBytes("aaz"));
  CacheConfig cacheConf = new CacheConfig(conf);
  LruBlockCache cache = (LruBlockCache) cacheConf.getBlockCache();
  cache.clearCache();
  Map<String, Long> metricsBefore = SchemaMetrics.getMetricsSnapshot();
  SchemaMetrics.validateMetricChanges(metricsBefore);
  InternalScanner scanner = region.getScanner(scan);
  List<KeyValue> results = new ArrayList<KeyValue>();
  while (scanner.next(results)) {
  }
  scanner.close();
  assertEquals(0, results.size());
  Set<String> accessedFiles = cache.getCachedFileNamesForTest();
  assertEquals(accessedFiles.size(), 0);
  //assertEquals(cache.getBlockCount(), 0);
  Map<String, Long> diffMetrics = SchemaMetrics.diffMetrics(metricsBefore,
    SchemaMetrics.getMetricsSnapshot());
  SchemaMetrics schemaMetrics = SchemaMetrics.getInstance(TABLE, FAMILY);
  long dataBlockRead = SchemaMetrics.getLong(diffMetrics,
    schemaMetrics.getBlockMetricName(BlockCategory.DATA, false, BlockMetricType.READ_COUNT));
  assertEquals(dataBlockRead, 0);
  region.close();
}
Project: HBase-Research    File: TestForceCacheImportantBlocks.java
private String getMetricName(SchemaMetrics metrics, BlockCategory category) {
  String hitsMetricName =
      metrics.getBlockMetricName(category, SchemaMetrics.NO_COMPACTION,
          BlockMetricType.CACHE_HIT);
  return hitsMetricName;
}
Project: hbase-0.94.8-qod    File: TestScannerSelectionUsingTTL.java
@Test
public void testScannerSelection() throws IOException {
  Configuration conf = TEST_UTIL.getConfiguration();
  conf.setBoolean("hbase.store.delete.expired.storefile", false);
  HColumnDescriptor hcd =
    new HColumnDescriptor(FAMILY_BYTES)
        .setMaxVersions(Integer.MAX_VALUE)
        .setTimeToLive(TTL_SECONDS);
  HTableDescriptor htd = new HTableDescriptor(TABLE);
  htd.addFamily(hcd);
  HRegionInfo info = new HRegionInfo(Bytes.toBytes(TABLE));
  HRegion region =
      HRegion.createHRegion(info, TEST_UTIL.getClusterTestDir(),
          conf, htd);

  for (int iFile = 0; iFile < totalNumFiles; ++iFile) {
    if (iFile == NUM_EXPIRED_FILES) {
      Threads.sleepWithoutInterrupt(TTL_MS);
    }

    for (int iRow = 0; iRow < NUM_ROWS; ++iRow) {
      Put put = new Put(Bytes.toBytes("row" + iRow));
      for (int iCol = 0; iCol < NUM_COLS_PER_ROW; ++iCol) {
        put.add(FAMILY_BYTES, Bytes.toBytes("col" + iCol),
            Bytes.toBytes("value" + iFile + "_" + iRow + "_" + iCol));
      }
      region.put(put);
    }
    region.flushcache();
  }

  Scan scan = new Scan();
  scan.setMaxVersions(Integer.MAX_VALUE);
  CacheConfig cacheConf = new CacheConfig(conf);
  LruBlockCache cache = (LruBlockCache) cacheConf.getBlockCache();
  cache.clearCache();
  InternalScanner scanner = region.getScanner(scan);
  List<KeyValue> results = new ArrayList<KeyValue>();
  final int expectedKVsPerRow = numFreshFiles * NUM_COLS_PER_ROW;
  int numReturnedRows = 0;
  LOG.info("Scanning the entire table");
  while (scanner.next(results) || results.size() > 0) {
    assertEquals(expectedKVsPerRow, results.size());
    ++numReturnedRows;
    results.clear();
  }
  assertEquals(NUM_ROWS, numReturnedRows);
  Set<String> accessedFiles = cache.getCachedFileNamesForTest();
  LOG.debug("Files accessed during scan: " + accessedFiles);

  Map<String, Long> metricsBeforeCompaction =
    SchemaMetrics.getMetricsSnapshot();

  // Exercise both compaction codepaths.
  if (explicitCompaction) {
    region.getStore(FAMILY_BYTES).compactRecentForTesting(totalNumFiles);
  } else {
    region.compactStores();
  }

  SchemaMetrics.validateMetricChanges(metricsBeforeCompaction);
  Map<String, Long> compactionMetrics =
      SchemaMetrics.diffMetrics(metricsBeforeCompaction,
          SchemaMetrics.getMetricsSnapshot());
  long compactionDataBlocksRead = SchemaMetrics.getLong(
      compactionMetrics,
      SchemaMetrics.getInstance(TABLE, FAMILY).getBlockMetricName(
          BlockCategory.DATA, true, BlockMetricType.READ_COUNT));
  assertEquals("Invalid number of blocks accessed during compaction. " +
      "We only expect non-expired files to be accessed.",
      numFreshFiles, compactionDataBlocksRead);
  region.close();
}
Project: hbase-0.94.8-qod    File: TestScannerSelectionUsingKeyRange.java
@Test
public void testScannerSelection() throws IOException {
  Configuration conf = TEST_UTIL.getConfiguration();
  conf.setInt("hbase.hstore.compactionThreshold", 10000);
  HColumnDescriptor hcd = new HColumnDescriptor(FAMILY_BYTES).setBlockCacheEnabled(true)
      .setBloomFilterType(bloomType);
  HTableDescriptor htd = new HTableDescriptor(TABLE);
  htd.addFamily(hcd);
  HRegionInfo info = new HRegionInfo(Bytes.toBytes(TABLE));
  HRegion region = HRegion.createHRegion(info, TEST_UTIL.getClusterTestDir(), conf, htd);

  for (int iFile = 0; iFile < NUM_FILES; ++iFile) {
    for (int iRow = 0; iRow < NUM_ROWS; ++iRow) {
      Put put = new Put(Bytes.toBytes("row" + iRow));
      for (int iCol = 0; iCol < NUM_COLS_PER_ROW; ++iCol) {
        put.add(FAMILY_BYTES, Bytes.toBytes("col" + iCol),
            Bytes.toBytes("value" + iFile + "_" + iRow + "_" + iCol));
      }
      region.put(put);
    }
    region.flushcache();
  }

  Scan scan = new Scan(Bytes.toBytes("aaa"), Bytes.toBytes("aaz"));
  CacheConfig cacheConf = new CacheConfig(conf);
  LruBlockCache cache = (LruBlockCache) cacheConf.getBlockCache();
  cache.clearCache();
  Map<String, Long> metricsBefore = SchemaMetrics.getMetricsSnapshot();
  SchemaMetrics.validateMetricChanges(metricsBefore);
  InternalScanner scanner = region.getScanner(scan);
  List<KeyValue> results = new ArrayList<KeyValue>();
  while (scanner.next(results)) {
  }
  scanner.close();
  assertEquals(0, results.size());
  Set<String> accessedFiles = cache.getCachedFileNamesForTest();
  assertEquals(accessedFiles.size(), 0);
  //assertEquals(cache.getBlockCount(), 0);
  Map<String, Long> diffMetrics = SchemaMetrics.diffMetrics(metricsBefore,
    SchemaMetrics.getMetricsSnapshot());
  SchemaMetrics schemaMetrics = SchemaMetrics.getInstance(TABLE, FAMILY);
  long dataBlockRead = SchemaMetrics.getLong(diffMetrics,
    schemaMetrics.getBlockMetricName(BlockCategory.DATA, false, BlockMetricType.READ_COUNT));
  assertEquals(dataBlockRead, 0);
  region.close();
}
Project: hbase-0.94.8-qod    File: TestForceCacheImportantBlocks.java
private String getMetricName(SchemaMetrics metrics, BlockCategory category) {
  String hitsMetricName =
      metrics.getBlockMetricName(category, SchemaMetrics.NO_COMPACTION,
          BlockMetricType.CACHE_HIT);
  return hitsMetricName;
}
Project: hindex    File: TestScannerSelectionUsingTTL.java
@Test
public void testScannerSelection() throws IOException {
  Configuration conf = TEST_UTIL.getConfiguration();
  conf.setBoolean("hbase.store.delete.expired.storefile", false);
  HColumnDescriptor hcd =
    new HColumnDescriptor(FAMILY_BYTES)
        .setMaxVersions(Integer.MAX_VALUE)
        .setTimeToLive(TTL_SECONDS);
  HTableDescriptor htd = new HTableDescriptor(TABLE);
  htd.addFamily(hcd);
  HRegionInfo info = new HRegionInfo(Bytes.toBytes(TABLE));
  HRegion region =
      HRegion.createHRegion(info, TEST_UTIL.getClusterTestDir(),
          conf, htd);

  for (int iFile = 0; iFile < totalNumFiles; ++iFile) {
    if (iFile == NUM_EXPIRED_FILES) {
      Threads.sleepWithoutInterrupt(TTL_MS);
    }

    for (int iRow = 0; iRow < NUM_ROWS; ++iRow) {
      Put put = new Put(Bytes.toBytes("row" + iRow));
      for (int iCol = 0; iCol < NUM_COLS_PER_ROW; ++iCol) {
        put.add(FAMILY_BYTES, Bytes.toBytes("col" + iCol),
            Bytes.toBytes("value" + iFile + "_" + iRow + "_" + iCol));
      }
      region.put(put);
    }
    region.flushcache();
  }

  Scan scan = new Scan();
  scan.setMaxVersions(Integer.MAX_VALUE);
  CacheConfig cacheConf = new CacheConfig(conf);
  LruBlockCache cache = (LruBlockCache) cacheConf.getBlockCache();
  cache.clearCache();
  InternalScanner scanner = region.getScanner(scan);
  List<KeyValue> results = new ArrayList<KeyValue>();
  final int expectedKVsPerRow = numFreshFiles * NUM_COLS_PER_ROW;
  int numReturnedRows = 0;
  LOG.info("Scanning the entire table");
  while (scanner.next(results) || results.size() > 0) {
    assertEquals(expectedKVsPerRow, results.size());
    ++numReturnedRows;
    results.clear();
  }
  assertEquals(NUM_ROWS, numReturnedRows);
  Set<String> accessedFiles = cache.getCachedFileNamesForTest();
  LOG.debug("Files accessed during scan: " + accessedFiles);

  Map<String, Long> metricsBeforeCompaction =
    SchemaMetrics.getMetricsSnapshot();

  // Exercise both compaction codepaths.
  if (explicitCompaction) {
    region.getStore(FAMILY_BYTES).compactRecentForTesting(totalNumFiles);
  } else {
    region.compactStores();
  }

  SchemaMetrics.validateMetricChanges(metricsBeforeCompaction);
  Map<String, Long> compactionMetrics =
      SchemaMetrics.diffMetrics(metricsBeforeCompaction,
          SchemaMetrics.getMetricsSnapshot());
  long compactionDataBlocksRead = SchemaMetrics.getLong(
      compactionMetrics,
      SchemaMetrics.getInstance(TABLE, FAMILY).getBlockMetricName(
          BlockCategory.DATA, true, BlockMetricType.READ_COUNT));
  assertEquals("Invalid number of blocks accessed during compaction. " +
      "We only expect non-expired files to be accessed.",
      numFreshFiles, compactionDataBlocksRead);
  region.close();
}
Project: hindex    File: TestScannerSelectionUsingKeyRange.java
@Test
public void testScannerSelection() throws IOException {
  Configuration conf = TEST_UTIL.getConfiguration();
  conf.setInt("hbase.hstore.compactionThreshold", 10000);
  HColumnDescriptor hcd = new HColumnDescriptor(FAMILY_BYTES).setBlockCacheEnabled(true)
      .setBloomFilterType(bloomType);
  HTableDescriptor htd = new HTableDescriptor(TABLE);
  htd.addFamily(hcd);
  HRegionInfo info = new HRegionInfo(Bytes.toBytes(TABLE));
  HRegion region = HRegion.createHRegion(info, TEST_UTIL.getClusterTestDir(), conf, htd);

  for (int iFile = 0; iFile < NUM_FILES; ++iFile) {
    for (int iRow = 0; iRow < NUM_ROWS; ++iRow) {
      Put put = new Put(Bytes.toBytes("row" + iRow));
      for (int iCol = 0; iCol < NUM_COLS_PER_ROW; ++iCol) {
        put.add(FAMILY_BYTES, Bytes.toBytes("col" + iCol),
            Bytes.toBytes("value" + iFile + "_" + iRow + "_" + iCol));
      }
      region.put(put);
    }
    region.flushcache();
  }

  Scan scan = new Scan(Bytes.toBytes("aaa"), Bytes.toBytes("aaz"));
  CacheConfig cacheConf = new CacheConfig(conf);
  LruBlockCache cache = (LruBlockCache) cacheConf.getBlockCache();
  cache.clearCache();
  Map<String, Long> metricsBefore = SchemaMetrics.getMetricsSnapshot();
  SchemaMetrics.validateMetricChanges(metricsBefore);
  InternalScanner scanner = region.getScanner(scan);
  List<KeyValue> results = new ArrayList<KeyValue>();
  while (scanner.next(results)) {
  }
  scanner.close();
  assertEquals(0, results.size());
  Set<String> accessedFiles = cache.getCachedFileNamesForTest();
  assertEquals(accessedFiles.size(), 0);
  //assertEquals(cache.getBlockCount(), 0);
  Map<String, Long> diffMetrics = SchemaMetrics.diffMetrics(metricsBefore,
    SchemaMetrics.getMetricsSnapshot());
  SchemaMetrics schemaMetrics = SchemaMetrics.getInstance(TABLE, FAMILY);
  long dataBlockRead = SchemaMetrics.getLong(diffMetrics,
    schemaMetrics.getBlockMetricName(BlockCategory.DATA, false, BlockMetricType.READ_COUNT));
  assertEquals(dataBlockRead, 0);
  region.close();
}
Project: hindex    File: TestForceCacheImportantBlocks.java
private String getMetricName(SchemaMetrics metrics, BlockCategory category) {
  String hitsMetricName =
      metrics.getBlockMetricName(category, SchemaMetrics.NO_COMPACTION,
          BlockMetricType.CACHE_HIT);
  return hitsMetricName;
}