Example source code for the Java class org.apache.hadoop.hbase.HBaseTestCase.HRegionIncommon
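HRegionIncommon is a test-only adapter from HBase's long-deprecated HBaseTestCase: it wraps an HRegion behind the Incommon interface so that loading helpers such as HBaseTestCase.addContent can target a bare region the same way they target a table. The snippets below, collected from several HBase forks (ditb, pbase, HIndex, PyroDB, c5), are near-identical for that reason. As a minimal sketch of the common setup (assuming a test class that already defines TEST_UTIL and TESTTABLEDESC, as the snippets below do):

HRegion region = TEST_UTIL.createLocalHRegion(TESTTABLEDESC, null, null);
HBaseTestCase.HRegionIncommon incommon = new HBaseTestCase.HRegionIncommon(region);
try {
  // addContent loads rows through the Incommon wrapper...
  HBaseTestCase.addContent(incommon, Bytes.toString(HConstants.CATALOG_FAMILY));
  // ...and flushcache() forces the memstore out to a store file.
  incommon.flushcache();
} finally {
  HRegion.closeHRegion(region);
}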

Project: ditb    File: TestScanner.java
/**
 * Test doing a sync flush in the middle of a scan. This essentially exercises the
 * StoreScanner update-readers code. It is not highly concurrent, since it is all one thread.
 * HBASE-910.
 * @throws Exception
 */
@Test
public void testScanAndSyncFlush() throws Exception {
  this.r = TEST_UTIL.createLocalHRegion(TESTTABLEDESC, null, null);
  HRegionIncommon hri = new HRegionIncommon(r);
  try {
      LOG.info("Added: " + HBaseTestCase.addContent(hri, Bytes.toString(HConstants.CATALOG_FAMILY),
          Bytes.toString(HConstants.REGIONINFO_QUALIFIER)));
    int count = count(hri, -1, false);
    assertEquals(count, count(hri, 100, false)); // do a sync flush.
  } catch (Exception e) {
    LOG.error("Failed", e);
    throw e;
  } finally {
    HRegion.closeHRegion(this.r);
  }
}
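The private count(...) helper is not reproduced on this page. Judging from how it is called, count(hri, flushIndex, concurrent) scans the whole region and, once flushIndex rows have been returned, triggers a flush either inline or from a second thread. A hedged sketch of the inline (sync) case; the method shape and names are assumptions, not the verbatim helper:

// Hypothetical sketch of the sync-flush path of count(...); flushIndex == -1 means no flush.
private int count(final HRegionIncommon hri, final int flushIndex) throws Exception {
  InternalScanner s = this.r.getScanner(new Scan());
  try {
    List<Cell> values = new ArrayList<Cell>();
    int count = 0;
    boolean more;
    do {
      more = s.next(values);          // false once the scanner is exhausted
      if (!values.isEmpty()) {
        count++;
        values.clear();
      }
      if (count == flushIndex) {
        hri.flushcache();             // sync flush mid-scan; StoreScanner must update readers
      }
    } while (more);
    return count;
  } finally {
    s.close();
  }
}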
Project: ditb    File: TestScanner.java
/**
 * Test doing a concurrent flush (from a second thread) while scanning. This exercises both
 * the StoreScanner update-readers path and the memstore -> snapshot -> store file transition.
 *
 * @throws Exception
 */
@Test
public void testScanAndRealConcurrentFlush() throws Exception {
  this.r = TEST_UTIL.createLocalHRegion(TESTTABLEDESC, null, null);
  HRegionIncommon hri = new HRegionIncommon(r);
  try {
      LOG.info("Added: " + HBaseTestCase.addContent(hri, Bytes.toString(HConstants.CATALOG_FAMILY),
          Bytes.toString(HConstants.REGIONINFO_QUALIFIER)));
    int count = count(hri, -1, false);
    assertEquals(count, count(hri, 100, true)); // do a true concurrent background thread flush
  } catch (Exception e) {
    LOG.error("Failed", e);
    throw e;
  } finally {
    HRegion.closeHRegion(this.r);
  }
}
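The only difference from testScanAndSyncFlush is the final boolean: count(hri, 100, true) performs the flush from a background thread while the scan keeps running. A hedged sketch of that pattern (hri and LOG come from the surrounding test; this illustrates the idea, it is not the verbatim helper):

final HRegionIncommon flushTarget = hri;
Thread flusher = new Thread("flusher") {
  @Override public void run() {
    try {
      // memstore -> snapshot -> store file while the scan is still live
      flushTarget.flushcache();
    } catch (Exception e) {
      LOG.error("concurrent flush failed", e);
    }
  }
};
flusher.start();
// ... the scanning thread keeps calling s.next(results) here ...
flusher.join(); // wait for the flush before asserting on the final row count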
Project: pbase    File: TestScanner.java
/**
 * Test doing a sync flush in the middle of a scan. This essentially exercises the
 * StoreScanner update-readers code. It is not highly concurrent, since it is all one thread.
 * HBASE-910.
 * @throws Exception
 */
@Test
public void testScanAndSyncFlush() throws Exception {
  this.r = TEST_UTIL.createLocalHRegion(TESTTABLEDESC, null, null);
  HRegionIncommon hri = new HRegionIncommon(r);
  try {
      LOG.info("Added: " + HBaseTestCase.addContent(hri, Bytes.toString(HConstants.CATALOG_FAMILY),
          Bytes.toString(HConstants.REGIONINFO_QUALIFIER)));
    int count = count(hri, -1, false);
    assertEquals(count, count(hri, 100, false)); // do a sync flush.
  } catch (Exception e) {
    LOG.error("Failed", e);
    throw e;
  } finally {
    HRegion.closeHRegion(this.r);
  }
}
Project: pbase    File: TestScanner.java
/**
 * Test doing a concurrent flush (from a second thread) while scanning. This exercises both
 * the StoreScanner update-readers path and the memstore -> snapshot -> store file transition.
 *
 * @throws Exception
 */
@Test
public void testScanAndRealConcurrentFlush() throws Exception {
  this.r = TEST_UTIL.createLocalHRegion(TESTTABLEDESC, null, null);
  HRegionIncommon hri = new HRegionIncommon(r);
  try {
      LOG.info("Added: " + HBaseTestCase.addContent(hri, Bytes.toString(HConstants.CATALOG_FAMILY),
          Bytes.toString(HConstants.REGIONINFO_QUALIFIER)));
    int count = count(hri, -1, false);
    assertEquals(count, count(hri, 100, true)); // do a true concurrent background thread flush
  } catch (Exception e) {
    LOG.error("Failed", e);
    throw e;
  } finally {
    HRegion.closeHRegion(this.r);
  }
}
Project: HIndex    File: TestScanner.java
/**
 * Test doing a sync flush in the middle of a scan. This essentially exercises the
 * StoreScanner update-readers code. It is not highly concurrent, since it is all one thread.
 * HBASE-910.
 * @throws Exception
 */
@Test
public void testScanAndSyncFlush() throws Exception {
  this.r = TEST_UTIL.createLocalHRegion(TESTTABLEDESC, null, null);
  HRegionIncommon hri = new HRegionIncommon(r);
  try {
      LOG.info("Added: " + HBaseTestCase.addContent(hri, Bytes.toString(HConstants.CATALOG_FAMILY),
          Bytes.toString(HConstants.REGIONINFO_QUALIFIER)));
    int count = count(hri, -1, false);
    assertEquals(count, count(hri, 100, false)); // do a sync flush.
  } catch (Exception e) {
    LOG.error("Failed", e);
    throw e;
  } finally {
    HRegion.closeHRegion(this.r);
  }
}
Project: HIndex    File: TestScanner.java
/**
 * Test doing a concurrent flush (from a second thread) while scanning. This exercises both
 * the StoreScanner update-readers path and the memstore -> snapshot -> store file transition.
 *
 * @throws Exception
 */
@Test
public void testScanAndRealConcurrentFlush() throws Exception {
  this.r = TEST_UTIL.createLocalHRegion(TESTTABLEDESC, null, null);
  HRegionIncommon hri = new HRegionIncommon(r);
  try {
      LOG.info("Added: " + HBaseTestCase.addContent(hri, Bytes.toString(HConstants.CATALOG_FAMILY),
          Bytes.toString(HConstants.REGIONINFO_QUALIFIER)));
    int count = count(hri, -1, false);
    assertEquals(count, count(hri, 100, true)); // do a true concurrent background thread flush
  } catch (Exception e) {
    LOG.error("Failed", e);
    throw e;
  } finally {
    HRegion.closeHRegion(this.r);
  }
}
Project: PyroDB    File: TestScanner.java
/**
 * Test doing a sync flush in the middle of a scan. This essentially exercises the
 * StoreScanner update-readers code. It is not highly concurrent, since it is all one thread.
 * HBASE-910.
 * @throws Exception
 */
@Test
public void testScanAndSyncFlush() throws Exception {
  this.r = TEST_UTIL.createLocalHRegion(TESTTABLEDESC, null, null);
  HRegionIncommon hri = new HRegionIncommon(r);
  try {
      LOG.info("Added: " + HBaseTestCase.addContent(hri, Bytes.toString(HConstants.CATALOG_FAMILY),
          Bytes.toString(HConstants.REGIONINFO_QUALIFIER)));
    int count = count(hri, -1, false);
    assertEquals(count, count(hri, 100, false)); // do a sync flush.
  } catch (Exception e) {
    LOG.error("Failed", e);
    throw e;
  } finally {
    HRegion.closeHRegion(this.r);
  }
}
Project: PyroDB    File: TestScanner.java
/**
 * Test doing a concurrent flush (from a second thread) while scanning. This exercises both
 * the StoreScanner update-readers path and the memstore -> snapshot -> store file transition.
 *
 * @throws Exception
 */
@Test
public void testScanAndRealConcurrentFlush() throws Exception {
  this.r = TEST_UTIL.createLocalHRegion(TESTTABLEDESC, null, null);
  HRegionIncommon hri = new HRegionIncommon(r);
  try {
      LOG.info("Added: " + HBaseTestCase.addContent(hri, Bytes.toString(HConstants.CATALOG_FAMILY),
          Bytes.toString(HConstants.REGIONINFO_QUALIFIER)));
    int count = count(hri, -1, false);
    assertEquals(count, count(hri, 100, true)); // do a true concurrent background thread flush
  } catch (Exception e) {
    LOG.error("Failed", e);
    throw e;
  } finally {
    HRegion.closeHRegion(this.r);
  }
}
Project: c5    File: TestScanner.java
/**
 * Test doing a sync flush in the middle of a scan. This essentially exercises the
 * StoreScanner update-readers code. It is not highly concurrent, since it is all one thread.
 * HBASE-910.
 * @throws Exception
 */
@Test
public void testScanAndSyncFlush() throws Exception {
  this.r = TEST_UTIL.createLocalHRegion(TESTTABLEDESC, null, null);
  HRegionIncommon hri = new HRegionIncommon(r);
  try {
      LOG.info("Added: " + HBaseTestCase.addContent(hri, Bytes.toString(HConstants.CATALOG_FAMILY),
          Bytes.toString(HConstants.REGIONINFO_QUALIFIER)));
    int count = count(hri, -1, false);
    assertEquals(count, count(hri, 100, false)); // do a sync flush.
  } catch (Exception e) {
    LOG.error("Failed", e);
    throw e;
  } finally {
    HRegion.closeHRegion(this.r);
  }
}
Project: c5    File: TestScanner.java
/**
 * Test doing a concurrent flush (from a second thread) while scanning. This exercises both
 * the StoreScanner update-readers path and the memstore -> snapshot -> store file transition.
 *
 * @throws Exception
 */
@Test
public void testScanAndRealConcurrentFlush() throws Exception {
  this.r = TEST_UTIL.createLocalHRegion(TESTTABLEDESC, null, null);
  HRegionIncommon hri = new HRegionIncommon(r);
  try {
      LOG.info("Added: " + HBaseTestCase.addContent(hri, Bytes.toString(HConstants.CATALOG_FAMILY),
          Bytes.toString(HConstants.REGIONINFO_QUALIFIER)));
    int count = count(hri, -1, false);
    assertEquals(count, count(hri, 100, true)); // do a true concurrent background thread flush
  } catch (Exception e) {
    LOG.error("Failed", e);
    throw e;
  } finally {
    HRegion.closeHRegion(this.r);
  }
}
Project: ditb    File: TestCompaction.java
/**
 * Verify that you can stop a long-running compaction (used during RS shutdown)
 * @throws Exception
 */
@Test
public void testInterruptCompaction() throws Exception {
  assertEquals(0, count());

  // lower the close-check interval (bytes written between abort checks) for this test
  int origWI = HStore.closeCheckInterval;
  HStore.closeCheckInterval = 10 * 1000; // 10 KB

  try {
    // Create a couple store files w/ 15KB (over 10KB interval)
    int jmax = (int) Math.ceil(15.0 / compactionThreshold);
    byte[] pad = new byte[1000]; // 1 KB chunk
    for (int i = 0; i < compactionThreshold; i++) {
      HRegionIncommon loader = new HRegionIncommon(r);
      Put p = new Put(Bytes.add(STARTROW, Bytes.toBytes(i)));
      p.setDurability(Durability.SKIP_WAL);
      for (int j = 0; j < jmax; j++) {
        p.add(COLUMN_FAMILY, Bytes.toBytes(j), pad);
      }
      HBaseTestCase.addContent(loader, Bytes.toString(COLUMN_FAMILY));
      loader.put(p);
      loader.flushcache();
    }

    HRegion spyR = spy(r);
    doAnswer(new Answer() {
      public Object answer(InvocationOnMock invocation) throws Throwable {
        r.writestate.writesEnabled = false;
        return invocation.callRealMethod();
      }
    }).when(spyR).doRegionCompactionPrep();

    // force a minor compaction, but not before requesting a stop
    spyR.compactStores();

    // ensure that the compaction stopped, all old files are intact,
    Store s = r.stores.get(COLUMN_FAMILY);
    assertEquals(compactionThreshold, s.getStorefilesCount());
    assertTrue(s.getStorefilesSize() > 15 * 1000);
    // and no new store files persisted past compactStores()
    FileStatus[] ls = r.getFilesystem().listStatus(r.getRegionFileSystem().getTempDir());
    assertEquals(0, ls.length);

  } finally {
    // don't mess up future tests
    r.writestate.writesEnabled = true;
    HStore.closeCheckInterval = origWI;

    // Delete all Store information once done using
    for (int i = 0; i < compactionThreshold; i++) {
      Delete delete = new Delete(Bytes.add(STARTROW, Bytes.toBytes(i)));
      byte[][] famAndQf = { COLUMN_FAMILY, null };
      delete.deleteFamily(famAndQf[0]);
      r.delete(delete);
    }
    r.flush(true);

    // Multiple versions allowed for an entry, so the delete isn't enough
    // Lower TTL and expire to ensure that all our entries have been wiped
    final int ttl = 1000;
    for (Store hstore : this.r.stores.values()) {
      HStore store = (HStore) hstore;
      ScanInfo old = store.getScanInfo();
      ScanInfo si =
          new ScanInfo(old.getConfiguration(), old.getFamily(), old.getMinVersions(),
              old.getMaxVersions(), ttl, old.getKeepDeletedCells(), 0, old.getComparator());
      store.setScanInfo(si);
    }
    Thread.sleep(ttl);

    r.compact(true);
    assertEquals(0, count());
  }
}
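The interesting move here is the Mockito spy: doAnswer(...) flips r.writestate.writesEnabled just before the real doRegionCompactionPrep() runs, and the compactor, which re-checks that flag every closeCheckInterval bytes of output, aborts and discards its temporary file. The same spy pattern in isolation, as a runnable toy (the Greeter class is hypothetical scaffolding for the demo, not HBase API):

import static org.mockito.Mockito.*;
import org.mockito.invocation.InvocationOnMock;
import org.mockito.stubbing.Answer;

public class SpySideEffectDemo {
  static class Greeter {
    boolean enabled = true;
    String greet() { return enabled ? "hello" : "disabled"; }
    void prep() { /* real prep work would happen here */ }
  }

  public static void main(String[] args) {
    final Greeter real = new Greeter();
    Greeter spyGreeter = spy(real);
    doAnswer(new Answer<Object>() {
      @Override public Object answer(InvocationOnMock invocation) throws Throwable {
        real.enabled = false;                 // side effect, like writesEnabled = false above
        return invocation.callRealMethod();   // then fall through to the real prep()
      }
    }).when(spyGreeter).prep();

    spyGreeter.prep();
    // We flip and read the flag on the real object, just as the test flips r's field:
    System.out.println(real.greet()); // prints "disabled"
  }
}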
Project: ditb    File: TestCompaction.java
private void createStoreFile(final HRegion region, String family) throws IOException {
  HRegionIncommon loader = new HRegionIncommon(region);
  HBaseTestCase.addContent(loader, family);
  loader.flushcache();
}
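createStoreFile is the smallest building block in these compaction tests: one addContent pass plus one flushcache() yields exactly one new store file. A typical setup then looks like this sketch (names taken from the surrounding tests):

// Accumulate enough store files to cross the threshold, then compact.
for (int i = 0; i < compactionThreshold; i++) {
  createStoreFile(r, Bytes.toString(COLUMN_FAMILY));
}
r.compact(true); // major-compact the accumulated files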
Project: ditb    File: TestMinorCompaction.java
private void testMinorCompactionWithDelete(Delete delete, int expectedResultsAfterDelete) throws Exception {
  HRegionIncommon loader = new HRegionIncommon(r);
  for (int i = 0; i < compactionThreshold + 1; i++) {
    HBaseTestCase.addContent(loader, Bytes.toString(fam1), Bytes.toString(col1), firstRowBytes,
      thirdRowBytes, i);
    HBaseTestCase.addContent(loader, Bytes.toString(fam1), Bytes.toString(col2), firstRowBytes,
      thirdRowBytes, i);
    HBaseTestCase.addContent(loader, Bytes.toString(fam2), Bytes.toString(col1), firstRowBytes,
      thirdRowBytes, i);
    HBaseTestCase.addContent(loader, Bytes.toString(fam2), Bytes.toString(col2), firstRowBytes,
      thirdRowBytes, i);
    r.flush(true);
  }

  Result result = r.get(new Get(firstRowBytes).addColumn(fam1, col1).setMaxVersions(100));
  assertEquals(compactionThreshold, result.size());
  result = r.get(new Get(secondRowBytes).addColumn(fam2, col2).setMaxVersions(100));
  assertEquals(compactionThreshold, result.size());

  // Now add deletes to memstore and then flush it.  That will put us over
  // the compaction threshold of 3 store files.  Compacting these store files
  // should result in a compacted store file that has no references to the
  // deleted row.
  r.delete(delete);

  // Make sure that we have only deleted family2 from secondRowBytes
  result = r.get(new Get(secondRowBytes).addColumn(fam2, col2).setMaxVersions(100));
  assertEquals(expectedResultsAfterDelete, result.size());
  // but we still have firstrow
  result = r.get(new Get(firstRowBytes).addColumn(fam1, col1).setMaxVersions(100));
  assertEquals(compactionThreshold, result.size());

  r.flush(true);
  // should not change anything.
  // Let us check again

  // Make sure that we have only deleted family2 from secondRowBytes
  result = r.get(new Get(secondRowBytes).addColumn(fam2, col2).setMaxVersions(100));
  assertEquals(expectedResultsAfterDelete, result.size());
  // but we still have firstrow
  result = r.get(new Get(firstRowBytes).addColumn(fam1, col1).setMaxVersions(100));
  assertEquals(compactionThreshold, result.size());

  // do a compaction
  Store store2 = r.getStore(fam2);
  int numFiles1 = store2.getStorefiles().size();
  assertTrue("Was expecting to see 4 store files", numFiles1 > compactionThreshold); // > 3
  ((HStore)store2).compactRecentForTestingAssumingDefaultPolicy(compactionThreshold);   // = 3
  int numFiles2 = store2.getStorefiles().size();
  // Check that we did compact
  assertTrue("Number of store files should go down", numFiles1 > numFiles2);
  // Check that it was a minor compaction.
  assertTrue("Was not supposed to be a major compaction", numFiles2 > 1);

  // Make sure that we have only deleted family2 from secondRowBytes
  result = r.get(new Get(secondRowBytes).addColumn(fam2, col2).setMaxVersions(100));
  assertEquals(expectedResultsAfterDelete, result.size());
  // but we still have firstrow
  result = r.get(new Get(firstRowBytes).addColumn(fam1, col1).setMaxVersions(100));
  assertEquals(compactionThreshold, result.size());
}
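The callers of this helper are not shown on this page; a plausible one, based on the Delete API used elsewhere in these snippets, pairs a column-level delete with the number of versions expected to survive the minor compaction:

// Hypothetical caller sketch: delete all versions of fam2:col2 on the second row,
// then expect zero surviving results for that column.
@Test
public void testMinorCompactionWithDeleteColumn() throws Exception {
  Delete deleteCol = new Delete(secondRowBytes);
  deleteCol.deleteColumns(fam2, col2); // all versions of one column
  testMinorCompactionWithDelete(deleteCol, 0);
}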
Project: ditb    File: TestScanner.java
/**
 * Make sure the scanner returns correct results when a major compaction is run
 * concurrently with deletes.
 *
 * @throws Exception
 */
@Test
@SuppressWarnings("deprecation")
public void testScanAndConcurrentMajorCompact() throws Exception {
  HTableDescriptor htd = TEST_UTIL.createTableDescriptor(name.getMethodName());
  this.r = TEST_UTIL.createLocalHRegion(htd, null, null);
  HRegionIncommon hri = new HRegionIncommon(r);

  try {
    HBaseTestCase.addContent(hri, Bytes.toString(fam1), Bytes.toString(col1),
        firstRowBytes, secondRowBytes);
    HBaseTestCase.addContent(hri, Bytes.toString(fam2), Bytes.toString(col1),
        firstRowBytes, secondRowBytes);

    Delete dc = new Delete(firstRowBytes);
    /* delete column1 of firstRow */
    dc.deleteColumns(fam1, col1);
    r.delete(dc);
    r.flush(true);

    HBaseTestCase.addContent(hri, Bytes.toString(fam1), Bytes.toString(col1),
        secondRowBytes, thirdRowBytes);
    HBaseTestCase.addContent(hri, Bytes.toString(fam2), Bytes.toString(col1),
        secondRowBytes, thirdRowBytes);
    r.flush(true);

    InternalScanner s = r.getScanner(new Scan());
    // run a major compact, column1 of firstRow will be cleaned.
    r.compact(true);

    List<Cell> results = new ArrayList<Cell>();
    s.next(results);

    // make sure returns column2 of firstRow
    assertTrue("result is not correct, keyValues : " + results,
        results.size() == 1);
    assertTrue(CellUtil.matchingRow(results.get(0), firstRowBytes)); 
    assertTrue(CellUtil.matchingFamily(results.get(0), fam2));

    results = new ArrayList<Cell>();
    s.next(results);

    // get secondRow
    assertTrue(results.size() == 2);
    assertTrue(CellUtil.matchingRow(results.get(0), secondRowBytes));
    assertTrue(CellUtil.matchingFamily(results.get(0), fam1));
    assertTrue(CellUtil.matchingFamily(results.get(1), fam2));
  } finally {
    HRegion.closeHRegion(this.r);
  }
}
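One detail worth copying carefully: the test never closes the InternalScanner it opens. In your own code, close it in a finally block, as in this sketch under the same setup as above:

InternalScanner s = r.getScanner(new Scan());
try {
  List<Cell> results = new ArrayList<Cell>();
  while (s.next(results)) {
    results.clear(); // consume rows; next(...) returns false after the last row
  }
} finally {
  s.close(); // release scanner resources even if an assertion fails in between
}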
Project: ditb    File: TestMajorCompaction.java
private void createStoreFile(final Region region, String family) throws IOException {
  HRegionIncommon loader = new HRegionIncommon(region);
  HBaseTestCase.addContent(loader, family);
  loader.flushcache();
}
Project: ditb    File: TestMajorCompaction.java
private void createSmallerStoreFile(final Region region) throws IOException {
  HRegionIncommon loader = new HRegionIncommon(region);
  HBaseTestCase.addContent(loader, Bytes.toString(COLUMN_FAMILY), "bbb".getBytes(), null);
  loader.flushcache();
}
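The smaller file comes from the start-row argument: passing "bbb".getBytes() makes addContent begin loading at row "bbb" rather than at the start of the key space, so the flushed store file holds fewer rows than the ones produced by createStoreFile. The major-compaction tests rely on that size difference when checking which files get selected.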
Project: pbase    File: TestCompaction.java
/**
 * Verify that you can stop a long-running compaction
 * (used during RS shutdown)
 * @throws Exception
 */
@Test
public void testInterruptCompaction() throws Exception {
  assertEquals(0, count());

  // lower the close-check interval (bytes written between abort checks) for this test
  int origWI = HStore.closeCheckInterval;
  HStore.closeCheckInterval = 10*1000; // 10 KB

  try {
    // Create a couple store files w/ 15KB (over 10KB interval)
    int jmax = (int) Math.ceil(15.0/compactionThreshold);
    byte [] pad = new byte[1000]; // 1 KB chunk
    for (int i = 0; i < compactionThreshold; i++) {
      HRegionIncommon loader = new HRegionIncommon(r);
      Put p = new Put(Bytes.add(STARTROW, Bytes.toBytes(i)));
      p.setDurability(Durability.SKIP_WAL);
      for (int j = 0; j < jmax; j++) {
        p.add(COLUMN_FAMILY, Bytes.toBytes(j), pad);
      }
      HBaseTestCase.addContent(loader, Bytes.toString(COLUMN_FAMILY));
      loader.put(p);
      loader.flushcache();
    }

    HRegion spyR = spy(r);
    doAnswer(new Answer() {
      public Object answer(InvocationOnMock invocation) throws Throwable {
        r.writestate.writesEnabled = false;
        return invocation.callRealMethod();
      }
    }).when(spyR).doRegionCompactionPrep();

    // force a minor compaction, but not before requesting a stop
    spyR.compactStores();

    // ensure that the compaction stopped, all old files are intact,
    Store s = r.stores.get(COLUMN_FAMILY);
    assertEquals(compactionThreshold, s.getStorefilesCount());
    assertTrue(s.getStorefilesSize() > 15*1000);
    // and no new store files persisted past compactStores()
    FileStatus[] ls = r.getFilesystem().listStatus(r.getRegionFileSystem().getTempDir());
    assertEquals(0, ls.length);

  } finally {
    // don't mess up future tests
    r.writestate.writesEnabled = true;
    HStore.closeCheckInterval = origWI;

    // Delete all Store information once done using
    for (int i = 0; i < compactionThreshold; i++) {
      Delete delete = new Delete(Bytes.add(STARTROW, Bytes.toBytes(i)));
      byte [][] famAndQf = {COLUMN_FAMILY, null};
      delete.deleteFamily(famAndQf[0]);
      r.delete(delete);
    }
    r.flushcache();

    // Multiple versions allowed for an entry, so the delete isn't enough
    // Lower TTL and expire to ensure that all our entries have been wiped
    final int ttl = 1000;
    for (Store hstore: this.r.stores.values()) {
      HStore store = (HStore)hstore;
      ScanInfo old = store.getScanInfo();
      ScanInfo si = new ScanInfo(old.getFamily(),
          old.getMinVersions(), old.getMaxVersions(), ttl,
          old.getKeepDeletedCells(), 0, old.getComparator());
      store.setScanInfo(si);
    }
    Thread.sleep(ttl);

    r.compactStores(true);
    assertEquals(0, count());
  }
}
Project: pbase    File: TestCompaction.java
private void createStoreFile(final HRegion region, String family) throws IOException {
  HRegionIncommon loader = new HRegionIncommon(region);
  HBaseTestCase.addContent(loader, family);
  loader.flushcache();
}
Project: pbase    File: TestMinorCompaction.java
private void testMinorCompactionWithDelete(Delete delete, int expectedResultsAfterDelete) throws Exception {
  HRegionIncommon loader = new HRegionIncommon(r);
  for (int i = 0; i < compactionThreshold + 1; i++) {
    HBaseTestCase.addContent(loader, Bytes.toString(fam1), Bytes.toString(col1), firstRowBytes,
      thirdRowBytes, i);
    HBaseTestCase.addContent(loader, Bytes.toString(fam1), Bytes.toString(col2), firstRowBytes,
      thirdRowBytes, i);
    HBaseTestCase.addContent(loader, Bytes.toString(fam2), Bytes.toString(col1), firstRowBytes,
      thirdRowBytes, i);
    HBaseTestCase.addContent(loader, Bytes.toString(fam2), Bytes.toString(col2), firstRowBytes,
      thirdRowBytes, i);
    r.flushcache();
  }

  Result result = r.get(new Get(firstRowBytes).addColumn(fam1, col1).setMaxVersions(100));
  assertEquals(compactionThreshold, result.size());
  result = r.get(new Get(secondRowBytes).addColumn(fam2, col2).setMaxVersions(100));
  assertEquals(compactionThreshold, result.size());

  // Now add deletes to memstore and then flush it.  That will put us over
  // the compaction threshold of 3 store files.  Compacting these store files
  // should result in a compacted store file that has no references to the
  // deleted row.
  r.delete(delete);

  // Make sure that we have only deleted family2 from secondRowBytes
  result = r.get(new Get(secondRowBytes).addColumn(fam2, col2).setMaxVersions(100));
  assertEquals(expectedResultsAfterDelete, result.size());
  // but we still have firstrow
  result = r.get(new Get(firstRowBytes).addColumn(fam1, col1).setMaxVersions(100));
  assertEquals(compactionThreshold, result.size());

  r.flushcache();
  // should not change anything.
  // Let us check again

  // Make sure that we have only deleted family2 from secondRowBytes
  result = r.get(new Get(secondRowBytes).addColumn(fam2, col2).setMaxVersions(100));
  assertEquals(expectedResultsAfterDelete, result.size());
  // but we still have firstrow
  result = r.get(new Get(firstRowBytes).addColumn(fam1, col1).setMaxVersions(100));
  assertEquals(compactionThreshold, result.size());

  // do a compaction
  Store store2 = this.r.stores.get(fam2);
  int numFiles1 = store2.getStorefiles().size();
  assertTrue("Was expecting to see 4 store files", numFiles1 > compactionThreshold); // > 3
  ((HStore)store2).compactRecentForTestingAssumingDefaultPolicy(compactionThreshold);   // = 3
  int numFiles2 = store2.getStorefiles().size();
  // Check that we did compact
  assertTrue("Number of store files should go down", numFiles1 > numFiles2);
  // Check that it was a minor compaction.
  assertTrue("Was not supposed to be a major compaction", numFiles2 > 1);

  // Make sure that we have only deleted family2 from secondRowBytes
  result = r.get(new Get(secondRowBytes).addColumn(fam2, col2).setMaxVersions(100));
  assertEquals(expectedResultsAfterDelete, result.size());
  // but we still have firstrow
  result = r.get(new Get(firstRowBytes).addColumn(fam1, col1).setMaxVersions(100));
  assertEquals(compactionThreshold, result.size());
}
Project: pbase    File: TestScanner.java
/**
 * Make sure the scanner returns correct results when a major compaction is run
 * concurrently with deletes.
 *
 * @throws Exception
 */
@Test
@SuppressWarnings("deprecation")
public void testScanAndConcurrentMajorCompact() throws Exception {
  HTableDescriptor htd = TEST_UTIL.createTableDescriptor(name.getMethodName());
  this.r = TEST_UTIL.createLocalHRegion(htd, null, null);
  HRegionIncommon hri = new HRegionIncommon(r);

  try {
    HBaseTestCase.addContent(hri, Bytes.toString(fam1), Bytes.toString(col1),
        firstRowBytes, secondRowBytes);
    HBaseTestCase.addContent(hri, Bytes.toString(fam2), Bytes.toString(col1),
        firstRowBytes, secondRowBytes);

    Delete dc = new Delete(firstRowBytes);
    /* delete column1 of firstRow */
    dc.deleteColumns(fam1, col1);
    r.delete(dc);
    r.flushcache();

    HBaseTestCase.addContent(hri, Bytes.toString(fam1), Bytes.toString(col1),
        secondRowBytes, thirdRowBytes);
    HBaseTestCase.addContent(hri, Bytes.toString(fam2), Bytes.toString(col1),
        secondRowBytes, thirdRowBytes);
    r.flushcache();

    InternalScanner s = r.getScanner(new Scan());
    // run a major compact, column1 of firstRow will be cleaned.
    r.compactStores(true);

    List<Cell> results = new ArrayList<Cell>();
    s.next(results);

    // make sure returns column2 of firstRow
    assertTrue("result is not correct, keyValues : " + results,
        results.size() == 1);
    assertTrue(CellUtil.matchingRow(results.get(0), firstRowBytes)); 
    assertTrue(CellUtil.matchingFamily(results.get(0), fam2));

    results = new ArrayList<Cell>();
    s.next(results);

    // get secondRow
    assertTrue(results.size() == 2);
    assertTrue(CellUtil.matchingRow(results.get(0), secondRowBytes));
    assertTrue(CellUtil.matchingFamily(results.get(0), fam1));
    assertTrue(CellUtil.matchingFamily(results.get(1), fam2));
  } finally {
    HRegion.closeHRegion(this.r);
  }
}
Project: pbase    File: TestMajorCompaction.java
private void createStoreFile(final HRegion region, String family) throws IOException {
  HRegionIncommon loader = new HRegionIncommon(region);
  HBaseTestCase.addContent(loader, family);
  loader.flushcache();
}
Project: pbase    File: TestMajorCompaction.java
private void createSmallerStoreFile(final HRegion region) throws IOException {
  HRegionIncommon loader = new HRegionIncommon(region);
  HBaseTestCase.addContent(loader, Bytes.toString(COLUMN_FAMILY), "bbb".getBytes(), null);
  loader.flushcache();
}
Project: HIndex    File: TestCompaction.java
/**
 * Verify that you can stop a long-running compaction
 * (used during RS shutdown)
 * @throws Exception
 */
@Test
public void testInterruptCompaction() throws Exception {
  assertEquals(0, count());

  // lower the close-check interval (bytes written between abort checks) for this test
  int origWI = HStore.closeCheckInterval;
  HStore.closeCheckInterval = 10*1000; // 10 KB

  try {
    // Create a couple store files w/ 15KB (over 10KB interval)
    int jmax = (int) Math.ceil(15.0/compactionThreshold);
    byte [] pad = new byte[1000]; // 1 KB chunk
    for (int i = 0; i < compactionThreshold; i++) {
      HRegionIncommon loader = new HRegionIncommon(r);
      Put p = new Put(Bytes.add(STARTROW, Bytes.toBytes(i)));
      p.setDurability(Durability.SKIP_WAL);
      for (int j = 0; j < jmax; j++) {
        p.add(COLUMN_FAMILY, Bytes.toBytes(j), pad);
      }
      HBaseTestCase.addContent(loader, Bytes.toString(COLUMN_FAMILY));
      loader.put(p);
      loader.flushcache();
    }

    HRegion spyR = spy(r);
    doAnswer(new Answer() {
      public Object answer(InvocationOnMock invocation) throws Throwable {
        r.writestate.writesEnabled = false;
        return invocation.callRealMethod();
      }
    }).when(spyR).doRegionCompactionPrep();

    // force a minor compaction, but not before requesting a stop
    spyR.compactStores();

    // ensure that the compaction stopped, all old files are intact,
    Store s = r.stores.get(COLUMN_FAMILY);
    assertEquals(compactionThreshold, s.getStorefilesCount());
    assertTrue(s.getStorefilesSize() > 15*1000);
    // and no new store files persisted past compactStores()
    FileStatus[] ls = r.getFilesystem().listStatus(r.getRegionFileSystem().getTempDir());
    assertEquals(0, ls.length);

  } finally {
    // don't mess up future tests
    r.writestate.writesEnabled = true;
    HStore.closeCheckInterval = origWI;

    // Delete all Store information once done using
    for (int i = 0; i < compactionThreshold; i++) {
      Delete delete = new Delete(Bytes.add(STARTROW, Bytes.toBytes(i)));
      byte [][] famAndQf = {COLUMN_FAMILY, null};
      delete.deleteFamily(famAndQf[0]);
      r.delete(delete);
    }
    r.flushcache();

    // Multiple versions allowed for an entry, so the delete isn't enough
    // Lower TTL and expire to ensure that all our entries have been wiped
    final int ttl = 1000;
    for (Store hstore: this.r.stores.values()) {
      HStore store = (HStore)hstore;
      ScanInfo old = store.getScanInfo();
      ScanInfo si = new ScanInfo(old.getFamily(),
          old.getMinVersions(), old.getMaxVersions(), ttl,
          old.getKeepDeletedCells(), 0, old.getComparator());
      store.setScanInfo(si);
    }
    Thread.sleep(ttl);

    r.compactStores(true);
    assertEquals(0, count());
  }
}
Project: HIndex    File: TestCompaction.java
private void createStoreFile(final HRegion region, String family) throws IOException {
  HRegionIncommon loader = new HRegionIncommon(region);
  HBaseTestCase.addContent(loader, family);
  loader.flushcache();
}
Project: HIndex    File: TestMinorCompaction.java
private void testMinorCompactionWithDelete(Delete delete, int expectedResultsAfterDelete) throws Exception {
  HRegionIncommon loader = new HRegionIncommon(r);
  for (int i = 0; i < compactionThreshold + 1; i++) {
    HBaseTestCase.addContent(loader, Bytes.toString(fam1), Bytes.toString(col1), firstRowBytes,
      thirdRowBytes, i);
    HBaseTestCase.addContent(loader, Bytes.toString(fam1), Bytes.toString(col2), firstRowBytes,
      thirdRowBytes, i);
    HBaseTestCase.addContent(loader, Bytes.toString(fam2), Bytes.toString(col1), firstRowBytes,
      thirdRowBytes, i);
    HBaseTestCase.addContent(loader, Bytes.toString(fam2), Bytes.toString(col2), firstRowBytes,
      thirdRowBytes, i);
    r.flushcache();
  }

  Result result = r.get(new Get(firstRowBytes).addColumn(fam1, col1).setMaxVersions(100));
  assertEquals(compactionThreshold, result.size());
  result = r.get(new Get(secondRowBytes).addColumn(fam2, col2).setMaxVersions(100));
  assertEquals(compactionThreshold, result.size());

  // Now add deletes to memstore and then flush it.  That will put us over
  // the compaction threshold of 3 store files.  Compacting these store files
  // should result in a compacted store file that has no references to the
  // deleted row.
  r.delete(delete);

  // Make sure that we have only deleted family2 from secondRowBytes
  result = r.get(new Get(secondRowBytes).addColumn(fam2, col2).setMaxVersions(100));
  assertEquals(expectedResultsAfterDelete, result.size());
  // but we still have firstrow
  result = r.get(new Get(firstRowBytes).addColumn(fam1, col1).setMaxVersions(100));
  assertEquals(compactionThreshold, result.size());

  r.flushcache();
  // should not change anything.
  // Let us check again

  // Make sure that we have only deleted family2 from secondRowBytes
  result = r.get(new Get(secondRowBytes).addColumn(fam2, col2).setMaxVersions(100));
  assertEquals(expectedResultsAfterDelete, result.size());
  // but we still have firstrow
  result = r.get(new Get(firstRowBytes).addColumn(fam1, col1).setMaxVersions(100));
  assertEquals(compactionThreshold, result.size());

  // do a compaction
  Store store2 = this.r.stores.get(fam2);
  int numFiles1 = store2.getStorefiles().size();
  assertTrue("Was expecting to see 4 store files", numFiles1 > compactionThreshold); // > 3
  ((HStore)store2).compactRecentForTestingAssumingDefaultPolicy(compactionThreshold);   // = 3
  int numFiles2 = store2.getStorefiles().size();
  // Check that we did compact
  assertTrue("Number of store files should go down", numFiles1 > numFiles2);
  // Check that it was a minor compaction.
  assertTrue("Was not supposed to be a major compaction", numFiles2 > 1);

  // Make sure that we have only deleted family2 from secondRowBytes
  result = r.get(new Get(secondRowBytes).addColumn(fam2, col2).setMaxVersions(100));
  assertEquals(expectedResultsAfterDelete, result.size());
  // but we still have firstrow
  result = r.get(new Get(firstRowBytes).addColumn(fam1, col1).setMaxVersions(100));
  assertEquals(compactionThreshold, result.size());
}
Project: HIndex    File: TestScanner.java
/**
 * Make sure the scanner returns correct results when a major compaction is run
 * concurrently with deletes.
 *
 * @throws Exception
 */
@Test
@SuppressWarnings("deprecation")
public void testScanAndConcurrentMajorCompact() throws Exception {
  HTableDescriptor htd = TEST_UTIL.createTableDescriptor(name.getMethodName());
  this.r = TEST_UTIL.createLocalHRegion(htd, null, null);
  HRegionIncommon hri = new HRegionIncommon(r);

  try {
    HBaseTestCase.addContent(hri, Bytes.toString(fam1), Bytes.toString(col1),
        firstRowBytes, secondRowBytes);
    HBaseTestCase.addContent(hri, Bytes.toString(fam2), Bytes.toString(col1),
        firstRowBytes, secondRowBytes);

    Delete dc = new Delete(firstRowBytes);
    /* delete column1 of firstRow */
    dc.deleteColumns(fam1, col1);
    r.delete(dc);
    r.flushcache();

    HBaseTestCase.addContent(hri, Bytes.toString(fam1), Bytes.toString(col1),
        secondRowBytes, thirdRowBytes);
    HBaseTestCase.addContent(hri, Bytes.toString(fam2), Bytes.toString(col1),
        secondRowBytes, thirdRowBytes);
    r.flushcache();

    InternalScanner s = r.getScanner(new Scan());
    // run a major compact, column1 of firstRow will be cleaned.
    r.compactStores(true);

    List<Cell> results = new ArrayList<Cell>();
    s.next(results);

    // make sure returns column2 of firstRow
    assertTrue("result is not correct, keyValues : " + results,
        results.size() == 1);
    assertTrue(CellUtil.matchingRow(results.get(0), firstRowBytes)); 
    assertTrue(CellUtil.matchingFamily(results.get(0), fam2));

    results = new ArrayList<Cell>();
    s.next(results);

    // get secondRow
    assertTrue(results.size() == 2);
    assertTrue(CellUtil.matchingRow(results.get(0), secondRowBytes));
    assertTrue(CellUtil.matchingFamily(results.get(0), fam1));
    assertTrue(CellUtil.matchingFamily(results.get(1), fam2));
  } finally {
    HRegion.closeHRegion(this.r);
  }
}
Project: HIndex    File: TestMajorCompaction.java
private void createStoreFile(final HRegion region, String family) throws IOException {
  HRegionIncommon loader = new HRegionIncommon(region);
  HBaseTestCase.addContent(loader, family);
  loader.flushcache();
}
Project: HIndex    File: TestMajorCompaction.java
private void createSmallerStoreFile(final HRegion region) throws IOException {
  HRegionIncommon loader = new HRegionIncommon(region);
  HBaseTestCase.addContent(loader, Bytes.toString(COLUMN_FAMILY), "bbb".getBytes(), null);
  loader.flushcache();
}
Project: PyroDB    File: TestCompaction.java
/**
 * Verify that you can stop a long-running compaction
 * (used during RS shutdown)
 * @throws Exception
 */
@Test
public void testInterruptCompaction() throws Exception {
  assertEquals(0, count());

  // lower the close-check interval (bytes written between abort checks) for this test
  int origWI = HStore.closeCheckInterval;
  HStore.closeCheckInterval = 10*1000; // 10 KB

  try {
    // Create a couple store files w/ 15KB (over 10KB interval)
    int jmax = (int) Math.ceil(15.0/compactionThreshold);
    byte [] pad = new byte[1000]; // 1 KB chunk
    for (int i = 0; i < compactionThreshold; i++) {
      HRegionIncommon loader = new HRegionIncommon(r);
      Put p = new Put(Bytes.add(STARTROW, Bytes.toBytes(i)));
      p.setDurability(Durability.SKIP_WAL);
      for (int j = 0; j < jmax; j++) {
        p.add(COLUMN_FAMILY, Bytes.toBytes(j), pad);
      }
      HBaseTestCase.addContent(loader, Bytes.toString(COLUMN_FAMILY));
      loader.put(p);
      loader.flushcache();
    }

    HRegion spyR = spy(r);
    doAnswer(new Answer() {
      public Object answer(InvocationOnMock invocation) throws Throwable {
        r.writestate.writesEnabled = false;
        return invocation.callRealMethod();
      }
    }).when(spyR).doRegionCompactionPrep();

    // force a minor compaction, but not before requesting a stop
    spyR.compactStores();

    // ensure that the compaction stopped, all old files are intact,
    Store s = r.stores.get(COLUMN_FAMILY);
    assertEquals(compactionThreshold, s.getStorefilesCount());
    assertTrue(s.getStorefilesSize() > 15*1000);
    // and no new store files persisted past compactStores()
    FileStatus[] ls = r.getFilesystem().listStatus(r.getRegionFileSystem().getTempDir());
    assertEquals(0, ls.length);

  } finally {
    // don't mess up future tests
    r.writestate.writesEnabled = true;
    HStore.closeCheckInterval = origWI;

    // Delete all Store information once done using
    for (int i = 0; i < compactionThreshold; i++) {
      Delete delete = new Delete(Bytes.add(STARTROW, Bytes.toBytes(i)));
      byte [][] famAndQf = {COLUMN_FAMILY, null};
      delete.deleteFamily(famAndQf[0]);
      r.delete(delete);
    }
    r.flushcache();

    // Multiple versions allowed for an entry, so the delete isn't enough
    // Lower TTL and expire to ensure that all our entries have been wiped
    final int ttl = 1000;
    for (Store hstore: this.r.stores.values()) {
      HStore store = (HStore)hstore;
      ScanInfo old = store.getScanInfo();
      ScanInfo si = new ScanInfo(old.getFamily(),
          old.getMinVersions(), old.getMaxVersions(), ttl,
          old.getKeepDeletedCells(), 0, old.getComparator());
      store.setScanInfo(si);
    }
    Thread.sleep(ttl);

    r.compactStores(true);
    assertEquals(0, count());
  }
}
Project: PyroDB    File: TestCompaction.java
private void createStoreFile(final HRegion region, String family) throws IOException {
  HRegionIncommon loader = new HRegionIncommon(region);
  HBaseTestCase.addContent(loader, family);
  loader.flushcache();
}
Project: PyroDB    File: TestMinorCompaction.java
private void testMinorCompactionWithDelete(Delete delete, int expectedResultsAfterDelete) throws Exception {
  HRegionIncommon loader = new HRegionIncommon(r);
  for (int i = 0; i < compactionThreshold + 1; i++) {
    HBaseTestCase.addContent(loader, Bytes.toString(fam1), Bytes.toString(col1), firstRowBytes,
      thirdRowBytes, i);
    HBaseTestCase.addContent(loader, Bytes.toString(fam1), Bytes.toString(col2), firstRowBytes,
      thirdRowBytes, i);
    HBaseTestCase.addContent(loader, Bytes.toString(fam2), Bytes.toString(col1), firstRowBytes,
      thirdRowBytes, i);
    HBaseTestCase.addContent(loader, Bytes.toString(fam2), Bytes.toString(col2), firstRowBytes,
      thirdRowBytes, i);
    r.flushcache();
  }

  Result result = r.get(new Get(firstRowBytes).addColumn(fam1, col1).setMaxVersions(100));
  assertEquals(compactionThreshold, result.size());
  result = r.get(new Get(secondRowBytes).addColumn(fam2, col2).setMaxVersions(100));
  assertEquals(compactionThreshold, result.size());

  // Now add deletes to memstore and then flush it.  That will put us over
  // the compaction threshold of 3 store files.  Compacting these store files
  // should result in a compacted store file that has no references to the
  // deleted row.
  r.delete(delete);

  // Make sure that we have only deleted family2 from secondRowBytes
  result = r.get(new Get(secondRowBytes).addColumn(fam2, col2).setMaxVersions(100));
  assertEquals(expectedResultsAfterDelete, result.size());
  // but we still have firstrow
  result = r.get(new Get(firstRowBytes).addColumn(fam1, col1).setMaxVersions(100));
  assertEquals(compactionThreshold, result.size());

  r.flushcache();
  // should not change anything.
  // Let us check again

  // Make sure that we have only deleted family2 from secondRowBytes
  result = r.get(new Get(secondRowBytes).addColumn(fam2, col2).setMaxVersions(100));
  assertEquals(expectedResultsAfterDelete, result.size());
  // but we still have firstrow
  result = r.get(new Get(firstRowBytes).addColumn(fam1, col1).setMaxVersions(100));
  assertEquals(compactionThreshold, result.size());

  // do a compaction
  Store store2 = this.r.stores.get(fam2);
  int numFiles1 = store2.getStorefiles().size();
  assertTrue("Was expecting to see 4 store files", numFiles1 > compactionThreshold); // > 3
  ((HStore)store2).compactRecentForTestingAssumingDefaultPolicy(compactionThreshold);   // = 3
  int numFiles2 = store2.getStorefiles().size();
  // Check that we did compact
  assertTrue("Number of store files should go down", numFiles1 > numFiles2);
  // Check that it was a minor compaction.
  assertTrue("Was not supposed to be a major compaction", numFiles2 > 1);

  // Make sure that we have only deleted family2 from secondRowBytes
  result = r.get(new Get(secondRowBytes).addColumn(fam2, col2).setMaxVersions(100));
  assertEquals(expectedResultsAfterDelete, result.size());
  // but we still have firstrow
  result = r.get(new Get(firstRowBytes).addColumn(fam1, col1).setMaxVersions(100));
  assertEquals(compactionThreshold, result.size());
}
Project: PyroDB    File: TestScanner.java
/**
 * Make sure the scanner returns correct results when a major compaction is run
 * concurrently with deletes.
 *
 * @throws Exception
 */
@Test
@SuppressWarnings("deprecation")
public void testScanAndConcurrentMajorCompact() throws Exception {
  HTableDescriptor htd = TEST_UTIL.createTableDescriptor(name.getMethodName());
  this.r = TEST_UTIL.createLocalHRegion(htd, null, null);
  HRegionIncommon hri = new HRegionIncommon(r);

  try {
    HBaseTestCase.addContent(hri, Bytes.toString(fam1), Bytes.toString(col1),
        firstRowBytes, secondRowBytes);
    HBaseTestCase.addContent(hri, Bytes.toString(fam2), Bytes.toString(col1),
        firstRowBytes, secondRowBytes);

    Delete dc = new Delete(firstRowBytes);
    /* delete column1 of firstRow */
    dc.deleteColumns(fam1, col1);
    r.delete(dc);
    r.flushcache();

    HBaseTestCase.addContent(hri, Bytes.toString(fam1), Bytes.toString(col1),
        secondRowBytes, thirdRowBytes);
    HBaseTestCase.addContent(hri, Bytes.toString(fam2), Bytes.toString(col1),
        secondRowBytes, thirdRowBytes);
    r.flushcache();

    InternalScanner s = r.getScanner(new Scan());
    // run a major compact, column1 of firstRow will be cleaned.
    r.compactStores(true);

    List<Cell> results = new ArrayList<Cell>();
    s.next(results);

    // make sure returns column2 of firstRow
    assertTrue("result is not correct, keyValues : " + results,
        results.size() == 1);
    assertTrue(CellUtil.matchingRow(results.get(0), firstRowBytes)); 
    assertTrue(CellUtil.matchingFamily(results.get(0), fam2));

    results = new ArrayList<Cell>();
    s.next(results);

    // get secondRow
    assertTrue(results.size() == 2);
    assertTrue(CellUtil.matchingRow(results.get(0), secondRowBytes));
    assertTrue(CellUtil.matchingFamily(results.get(0), fam1));
    assertTrue(CellUtil.matchingFamily(results.get(1), fam2));
  } finally {
    HRegion.closeHRegion(this.r);
  }
}
Project: PyroDB    File: TestMajorCompaction.java
private void createStoreFile(final HRegion region, String family) throws IOException {
  HRegionIncommon loader = new HRegionIncommon(region);
  HBaseTestCase.addContent(loader, family);
  loader.flushcache();
}
Project: PyroDB    File: TestMajorCompaction.java
private void createSmallerStoreFile(final HRegion region) throws IOException {
  HRegionIncommon loader = new HRegionIncommon(region);
  HBaseTestCase.addContent(loader, Bytes.toString(COLUMN_FAMILY), "bbb".getBytes(), null);
  loader.flushcache();
}
Project: c5    File: TestCompaction.java
private void testMinorCompactionWithDelete(Delete delete, int expectedResultsAfterDelete) throws Exception {
  HRegionIncommon loader = new HRegionIncommon(r);
  for (int i = 0; i < compactionThreshold + 1; i++) {
    HBaseTestCase.addContent(loader, Bytes.toString(fam1), Bytes.toString(col1), firstRowBytes, thirdRowBytes, i);
    HBaseTestCase.addContent(loader, Bytes.toString(fam1), Bytes.toString(col2), firstRowBytes, thirdRowBytes, i);
    HBaseTestCase.addContent(loader, Bytes.toString(fam2), Bytes.toString(col1), firstRowBytes, thirdRowBytes, i);
    HBaseTestCase.addContent(loader, Bytes.toString(fam2), Bytes.toString(col2), firstRowBytes, thirdRowBytes, i);
    r.flushcache();
  }

  Result result = r.get(new Get(firstRowBytes).addColumn(fam1, col1).setMaxVersions(100));
  assertEquals(compactionThreshold, result.size());
  result = r.get(new Get(secondRowBytes).addColumn(fam2, col2).setMaxVersions(100));
  assertEquals(compactionThreshold, result.size());

  // Now add deletes to memstore and then flush it.  That will put us over
  // the compaction threshold of 3 store files.  Compacting these store files
  // should result in a compacted store file that has no references to the
  // deleted row.
  r.delete(delete);

  // Make sure that we have only deleted family2 from secondRowBytes
  result = r.get(new Get(secondRowBytes).addColumn(fam2, col2).setMaxVersions(100));
  assertEquals(expectedResultsAfterDelete, result.size());
  // but we still have firstrow
  result = r.get(new Get(firstRowBytes).addColumn(fam1, col1).setMaxVersions(100));
  assertEquals(compactionThreshold, result.size());

  r.flushcache();
  // should not change anything.
  // Let us check again

  // Make sure that we have only deleted family2 from secondRowBytes
  result = r.get(new Get(secondRowBytes).addColumn(fam2, col2).setMaxVersions(100));
  assertEquals(expectedResultsAfterDelete, result.size());
  // but we still have firstrow
  result = r.get(new Get(firstRowBytes).addColumn(fam1, col1).setMaxVersions(100));
  assertEquals(compactionThreshold, result.size());

  // do a compaction
  Store store2 = this.r.stores.get(fam2);
  int numFiles1 = store2.getStorefiles().size();
  assertTrue("Was expecting to see 4 store files", numFiles1 > compactionThreshold); // > 3
  ((HStore)store2).compactRecentForTestingAssumingDefaultPolicy(compactionThreshold);   // = 3
  int numFiles2 = store2.getStorefiles().size();
  // Check that we did compact
  assertTrue("Number of store files should go down", numFiles1 > numFiles2);
  // Check that it was a minor compaction.
  assertTrue("Was not supposed to be a major compaction", numFiles2 > 1);

  // Make sure that we have only deleted family2 from secondRowBytes
  result = r.get(new Get(secondRowBytes).addColumn(fam2, col2).setMaxVersions(100));
  assertEquals(expectedResultsAfterDelete, result.size());
  // but we still have firstrow
  result = r.get(new Get(firstRowBytes).addColumn(fam1, col1).setMaxVersions(100));
  assertEquals(compactionThreshold, result.size());
}
Project: c5    File: TestCompaction.java
/**
 * Verify that you can stop a long-running compaction
 * (used during RS shutdown)
 * @throws Exception
 */
@Test
public void testInterruptCompaction() throws Exception {
  assertEquals(0, count());

  // lower the close-check interval (bytes written between abort checks) for this test
  int origWI = HStore.closeCheckInterval;
  HStore.closeCheckInterval = 10*1000; // 10 KB

  try {
    // Create a couple store files w/ 15KB (over 10KB interval)
    int jmax = (int) Math.ceil(15.0/compactionThreshold);
    byte [] pad = new byte[1000]; // 1 KB chunk
    for (int i = 0; i < compactionThreshold; i++) {
      HRegionIncommon loader = new HRegionIncommon(r);
      Put p = new Put(Bytes.add(STARTROW, Bytes.toBytes(i)));
      p.setDurability(Durability.SKIP_WAL);
      for (int j = 0; j < jmax; j++) {
        p.add(COLUMN_FAMILY, Bytes.toBytes(j), pad);
      }
      HBaseTestCase.addContent(loader, Bytes.toString(COLUMN_FAMILY));
      loader.put(p);
      loader.flushcache();
    }

    HRegion spyR = spy(r);
    doAnswer(new Answer() {
      public Object answer(InvocationOnMock invocation) throws Throwable {
        r.writestate.writesEnabled = false;
        return invocation.callRealMethod();
      }
    }).when(spyR).doRegionCompactionPrep();

    // force a minor compaction, but not before requesting a stop
    spyR.compactStores();

    // ensure that the compaction stopped, all old files are intact,
    Store s = r.stores.get(COLUMN_FAMILY);
    assertEquals(compactionThreshold, s.getStorefilesCount());
    assertTrue(s.getStorefilesSize() > 15*1000);
    // and no new store files persisted past compactStores()
    FileStatus[] ls = r.getFilesystem().listStatus(r.getRegionFileSystem().getTempDir());
    assertEquals(0, ls.length);

  } finally {
    // don't mess up future tests
    r.writestate.writesEnabled = true;
    HStore.closeCheckInterval = origWI;

    // Delete all Store information once done using
    for (int i = 0; i < compactionThreshold; i++) {
      Delete delete = new Delete(Bytes.add(STARTROW, Bytes.toBytes(i)));
      byte [][] famAndQf = {COLUMN_FAMILY, null};
      delete.deleteFamily(famAndQf[0]);
      r.delete(delete);
    }
    r.flushcache();

    // Multiple versions allowed for an entry, so the delete isn't enough
    // Lower TTL and expire to ensure that all our entries have been wiped
    final int ttl = 1000;
    for (Store hstore: this.r.stores.values()) {
      HStore store = (HStore)hstore;
      ScanInfo old = store.getScanInfo();
      ScanInfo si = new ScanInfo(old.getFamily(),
          old.getMinVersions(), old.getMaxVersions(), ttl,
          old.getKeepDeletedCells(), 0, old.getComparator());
      store.setScanInfo(si);
    }
    Thread.sleep(ttl);

    r.compactStores(true);
    assertEquals(0, count());
  }
}
Project: c5    File: TestCompaction.java
private void createStoreFile(final HRegion region, String family) throws IOException {
  HRegionIncommon loader = new HRegionIncommon(region);
  HBaseTestCase.addContent(loader, family);
  loader.flushcache();
}
Project: c5    File: TestCompaction.java
private void createSmallerStoreFile(final HRegion region) throws IOException {
  HRegionIncommon loader = new HRegionIncommon(region);
  HBaseTestCase.addContent(loader, Bytes.toString(COLUMN_FAMILY), "bbb".getBytes(), null);
  loader.flushcache();
}
Project: c5    File: TestScanner.java
/**
 * Make sure the scanner returns correct results when a major compaction is run
 * concurrently with deletes.
 *
 * @throws Exception
 */
@Test
@SuppressWarnings("deprecation")
public void testScanAndConcurrentMajorCompact() throws Exception {
  HTableDescriptor htd = TEST_UTIL.createTableDescriptor(name.getMethodName());
  this.r = TEST_UTIL.createLocalHRegion(htd, null, null);
  HRegionIncommon hri = new HRegionIncommon(r);

  try {
    HBaseTestCase.addContent(hri, Bytes.toString(fam1), Bytes.toString(col1),
        firstRowBytes, secondRowBytes);
    HBaseTestCase.addContent(hri, Bytes.toString(fam2), Bytes.toString(col1),
        firstRowBytes, secondRowBytes);

    Delete dc = new Delete(firstRowBytes);
    /* delete column1 of firstRow */
    dc.deleteColumns(fam1, col1);
    r.delete(dc);
    r.flushcache();

    HBaseTestCase.addContent(hri, Bytes.toString(fam1), Bytes.toString(col1),
        secondRowBytes, thirdRowBytes);
    HBaseTestCase.addContent(hri, Bytes.toString(fam2), Bytes.toString(col1),
        secondRowBytes, thirdRowBytes);
    r.flushcache();

    InternalScanner s = r.getScanner(new Scan());
    // run a major compact, column1 of firstRow will be cleaned.
    r.compactStores(true);

    List<Cell> results = new ArrayList<Cell>();
    s.next(results);

    // make sure returns column2 of firstRow
    assertTrue("result is not correct, keyValues : " + results,
        results.size() == 1);
    assertTrue(CellUtil.matchingRow(results.get(0), firstRowBytes)); 
    assertTrue(CellUtil.matchingFamily(results.get(0), fam2));

    results = new ArrayList<Cell>();
    s.next(results);

    // get secondRow
    assertTrue(results.size() == 2);
    assertTrue(CellUtil.matchingRow(results.get(0), secondRowBytes));
    assertTrue(CellUtil.matchingFamily(results.get(0), fam1));
    assertTrue(CellUtil.matchingFamily(results.get(1), fam2));
  } finally {
    HRegion.closeHRegion(this.r);
  }
}