Java class org.apache.hadoop.hbase.HColumnDescriptor example source code

Project: worm    File: StoreToHbase.java
public void createTable() throws Exception {

        HColumnDescriptor family1 = new HColumnDescriptor(firstFamily);
        HColumnDescriptor family2 = new HColumnDescriptor(secondFamily);
        family1.setMaxVersions(3);
        family2.setMaxVersions(3);

        HTableDescriptor descriptor = new HTableDescriptor(TableName.valueOf(nameSpaceName + ":" + tableName));
        descriptor.addFamily(family1);
        descriptor.addFamily(family2);
        descriptor.setRegionReplication(3); // replication
        admin.createTable(descriptor);
        // admin.split(TableName.valueOf("StudentInfo:student1"),
        // Bytes.toBytes("10"));
        // admin.split(TableName.valueOf("StudentInfo:student1"),
        // Bytes.toBytes("20"));
        // admin.split(TableName.valueOf("StudentInfo:student1"),
        // Bytes.toBytes("30"));
        // admin.split(TableName.valueOf("StudentInfo:student1"),
        // Bytes.toBytes("40"));
        // admin.split(TableName.valueOf("StudentInfo:student1"),
        // Bytes.toBytes("50"));
        // admin.split(TableName.valueOf("StudentInfo:student1"),
        // Bytes.toBytes("60"));
    }
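The createTable() method above relies on fields initialized elsewhere in the class (admin, nameSpaceName, tableName, firstFamily, secondFamily). Below is a minimal setup sketch, assuming the standard HBase 1.x client API (Connection, ConnectionFactory, Admin, NamespaceDescriptor); the values are illustrative, not the worm project's actual wiring.

// Hedged sketch: prepare the Admin and namespace that createTable() expects.
Configuration conf = HBaseConfiguration.create();          // reads hbase-site.xml from the classpath
try (Connection connection = ConnectionFactory.createConnection(conf);
     Admin admin = connection.getAdmin()) {
  String nameSpaceName = "StudentInfo";                    // illustrative, matching the commented split calls above
  // The namespace must exist before creating "nameSpaceName:tableName"
  // (createNamespace throws if it already exists).
  admin.createNamespace(NamespaceDescriptor.create(nameSpaceName).build());
  // ... assign tableName, firstFamily and secondFamily, then call createTable().
}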
Project: SparkDemo    File: HBaseTest.java
/**
 * Create a table with the given column families.
 */
public static void createTable(String tableName, String[] familys)
        throws Exception {
    Admin admin = connection.getAdmin();
    if (admin.tableExists(TableName.valueOf(tableName))) {
        System.out.println("table already exists!");
    } else {
        HTableDescriptor tableDesc = new HTableDescriptor(tableName);
        for (int i = 0; i < familys.length; i++) {
            tableDesc.addFamily(new HColumnDescriptor(familys[i]));
        }
        admin.createTable(tableDesc);
        System.out.println("create table " + tableName + " ok.");
    }
}
Project: ditb    File: TestInvocationRecordFilter.java
@Before
public void setUp() throws Exception {
  HTableDescriptor htd = new HTableDescriptor(
      TableName.valueOf(TABLE_NAME_BYTES));
  htd.addFamily(new HColumnDescriptor(FAMILY_NAME_BYTES));
  HRegionInfo info = new HRegionInfo(htd.getTableName(), null, null, false);
  this.region = HRegion.createHRegion(info, TEST_UTIL.getDataTestDir(),
      TEST_UTIL.getConfiguration(), htd);

  Put put = new Put(ROW_BYTES);
  for (int i = 0; i < 10; i += 2) {
    // puts 0, 2, 4, 6 and 8
    put.add(FAMILY_NAME_BYTES, Bytes.toBytes(QUALIFIER_PREFIX + i), i,
        Bytes.toBytes(VALUE_PREFIX + i));
  }
  this.region.put(put);
  this.region.flush(true);
}
Project: mumu-hbase    File: HBaseTableOperation.java
/**
 * Add a column family to an existing table.
 *
 * @param tableName  table name
 * @param familyName column family name
 */
public void addColumn(String tableName, String familyName) {
    HBaseConfiguration hBaseConfiguration = new HBaseConfiguration();
    Admin admin = hBaseConfiguration.admin();
    TableName tb = TableName.valueOf(tableName);
    try {
        if (admin.tableExists(tb)) {
            HColumnDescriptor columnDescriptor = new HColumnDescriptor(familyName);

            columnDescriptor.setMaxVersions(1); // maximum number of versions kept for the family
            columnDescriptor.setCompressionType(Compression.Algorithm.GZ); // compression algorithm
            columnDescriptor.setCompactionCompressionType(Compression.Algorithm.GZ); // compression algorithm used during compaction

            admin.addColumn(tb, columnDescriptor);
        } else {
            log.info("表名【" + tableName + "】不存在");
        }
    } catch (IOException e) {
        log.error(e);
    } finally {
        hBaseConfiguration.close();
    }
}
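For comparison, the same add-column operation written against the stock client API (the HBaseConfiguration class above, with admin() and close(), is mumu-hbase's own wrapper rather than org.apache.hadoop.hbase.HBaseConfiguration). A sketch assuming an HBase 1.x Admin; the table and family names are illustrative.

// Hedged sketch using only standard client classes.
try (Connection connection = ConnectionFactory.createConnection(HBaseConfiguration.create());
     Admin admin = connection.getAdmin()) {
  TableName tb = TableName.valueOf("demo_table");
  if (admin.tableExists(tb)) {
    HColumnDescriptor columnDescriptor = new HColumnDescriptor("extra_cf");
    columnDescriptor.setMaxVersions(1);
    columnDescriptor.setCompressionType(Compression.Algorithm.GZ);
    admin.addColumn(tb, columnDescriptor);   // asynchronous operation in HBase 1.x
  }
}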
Project: ditb    File: TestWALRecordReader.java
@BeforeClass
public static void setUpBeforeClass() throws Exception {
  // Make block sizes small.
  conf = TEST_UTIL.getConfiguration();
  conf.setInt("dfs.blocksize", 1024 * 1024);
  conf.setInt("dfs.replication", 1);
  TEST_UTIL.startMiniDFSCluster(1);

  conf = TEST_UTIL.getConfiguration();
  fs = TEST_UTIL.getDFSCluster().getFileSystem();

  hbaseDir = TEST_UTIL.createRootDir();

  logDir = new Path(hbaseDir, HConstants.HREGION_LOGDIR_NAME);

  htd = new HTableDescriptor(tableName);
  htd.addFamily(new HColumnDescriptor(family));
}
Project: ditb    File: CacheConfig.java
/**
 * Create a cache configuration using the specified configuration object and
 * family descriptor.
 * @param conf hbase configuration
 * @param family column family configuration
 */
public CacheConfig(Configuration conf, HColumnDescriptor family) {
  this(CacheConfig.instantiateBlockCache(conf),
      family.isBlockCacheEnabled(),
      family.isInMemory(),
      // For the following flags we enable them regardless of per-schema settings
      // if they are enabled in the global configuration.
      conf.getBoolean(CACHE_BLOCKS_ON_WRITE_KEY,
          DEFAULT_CACHE_DATA_ON_WRITE) || family.isCacheDataOnWrite(),
      conf.getBoolean(CACHE_INDEX_BLOCKS_ON_WRITE_KEY,
          DEFAULT_CACHE_INDEXES_ON_WRITE) || family.isCacheIndexesOnWrite(),
      conf.getBoolean(CACHE_BLOOM_BLOCKS_ON_WRITE_KEY,
          DEFAULT_CACHE_BLOOMS_ON_WRITE) || family.isCacheBloomsOnWrite(),
      conf.getBoolean(EVICT_BLOCKS_ON_CLOSE_KEY,
          DEFAULT_EVICT_ON_CLOSE) || family.isEvictBlocksOnClose(),
      conf.getBoolean(CACHE_DATA_BLOCKS_COMPRESSED_KEY, DEFAULT_CACHE_DATA_COMPRESSED),
      conf.getBoolean(PREFETCH_BLOCKS_ON_OPEN_KEY,
          DEFAULT_PREFETCH_ON_OPEN) || family.isPrefetchBlocksOnOpen(),
      conf.getBoolean(HColumnDescriptor.CACHE_DATA_IN_L1,
          HColumnDescriptor.DEFAULT_CACHE_DATA_IN_L1) || family.isCacheDataInL1(),
      conf.getBoolean(DROP_BEHIND_CACHE_COMPACTION_KEY,DROP_BEHIND_CACHE_COMPACTION_DEFAULT)
   );
}
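The per-family flags consulted by this constructor are plain HColumnDescriptor settings. A short sketch of enabling a few of them and building a CacheConfig from the result; it assumes the standard HBase 1.x setters and an illustrative family name.

// Hedged sketch: per-family cache settings that CacheConfig(conf, family) reads.
Configuration conf = HBaseConfiguration.create();
HColumnDescriptor family = new HColumnDescriptor("d");
family.setBlockCacheEnabled(true);        // family.isBlockCacheEnabled()
family.setInMemory(true);                 // family.isInMemory()
family.setCacheDataOnWrite(true);         // OR-ed with CACHE_BLOCKS_ON_WRITE_KEY above
family.setPrefetchBlocksOnOpen(true);     // OR-ed with PREFETCH_BLOCKS_ON_OPEN_KEY above
CacheConfig cacheConf = new CacheConfig(conf, family);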
Project: ditb    File: TestAssignmentManagerOnCluster.java
HRegionInfo createTableAndGetOneRegion(
    final TableName tableName) throws IOException, InterruptedException {
  HTableDescriptor desc = new HTableDescriptor(tableName);
  desc.addFamily(new HColumnDescriptor(FAMILY));
  admin.createTable(desc, Bytes.toBytes("A"), Bytes.toBytes("Z"), 5);

  // wait till the table is assigned
  HMaster master = TEST_UTIL.getHBaseCluster().getMaster();
  long timeoutTime = System.currentTimeMillis() + 1000;
  while (true) {
    List<HRegionInfo> regions = master.getAssignmentManager().
      getRegionStates().getRegionsOfTable(tableName);
    if (regions.size() > 3) {
      return regions.get(2);
    }
    long now = System.currentTimeMillis();
    if (now > timeoutTime) {
      fail("Could not find an online region");
    }
    Thread.sleep(10);
  }
}
Project: ditb    File: TestJoinedScanners.java
private static HRegion initHRegion(byte[] tableName, byte[] startKey, byte[] stopKey,
    String callingMethod, Configuration conf, byte[]... families)
    throws IOException {
  HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(tableName));
  for(byte [] family : families) {
    HColumnDescriptor hcd = new HColumnDescriptor(family);
    hcd.setDataBlockEncoding(DataBlockEncoding.FAST_DIFF);
    htd.addFamily(hcd);
  }
  HRegionInfo info = new HRegionInfo(htd.getTableName(), startKey, stopKey, false);
  Path path = new Path(DIR + callingMethod);
  FileSystem fs = FileSystem.get(conf);
  if (fs.exists(path)) {
    if (!fs.delete(path, true)) {
      throw new IOException("Failed delete of " + path);
    }
  }
  return HRegion.createHRegion(info, path, conf, htd);
}
Project: ditb    File: TestTableDescriptorModification.java
@Test
public void testModifyTable() throws IOException {
  Admin admin = TEST_UTIL.getHBaseAdmin();
  // Create a table with one family
  HTableDescriptor baseHtd = new HTableDescriptor(TABLE_NAME);
  baseHtd.addFamily(new HColumnDescriptor(FAMILY_0));
  admin.createTable(baseHtd);
  admin.disableTable(TABLE_NAME);
  try {
    // Verify the table descriptor
    verifyTableDescriptor(TABLE_NAME, FAMILY_0);

    // Modify the table adding another family and verify the descriptor
    HTableDescriptor modifiedHtd = new HTableDescriptor(TABLE_NAME);
    modifiedHtd.addFamily(new HColumnDescriptor(FAMILY_0));
    modifiedHtd.addFamily(new HColumnDescriptor(FAMILY_1));
    admin.modifyTable(TABLE_NAME, modifiedHtd);
    verifyTableDescriptor(TABLE_NAME, FAMILY_0, FAMILY_1);
  } finally {
    admin.deleteTable(TABLE_NAME);
  }
}
Project: ditb    File: TestAdmin1.java
@Test (timeout=300000)
public void testCreateTableWithEmptyRowInTheSplitKeys() throws IOException{
  byte[] tableName = Bytes.toBytes("testCreateTableWithEmptyRowInTheSplitKeys");
  byte[][] splitKeys = new byte[3][];
  splitKeys[0] = "region1".getBytes();
  splitKeys[1] = HConstants.EMPTY_BYTE_ARRAY;
  splitKeys[2] = "region2".getBytes();
  HTableDescriptor desc = new HTableDescriptor(TableName.valueOf(tableName));
  desc.addFamily(new HColumnDescriptor("col"));
  try {
    admin.createTable(desc, splitKeys);
    fail("Test case should fail as empty split key is passed.");
  } catch (IllegalArgumentException e) {
    LOG.info("Expected ", e);
  }
}
Project: ditb    File: HBaseAdmin.java
/**
 * Add a column to an existing table.
 * Asynchronous operation.
 *
 * @param tableName name of the table to add column to
 * @param column column descriptor of column to be added
 * @throws IOException if a remote or network exception occurs
 */
@Override
public void addColumn(final TableName tableName, final HColumnDescriptor column)
throws IOException {
  executeCallable(new MasterCallable<Void>(getConnection()) {
    @Override
    public Void call(int callTimeout) throws ServiceException {
      PayloadCarryingRpcController controller = rpcControllerFactory.newController();
      controller.setCallTimeout(callTimeout);
      controller.setPriority(tableName);
      AddColumnRequest req = RequestConverter.buildAddColumnRequest(
        tableName, column, ng.getNonceGroup(), ng.newNonce());
      master.addColumn(controller,req);
      return null;
    }
  });
}
Project: ditb    File: TestConstraint.java
/**
 * Test that we run a passing constraint
 * @throws Exception
 */
@SuppressWarnings("unchecked")
@Test
public void testConstraintPasses() throws Exception {
  // create the table
  // it would be nice if this was also a method on the util
  HTableDescriptor desc = new HTableDescriptor(tableName);
  for (byte[] family : new byte[][] { dummy, test }) {
    desc.addFamily(new HColumnDescriptor(family));
  }
  // add a constraint
  Constraints.add(desc, CheckWasRunConstraint.class);

  util.getHBaseAdmin().createTable(desc);
  Table table = new HTable(util.getConfiguration(), tableName);
  try {
    // test that we don't fail on a valid put
    Put put = new Put(row1);
    byte[] value = Integer.toString(10).getBytes();
    put.add(dummy, new byte[0], value);
    table.put(put);
  } finally {
    table.close();
  }
  assertTrue(CheckWasRunConstraint.wasRun);
}
Project: ditb    File: TestLoadIncrementalHFiles.java
private void testSplitStoreFileWithDifferentEncoding(DataBlockEncoding bulkloadEncoding,
    DataBlockEncoding cfEncoding) throws IOException {
  Path dir = util.getDataTestDirOnTestFS("testSplitHFileWithDifferentEncoding");
  FileSystem fs = util.getTestFileSystem();
  Path testIn = new Path(dir, "testhfile");
  HColumnDescriptor familyDesc = new HColumnDescriptor(FAMILY);
  familyDesc.setDataBlockEncoding(cfEncoding);
  HFileTestUtil.createHFileWithDataBlockEncoding(
      util.getConfiguration(), fs, testIn, bulkloadEncoding,
      FAMILY, QUALIFIER, Bytes.toBytes("aaa"), Bytes.toBytes("zzz"), 1000);

  Path bottomOut = new Path(dir, "bottom.out");
  Path topOut = new Path(dir, "top.out");

  LoadIncrementalHFiles.splitStoreFile(
      util.getConfiguration(), testIn,
      familyDesc, Bytes.toBytes("ggg"),
      bottomOut,
      topOut);

  int rowCount = verifyHFile(bottomOut);
  rowCount += verifyHFile(topOut);
  assertEquals(1000, rowCount);
}
Project: ditb    File: TestBlocksRead.java
/**
 * Callers must afterward call {@link HRegion#closeHRegion(HRegion)}
 * @param tableName
 * @param callingMethod
 * @param conf
 * @param family
 * @throws IOException
 * @return created and initialized region.
 */
private HRegion initHRegion(byte[] tableName, String callingMethod,
    HBaseConfiguration conf, String family) throws IOException {
  HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(tableName));
  HColumnDescriptor familyDesc;
  for (int i = 0; i < BLOOM_TYPE.length; i++) {
    BloomType bloomType = BLOOM_TYPE[i];
    familyDesc = new HColumnDescriptor(family + "_" + bloomType)
        .setBlocksize(1)
        .setBloomFilterType(BLOOM_TYPE[i]);
    htd.addFamily(familyDesc);
  }

  HRegionInfo info = new HRegionInfo(htd.getTableName(), null, null, false);
  Path path = new Path(DIR + callingMethod);
  HRegion r = HRegion.createHRegion(info, path, conf, htd);
  blockCache = new CacheConfig(conf).getBlockCache();
  return r;
}
Project: ditb    File: TestFilterWrapper.java
private static void createTable() {
  assertNotNull("HBaseAdmin is not initialized successfully.", admin);
  if (admin != null) {

    HTableDescriptor desc = new HTableDescriptor(name);
    HColumnDescriptor coldef = new HColumnDescriptor(Bytes.toBytes("f1"));
    desc.addFamily(coldef);

    try {
      admin.createTable(desc);
      assertTrue("Fail to create the table", admin.tableExists(name));
    } catch (IOException e) {
      assertNull("Exception found while creating table", e);
    }

  }
}
Project: ditb    File: TestRegionPlacement.java
/**
 * Create a table with the specified table name and number of regions.
 * @param tableName
 * @param regionNum
 * @throws IOException
 */
private static void createTable(TableName tableName, int regionNum)
    throws IOException {
  int expectedRegions = regionNum;
  byte[][] splitKeys = new byte[expectedRegions - 1][];
  for (int i = 1; i < expectedRegions; i++) {
    byte splitKey = (byte) i;
    splitKeys[i - 1] = new byte[] { splitKey, splitKey, splitKey };
  }

  HTableDescriptor desc = new HTableDescriptor(tableName);
  desc.addFamily(new HColumnDescriptor(HConstants.CATALOG_FAMILY));
  admin.createTable(desc, splitKeys);

  HTable ht = (HTable) CONNECTION.getTable(tableName);
  @SuppressWarnings("deprecation")
  Map<HRegionInfo, ServerName> regions = ht.getRegionLocations();
  assertEquals("Tried to create " + expectedRegions + " regions "
      + "but only found " + regions.size(), expectedRegions, regions.size());
  ht.close();
}
Project: ditb    File: TestCoprocessorScanPolicy.java
@Override
public InternalScanner preFlushScannerOpen(
    final ObserverContext<RegionCoprocessorEnvironment> c,
    Store store, KeyValueScanner memstoreScanner, InternalScanner s) throws IOException {
  Long newTtl = ttls.get(store.getTableName());
  if (newTtl != null) {
    System.out.println("PreFlush:" + newTtl);
  }
  Integer newVersions = versions.get(store.getTableName());
  ScanInfo oldSI = store.getScanInfo();
  HColumnDescriptor family = store.getFamily();
  ScanInfo scanInfo = new ScanInfo(TEST_UTIL.getConfiguration(),
      family.getName(), family.getMinVersions(),
      newVersions == null ? family.getMaxVersions() : newVersions,
      newTtl == null ? oldSI.getTtl() : newTtl, family.getKeepDeletedCells(),
      oldSI.getTimeToPurgeDeletes(), oldSI.getComparator());
  Scan scan = new Scan();
  scan.setMaxVersions(newVersions == null ? oldSI.getMaxVersions() : newVersions);
  return new StoreScanner(store, scanInfo, scan, Collections.singletonList(memstoreScanner),
      ScanType.COMPACT_RETAIN_DELETES, store.getSmallestReadPoint(),
      HConstants.OLDEST_TIMESTAMP);
}
Project: ditb    File: HFileOutputFormat2.java
/**
 * Serialize the column family to block size map into the configuration.
 * Invoked while configuring the MR job for incremental load.
 * @param tableDescriptor to read the properties from
 * @param conf to persist serialized values into
 *
 * @throws IOException
 *           on failure to read column family descriptors
 */
@VisibleForTesting
static void configureBlockSize(HTableDescriptor tableDescriptor, Configuration conf)
    throws UnsupportedEncodingException {
  StringBuilder blockSizeConfigValue = new StringBuilder();
  if (tableDescriptor == null) {
    // could happen with mock table instance
    return;
  }
  Collection<HColumnDescriptor> families = tableDescriptor.getFamilies();
  int i = 0;
  for (HColumnDescriptor familyDescriptor : families) {
    if (i++ > 0) {
      blockSizeConfigValue.append('&');
    }
    blockSizeConfigValue.append(URLEncoder.encode(
        familyDescriptor.getNameAsString(), "UTF-8"));
    blockSizeConfigValue.append('=');
    blockSizeConfigValue.append(URLEncoder.encode(
        String.valueOf(familyDescriptor.getBlocksize()), "UTF-8"));
  }
  // Persist the serialized family=blockSize map
  conf.set(BLOCK_SIZE_FAMILIES_CONF_KEY, blockSizeConfigValue.toString());
}
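The value written here is a URL-encoded list of family=blockSize pairs joined by '&'. The matching decode step is sketched below as a hypothetical helper (not the project's actual code); it only assumes the BLOCK_SIZE_FAMILIES_CONF_KEY constant used above and java.net.URLDecoder.

// Hypothetical inverse of configureBlockSize(): rebuild the family -> block size map.
static Map<byte[], Integer> decodeBlockSizeMap(Configuration conf) throws IOException {
  Map<byte[], Integer> result = new TreeMap<>(Bytes.BYTES_COMPARATOR);
  String encoded = conf.get(BLOCK_SIZE_FAMILIES_CONF_KEY, "");
  for (String pair : encoded.split("&")) {
    if (pair.isEmpty()) {
      continue;
    }
    String[] kv = pair.split("=", 2);
    byte[] family = Bytes.toBytes(URLDecoder.decode(kv[0], "UTF-8"));
    int blockSize = Integer.parseInt(URLDecoder.decode(kv[1], "UTF-8"));
    result.put(family, blockSize);
  }
  return result;
}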
Project: ditb    File: TestCoprocessorInterface.java
Region initHRegion (TableName tableName, String callingMethod,
    Configuration conf, Class<?> [] implClasses, byte [][] families)
    throws IOException {
  HTableDescriptor htd = new HTableDescriptor(tableName);
  for(byte [] family : families) {
    htd.addFamily(new HColumnDescriptor(family));
  }
  HRegionInfo info = new HRegionInfo(tableName, null, null, false);
  Path path = new Path(DIR + callingMethod);
  HRegion r = HRegion.createHRegion(info, path, conf, htd);

  // this following piece is a hack.
  RegionCoprocessorHost host = new RegionCoprocessorHost(r, null, conf);
  r.setCoprocessorHost(host);

  for (Class<?> implClass : implClasses) {
    host.load(implClass, Coprocessor.PRIORITY_USER, conf);
    Coprocessor c = host.findCoprocessor(implClass.getName());
    assertNotNull(c);
  }

  // Here we have to call pre and postOpen explicitly.
  host.preOpen();
  host.postOpen();
  return r;
}
Project: ditb    File: TestAddColumnFamilyProcedure.java
@Test(timeout = 60000)
public void testRollbackAndDoubleExecution() throws Exception {
  final TableName tableName = TableName.valueOf("testRollbackAndDoubleExecution");
  final String cf6 = "cf6";
  final HColumnDescriptor columnDescriptor = new HColumnDescriptor(cf6);
  final ProcedureExecutor<MasterProcedureEnv> procExec = getMasterProcedureExecutor();

  // create the table
  MasterProcedureTestingUtility.createTable(procExec, tableName, null, "f1", "f2");
  ProcedureTestingUtility.waitNoProcedureRunning(procExec);
  ProcedureTestingUtility.setKillAndToggleBeforeStoreUpdate(procExec, true);

  // Start the AddColumnFamily procedure && kill the executor
  long procId = procExec.submitProcedure(
    new AddColumnFamilyProcedure(procExec.getEnvironment(), tableName, columnDescriptor),
    nonceGroup,
    nonce);

  int numberOfSteps = AddColumnFamilyState.values().length - 2; // failing in the middle of proc
  MasterProcedureTestingUtility.testRollbackAndDoubleExecution(procExec, procId, numberOfSteps,
    AddColumnFamilyState.values());

  MasterProcedureTestingUtility.validateColumnFamilyDeletion(UTIL.getHBaseCluster().getMaster(),
    tableName, cf6);
}
Project: ditb    File: MasterFileSystem.java
/**
 * Modify Column of a table
 * @param tableName
 * @param hcd HColumnDescriptor
 * @return Modified HTableDescriptor with the column modified.
 * @throws IOException
 */
public HTableDescriptor modifyColumn(TableName tableName, HColumnDescriptor hcd)
    throws IOException {
  LOG.info("AddModifyColumn. Table = " + tableName
      + " HCD = " + hcd.toString());

  HTableDescriptor htd = this.services.getTableDescriptors().get(tableName);
  byte [] familyName = hcd.getName();
  if(!htd.hasFamily(familyName)) {
    throw new InvalidFamilyOperationException("Family '" +
      Bytes.toString(familyName) + "' doesn't exists so cannot be modified");
  }
  htd.modifyFamily(hcd);
  this.services.getTableDescriptors().add(htd);
  return htd;
}
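At the client level, the same kind of column modification is normally driven through Admin rather than MasterFileSystem. A sketch against the standard HBase 1.x client API; the table name, family name, and the changed setting are illustrative.

// Hedged sketch: modify an existing family's settings via the client Admin API.
try (Connection connection = ConnectionFactory.createConnection(HBaseConfiguration.create());
     Admin admin = connection.getAdmin()) {
  TableName tableName = TableName.valueOf("demo_table");
  HColumnDescriptor hcd = new HColumnDescriptor("d");
  hcd.setMaxVersions(5);                    // the setting being changed
  admin.modifyColumn(tableName, hcd);       // asynchronous operation in HBase 1.x
}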
Project: ditb    File: TestCoprocessorEndpoint.java
@BeforeClass
public static void setupBeforeClass() throws Exception {
  // set configure to indicate which cp should be loaded
  Configuration conf = util.getConfiguration();
  conf.setInt(HConstants.HBASE_CLIENT_OPERATION_TIMEOUT, 5000);
  conf.setStrings(CoprocessorHost.REGION_COPROCESSOR_CONF_KEY,
      org.apache.hadoop.hbase.coprocessor.ColumnAggregationEndpoint.class.getName(),
      ProtobufCoprocessorService.class.getName());
  conf.setStrings(CoprocessorHost.MASTER_COPROCESSOR_CONF_KEY,
      ProtobufCoprocessorService.class.getName());
  util.startMiniCluster(2);

  Admin admin = util.getHBaseAdmin();
  HTableDescriptor desc = new HTableDescriptor(TEST_TABLE);
  desc.addFamily(new HColumnDescriptor(TEST_FAMILY));
  admin.createTable(desc, new byte[][]{ROWS[rowSeperator1], ROWS[rowSeperator2]});
  util.waitUntilAllRegionsAssigned(TEST_TABLE);

  Table table = new HTable(conf, TEST_TABLE);
  for (int i = 0; i < ROWSIZE; i++) {
    Put put = new Put(ROWS[i]);
    put.addColumn(TEST_FAMILY, TEST_QUALIFIER, Bytes.toBytes(i));
    table.put(put);
  }
  table.close();
}
Project: ditb    File: CCIndexAdmin.java
/**
 * Creates a new table with indexes defined by IndexDescriptor.
 *
 * @param indexDesc index table descriptor for the table, including its index specifications
 * @throws IOException
 * @throws IndexExistedException
 */
public void createTable(IndexTableDescriptor indexDesc)
    throws IOException, IndexExistedException {
  HTableDescriptor descriptor = new HTableDescriptor(indexDesc.getTableDescriptor());
  descriptor.remove(IndexConstants.INDEX_KEY);
  admin.createTable(descriptor, indexDesc.getSplitKeys());
  admin.disableTable(descriptor.getTableName());

  if (indexDesc.hasIndex()) {
    // corresponding cct
    if (indexDesc.getIndexSpecifications()[0].getIndexType() == IndexType.CCIndex) {
      System.out.println("winter new cct of main table: " + Bytes.toString(Bytes
          .add(indexDesc.getTableDescriptor().getTableName().getName(), IndexConstants.CCT_FIX)));
      HTableDescriptor cctDesc = new HTableDescriptor(TableName.valueOf(Bytes
          .add(indexDesc.getTableDescriptor().getTableName().getName(), IndexConstants.CCT_FIX)));
      for (HColumnDescriptor f : descriptor.getFamilies()) {
        cctDesc.addFamily(f);
      }
      admin.createTable(cctDesc, indexDesc.getSplitKeys());
    }
    this.addIndexes(indexDesc.getTableDescriptor().getTableName(),
        indexDesc.getIndexSpecifications());
  }
  enableTable(descriptor.getTableName());
}
Project: ditb    File: TestStore.java
/**
 * Verify that compression and data block encoding are respected by the
 * Store.createWriterInTmp() method, used on store flush.
 */
@Test
public void testCreateWriter() throws Exception {
  Configuration conf = HBaseConfiguration.create();
  FileSystem fs = FileSystem.get(conf);

  HColumnDescriptor hcd = new HColumnDescriptor(family);
  hcd.setCompressionType(Compression.Algorithm.GZ);
  hcd.setDataBlockEncoding(DataBlockEncoding.DIFF);
  init(name.getMethodName(), conf, hcd);

  // Test createWriterInTmp()
  StoreFile.Writer writer = store.createWriterInTmp(4, hcd.getCompression(), false, true, false);
  Path path = writer.getPath();
  writer.append(new KeyValue(row, family, qf1, Bytes.toBytes(1)));
  writer.append(new KeyValue(row, family, qf2, Bytes.toBytes(2)));
  writer.append(new KeyValue(row2, family, qf1, Bytes.toBytes(3)));
  writer.append(new KeyValue(row2, family, qf2, Bytes.toBytes(4)));
  writer.close();

  // Verify that compression and encoding settings are respected
  HFile.Reader reader = HFile.createReader(fs, path, new CacheConfig(conf), conf);
  Assert.assertEquals(hcd.getCompressionType(), reader.getCompressionAlgorithm());
  Assert.assertEquals(hcd.getDataBlockEncoding(), reader.getDataBlockEncoding());
  reader.close();
}
Project: ditb    File: TestRegionObserverStacking.java
HRegion initHRegion (byte [] tableName, String callingMethod,
    Configuration conf, byte [] ... families) throws IOException {
  HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(tableName));
  for(byte [] family : families) {
    htd.addFamily(new HColumnDescriptor(family));
  }
  HRegionInfo info = new HRegionInfo(htd.getTableName(), null, null, false);
  Path path = new Path(DIR + callingMethod);
  HRegion r = HRegion.createHRegion(info, path, conf, htd);
  // this following piece is a hack. currently a coprocessorHost
  // is secretly loaded at OpenRegionHandler. we don't really
  // start a region server here, so just manually create cphost
  // and set it to region.
  RegionCoprocessorHost host = new RegionCoprocessorHost(r, null, conf);
  r.setCoprocessorHost(host);
  return r;
}
Project: ditb    File: TestAssignmentManagerOnCluster.java
/**
 * Test force unassign/assign a region of a disabled table
 */
@Test (timeout=60000)
public void testAssignDisabledRegion() throws Exception {
  TableName table = TableName.valueOf("testAssignDisabledRegion");
  MiniHBaseCluster cluster = TEST_UTIL.getHBaseCluster();
  MyMaster master = null;
  try {
    HTableDescriptor desc = new HTableDescriptor(table);
    desc.addFamily(new HColumnDescriptor(FAMILY));
    admin.createTable(desc);

    Table meta = new HTable(conf, TableName.META_TABLE_NAME);
    HRegionInfo hri = new HRegionInfo(
      desc.getTableName(), Bytes.toBytes("A"), Bytes.toBytes("Z"));
    MetaTableAccessor.addRegionToMeta(meta, hri);

    // Assign the region
    master = (MyMaster)cluster.getMaster();
    master.assignRegion(hri);
    AssignmentManager am = master.getAssignmentManager();
    RegionStates regionStates = am.getRegionStates();
    assertTrue(am.waitForAssignment(hri));

    // Disable the table
    admin.disableTable(table);
    assertTrue(regionStates.isRegionOffline(hri));

    // You can't assign a disabled region
    am.assign(hri, true, true);
    assertTrue(regionStates.isRegionOffline(hri));

    // You can't unassign a disabled region either
    am.unassign(hri, true);
    assertTrue(regionStates.isRegionOffline(hri));
  } finally {
    TEST_UTIL.deleteTable(table);
  }
}
Project: stroom-stats    File: HBaseUniqueIdReverseMapTable.java
@Override
public HTableDescriptor getDesc() {
    final HTableDescriptor desc = new HTableDescriptor(getName());
    final HColumnDescriptor colDesc = new HColumnDescriptor(NAME_FAMILY);
    colDesc.setMaxVersions(1);
    desc.addFamily(colDesc);
    return desc;
}
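A typical way to consume such a descriptor factory, sketched with the plain Admin API (this is not stroom-stats code; the connection handling is illustrative).

// Hedged sketch: create the table from getDesc() if it does not yet exist.
HTableDescriptor desc = getDesc();
try (Connection connection = ConnectionFactory.createConnection(HBaseConfiguration.create());
     Admin admin = connection.getAdmin()) {
  if (!admin.tableExists(desc.getTableName())) {
    admin.createTable(desc);
  }
}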
Project: ditb    File: TestPerColumnFamilyFlush.java
private void initHRegion(String callingMethod, Configuration conf) throws IOException {
  HTableDescriptor htd = new HTableDescriptor(TABLENAME);
  for (byte[] family : FAMILIES) {
    htd.addFamily(new HColumnDescriptor(family));
  }
  HRegionInfo info = new HRegionInfo(TABLENAME, null, null, false);
  Path path = new Path(DIR, callingMethod);
  region = HRegion.createHRegion(info, path, conf, htd);
}
Project: ditb    File: TestConstraint.java
/**
 * Check to make sure a constraint is unloaded when it fails
 * @throws Exception
 */
@Test
public void testIsUnloaded() throws Exception {
  // create the table
  HTableDescriptor desc = new HTableDescriptor(tableName);
  // add a family to the table
  for (byte[] family : new byte[][] { dummy, test }) {
    desc.addFamily(new HColumnDescriptor(family));
  }
  // make sure that constraints are unloaded
  Constraints.add(desc, RuntimeFailConstraint.class);
  // add a constraint to check to see if is run
  Constraints.add(desc, CheckWasRunConstraint.class);
  CheckWasRunConstraint.wasRun = false;

  util.getHBaseAdmin().createTable(desc);
  Table table = new HTable(util.getConfiguration(), tableName);

  // test that we do fail on violation
  Put put = new Put(row1);
  put.add(dummy, new byte[0], "pass".getBytes());

  try {
    table.put(put);
    fail("RuntimeFailConstraint wasn't triggered - this put shouldn't work!");
  } catch (Exception e) {
    // NOOP
  }

  // try the put again, this time constraints are not used, so it works
  table.put(put);
  // and we make sure that constraints were not run...
  assertFalse(CheckWasRunConstraint.wasRun);
  table.close();
}
Project: QDrill    File: TestTableGenerator.java
public static void generateHBaseDatasetCompositeKeyInt(HBaseAdmin admin, String tableName, int numberRegions) throws Exception {
  if (admin.tableExists(tableName)) {
    admin.disableTable(tableName);
    admin.deleteTable(tableName);
  }

  HTableDescriptor desc = new HTableDescriptor(tableName);
  desc.addFamily(new HColumnDescriptor(FAMILY_F));

  if (numberRegions > 1) {
    admin.createTable(desc, Arrays.copyOfRange(SPLIT_KEYS, 0, numberRegions-1));
  } else {
    admin.createTable(desc);
  }

  HTable table = new HTable(admin.getConfiguration(), tableName);

  int startVal = 0;
  int stopVal = 1000;
  int interval = 47;
  long counter = 0;
  for (int i = startVal; i < stopVal; i += interval, counter ++) {
    byte[] rowKey = ByteBuffer.allocate(12).putInt(i).array();

    for(int j = 0; j < 8; ++j) {
      rowKey[4 + j] = (byte)(counter >> (56 - (j * 8)));
    }

    Put p = new Put(rowKey);
    p.add(FAMILY_F, COLUMN_C, "dummy".getBytes());
    table.put(p);
  }

  table.flushCommits();
  table.close();
}
Project: QDrill    File: TestTableGenerator.java
public static void generateHBaseDatasetFloatOB(HBaseAdmin admin, String tableName, int numberRegions) throws Exception {
  if (admin.tableExists(tableName)) {
    admin.disableTable(tableName);
    admin.deleteTable(tableName);
  }

  HTableDescriptor desc = new HTableDescriptor(tableName);
  desc.addFamily(new HColumnDescriptor(FAMILY_F));

  if (numberRegions > 1) {
    admin.createTable(desc, Arrays.copyOfRange(SPLIT_KEYS, 0, numberRegions-1));
  } else {
    admin.createTable(desc);
  }

  HTable table = new HTable(admin.getConfiguration(), tableName);

  for (float i = (float)0.5; i <= 100.00; i += 0.75) {
    byte[] bytes = new byte[5];
    org.apache.hadoop.hbase.util.PositionedByteRange br =
            new org.apache.hadoop.hbase.util.SimplePositionedByteRange(bytes, 0, 5);
    org.apache.hadoop.hbase.util.OrderedBytes.encodeFloat32(br, i,
            org.apache.hadoop.hbase.util.Order.ASCENDING);
    Put p = new Put(bytes);
    p.add(FAMILY_F, COLUMN_C, String.format("value %03f", i).getBytes());
    table.put(p);
  }

  table.flushCommits();
  table.close();

  admin.flush(tableName);
}
Project: ditb    File: TestHFileOutputFormat.java
private void setupMockColumnFamiliesForBlockSize(Table table,
    Map<String, Integer> familyToDataBlockEncoding) throws IOException {
  HTableDescriptor mockTableDescriptor = new HTableDescriptor(TABLE_NAME);
  for (Entry<String, Integer> entry : familyToDataBlockEncoding.entrySet()) {
    mockTableDescriptor.addFamily(new HColumnDescriptor(entry.getKey())
        .setMaxVersions(1)
        .setBlocksize(entry.getValue())
        .setBlockCacheEnabled(false)
        .setTimeToLive(0));
  }
  Mockito.doReturn(mockTableDescriptor).when(table).getTableDescriptor();
}
Project: ditb    File: TestWALReplay.java
private HTableDescriptor createBasic3FamilyHTD(final TableName tableName) {
  HTableDescriptor htd = new HTableDescriptor(tableName);
  HColumnDescriptor a = new HColumnDescriptor(Bytes.toBytes("a"));
  htd.addFamily(a);
  HColumnDescriptor b = new HColumnDescriptor(Bytes.toBytes("b"));
  htd.addFamily(b);
  HColumnDescriptor c = new HColumnDescriptor(Bytes.toBytes("c"));
  htd.addFamily(c);
  return htd;
}
Project: ditb    File: TestModifyTableProcedure.java
@Test(timeout = 60000)
public void testRollbackAndDoubleExecutionOffline() throws Exception {
  final TableName tableName = TableName.valueOf("testRollbackAndDoubleExecution");
  final String familyName = "cf2";
  final ProcedureExecutor<MasterProcedureEnv> procExec = getMasterProcedureExecutor();

  // create the table
  HRegionInfo[] regions = MasterProcedureTestingUtility.createTable(
    procExec, tableName, null, "cf1");
  UTIL.getHBaseAdmin().disableTable(tableName);

  ProcedureTestingUtility.waitNoProcedureRunning(procExec);
  ProcedureTestingUtility.setKillAndToggleBeforeStoreUpdate(procExec, true);

  HTableDescriptor htd = new HTableDescriptor(UTIL.getHBaseAdmin().getTableDescriptor(tableName));
  boolean newCompactionEnableOption = !htd.isCompactionEnabled();
  htd.setCompactionEnabled(newCompactionEnableOption);
  htd.addFamily(new HColumnDescriptor(familyName));
  htd.setRegionReplication(3);

  // Start the Modify procedure && kill the executor
  long procId = procExec.submitProcedure(
    new ModifyTableProcedure(procExec.getEnvironment(), htd), nonceGroup, nonce);

  // Restart the executor and rollback the step twice
  int numberOfSteps = ModifyTableState.values().length - 4; // failing in the middle of proc
  MasterProcedureTestingUtility.testRollbackAndDoubleExecution(
    procExec,
    procId,
    numberOfSteps,
    ModifyTableState.values());

  // cf2 should not be present
  MasterProcedureTestingUtility.validateTableCreation(UTIL.getHBaseCluster().getMaster(),
    tableName, regions, "cf1");
}
Project: ignite-hbase    File: AdminContext.java
private void ensureColumnFamilyExists(HColumnDescriptor family, TableName table)
    throws IOException {
  log.debug("Ensuring column family '{}' in HBase table '{}' exists",
      family.getNameAsString(), table.getNameAsString());
  if (!hasFamily(family, table)) {
    createColumnFamily(family, table);
  }
}
Project: ditb    File: TestModifyTableProcedure.java
@Test(timeout = 60000)
public void testModifyTableAddCF() throws Exception {
  final TableName tableName = TableName.valueOf("testModifyTableAddCF");
  final ProcedureExecutor<MasterProcedureEnv> procExec = getMasterProcedureExecutor();

  MasterProcedureTestingUtility.createTable(procExec, tableName, null, "cf1");
  HTableDescriptor currentHtd = UTIL.getHBaseAdmin().getTableDescriptor(tableName);
  assertEquals(1, currentHtd.getFamiliesKeys().size());

  // Test 1: Modify the table descriptor online
  String cf2 = "cf2";
  HTableDescriptor htd = new HTableDescriptor(UTIL.getHBaseAdmin().getTableDescriptor(tableName));
  htd.addFamily(new HColumnDescriptor(cf2));

  long procId = ProcedureTestingUtility.submitAndWait(
      procExec, new ModifyTableProcedure(procExec.getEnvironment(), htd));
  ProcedureTestingUtility.assertProcNotFailed(procExec.getResult(procId));

  currentHtd = UTIL.getHBaseAdmin().getTableDescriptor(tableName);
  assertEquals(2, currentHtd.getFamiliesKeys().size());
  assertTrue(currentHtd.hasFamily(cf2.getBytes()));

  // Test 2: Modify the table descriptor offline
  UTIL.getHBaseAdmin().disableTable(tableName);
  ProcedureTestingUtility.waitNoProcedureRunning(procExec);
  String cf3 = "cf3";
  HTableDescriptor htd2 =
      new HTableDescriptor(UTIL.getHBaseAdmin().getTableDescriptor(tableName));
  htd2.addFamily(new HColumnDescriptor(cf3));

  long procId2 =
      ProcedureTestingUtility.submitAndWait(procExec,
        new ModifyTableProcedure(procExec.getEnvironment(), htd2));
  ProcedureTestingUtility.assertProcNotFailed(procExec.getResult(procId2));

  currentHtd = UTIL.getHBaseAdmin().getTableDescriptor(tableName);
  assertTrue(currentHtd.hasFamily(cf3.getBytes()));
  assertEquals(3, currentHtd.getFamiliesKeys().size());
}
Project: ditb    File: TestAdmin1.java
@Test (timeout=300000)
public void testHColumnValidName() {
  try {
    new HColumnDescriptor("\\test\\abc");
    fail("Expected IllegalArgumentException for an illegal family name");
  } catch (IllegalArgumentException iae) {
    // expected: the name contains characters that are not legal in a family name
  }
}
Project: ditb    File: TestAdmin1.java
/**
 * Multi-family scenario. Tests forcing split from client and
 * having scanners successfully ride over split.
 * @throws Exception
 * @throws IOException
 */
@Test (timeout=800000)
public void testForceSplitMultiFamily() throws Exception {
  int numVersions = HColumnDescriptor.DEFAULT_VERSIONS;

  // use small HFile block size so that we can have lots of blocks in HFile
  // Otherwise, if there is only one block,
  // HFileBlockIndex.midKey()'s value == startKey
  int blockSize = 256;
  byte[][] familyNames = new byte[][] { Bytes.toBytes("cf1"),
    Bytes.toBytes("cf2") };

  // one of the column families isn't splittable
  int[] rowCounts = new int[] { 6000, 1 };
  splitTest(null, familyNames, rowCounts, numVersions, blockSize);

  rowCounts = new int[] { 1, 6000 };
  splitTest(null, familyNames, rowCounts, numVersions, blockSize);

  // one column family has much smaller data than the other
  // the split key should be based on the largest column family
  rowCounts = new int[] { 6000, 300 };
  splitTest(null, familyNames, rowCounts, numVersions, blockSize);

  rowCounts = new int[] { 300, 6000 };
  splitTest(null, familyNames, rowCounts, numVersions, blockSize);

}
Project: ditb    File: HStore.java
/**
 * @param family
 * @return TTL in milliseconds of the specified family
 */
private static long determineTTLFromFamily(final HColumnDescriptor family) {
  // HCD.getTimeToLive returns ttl in seconds. Convert to milliseconds.
  long ttl = family.getTimeToLive();
  if (ttl == HConstants.FOREVER) {
    // Default is unlimited ttl.
    ttl = Long.MAX_VALUE;
  } else if (ttl == -1) {
    ttl = Long.MAX_VALUE;
  } else {
    // Second -> ms adjust for user data
    ttl *= 1000;
  }
  return ttl;
}
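The TTL consumed here is configured in seconds on the family descriptor, and this method converts it to milliseconds. A quick sketch using the standard setter; the family name and value are illustrative.

// TTL is set in seconds on the HColumnDescriptor.
HColumnDescriptor family = new HColumnDescriptor("events");
family.setTimeToLive(7 * 24 * 60 * 60);   // keep cells for one week
// Inside HStore, determineTTLFromFamily(family) would then yield 604,800,000 ms.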
Project: ditb    File: TestAtomicOperation.java
private void initHRegion (byte [] tableName, String callingMethod, int [] maxVersions,
  byte[] ... families)
throws IOException {
  HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(tableName));
  int i=0;
  for(byte [] family : families) {
    HColumnDescriptor hcd = new HColumnDescriptor(family);
    hcd.setMaxVersions(maxVersions != null ? maxVersions[i++] : 1);
    htd.addFamily(hcd);
  }
  HRegionInfo info = new HRegionInfo(htd.getTableName(), null, null, false);
  region = TEST_UTIL.createLocalHRegion(info, htd);
}