Java 类 org.apache.hadoop.hbase.master.cleaner.LogCleaner 实例源码

项目:ditb    文件:HMaster.java   
private void startServiceThreads() throws IOException {
  // Executor pools used by the master for region open/close, server
  // operations, and distributed log replay.
  this.service.startExecutorService(ExecutorType.MASTER_OPEN_REGION,
      conf.getInt("hbase.master.executor.openregion.threads", 5));
  this.service.startExecutorService(ExecutorType.MASTER_CLOSE_REGION,
      conf.getInt("hbase.master.executor.closeregion.threads", 5));
  this.service.startExecutorService(ExecutorType.MASTER_SERVER_OPERATIONS,
      conf.getInt("hbase.master.executor.serverops.threads", 5));
  this.service.startExecutorService(ExecutorType.MASTER_META_SERVER_OPERATIONS,
      conf.getInt("hbase.master.executor.serverops.threads", 5));
  this.service.startExecutorService(ExecutorType.M_LOG_REPLAY_OPS,
      conf.getInt("hbase.master.executor.logreplayops.threads", 10));

  // Table operations must run single-threaded: enable/disable of tables has
  // no fencing, so concurrency here would race. Before raising this above 1,
  // read the comment at AccessController#postCreateTableHandler.
  this.service.startExecutorService(ExecutorType.MASTER_TABLE_OPERATIONS, 1);
  startProcedureExecutor();

  // Chore that purges expired WALs from the old-logs directory.
  final int choreInterval = conf.getInt("hbase.master.cleaner.interval", 60 * 1000);
  this.logCleaner = new LogCleaner(choreInterval, this, conf,
      getMasterFileSystem().getFileSystem(), getMasterFileSystem().getOldLogDir());
  getChoreService().scheduleChore(logCleaner);

  // Chore that purges expired HFiles from the archive directory.
  final Path archivePath = HFileArchiveUtil.getArchivePath(conf);
  this.hfileCleaner = new HFileCleaner(choreInterval, this, conf,
      getMasterFileSystem().getFileSystem(), archivePath);
  getChoreService().scheduleChore(hfileCleaner);
  serviceStarted = true;
  if (LOG.isTraceEnabled()) {
    LOG.trace("Started service threads");
  }
}
项目:PyroDB    文件:HMaster.java   
private void startServiceThreads() throws IOException {
  // Executor pools used by the master for region open/close, server
  // operations, and distributed log replay.
  this.service.startExecutorService(ExecutorType.MASTER_OPEN_REGION,
      conf.getInt("hbase.master.executor.openregion.threads", 5));
  this.service.startExecutorService(ExecutorType.MASTER_CLOSE_REGION,
      conf.getInt("hbase.master.executor.closeregion.threads", 5));
  this.service.startExecutorService(ExecutorType.MASTER_SERVER_OPERATIONS,
      conf.getInt("hbase.master.executor.serverops.threads", 5));
  this.service.startExecutorService(ExecutorType.MASTER_META_SERVER_OPERATIONS,
      conf.getInt("hbase.master.executor.serverops.threads", 5));
  this.service.startExecutorService(ExecutorType.M_LOG_REPLAY_OPS,
      conf.getInt("hbase.master.executor.logreplayops.threads", 10));

  // Table operations must run single-threaded: enable/disable of tables has
  // no fencing, so concurrency here would race.
  this.service.startExecutorService(ExecutorType.MASTER_TABLE_OPERATIONS, 1);

  // Daemon that purges expired WALs from the old-logs directory.
  final int choreInterval = conf.getInt("hbase.master.cleaner.interval", 60 * 1000);
  this.logCleaner = new LogCleaner(choreInterval, this, conf,
      getMasterFileSystem().getFileSystem(), getMasterFileSystem().getOldLogDir());
  Threads.setDaemonThreadRunning(logCleaner.getThread(), getName() + ".oldLogCleaner");

  // Daemon that purges expired HFiles from the archive directory.
  final Path archivePath = HFileArchiveUtil.getArchivePath(conf);
  this.hfileCleaner = new HFileCleaner(choreInterval, this, conf,
      getMasterFileSystem().getFileSystem(), archivePath);
  Threads.setDaemonThreadRunning(hfileCleaner.getThread(),
      getName() + ".archivedHFileCleaner");

  serviceStarted = true;
  if (LOG.isTraceEnabled()) {
    LOG.trace("Started service threads");
  }
}
项目:LCIndex-HBase-0.94.16    文件:HMaster.java   
private void startServiceThreads() throws IOException{

   // Start the executor service pools.
   // NOTE(review): MASTER_SERVER_OPERATIONS and MASTER_META_SERVER_OPERATIONS
   // read the same "hbase.master.executor.serverops.threads" key but with
   // different defaults (3 vs 5) -- confirm the asymmetry is intentional.
   this.executorService.startExecutorService(ExecutorType.MASTER_OPEN_REGION,
      conf.getInt("hbase.master.executor.openregion.threads", 5));
   this.executorService.startExecutorService(ExecutorType.MASTER_CLOSE_REGION,
      conf.getInt("hbase.master.executor.closeregion.threads", 5));
   this.executorService.startExecutorService(ExecutorType.MASTER_SERVER_OPERATIONS,
      conf.getInt("hbase.master.executor.serverops.threads", 3));
   this.executorService.startExecutorService(ExecutorType.MASTER_META_SERVER_OPERATIONS,
      conf.getInt("hbase.master.executor.serverops.threads", 5));

   // We depend on there being only one instance of this executor running
   // at a time.  To do concurrency, would need fencing of enable/disable of
   // tables.
   this.executorService.startExecutorService(ExecutorType.MASTER_TABLE_OPERATIONS, 1);

   // Start log cleaner thread. Both cleaner threads run as daemons on the
   // same interval and are named after the current thread.
   String n = Thread.currentThread().getName();
   int cleanerInterval = conf.getInt("hbase.master.cleaner.interval", 60 * 1000);
   this.logCleaner =
      new LogCleaner(cleanerInterval,
         this, conf, getMasterFileSystem().getFileSystem(),
         getMasterFileSystem().getOldLogDir());
         // NOTE(review): misleading indentation -- the call below is a
         // standalone statement, not part of the assignment above.
         Threads.setDaemonThreadRunning(logCleaner.getThread(), n + ".oldLogCleaner");

   // Start the hfile archive cleaner thread.
    Path archiveDir = HFileArchiveUtil.getArchivePath(conf);
    this.hfileCleaner = new HFileCleaner(cleanerInterval, this, conf, getMasterFileSystem()
        .getFileSystem(), archiveDir);
    Threads.setDaemonThreadRunning(hfileCleaner.getThread(), n + ".archivedHFileCleaner");

   // Start the health checker, if one was configured.
   if (this.healthCheckChore != null) {
     Threads.setDaemonThreadRunning(this.healthCheckChore.getThread(), n + ".healthChecker");
   }

    // Start allowing requests to happen.
    this.rpcServer.openServer();
    if (LOG.isDebugEnabled()) {
      LOG.debug("Started service threads");
    }

  }
项目:LCIndex-HBase-0.94.16    文件:TestLogsCleaner.java   
@Test
public void testLogCleaning() throws Exception{
  Configuration conf = TEST_UTIL.getConfiguration();
  // Use a short TTL so the "old" files become deletable quickly.
  long ttl = 2000;
  conf.setLong("hbase.master.logcleaner.ttl", ttl);
  conf.setBoolean(HConstants.REPLICATION_ENABLE_KEY, true);
  Replication.decorateMasterConfiguration(conf);
  Server server = new DummyServer();
  ReplicationZookeeper zkHelper =
      new ReplicationZookeeper(server, new AtomicBoolean(true));

  Path oldLogDir = new Path(TEST_UTIL.getDataTestDir(),
      HConstants.HREGION_OLDLOGDIR_NAME);
  String fakeMachineName =
    URLEncoder.encode(server.getServerName().toString(), "UTF8");

  FileSystem fs = FileSystem.get(conf);
  // Cleaner chore with a 1s period; we invoke it directly via chore() below.
  LogCleaner cleaner  = new LogCleaner(1000, server, conf, fs, oldLogDir);

  // Create 2 invalid files, 1 "recent" file, 1 very new file and 30 old files
  // (34 entries in total, asserted below).
  long now = System.currentTimeMillis();
  fs.delete(oldLogDir, true);
  fs.mkdirs(oldLogDir);
  // Case 1: 2 invalid files, which would be deleted directly
  fs.createNewFile(new Path(oldLogDir, "a"));
  fs.createNewFile(new Path(oldLogDir, fakeMachineName + "." + "a"));
  // Case 2: 1 "recent" file, not even deletable for the first log cleaner
  // (TimeToLiveLogCleaner), so we are not going down the chain
  System.out.println("Now is: " + now);
  for (int i = 1; i < 31; i++) {
    // Case 3: old files which would be deletable for the first log cleaner
    // (TimeToLiveLogCleaner), and also for the second (ReplicationLogCleaner)
    Path fileName = new Path(oldLogDir, fakeMachineName + "." + (now - i) );
    fs.createNewFile(fileName);
    // Case 4: put 3 old log files in ZK indicating that they are scheduled
    // for replication so these files would pass the first log cleaner
    // (TimeToLiveLogCleaner) but would be rejected by the second
    // (ReplicationLogCleaner); i % 10 == 1 selects i = 1, 11, 21.
    if (i % (30/3) == 1) {
      zkHelper.addLogToList(fileName.getName(), fakeMachineName);
      System.out.println("Replication log file: " + fileName);
    }
  }

  // Sleep past the TTL so the file created next gets a newer modification
  // time than the 30 "old" files above.
  Thread.sleep(ttl);
  fs.createNewFile(new Path(oldLogDir, fakeMachineName + "." + now));

  // Case 2: 1 newer file, not even deletable for the first log cleaner
  // (TimeToLiveLogCleaner), so we are not going down the chain
  fs.createNewFile(new Path(oldLogDir, fakeMachineName + "." + (now + 10000) ));

  for (FileStatus stat : fs.listStatus(oldLogDir)) {
    System.out.println(stat.getPath().toString());
  }

  // 2 invalid + 30 old + 1 recent + 1 newer = 34 files before cleaning.
  assertEquals(34, fs.listStatus(oldLogDir).length);

  cleaner.chore();

  // We end up with the current log file, a newer one and the 3 old log
  // files which are scheduled for replication
  assertEquals(5, fs.listStatus(oldLogDir).length);

  for (FileStatus file : fs.listStatus(oldLogDir)) {
    System.out.println("Kept log files: " + file.getPath().getName());
  }
}
项目:pbase    文件:HMaster.java   
private void startServiceThreads() throws IOException {
    // Executor pools used by the master for region open/close, server
    // operations, and distributed log replay.
    this.service.startExecutorService(ExecutorType.MASTER_OPEN_REGION,
            conf.getInt("hbase.master.executor.openregion.threads", 5));
    this.service.startExecutorService(ExecutorType.MASTER_CLOSE_REGION,
            conf.getInt("hbase.master.executor.closeregion.threads", 5));
    this.service.startExecutorService(ExecutorType.MASTER_SERVER_OPERATIONS,
            conf.getInt("hbase.master.executor.serverops.threads", 5));
    this.service.startExecutorService(ExecutorType.MASTER_META_SERVER_OPERATIONS,
            conf.getInt("hbase.master.executor.serverops.threads", 5));
    this.service.startExecutorService(ExecutorType.M_LOG_REPLAY_OPS,
            conf.getInt("hbase.master.executor.logreplayops.threads", 10));

    // Table operations must stay single-threaded: enable/disable of tables
    // has no fencing. Before raising this above 1, read the comment at
    // AccessController#postCreateTableHandler.
    this.service.startExecutorService(ExecutorType.MASTER_TABLE_OPERATIONS, 1);

    // Daemon that purges expired WALs from the old-logs directory.
    final int choreInterval = conf.getInt("hbase.master.cleaner.interval", 60 * 1000);
    this.logCleaner = new LogCleaner(choreInterval, this, conf,
            getMasterFileSystem().getFileSystem(), getMasterFileSystem().getOldLogDir());
    Threads.setDaemonThreadRunning(logCleaner.getThread(),
            getServerName().toShortString() + ".oldLogCleaner");

    // Daemon that purges expired HFiles from the archive directory.
    final Path hfileArchiveDir = HFileArchiveUtil.getArchivePath(conf);
    this.hfileCleaner = new HFileCleaner(choreInterval, this, conf,
            getMasterFileSystem().getFileSystem(), hfileArchiveDir);
    Threads.setDaemonThreadRunning(hfileCleaner.getThread(),
            getServerName().toShortString() + ".archivedHFileCleaner");

    serviceStarted = true;
    if (LOG.isTraceEnabled()) {
        LOG.trace("Started service threads");
    }
}
项目:HIndex    文件:HMaster.java   
void startServiceThreads() throws IOException{
 // Start the executor service pools backing region open/close, server
 // operations, and distributed log replay.
 this.executorService.startExecutorService(ExecutorType.MASTER_OPEN_REGION,
    conf.getInt("hbase.master.executor.openregion.threads", 5));
 this.executorService.startExecutorService(ExecutorType.MASTER_CLOSE_REGION,
    conf.getInt("hbase.master.executor.closeregion.threads", 5));
 this.executorService.startExecutorService(ExecutorType.MASTER_SERVER_OPERATIONS,
    conf.getInt("hbase.master.executor.serverops.threads", 5));
 this.executorService.startExecutorService(ExecutorType.MASTER_META_SERVER_OPERATIONS,
    conf.getInt("hbase.master.executor.serverops.threads", 5));
 this.executorService.startExecutorService(ExecutorType.M_LOG_REPLAY_OPS,
    conf.getInt("hbase.master.executor.logreplayops.threads", 10));

 // We depend on there being only one instance of this executor running
 // at a time.  To do concurrency, would need fencing of enable/disable of
 // tables.
 // Any time changing this maxThreads to > 1, pls see the comment at
 // AccessController#postCreateTableHandler
 this.executorService.startExecutorService(ExecutorType.MASTER_TABLE_OPERATIONS, 1);

 // Start log cleaner thread; cleaner threads are daemons named after the
 // current thread.
 String n = Thread.currentThread().getName();
 int cleanerInterval = conf.getInt("hbase.master.cleaner.interval", 60 * 1000);
 this.logCleaner =
    new LogCleaner(cleanerInterval,
       this, conf, getMasterFileSystem().getFileSystem(),
       getMasterFileSystem().getOldLogDir());
       // NOTE(review): misleading indentation -- the call below is a
       // standalone statement, not part of the assignment above.
       Threads.setDaemonThreadRunning(logCleaner.getThread(), n + ".oldLogCleaner");

 // Start the hfile archive cleaner thread.
  Path archiveDir = HFileArchiveUtil.getArchivePath(conf);
  this.hfileCleaner = new HFileCleaner(cleanerInterval, this, conf, getMasterFileSystem()
      .getFileSystem(), archiveDir);
  Threads.setDaemonThreadRunning(hfileCleaner.getThread(), n + ".archivedHFileCleaner");

  // Start the health checker, if one was configured.
  if (this.healthCheckChore != null) {
    Threads.setDaemonThreadRunning(this.healthCheckChore.getThread(), n + ".healthChecker");
  }

  // Start allowing requests to happen.
  this.rpcServer.openServer();
  this.rpcServerOpen = true;
  if (LOG.isTraceEnabled()) {
    LOG.trace("Started service threads");
  }
}
项目:IRIndex    文件:HMaster.java   
private void startServiceThreads() throws IOException{

   // Start the executor service pools.
   // NOTE(review): MASTER_SERVER_OPERATIONS and MASTER_META_SERVER_OPERATIONS
   // read the same "hbase.master.executor.serverops.threads" key but with
   // different defaults (3 vs 5) -- confirm the asymmetry is intentional.
   this.executorService.startExecutorService(ExecutorType.MASTER_OPEN_REGION,
      conf.getInt("hbase.master.executor.openregion.threads", 5));
   this.executorService.startExecutorService(ExecutorType.MASTER_CLOSE_REGION,
      conf.getInt("hbase.master.executor.closeregion.threads", 5));
   this.executorService.startExecutorService(ExecutorType.MASTER_SERVER_OPERATIONS,
      conf.getInt("hbase.master.executor.serverops.threads", 3));
   this.executorService.startExecutorService(ExecutorType.MASTER_META_SERVER_OPERATIONS,
      conf.getInt("hbase.master.executor.serverops.threads", 5));

   // We depend on there being only one instance of this executor running
   // at a time.  To do concurrency, would need fencing of enable/disable of
   // tables.
   this.executorService.startExecutorService(ExecutorType.MASTER_TABLE_OPERATIONS, 1);

   // Start log cleaner thread. Both cleaner threads run as daemons on the
   // same interval and are named after the current thread.
   String n = Thread.currentThread().getName();
   int cleanerInterval = conf.getInt("hbase.master.cleaner.interval", 60 * 1000);
   this.logCleaner =
      new LogCleaner(cleanerInterval,
         this, conf, getMasterFileSystem().getFileSystem(),
         getMasterFileSystem().getOldLogDir());
         // NOTE(review): misleading indentation -- the call below is a
         // standalone statement, not part of the assignment above.
         Threads.setDaemonThreadRunning(logCleaner.getThread(), n + ".oldLogCleaner");

   // Start the hfile archive cleaner thread.
    Path archiveDir = HFileArchiveUtil.getArchivePath(conf);
    this.hfileCleaner = new HFileCleaner(cleanerInterval, this, conf, getMasterFileSystem()
        .getFileSystem(), archiveDir);
    Threads.setDaemonThreadRunning(hfileCleaner.getThread(), n + ".archivedHFileCleaner");

   // Start the health checker, if one was configured.
   if (this.healthCheckChore != null) {
     Threads.setDaemonThreadRunning(this.healthCheckChore.getThread(), n + ".healthChecker");
   }

    // Start allowing requests to happen.
    this.rpcServer.openServer();
    if (LOG.isDebugEnabled()) {
      LOG.debug("Started service threads");
    }

  }
项目:IRIndex    文件:TestLogsCleaner.java   
@Test
public void testLogCleaning() throws Exception{
  Configuration conf = TEST_UTIL.getConfiguration();
  // Use a short TTL so the "old" files become deletable quickly.
  long ttl = 2000;
  conf.setLong("hbase.master.logcleaner.ttl", ttl);
  conf.setBoolean(HConstants.REPLICATION_ENABLE_KEY, true);
  Replication.decorateMasterConfiguration(conf);
  Server server = new DummyServer();
  ReplicationZookeeper zkHelper =
      new ReplicationZookeeper(server, new AtomicBoolean(true));

  Path oldLogDir = new Path(TEST_UTIL.getDataTestDir(),
      HConstants.HREGION_OLDLOGDIR_NAME);
  String fakeMachineName =
    URLEncoder.encode(server.getServerName().toString(), "UTF8");

  FileSystem fs = FileSystem.get(conf);
  // Cleaner chore with a 1s period; we invoke it directly via chore() below.
  LogCleaner cleaner  = new LogCleaner(1000, server, conf, fs, oldLogDir);

  // Create 2 invalid files, 1 "recent" file, 1 very new file and 30 old files
  // (34 entries in total, asserted below).
  long now = System.currentTimeMillis();
  fs.delete(oldLogDir, true);
  fs.mkdirs(oldLogDir);
  // Case 1: 2 invalid files, which would be deleted directly
  fs.createNewFile(new Path(oldLogDir, "a"));
  fs.createNewFile(new Path(oldLogDir, fakeMachineName + "." + "a"));
  // Case 2: 1 "recent" file, not even deletable for the first log cleaner
  // (TimeToLiveLogCleaner), so we are not going down the chain
  System.out.println("Now is: " + now);
  for (int i = 1; i < 31; i++) {
    // Case 3: old files which would be deletable for the first log cleaner
    // (TimeToLiveLogCleaner), and also for the second (ReplicationLogCleaner)
    Path fileName = new Path(oldLogDir, fakeMachineName + "." + (now - i) );
    fs.createNewFile(fileName);
    // Case 4: put 3 old log files in ZK indicating that they are scheduled
    // for replication so these files would pass the first log cleaner
    // (TimeToLiveLogCleaner) but would be rejected by the second
    // (ReplicationLogCleaner); i % 10 == 1 selects i = 1, 11, 21.
    if (i % (30/3) == 1) {
      zkHelper.addLogToList(fileName.getName(), fakeMachineName);
      System.out.println("Replication log file: " + fileName);
    }
  }

  // Sleep past the TTL so the file created next gets a newer modification
  // time than the 30 "old" files above.
  Thread.sleep(ttl);
  fs.createNewFile(new Path(oldLogDir, fakeMachineName + "." + now));

  // Case 2: 1 newer file, not even deletable for the first log cleaner
  // (TimeToLiveLogCleaner), so we are not going down the chain
  fs.createNewFile(new Path(oldLogDir, fakeMachineName + "." + (now + 10000) ));

  for (FileStatus stat : fs.listStatus(oldLogDir)) {
    System.out.println(stat.getPath().toString());
  }

  // 2 invalid + 30 old + 1 recent + 1 newer = 34 files before cleaning.
  assertEquals(34, fs.listStatus(oldLogDir).length);

  cleaner.chore();

  // We end up with the current log file, a newer one and the 3 old log
  // files which are scheduled for replication
  assertEquals(5, fs.listStatus(oldLogDir).length);

  for (FileStatus file : fs.listStatus(oldLogDir)) {
    System.out.println("Kept log files: " + file.getPath().getName());
  }
}
项目:hbase    文件:HMaster.java   
private void startServiceThreads() throws IOException{
 // Start the executor service pools. Note the meta server-ops pool has its
 // own key in this version ("hbase.master.executor.meta.serverops.threads").
 this.executorService.startExecutorService(ExecutorType.MASTER_OPEN_REGION,
    conf.getInt("hbase.master.executor.openregion.threads", 5));
 this.executorService.startExecutorService(ExecutorType.MASTER_CLOSE_REGION,
    conf.getInt("hbase.master.executor.closeregion.threads", 5));
 this.executorService.startExecutorService(ExecutorType.MASTER_SERVER_OPERATIONS,
    conf.getInt("hbase.master.executor.serverops.threads", 5));
 this.executorService.startExecutorService(ExecutorType.MASTER_META_SERVER_OPERATIONS,
    conf.getInt("hbase.master.executor.meta.serverops.threads", 5));
 this.executorService.startExecutorService(ExecutorType.M_LOG_REPLAY_OPS,
    conf.getInt("hbase.master.executor.logreplayops.threads", 10));

 // We depend on there being only one instance of this executor running
 // at a time.  To do concurrency, would need fencing of enable/disable of
 // tables.
 // Any time changing this maxThreads to > 1, pls see the comment at
 // AccessController#postCompletedCreateTableAction
 this.executorService.startExecutorService(ExecutorType.MASTER_TABLE_OPERATIONS, 1);
 startProcedureExecutor();

 // Start log cleaner thread. The default interval here is 10 minutes
 // (600 * 1000 ms); WAL locations come from the WAL manager in this version.
 int cleanerInterval = conf.getInt("hbase.master.cleaner.interval", 600 * 1000);
 this.logCleaner =
    new LogCleaner(cleanerInterval,
       this, conf, getMasterWalManager().getFileSystem(),
       getMasterWalManager().getOldLogDir());
  getChoreService().scheduleChore(logCleaner);

 // Start the hfile archive cleaner thread; the params map hands the cleaner
 // a reference back to this master under the MASTER key.
  Path archiveDir = HFileArchiveUtil.getArchivePath(conf);
  Map<String, Object> params = new HashMap<>();
  params.put(MASTER, this);
  this.hfileCleaner = new HFileCleaner(cleanerInterval, this, conf, getMasterFileSystem()
      .getFileSystem(), archiveDir, params);
  getChoreService().scheduleChore(hfileCleaner);
  serviceStarted = true;
  if (LOG.isTraceEnabled()) {
    LOG.trace("Started service threads");
  }
  // Also schedule periodic cleanup of replication metadata.
  replicationMetaCleaner = new ReplicationMetaCleaner(this, this, cleanerInterval);
  getChoreService().scheduleChore(replicationMetaCleaner);
}
项目:hbase    文件:HMaster.java   
/**
 * Returns the chore responsible for deleting expired WAL files from the
 * old-logs directory.
 *
 * @return the master's {@code LogCleaner} instance
 */
public LogCleaner getLogCleaner() {
  return logCleaner;
}
项目:c5    文件:HMaster.java   
void startServiceThreads() throws IOException {
  // Executor pools backing region open/close, server operations, and
  // distributed log replay.
  this.executorService.startExecutorService(ExecutorType.MASTER_OPEN_REGION,
      conf.getInt("hbase.master.executor.openregion.threads", 5));
  this.executorService.startExecutorService(ExecutorType.MASTER_CLOSE_REGION,
      conf.getInt("hbase.master.executor.closeregion.threads", 5));
  this.executorService.startExecutorService(ExecutorType.MASTER_SERVER_OPERATIONS,
      conf.getInt("hbase.master.executor.serverops.threads", 5));
  this.executorService.startExecutorService(ExecutorType.MASTER_META_SERVER_OPERATIONS,
      conf.getInt("hbase.master.executor.serverops.threads", 5));
  this.executorService.startExecutorService(ExecutorType.M_LOG_REPLAY_OPS,
      conf.getInt("hbase.master.executor.logreplayops.threads", 10));

  // Table operations are deliberately single-threaded: enable/disable of
  // tables has no fencing, so running them concurrently would race.
  this.executorService.startExecutorService(ExecutorType.MASTER_TABLE_OPERATIONS, 1);

  // Daemon chores that purge expired WALs and archived HFiles; both share
  // one interval and are named after the current thread.
  final String threadPrefix = Thread.currentThread().getName();
  final int choreInterval = conf.getInt("hbase.master.cleaner.interval", 60 * 1000);
  this.logCleaner = new LogCleaner(choreInterval, this, conf,
      getMasterFileSystem().getFileSystem(), getMasterFileSystem().getOldLogDir());
  Threads.setDaemonThreadRunning(logCleaner.getThread(), threadPrefix + ".oldLogCleaner");

  final Path hfileArchiveDir = HFileArchiveUtil.getArchivePath(conf);
  this.hfileCleaner = new HFileCleaner(choreInterval, this, conf,
      getMasterFileSystem().getFileSystem(), hfileArchiveDir);
  Threads.setDaemonThreadRunning(hfileCleaner.getThread(),
      threadPrefix + ".archivedHFileCleaner");

  // Optional health-check chore, present only when configured.
  if (this.healthCheckChore != null) {
    Threads.setDaemonThreadRunning(this.healthCheckChore.getThread(),
        threadPrefix + ".healthChecker");
  }

  // Only now start accepting RPCs.
  this.rpcServer.openServer();
  this.rpcServerOpen = true;
  if (LOG.isTraceEnabled()) {
    LOG.trace("Started service threads");
  }
}
项目:HBase-Research    文件:HMaster.java   
private void startServiceThreads() throws IOException{

   // Start the executor service pools.
   // NOTE(review): MASTER_SERVER_OPERATIONS and MASTER_META_SERVER_OPERATIONS
   // read the same "hbase.master.executor.serverops.threads" key but with
   // different defaults (3 vs 5) -- confirm the asymmetry is intentional.
   this.executorService.startExecutorService(ExecutorType.MASTER_OPEN_REGION,
      conf.getInt("hbase.master.executor.openregion.threads", 5));
   this.executorService.startExecutorService(ExecutorType.MASTER_CLOSE_REGION,
      conf.getInt("hbase.master.executor.closeregion.threads", 5));
   this.executorService.startExecutorService(ExecutorType.MASTER_SERVER_OPERATIONS,
      conf.getInt("hbase.master.executor.serverops.threads", 3));
   this.executorService.startExecutorService(ExecutorType.MASTER_META_SERVER_OPERATIONS,
      conf.getInt("hbase.master.executor.serverops.threads", 5));

   // We depend on there being only one instance of this executor running
   // at a time.  To do concurrency, would need fencing of enable/disable of
   // tables.
   this.executorService.startExecutorService(ExecutorType.MASTER_TABLE_OPERATIONS, 1);

   // Start log cleaner thread. Both cleaner threads run as daemons on the
   // same interval and are named after the current thread.
   String n = Thread.currentThread().getName();
   int cleanerInterval = conf.getInt("hbase.master.cleaner.interval", 60 * 1000);
   this.logCleaner =
      new LogCleaner(cleanerInterval,
         this, conf, getMasterFileSystem().getFileSystem(),
         getMasterFileSystem().getOldLogDir());
         // NOTE(review): misleading indentation -- the call below is a
         // standalone statement, not part of the assignment above.
         Threads.setDaemonThreadRunning(logCleaner.getThread(), n + ".oldLogCleaner");

   // Start the hfile archive cleaner thread.
    Path archiveDir = HFileArchiveUtil.getArchivePath(conf);
    this.hfileCleaner = new HFileCleaner(cleanerInterval, this, conf, getMasterFileSystem()
        .getFileSystem(), archiveDir);
    Threads.setDaemonThreadRunning(hfileCleaner.getThread(), n + ".archivedHFileCleaner");

   // Put up info server; a negative port disables it.
   int port = this.conf.getInt("hbase.master.info.port", 60010);
   if (port >= 0) {
     String a = this.conf.get("hbase.master.info.bindAddress", "0.0.0.0");
     this.infoServer = new InfoServer(MASTER, a, port, false, this.conf);
     this.infoServer.addServlet("status", "/master-status", MasterStatusServlet.class);
     this.infoServer.addServlet("dump", "/dump", MasterDumpServlet.class);
     this.infoServer.setAttribute(MASTER, this);
     this.infoServer.start();
    }

   // Start the health checker, if one was configured.
   if (this.healthCheckChore != null) {
     Threads.setDaemonThreadRunning(this.healthCheckChore.getThread(), n + ".healthChecker");
   }

    // Start allowing requests to happen.
    this.rpcServer.openServer();
    if (LOG.isDebugEnabled()) {
      LOG.debug("Started service threads");
    }

  }
项目:HBase-Research    文件:TestLogsCleaner.java   
@Test
public void testLogCleaning() throws Exception{
  Configuration conf = TEST_UTIL.getConfiguration();
  // Use a short TTL so the "old" files become deletable quickly.
  long ttl = 2000;
  conf.setLong("hbase.master.logcleaner.ttl", ttl);
  conf.setBoolean(HConstants.REPLICATION_ENABLE_KEY, true);
  Replication.decorateMasterConfiguration(conf);
  Server server = new DummyServer();
  ReplicationZookeeper zkHelper =
      new ReplicationZookeeper(server, new AtomicBoolean(true));

  Path oldLogDir = new Path(TEST_UTIL.getDataTestDir(),
      HConstants.HREGION_OLDLOGDIR_NAME);
  String fakeMachineName =
    URLEncoder.encode(server.getServerName().toString(), "UTF8");

  FileSystem fs = FileSystem.get(conf);
  // Cleaner chore with a 1s period; we invoke it directly via chore() below.
  LogCleaner cleaner  = new LogCleaner(1000, server, conf, fs, oldLogDir);

  // Create 2 invalid files, 1 "recent" file, 1 very new file and 30 old files
  // (34 entries in total, asserted below).
  long now = System.currentTimeMillis();
  fs.delete(oldLogDir, true);
  fs.mkdirs(oldLogDir);
  // Case 1: 2 invalid files, which would be deleted directly
  fs.createNewFile(new Path(oldLogDir, "a"));
  fs.createNewFile(new Path(oldLogDir, fakeMachineName + "." + "a"));
  // Case 2: 1 "recent" file, not even deletable for the first log cleaner
  // (TimeToLiveLogCleaner), so we are not going down the chain
  System.out.println("Now is: " + now);
  for (int i = 1; i < 31; i++) {
    // Case 3: old files which would be deletable for the first log cleaner
    // (TimeToLiveLogCleaner), and also for the second (ReplicationLogCleaner)
    Path fileName = new Path(oldLogDir, fakeMachineName + "." + (now - i) );
    fs.createNewFile(fileName);
    // Case 4: put 3 old log files in ZK indicating that they are scheduled
    // for replication so these files would pass the first log cleaner
    // (TimeToLiveLogCleaner) but would be rejected by the second
    // (ReplicationLogCleaner); i % 10 == 1 selects i = 1, 11, 21.
    if (i % (30/3) == 1) {
      zkHelper.addLogToList(fileName.getName(), fakeMachineName);
      System.out.println("Replication log file: " + fileName);
    }
  }

  // Sleep past the TTL so the file created next gets a newer modification
  // time than the 30 "old" files above.
  Thread.sleep(ttl);
  fs.createNewFile(new Path(oldLogDir, fakeMachineName + "." + now));

  // Case 2: 1 newer file, not even deletable for the first log cleaner
  // (TimeToLiveLogCleaner), so we are not going down the chain
  fs.createNewFile(new Path(oldLogDir, fakeMachineName + "." + (now + 10000) ));

  for (FileStatus stat : fs.listStatus(oldLogDir)) {
    System.out.println(stat.getPath().toString());
  }

  // 2 invalid + 30 old + 1 recent + 1 newer = 34 files before cleaning.
  assertEquals(34, fs.listStatus(oldLogDir).length);

  cleaner.chore();

  // We end up with the current log file, a newer one and the 3 old log
  // files which are scheduled for replication
  assertEquals(5, fs.listStatus(oldLogDir).length);

  for (FileStatus file : fs.listStatus(oldLogDir)) {
    System.out.println("Kept log files: " + file.getPath().getName());
  }
}
项目:hbase-0.94.8-qod    文件:HMaster.java   
private void startServiceThreads() throws IOException{

   // Start the executor service pools.
   // NOTE(review): MASTER_SERVER_OPERATIONS and MASTER_META_SERVER_OPERATIONS
   // read the same "hbase.master.executor.serverops.threads" key but with
   // different defaults (3 vs 5) -- confirm the asymmetry is intentional.
   this.executorService.startExecutorService(ExecutorType.MASTER_OPEN_REGION,
      conf.getInt("hbase.master.executor.openregion.threads", 5));
   this.executorService.startExecutorService(ExecutorType.MASTER_CLOSE_REGION,
      conf.getInt("hbase.master.executor.closeregion.threads", 5));
   this.executorService.startExecutorService(ExecutorType.MASTER_SERVER_OPERATIONS,
      conf.getInt("hbase.master.executor.serverops.threads", 3));
   this.executorService.startExecutorService(ExecutorType.MASTER_META_SERVER_OPERATIONS,
      conf.getInt("hbase.master.executor.serverops.threads", 5));

   // We depend on there being only one instance of this executor running
   // at a time.  To do concurrency, would need fencing of enable/disable of
   // tables.
   this.executorService.startExecutorService(ExecutorType.MASTER_TABLE_OPERATIONS, 1);

   // Start log cleaner thread. Both cleaner threads run as daemons on the
   // same interval and are named after the current thread.
   String n = Thread.currentThread().getName();
   int cleanerInterval = conf.getInt("hbase.master.cleaner.interval", 60 * 1000);
   this.logCleaner =
      new LogCleaner(cleanerInterval,
         this, conf, getMasterFileSystem().getFileSystem(),
         getMasterFileSystem().getOldLogDir());
         // NOTE(review): misleading indentation -- the call below is a
         // standalone statement, not part of the assignment above.
         Threads.setDaemonThreadRunning(logCleaner.getThread(), n + ".oldLogCleaner");

   // Start the hfile archive cleaner thread.
    Path archiveDir = HFileArchiveUtil.getArchivePath(conf);
    this.hfileCleaner = new HFileCleaner(cleanerInterval, this, conf, getMasterFileSystem()
        .getFileSystem(), archiveDir);
    Threads.setDaemonThreadRunning(hfileCleaner.getThread(), n + ".archivedHFileCleaner");

   // Put up info server; a negative port disables it.
   int port = this.conf.getInt("hbase.master.info.port", 60010);
   if (port >= 0) {
     String a = this.conf.get("hbase.master.info.bindAddress", "0.0.0.0");
     this.infoServer = new InfoServer(MASTER, a, port, false, this.conf);
     this.infoServer.addServlet("status", "/master-status", MasterStatusServlet.class);
     this.infoServer.addServlet("dump", "/dump", MasterDumpServlet.class);
     this.infoServer.setAttribute(MASTER, this);
     this.infoServer.start();
    }

   // Start the health checker, if one was configured.
   if (this.healthCheckChore != null) {
     Threads.setDaemonThreadRunning(this.healthCheckChore.getThread(), n + ".healthChecker");
   }

    // Start allowing requests to happen.
    this.rpcServer.openServer();
    if (LOG.isDebugEnabled()) {
      LOG.debug("Started service threads");
    }

  }
项目:hbase-0.94.8-qod    文件:TestLogsCleaner.java   
/**
 * End-to-end check of the log-cleaner chore: seeds the old-logs directory with
 * invalid, expired, recent and replication-queued WAL files, runs the chore
 * once, and verifies only the undeletable files remain.
 */
@Test
public void testLogCleaning() throws Exception{
  Configuration conf = TEST_UTIL.getConfiguration();
  // TTL (ms) consumed by the first cleaner in the chain (TimeToLiveLogCleaner).
  long ttl = 2000;
  conf.setLong("hbase.master.logcleaner.ttl", ttl);
  conf.setBoolean(HConstants.REPLICATION_ENABLE_KEY, true);
  Replication.decorateMasterConfiguration(conf);
  Server server = new DummyServer();
  ReplicationZookeeper zkHelper =
      new ReplicationZookeeper(server, new AtomicBoolean(true));

  Path oldLogDir = new Path(TEST_UTIL.getDataTestDir(),
      HConstants.HREGION_OLDLOGDIR_NAME);
  // WAL file names below are prefixed with the URL-encoded server name.
  String fakeMachineName =
    URLEncoder.encode(server.getServerName().toString(), "UTF8");

  FileSystem fs = FileSystem.get(conf);
  LogCleaner cleaner  = new LogCleaner(1000, server, conf, fs, oldLogDir);

  // Create 2 invalid files, 1 "recent" file, 1 very new file and 30 old files
  long now = System.currentTimeMillis();
  fs.delete(oldLogDir, true);
  fs.mkdirs(oldLogDir);
  // Case 1: 2 invalid files, which would be deleted directly
  fs.createNewFile(new Path(oldLogDir, "a"));
  fs.createNewFile(new Path(oldLogDir, fakeMachineName + "." + "a"));
  // Case 2: 1 "recent" file, not even deletable for the first log cleaner
  // (TimeToLiveLogCleaner), so we are not going down the chain
  // (that file itself is created after the loop below).
  System.out.println("Now is: " + now);
  for (int i = 1; i < 31; i++) {
    // Case 3: old files which would be deletable for the first log cleaner
    // (TimeToLiveLogCleaner), and also for the second (ReplicationLogCleaner)
    Path fileName = new Path(oldLogDir, fakeMachineName + "." + (now - i) );
    fs.createNewFile(fileName);
    // Case 4: put 3 old log files in ZK indicating that they are scheduled
    // for replication so these files would pass the first log cleaner
    // (TimeToLiveLogCleaner) but would be rejected by the second
    // (ReplicationLogCleaner); i = 1, 11, 21 match the modulus below.
    if (i % (30/3) == 1) {
      zkHelper.addLogToList(fileName.getName(), fakeMachineName);
      System.out.println("Replication log file: " + fileName);
    }
  }

  // Sleep past the TTL so the files created below get a newer modification time.
  Thread.sleep(ttl);
  fs.createNewFile(new Path(oldLogDir, fakeMachineName + "." + now));

  // Case 2: 1 newer file, not even deletable for the first log cleaner
  // (TimeToLiveLogCleaner), so we are not going down the chain
  fs.createNewFile(new Path(oldLogDir, fakeMachineName + "." + (now + 10000) ));

  for (FileStatus stat : fs.listStatus(oldLogDir)) {
    System.out.println(stat.getPath().toString());
  }

  // 2 invalid + 30 old + 1 recent + 1 future-stamped = 34 files before cleaning.
  assertEquals(34, fs.listStatus(oldLogDir).length);

  cleaner.chore();

  // We end up with the current log file, a newer one and the 3 old log
  // files which are scheduled for replication
  assertEquals(5, fs.listStatus(oldLogDir).length);

  for (FileStatus file : fs.listStatus(oldLogDir)) {
    System.out.println("Kept log files: " + file.getPath().getName());
  }
}
项目:hbase-0.94.8-qod    文件:HMaster.java   
/**
 * Starts the master's background service threads: the executor pools, the WAL
 * and archived-HFile cleaner chores, the info HTTP server, and the health
 * checker, then opens the RPC server to client requests.
 *
 * @throws IOException if the info server fails to start
 */
private void startServiceThreads() throws IOException {
  // Executor pools for region open/close and server operations.
  this.executorService.startExecutorService(ExecutorType.MASTER_OPEN_REGION,
      conf.getInt("hbase.master.executor.openregion.threads", 5));
  this.executorService.startExecutorService(ExecutorType.MASTER_CLOSE_REGION,
      conf.getInt("hbase.master.executor.closeregion.threads", 5));
  this.executorService.startExecutorService(ExecutorType.MASTER_SERVER_OPERATIONS,
      conf.getInt("hbase.master.executor.serverops.threads", 3));
  this.executorService.startExecutorService(ExecutorType.MASTER_META_SERVER_OPERATIONS,
      conf.getInt("hbase.master.executor.serverops.threads", 5));

  // Table operations must stay single-threaded: enable/disable of tables has
  // no fencing, so running more than one instance at a time would be unsafe.
  this.executorService.startExecutorService(ExecutorType.MASTER_TABLE_OPERATIONS, 1);

  // Daemon chore that prunes old WAL files from the old-logs directory.
  String threadNamePrefix = Thread.currentThread().getName();
  int cleanerInterval = conf.getInt("hbase.master.cleaner.interval", 60 * 1000);
  this.logCleaner = new LogCleaner(cleanerInterval, this, conf,
      getMasterFileSystem().getFileSystem(), getMasterFileSystem().getOldLogDir());
  Threads.setDaemonThreadRunning(logCleaner.getThread(), threadNamePrefix + ".oldLogCleaner");

  // Daemon chore that prunes archived HFiles.
  Path archiveDir = HFileArchiveUtil.getArchivePath(conf);
  this.hfileCleaner = new HFileCleaner(cleanerInterval, this, conf,
      getMasterFileSystem().getFileSystem(), archiveDir);
  Threads.setDaemonThreadRunning(hfileCleaner.getThread(),
      threadNamePrefix + ".archivedHFileCleaner");

  // Web UI; a negative port disables it.
  int infoPort = this.conf.getInt("hbase.master.info.port", 60010);
  if (infoPort >= 0) {
    String bindAddress = this.conf.get("hbase.master.info.bindAddress", "0.0.0.0");
    this.infoServer = new InfoServer(MASTER, bindAddress, infoPort, false, this.conf);
    this.infoServer.addServlet("status", "/master-status", MasterStatusServlet.class);
    this.infoServer.addServlet("dump", "/dump", MasterDumpServlet.class);
    this.infoServer.setAttribute(MASTER, this);
    this.infoServer.start();
  }

  // Health checker chore, when one is configured.
  if (this.healthCheckChore != null) {
    Threads.setDaemonThreadRunning(this.healthCheckChore.getThread(),
        threadNamePrefix + ".healthChecker");
  }

  // Everything is up; start accepting requests.
  this.rpcServer.openServer();
  if (LOG.isDebugEnabled()) {
    LOG.debug("Started service threads");
  }
}
项目:hbase-0.94.8-qod    文件:TestLogsCleaner.java   
/**
 * Verifies the log-cleaner chore against a populated old-logs directory:
 * invalid and expired WALs are removed, while files newer than the TTL or
 * still queued for replication in ZooKeeper survive.
 */
@Test
public void testLogCleaning() throws Exception{
  Configuration conf = TEST_UTIL.getConfiguration();
  // TTL (ms) enforced by the first cleaner in the chain (TimeToLiveLogCleaner).
  long ttl = 2000;
  conf.setLong("hbase.master.logcleaner.ttl", ttl);
  conf.setBoolean(HConstants.REPLICATION_ENABLE_KEY, true);
  Replication.decorateMasterConfiguration(conf);
  Server server = new DummyServer();
  ReplicationZookeeper zkHelper =
      new ReplicationZookeeper(server, new AtomicBoolean(true));

  Path oldLogDir = new Path(TEST_UTIL.getDataTestDir(),
      HConstants.HREGION_OLDLOGDIR_NAME);
  // WAL names created below are prefixed with the URL-encoded server name.
  String fakeMachineName =
    URLEncoder.encode(server.getServerName().toString(), "UTF8");

  FileSystem fs = FileSystem.get(conf);
  LogCleaner cleaner  = new LogCleaner(1000, server, conf, fs, oldLogDir);

  // Create 2 invalid files, 1 "recent" file, 1 very new file and 30 old files
  long now = System.currentTimeMillis();
  fs.delete(oldLogDir, true);
  fs.mkdirs(oldLogDir);
  // Case 1: 2 invalid files, which would be deleted directly
  fs.createNewFile(new Path(oldLogDir, "a"));
  fs.createNewFile(new Path(oldLogDir, fakeMachineName + "." + "a"));
  // Case 2: 1 "recent" file, not even deletable for the first log cleaner
  // (TimeToLiveLogCleaner), so we are not going down the chain
  // (that file itself is created after the loop below).
  System.out.println("Now is: " + now);
  for (int i = 1; i < 31; i++) {
    // Case 3: old files which would be deletable for the first log cleaner
    // (TimeToLiveLogCleaner), and also for the second (ReplicationLogCleaner)
    Path fileName = new Path(oldLogDir, fakeMachineName + "." + (now - i) );
    fs.createNewFile(fileName);
    // Case 4: put 3 old log files in ZK indicating that they are scheduled
    // for replication so these files would pass the first log cleaner
    // (TimeToLiveLogCleaner) but would be rejected by the second
    // (ReplicationLogCleaner); i = 1, 11, 21 match the modulus below.
    if (i % (30/3) == 1) {
      zkHelper.addLogToList(fileName.getName(), fakeMachineName);
      System.out.println("Replication log file: " + fileName);
    }
  }

  // Sleep past the TTL so the files created below get a newer modification time.
  Thread.sleep(ttl);
  fs.createNewFile(new Path(oldLogDir, fakeMachineName + "." + now));

  // Case 2: 1 newer file, not even deletable for the first log cleaner
  // (TimeToLiveLogCleaner), so we are not going down the chain
  fs.createNewFile(new Path(oldLogDir, fakeMachineName + "." + (now + 10000) ));

  for (FileStatus stat : fs.listStatus(oldLogDir)) {
    System.out.println(stat.getPath().toString());
  }

  // 2 invalid + 30 old + 1 recent + 1 future-stamped = 34 files before cleaning.
  assertEquals(34, fs.listStatus(oldLogDir).length);

  cleaner.chore();

  // We end up with the current log file, a newer one and the 3 old log
  // files which are scheduled for replication
  assertEquals(5, fs.listStatus(oldLogDir).length);

  for (FileStatus file : fs.listStatus(oldLogDir)) {
    System.out.println("Kept log files: " + file.getPath().getName());
  }
}
项目:DominoHBase    文件:HMaster.java   
/**
 * Starts the master's background service threads: the executor pools, the WAL
 * and archived-HFile cleaner chores, the info HTTP server, and the health
 * checker, then opens the RPC server and flags it as open.
 *
 * @throws IOException if the info server fails to start
 */
void startServiceThreads() throws IOException {
  // Executor pools for region open/close and server operations.
  this.executorService.startExecutorService(ExecutorType.MASTER_OPEN_REGION,
      conf.getInt("hbase.master.executor.openregion.threads", 5));
  this.executorService.startExecutorService(ExecutorType.MASTER_CLOSE_REGION,
      conf.getInt("hbase.master.executor.closeregion.threads", 5));
  this.executorService.startExecutorService(ExecutorType.MASTER_SERVER_OPERATIONS,
      conf.getInt("hbase.master.executor.serverops.threads", 3));
  this.executorService.startExecutorService(ExecutorType.MASTER_META_SERVER_OPERATIONS,
      conf.getInt("hbase.master.executor.serverops.threads", 5));

  // Table operations must stay single-threaded: enable/disable of tables has
  // no fencing, so running more than one instance at a time would be unsafe.
  this.executorService.startExecutorService(ExecutorType.MASTER_TABLE_OPERATIONS, 1);

  // Daemon chore that prunes old WAL files from the old-logs directory.
  String threadNamePrefix = Thread.currentThread().getName();
  int cleanerInterval = conf.getInt("hbase.master.cleaner.interval", 60 * 1000);
  this.logCleaner = new LogCleaner(cleanerInterval, this, conf,
      getMasterFileSystem().getFileSystem(), getMasterFileSystem().getOldLogDir());
  Threads.setDaemonThreadRunning(logCleaner.getThread(), threadNamePrefix + ".oldLogCleaner");

  // Daemon chore that prunes archived HFiles.
  Path archiveDir = HFileArchiveUtil.getArchivePath(conf);
  this.hfileCleaner = new HFileCleaner(cleanerInterval, this, conf,
      getMasterFileSystem().getFileSystem(), archiveDir);
  Threads.setDaemonThreadRunning(hfileCleaner.getThread(),
      threadNamePrefix + ".archivedHFileCleaner");

  // Web UI; a negative port disables it.
  int infoPort = this.conf.getInt(HConstants.MASTER_INFO_PORT, 60010);
  if (infoPort >= 0) {
    String bindAddress = this.conf.get("hbase.master.info.bindAddress", "0.0.0.0");
    this.infoServer = new InfoServer(MASTER, bindAddress, infoPort, false, this.conf);
    this.infoServer.addServlet("status", "/master-status", MasterStatusServlet.class);
    this.infoServer.addServlet("dump", "/dump", MasterDumpServlet.class);
    this.infoServer.setAttribute(MASTER, this);
    this.infoServer.start();
  }

  // Health checker chore, when one is configured.
  if (this.healthCheckChore != null) {
    Threads.setDaemonThreadRunning(this.healthCheckChore.getThread(),
        threadNamePrefix + ".healthChecker");
  }

  // Everything is up; start accepting requests and record that fact.
  this.rpcServer.openServer();
  this.rpcServerOpen = true;
  if (LOG.isDebugEnabled()) {
    LOG.debug("Started service threads");
  }
}
项目:DominoHBase    文件:TestLogsCleaner.java   
/**
 * Runs the log-cleaner chore once over a seeded old-logs directory and checks
 * that invalid/expired WALs are deleted while recent files and WALs queued
 * for replication in ZooKeeper are retained.
 */
@Test
public void testLogCleaning() throws Exception{
  Configuration conf = TEST_UTIL.getConfiguration();
  // TTL (ms) enforced by the first cleaner in the chain (TimeToLiveLogCleaner).
  long ttl = 2000;
  conf.setLong("hbase.master.logcleaner.ttl", ttl);
  conf.setBoolean(HConstants.REPLICATION_ENABLE_KEY, true);
  Replication.decorateMasterConfiguration(conf);
  Server server = new DummyServer();
  ReplicationZookeeper zkHelper =
      new ReplicationZookeeper(server, new AtomicBoolean(true));

  Path oldLogDir = new Path(TEST_UTIL.getDataTestDir(),
      HConstants.HREGION_OLDLOGDIR_NAME);
  // WAL names created below are prefixed with the URL-encoded server name.
  String fakeMachineName =
    URLEncoder.encode(server.getServerName().toString(), "UTF8");

  FileSystem fs = FileSystem.get(conf);
  LogCleaner cleaner  = new LogCleaner(1000, server, conf, fs, oldLogDir);

  // Create 2 invalid files, 1 "recent" file, 1 very new file and 30 old files
  long now = System.currentTimeMillis();
  fs.delete(oldLogDir, true);
  fs.mkdirs(oldLogDir);
  // Case 1: 2 invalid files, which would be deleted directly
  fs.createNewFile(new Path(oldLogDir, "a"));
  fs.createNewFile(new Path(oldLogDir, fakeMachineName + "." + "a"));
  // Case 2: 1 "recent" file, not even deletable for the first log cleaner
  // (TimeToLiveLogCleaner), so we are not going down the chain
  // (that file itself is created after the loop below).
  System.out.println("Now is: " + now);
  for (int i = 1; i < 31; i++) {
    // Case 3: old files which would be deletable for the first log cleaner
    // (TimeToLiveLogCleaner), and also for the second (ReplicationLogCleaner)
    Path fileName = new Path(oldLogDir, fakeMachineName + "." + (now - i) );
    fs.createNewFile(fileName);
    // Case 4: put 3 old log files in ZK indicating that they are scheduled
    // for replication so these files would pass the first log cleaner
    // (TimeToLiveLogCleaner) but would be rejected by the second
    // (ReplicationLogCleaner); i = 1, 11, 21 match the modulus below.
    if (i % (30/3) == 1) {
      zkHelper.addLogToList(fileName.getName(), fakeMachineName);
      System.out.println("Replication log file: " + fileName);
    }
  }

  // Sleep past the TTL so the files created below get a newer modification time.
  Thread.sleep(ttl);
  fs.createNewFile(new Path(oldLogDir, fakeMachineName + "." + now));

  // Case 2: 1 newer file, not even deletable for the first log cleaner
  // (TimeToLiveLogCleaner), so we are not going down the chain
  fs.createNewFile(new Path(oldLogDir, fakeMachineName + "." + (now + 10000) ));

  for (FileStatus stat : fs.listStatus(oldLogDir)) {
    System.out.println(stat.getPath().toString());
  }

  // 2 invalid + 30 old + 1 recent + 1 future-stamped = 34 files before cleaning.
  assertEquals(34, fs.listStatus(oldLogDir).length);

  cleaner.chore();

  // We end up with the current log file, a newer one and the 3 old log
  // files which are scheduled for replication
  assertEquals(5, fs.listStatus(oldLogDir).length);

  for (FileStatus file : fs.listStatus(oldLogDir)) {
    System.out.println("Kept log files: " + file.getPath().getName());
  }
}
项目:hindex    文件:HMaster.java   
/**
 * Starts the master's background service threads: the executor pools, the WAL
 * and archived-HFile cleaner chores, the info HTTP server, and the health
 * checker, then opens the RPC server to client requests.
 *
 * @throws IOException if the info server fails to start
 */
private void startServiceThreads() throws IOException {
  // Executor pools for region open/close and server operations.
  this.executorService.startExecutorService(ExecutorType.MASTER_OPEN_REGION,
      conf.getInt("hbase.master.executor.openregion.threads", 5));
  this.executorService.startExecutorService(ExecutorType.MASTER_CLOSE_REGION,
      conf.getInt("hbase.master.executor.closeregion.threads", 5));
  this.executorService.startExecutorService(ExecutorType.MASTER_SERVER_OPERATIONS,
      conf.getInt("hbase.master.executor.serverops.threads", 3));
  this.executorService.startExecutorService(ExecutorType.MASTER_META_SERVER_OPERATIONS,
      conf.getInt("hbase.master.executor.serverops.threads", 5));

  // Table operations must stay single-threaded: enable/disable of tables has
  // no fencing, so running more than one instance at a time would be unsafe.
  this.executorService.startExecutorService(ExecutorType.MASTER_TABLE_OPERATIONS, 1);

  // Daemon chore that prunes old WAL files from the old-logs directory.
  String threadNamePrefix = Thread.currentThread().getName();
  int cleanerInterval = conf.getInt("hbase.master.cleaner.interval", 60 * 1000);
  this.logCleaner = new LogCleaner(cleanerInterval, this, conf,
      getMasterFileSystem().getFileSystem(), getMasterFileSystem().getOldLogDir());
  Threads.setDaemonThreadRunning(logCleaner.getThread(), threadNamePrefix + ".oldLogCleaner");

  // Daemon chore that prunes archived HFiles.
  Path archiveDir = HFileArchiveUtil.getArchivePath(conf);
  this.hfileCleaner = new HFileCleaner(cleanerInterval, this, conf,
      getMasterFileSystem().getFileSystem(), archiveDir);
  Threads.setDaemonThreadRunning(hfileCleaner.getThread(),
      threadNamePrefix + ".archivedHFileCleaner");

  // Web UI; a negative port disables it.
  int infoPort = this.conf.getInt("hbase.master.info.port", 60010);
  if (infoPort >= 0) {
    String bindAddress = this.conf.get("hbase.master.info.bindAddress", "0.0.0.0");
    this.infoServer = new InfoServer(MASTER, bindAddress, infoPort, false, this.conf);
    this.infoServer.addServlet("status", "/master-status", MasterStatusServlet.class);
    this.infoServer.addServlet("dump", "/dump", MasterDumpServlet.class);
    this.infoServer.setAttribute(MASTER, this);
    this.infoServer.start();
  }

  // Health checker chore, when one is configured.
  if (this.healthCheckChore != null) {
    Threads.setDaemonThreadRunning(this.healthCheckChore.getThread(),
        threadNamePrefix + ".healthChecker");
  }

  // Everything is up; start accepting requests.
  this.rpcServer.openServer();
  if (LOG.isDebugEnabled()) {
    LOG.debug("Started service threads");
  }
}
项目:hindex    文件:TestLogsCleaner.java   
/**
 * Exercises the log-cleaner chore: after seeding the old-logs directory with
 * invalid, expired, recent and replication-queued WAL files, a single chore
 * run must delete everything except the recent files and the WALs still
 * queued for replication.
 */
@Test
public void testLogCleaning() throws Exception{
  Configuration conf = TEST_UTIL.getConfiguration();
  // TTL (ms) enforced by the first cleaner in the chain (TimeToLiveLogCleaner).
  long ttl = 2000;
  conf.setLong("hbase.master.logcleaner.ttl", ttl);
  conf.setBoolean(HConstants.REPLICATION_ENABLE_KEY, true);
  Replication.decorateMasterConfiguration(conf);
  Server server = new DummyServer();
  ReplicationZookeeper zkHelper =
      new ReplicationZookeeper(server, new AtomicBoolean(true));

  Path oldLogDir = new Path(TEST_UTIL.getDataTestDir(),
      HConstants.HREGION_OLDLOGDIR_NAME);
  // WAL names created below are prefixed with the URL-encoded server name.
  String fakeMachineName =
    URLEncoder.encode(server.getServerName().toString(), "UTF8");

  FileSystem fs = FileSystem.get(conf);
  LogCleaner cleaner  = new LogCleaner(1000, server, conf, fs, oldLogDir);

  // Create 2 invalid files, 1 "recent" file, 1 very new file and 30 old files
  long now = System.currentTimeMillis();
  fs.delete(oldLogDir, true);
  fs.mkdirs(oldLogDir);
  // Case 1: 2 invalid files, which would be deleted directly
  fs.createNewFile(new Path(oldLogDir, "a"));
  fs.createNewFile(new Path(oldLogDir, fakeMachineName + "." + "a"));
  // Case 2: 1 "recent" file, not even deletable for the first log cleaner
  // (TimeToLiveLogCleaner), so we are not going down the chain
  // (that file itself is created after the loop below).
  System.out.println("Now is: " + now);
  for (int i = 1; i < 31; i++) {
    // Case 3: old files which would be deletable for the first log cleaner
    // (TimeToLiveLogCleaner), and also for the second (ReplicationLogCleaner)
    Path fileName = new Path(oldLogDir, fakeMachineName + "." + (now - i) );
    fs.createNewFile(fileName);
    // Case 4: put 3 old log files in ZK indicating that they are scheduled
    // for replication so these files would pass the first log cleaner
    // (TimeToLiveLogCleaner) but would be rejected by the second
    // (ReplicationLogCleaner); i = 1, 11, 21 match the modulus below.
    if (i % (30/3) == 1) {
      zkHelper.addLogToList(fileName.getName(), fakeMachineName);
      System.out.println("Replication log file: " + fileName);
    }
  }

  // Sleep past the TTL so the files created below get a newer modification time.
  Thread.sleep(ttl);
  fs.createNewFile(new Path(oldLogDir, fakeMachineName + "." + now));

  // Case 2: 1 newer file, not even deletable for the first log cleaner
  // (TimeToLiveLogCleaner), so we are not going down the chain
  fs.createNewFile(new Path(oldLogDir, fakeMachineName + "." + (now + 10000) ));

  for (FileStatus stat : fs.listStatus(oldLogDir)) {
    System.out.println(stat.getPath().toString());
  }

  // 2 invalid + 30 old + 1 recent + 1 future-stamped = 34 files before cleaning.
  assertEquals(34, fs.listStatus(oldLogDir).length);

  cleaner.chore();

  // We end up with the current log file, a newer one and the 3 old log
  // files which are scheduled for replication
  assertEquals(5, fs.listStatus(oldLogDir).length);

  for (FileStatus file : fs.listStatus(oldLogDir)) {
    System.out.println("Kept log files: " + file.getPath().getName());
  }
}