Example source code for the Java class org.apache.commons.logging.impl.Log4JLogger
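Every snippet below follows the same basic move: a commons-logging Log is checked against (or cast to) org.apache.commons.logging.impl.Log4JLogger so that the wrapped log4j Logger can be reconfigured at runtime (levels, appenders). As a minimal, self-contained sketch of that pattern, assuming commons-logging 1.x with a log4j 1.2 backend (the class name here is illustrative, not from any of the projects below):

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.commons.logging.impl.Log4JLogger;
import org.apache.log4j.Level;
import org.apache.log4j.Logger;

public class Log4JLoggerUnwrapDemo {
  public static void main(String[] args) {
    Log log = LogFactory.getLog(Log4JLoggerUnwrapDemo.class);
    // The Log interface exposes no level or appender control, so runtime
    // reconfiguration requires unwrapping the underlying log4j Logger.
    if (log instanceof Log4JLogger) {
      Logger logger = ((Log4JLogger) log).getLogger();
      logger.setLevel(Level.ALL);
    }
  }
}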

Project: hadoop    File: FSNamesystem.java
private static void enableAsyncAuditLog() {
  if (!(auditLog instanceof Log4JLogger)) {
    LOG.warn("Log4j is required to enable async auditlog");
    return;
  }
  Logger logger = ((Log4JLogger)auditLog).getLogger();
  @SuppressWarnings("unchecked")
  List<Appender> appenders = Collections.list(logger.getAllAppenders());
  // failsafe against trying to async it more than once
  if (!appenders.isEmpty() && !(appenders.get(0) instanceof AsyncAppender)) {
    AsyncAppender asyncAppender = new AsyncAppender();
    // change logger to have an async appender containing all the
    // previously configured appenders
    for (Appender appender : appenders) {
      logger.removeAppender(appender);
      asyncAppender.addAppender(appender);
    }
    logger.addAppender(asyncAppender);
  }
}
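For reference, the wrap-in-AsyncAppender step above in standalone form; a sketch assuming plain log4j 1.2, with an illustrative logger name and console appender rather than the Hadoop audit logger:

import java.util.Collections;
import java.util.List;
import org.apache.log4j.Appender;
import org.apache.log4j.AsyncAppender;
import org.apache.log4j.ConsoleAppender;
import org.apache.log4j.Logger;
import org.apache.log4j.PatternLayout;

public class AsyncWrapDemo {
  public static void main(String[] args) {
    Logger logger = Logger.getLogger("demo.audit");
    logger.addAppender(new ConsoleAppender(new PatternLayout("%m%n")));

    @SuppressWarnings("unchecked")
    List<Appender> appenders = Collections.list(logger.getAllAppenders());
    // same failsafe as above: skip if the logger is already async
    if (!appenders.isEmpty() && !(appenders.get(0) instanceof AsyncAppender)) {
      AsyncAppender asyncAppender = new AsyncAppender();
      // move every existing appender behind the async one
      for (Appender appender : appenders) {
        logger.removeAppender(appender);
        asyncAppender.addAppender(appender);
      }
      logger.addAppender(asyncAppender);
    }
    logger.info("delivered through the async appender");
  }
}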
Project: hadoop    File: TestAuditLogs.java
private void configureAuditLogs() throws IOException {
  // Shutdown the LogManager to release all logger open file handles.
  // Unfortunately, Apache commons logging library does not provide
  // means to release underlying loggers. For additional info look up
  // commons library FAQ.
  LogManager.shutdown();

  File file = new File(auditLogFile);
  if (file.exists()) {
    assertTrue(file.delete());
  }
  Logger logger = ((Log4JLogger) FSNamesystem.auditLog).getLogger();
  // disable logging while the cluster startup preps files
  logger.setLevel(Level.OFF);
  PatternLayout layout = new PatternLayout("%m%n");
  RollingFileAppender appender = new RollingFileAppender(layout, auditLogFile);
  logger.addAppender(appender);
}
Project: ditb    File: TestScannerHeartbeatMessages.java
@BeforeClass
public static void setUpBeforeClass() throws Exception {
  ((Log4JLogger) ScannerCallable.LOG).getLogger().setLevel(Level.ALL);
  ((Log4JLogger) HeartbeatRPCServices.LOG).getLogger().setLevel(Level.ALL);
  Configuration conf = TEST_UTIL.getConfiguration();

  conf.setStrings(HConstants.REGION_IMPL, HeartbeatHRegion.class.getName());
  conf.setStrings(HConstants.REGION_SERVER_IMPL, HeartbeatHRegionServer.class.getName());
  conf.setInt(HConstants.HBASE_CLIENT_SCANNER_TIMEOUT_PERIOD, CLIENT_TIMEOUT);
  conf.setInt(HConstants.HBASE_RPC_TIMEOUT_KEY, CLIENT_TIMEOUT);
  conf.setInt(HConstants.HBASE_CLIENT_PAUSE, 1);

  // Check the timeout condition after every cell
  conf.setLong(StoreScanner.HBASE_CELLS_SCANNED_PER_HEARTBEAT_CHECK, 1);
  TEST_UTIL.startMiniCluster(1);

  TABLE = createTestTable(TABLE_NAME, ROWS, FAMILIES, QUALIFIERS, VALUE);
}
Project: ditb    File: TestIPCUtil.java
/**
 * For running a few tests of methods herein.
 * @param args
 * @throws IOException
 */
public static void main(String[] args) throws IOException {
  int count = 1024;
  int size = 10240;
  for (String arg: args) {
    if (arg.startsWith(COUNT)) {
      count = Integer.parseInt(arg.replace(COUNT, ""));
    } else if (arg.startsWith(SIZE)) {
      size = Integer.parseInt(arg.replace(SIZE, ""));
    } else {
      usage(1);
    }
  }
  IPCUtil util = new IPCUtil(HBaseConfiguration.create());
  ((Log4JLogger)IPCUtil.LOG).getLogger().setLevel(Level.ALL);
  timerTests(util, count, size, new KeyValueCodec(), null);
  timerTests(util, count, size, new KeyValueCodec(), new DefaultCodec());
  timerTests(util, count, size, new KeyValueCodec(), new GzipCodec());
}
Project: aliyun-oss-hadoop-fs    File: AdHocLogDumper.java
@Override
public void run() {
  Log log = LogFactory.getLog(name);
  if (log instanceof Log4JLogger) {
    Logger logger = ((Log4JLogger) log).getLogger();
    logger.removeAppender(AD_HOC_DUMPER_APPENDER);
    logger.setLevel(currentLogLevel);
    for (Enumeration<?> appenders = Logger.getRootLogger().getAllAppenders();
        appenders.hasMoreElements();) {
      Object obj = appenders.nextElement();
      if (obj instanceof AppenderSkeleton) {
        AppenderSkeleton appender = (AppenderSkeleton) obj;
        appender.setThreshold(appenderLevels.get(appender.getName()));
      }
    }
    logFlag = false;
    LOG.info("Done dumping adhoc logs for " + name);
  }
}
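The appenderLevels map consulted above must have been filled in when the dump started. A hypothetical sketch of that capture step (log4j 1.2; the class and method names are assumptions for illustration, not the actual AdHocLogDumper source):

import java.util.Enumeration;
import java.util.HashMap;
import java.util.Map;
import org.apache.log4j.AppenderSkeleton;
import org.apache.log4j.Logger;
import org.apache.log4j.Priority;

public class ThresholdSnapshot {
  // Record each root appender's current threshold so it can be restored
  // after the ad-hoc dump, mirroring the restore loop in run() above.
  static Map<String, Priority> snapshot() {
    Map<String, Priority> levels = new HashMap<String, Priority>();
    for (Enumeration<?> appenders = Logger.getRootLogger().getAllAppenders();
        appenders.hasMoreElements();) {
      Object obj = appenders.nextElement();
      if (obj instanceof AppenderSkeleton) {
        AppenderSkeleton appender = (AppenderSkeleton) obj;
        levels.put(appender.getName(), appender.getThreshold());
      }
    }
    return levels;
  }
}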
Project: aliyun-oss-hadoop-fs    File: ErrorsAndWarningsBlock.java
MetricsBase(ViewContext ctx) {
  super(ctx);
  cutoffs = new ArrayList<>();

  // cutoffs are epoch-second timestamps: now minus 1 min, 5 min, 15 min,
  // 1 hour, 6 hours, 12 hours, and 24 hours
  long now = Time.now();
  cutoffs.add((now - 60 * 1000) / 1000);
  cutoffs.add((now - 300 * 1000) / 1000);
  cutoffs.add((now - 900 * 1000) / 1000);
  cutoffs.add((now - 3600 * 1000) / 1000);
  cutoffs.add((now - 21600 * 1000) / 1000);
  cutoffs.add((now - 43200 * 1000) / 1000);
  cutoffs.add((now - 86400 * 1000) / 1000); // 24 hours = 86400 seconds

  Log log = LogFactory.getLog(ErrorsAndWarningsBlock.class);
  if (log instanceof Log4JLogger) {
    appender = Log4jWarningErrorMetricsAppender.findAppender();
  }
}
Project: aliyun-oss-hadoop-fs    File: ErrorsAndWarningsBlock.java
@Override
protected void render(Block html) {
  Log log = LogFactory.getLog(ErrorsAndWarningsBlock.class);
  if (log instanceof Log4JLogger) {
    Hamlet.DIV<Hamlet> div =
        html.div().$class("metrics").$style("padding-bottom: 20px");
    div.h3(tableHeading).table("#metricsoverview").thead()
      .$class("ui-widget-header").tr().th().$class("ui-state-default")
      ._("Last 1 minute")._().th().$class("ui-state-default")
      ._("Last 5 minutes")._().th().$class("ui-state-default")
      ._("Last 15 minutes")._().th().$class("ui-state-default")
      ._("Last 1 hour")._().th().$class("ui-state-default")
      ._("Last 6 hours")._().th().$class("ui-state-default")
      ._("Last 12 hours")._().th().$class("ui-state-default")
      ._("Last 24 hours")._()._()._().tbody().$class("ui-widget-content")
      .tr().td(String.valueOf(values.get(0)))
      .td(String.valueOf(values.get(1))).td(String.valueOf(values.get(2)))
      .td(String.valueOf(values.get(3))).td(String.valueOf(values.get(4)))
      .td(String.valueOf(values.get(5))).td(String.valueOf(values.get(6)))
      ._()._()._();
    div._();
  }
}
Project: aliyun-oss-hadoop-fs    File: FSNamesystem.java
private static void enableAsyncAuditLog() {
  if (!(auditLog instanceof Log4JLogger)) {
    LOG.warn("Log4j is required to enable async auditlog");
    return;
  }
  Logger logger = ((Log4JLogger)auditLog).getLogger();
  @SuppressWarnings("unchecked")
  List<Appender> appenders = Collections.list(logger.getAllAppenders());
  // failsafe against trying to async it more than once
  if (!appenders.isEmpty() && !(appenders.get(0) instanceof AsyncAppender)) {
    AsyncAppender asyncAppender = new AsyncAppender();
    // change logger to have an async appender containing all the
    // previously configured appenders
    for (Appender appender : appenders) {
      logger.removeAppender(appender);
      asyncAppender.addAppender(appender);
    }
    logger.addAppender(asyncAppender);
  }
}
Project: aliyun-oss-hadoop-fs    File: MetricsLoggerTask.java
/**
 * Make the metrics logger async and add all pre-existing appenders to the
 * async appender.
 */
public static void makeMetricsLoggerAsync(Log metricsLog) {
  if (!(metricsLog instanceof Log4JLogger)) {
    LOG.warn("Metrics logging will not be async since "
        + "the logger is not log4j");
    return;
  }
  org.apache.log4j.Logger logger = ((Log4JLogger) metricsLog).getLogger();
  logger.setAdditivity(false); // Don't pollute actual logs with metrics dump

  @SuppressWarnings("unchecked")
  List<Appender> appenders = Collections.list(logger.getAllAppenders());
  // failsafe against trying to async it more than once
  if (!appenders.isEmpty() && !(appenders.get(0) instanceof AsyncAppender)) {
    AsyncAppender asyncAppender = new AsyncAppender();
    // change logger to have an async appender containing all the
    // previously configured appenders
    for (Appender appender : appenders) {
      logger.removeAppender(appender);
      asyncAppender.addAppender(appender);
    }
    logger.addAppender(asyncAppender);
  }
}
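The setAdditivity(false) call is what keeps the metrics dump out of the ordinary logs: without it, events sent to the metrics logger would also propagate to the root logger's appenders. A small illustrative sketch (log4j 1.2; the logger names are made up):

import org.apache.log4j.ConsoleAppender;
import org.apache.log4j.Logger;
import org.apache.log4j.PatternLayout;

public class AdditivityDemo {
  public static void main(String[] args) {
    Logger root = Logger.getRootLogger();
    root.addAppender(new ConsoleAppender(new PatternLayout("root: %m%n")));

    Logger metrics = Logger.getLogger("metrics.demo");
    metrics.addAppender(new ConsoleAppender(new PatternLayout("metrics: %m%n")));

    metrics.info("printed twice: own appender plus the root appender");
    metrics.setAdditivity(false);
    metrics.info("printed once: own appender only");
  }
}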
Project: aliyun-oss-hadoop-fs    File: TestAuditLogs.java
private void configureAuditLogs() throws IOException {
  // Shutdown the LogManager to release all logger open file handles.
  // Unfortunately, Apache commons logging library does not provide
  // means to release underlying loggers. For additional info look up
  // commons library FAQ.
  LogManager.shutdown();

  File file = new File(auditLogFile);
  if (file.exists()) {
    assertTrue(file.delete());
  }
  Logger logger = ((Log4JLogger) FSNamesystem.auditLog).getLogger();
  // disable logging while the cluster startup preps files
  logger.setLevel(Level.OFF);
  PatternLayout layout = new PatternLayout("%m%n");
  RollingFileAppender appender = new RollingFileAppender(layout, auditLogFile);
  logger.addAppender(appender);
}
Project: big-c    File: FSNamesystem.java
private static void enableAsyncAuditLog() {
  if (!(auditLog instanceof Log4JLogger)) {
    LOG.warn("Log4j is required to enable async auditlog");
    return;
  }
  Logger logger = ((Log4JLogger)auditLog).getLogger();
  @SuppressWarnings("unchecked")
  List<Appender> appenders = Collections.list(logger.getAllAppenders());
  // failsafe against trying to async it more than once
  if (!appenders.isEmpty() && !(appenders.get(0) instanceof AsyncAppender)) {
    AsyncAppender asyncAppender = new AsyncAppender();
    // change logger to have an async appender containing all the
    // previously configured appenders
    for (Appender appender : appenders) {
      logger.removeAppender(appender);
      asyncAppender.addAppender(appender);
    }
    logger.addAppender(asyncAppender);
  }
}
Project: big-c    File: TestAuditLogs.java
private void configureAuditLogs() throws IOException {
  // Shutdown the LogManager to release all logger open file handles.
  // Unfortunately, Apache commons logging library does not provide
  // means to release underlying loggers. For additional info look up
  // commons library FAQ.
  LogManager.shutdown();

  File file = new File(auditLogFile);
  if (file.exists()) {
    assertTrue(file.delete());
  }
  Logger logger = ((Log4JLogger) FSNamesystem.auditLog).getLogger();
  // disable logging while the cluster startup preps files
  logger.setLevel(Level.OFF);
  PatternLayout layout = new PatternLayout("%m%n");
  RollingFileAppender appender = new RollingFileAppender(layout, auditLogFile);
  logger.addAppender(appender);
}
Project: flink    File: HBaseTestingClusterAutostarter.java
@BeforeClass
public static void setUp() throws Exception {
    LOG.info("HBase minicluster: Starting");
    ((Log4JLogger) RpcServer.LOG).getLogger().setLevel(Level.ALL);
    ((Log4JLogger) AbstractRpcClient.LOG).getLogger().setLevel(Level.ALL);
    ((Log4JLogger) ScannerCallable.LOG).getLogger().setLevel(Level.ALL);

    TEST_UTIL.startMiniCluster(1);

    // https://issues.apache.org/jira/browse/HBASE-11711
    TEST_UTIL.getConfiguration().setInt("hbase.master.info.port", -1);

    // Make sure the zookeeper quorum value contains the right port number (varies per run).
    TEST_UTIL.getConfiguration().set("hbase.zookeeper.quorum", "localhost:" + TEST_UTIL.getZkCluster().getClientPort());

    conf = initialize(TEST_UTIL.getConfiguration());
    LOG.info("HBase minicluster: Running");
}
Project: flink    File: HBaseTestingClusterAutostarter.java
@BeforeClass
public static void setUp() throws Exception {
    LOG.info("HBase minicluster: Starting");
    ((Log4JLogger) RpcServer.LOG).getLogger().setLevel(Level.ALL);
    ((Log4JLogger) AbstractRpcClient.LOG).getLogger().setLevel(Level.ALL);
    ((Log4JLogger) ScannerCallable.LOG).getLogger().setLevel(Level.ALL);

    TEST_UTIL.startMiniCluster(1);

    // https://issues.apache.org/jira/browse/HBASE-11711
    TEST_UTIL.getConfiguration().setInt("hbase.master.info.port", -1);

    // Make sure the zookeeper quorum value contains the right port number (varies per run).
    TEST_UTIL.getConfiguration().set("hbase.zookeeper.quorum", "localhost:" + TEST_UTIL.getZkCluster().getClientPort());

    initialize(TEST_UTIL.getConfiguration());
    LOG.info("HBase minicluster: Running");
}
Project: hadoop-2.6.0-cdh5.4.3    File: FSNamesystem.java
private static void enableAsyncAuditLog() {
  if (!(auditLog instanceof Log4JLogger)) {
    LOG.warn("Log4j is required to enable async auditlog");
    return;
  }
  Logger logger = ((Log4JLogger)auditLog).getLogger();
  @SuppressWarnings("unchecked")
  List<Appender> appenders = Collections.list(logger.getAllAppenders());
  // failsafe against trying to async it more than once
  if (!appenders.isEmpty() && !(appenders.get(0) instanceof AsyncAppender)) {
    AsyncAppender asyncAppender = new AsyncAppender();
    // change logger to have an async appender containing all the
    // previously configured appenders
    for (Appender appender : appenders) {
      logger.removeAppender(appender);
      asyncAppender.addAppender(appender);
    }
    logger.addAppender(asyncAppender);
  }
}
Project: hadoop-2.6.0-cdh5.4.3    File: TestAuditLogs.java
private void configureAuditLogs() throws IOException {
  // Shutdown the LogManager to release all logger open file handles.
  // Unfortunately, Apache commons logging library does not provide
  // means to release underlying loggers. For additional info look up
  // commons library FAQ.
  LogManager.shutdown();

  File file = new File(auditLogFile);
  if (file.exists()) {
    assertTrue(file.delete());
  }
  Logger logger = ((Log4JLogger) FSNamesystem.auditLog).getLogger();
  // disable logging while the cluster startup preps files
  logger.setLevel(Level.OFF);
  PatternLayout layout = new PatternLayout("%m%n");
  RollingFileAppender appender = new RollingFileAppender(layout, auditLogFile);
  logger.addAppender(appender);
}
Project: hadoop-EAR    File: TestHftpFileSystem.java
/**
 * Setup hadoop mini-cluster for test.
 */
private static void oneTimeSetUp() throws IOException {
  ((Log4JLogger)HftpFileSystem.LOG).getLogger().setLevel(Level.ALL);

  final long seed = RAN.nextLong();
  System.out.println("seed=" + seed);
  RAN.setSeed(seed);

  config = new Configuration();
  config.set(FSConstants.SLAVE_HOST_NAME, "localhost");

  cluster = new MiniDFSCluster(config, 2, true, null);
  hdfs = cluster.getFileSystem();
  final String hftpuri = "hftp://" + config.get("dfs.http.address");
  System.out.println("hftpuri=" + hftpuri);
  hftpFs = (HftpFileSystem) new Path(hftpuri).getFileSystem(config);
}
Project: hadoop-plus    File: TestAuditLogs.java
/** Sets up log4j logger for auditlogs */
private void setupAuditLogs() throws IOException {
  // Shutdown the LogManager to release all logger open file handles.
  // Unfortunately, Apache commons logging library does not provide
  // means to release underlying loggers. For additional info look up
  // commons library FAQ.
  LogManager.shutdown();

  File file = new File(auditLogFile);
  if (file.exists()) {
    assertTrue(file.delete());
  }
  Logger logger = ((Log4JLogger) FSNamesystem.auditLog).getLogger();
  logger.setLevel(Level.INFO);
  PatternLayout layout = new PatternLayout("%m%n");
  RollingFileAppender appender = new RollingFileAppender(layout, auditLogFile);
  logger.addAppender(appender);
}
Project: hadoop-plus    File: TestAuditLogs.java
private void verifyAuditLogsRepeat(boolean expectSuccess, int ndupe)
    throws IOException {
  // Turn off the logs
  Logger logger = ((Log4JLogger) FSNamesystem.auditLog).getLogger();
  logger.setLevel(Level.OFF);

  BufferedReader reader = new BufferedReader(new FileReader(auditLogFile));
  String line = null;
  boolean ret = true;

  try {
    for (int i = 0; i < ndupe; i++) {
      line = reader.readLine();
      assertNotNull(line);
      assertTrue("Expected audit event not found in audit log",
          auditPattern.matcher(line).matches());
      ret &= successPattern.matcher(line).matches();
    }
    assertNull("Unexpected event in audit log", reader.readLine());
    assertTrue("Expected success=" + expectSuccess, ret == expectSuccess);
  } finally {
    reader.close();
  }
}
Project: hadoop-plus    File: TestFsck.java
private void verifyAuditLogs() throws IOException {
  // Turn off the logs
  Logger logger = ((Log4JLogger) FSNamesystem.auditLog).getLogger();
  logger.setLevel(Level.OFF);

  // Audit log should contain one getfileinfo and one fsck
  BufferedReader reader = new BufferedReader(new FileReader(auditLogFile));
  String line = reader.readLine();
  assertNotNull(line);
  assertTrue("Expected getfileinfo event not found in audit log",
      getfileinfoPattern.matcher(line).matches());
  line = reader.readLine();
  assertNotNull(line);
  assertTrue("Expected fsck event not found in audit log",
      fsckPattern.matcher(line).matches());
  assertNull("Unexpected event in audit log", reader.readLine());
}
Project: hadoop-plus    File: TestMultipleNNDataBlockScanner.java
@Test(timeout=120000)
public void test2NNBlockRescanInterval() throws IOException {
  ((Log4JLogger)BlockPoolSliceScanner.LOG).getLogger().setLevel(Level.ALL);
  Configuration conf = new HdfsConfiguration();
  cluster = new MiniDFSCluster.Builder(conf)
      .nnTopology(MiniDFSNNTopology.simpleFederatedTopology(3))
      .build();

  try {
    FileSystem fs = cluster.getFileSystem(1);
    Path file2 = new Path("/test/testBlockScanInterval");
    DFSTestUtil.createFile(fs, file2, 30, (short) 1, 0);

    fs = cluster.getFileSystem(0);
    Path file1 = new Path("/test/testBlockScanInterval");
    DFSTestUtil.createFile(fs, file1, 30, (short) 1, 0);
    for (int i = 0; i < 8; i++) {
      LOG.info("Verifying that the blockscanner scans exactly once");
      waitAndScanBlocks(1, 1);
    }
  } finally {
    cluster.shutdown();
  }
}
Project: hadoop-plus    File: TestMultipleNNDataBlockScanner.java
/**
 * HDFS-3828: DN rescans blocks too frequently
 * 
 * @throws IOException
 */
@Test(timeout=120000)
public void testBlockRescanInterval() throws IOException {
  ((Log4JLogger)BlockPoolSliceScanner.LOG).getLogger().setLevel(Level.ALL);
  Configuration conf = new HdfsConfiguration();
  cluster = new MiniDFSCluster.Builder(conf).build();

  try {
    FileSystem fs = cluster.getFileSystem();
    Path file1 = new Path("/test/testBlockScanInterval");
    DFSTestUtil.createFile(fs, file1, 30, (short) 1, 0);
    for (int i = 0; i < 4; i++) {
      LOG.info("Verifying that the blockscanner scans exactly once");
      waitAndScanBlocks(1, 1);
    }
  } finally {
    cluster.shutdown();
  }
}
Project: FlexMap    File: FSNamesystem.java
private static void enableAsyncAuditLog() {
  if (!(auditLog instanceof Log4JLogger)) {
    LOG.warn("Log4j is required to enable async auditlog");
    return;
  }
  Logger logger = ((Log4JLogger)auditLog).getLogger();
  @SuppressWarnings("unchecked")
  List<Appender> appenders = Collections.list(logger.getAllAppenders());
  // failsafe against trying to async it more than once
  if (!appenders.isEmpty() && !(appenders.get(0) instanceof AsyncAppender)) {
    AsyncAppender asyncAppender = new AsyncAppender();
    // change logger to have an async appender containing all the
    // previously configured appenders
    for (Appender appender : appenders) {
      logger.removeAppender(appender);
      asyncAppender.addAppender(appender);
    }
    logger.addAppender(asyncAppender);
  }
}
Project: FlexMap    File: TestAuditLogs.java
private void configureAuditLogs() throws IOException {
  // Shutdown the LogManager to release all logger open file handles.
  // Unfortunately, Apache commons logging library does not provide
  // means to release underlying loggers. For additional info look up
  // commons library FAQ.
  LogManager.shutdown();

  File file = new File(auditLogFile);
  if (file.exists()) {
    assertTrue(file.delete());
  }
  Logger logger = ((Log4JLogger) FSNamesystem.auditLog).getLogger();
  // disable logging while the cluster startup preps files
  logger.setLevel(Level.OFF);
  PatternLayout layout = new PatternLayout("%m%n");
  RollingFileAppender appender = new RollingFileAppender(layout, auditLogFile);
  logger.addAppender(appender);
}
Project: hops    File: TestAuditLogs.java
private void verifyAuditLogsRepeat(boolean expectSuccess, int ndupe)
    throws IOException {
  // Turn off the logs
  Logger logger = ((Log4JLogger) FSNamesystem.auditLog).getLogger();
  logger.setLevel(Level.OFF);

  BufferedReader reader = new BufferedReader(new FileReader(auditLogFile));
  String line = null;
  boolean ret = true;

  try {
    for (int i = 0; i < ndupe; i++) {
      line = reader.readLine();
      assertNotNull(line);
      assertTrue("Expected audit event not found in audit log",
          auditPattern.matcher(line).matches());
      ret &= successPattern.matcher(line).matches();
    }
    assertNull("Unexpected event in audit log", reader.readLine());
    assertTrue("Expected success=" + expectSuccess, ret == expectSuccess);
  } finally {
    reader.close();
  }
}
Project: FlexMap    File: TestMultipleNNDataBlockScanner.java
@Test(timeout=120000)
public void test2NNBlockRescanInterval() throws IOException {
  ((Log4JLogger)BlockPoolSliceScanner.LOG).getLogger().setLevel(Level.ALL);
  Configuration conf = new HdfsConfiguration();
  cluster = new MiniDFSCluster.Builder(conf)
      .nnTopology(MiniDFSNNTopology.simpleFederatedTopology(3))
      .build();

  try {
    FileSystem fs = cluster.getFileSystem(1);
    Path file2 = new Path("/test/testBlockScanInterval");
    DFSTestUtil.createFile(fs, file2, 30, (short) 1, 0);

    fs = cluster.getFileSystem(0);
    Path file1 = new Path("/test/testBlockScanInterval");
    DFSTestUtil.createFile(fs, file1, 30, (short) 1, 0);
    for (int i = 0; i < 8; i++) {
      LOG.info("Verifying that the blockscanner scans exactly once");
      waitAndScanBlocks(1, 1);
    }
  } finally {
    cluster.shutdown();
  }
}
Project: FlexMap    File: TestMultipleNNDataBlockScanner.java
/**
 * HDFS-3828: DN rescans blocks too frequently
 * 
 * @throws IOException
 */
@Test(timeout=120000)
public void testBlockRescanInterval() throws IOException {
  ((Log4JLogger)BlockPoolSliceScanner.LOG).getLogger().setLevel(Level.ALL);
  Configuration conf = new HdfsConfiguration();
  cluster = new MiniDFSCluster.Builder(conf).build();

  try {
    FileSystem fs = cluster.getFileSystem();
    Path file1 = new Path("/test/testBlockScanInterval");
    DFSTestUtil.createFile(fs, file1, 30, (short) 1, 0);
    for (int i = 0; i < 4; i++) {
      LOG.info("Verifying that the blockscanner scans exactly once");
      waitAndScanBlocks(1, 1);
    }
  } finally {
    cluster.shutdown();
  }
}
Project: hops    File: TestFsck.java
private void verifyAuditLogs() throws IOException {
  // Turn off the logs
  Logger logger = ((Log4JLogger) FSNamesystem.auditLog).getLogger();
  logger.setLevel(Level.OFF);

  // Audit log should contain one getfileinfo and one fsck
  BufferedReader reader = new BufferedReader(new FileReader(auditLogFile));
  String line = reader.readLine();
  assertNotNull(line);
   assertTrue("Expected getfileinfo event not found in audit log",
      getfileinfoPattern.matcher(line).matches());
  line = reader.readLine();
  assertNotNull(line);
  assertTrue("Expected fsck event not found in audit log",
      fsckPattern.matcher(line).matches());
  assertNull("Unexpected event in audit log", reader.readLine());
}
Project: HIndex    File: TestIPCUtil.java
/**
 * For running a few tests of methods herein.
 * @param args
 * @throws IOException
 */
public static void main(String[] args) throws IOException {
  int count = 1024;
  int size = 10240;
  for (String arg: args) {
    if (arg.startsWith(COUNT)) {
      count = Integer.parseInt(arg.replace(COUNT, ""));
    } else if (arg.startsWith(SIZE)) {
      size = Integer.parseInt(arg.replace(SIZE, ""));
    } else {
      usage(1);
    }
  }
  IPCUtil util = new IPCUtil(HBaseConfiguration.create());
  ((Log4JLogger)IPCUtil.LOG).getLogger().setLevel(Level.ALL);
  timerTests(util, count, size, new KeyValueCodec(), null);
  timerTests(util, count, size, new KeyValueCodec(), new DefaultCodec());
  timerTests(util, count, size, new KeyValueCodec(), new GzipCodec());
}
Project: hops    File: AdHocLogDumper.java
@Override
public void run() {
  Log log = LogFactory.getLog(name);
  if (log instanceof Log4JLogger) {
    Logger logger = ((Log4JLogger) log).getLogger();
    logger.removeAppender(AD_HOC_DUMPER_APPENDER);
    logger.setLevel(currentLogLevel);
    for (Enumeration<?> appenders = Logger.getRootLogger().getAllAppenders();
        appenders.hasMoreElements();) {
      Object obj = appenders.nextElement();
      if (obj instanceof AppenderSkeleton) {
        AppenderSkeleton appender = (AppenderSkeleton) obj;
        appender.setThreshold(appenderLevels.get(appender.getName()));
      }
    }
    logFlag = false;
    LOG.info("Done dumping adhoc logs for " + name);
  }
}
Project: hops    File: ErrorsAndWarningsBlock.java
MetricsBase(ViewContext ctx) {
  super(ctx);
  cutoffs = new ArrayList<>();

  // cutoffs are epoch-second timestamps: now minus 1 min, 5 min, 15 min,
  // 1 hour, 6 hours, 12 hours, and 24 hours
  long now = Time.now();
  cutoffs.add((now - 60 * 1000) / 1000);
  cutoffs.add((now - 300 * 1000) / 1000);
  cutoffs.add((now - 900 * 1000) / 1000);
  cutoffs.add((now - 3600 * 1000) / 1000);
  cutoffs.add((now - 21600 * 1000) / 1000);
  cutoffs.add((now - 43200 * 1000) / 1000);
  cutoffs.add((now - 86400 * 1000) / 1000); // 24 hours = 86400 seconds

  Log log = LogFactory.getLog(ErrorsAndWarningsBlock.class);
  if (log instanceof Log4JLogger) {
    appender = Log4jWarningErrorMetricsAppender.findAppender();
  }
}
Project: hops    File: ErrorsAndWarningsBlock.java
@Override
protected void render(Block html) {
  Log log = LogFactory.getLog(ErrorsAndWarningsBlock.class);
  if (log instanceof Log4JLogger) {
    Hamlet.DIV<Hamlet> div =
        html.div().$class("metrics").$style("padding-bottom: 20px");
    div.h3(tableHeading).table("#metricsoverview").thead()
      .$class("ui-widget-header").tr().th().$class("ui-state-default")
      ._("Last 1 minute")._().th().$class("ui-state-default")
      ._("Last 5 minutes")._().th().$class("ui-state-default")
      ._("Last 15 minutes")._().th().$class("ui-state-default")
      ._("Last 1 hour")._().th().$class("ui-state-default")
      ._("Last 6 hours")._().th().$class("ui-state-default")
      ._("Last 12 hours")._().th().$class("ui-state-default")
      ._("Last 24 hours")._()._()._().tbody().$class("ui-widget-content")
      .tr().td(String.valueOf(values.get(0)))
      .td(String.valueOf(values.get(1))).td(String.valueOf(values.get(2)))
      .td(String.valueOf(values.get(3))).td(String.valueOf(values.get(4)))
      .td(String.valueOf(values.get(5))).td(String.valueOf(values.get(6)))
      ._()._()._();
    div._();
  }
}
Project: hadoop-oss    File: Crc32PerformanceTest.java
Crc32PerformanceTest(final int dataLengthMB, final int trials,
    final boolean direct) {
  this.dataLengthMB = dataLengthMB;
  this.trials = trials;
  this.direct = direct;

  crcs.add(Crc32.Zip.class);
  crcs.add(Crc32.PureJava.class);

  if (direct && NativeCrc32.isAvailable()) {
    crcs.add(Crc32.Native.class);
    ((Log4JLogger)LogFactory.getLog(NativeCodeLoader.class))
        .getLogger().setLevel(Level.ALL);
  }
}
Project: hadoop    File: TestWebHDFS.java
/** Test client retry with namenode restarting. */
@Test(timeout=300000)
public void testNamenodeRestart() throws Exception {
  ((Log4JLogger)NamenodeWebHdfsMethods.LOG).getLogger().setLevel(Level.ALL);
  final Configuration conf = WebHdfsTestUtil.createConf();
  TestDFSClientRetries.namenodeRestartTest(conf, true);
}
Project: hadoop    File: TestDelegationToken.java
@SuppressWarnings("deprecation")
@Test
public void testDelegationTokenWebHdfsApi() throws Exception {
  ((Log4JLogger)NamenodeWebHdfsMethods.LOG).getLogger().setLevel(Level.ALL);
  final String uri = WebHdfsFileSystem.SCHEME + "://"
      + config.get(DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY);
  //get file system as JobTracker
  final UserGroupInformation ugi = UserGroupInformation.createUserForTesting(
      "JobTracker", new String[]{"user"});
  final WebHdfsFileSystem webhdfs = ugi.doAs(
      new PrivilegedExceptionAction<WebHdfsFileSystem>() {
    @Override
    public WebHdfsFileSystem run() throws Exception {
      return (WebHdfsFileSystem)FileSystem.get(new URI(uri), config);
    }
  });

  { //test addDelegationTokens(..)
    Credentials creds = new Credentials();
    final Token<?> tokens[] = webhdfs.addDelegationTokens("JobTracker", creds);
    Assert.assertEquals(1, tokens.length);
    Assert.assertEquals(1, creds.numberOfTokens());
    Assert.assertSame(tokens[0], creds.getAllTokens().iterator().next());
    checkTokenIdentifier(ugi, tokens[0]);
    final Token<?> tokens2[] = webhdfs.addDelegationTokens("JobTracker", creds);
    Assert.assertEquals(0, tokens2.length);
  }
}
Project: hadoop    File: TestAuditLogs.java
@Before
public void setupCluster() throws Exception {
  // must configure prior to instantiating the namesystem because it
  // will reconfigure the logger if async is enabled
  configureAuditLogs();
  conf = new HdfsConfiguration();
  final long precision = 1L;
  conf.setLong(DFSConfigKeys.DFS_NAMENODE_ACCESSTIME_PRECISION_KEY, precision);
  conf.setLong(DFSConfigKeys.DFS_BLOCKREPORT_INTERVAL_MSEC_KEY, 10000L);
  conf.setBoolean(DFSConfigKeys.DFS_WEBHDFS_ENABLED_KEY, true);
  conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_AUDIT_LOG_ASYNC_KEY, useAsyncLog);
  util = new DFSTestUtil.Builder().setName("TestAuditAllowed").
      setNumFiles(20).build();
  cluster = new MiniDFSCluster.Builder(conf).numDataNodes(4).build();
  fs = cluster.getFileSystem();
  util.createFiles(fs, fileName);

  // make sure the appender is what it's supposed to be
  Logger logger = ((Log4JLogger) FSNamesystem.auditLog).getLogger();
  @SuppressWarnings("unchecked")
  List<Appender> appenders = Collections.list(logger.getAllAppenders());
  assertEquals(1, appenders.size());
  assertEquals(useAsyncLog, appenders.get(0) instanceof AsyncAppender);

  fnames = util.getFileNames(fileName);
  util.waitReplication(fs, fileName, (short)3);
  userGroupInfo = UserGroupInformation.createUserForTesting(username, groups);
}
Project: hadoop    File: TestAuditLogs.java
private void verifyAuditLogsRepeat(boolean expectSuccess, int ndupe)
    throws IOException {
  // Turn off the logs
  Logger logger = ((Log4JLogger) FSNamesystem.auditLog).getLogger();
  logger.setLevel(Level.OFF);

  // Close the appenders and force all logs to be flushed
  Enumeration<?> appenders = logger.getAllAppenders();
  while (appenders.hasMoreElements()) {
    Appender appender = (Appender)appenders.nextElement();
    appender.close();
  }

  BufferedReader reader = new BufferedReader(new FileReader(auditLogFile));
  String line = null;
  boolean ret = true;

  try {
    for (int i = 0; i < ndupe; i++) {
      line = reader.readLine();
      assertNotNull(line);
      assertTrue("Expected audit event not found in audit log",
          auditPattern.matcher(line).matches());
      ret &= successPattern.matcher(line).matches();
    }
    assertNull("Unexpected event in audit log", reader.readLine());
    assertTrue("Expected success=" + expectSuccess, ret == expectSuccess);
  } finally {
    reader.close();
  }
}
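The close-the-appenders loop above is what guarantees the file is complete before it is read back: closing an appender flushes buffered output, and closing an AsyncAppender drains its event queue first. The same idea in isolation (a sketch, log4j 1.2):

import java.util.Enumeration;
import org.apache.log4j.Appender;
import org.apache.log4j.Logger;

public class FlushByClose {
  // Close every appender attached to the logger so that all pending
  // events reach disk before a verifier reads the log file.
  static void closeAppenders(Logger logger) {
    for (Enumeration<?> appenders = logger.getAllAppenders();
        appenders.hasMoreElements();) {
      ((Appender) appenders.nextElement()).close();
    }
  }
}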
Project: hadoop    File: TestAuditLogs.java
private void verifyAuditLogsCheckPattern(boolean expectSuccess, int ndupe, Pattern pattern)
    throws IOException {
  // Turn off the logs
  Logger logger = ((Log4JLogger) FSNamesystem.auditLog).getLogger();
  logger.setLevel(Level.OFF);

  // Close the appenders and force all logs to be flushed
  Enumeration<?> appenders = logger.getAllAppenders();
  while (appenders.hasMoreElements()) {
    Appender appender = (Appender)appenders.nextElement();
    appender.close();
  }

  BufferedReader reader = new BufferedReader(new FileReader(auditLogFile));
  String line = null;
  boolean ret = true;
  boolean patternMatches = false;

  try {
    for (int i = 0; i < ndupe; i++) {
      line = reader.readLine();
      assertNotNull(line);
      patternMatches |= pattern.matcher(line).matches();
      ret &= successPattern.matcher(line).matches();
    }
    assertNull("Unexpected event in audit log", reader.readLine());
    assertTrue("Expected audit event not found in audit log", patternMatches);
    assertTrue("Expected success=" + expectSuccess, ret == expectSuccess);
  } finally {
    reader.close();
  }
}
Project: hadoop    File: TestFsck.java
static String runFsck(Configuration conf, int expectedErrCode, 
                      boolean checkErrorCode, String... path)
                      throws Exception {
  ByteArrayOutputStream bStream = new ByteArrayOutputStream();
  PrintStream out = new PrintStream(bStream, true);
  ((Log4JLogger)FSPermissionChecker.LOG).getLogger().setLevel(Level.ALL);
  int errCode = ToolRunner.run(new DFSck(conf, out), path);
  if (checkErrorCode) {
    assertEquals(expectedErrCode, errCode);
  }
  ((Log4JLogger)FSPermissionChecker.LOG).getLogger().setLevel(Level.INFO);
  FSImage.LOG.error("OUTPUT = " + bStream.toString());
  return bStream.toString();
}
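The capture trick in runFsck is plain java.io: hand the tool a PrintStream backed by an in-memory buffer, then read the buffer. In isolation (illustrative):

import java.io.ByteArrayOutputStream;
import java.io.PrintStream;

public class CaptureOutputDemo {
  public static void main(String[] args) {
    ByteArrayOutputStream bStream = new ByteArrayOutputStream();
    PrintStream out = new PrintStream(bStream, true); // autoflush enabled
    out.println("fsck report lines would land here");
    System.out.println("captured = " + bStream.toString());
  }
}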
Project: hadoop    File: TestFsck.java
/** Sets up log4j logger for auditlogs */
private void setupAuditLogs() throws IOException {
  File file = new File(auditLogFile);
  if (file.exists()) {
    file.delete();
  }
  Logger logger = ((Log4JLogger) FSNamesystem.auditLog).getLogger();
  logger.setLevel(Level.INFO);
  PatternLayout layout = new PatternLayout("%m%n");
  RollingFileAppender appender = new RollingFileAppender(layout, auditLogFile);
  logger.addAppender(appender);
}