我们从Python开源项目中,提取了以下30个代码示例,用于说明如何使用logging.setLoggerClass()。
def log_to_console(level=logging.WARNING, override_root_logger=False, **kwargs):
    """
    Configure the logging system to send log entries to the console.

    Note that the root logger will not log to Seq by default.

    :param level: The minimum level at which to log.
    :param override_root_logger: Override the root logger, too?
        Note - this might cause problems if third-party components try
        to be clever when using the logging.XXX functions.
    """
    # All loggers created from here on use the structured logger class.
    logging.setLoggerClass(StructuredLogger)

    if override_root_logger:
        _override_root_logger()

    console_handlers = [ConsoleStructuredLogHandler()]
    logging.basicConfig(
        level=level,
        style='{',
        handlers=console_handlers,
        **kwargs
    )
def setup(debug=False, statsd_host=None):
    """Configure stdout console logging and install the Odooku logger class.

    :param debug: When true, log at DEBUG instead of INFO.
    :param statsd_host: Optional statsd host stored on OdookuLogger.
    """
    level = 'DEBUG' if debug else 'INFO'

    loggers = {
        '': {
            'level': level,
            'handlers': ['console']
        },
    }
    handlers = {
        'console': {
            'class': 'logging.StreamHandler',
            'formatter': 'simple',
            'stream': sys.stdout
        },
    }
    formatters = {
        'simple': {
            'format': '[%(process)d] [%(levelname)s] %(message)s',
            'class': 'logging.Formatter'
        },
    }

    dictConfig(dict(
        version=1,
        disable_existing_loggers=True,
        loggers=loggers,
        handlers=handlers,
        formatters=formatters
    ))

    OdookuLogger._statsd_host = statsd_host
    logging.setLoggerClass(OdookuLogger)
    logging.addLevelName(25, 'INFO')

    # Prevent odoo from overriding log config
    import openerp.netsvc
    openerp.netsvc._logger_init = True
def initialize(self, p_config=None, p_override=None):
    """Install the wrapper logger class and build the logging facility.

    Loads configuration, then filters, formatters, handlers and loggers
    in that order; any failure is re-raised as an XtdError.
    """
    logging.setLoggerClass(WrapperLogger)
    self.load_config(p_config, p_override)
    try:
        # The four loaders must run in this order.
        for loader in (self._load_filters,
                       self._load_formatters,
                       self._load_handlers,
                       self._load_loggers):
            loader()
    except Exception as l_error:
        message = "unable to initialize logging facility : %s" % str(l_error)
        raise XtdError(__name__, message)
    tools.info(__name__, "facility initialized")

# Local Variables:
# ispell-local-dictionary: "american"
# End:
def set_logger_class():
    """Install StructuredLogger globally and open the root logger's level."""
    logging.setLoggerClass(StructuredLogger)
    root = logging.getLogger()
    root.setLevel(logging.NOTSET)
def install(cls):
    """Globally install the structured logger class.

    In case used as a library (rather than via the entrypoint), this is
    the earliest point at which the logger class can be installed when
    the talisker entry point is not in use.
    """
    logging.setLoggerClass(logs.StructuredLogger)
def exception(self, message='', *args, **kw):
    """Log *message* at ERROR level with exception info attached.

    An explicit truthy ``exc_info`` in *kw* is honoured; otherwise
    ``exc_info=True`` is used so the active exception is recorded.
    """
    info = kw.pop('exc_info', None)
    if not info:
        info = True
    self.error(message, *args, exc_info=info, **kw)
def getLogger(name):
    """Return the logger *name*, created with this module's Logger class.

    The previously-installed global logger class is always restored,
    even if logger creation fails.
    """
    previous = logging.getLoggerClass()
    try:
        logging.setLoggerClass(Logger)
        return logging.getLogger(name)
    finally:
        logging.setLoggerClass(previous)

# The main 'eyed3' logger
def __init__(self, module, log_file_path, fail_fast=False):
    """Logger that tallies problems and writes everything to a log file.

    :param module: Name passed to ``logging.getLogger`` for the wrapped logger.
    :param log_file_path: File receiving all records (truncated each run).
    :param fail_fast: Presumably makes the first error fatal — TODO confirm
        against the rest of the class.
    """
    super(DSRFLogger, self).__init__('')
    # First error encountered, if any (set later by callers of this class).
    self.first_error = None
    # Tally of logged problems, keyed by category.
    self._counts = defaultdict(int)
    self.fail_fast = fail_fast
    self.logger = logging.getLogger(module)
    self.log_file_path = log_file_path
    # filemode='w' truncates any previous log file on each run.
    logging.basicConfig( filename=log_file_path, filemode='w', level=logging.DEBUG)
    # Loggers created after this point are DSRFLogger instances.
    logging.setLoggerClass(DSRFLogger)
def init_logger(self) -> None:
    """Set up application logging: a rotating log file in the platform
    config directory, the in-app console widget, and (in debug/verbose
    mode) a stream handler. Also installs VideoLogger as the logger class
    and routes uncaught exceptions through the main window handler."""
    try:
        log_path = QStandardPaths.writableLocation(QStandardPaths.AppConfigLocation)
    except AttributeError:
        # Older Qt builds lack AppConfigLocation; derive a per-platform path.
        if sys.platform == 'win32':
            log_path = os.path.join(QDir.homePath(), 'AppData', 'Local', qApp.applicationName().lower())
        elif sys.platform == 'darwin':
            log_path = os.path.join(QDir.homePath(), 'Library', 'Preferences', qApp.applicationName().lower())
        else:
            log_path = os.path.join(QDir.homePath(), '.config', qApp.applicationName().lower())
    os.makedirs(log_path, exist_ok=True)
    self.console = ConsoleWidget(self)
    self.consoleLogger = ConsoleHandler(self.console)
    # Rotate at ~1 MB keeping one backup, and mirror records to the console widget.
    handlers = [logging.handlers.RotatingFileHandler(os.path.join(log_path, '%s.log' % qApp.applicationName().lower()),
                                                     maxBytes=1000000, backupCount=1),
                self.consoleLogger]
    if self.parser.isSet(self.debug_option) or self.verboseLogs:
        # noinspection PyTypeChecker
        handlers.append(logging.StreamHandler())
    # New loggers use VideoLogger from here on.
    logging.setLoggerClass(VideoLogger)
    logging.basicConfig(handlers=handlers,
                        format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',
                        datefmt='%Y-%m-%d %H:%M', level=logging.INFO)
    logging.captureWarnings(capture=True)
    sys.excepthook = MainWindow.log_uncaught_exceptions
def get_logger():
    """Return the non-propagating "Datastore" logger, created with the
    DataStoreLogger class and set to DataStore.LOG_LEVEL."""
    logging.setLoggerClass(DataStoreLogger)
    datastore_logger = logging.getLogger("Datastore")
    datastore_logger.setLevel(DataStore.LOG_LEVEL)
    datastore_logger.propagate = False
    return datastore_logger
def getLogger(name=None):
    """Create (or fetch) *name*'s logger using this module's Logger class,
    restoring the previously-installed class afterwards."""
    saved_class = logging.getLoggerClass()
    try:
        logging.setLoggerClass(Logger)
        return logging.getLogger(name)
    finally:
        logging.setLoggerClass(saved_class)
def init_logger(logger_name, logfile_name=__name__,
                logging_level=logging.DEBUG, log_path=settings.LOG_PATH):
    """Attach one file handler per severity under *log_path*, plus a DEBUG
    console handler, and return the configured logger.

    Each per-level file receives records at or above its level (so the
    debug file contains everything, the critical file only CRITICAL).
    """
    logging.setLoggerClass(AppLogger)
    formatter = logging.Formatter(
        '%(asctime)s %(name)s %(levelname)s %(message)s', '%Y-%m-%d %H:%M:%S')

    # One output file per severity, named <logfile_name>-<level>.log.
    level_suffixes = {
        logging.DEBUG: '-debug.log',
        logging.INFO: '-info.log',
        logging.WARNING: '-warning.log',
        logging.ERROR: '-error.log',
        logging.CRITICAL: '-critical.log',
    }

    logger = logging.getLogger(logger_name)
    logger.setLevel(logging_level)

    for level, suffix in level_suffixes.items():
        handler = logging.FileHandler(os.path.join(log_path, logfile_name + suffix))
        handler.setLevel(level)
        handler.setFormatter(formatter)
        logger.addHandler(handler)

    console_handler = logging.StreamHandler()
    console_handler.name = "console"
    console_handler.setLevel(logging.DEBUG)
    console_handler.setFormatter(formatter)
    logger.addHandler(console_handler)
    return logger
def hook():
    """Install better_exceptions: custom excepthook, logger class, and the
    logging patch. Warns when running inside an interactive session."""
    sys.excepthook = excepthook
    logging.setLoggerClass(BetExcLogger)
    patch_logging()
    # sys.ps1 only exists in interactive interpreters.
    interactive = hasattr(sys, 'ps1')
    if interactive:
        print('WARNING: better_exceptions will only inspect code from the command line\n'
              ' when using: `python -m better_exceptions\'. Otherwise, only code\n'
              ' loaded from files will be inspected!', file=sys.stderr)
def setup(debug=False, statsd_host=None):
    """Configure stderr console logging (so click commands keep stdout
    clean) and install the Odooku logger class.

    :param debug: When true, log at DEBUG instead of INFO.
    :param statsd_host: Optional statsd host stored on OdookuLogger.
    """
    level = 'DEBUG' if debug else 'INFO'

    loggers = {
        '': {
            'level': level,
            'handlers': ['console']
        },
    }
    handlers = {
        'console': {
            'class': 'logging.StreamHandler',
            'formatter': 'standard',
            # Log to stderr so that click commands can make
            # use of stdout
            'stream': sys.stderr
        },
    }
    formatters = {
        'standard': {
            'format': '[%(levelname)s]%(db)s%(message)s',
            '()': 'odooku.logger.DBFormatter'
        },
    }

    dictConfig(dict(
        version=1,
        disable_existing_loggers=True,
        loggers=loggers,
        handlers=handlers,
        formatters=formatters
    ))

    OdookuLogger._statsd_host = statsd_host
    logging.setLoggerClass(OdookuLogger)
    logging.addLevelName(25, 'INFO')

    # Prevent odoo from overriding log config
    import odoo.netsvc
    odoo.netsvc._logger_init = True
def test_manager_loggerclass(self): logged = [] class MyLogger(logging.Logger): def _log(self, level, msg, args, exc_info=None, extra=None): logged.append(msg) man = logging.Manager(None) self.assertRaises(TypeError, man.setLoggerClass, int) man.setLoggerClass(MyLogger) logger = man.getLogger('test') logger.warning('should appear in logged') logging.warning('should not appear in logged') self.assertEqual(logged, ['should appear in logged'])
def test_set_logger_class(self): self.assertRaises(TypeError, logging.setLoggerClass, object) class MyLogger(logging.Logger): pass logging.setLoggerClass(MyLogger) self.assertEqual(logging.getLoggerClass(), MyLogger) logging.setLoggerClass(logging.Logger) self.assertEqual(logging.getLoggerClass(), logging.Logger)
def getLogger(self, name):
    """Fetch the logger *name*, ensuring new loggers are SLogger instances."""
    logging.setLoggerClass(SLogger)
    logger = super(SManager, self).getLogger(name)
    return logger
def _get_logger(self, name):
    """Return the logger *name*, created with MPILogger.CustomLogger.

    The previously-installed logger class is restored afterwards; the
    try/finally guarantees restoration even if logger creation fails
    (the original restored it only on the success path).
    """
    old_class = logging.getLoggerClass()
    try:
        logging.setLoggerClass(MPILogger.CustomLogger)
        return logging.getLogger(name)
    finally:
        logging.setLoggerClass(old_class)

###########################################################################
# Standard logging functions. Log only at mpi rank 0.
def __init_log():
    """
    Initialize the GUI log.
    """
    global LOG

    class CustomHandler(logging.Logger):
        # NOTE(review): despite the name, this is a Logger subclass,
        # not a logging.Handler.

        def __init__(self, name, level=logging.NOTSET):
            """
            Object constructor.
            :param name: The logger name
            :param level: The default logging level
            """
            logging.Logger.__init__(self, name, level)

        def critical(self, msg, *args, **kwargs):
            """
            Log 'msg % args' with severity 'CRITICAL' and raise an Exception.
            """
            logging.Logger.critical(self, msg, *args, **kwargs)
            # Critical messages are treated as fatal for the GUI.
            raise Exception(msg % args)

    logging.setLoggerClass(CustomHandler)
    LOG = logging.getLogger('B3')
    # mode='w' truncates the previous log file on each start.
    handler = logging.FileHandler(B3_LOG, mode='w')
    handler.setFormatter(logging.Formatter('%(asctime)s\t%(levelname)s\t%(message)r', '%y%m%d %H:%M:%S'))
    LOG.addHandler(handler)
    LOG.setLevel(logging.DEBUG)
def getLogger(name):
    """Build the logger *name* with this module's Logger class installed,
    then put the prior logger class back (whatever the outcome)."""
    saved = logging.getLoggerClass()
    try:
        logging.setLoggerClass(Logger)
        result = logging.getLogger(name)
    finally:
        logging.setLoggerClass(saved)
    return result

## The main 'eyed3' logger
def install_logger_class() -> None:
    """Install this module's ``Logger`` as the class used for new loggers."""
    logging.setLoggerClass(Logger)
def register(cls):
    """
    Register custom trace logger with the logging subsystem
    """
    # Map the numeric TRACE level to its display name.
    logging.addLevelName(TraceLogger.TRACE, TraceLogger.TRACE_NAME)
    # Make TraceLogger the class for all subsequently created loggers.
    logging.setLoggerClass(TraceLogger)
def log_to_seq(server_url, api_key=None, level=logging.WARNING,
               batch_size=10, auto_flush_timeout=None,
               additional_handlers=None, override_root_logger=False,
               **kwargs):
    """
    Configure the logging system to send log entries to Seq.

    Note that the root logger will not log to Seq by default.

    :param server_url: The Seq server URL.
    :param api_key: The Seq API key (optional).
    :param level: The minimum level at which to log.
    :param batch_size: The number of log entries to collect before publishing to Seq.
    :param auto_flush_timeout: If specified, the time (in seconds) before the current batch is automatically flushed.
    :param additional_handlers: Additional `LogHandler`s (if any).
    :param override_root_logger: Override the root logger, too? Note - this might cause problems if third-party components try to be clever when using the logging.XXX functions.
    :return: The `SeqLogHandler` that sends events to Seq. Can be used to forcibly flush records to Seq.
    :rtype: SeqLogHandler
    """
    logging.setLoggerClass(StructuredLogger)

    if override_root_logger:
        _override_root_logger()

    seq_handler = SeqLogHandler(server_url, api_key, batch_size, auto_flush_timeout)
    log_handlers = [seq_handler]
    if additional_handlers:
        # extend() replaces the original element-by-element append loop.
        log_handlers.extend(additional_handlers)

    logging.basicConfig(
        style='{',
        handlers=log_handlers,
        level=level,
        **kwargs
    )
    # The Seq handler is always first in the list.
    return seq_handler
def logsetup(app):
    """Configure process-wide logging for the app: syslog handlers for the
    server log (root), event log and client log, quiet chatty third-party
    loggers, then apply overrides from ``app.config['logging']``."""
    global _setup_done
    # Idempotence guard: only the first call configures logging.
    if _setup_done:
        return
    _setup_done = True
    logging.setLoggerClass(ContextAwareLogger)
    syslog_path = '/dev/log'
    if sys.platform == 'darwin':
        syslog_path = '/var/run/syslog'

    # Install log file handler
    handler = SysLogHandler(address=syslog_path, facility=SysLogHandler.LOG_USER)
    handler.name = "serverlog"
    handler.setFormatter(ServerLogFormatter())
    logging.root.addHandler(handler)

    # Install eventLog file handler (dedicated, non-propagating logger)
    handler = SysLogHandler(address=syslog_path, facility=SysLogHandler.LOG_LOCAL0)
    handler.name = "eventlog"
    handler.setFormatter(EventLogFormatter())
    l = logging.getLogger("eventlog")
    l.propagate = False
    l.addHandler(handler)

    # Install client file handler (dedicated, non-propagating logger)
    handler = SysLogHandler(address=syslog_path, facility=SysLogHandler.LOG_LOCAL1)
    handler.name = "clientlog"
    handler.setFormatter(ClientLogFormatter())
    l = logging.getLogger("clientlog")
    l.propagate = False
    l.addHandler(handler)

    # Quiet down a couple of very chatty loggers. This can be overridden in config.json.
    for logger_name in ['sqlalchemy', 'werkzeug', 'requests.packages.urllib3.connectionpool']:
        logging.getLogger(logger_name).setLevel('WARNING')

    # Apply additional 'level' and 'propagate' settings for handlers and
    # loggers. See https://docs.python.org/2.7/library/logging.config.html#
    # Example format:
    # "logging": {
    #     "version": 1,
    #     "incremental": true,
    #     "loggers": {
    #         "my_chatty_logger": {
    #             "level": "WARNING"
    #         }
    #     },
    #     "handlers": {
    #         "serverlog": {
    #             "level": "INFO",
    #         }
    #     }
    # }
    if 'logging' in app.config:
        logging.config.dictConfig(app.config['logging'])
def config_logging(data):
    """Set up console + file logging from *data* and return the module logger.

    :param data: Mapping whose ``data["logging"]`` section provides
        ``path`` (log directory) and ``level`` (logger level).
    :return: This module's logger, created with the SlackLogger class.
    """
    logging_filename = "{}.log".format(get_current_time(with_dashes=True))
    logging_path = data["logging"]["path"]
    path = os.path.join(logging_path, logging_filename)
    # makedirs(exist_ok=True) replaces the racy exists()/mkdir() pair and
    # also creates missing parent directories.
    os.makedirs(logging_path, exist_ok=True)
    dictLogConfig = {
        "version": 1,
        'disable_existing_loggers': False,
        "handlers": {
            "default": {
                "class": "logging.StreamHandler",
                "formatter": "basic_formatter",
                "stream": 'ext://sys.stdout',
            },
            "fileHandler": {
                "class": "logging.FileHandler",
                "formatter": "detailed",
                "filename": path,
            },
        },
        "loggers": {
            __name__: {
                "handlers": ["fileHandler", "default"],
                "level": data["logging"]["level"],
                "propagate": False
            }
        },
        "formatters": {
            "basic_formatter": {
                'format': '[%(levelname)s] %(message)s',
            },
            "detailed": {
                'format': '%(asctime)s %(name)s[%(levelname)s] %(filename)s:%(lineno)d %(message)s',
                'datefmt': "%Y-%m-%d %H:%M:%S",
            }
        }
    }
    logging.setLoggerClass(SlackLogger)
    logger = logging.getLogger(__name__)
    logging.config.dictConfig(dictLogConfig)
    return logger