我们从Python开源项目中,提取了以下50个代码示例,用于说明如何使用logging.handlers模块。
def setup_logging(verbose=0, colors=False, name=None):
    """Configure console logging.

    Info and below go to stdout, others go to stderr.

    :param int verbose: Verbosity level. > 0 print debug statements.
        > 1 passed to sphinx-build.
    :param bool colors: Print color text in non-verbose mode.
    :param str name: Which logger name to set handlers to. Used for testing.
    """
    log = logging.getLogger(name)
    log.setLevel(logging.DEBUG if verbose > 0 else logging.INFO)
    fmt = ColorFormatter(verbose > 0, colors)
    if colors:
        colorclass.Windows.enable()

    class _StdoutOnly(logging.Filter):
        # Keep records at INFO and below on the stdout handler.
        @staticmethod
        def filter(record):
            return record.levelno <= logging.INFO

    out_handler = logging.StreamHandler(sys.stdout)
    out_handler.setFormatter(fmt)
    out_handler.setLevel(logging.DEBUG)
    out_handler.addFilter(_StdoutOnly())
    log.addHandler(out_handler)

    err_handler = logging.StreamHandler(sys.stderr)
    err_handler.setFormatter(fmt)
    err_handler.setLevel(logging.WARNING)
    log.addHandler(err_handler)
def __init__(self, pool_names, max_restarts=0, options=None):
    """Set up the pool manager.

    :param pool_names: names of the worker pools to manage.
    :param max_restarts: how many times a crashed pool may be restarted.
    :param options: optional dict of extra settings (defaults to empty).
    """
    self.names = pool_names
    self.queue = multiprocessing.Queue()  # shared queue for log records
    self.pool = dict()
    self.max_restarts = max_restarts
    self.options = options or dict()
    self.dog_path = os.curdir
    self.dog_handler = LiveReload(self)
    # self.dog_observer = Observer()
    # self.dog_observer.schedule(self.dog_handler, self.dog_path, recursive=True)
    # With spawn/forkserver start methods, children do not inherit the
    # parent's logging handlers, so relay records via a QueueListener.
    if multiprocessing.get_start_method() != 'fork':  # pragma: no cover
        root_logger = logging.getLogger()
        self.log_listener = QueueListener(self.queue, *root_logger.handlers)
    # TODO: Find out how to get the watchdog + livereload working on a later moment.
    # self.dog_observer.start()
    self._restarts = dict()
def common_logger_config(self, logger, config, incremental=False):
    """Perform configuration which is common to root and non-root loggers."""
    level = config.get('level')
    if level is not None:
        logger.setLevel(_checkLevel(level))
    if incremental:
        return
    # Full (non-incremental) configuration: start from a clean slate.
    for existing in list(logger.handlers):
        logger.removeHandler(existing)
    handler_names = config.get('handlers')
    if handler_names:
        self.add_handlers(logger, handler_names)
    filter_names = config.get('filters')
    if filter_names:
        self.add_filters(logger, filter_names)
def setup_logging():
    '''Sets up internal logging. Run this once at startup.'''
    root = logging.getLogger()
    fmt = logging.Formatter('%(asctime)s - %(levelname)s - %(message)s')
    # Timed rotation to a file, when a filename is configured.
    if options.log.filename:
        file_handler = logging.handlers.TimedRotatingFileHandler(
            filename=options.log.filename,
            when=options.log.when,
            backupCount=options.log.backup_count,
            utc=True)
        file_handler.setFormatter(fmt)
        root.addHandler(file_handler)
    # Mirror to the console unless running as a daemon.
    if not options.main.daemon:
        console = logging.StreamHandler()
        console.setFormatter(fmt)
        root.addHandler(console)
    root.setLevel(getattr(logging, options.log.level.upper()))
def common_logger_config(self, logger, config, incremental=False):
    """Perform configuration which is common to root and non-root loggers."""
    level = config.get('level')
    if level is not None:
        logger.setLevel(logging._checkLevel(level))
    if not incremental:
        # Drop existing handlers before wiring up the configured ones.
        for old in logger.handlers[:]:
            logger.removeHandler(old)
        names = config.get('handlers')
        if names:
            self.add_handlers(logger, names)
        names = config.get('filters')
        if names:
            self.add_filters(logger, names)
def __init__(self, filename=None, directory=None, suffix=".log",
             program_name=None, formatter=formatter.TEXT_FORMATTER,
             level=None):
    """Log file output.

    :param filename: The log file path to write to. If directory is also
        specified, both will be combined.
    :param directory: The log directory to write to. If no filename is
        specified, the program name and suffix will be used to construct
        the full path relative to the directory.
    :param suffix: The log file name suffix. This will be only used if no
        filename has been provided.
    :param program_name: Program name. Autodetected by default.
    """
    logpath = _get_log_file_path(filename, directory, program_name, suffix)
    # WatchedFileHandler reopens the file if it is moved (e.g. by logrotate).
    handler = logging.handlers.WatchedFileHandler(logpath)
    super(File, self).__init__(handler, formatter, level)
def __init__(self, filename=None, directory=None, suffix='.log',
             program_name=None, formatter=formatter.TEXT_FORMATTER,
             level=None, max_size_bytes=0, backup_count=0):
    """Rotating log file output.

    :param filename: The log file path to write to. If directory is also
        specified, both will be combined.
    :param directory: The log directory to write to. If no filename is
        specified, the program name and suffix will be used to construct
        the full path relative to the directory.
    :param suffix: The log file name suffix. This will be only used if no
        filename has been provided.
    :param program_name: Program name. Autodetected by default.
    :param max_size_bytes: allow the file to rollover at a predetermined
        size.
    :param backup_count: the maximum number of files to rotate logging
        output between.
    """
    logpath = _get_log_file_path(filename, directory, program_name, suffix)
    handler = logging.handlers.RotatingFileHandler(
        logpath, maxBytes=max_size_bytes, backupCount=backup_count)
    super(RotatingFile, self).__init__(handler, formatter, level)
def configure_logging(debug=False, verbose=True, stderr=True):
    """Apply the module's dictConfig, tuned by verbosity flags."""
    config = copy.deepcopy(LOG_CONFIG)
    # debug wins over verbose, for both handler level and stderr formatter.
    level = "DEBUG" if debug else ("INFO" if verbose else None)
    if level is not None:
        for handler in config["handlers"].values():
            handler["level"] = level
    if debug:
        config["handlers"]["stderr"]["formatter"] = "debug"
    elif verbose:
        config["handlers"]["stderr"]["formatter"] = "verbose"
    if stderr:
        config["loggers"][LOG_NAMESPACE]["handlers"].append("stderr")
    logging.config.dictConfig(config)
def __syslog_handler_init(self):
    """Lazily create and attach the syslog handler (idempotent)."""
    if self.syslog_handler is None:
        try:
            # TODO: /dev/log is Linux-specific.
            self.syslog_handler = logging.handlers.SysLogHandler(
                '/dev/log', facility=self.facility)
            self.syslog_handler.setFormatter(
                logging.Formatter(
                    fmt='%(name)s %(levelname)-8s %(message)s'
                )
            )
            self.logger.addHandler(self.syslog_handler)
        except Exception:
            # Was a bare ``except:``, which also swallowed SystemExit and
            # KeyboardInterrupt; narrowed to Exception. Best-effort
            # behaviour (tear down on failure) is preserved.
            self.__syslog_handler_deinit()
def getLogger(self):
    '''Initialize and load log handlers.'''
    logger = logging.getLogger(self.proc_name)
    logger.setLevel(logging.INFO)
    if "debug" in self.config['logging']:
        if self.config['logging']['debug']:
            logger.setLevel(logging.DEBUG)
    # Load and add a handler for each configured logging mechanism.
    for loghandler in self.config['logging']['plugins'].keys():
        # BUG FIX: the previous call passed level=-1 (implicit relative
        # import), which is Python-2-only and raises ValueError on
        # Python 3. Omitting the level performs an absolute import,
        # which works on both.
        plugin = __import__("plugins.logging." + loghandler,
                            globals(), locals(), ['Logger'])
        lh = plugin.Logger(config=self.config, proc_name=self.proc_name)
        logger.addHandler(lh.setup())
    return logger
def init_logging():
    """Configure the root logger: stdout console plus optional rotating file."""
    root = logging.getLogger()
    fmt = logging.Formatter(
        fmt='%(asctime)s.%(msecs)03d %(levelname)-8s [%(name)s] %(message)s',
        datefmt='%Y-%m-%d %H:%M:%S')
    console = logging.StreamHandler(sys.stdout)
    console.setFormatter(fmt)
    root.addHandler(console)
    if LOG_TO_FILE:
        rotating = logging.handlers.RotatingFileHandler(
            "debug.log", maxBytes=2 ** 24, backupCount=10)
        rotating.setFormatter(fmt)
        root.addHandler(rotating)
    root.setLevel(logging.DEBUG)
    return root

# ============================================================================
def setup_logging(app):
    """Setup logging.

    Attaches a size-rotating file handler to both the Flask app logger and
    the 'pybossa' logger when ``LOG_FILE`` is configured.
    NOTE(review): reconstructed so everything after the ``if`` lives inside
    it — otherwise ``file_handler`` would be unbound when LOG_FILE is unset;
    confirm against upstream.
    """
    from logging.handlers import RotatingFileHandler
    from logging import Formatter
    log_file_path = app.config.get('LOG_FILE')
    log_level = app.config.get('LOG_LEVEL', logging.WARN)
    if log_file_path:  # pragma: no cover
        file_handler = RotatingFileHandler(log_file_path)
        file_handler.setFormatter(Formatter(
            '%(name)s:%(levelname)s:[%(asctime)s] %(message)s '
            '[in %(pathname)s:%(lineno)d]'
        ))
        file_handler.setLevel(log_level)
        app.logger.addHandler(file_handler)
        logger = logging.getLogger('pybossa')
        logger.setLevel(log_level)
        logger.addHandler(file_handler)
def make_logger(base_dir=None, log_name=None, log_fn=None,
                level=logging.DEBUG, fmt=None, datefmt=None):
    """Return a logger backed by a file handler under *base_dir*.

    :param base_dir: log directory; falls back to ``config.log_dir``.
    :param log_name: logger name; ``None`` means the root logger.
    :param log_fn: log file name; derived from *log_name* when omitted.
    :param level: logger level (default DEBUG).
    :param fmt: format string passed to the file handler's formatter.
    :param datefmt: date format passed to the file handler's formatter.
    """
    # if log_name is None, get the root logger
    logger = logging.getLogger(log_name)
    logger.setLevel(level)
    if base_dir is None:
        base_dir = config.log_dir
    # do not add 2 handlers to one logger by default
    if len(logger.handlers) == 0:
        if log_fn is None:
            if log_name is None:
                log_fn = get_root_log_fn()
            else:
                log_fn = log_name + '.' + log_suffix
        logger.addHandler(make_file_handler(base_dir, log_fn,
                                            fmt=fmt, datefmt=datefmt))
    return logger
def create_logger(logger_name, handlers, level=logging.INFO,
                  formatter=LOG_FORMATTER):
    """Create a logger wired up with the given handlers.

    :param logger_name: name of the logger to fetch and configure.
    :param handlers: iterable of ``logging.Handler`` instances to attach.
    :param level: logging level for the logger (default ``logging.INFO``).
    :param formatter: format string applied to every handler.
    """
    logger = logging.getLogger(logger_name)
    logger.setLevel(level)
    for handler in handlers:
        # BUG FIX: the ``formatter`` argument was silently ignored — the
        # module-level LOG_FORMATTER constant was always used instead.
        # Backward-compatible since LOG_FORMATTER is still the default.
        handler.setFormatter(logging.Formatter(formatter))
        logger.addHandler(handler)
    return logger

# GF_LOG = create_logger("girlfriend", (stdout_handler(),))
def Logger(name, **kargs):
    """Create and return logger."""
    path_dirs = PathDirs(**kargs)
    logging.captureWarnings(True)  # route warnings.warn() through logging
    logger = logging.getLogger(name)
    logger.setLevel(logging.INFO)
    # WatchedFileHandler tolerates external log rotation of vent.log.
    handler = logging.handlers.WatchedFileHandler(os.path.join(
        path_dirs.meta_dir, "vent.log"))
    handler.setLevel(logging.INFO)
    formatter = logging.Formatter('%(asctime)s - %(name)s:%(lineno)-4d - '
                                  '%(levelname)s - %(message)s')
    handler.setFormatter(formatter)
    # Guard against attaching duplicate handlers on repeated calls.
    if not len(logger.handlers):
        logger.addHandler(handler)
    return logger
def _cli_log_message(msg, logger_name=None, level="INFO"):
    """
    Log a single message to Flightlog. Intended for CLI usage.

    Calling this function multiple times within the same process will
    configure duplicate handlers and result in duplicate messages.
    """
    logger = logging.getLogger(logger_name)
    levelnum = logging.getLevelName(level.upper())
    # getLevelName returns 'Level <name>' (a string) for unknown names, so
    # a failed int() conversion means the level was invalid.
    try:
        int(levelnum)
    except ValueError:
        raise ValueError("level must be one of DEBUG, INFO, WARNING, ERROR, CRITICAL")
    handler = FlightlogHandler(background=False)
    logger.addHandler(handler)
    logger.setLevel(levelnum)
    # '-' means: read the message from stdin.
    if msg == "-":
        msg = sys.stdin.read()
    for line in msg.splitlines():
        if line:  # skip blank lines
            logger.log(levelnum, line)
    exit_code = 0
    return None, exit_code
def init_app(cls, app): Config.init_app(app) # email errors to the administrators import logging from logging.handlers import SMTPHandler credentials = None secure = None if getattr(cls, 'MAIL_USERNAME', None) is not None: credentials = (cls.MAIL_USERNAME, cls.MAIL_PASSWORD) if getattr(cls, 'MAIL_USE_TLS', None): secure = () mail_handler = SMTPHandler( mailhost=(cls.MAIL_SERVER, cls.MAIL_PORT), fromaddr=cls.CIRCULATE_MAIL_SENDER, toaddrs=[cls.CIRCULATE_ADMIN], subject=cls.CIRCULATE_MAIL_SUBJECT_PREFIX + ' Application Error', credentials=credentials, secure=secure) mail_handler.setLevel(logging.ERROR) app.logger.addHandler(mail_handler)
def loggingInit(logname):
    """Create a DEBUG-level logger that rotates *logname* at 10 MB.

    :param logname: path of the log file; also used as the logger name.
    :return: the configured ``logging.Logger``.
    """
    # Ensure the (hard-coded) log directory exists.
    # NOTE(review): this assumes *logname* lives under ../log — confirm
    # with callers.
    if not os.path.exists('../log'):
        os.mkdir('../log')
    log = logging.getLogger(logname)
    log.setLevel(logging.DEBUG)
    # RotatingFileHandler creates the file itself, so the old manual
    # ``open(logname, 'w')`` pre-creation step was redundant and removed.
    logHandler = logging.handlers.RotatingFileHandler(
        logname, maxBytes=10 * 1024 * 1024, backupCount=5)
    logHandler.setLevel(logging.DEBUG)
    formatter = logging.Formatter('%(name)-12s: %(levelname)-8s %(message)s')
    logHandler.setFormatter(formatter)
    log.addHandler(logHandler)
    return log
def loglevel(level=logging.DEBUG, update_custom_handlers=False):
    """
    Set the minimum loglevel for the default logger (`logzero.logger`).

    This reconfigures only the internal handlers of the default logger
    (eg. stream and logfile). You can also update the loglevel for custom
    handlers by using `update_custom_handlers=True`.

    :arg int level: Minimum `logging-level
        <https://docs.python.org/2/library/logging.html#logging-levels>`_
        to display (default: `logging.DEBUG`).
    :arg bool update_custom_handlers: If you added custom handlers to this
        logger and want this to update them too, you need to set
        `update_custom_handlers` to `True`
    """
    logger.setLevel(level)

    # Reconfigure existing internal handlers
    for handler in list(logger.handlers):
        if hasattr(handler, LOGZERO_INTERNAL_LOGGER_ATTR) or update_custom_handlers:
            # Don't update the loglevel if this handler uses a custom one
            if hasattr(handler, LOGZERO_INTERNAL_HANDLER_IS_CUSTOM_LOGLEVEL):
                continue

            # Update the loglevel for all default handlers
            handler.setLevel(level)

    # Remember the chosen level at module scope for later reconfiguration.
    global _loglevel
    _loglevel = level
def formatter(formatter, update_custom_handlers=False):
    """
    Set the formatter for all handlers of the default logger
    (``logzero.logger``).

    This reconfigures only the logzero internal handlers by default, but
    you can also reconfigure custom handlers by using
    ``update_custom_handlers=True``.

    Beware that setting a formatter which uses colors also may write the
    color codes to logfiles.

    :arg Formatter formatter: `Python logging Formatter object
        <https://docs.python.org/2/library/logging.html#formatter-objects>`_
        (by default uses the internal LogFormatter).
    :arg bool update_custom_handlers: If you added custom handlers to this
        logger and want this to update them too, you need to set
        ``update_custom_handlers`` to `True`
    """
    # NOTE: the parameter shadows this function's own name; internal
    # handlers are recognised by the LOGZERO_INTERNAL_LOGGER_ATTR marker.
    for handler in list(logger.handlers):
        if hasattr(handler, LOGZERO_INTERNAL_LOGGER_ATTR) or update_custom_handlers:
            handler.setFormatter(formatter)

    # Remember the chosen formatter at module scope for later use.
    global _formatter
    _formatter = formatter
def create_logger(app_name, logfilename=None, level=logging.INFO,
                  console=False, syslog=False):
    """
    Build and return a custom logger. Accepts the application name,
    log filename, loglevel and console logging toggle and syslog toggle.

    :param app_name: logger name.
    :param logfilename: optional path for a FileHandler.
    :param level: minimum level for the logger. BUG FIX: this parameter
        was documented but ignored — the logger was unconditionally set
        to DEBUG; it now honours ``level`` (default unchanged: INFO).
    :param console: also log to the console via StreamHandler.
    :param syslog: also log to the local syslog socket.
    """
    log = logging.getLogger(app_name)
    log.setLevel(level)
    # Add file handler
    if logfilename is not None:
        log.addHandler(logging.FileHandler(logfilename))
    if syslog:
        log.addHandler(logging.handlers.SysLogHandler(address='/dev/log'))
    if console:
        log.addHandler(logging.StreamHandler())
    # One shared format for every handler.
    formatter = logging.Formatter(
        '%(asctime)s : %(levelname)-8s - %(message)s',
        datefmt='%Y-%m-%d %H:%M:%S')
    for handle in log.handlers:
        handle.setFormatter(formatter)
    return log
def get_logger(self):
    """Return the cached logger, attaching handlers on first use.

    (Original docstring was mojibake; it explained that configuring the
    handlers only once per logger avoids duplicate log lines.)
    """
    if not self.logger.handlers:
        # Console output.
        console_handler = logging.StreamHandler()
        console_handler.setFormatter(self.formatter)
        console_handler.setLevel(self.console_output_level)
        self.logger.addHandler(console_handler)
        # Daily-rotating file output, keeping `backup_count` old files.
        file_handler = TimedRotatingFileHandler(
            filename=os.path.join(LOG_PATH, self.log_file_name),
            when='D',
            interval=1,
            backupCount=self.backup_count,
            delay=True,
            encoding='utf-8'
        )
        file_handler.setFormatter(self.formatter)
        file_handler.setLevel(self.file_output_level)
        self.logger.addHandler(file_handler)
    return self.logger
def _find_facility_from_conf():
    """Resolve CONF.syslog_log_facility to a SysLogHandler facility code.

    :return: the integer facility value for SysLogHandler.
    :raises TypeError: when the configured name is not a known facility.
    """
    facility_names = logging.handlers.SysLogHandler.facility_names
    # First try a LOG_* constant attribute, then the lowercase name map.
    facility = getattr(logging.handlers.SysLogHandler,
                       CONF.syslog_log_facility,
                       None)

    if facility is None and CONF.syslog_log_facility in facility_names:
        facility = facility_names.get(CONF.syslog_log_facility)

    if facility is None:
        # BUG FIX: dict.keys() returns a view on Python 3, which has no
        # ``extend``; materialise it as a list first.
        valid_facilities = list(facility_names)
        consts = ['LOG_AUTH', 'LOG_AUTHPRIV', 'LOG_CRON', 'LOG_DAEMON',
                  'LOG_FTP', 'LOG_KERN', 'LOG_LPR', 'LOG_MAIL', 'LOG_NEWS',
                  'LOG_AUTH', 'LOG_SYSLOG', 'LOG_USER', 'LOG_UUCP',
                  'LOG_LOCAL0', 'LOG_LOCAL1', 'LOG_LOCAL2', 'LOG_LOCAL3',
                  'LOG_LOCAL4', 'LOG_LOCAL5', 'LOG_LOCAL6', 'LOG_LOCAL7']
        valid_facilities.extend(consts)
        raise TypeError(_('syslog facility must be one of: %s') %
                        ', '.join("'%s'" % fac for fac in valid_facilities))

    return facility
def __get_file_formatter():
    """Get logging formatter with Google logging like format.

    Each line in the log should look like:
    [DIWEF]mmdd hh:mm:ss.mmm threadid file:line] <message>

    Returns:
        Formatter object for use in logging handlers.
    """
    # [IWEF]mmdd hh:mm:ss.mmm threadid file:line] <message>
    ASCII_TIME_FORMAT = "%m%d %H:%M:%S"  # mmdd hh:mm:ss
    # BUG FIX: ``%(msecs)s`` rendered the raw milliseconds float (e.g.
    # ``.123.45600128``); ``%(msecs)03d`` gives a zero-padded millisecond
    # field. (True microseconds are not available from a stock
    # logging.Formatter, so the old ``uuuuuu`` claim was wrong too.)
    LINE_FORMAT = ("%(levelname).1s"          # [DIWEF]
                   "%(asctime)s.%(msecs)03d "  # time with milliseconds
                   "%(threadName)s "           # threadid
                   "%(pathname)s:%(lineno)d] "  # file:line]
                   "%(message)s")              # <message>
    return logging.Formatter(fmt=LINE_FORMAT, datefmt=ASCII_TIME_FORMAT)
def _initialize_logging(self):
    """Printing to console is dirty"""
    main_logger = logging.getLogger('main')
    log_filename = os.path.join('logs', 'log.txt')
    main_logger.setLevel(logging.DEBUG)
    # NOTE(review): the original comment claimed "5 rotating logs of 1 MB
    # each", but backupCount=1 keeps only a single 1 MB backup — confirm
    # the intended count.
    handler = logging.handlers.RotatingFileHandler(
        log_filename,
        maxBytes = 1024 * 1024,
        backupCount = 1
    )
    main_logger.addHandler(handler)
    return main_logger
def _initialize_logging(self):
    """ Printing to console is dirty """
    main_logger = logging.getLogger('main')
    log_filename = os.path.join('logs', 'log.txt')
    main_logger.setLevel(logging.DEBUG)
    # NOTE(review): comment said "5 rotating logs of 1 MB each" but
    # backupCount=1 keeps only one backup — confirm the intended count.
    handler = logging.handlers.RotatingFileHandler(
        log_filename,
        maxBytes = 1024 * 1024,
        backupCount = 1
    )
    main_logger.addHandler(handler)
    return main_logger
def setup_logging(verbosity_level, save_debug_log): logging.captureWarnings(True) # if config['logging']['config_file']: # # Logging config from file must be read before other handlers are # # added. If not, the other handlers will have no effect. # try: # path = config['logging']['config_file'] # logging.config.fileConfig(path, disable_existing_loggers=False) # except Exception as e: # # Catch everything as logging does not specify what can go wrong. # logger.error('Loading logging config %r failed. %s', path, e) setup_console_logging(verbosity_level) if save_debug_log: print('Here we would call setup_debug_logging_to_file(config)') # setup_debug_logging_to_file(config) _delayed_handler.release()
def colourise(self, text, colour, bold=False):
    """Wrap *text* in ANSI escape codes for *colour* (0-7 base offset),
    optionally prefixed with the bold attribute."""
    code = self.COLOUR_BASE.format(colour + 30)
    pieces = [self.BOLD] if bold else []
    pieces.extend((code, text, self.RESET))
    return ''.join(pieces)


# logfile
# logfile = logging.handlers.RotatingFileHandler(LOGPATH, maxBytes=LOGSIZE,
#                                                backupCount=5)
# formatter = logging.Formatter(
#     '%(asctime)s %(levelname)-8s [%(name)-12s] %(message)s',
#     datefmt="%d/%m %H:%M:%S")
# logfile.setFormatter(formatter)
# logfile.setLevel(logging.DEBUG)

# console output
def my_log(): logging.basicConfig(level=logging.INFO, format='%(asctime)s %(filename)s[line:%(lineno)d] %(levelname)s %(message)s', datefmt='%Y-%m-%d %H:%M:%S', handlers=[logging.FileHandler('message.log', 'a', 'utf-8')]) # ?????_?????__ _log = logging.getLogger('app.' + __name__) host = '10.0.0.175' port = 8080 # ??? 'xxxx' % (aa, bb)???????? _log.error('error to connect to %s:%d', host, port) _log.addFilter(FilterFunc('foo')) # ?????foo()??????? lgg = logging.getLogger('app.network.client') lgg.propagate = False # ?????? lgg.error('do you see me?') # ???????? lgg.setLevel(logging.CRITICAL) lgg.error('now you see me?') logging.disable(logging.DEBUG) # ???????? # ??log??????main????????? config.fileConfig('applogcfg.ini')
def test_it(self, mock_register, mock_get, mock_except_hook, mock_sys):
    """End-to-end check of logging setup: handlers, atexit hook, excepthook."""
    mock_sys.argv = ['--debug']
    mock_sys.version_info = sys.version_info
    self._call()
    mock_root_logger = mock_get()
    mock_root_logger.setLevel.assert_called_once_with(logging.DEBUG)
    self.assertEqual(mock_root_logger.addHandler.call_count, 2)

    # Exactly one of the two handlers must be a MemoryHandler whose
    # buffered records target a StreamHandler; the other must be a plain
    # StreamHandler.
    MemoryHandler = logging.handlers.MemoryHandler
    memory_handler = None
    for call in mock_root_logger.addHandler.call_args_list:
        handler = call[0][0]
        if memory_handler is None and isinstance(handler, MemoryHandler):
            memory_handler = handler
        else:
            self.assertTrue(isinstance(handler, logging.StreamHandler))
    self.assertTrue(
        isinstance(memory_handler.target, logging.StreamHandler))

    # logging.shutdown must be registered to run at interpreter exit.
    mock_register.assert_called_once_with(logging.shutdown)
    # The installed excepthook must route through the memory handler.
    mock_sys.excepthook(1, 2, 3)
    mock_except_hook.assert_called_once_with(
        memory_handler, 1, 2, 3, debug=True, log_path=mock.ANY)
def setUp(self):
    """Build the handler fixtures certbot's post-argparse logging expects."""
    super(PostArgParseSetupTest, self).setUp()
    self.config.debug = False
    self.config.max_log_backups = 1000
    self.config.quiet = False
    self.config.verbose_count = constants.CLI_DEFAULTS['verbose_count']
    self.devnull = open(os.devnull, 'w')

    # Console handler writing into an in-memory stream.
    from certbot.log import ColoredStreamHandler
    self.stream_handler = ColoredStreamHandler(six.StringIO())

    # Memory handler buffering into a temp-file-backed handler.
    from certbot.log import MemoryHandler, TempHandler
    self.temp_handler = TempHandler()
    self.temp_path = self.temp_handler.path
    self.memory_handler = MemoryHandler(self.temp_handler)

    # Fake root logger that already carries both handlers.
    self.root_logger = mock.MagicMock(
        handlers=[self.memory_handler, self.stream_handler])
def getLogger():
    """Return the process-wide singleton logger, creating it on first call.

    (Original comments were mojibake; they explained that reusing a single
    logger avoids attaching duplicate handlers on repeated calls.)
    """
    if Logger.logger is not None:
        return Logger.logger
    Logger.logger = logging.Logger(Logger.log_name)
    # Optional console echo, controlled by configuration.
    if Logger.log_print == "True":
        print_handler = logging.StreamHandler()
        print_fmt = logging.Formatter(
            Logger.log_formatter, datefmt=Logger.log_formatter_datefmt
        )
        print_handler.setFormatter(print_fmt)
        Logger.logger.addHandler(print_handler)
    # Size-based rotating file output.
    file_handler = logging.handlers.RotatingFileHandler(
        filename = Logger.log_file,
        maxBytes = Logger.log_max_byte,
        backupCount = Logger.log_backup_count
    )
    file_fmt = logging.Formatter(
        Logger.log_formatter, datefmt=Logger.log_formatter_datefmt
    )
    file_handler.setFormatter(file_fmt)
    Logger.logger.addHandler(file_handler)
    Logger.logger.setLevel(Logger.levels.get(Logger.log_level))
    return Logger.logger
def add_handlers(self, logger, handlers):
    """Add handlers to a logger from a list of names.

    :param logger: the logger to attach handlers to.
    :param handlers: iterable of handler names; each must already exist
        in ``self.config['handlers']``.
    :raises ValueError: when a name is unknown or attaching fails.
    """
    for h in handlers:
        try:
            logger.addHandler(self.config['handlers'][h])
        # BUG FIX: ``StandardError`` is Python-2-only and raises NameError
        # on Python 3; ``Exception`` covers the same ground.
        except Exception as e:
            raise ValueError('Unable to add handler %r: %s' % (h, e))
def _open(self):
    """Create the log directory if needed, then open the file as usual."""
    ensure_dir(os.path.dirname(self.baseFilename))
    # Delegate the actual open to the stock RotatingFileHandler.
    return logging.handlers.RotatingFileHandler._open(self)
def __init__(self, filename, max_bytes=0, backup_count=0, encoding=None,
             delay=0, when='h', interval=1, utc=False):
    """Timed rotating handler that additionally records a size limit.

    :param max_bytes: stored as ``self.maxBytes`` for size-based rollover
        (0 = no size limit).
    :param backup_count: number of rotated files to keep.
    The remaining parameters match
    ``logging.handlers.TimedRotatingFileHandler``.
    """
    # If rotation/rollover is wanted, it doesn't make sense to use another
    # mode. If for example 'w' were specified, then if there were multiple
    # runs of the calling application, the logs from previous runs would be
    # lost if the 'w' is respected, because the log file would be truncated
    # on each run.
    handlers.TimedRotatingFileHandler.__init__(
        self, filename, when, interval, backup_count, encoding, delay, utc)
    self.maxBytes = max_bytes

# noinspection PyIncorrectDocstring
def flush(self):
    """Flush every handler attached to the wrapped logger."""
    for attached in self.logger.handlers:
        attached.flush()