The following 50 code examples, extracted from Python open source projects, illustrate how to use logging.root.
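Before the project examples, here is a minimal, standard-library-only sketch of what logging.root is: the pre-created root Logger instance that logging.basicConfig() and the module-level helpers such as logging.info() ultimately operate on.

import logging
import sys

# logging.getLogger() with no name returns the same object as logging.root.
assert logging.getLogger() is logging.root

# Attach a handler and set a level directly on the root logger
# (assumes a fresh interpreter with no prior logging configuration).
handler = logging.StreamHandler(sys.stderr)
handler.setFormatter(logging.Formatter(logging.BASIC_FORMAT))
logging.root.addHandler(handler)
logging.root.setLevel(logging.INFO)

logging.root.info("configured the root logger directly")
logging.info("module-level helpers also log through logging.root")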
def common_logger_config(self, logger, config, incremental=False):
    """
    Perform configuration which is common to root and non-root loggers.
    """
    level = config.get('level', None)
    if level is not None:
        logger.setLevel(logging._checkLevel(level))
    if not incremental:
        # Remove any existing handlers
        for h in logger.handlers[:]:
            logger.removeHandler(h)
        handlers = config.get('handlers', None)
        if handlers:
            self.add_handlers(logger, handlers)
        filters = config.get('filters', None)
        if filters:
            self.add_filters(logger, filters)
def log_to_console(level=logging.WARNING, override_root_logger=False, **kwargs):
    """
    Configure the logging system to send log entries to the console.

    Note that the root logger will not log to Seq by default.

    :param level: The minimum level at which to log.
    :param override_root_logger: Override the root logger, too?
        Note - this might cause problems if third-party components try to be
        clever when using the logging.XXX functions.
    """
    logging.setLoggerClass(StructuredLogger)

    if override_root_logger:
        _override_root_logger()

    logging.basicConfig(
        style='{',
        handlers=[
            ConsoleStructuredLogHandler()
        ],
        level=level,
        **kwargs
    )
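A hedged usage sketch for the helper above. The import path below is hypothetical, and StructuredLogger / ConsoleStructuredLogHandler belong to the surrounding project:

import logging

# Hypothetical import path for the helper shown above.
from my_logging_setup import log_to_console

log_to_console(level=logging.INFO, override_root_logger=True)
logging.getLogger(__name__).info("structured console logging is configured")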
def build_logger_tree():
    """
    Build a DFS tree representing the logger layout.

    Adapted with much appreciation from:
    https://github.com/brandon-rhodes/logging_tree
    """
    cache = {}
    tree = make_logger_node("", root)

    for name, logger in sorted(root.manager.loggerDict.items()):
        if "." in name:
            parent_name = ".".join(name.split(".")[:-1])
            parent = cache[parent_name]
        else:
            parent = tree
        cache[name] = make_logger_node(name, logger, parent)

    return tree
def getRoot(self):
    depth = 0
    if not self.root:
        p = self
        while p.getParent() != None and depth < self._maxResourceDepth:
            if p is p.getParent():
                self.logger.error("Loop in resource parent link. "
                                  "Parent link points back to this resource.")
                return None
            p = p.getParent()
            depth += 1
        if isinstance(p, RootResource):
            return p
        elif depth >= self._maxResourceDepth:
            self.logger.error("Possible loop in resource parent links, aborting search")
            return None
        else:
            return None
    return self.root
def serve(services: List[Service],
          credentials: BrokerCredentials,
          logger: logging.Logger = logging.root,
          port=5000,
          debug=False):
    """
    Starts flask with the given broker

    :param services: Services that this broker provides
    :param credentials: Username and password that will be required to communicate with service broker
    :param logger: Used for api logs. This will not influence Flasks logging behavior
    :param port: Port
    :param debug: Enables debugging in flask app
    """
    from flask import Flask
    app = Flask(__name__)

    blueprint = get_blueprint(services, credentials, logger)
    logger.debug("Register openbrokerapi blueprint")
    app.register_blueprint(blueprint)

    logger.info("Start Flask on 0.0.0.0:%s" % port)
    app.run('0.0.0.0', port, debug)
def _handle_existing_loggers(existing, child_loggers, disable_existing):
    """
    When (re)configuring logging, handle loggers which were in the previous
    configuration but are not in the new configuration. There's no point
    deleting them as other threads may continue to hold references to them;
    and by disabling them, you stop them doing any logging.

    However, don't disable children of named loggers, as that's probably not
    what was intended by the user. Also, allow existing loggers to NOT be
    disabled if disable_existing is false.
    """
    root = logging.root
    for log in existing:
        logger = root.manager.loggerDict[log]
        if log in child_loggers:
            logger.level = logging.NOTSET
            logger.handlers = []
            logger.propagate = True
        elif disable_existing:
            logger.disabled = True
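This is the internal routine behind logging.config.dictConfig()'s disable_existing_loggers option. A small sketch of how that option is exercised from user code:

import logging
import logging.config

noisy = logging.getLogger("myapp.noisy")  # created before (re)configuration

logging.config.dictConfig({
    "version": 1,
    # Keep loggers created before this call enabled instead of disabling them.
    "disable_existing_loggers": False,
    "handlers": {
        "console": {"class": "logging.StreamHandler"},
    },
    "root": {"level": "INFO", "handlers": ["console"]},
})

noisy.info("still enabled because disable_existing_loggers is False")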
def get_logger(name=None, level=None, stream=DEFAULT_STREAM,
               clobber_root_handler=True, logger_factory=None,
               wrapper_class=None):
    """Configure and return a logger with structlog and stdlib."""
    _configure_logger(logger_factory=logger_factory,
                      wrapper_class=wrapper_class)
    log = structlog.get_logger(name)
    root_logger = logging.root
    if log == root_logger:
        if not _has_streamhandler(root_logger, level=level, stream=stream):
            stream_handler = logging.StreamHandler(stream)
            stream_handler.setLevel(level)
            stream_handler.setFormatter(logging.Formatter(fmt=LOG_FORMAT))
            root_logger.addHandler(stream_handler)
    else:
        if clobber_root_handler:
            for handler in root_logger.handlers:
                handler.setFormatter(logging.Formatter(fmt=LOG_FORMAT))
    if level:
        log.setLevel(level)
    return log
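Assuming the helper above is importable (the module path below is hypothetical), the returned object is used like any structlog logger; a minimal usage sketch:

import logging

# Hypothetical import path for the get_logger() helper shown above.
from my_project.log import get_logger

log = get_logger(__name__, level=logging.INFO)
log.info("service started")  # emitted through the root logger's handlers using LOG_FORMAT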
def _test_log(self, method, level=None):
    called = []
    patch(self, logging, 'basicConfig',
          lambda *a, **kw: called.append((a, kw)))

    recording = RecordingHandler()
    logging.root.addHandler(recording)

    log_method = getattr(logging, method)
    if level is not None:
        log_method(level, "test me: %r", recording)
    else:
        log_method("test me: %r", recording)

    self.assertEqual(len(recording.records), 1)
    record = recording.records[0]
    self.assertEqual(record.getMessage(), "test me: %r" % recording)

    expected_level = level if level is not None else getattr(logging, method.upper())
    self.assertEqual(record.levelno, expected_level)

    # basicConfig was not called!
    self.assertEqual(called, [])
def test_no_kwargs(self):
    logging.basicConfig()

    # handler defaults to a StreamHandler to sys.stderr
    self.assertEqual(len(logging.root.handlers), 1)
    handler = logging.root.handlers[0]
    self.assertIsInstance(handler, logging.StreamHandler)
    self.assertEqual(handler.stream, sys.stderr)

    formatter = handler.formatter
    # format defaults to logging.BASIC_FORMAT
    self.assertEqual(formatter._style._fmt, logging.BASIC_FORMAT)
    # datefmt defaults to None
    self.assertIsNone(formatter.datefmt)
    # style defaults to %
    self.assertIsInstance(formatter._style, logging.PercentStyle)

    # level is not explicitly set
    self.assertEqual(logging.root.level, self.original_logging_level)
def test_handlers(self):
    handlers = [
        logging.StreamHandler(),
        logging.StreamHandler(sys.stdout),
        logging.StreamHandler(),
    ]
    f = logging.Formatter()
    handlers[2].setFormatter(f)
    logging.basicConfig(handlers=handlers)
    self.assertIs(handlers[0], logging.root.handlers[0])
    self.assertIs(handlers[1], logging.root.handlers[1])
    self.assertIs(handlers[2], logging.root.handlers[2])
    self.assertIsNotNone(handlers[0].formatter)
    self.assertIsNotNone(handlers[1].formatter)
    self.assertIs(handlers[2].formatter, f)
    self.assertIs(handlers[0].formatter, handlers[1].formatter)
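The test above exercises logging.basicConfig(handlers=...) (Python 3.3+), which installs the given handlers on logging.root and assigns the default formatter to any handler that does not already have one. A minimal sketch:

import logging
import sys

plain = logging.StreamHandler(sys.stdout)   # will receive the default BASIC_FORMAT formatter
custom = logging.StreamHandler(sys.stderr)
custom.setFormatter(logging.Formatter("%(levelname)s :: %(message)s"))  # kept as-is

# Assumes a fresh interpreter with no handlers yet on the root logger.
logging.basicConfig(handlers=[plain, custom], level=logging.DEBUG)

assert logging.root.handlers == [plain, custom]
logging.debug("emitted through both handlers attached to logging.root")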
def _test_log(self, method, level=None):
    # logging.root has no handlers so basicConfig should be called
    called = []

    old_basic_config = logging.basicConfig
    def my_basic_config(*a, **kw):
        old_basic_config()
        old_level = logging.root.level
        logging.root.setLevel(100)  # avoid having messages in stderr
        self.addCleanup(logging.root.setLevel, old_level)
        called.append((a, kw))

    patch(self, logging, 'basicConfig', my_basic_config)

    log_method = getattr(logging, method)
    if level is not None:
        log_method(level, "test me")
    else:
        log_method("test me")

    # basicConfig was called with no arguments
    self.assertEqual(called, [((), {})])
def setUp(self):
    super(LoggerAdapterTest, self).setUp()
    old_handler_list = logging._handlerList[:]

    self.recording = RecordingHandler()
    self.logger = logging.root
    self.logger.addHandler(self.recording)
    self.addCleanup(self.logger.removeHandler, self.recording)
    self.addCleanup(self.recording.close)

    def cleanup():
        logging._handlerList[:] = old_handler_list

    self.addCleanup(cleanup)
    self.addCleanup(logging.shutdown)
    self.adapter = logging.LoggerAdapter(logger=self.logger, extra=None)
def _handle_existing_loggers(existing, child_loggers, disable_existing):
    """
    When (re)configuring logging, handle loggers which were in the previous
    configuration but are not in the new configuration. There's no point
    deleting them as other threads may continue to hold references to them;
    and by disabling them, you stop them doing any logging.

    However, don't disable children of named loggers, as that's probably not
    what was intended by the user. Also, allow existing loggers to NOT be
    disabled if disable_existing is false.
    """
    root = logging.root
    for log in existing:
        logger = root.manager.loggerDict[log]
        if log in child_loggers:
            logger.level = logging.NOTSET
            logger.handlers = []
            logger.propagate = True
        else:
            logger.disabled = disable_existing
def create_file(self, update_data=True):
    # TODO: save changes in current file
    file_name = get_save_file_name(_('Create SportOrg file'),
                                   _("SportOrg file (*.sportorg)"),
                                   str(time.strftime("%Y%m%d")))
    if file_name != '':
        try:
            GlobalAccess().clear_filters(remove_condition=False)
            File(file_name, logging.root).create()
            self.file = file_name
            self.add_recent_file(self.file)
            self.set_title(file_name)
        except Exception as e:
            logging.exception(str(e))
            QMessageBox.warning(self, _('Error'),
                                _('Cannot create file') + ': ' + file_name)
        # remove data
        if update_data:
            races[0] = Race()
        self.refresh()
def test_filename(self):
    def cleanup(h1, h2, fn):
        h1.close()
        h2.close()
        os.remove(fn)

    logging.basicConfig(filename='test.log')

    self.assertEqual(len(logging.root.handlers), 1)
    handler = logging.root.handlers[0]
    self.assertIsInstance(handler, logging.FileHandler)

    expected = logging.FileHandler('test.log', 'a')
    self.assertEqual(handler.stream.mode, expected.stream.mode)
    self.assertEqual(handler.stream.name, expected.stream.name)
    self.addCleanup(cleanup, handler, expected, 'test.log')
def common_logger_config(self, logger, config, incremental=False):
    """
    Perform configuration which is common to root and non-root loggers.
    """
    level = config.get('level', None)
    if level is not None:
        logger.setLevel(logging._levelNames[level])
    if not incremental:
        # Remove any existing handlers
        for h in logger.handlers[:]:
            logger.removeHandler(h)
        handlers = config.get('handlers', None)
        if handlers:
            self.add_handlers(logger, handlers)
        filters = config.get('filters', None)
        if filters:
            self.add_filters(logger, filters)
def set_log_levels():
    """
    Read the logging config file and set up log levels for the different
    loggers.
    """
    config = get_logging_conf()
    if 'levels' not in config.sections():
        return

    for logger_name in config.options('levels'):
        level = config.get('levels', logger_name)
        # Allow the config file to specify the root logger as 'root'
        if logger_name.lower() == 'root':
            logger_name = ''
        logger = logging.getLogger(logger_name)
        logger.setLevel(translate_log_level(level))
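The [levels] section this reads is project-specific. A hedged sketch of what such a section might contain, with the stdlib's logging.getLevelName() standing in for the project's own translate_log_level():

import configparser
import logging

# Inferred example of the [levels] section read by set_log_levels();
# the section layout and logger names are assumptions.
conf = configparser.ConfigParser()
conf.read_string("""
[levels]
root = WARNING
nav.web = INFO
nav.mydaemon = DEBUG
""")

for name in conf.options('levels'):
    target = '' if name.lower() == 'root' else name
    # getLevelName() maps plain names such as DEBUG/INFO back to their numeric values.
    logging.getLogger(target).setLevel(logging.getLevelName(conf.get('levels', name)))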
def set_custom_log_file():
    """Read logging config and add additional file handlers to specified logs"""
    logdir = os.path.join(nav.buildconf.localstatedir, 'log')
    config = get_logging_conf()
    section = 'files'
    if section not in config.sections():
        return

    for logger_name in config.options(section):
        filename = config.get(section, logger_name)
        # Allow the config file to specify the root logger as 'root'
        if logger_name.lower() == 'root':
            logger_name = ''
        logger = logging.getLogger(logger_name)
        filehandler = logging.FileHandler(os.path.join(logdir, filename))
        filehandler.setFormatter(DEFAULT_LOG_FORMATTER)
        logger.addHandler(filehandler)
def reopen_log_files():
    """
    Function to iterate over all FileHandlers in the logger hierarchy,
    close their streams and reopen them.
    """
    # Get the manager of the root logger
    root = logging.getLogger()
    manager = root.manager
    mylog = logging.getLogger('nav.logs')

    # list() so this also works on Python 3, where .values() returns a view
    for logger in [root] + list(manager.loggerDict.values()):
        try:
            for hdl in logger.handlers:
                if isinstance(hdl, logging.FileHandler):
                    mylog.debug("Reopening " + hdl.baseFilename)
                    hdl.flush()
                    hdl.acquire()
                    hdl.stream.close()
                    hdl.stream = open(hdl.baseFilename, hdl.mode)
                    hdl.release()
                    mylog.debug("Reopened " + hdl.baseFilename)
        except AttributeError:
            continue
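Helpers like the one above are typically wired to a signal so that an external log-rotation tool can ask a running daemon to reopen its files. A sketch, assuming reopen_log_files is importable (the nav.logs path is taken from the surrounding project):

import signal

# Assumed import path for the helper defined above.
from nav.logs import reopen_log_files

def _sighup_handler(signum, frame):
    # Called after logrotate has renamed the old files;
    # reopen the streams under the original file names.
    reopen_log_files()

signal.signal(signal.SIGHUP, _sighup_handler)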
def get_logfile_from_logger(logger=logging.root):
    """Return the file object of the first FileHandler of a given logger.

    This can be used as shorthand for redirecting the low-level stderr
    file descriptor to a log file after daemonization.

    Example usage:
        nav.daemon.daemonize('/var/run/nav/mydaemon.pid',
                             stderr=get_logfile_from_logger())

    Arguments:
        ``logger`` the logger object whose first FileHandler's file will be
                   returned. If omitted, the root logger is searched for a
                   FileHandler.

    Returns: A file object, or None if no FileHandlers were found.
    """
    for handler in logger.handlers:
        if isinstance(handler, logging.FileHandler):
            return handler.stream
def setup_logging(tag):
    """Sets up the logging module.
    """
    # Create formatter, with same format as C extension
    fmt = "[%s] %%(asctime)s %%(levelname)s: %%(message)s" % tag
    formatter = LoggingDateFormatter(fmt)

    # Console logger
    handler = logging.StreamHandler()
    handler.setLevel(logging.INFO)
    handler.setFormatter(formatter)

    # Set up logger
    logger = logging.root
    logger.setLevel(logging.INFO)
    logger.addHandler(handler)
def reconfigure_rootlogger(cls, use_stderr=False, use_file='', verbose=False):
    cls._configured = True
    level = logging.DEBUG if verbose else logging.INFO

    # Clear out current handlers to allow re-use of basicConfig.
    rootlogger = logging.root  # old school: logging.getLogger('')
    cls._remove_handlers(rootlogger)

    if use_stderr:
        # does logging.RootLogger.addHandler()
        logging.basicConfig(  # only the first call matters
            stream=sys.stderr,
            format=cls._format,
            datefmt=cls._datefmt,
            level=level)
    if use_file:
        h = cls._setup_filehandler(use_file, backupCount=3)
        rootlogger.addHandler(h)
    rootlogger.setLevel(level)
def configure_logger(self, name, config, incremental=False):
    """Configure a non-root logger from a dictionary."""
    logger = logging.getLogger(name)
    self.common_logger_config(logger, config, incremental)
    propagate = config.get('propagate', None)
    if propagate is not None:
        logger.propagate = propagate
def configure_root(self, config, incremental=False):
    """Configure a root logger from a dictionary."""
    root = logging.getLogger()
    self.common_logger_config(root, config, incremental)
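configure_logger() and configure_root() above correspond to the "loggers" and "root" keys of a logging.config.dictConfig() dictionary, for example:

import logging.config

logging.config.dictConfig({
    "version": 1,
    "handlers": {"console": {"class": "logging.StreamHandler"}},
    "loggers": {
        # handled by configure_logger(): a named, non-root logger
        "myapp.db": {"level": "DEBUG", "propagate": True},
    },
    # handled by configure_root(): logging.root itself
    "root": {"level": "WARNING", "handlers": ["console"]},
})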
def _resetExistingLoggers(parent="root"):
    """
    Reset the logger named 'parent' and all its children to their initial
    state, if they already exist in the current configuration.
    """
    root = logging.root
    # get sorted list of all existing loggers
    existing = sorted(root.manager.loggerDict.keys())
    if parent == "root":
        # all the existing loggers are children of 'root'
        loggers_to_reset = [parent] + existing
    elif parent not in existing:
        # nothing to do
        return
    elif parent in existing:
        loggers_to_reset = [parent]
        # collect children, starting with the entry after parent name
        i = existing.index(parent) + 1
        prefixed = parent + "."
        pflen = len(prefixed)
        num_existing = len(existing)
        while i < num_existing:
            if existing[i][:pflen] == prefixed:
                loggers_to_reset.append(existing[i])
            i += 1

    for name in loggers_to_reset:
        if name == "root":
            root.setLevel(logging.WARNING)
            for h in root.handlers[:]:
                root.removeHandler(h)
            for f in root.filters[:]:
                root.removeFilter(f)
            root.disabled = False
        else:
            logger = root.manager.loggerDict[name]
            logger.level = logging.NOTSET
            logger.handlers = []
            logger.filters = []
            logger.propagate = True
            logger.disabled = False
def _override_root_logger():
    """
    Override the root logger with a `StructuredRootLogger`.
    """
    logging.root = StructuredRootLogger(logging.WARNING)
    logging.Logger.root = logging.root
    logging.Logger.manager = logging.Manager(logging.Logger.root)
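Replacing logging.root as above also rebinds logging.Logger.root and rebuilds the manager, because every logger returned by logging.getLogger() is owned by Logger.manager and ultimately propagates to Logger.root. A less invasive alternative, sketched here with a hypothetical Logger subclass, is logging.setLoggerClass(), which only affects named loggers created afterwards and leaves the root logger untouched:

import logging

class ContextLogger(logging.Logger):
    """Hypothetical Logger subclass that stamps every record with an app name."""

    def makeRecord(self, *args, **kwargs):
        record = super().makeRecord(*args, **kwargs)
        record.app = "demo"  # extra attribute usable as %(app)s in a Formatter
        return record

logging.setLoggerClass(ContextLogger)   # affects loggers created after this point
log = logging.getLogger("demo.module")  # an instance of ContextLogger

assert isinstance(log, ContextLogger)
assert not isinstance(logging.root, ContextLogger)  # the root logger is unaffected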