The following 50 code examples, extracted from open-source Python projects, illustrate how to use logging.FATAL.
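For context, logging.FATAL is simply an alias of logging.CRITICAL (numeric value 50) in the standard library, so it can be used wherever a level constant is expected. Below is a minimal sketch of the patterns that recur in the examples that follow (setting a logger or handler threshold and emitting a fatal message); the logger name "app" is only an illustrative placeholder.

import logging

# logging.FATAL is the same constant as logging.CRITICAL (both equal 50).
assert logging.FATAL == logging.CRITICAL == 50

# "app" is a hypothetical logger name used only for illustration.
logger = logging.getLogger("app")
logger.setLevel(logging.FATAL)       # suppress everything below FATAL/CRITICAL

handler = logging.StreamHandler()
handler.setLevel(logging.FATAL)      # handlers can filter on the same constant
logger.addHandler(handler)

logger.fatal("unrecoverable error")  # emitted (FATAL >= FATAL)
logger.error("ordinary error")       # filtered out (ERROR < FATAL)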
def __init__(self, config_file=DEFAULT_CONFIG_FILE):
    self.config = ConfigReader(config_file)
    self.states = {}
    self.geo_locator = Nominatim()
    self.tweet_count = 0
    self.city_cache_appender = CacheAppender(self.config.cache_file_path)

    def get_level():
        return {
            'DEBUG': logging.DEBUG,
            'INFO': logging.INFO,
            'WARN': logging.WARNING,
            'ERROR': logging.ERROR,
            'FATAL': logging.FATAL,
            'CRITICAL': logging.CRITICAL
        }[self.config.logging_level]

    logging.basicConfig(format="[%(levelname)s] %(name)s: %(message)s", level=get_level())
    self.logger = logging.getLogger(self.__class__.__name__)
    self.logger.info("Analysing city names using config in %s" % config_file)
def ConvertLog4ToCFLevel(log4level):
    if log4level == logging.FATAL + 1: return CF.LogLevels.OFF
    if log4level == logging.FATAL:     return CF.LogLevels.FATAL
    if log4level == logging.ERROR:     return CF.LogLevels.ERROR
    if log4level == logging.WARN:      return CF.LogLevels.WARN
    if log4level == logging.INFO:      return CF.LogLevels.INFO
    if log4level == logging.DEBUG:     return CF.LogLevels.DEBUG
    if log4level == logging.TRACE:     return CF.LogLevels.TRACE
    if log4level == logging.NOTSET:    return CF.LogLevels.ALL
    return CF.LogLevels.INFO
def ConvertToLog4Level(newLevel):
    level = logging.INFO
    if newLevel == CF.LogLevels.OFF:   level = logging.FATAL + 1
    if newLevel == CF.LogLevels.FATAL: level = logging.FATAL
    if newLevel == CF.LogLevels.ERROR: level = logging.ERROR
    if newLevel == CF.LogLevels.WARN:  level = logging.WARN
    if newLevel == CF.LogLevels.INFO:  level = logging.INFO
    if newLevel == CF.LogLevels.DEBUG: level = logging.DEBUG
    if newLevel == CF.LogLevels.TRACE: level = logging.TRACE
    if newLevel == CF.LogLevels.ALL:   level = logging.TRACE
    return level
def _logWriter(self, level, message, exception=None):
    self._logger.setLevel(level)
    self._fh.setLevel(level)
    self._ch.setLevel(level)
    if exception is not None:
        exFormatted = self._formatException(exception)
        msg = "%s%s" % (message, exFormatted)
    else:
        msg = message
    if level == logging.DEBUG:
        logging.debug(msg)
    elif level == logging.INFO:
        logging.info(msg)
    elif level == logging.WARN:
        logging.warn(msg)
    elif level == logging.FATAL:
        logging.fatal(msg)
    elif level == logging.ERROR:
        logging.error(msg)
def level_to_int(level: Union[str, int]) -> int:
    if isinstance(level, int):
        if logging.NOTSET <= level <= logging.FATAL:
            return level
        else:
            raise ValueError('Log level must be 0 <= level <= 50, '
                             'but got: {}'.format(level))
    elif isinstance(level, str):
        try:
            return getattr(logging, level.upper())
        except AttributeError:
            raise ValueError('Invalid log level: {}'.format(level))
    else:
        raise TypeError(
            'Log level must be int (0 ~ 50) or string, '
            'but got type: {}'.format(type(level)))
def log_at_level(self, value):
    """ Make sure logging is always set at a valid level """
    if value in [
        logging.CRITICAL,
        logging.DEBUG,
        logging.ERROR,
        logging.FATAL,
        logging.INFO,
        logging.WARNING,
    ]:
        self._log_at_level = value
        self._set_logging()
    else:
        if not self._log_at_level:
            self._log_at_level = logging.WARNING
        self._set_logging()

# *******************************************************************
# methods
# *******************************************************************
def test_nested_with_virtual_parent(self):
    # Logging levels when some parent does not exist yet.
    m = self.next_message

    INF = logging.getLogger("INF")
    GRANDCHILD = logging.getLogger("INF.BADPARENT.UNDEF")
    CHILD = logging.getLogger("INF.BADPARENT")
    INF.setLevel(logging.INFO)

    # These should log.
    GRANDCHILD.log(logging.FATAL, m())
    GRANDCHILD.info(m())
    CHILD.log(logging.FATAL, m())
    CHILD.info(m())

    # These should not log.
    GRANDCHILD.debug(m())
    CHILD.debug(m())

    self.assert_log_lines([
        ('INF.BADPARENT.UNDEF', 'CRITICAL', '1'),
        ('INF.BADPARENT.UNDEF', 'INFO', '2'),
        ('INF.BADPARENT', 'CRITICAL', '3'),
        ('INF.BADPARENT', 'INFO', '4'),
    ])
def _late_addoptions(parser, logcfg):
    """Add options to control logger"""
    parser.addini(
        name='logger_logsdir',
        help='base directory with log files for file loggers [basetemp]',
        default=None,
    )
    group = parser.getgroup('logger')
    group.addoption('--logger-logsdir',
                    help='pick your own logs directory instead of default '
                         'directory under session tmpdir')
    if logcfg._enabled:
        parser = _log_option_parser(logcfg._loggers)
        group.addoption('--log', default=parser(logcfg._log_option_default),
                        type=parser, metavar='LOGGER,LOGGER.LEVEL,...',
                        help='comma delimited list of loggers optionally suffixed with level '
                             'preceded by a dot. Levels can be lower or uppercase, or numeric. '
                             'For example: "logger1,logger2.info,logger3.FATAL,logger4.25"')
def test_set_default_log_level2(self, mock_connect):
    import logging
    from datastore import get_logger
    from logging.handlers import RotatingFileHandler
    from datastore.postgresstore import PostgresLogHandler

    self.dsb.set_default_log_level(logging.FATAL)
    self.assertEqual(DataStore.LOG_LEVEL, logging.FATAL)
    self.dsb.add_file_db("config-example.json", None)
    self.dsb.add_postgres_db("")
    logger = get_logger()
    fdbh = None
    pdbh = None
    for handler in logger.handlers:
        if isinstance(handler, RotatingFileHandler):
            fdbh = handler
        if isinstance(handler, PostgresLogHandler):
            pdbh = handler
    self.assertEqual(fdbh.level, logging.FATAL)
    self.assertEqual(pdbh.level, logging.FATAL)
def manage():
    parser = argparse.ArgumentParser()
    subparsers = parser.add_subparsers()

    verbosity_group = parser.add_mutually_exclusive_group()
    verbosity_group.add_argument('--verbose', '-v', action='count', default=0,
                                 help='Increase verbosity')
    verbosity_group.add_argument('--quiet', '-q', action='store_true',
                                 help='Reduce verbosity')

    # FIXME: Database path incorrect! Depends on installation path!
    parser.add_argument('--database', type=str, default='sqlite:///tests.sqlite',
                        help='Change the default database')
    parser.set_defaults(fun=run)

    parser_setup = subparsers.add_parser('reset-database')
    parser_setup.set_defaults(fun=reset_database)

    args = parser.parse_args()

    lut_verbosity = {0: logging.WARNING, 1: logging.INFO, 2: logging.DEBUG}
    level = logging.FATAL if getattr(args, 'quiet') else \
        lut_verbosity.get(getattr(args, 'verbose', 0), logging.DEBUG)
    # logging.basicConfig(level=level, format='%(asctime)s - %(levelname)s - %(message)s')
    logging.basicConfig(level=level, format='%(name)s: %(levelname)s: %(message)s')
    logging.info('Configuration:\n\t{}'.format('\n\t'.join(
        ['{}: {}'.format(k, getattr(v, '__name__', v)) for k, v in sorted(args.__dict__.items())])))

    args.fun(args)
def get_console_handler():
    if config['silent']:
        target_level = logging.FATAL
    elif config['verbose']:
        target_level = logging.DEBUG
    elif config['quiet']:
        target_level = logging.ERROR
    else:
        target_level = logging.INFO

    handler = logging.StreamHandler(sys.stderr)
    handler.setLevel(target_level)
    log_format = LOG_FORMAT if config['verbose'] else "%(message)s"
    handler.setFormatter(logging.Formatter(log_format))
    return handler
def main():
    args = get_args()
    if args.silent:
        logging.basicConfig(level=logging.FATAL)
    else:
        logging.basicConfig(level=logging.DEBUG if args.debug else logging.INFO)

    load_package(args.package)
    extract_dir = args.extract_dir
    num_archives = args.num_archives

    paths = get_paths(args.archive_path)
    if num_archives < len(paths):
        if args.random:
            paths = sample(paths, num_archives)
        else:
            paths = paths[:num_archives]

    if len(paths) > 1:
        process_reports(paths, extract_dir, args.num_workers)
    else:
        print_response(process_report(paths[0], extract_dir))
def _emit(self, record):
    msg = self.format(record)
    fs = "%s\n"
    if hasattr(record, '__nonewline__'):
        msg = msg.rstrip()
        fs = "%s"
    stream = self.stream
    if record.levelno in [ERROR, CRITICAL, FATAL]:
        stream = self.error_stream
    if not hasattr(types, "UnicodeType"):  # if no unicode support...
        stream.write(fs % msg)
    else:
        try:
            stream.write(fs % msg)
        except UnicodeError:
            stream.write(fs % msg.encode("UTF-8"))
    self.flush()
def ConvertLogLevel(oldstyle_level):
    if oldstyle_level == 0: return CF.LogLevels.FATAL
    if oldstyle_level == 1: return CF.LogLevels.ERROR
    if oldstyle_level == 2: return CF.LogLevels.WARN
    if oldstyle_level == 3: return CF.LogLevels.INFO
    if oldstyle_level == 4: return CF.LogLevels.DEBUG
    if oldstyle_level == 5: return CF.LogLevels.ALL
    return CF.LogLevels.INFO
def ConvertLevelNameToDebugLevel(level_name):
    if level_name == "OFF":   return 0
    if level_name == "FATAL": return 0
    if level_name == "ERROR": return 1
    if level_name == "WARN":  return 2
    if level_name == "INFO":  return 3
    if level_name == "DEBUG": return 4
    if level_name == "TRACE": return 5
    if level_name == "ALL":   return 5
    return 3
def ConvertLevelNameToCFLevel(level_name):
    if level_name == "OFF":   return CF.LogLevels.OFF
    if level_name == "FATAL": return CF.LogLevels.FATAL
    if level_name == "ERROR": return CF.LogLevels.ERROR
    if level_name == "WARN":  return CF.LogLevels.WARN
    if level_name == "INFO":  return CF.LogLevels.INFO
    if level_name == "DEBUG": return CF.LogLevels.DEBUG
    if level_name == "TRACE": return CF.LogLevels.TRACE
    if level_name == "ALL":   return CF.LogLevels.ALL
    return CF.LogLevels.INFO
def fatal(self, msg, *args, **kw):
    self.log(self.FATAL, msg, *args, **kw)
def logging_level(runlevel):
    """
    Translates a runlevel into the value expected by the logging module.

    :param stem.util.log.Runlevel runlevel: runlevel to be returned, no logging if **None**
    """
    if runlevel:
        return LOG_VALUES[runlevel]
    else:
        return logging.FATAL + 5
def fatal(self, message, exception):
    self._logWriter(logging.FATAL, message, exception)
def __init__(self, region, verbose_log=None):
    self.region = region
    self.config_conn = boto3.client('config', region_name=region)
    if verbose_log is None:
        self.verbose_log = logging.getLogger("configService")
        self.verbose_log.setLevel(level=logging.FATAL)
    else:
        self.verbose_log = verbose_log
def fatal(self, *args, **kwargs):
    self._level_write(logging.FATAL, *args, **kwargs)
    raise SystemExit(1)
def get_logging_level(MESSAGELEVEL=None):
    '''get_logging_level will return a logging level based on first a variable
    going into the function, then an environment variable MESSAGELEVEL, and
    then the default is DEBUG.
    :param MESSAGELEVEL: the level to get.
    '''
    if MESSAGELEVEL is None:
        MESSAGELEVEL = os.environ.get("MESSAGELEVEL", "DEBUG")

    if MESSAGELEVEL in ["DEBUG", "INFO"]:
        print("Environment message level found to be %s" % MESSAGELEVEL)

    if MESSAGELEVEL == "FATAL":
        return logging.FATAL
    elif MESSAGELEVEL == "CRITICAL":
        return logging.CRITICAL
    elif MESSAGELEVEL == "ERROR":
        return logging.ERROR
    elif MESSAGELEVEL == "WARNING":
        return logging.WARNING
    elif MESSAGELEVEL == "INFO":
        return logging.INFO
    elif MESSAGELEVEL == "DEBUG":
        return logging.DEBUG

    return logging.DEBUG
def parse_options():
    """Parse additional cli logging options."""
    parser = NoErrArgumentParser(usage=argparse.SUPPRESS,
                                 formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument("--logdir", dest="logdir", default=None,
                        help="Directory path to store log files.")
    parser.add_argument("--loglevel", dest="loglevel", default="INFO",
                        help="Logging level (DEBUG, INFO, WARNING, ERROR, FATAL, CRITICAL).")
    parser.add_argument("--logprefix", dest="logprefix", default="main",
                        help="Log files prefix.")
    parser.add_argument("--silent", action="store_true", dest="silent", default=False,
                        help="Do not print logging to console.")
    parser.add_argument("-k", action="store", dest="keyword", default=None,
                        help="pytest keywords.")
    parser.add_argument("-m", action="store", dest="markexpr", default=None,
                        help="pytest markers expression.")
    opts = parser.parse_args()
    if opts.markexpr is None:
        opts.markexpr = ""
    if opts.keyword is None:
        opts.keyword = ""
    return opts
def test_stdout_handlers_many_loggers(testdir):
    makefile(testdir, ['conftest.py'], """
        import logging
        def pytest_logger_stdoutloggers(item):
            return [
                'foo',
                ('bar', logging.ERROR),
                ('baz', logging.FATAL)
            ]
    """)
    makefile(testdir, ['test_case.py'], """
        import logging
        def test_case():
            for lgr in (logging.getLogger(name) for name in ['foo', 'bar', 'baz']):
                lgr.fatal('this is fatal')
                lgr.error('this is error')
                lgr.warning('this is warning')
    """)
    result = testdir.runpytest('-s')
    assert result.ret == 0
    result.stdout.fnmatch_lines([
        '',
        'test_case.py ',
        '* foo: this is fatal',
        '* foo: this is error',
        '* foo: this is warning',
        '* bar: this is fatal',
        '* bar: this is error',
        '* baz: this is fatal',
        '.',
        ''
    ])
def test_sanitize_level():
    assert plugin._sanitize_level(logging.INFO) == logging.INFO
    assert plugin._sanitize_level('15') == 15
    assert plugin._sanitize_level('warn') == logging.WARN
    assert plugin._sanitize_level('FATAL') == logging.FATAL

    with pytest.raises(TypeError):
        plugin._sanitize_level('WARN ')
    with pytest.raises(TypeError):
        plugin._sanitize_level('unknown')
    with pytest.raises(TypeError):
        plugin._sanitize_level(1.0)

    assert plugin._sanitize_level('WARN ', raises=False) is None
def test_loggers_from_logcfg():
    logcfg = plugin.LoggerConfig()
    logcfg.add_loggers(['a', 'b', 'c'], stdout_level=logging.ERROR, file_level='warn')
    logcfg.add_loggers(['d'], stdout_level='10')

    log_option = [('b', logging.FATAL), 'd']

    loggers = plugin._loggers_from_logcfg(logcfg, log_option)
    assert loggers.stdout == [('b', logging.FATAL), ('d', 10)]
    assert loggers.file == [('a', logging.WARN), ('b', logging.WARN),
                            ('c', logging.WARN), ('d', 0)]
    assert loggers
def setVerbosity(verbosity=0):
    """
    Set the verbosity level of logging.

    Args:
        verbosity: set the verbosity level using an integer {0, 1, 2, 3, 4}, e.g.
            verbosity=0 implies DEBUG logging, which logs messages at all levels;
            verbosity=1 implies INFO logging;
            verbosity=2 implies WARN logging;
            verbosity=3 implies ERROR logging;
            verbosity=4 implies FATAL logging, which logs only FATAL messages.
    """
    _logger.setLevel(_verbosity(str(verbosity)))
def _verbosity(verbosity):
    return {
        '0': DEBUG,
        '1': INFO,
        '2': WARN,
        '3': ERROR,
        '4': FATAL,
    }[verbosity]
def error(msg, *args, **kwargs):
    """
    Logs a message at the ERROR level; at this verbosity, ERROR and FATAL messages are logged.

    Args:
        msg: the message to log
    """
    _logger.error(_log_prefix() + msg, *args, **kwargs)
def fatal(msg, *args, **kwargs):
    """
    Logs a message at the FATAL level; at this verbosity, only FATAL messages are logged.

    Args:
        msg: the message to log
    """
    _logger.fatal(_log_prefix() + msg, *args, **kwargs)