The following 50 code examples, extracted from open source Python projects, illustrate how to use logging.WARNING.
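Before the project examples, here is a minimal sketch of the two patterns that recur throughout this page: using logging.WARNING as a level threshold and using it to quiet a noisy third-party logger. The logger name "requests" and the messages are illustrative only.

import logging

# Levels are plain integers: DEBUG(10) < INFO(20) < WARNING(30) < ERROR(40) < CRITICAL(50).
logging.basicConfig(level=logging.WARNING)  # root logger now drops DEBUG and INFO records

# The pattern most of the examples below share: silence a chatty third-party logger.
logging.getLogger("requests").setLevel(logging.WARNING)  # "requests" is an illustrative name

log = logging.getLogger(__name__)
log.warning("disk usage at %d%%", 91)  # emitted
log.info("routine status update")      # filtered out by the WARNING threshold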
def init_logger(self, args):
    level = logging.INFO
    if args.verbose:
        level = logging.VERBOSE
    if args.debug:
        level = logging.DEBUG
    logging.basicConfig(format='%(asctime)s [%(levelname)s] %(message)s', level=level)
    Rthandler = RotatingFileHandler('arbitrage.log', maxBytes=100*1024*1024, backupCount=10)
    Rthandler.setLevel(level)
    formatter = logging.Formatter('%(asctime)-12s [%(levelname)s] %(message)s')
    Rthandler.setFormatter(formatter)
    logging.getLogger('').addHandler(Rthandler)

    logging.getLogger("requests").setLevel(logging.WARNING)
    logging.getLogger("urllib3").setLevel(logging.WARNING)
def setup_logging(log_level=logging.INFO):
    """Set up the logging."""
    logging.basicConfig(level=log_level)
    fmt = ("%(asctime)s %(levelname)s (%(threadName)s) "
           "[%(name)s] %(message)s")
    colorfmt = "%(log_color)s{}%(reset)s".format(fmt)
    datefmt = '%Y-%m-%d %H:%M:%S'

    # Suppress overly verbose logs from libraries that aren't helpful
    logging.getLogger('requests').setLevel(logging.WARNING)
    logging.getLogger('urllib3').setLevel(logging.WARNING)
    logging.getLogger('aiohttp.access').setLevel(logging.WARNING)

    try:
        from colorlog import ColoredFormatter
        logging.getLogger().handlers[0].setFormatter(ColoredFormatter(
            colorfmt, datefmt=datefmt,
            reset=True,
            log_colors={
                'DEBUG': 'cyan',
                'INFO': 'green',
                'WARNING': 'yellow',
                'ERROR': 'red',
                'CRITICAL': 'red',
            }
        ))
    except ImportError:
        pass

    logger = logging.getLogger('')
    logger.setLevel(log_level)
def setup_logging(verbose=0, colors=False, name=None):
    """Configure console logging. Info and below go to stdout, others go to stderr.

    :param int verbose: Verbosity level. > 0 print debug statements. > 1 passed to sphinx-build.
    :param bool colors: Print color text in non-verbose mode.
    :param str name: Which logger name to set handlers to. Used for testing.
    """
    root_logger = logging.getLogger(name)
    root_logger.setLevel(logging.DEBUG if verbose > 0 else logging.INFO)
    formatter = ColorFormatter(verbose > 0, colors)
    if colors:
        colorclass.Windows.enable()

    handler_stdout = logging.StreamHandler(sys.stdout)
    handler_stdout.setFormatter(formatter)
    handler_stdout.setLevel(logging.DEBUG)
    handler_stdout.addFilter(type('', (logging.Filter,), {'filter': staticmethod(lambda r: r.levelno <= logging.INFO)}))
    root_logger.addHandler(handler_stdout)

    handler_stderr = logging.StreamHandler(sys.stderr)
    handler_stderr.setFormatter(formatter)
    handler_stderr.setLevel(logging.WARNING)
    root_logger.addHandler(handler_stderr)
def start_server():
    log.setLevel(logging.INFO)
    logging.getLogger('PokeAlarm').setLevel(logging.INFO)
    logging.getLogger('requests').setLevel(logging.WARNING)
    logging.getLogger('pyswgi').setLevel(logging.WARNING)
    logging.getLogger('connectionpool').setLevel(logging.WARNING)
    logging.getLogger('gipc').setLevel(logging.WARNING)
    logging.getLogger("urllib3").setLevel(logging.WARNING)

    parse_settings(os.path.abspath(os.path.dirname(__file__)))

    # Start Webhook Manager in a Thread
    spawn(manage_webhook_data, data_queue)

    # Start up Server
    log.info("PokeAlarm is listening for webhooks on: http://{}:{}".format(config['HOST'], config['PORT']))
    server = wsgi.WSGIServer((config['HOST'], config['PORT']), app, log=logging.getLogger('pyswgi'))
    server.serve_forever()


################################################## CONFIG UTILITIES ###################################################
def format(self, record): """Apply little arrow and colors to the record. Arrow and colors are only applied to sphinxcontrib.versioning log statements. :param logging.LogRecord record: The log record object to log. """ formatted = super(ColorFormatter, self).format(record) if self.verbose or not record.name.startswith(self.SPECIAL_SCOPE): return formatted # Arrow. formatted = '=> ' + formatted # Colors. if not self.colors: return formatted if record.levelno >= logging.ERROR: formatted = str(colorclass.Color.red(formatted)) elif record.levelno >= logging.WARNING: formatted = str(colorclass.Color.yellow(formatted)) else: formatted = str(colorclass.Color.cyan(formatted)) return formatted
def __init__(self, appname, dllname=None, logtype="Application"):
    logging.Handler.__init__(self)
    try:
        import win32evtlogutil, win32evtlog
        self.appname = appname
        self._welu = win32evtlogutil
        if not dllname:
            dllname = os.path.split(self._welu.__file__)
            dllname = os.path.split(dllname[0])
            dllname = os.path.join(dllname[0], r'win32service.pyd')
        self.dllname = dllname
        self.logtype = logtype
        self._welu.AddSourceToRegistry(appname, dllname, logtype)
        self.deftype = win32evtlog.EVENTLOG_ERROR_TYPE
        self.typemap = {
            logging.DEBUG   : win32evtlog.EVENTLOG_INFORMATION_TYPE,
            logging.INFO    : win32evtlog.EVENTLOG_INFORMATION_TYPE,
            logging.WARNING : win32evtlog.EVENTLOG_WARNING_TYPE,
            logging.ERROR   : win32evtlog.EVENTLOG_ERROR_TYPE,
            logging.CRITICAL: win32evtlog.EVENTLOG_ERROR_TYPE,
        }
    except ImportError:
        print("The Python Win32 extensions for NT (service, event "
              "logging) appear not to be available.")
        self._welu = None
def log(self, message, level=logging.DEBUG, depth=0):
    """Prepend string to log messages to denote class."""
    if depth <= 0:
        prefix = 'AmazonAccountUtils: '
    else:
        prefix = "\t" * depth
    if level == CRITICAL:
        self.logger.critical(prefix + str(message))
    elif level == ERROR:
        self.logger.error(prefix + str(message))
    elif level == WARNING:
        self.logger.warning(prefix + str(message))
    elif level == INFO:
        self.logger.info(prefix + str(message))
    else:
        self.logger.debug(prefix + str(message))
def main(args=None):
    from fontTools import configLogger

    if args is None:
        args = sys.argv[1:]

    options = Options()
    args = options.parse_opts(args)

    if len(args) < 1:
        print("usage: pyftmerge font...", file=sys.stderr)
        return 1

    configLogger(level=logging.INFO if options.verbose else logging.WARNING)
    if options.timing:
        timer.logger.setLevel(logging.DEBUG)
    else:
        timer.logger.disabled = True

    merger = Merger(options=options)
    font = merger.merge(args)
    outfile = 'merged.ttf'
    with timer("compile and save font"):
        font.save(outfile)
def log_msg(msg, level):
    """Write message to logfile"""
    # If we are NOT in debug mode, only write non-debug messages to the log
    if level == 'DEBUG':
        if DEBUG:
            logging.debug(msg)
            print BOLD + "DEBUG: " + msg + ENDC
    elif level == 'ERROR':
        logging.error(msg)
        tf.write('ERROR:' + msg + '\n')
        print ERROR + "ERROR: " + msg + ENDC
    elif level == 'WARNING':
        logging.warning(msg)
        tf.write('WARNING:' + msg + '\n')
        print WARNING + "WARNING: " + msg + ENDC
    # Otherwise if we ARE in debug, write everything to the log AND stdout
    else:
        logging.info(msg)
        tf.write(msg + '\n')
def __init__(sql, args):
    sql.module = args.module
    sql.connect_args = args.connect_args
    sql.prefix = args.prefix
    sql.config = args.config
    sql.log = logging.getLogger(__name__)
    sql.sqllog = logging.getLogger(__name__ + ".sql")
    if not args.log_sql:
        sql.sqllog.setLevel(logging.WARNING)
    sql._conn = None
    sql._cursor = None
    sql.auto_reconnect = False
    sql.in_transaction = False
    sql._set_flavour()
def __init__(self, debug=False):
    """
    Constructor of the Application.

    :param debug: Sets the logging level of the application
    :raises NotImplementedError: When ``Application.base_title`` not set in the class definition.
    """
    self.debug = debug
    loglevel = logging.DEBUG if debug else logging.WARNING
    logging.basicConfig(
        format='%(asctime)s - [%(levelname)s] %(message)s',
        datefmt='%I:%M:%S %p',
        level=loglevel)

    self.processor = EventProcessor()
    self.server = EventServer(processor=self.processor)

    if self.base_title is None:
        raise NotImplementedError

    self.services = {}
    self.views = {}
    self.current_view = None

    self.register('init', lambda evt, interface: self._load_view('default'))
def configure_logging(debug):
    '''Sets the data kennel logger to appropriate levels of chattiness.'''
    default_logger = logging.getLogger('')
    datadog_logger = logging.getLogger('datadog.api')
    requests_logger = logging.getLogger('requests')
    if debug:
        default_logger.setLevel(logging.DEBUG)
        datadog_logger.setLevel(logging.INFO)
        requests_logger.setLevel(logging.INFO)
    else:
        default_logger.setLevel(logging.INFO)
        datadog_logger.setLevel(logging.WARNING)
        requests_logger.setLevel(logging.WARNING)

    stream_handler = logging.StreamHandler(sys.__stdout__)
    stream_handler.setLevel(logging.DEBUG)
    stream_handler.setFormatter(logging.Formatter('%(asctime)s %(name)s %(levelname)s %(message)s'))
    default_logger.addHandler(stream_handler)
def parse_args():
    """ Parse the command line arguments """
    parser = argparse.ArgumentParser(
        description="Integrate Hugo and PhotoSwipe")
    parser.add_argument('-v', '--verbose', help="Verbose mode",
                        action="store_const", dest="loglevel",
                        const=logging.INFO, default=logging.WARNING)
    parser.add_argument('-f', '--fast', action="store_true",
                        help=('Fast mode (tries less potential crops)'))
    parser.add_argument('command', choices=['new', 'update', 'clean', 'init'],
                        help="action to do")
    parser.add_argument('album', nargs='?',
                        help="album to apply the action to")
    args = parser.parse_args()
    logging.basicConfig(level=args.loglevel,
                        datefmt="[%Y-%m-%d %H:%M:%S]",
                        format="%(asctime)s - %(message)s")
    settings.verbose = args.loglevel == logging.INFO
    settings.fast = args.fast
    return args.command, args.album
def emit(self, record):
    # default implementation
    try:
        try:
            self.stream = record.stream
        except AttributeError:
            if record.levelno >= logging.WARNING:
                record.stream = self.stream = sys.stderr
            else:
                record.stream = self.stream = sys.stdout
        self.emit_override(record)
        self.flush()
    except (KeyboardInterrupt, SystemExit):
        raise
    except:  # from the python library -_-
        self.handleError(record)
def runCmd(cmd, cmd_timeout=300):
    '''run command without showing console window on windows - return stdout and stderr as strings'''
    startupinfo = None
    output = ""
    output_err = ""
    debug_log("runCmd: {}".format(cmd))
    if os.name == 'nt':
        startupinfo = subprocess.STARTUPINFO()
        startupinfo.dwFlags |= subprocess.STARTF_USESHOWWINDOW
    try:
        proc = subprocess.Popen(cmd, bufsize=-1, startupinfo=startupinfo,
                                stdout=subprocess.PIPE, stderr=subprocess.PIPE,
                                stdin=None, shell=False, universal_newlines=False)
    except subprocess.SubprocessError as e:
        proc = None
        debug_log("exception in runCmd: {}".format(e), logging.ERROR)
    if proc is not None:
        try:
            outputb, output_errb = proc.communicate(timeout=cmd_timeout)
            output = outputb.decode('utf-8', 'replace')
            output_err = output_errb.decode('utf-8', 'replace')
        except subprocess.TimeoutExpired:
            proc.kill()
            debug_log("runCmd: Process killed due to timeout", logging.WARNING)
    else:
        debug_log("runCmd: Proc was none", logging.WARNING)
    return output, output_err
def log_to_console(level=logging.WARNING, override_root_logger=False, **kwargs):
    """
    Configure the logging system to send log entries to the console.

    Note that the root logger will not log to Seq by default.

    :param level: The minimum level at which to log.
    :param override_root_logger: Override the root logger, too?
        Note - this might cause problems if third-party components try to be clever
        when using the logging.XXX functions.
    """
    logging.setLoggerClass(StructuredLogger)

    if override_root_logger:
        _override_root_logger()

    logging.basicConfig(
        style='{',
        handlers=[
            ConsoleStructuredLogHandler()
        ],
        level=level,
        **kwargs
    )
def _get_logging_level():
    """
    Converts our ENV variable HA_LOG_LEVEL to a logging level object

    :return: logging level object
    """
    _log_level = _get_config('LOG_LEVEL', 'info').lower()
    to_return = logging.INFO

    if _log_level == 'critical':
        to_return = logging.CRITICAL
    if _log_level == 'error':
        to_return = logging.ERROR
    if _log_level == 'warning':
        to_return = logging.WARNING
    if _log_level == 'debug':
        to_return = logging.DEBUG

    return to_return
def add_coloring_to_emit_ansi(fn):
    RED_BOLD = '\x1b[31;1m'
    RED = '\x1b[31m'
    GREEN = '\x1b[32m'
    YELLOW = '\x1b[33m'
    BLUE = '\x1b[34m'
    PINK = '\x1b[35m'
    CYAN = '\x1b[36m'
    DEFAULT = '\x1b[0m'

    def new(*args):
        levelno = args[1].levelno
        color = DEFAULT
        if levelno >= logging.CRITICAL:
            color = RED_BOLD
        elif levelno >= logging.ERROR:
            color = RED
        elif levelno >= logging.WARNING:
            color = YELLOW
        elif levelno >= logging.INFO:
            color = DEFAULT
        elif levelno >= logging.DEBUG:
            color = GREEN
        args[1].msg = color + str(args[1].msg) + DEFAULT
        return fn(*args)
    return new
def _logging_levels(verbosity, quiet):
    # type: (int, bool) -> Sequence[int]
    """Determines the proper logging levels given required verbosity level and quiet.

    :param int verbosity: Requested level of verbosity
    :param bool quiet: Suppresses all logging when true
    :returns: local and root logging levels
    :rtype: list of int
    """
    if quiet:
        return logging.CRITICAL, logging.CRITICAL

    if verbosity is None or verbosity <= 0:
        return logging.WARNING, logging.CRITICAL

    normalized_local = min(verbosity, MAX_LOGGING_LEVEL)
    normalized_root = min(verbosity - normalized_local, MAX_LOGGING_LEVEL)
    return LOGGING_LEVELS[normalized_local], LOGGING_LEVELS[normalized_root]
def __init__(self, server='http://localhost/zabbix', user=httpuser, passwd=httppasswd,
             log_level=logging.WARNING, timeout=10, r_query_len=10, validate_certs=True,
             **kwargs):
    """ Create an API object. """
    self._setuplogging()
    self.set_log_level(log_level)
    self.server = server
    self.url = server + '/api_jsonrpc.php'
    self.proto = self.server.split("://")[0]
    # self.proto=proto
    self.httpuser = user
    self.httppasswd = passwd
    self.timeout = timeout
    self.kwargs = kwargs
    self.id = 0
    self.r_query = deque([], maxlen=r_query_len)
    self.validate_certs = validate_certs
    self.debug(logging.INFO, "url: " + self.url)
def query_one(self, command, cursor_type='tuple'):
    try:
        cursor = None
        if cursor_type == 'dict':
            cursor = self.conn.cursor(pymysql.cursors.DictCursor)
        else:
            cursor = self.cursor
        cursor.execute(command)
        data = cursor.fetchone()
        self.conn.commit()
        return data
    except Exception as e:
        log_helper.log(e, logging.WARNING)
        return None
def report_unused_values(self, logger, optional_configs=None):
    optional_configs = [] if optional_configs is None else optional_configs
    has_error = False
    for config in self.iter_configs():
        messages = config.describe_unused_values()
        if len(messages) > 0:
            if config in optional_configs:
                log_level = logging.WARNING
            else:
                log_level = logging.ERROR
                has_error = True
            for message in messages:
                logger.log(log_level, message)

    if has_error:
        raise AssertionException('Detected unused keys that are not ignorable.')
def run():  # pragma: no cover
    """Run Markdown from the command line."""

    # Parse options and adjust logging level if necessary
    options, logging_level = parse_options()
    if not options:
        sys.exit(2)
    logger.setLevel(logging_level)
    console_handler = logging.StreamHandler()
    logger.addHandler(console_handler)
    if logging_level <= WARNING:
        # Ensure deprecation warnings get displayed
        warnings.filterwarnings('default')
        logging.captureWarnings(True)
        warn_logger = logging.getLogger('py.warnings')
        warn_logger.addHandler(console_handler)

    # Run
    markdown.markdownFromFile(**options)
def main():
    parser = argparse.ArgumentParser(
        description="Backup files preserving metadata")
    parser.add_argument("-n", "--dry-run", dest="dryrun", action="store_true",
                        help="dry run, do not perform actual action")
    parser.add_argument("-q", "--quiet", dest="quiet", action="store_true",
                        help="be quiet")
    parser.add_argument("-d", "--debug", dest="debug", action="store_true",
                        help="show verbose debug information")
    parser.add_argument("config", help="configuration file")
    args = parser.parse_args()

    if args.quiet and not args.dryrun:
        logging.basicConfig(level=logging.WARNING)
    if args.debug:
        logging.basicConfig(level=logging.DEBUG)

    now = datetime.now()
    logger.info("=== %s @ %s ===" % (" ".join(sys.argv), now.isoformat()))
    if args.dryrun:
        logger.info("*** DRY RUN ***")

    backup = Backup(args.config, dryrun=args.dryrun, debug=args.debug)
    backup.backup()
    backup.cleanup()
    logger.info("=== Backup Finished! @ %s ===" % datetime.now().isoformat())
def configure_logging():
    # Create handler to log only `DEBUG` and `INFO` messages to stdout stream, and add to logger.
    stdout_handler = logging_.StreamHandler(sys.stdout)
    stdout_handler.setLevel(logging_.DEBUG)
    stdout_level_filter = LevelFilter([logging_.DEBUG, logging_.INFO])
    stdout_handler.addFilter(stdout_level_filter)
    logging.addHandler(stdout_handler)

    # Create handler to log levels greater than `INFO` to stderr stream, and add to logger.
    stderr_handler = logging_.StreamHandler()
    stderr_handler.setLevel(logging_.WARNING)
    logging.addHandler(stderr_handler)

    # Configure format of logged messages.
    formatter = logging_.Formatter(logging_.BASIC_FORMAT)
    stdout_handler.setFormatter(formatter)
    stderr_handler.setFormatter(formatter)
def setup_in_process(self):
    # Set up signal handlers for graceful exit
    gevent.signal(gevent.signal.SIGINT, self.stop)
    gevent.signal(gevent.signal.SIGTERM, self.stop)

    # Update config
    config['TIMEZONE'] = self.__timezone
    config['API_KEY'] = self.__google_key
    config['UNITS'] = self.__units
    config['DEBUG'] = self.__debug
    config['ROOT_PATH'] = os.path.abspath("{}/..".format(os.path.dirname(__file__)))

    # Hush some new loggers
    logging.getLogger('requests').setLevel(logging.WARNING)
    logging.getLogger('urllib3').setLevel(logging.WARNING)

    if config['DEBUG'] is True:
        logging.getLogger().setLevel(logging.DEBUG)

    # Connect the alarms and send the start up message
    for alarm in self.__alarms:
        alarm.connect()
        alarm.startup_message()

    # Main event handler loop
def format(self, record):
    if self.debug and self.color:
        if record.levelno >= logging.CRITICAL:
            color = TEXT_RED
        elif record.levelno >= logging.ERROR:
            color = TEXT_RED
        elif record.levelno >= logging.WARNING:
            color = TEXT_YELLOW
        elif record.levelno >= logging.INFO:
            color = TEXT_GREEN
        elif record.levelno >= logging.DEBUG:
            color = TEXT_CYAN
        else:
            color = TEXT_NORMAL
        record.levelname = "\x1b[%sm%s\x1b[%sm" % (color, record.levelname, TEXT_NORMAL)
    return logging.Formatter.format(self, record)
def update_logging_settings(self, file_path=None, level=None, format=None):
    """ Update global logging. If None is set to the arguments, it will keep the previous setting.

    Args:
        file_path (str): It is Initialized to 'log.log'.
        level (str): It can be 'error', 'warning' or 'info'. It is Initialized to 'error'.
        format (str): It is Initialized to '%(asctime)s %(levelname)s %(message)s'.
    """
    LOGGING_STRING_MAP = {'info': logging.INFO,
                          'warning': logging.WARNING,
                          'error': logging.ERROR}
    if file_path is not None:
        self._logger_config['file_path'] = self._get_abs_path(file_path)
    if level is not None:
        self._logger_config['level'] = level
    if format is not None:
        self._logger_config['format'] = format

    logger = logging.getLogger(Configuration.LOGGER_NAME)
    log_file = logging.FileHandler(self._logger_config['file_path'])
    logger.addHandler(log_file)
    log_file.setFormatter(logging.Formatter(self._logger_config['format']))
    logger.setLevel(LOGGING_STRING_MAP[self._logger_config['level']])
    self._logger = logger
def configure_logging(info=False, debug=False):
    """Configure logging

    The function configures log messages. By default, log messages
    are sent to stderr. Set the parameter `debug` to activate the
    debug mode.

    :param debug: set the debug mode
    """
    if info:
        logging.basicConfig(level=logging.INFO, format=LOG_FORMAT)
        logging.getLogger('requests').setLevel(logging.WARNING)
        logging.getLogger('urllib3').setLevel(logging.WARNING)
        logging.getLogger('elasticsearch').setLevel(logging.WARNING)
    elif debug:
        logging.basicConfig(level=logging.DEBUG, format=DEBUG_LOG_FORMAT)
    else:
        logging.basicConfig(level=logging.WARNING, format=LOG_FORMAT)
        logging.getLogger('requests').setLevel(logging.WARNING)
        logging.getLogger('urllib3').setLevel(logging.WARNING)
        logging.getLogger('elasticsearch').setLevel(logging.WARNING)
def test_warn_on_deprecated_flags(self):
    sys.argv = ["[executable]",
                "evaluate",
                "--archive_file", "tests/fixtures/bidaf/serialization/model.tar.gz",
                "--evaluation_data_file", "tests/fixtures/data/squad.json",
                "--cuda_device", "-1"]

    with self.assertLogs(level=logging.WARNING) as context:
        main()

    assert set(context.output) == {
        'WARNING:allennlp.commands:Argument name --archive_file is deprecated '
        '(and will likely go away at some point), please use --archive-file instead',
        'WARNING:allennlp.commands:Argument name --evaluation_data_file is deprecated '
        '(and will likely go away at some point), please use --evaluation-data-file instead',
        'WARNING:allennlp.commands:Argument name --cuda_device is deprecated '
        '(and will likely go away at some point), please use --cuda-device instead',
    }
def get_fallback_logger(stream=None):
    global _fallback_logger
    if _fallback_logger:
        return _fallback_logger

    log_format = '%(asctime)s:%(levelname)s:%(message)s'
    formatter = logging.Formatter(log_format)

    level = logging.WARNING
    handler = logging.StreamHandler(stream)
    handler.setLevel(level)
    handler.setFormatter(formatter)

    logger = logging.Logger('powerline')
    logger.setLevel(level)
    logger.addHandler(handler)

    _fallback_logger = PowerlineLogger(None, logger, '_fallback_')
    return _fallback_logger
def draw(self, context):
    layout = self.layout
    scene = context.scene

    for line in logList:
        lines = line.split(":", 1)
        if lines[0] == 'CRITICAL':
            lineicon = 'RADIO'
        elif lines[0] == 'ERROR':
            lineicon = 'CANCEL'
        elif lines[0] == 'WARNING':
            lineicon = 'ERROR'
        elif lines[0] == 'INFO':
            lineicon = 'INFO'
        else:
            lineicon = 'TEXT'
        layout.label(text=lines[1], icon=lineicon)
    logList[:] = []  # Clear log list for next call (otherwise list is not updated when 'OK' button is not clicked)

# Export button
def get_logging_level(log_level):
    logging_level = logging.INFO
    if log_level == 'DEBUG':
        logging_level = logging.DEBUG
    elif log_level == 'INFO':
        logging_level = logging.INFO
    elif log_level == 'WARNING':
        logging_level = logging.WARNING
    elif log_level == 'ERROR':
        logging_level = logging.ERROR
    elif log_level == 'CRITICAL':
        logging_level = logging.CRITICAL
    else:
        print('Unknown or unset logging level. Using INFO')
    return logging_level
def debug(self, value):
    """
    Sets the debug status.

    :param value: The debug status, True or False.
    :type: bool
    """
    self.__debug = value

    if self.__debug:
        # if debug status is True, turn on debug logging
        for _, logger in iteritems(self.logger):
            logger.setLevel(logging.DEBUG)
        # turn on httplib debug
        httplib.HTTPConnection.debuglevel = 1
    else:
        # if debug status is False, turn off debug logging,
        # setting log level to default `logging.WARNING`
        for _, logger in iteritems(self.logger):
            logger.setLevel(logging.WARNING)
        # turn off httplib debug
        httplib.HTTPConnection.debuglevel = 0
def set_up_logging(debug):
    if debug:
        logging_level = logging.DEBUG
    else:
        logging_level = logging.INFO

    logging.basicConfig(format='%(asctime)s ~ %(levelname)-10s %(name)-25s %(message)s',
                        datefmt='%Y-%m-%d %H:%M', level=logging_level)
    logging.getLogger('telegram').setLevel(logging.WARNING)
    logging.getLogger('requests').setLevel(logging.WARNING)
    logging.getLogger('JobQueue').setLevel(logging.WARNING)

    logging.addLevelName(logging.DEBUG, '?? DEBUG')
    logging.addLevelName(logging.INFO, '?? INFO')
    logging.addLevelName(logging.WARNING, '?? WARNING')
    logging.addLevelName(logging.ERROR, '?? ERROR')
def format(self, record):
    stdout_template = '{levelname}' + Fore.RESET + '] {threadName}: ' + '{message}'
    stdout_head = '[%s'

    allFormats = {
        logging.DEBUG: logging.StrFormatStyle(stdout_head % Fore.LIGHTBLUE_EX + stdout_template),
        logging.INFO: logging.StrFormatStyle(stdout_head % Fore.GREEN + stdout_template),
        logging.WARNING: logging.StrFormatStyle(stdout_head % Fore.LIGHTYELLOW_EX + stdout_template),
        logging.ERROR: logging.StrFormatStyle(stdout_head % Fore.LIGHTRED_EX + stdout_template),
        logging.CRITICAL: logging.StrFormatStyle(stdout_head % Fore.RED + stdout_template)
    }

    self._style = allFormats.get(record.levelno, logging.StrFormatStyle(logging._STYLES['{'][1]))
    self._fmt = self._style._fmt
    result = logging.Formatter.format(self, record)
    return result
def show_notices(printer=logger.log):
    if Config.scout_notices:
        for notice in Config.scout_notices:
            try:
                if isinstance(notice, str):
                    printer(logging.WARNING, notice)
                else:
                    lvl = notice['level'].upper()
                    msg = notice['message']

                    if isinstance(lvl, str):
                        lvl = getattr(logging, lvl, logging.INFO)

                    printer(lvl, msg)
            except KeyError:
                printer(logging.WARNING, json.dumps(notice))
def clean_notices(notices):
    cleaned = []

    for notice in notices:
        try:
            if isinstance(notice, str):
                cleaned.append({"level": "WARNING", "message": notice})
            else:
                lvl = notice['level'].upper()
                msg = notice['message']

                cleaned.append({"level": lvl, "message": msg})
        except KeyError:
            cleaned.append({"level": "WARNING", "message": json.dumps(notice)})
        except:
            cleaned.append({"level": "ERROR", "message": json.dumps(notice)})

    return cleaned
def test_log_level_from_config(self):
    cfg = {'verbose_level': 0}
    self.assertEqual(logging.ERROR, logs.log_level_from_config(cfg))
    cfg = {'verbose_level': 1}
    self.assertEqual(logging.WARNING, logs.log_level_from_config(cfg))
    cfg = {'verbose_level': 2}
    self.assertEqual(logging.INFO, logs.log_level_from_config(cfg))
    cfg = {'verbose_level': 3}
    self.assertEqual(logging.DEBUG, logs.log_level_from_config(cfg))
    cfg = {'verbose_level': 1, 'log_level': 'critical'}
    self.assertEqual(logging.CRITICAL, logs.log_level_from_config(cfg))
    cfg = {'verbose_level': 1, 'log_level': 'error'}
    self.assertEqual(logging.ERROR, logs.log_level_from_config(cfg))
    cfg = {'verbose_level': 1, 'log_level': 'warning'}
    self.assertEqual(logging.WARNING, logs.log_level_from_config(cfg))
    cfg = {'verbose_level': 1, 'log_level': 'info'}
    self.assertEqual(logging.INFO, logs.log_level_from_config(cfg))
    cfg = {'verbose_level': 1, 'log_level': 'debug'}
    self.assertEqual(logging.DEBUG, logs.log_level_from_config(cfg))
    cfg = {'verbose_level': 1, 'log_level': 'bogus'}
    self.assertEqual(logging.WARNING, logs.log_level_from_config(cfg))
    cfg = {'verbose_level': 1, 'log_level': 'info', 'debug': True}
    self.assertEqual(logging.DEBUG, logs.log_level_from_config(cfg))
def __load_logging_level(self, config):
    var = config.get_value('engine/replace-with-kanji-python', 'logging_level')
    if var is None or var.get_type_string() != 's' or not var.get_string() in _name_to_logging_level:
        level = 'WARNING'
        if var:
            config.unset('engine/replace-with-kanji-python', 'logging_level')
    else:
        level = var.get_string()
    logger.info("logging_level: %s", level)
    logging.getLogger().setLevel(_name_to_logging_level[level])
    return level
def setup_logs(args):
    """
    Initialize the api loggers.

    Args:
        args: dict containing the configuration options.
    """
    flask_logging.create_logger = lambda app: use(app.logger_name)

    if not args.get("debug", True):
        set_level("werkzeug", logging.ERROR)

    level = [logging.WARNING, logging.INFO, logging.DEBUG][
        min(args.get("verbose", 1), 2)]

    internal_error_log = ExceptionHandler()
    internal_error_log.setLevel(logging.ERROR)
    log.root.setLevel(level)
    log.root.addHandler(internal_error_log)

    if api.config.get_settings()["email"]["enable_email"]:
        severe_error_log = SevereHandler()
        severe_error_log.setLevel(logging.CRITICAL)
        log.root.addHandler(severe_error_log)

    stats_log = StatsHandler()
    stats_log.setLevel(logging.INFO)
    log.root.addHandler(stats_log)
def _cli_argument_parser():
    argp = argparse.ArgumentParser(
        description='Produce Lilypond documentation')
    argp.add_argument(
        'path', metavar='PATH',
        help='The file or directory to parse')
    argp.add_argument(
        '--output', '-o', metavar='FILE',
        help='The output file. If not given, prints to standard output')
    argp.add_argument(
        '--trace-parser', action='store_true', dest='trace_parser',
        help='Print debug information from the parser')
    argp.add_argument(
        '-d', '--debug',
        help="Detailed debugging information",
        action="store_const", dest="loglevel",
        const=logging.DEBUG, default=logging.WARNING)
    argp.add_argument(
        '-v', '--verbose',
        help="Verbose output",
        action="store_const", dest="loglevel",
        const=logging.INFO)
    return argp