The following are 49 code examples extracted from open-source Python projects, illustrating how to use the tornado.options.options.logging option.
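Before the extracted examples, here is a minimal orientation sketch (not taken from any of the projects below) of the usual flow: `tornado.options.parse_command_line()` reads the built-in `--logging` flag and configures the root logger accordingly, after which `options.logging` holds the chosen level as a string.

    # Minimal sketch: read the built-in "logging" option after parsing.
    # Run e.g. with --logging=debug
    import logging
    from tornado.options import options, parse_command_line

    def main():
        # parse_command_line() applies options.logging to the root logger
        # via enable_pretty_logging() unless it is "none".
        parse_command_line()
        logging.debug("effective --logging value: %s", options.logging)

    if __name__ == "__main__":
        main()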
def kill_all_sessions(timeout=False):
    """
    Calls all 'kill_session_callbacks' attached to all `SESSIONS`.

    If *timeout* is ``True``, emulate a session timeout event in order to
    *really* kill any user sessions (to ensure things like dtach processes
    get killed too).
    """
    logging.debug(_("Killing all sessions..."))
    for session in list(SESSIONS.keys()):
        if timeout:
            if "timeout_callbacks" in SESSIONS[session]:
                if SESSIONS[session]["timeout_callbacks"]:
                    for callback in SESSIONS[session]["timeout_callbacks"]:
                        callback(session)
        else:
            if "kill_session_callbacks" in SESSIONS[session]:
                if SESSIONS[session]["kill_session_callbacks"]:
                    for callback in SESSIONS[session]["kill_session_callbacks"]:
                        callback(session)
def add_handler(self, pattern, handler, **kwargs):
    """
    Adds the given *handler* (`tornado.web.RequestHandler`) to the Tornado
    Application (`self.ws.application`) to handle URLs matching *pattern*.
    If given, *kwargs* will be added to the `tornado.web.URLSpec` when the
    complete handler is assembled.

    .. note::

        If the *pattern* does not start with the configured `url_prefix` it
        will be automatically prepended.
    """
    logging.debug("Adding handler: (%s, %s)" % (pattern, handler))
    url_prefix = self.ws.settings['url_prefix']
    if not pattern.startswith(url_prefix):
        if pattern.startswith('/'):
            # Get rid of the / (it will be in the url_prefix)
            pattern = pattern.lstrip('/')
    spec = tornado.web.URLSpec(pattern, handler, kwargs)
    # Why the Tornado devs didn't give us a simple way to do this is beyond
    # me.
    self.ws.application.handlers[0][1].append(spec)
def _deliver(cls, message, upn="AUTHENTICATED", session=None):
    """
    Writes the given *message* (string) to all users matching *upn* using the
    write_message() function.  If *upn* is not provided or is
    "AUTHENTICATED", will send the *message* to all users.  Alternatively a
    *session* ID may be specified instead of a *upn*.  This is useful when
    more than one user shares a UPN (i.e. ANONYMOUS).
    """
    #print 'deliver message', message
    logging.debug("_deliver(%s, upn=%s, session=%s)" % (message, upn, session))
    for instance in cls.instances:
        try:
            # Only send to users that have authenticated
            user = instance.current_user
        except (WebSocketClosedError, AttributeError):
            continue
        if session and user and user.get('session', None) == session:
            instance.write_message(message)
        elif upn == "AUTHENTICATED":
            instance.write_message(message)
        elif user and upn == user.get('upn', None):
            instance.write_message(message)
def __init__(self, color, *args, **kwargs):
    logging.Formatter.__init__(self, *args, **kwargs)
    self._color = color
    if color:
        # The curses module has some str/bytes confusion in python3.
        # Most methods return bytes, but only accept strings.
        # The explicit calls to unicode() below are harmless in python2,
        # but will do the right conversion in python3.
        fg_color = unicode(curses.tigetstr("setaf") or
                           curses.tigetstr("setf") or "", "ascii")
        self._colors = {
            logging.DEBUG: unicode(curses.tparm(fg_color, 4),  # Blue
                                   "ascii"),
            logging.INFO: unicode(curses.tparm(fg_color, 2),  # Green
                                  "ascii"),
            logging.WARNING: unicode(curses.tparm(fg_color, 3),  # Yellow
                                     "ascii"),
            logging.ERROR: unicode(curses.tparm(fg_color, 1),  # Red
                                   "ascii"),
        }
        self._normal = unicode(curses.tigetstr("sgr0"), "ascii")
def enable_pretty_logging():
    """Turns on formatted logging output as configured."""
    if (options.log_to_stderr or
        (options.log_to_stderr is None and not options.log_file_prefix)):
        # Set up color if we are in a tty and curses is installed
        color = False
        if curses and sys.stderr.isatty():
            try:
                curses.setupterm()
                if curses.tigetnum("colors") > 0:
                    color = True
            except:
                pass
        channel = logging.StreamHandler()
        channel.setFormatter(_LogFormatter(color=color))
        logging.getLogger().addHandler(channel)

    if options.log_file_prefix:
        channel = logging.handlers.RotatingFileHandler(
            filename=options.log_file_prefix,
            maxBytes=options.log_file_max_size,
            backupCount=options.log_file_num_backups)
        channel.setFormatter(_LogFormatter(color=False))
        logging.getLogger().addHandler(channel)
def define_logging_options(options=None):
    if options is None:
        # late import to prevent cycle
        from tornado.options import options
    options.define("logging", default="info",
                   help=("Set the Python log level. If 'none', tornado won't touch the "
                         "logging configuration."),
                   metavar="debug|info|warning|error|none")
    options.define("log_to_stderr", type=bool, default=None,
                   help=("Send log output to stderr (colorized if possible). "
                         "By default use stderr if --log_file_prefix is not set and "
                         "no other logging is configured."))
    options.define("log_file_prefix", type=str, default=None, metavar="PATH",
                   help=("Path prefix for log files. "
                         "Note that if you are running multiple tornado processes, "
                         "log_file_prefix must be different for each of them (e.g. "
                         "include the port number)"))
    options.define("log_file_max_size", type=int, default=100 * 1000 * 1000,
                   help="max size of log files before rollover")
    options.define("log_file_num_backups", type=int, default=10,
                   help="number of log files to keep")
    options.add_parse_callback(enable_pretty_logging)
def enable_pretty_logging(options=None, logger=None):
    if options is None:
        from tornado.options import options
    if options.logging is None or options.logging.lower() == 'none':
        return
    if logger is None:
        logger = logging.getLogger()
    logger.setLevel(getattr(logging, options.logging.upper()))
    if options.log_file_prefix:
        rotate_mode = options.log_rotate_mode
        if rotate_mode == 'size':
            channel = logging.handlers.RotatingFileHandler(
                filename=options.log_file_prefix,
                maxBytes=options.log_file_max_size,
                backupCount=options.log_file_num_backups)
        elif rotate_mode == 'time':
            channel = logging.handlers.TimedRotatingFileHandler(
                filename=options.log_file_prefix,
                when=options.log_rotate_when,
                interval=options.log_rotate_interval,
                backupCount=options.log_file_num_backups)
        else:
            error_message = 'The value of log_rotate_mode option should be ' + \
                            '"size" or "time", not "%s".' % rotate_mode
            raise ValueError(error_message)
        channel.setFormatter(LogFormatter(color=False))
        logger.addHandler(channel)

    if (options.log_to_stderr or
            (options.log_to_stderr is None and not logger.handlers)):
        # Set up color if we are in a tty and curses is installed
        channel = logging.StreamHandler()
        channel.setFormatter(LogFormatter())
        logger.addHandler(channel)
def __init__(self, color=True, fmt=DEFAULT_FORMAT,
             datefmt=DEFAULT_DATE_FORMAT, colors=DEFAULT_COLORS):
    r"""
    :arg bool color: Enables color support.
    :arg string fmt: Log message format.
        It will be applied to the attributes dict of log records. The
        text between ``%(color)s`` and ``%(end_color)s`` will be colored
        depending on the level if color support is on.
    :arg dict colors: color mappings from logging level to terminal color
        code
    :arg string datefmt: Datetime format.
        Used for formatting ``(asctime)`` placeholder in ``prefix_fmt``.

    .. versionchanged:: 3.2
        Added ``fmt`` and ``datefmt`` arguments.
    """
    logging.Formatter.__init__(self, datefmt=datefmt)
    self._fmt = fmt

    self._colors = {}
    if color and _stderr_supports_color():
        # The curses module has some str/bytes confusion in
        # python3. Until version 3.2.3, most methods return
        # bytes, but only accept strings. In addition, we want to
        # output these strings with the logging module, which
        # works with unicode strings. The explicit calls to
        # unicode() below are harmless in python2 but will do the
        # right conversion in python 3.
        fg_color = (curses.tigetstr("setaf") or
                    curses.tigetstr("setf") or "")
        if (3, 0) < sys.version_info < (3, 2, 3):
            fg_color = unicode_type(fg_color, "ascii")

        for levelno, code in colors.items():
            self._colors[levelno] = unicode_type(
                curses.tparm(fg_color, code), "ascii")
        self._normal = unicode_type(curses.tigetstr("sgr0"), "ascii")
    else:
        self._normal = ''
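The constructor above belongs to Tornado's LogFormatter. As a brief usage sketch (a hypothetical example, assuming a Tornado version where LogFormatter is importable from tornado.log), the formatter can also be attached to a handler directly, independent of enable_pretty_logging():

    # Hypothetical sketch: use tornado's LogFormatter on a plain handler.
    import logging
    from tornado.log import LogFormatter

    handler = logging.StreamHandler()
    handler.setFormatter(LogFormatter(color=False))  # no curses/color needed
    logging.getLogger().addHandler(handler)
    logging.getLogger().setLevel(logging.INFO)
    logging.info("formatted like tornado's own log output")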
def enable_pretty_logging(options=None, logger=None):
    """Turns on formatted logging output as configured.

    This is called automatically by `tornado.options.parse_command_line`
    and `tornado.options.parse_config_file`.
    """
    if options is None:
        from tornado.options import options
    if options.logging is None or options.logging.lower() == 'none':
        return
    if logger is None:
        logger = logging.getLogger()
    logger.setLevel(getattr(logging, options.logging.upper()))
    if options.log_file_prefix:
        rotate_mode = options.log_rotate_mode
        if rotate_mode == 'size':
            channel = logging.handlers.RotatingFileHandler(
                filename=options.log_file_prefix,
                maxBytes=options.log_file_max_size,
                backupCount=options.log_file_num_backups)
        elif rotate_mode == 'time':
            channel = logging.handlers.TimedRotatingFileHandler(
                filename=options.log_file_prefix,
                when=options.log_rotate_when,
                interval=options.log_rotate_interval,
                backupCount=options.log_file_num_backups)
        else:
            error_message = 'The value of log_rotate_mode option should be ' + \
                            '"size" or "time", not "%s".' % rotate_mode
            raise ValueError(error_message)
        channel.setFormatter(LogFormatter(color=False))
        logger.addHandler(channel)

    if (options.log_to_stderr or
            (options.log_to_stderr is None and not logger.handlers)):
        # Set up color if we are in a tty and curses is installed
        channel = logging.StreamHandler()
        channel.setFormatter(LogFormatter())
        logger.addHandler(channel)
def define_logging_options(options=None):
    """Add logging-related flags to ``options``.

    These options are present automatically on the default options instance;
    this method is only necessary if you have created your own `.OptionParser`.

    .. versionadded:: 4.2
        This function existed in prior versions but was broken and
        undocumented until 4.2.
    """
    if options is None:
        # late import to prevent cycle
        from tornado.options import options
    options.define("logging", default="info",
                   help=("Set the Python log level. If 'none', tornado won't touch the "
                         "logging configuration."),
                   metavar="debug|info|warning|error|none")
    options.define("log_to_stderr", type=bool, default=None,
                   help=("Send log output to stderr (colorized if possible). "
                         "By default use stderr if --log_file_prefix is not set and "
                         "no other logging is configured."))
    options.define("log_file_prefix", type=str, default=None, metavar="PATH",
                   help=("Path prefix for log files. "
                         "Note that if you are running multiple tornado processes, "
                         "log_file_prefix must be different for each of them (e.g. "
                         "include the port number)"))
    options.define("log_file_max_size", type=int, default=100 * 1000 * 1000,
                   help="max size of log files before rollover")
    options.define("log_file_num_backups", type=int, default=10,
                   help="number of log files to keep")
    options.define("log_rotate_when", type=str, default='midnight',
                   help=("specify the type of TimedRotatingFileHandler interval "
                         "other options:('S', 'M', 'H', 'D', 'W0'-'W6')"))
    options.define("log_rotate_interval", type=int, default=1,
                   help="The interval value of timed rotating")
    options.define("log_rotate_mode", type=str, default='size',
                   help="The mode of rotating files(time or size)")
    options.add_parse_callback(lambda: enable_pretty_logging(options))
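As the docstring above notes, this function is only needed when you build your own parser. A minimal, hypothetical sketch of that case, wiring the logging flags onto a standalone tornado.options.OptionParser instead of the module-level singleton:

    # Hypothetical sketch: add the logging flags to a custom OptionParser.
    from tornado.log import define_logging_options
    from tornado.options import OptionParser

    parser = OptionParser()
    define_logging_options(parser)   # adds --logging, --log_file_prefix, ...
    parser.parse_command_line(["prog", "--logging=warning"])
    print(parser.logging)            # -> "warning"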
def main(): options.logging = "warning" parse_command_line() elapsed = yield benchmark() print_result(2, elapsed)
def main():
    options.logging = None
    parse_command_line()
    options.subpath = options.subpath.strip('/')
    if options.subpath:
        options.subpath = '/' + options.subpath

    # Connect to mongodb
    io_loop = ioloop.IOLoop.instance()
    connect(config.DB_NAME, host=config.DB_HOST, port=config.DB_PORT,
            io_loop=io_loop, username=config.DB_USER, password=config.DB_PWD)

    # Start application
    from application import app
    if options.unix_socket:
        server = tornado.httpserver.HTTPServer(app)
        socket = tornado.netutil.bind_unix_socket(options.unix_socket, 0o666)
        server.add_socket(socket)
        print('Server is running at %s' % options.unix_socket)
        print('Quit the server with Control-C')
    else:
        http_server = tornado.httpserver.HTTPServer(app)
        http_server.listen(options.port)
        print('Server is running at http://127.0.0.1:%s%s' % (options.port,
                                                              options.subpath))
        print('Quit the server with Control-C')
    io_loop.start()
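The two main() functions above share a pattern worth noting: assigning options.logging before parse_command_line() changes the default level ("warning" quiets Tornado's chatty request logging; None leaves the logging configuration untouched), while an explicit --logging flag on the command line still wins. A minimal sketch of that pattern, assuming no other options are needed:

    # Sketch of the override pattern used above.
    from tornado.options import options, parse_command_line

    options.logging = None       # don't touch logging config by default
    args = parse_command_line()  # an explicit --logging=... still overrides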
def cleanup_old_sessions():
    """
    Cleans up old session directories inside the `session_dir`.  Any
    directories found that are older than the `auth_timeout` (global gateone
    setting) will be removed.  The modification time is what will be checked.
    """
    logging.debug("cleanup_old_sessions()")
    disabled = timedelta(0)  # If the user sets auth_timeout to "0"
    session_dir = define_options()['session_dir']
    #settings = get_settings(options.settings_dir)
    settings = get_settings(define_options()['settings_dir'])
    expiration_str = settings['*']['gateone'].get('auth_timeout', "14d")
    expiration = convert_to_timedelta(expiration_str)
    if expiration != disabled:
        #for session in os.listdir(options.session_dir):
        for session in os.listdir(session_dir):
            # If it's in the SESSIONS dict it's still valid for sure
            if session not in SESSIONS:
                if len(session) != 45:
                    # Sessions are always 45 characters long. This check allows
                    # us to skip the 'broadcast' file which also lives in the
                    # session_dir. Why not just check for 'broadcast'? Just in
                    # case we put something else there in the future.
                    continue
                #session_path = os.path.join(options.session_dir, session)
                session_path = os.path.join(session_dir, session)
                mtime = time.localtime(os.stat(session_path).st_mtime)
                # Convert to a datetime object for easier comparison
                mtime = datetime.fromtimestamp(time.mktime(mtime))
                if datetime.now() - mtime > expiration:
                    import shutil
                    from applications.utils import kill_session_processes
                    # The log is older than expiration, remove it and kill any
                    # processes that may be remaining.
                    kill_session_processes(session)
                    logger.info(_(
                        "Removing old session files due to age (>%s old): %s"
                        % (expiration_str, session_path)))
                    shutil.rmtree(session_path, ignore_errors=True)
def clean_cache():
    cache_dir = settings['cache_dir']
    logging.debug('clean cache dir')
    for file in os.listdir(cache_dir):
        file_name = os.path.join(cache_dir, file)
        os.remove(file_name)
def watch_file(cls, path, func):
    """
    A classmethod that registers the given file *path* and *func* in
    `ApplicationWebSocket.watched_files` and
    `ApplicationWebSocket.file_update_funcs`, respectively.  The given *func*
    will be called (by `ApplicationWebSocket.file_checker`) whenever the file
    at *path* is modified.
    """
    logging.debug("watch_file('{path}', {func}())".format(
        path=path, func=func.__name__))
    cls.watched_files.update({path: os.stat(path).st_mtime})
    cls.file_update_funcs.update({path: func})
def _list_connected_users(cls):
    """
    Returns a tuple of user objects representing the users that are currently
    connected (and authenticated) to this Gate One server.
    """
    logging.debug("_list_connected_users()")
    out = []
    for instance in cls.instances:
        try:
            # We only care about authenticated users
            out.append(instance.current_user)
        except AttributeError:
            continue
    return tuple(out)
def parse_command_line(args=None):
    """Parses all options given on the command line.

    We return all command line arguments that are not options as a list.
    """
    if args is None:
        args = sys.argv
    remaining = []
    for i in xrange(1, len(args)):
        # All things after the last option are command line arguments
        if not args[i].startswith("-"):
            remaining = args[i:]
            break
        if args[i] == "--":
            remaining = args[i+1:]
            break
        arg = args[i].lstrip("-")
        name, equals, value = arg.partition("=")
        name = name.replace('-', '_')
        if not name in options:
            print_help()
            raise Error('Unrecognized command line option: %r' % name)
        option = options[name]
        if not equals:
            if option.type == bool:
                value = "true"
            else:
                raise Error('Option %r requires a value' % name)
        option.parse(value)
    if options.help:
        print_help()
        sys.exit(0)

    # Set up log level and pretty console logging by default
    if options.logging != 'none':
        logging.getLogger().setLevel(getattr(logging, options.logging.upper()))
        enable_pretty_logging()

    return remaining
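As a usage note for the parser above, option parsing stops at "--" or at the first bare argument, and everything after that is returned unparsed. A short, hypothetical sketch using the current public tornado.options API (which behaves the same way for the cases shown here):

    # Hypothetical sketch: positional arguments survive parse_command_line().
    from tornado.options import define, options, parse_command_line

    define("port", default=8888, type=int)   # example application option
    remaining = parse_command_line(
        ["prog", "--port=9000", "--logging=error", "extra.txt"])
    # remaining == ["extra.txt"]; options.port == 9000; options.logging == "error"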
def enable_pretty_logging():
    """Turns on formatted logging output as configured.

    This is called automatically by `parse_command_line`.
    """
    root_logger = logging.getLogger()
    if options.log_file_prefix:
        channel = logging.handlers.RotatingFileHandler(
            filename=options.log_file_prefix,
            maxBytes=options.log_file_max_size,
            backupCount=options.log_file_num_backups)
        channel.setFormatter(_LogFormatter(color=False))
        root_logger.addHandler(channel)

    if (options.log_to_stderr or
        (options.log_to_stderr is None and not root_logger.handlers)):
        # Set up color if we are in a tty and curses is installed
        color = False
        if curses and sys.stderr.isatty():
            try:
                curses.setupterm()
                if curses.tigetnum("colors") > 0:
                    color = True
            except Exception:
                pass
        channel = logging.StreamHandler()
        channel.setFormatter(_LogFormatter(color=color))
        root_logger.addHandler(channel)
def __init__(self, color, *args, **kwargs):
    logging.Formatter.__init__(self, *args, **kwargs)
    self._color = color
    if color:
        fg_color = curses.tigetstr("setaf") or curses.tigetstr("setf") or ""
        self._colors = {
            logging.DEBUG: curses.tparm(fg_color, 4),    # Blue
            logging.INFO: curses.tparm(fg_color, 2),     # Green
            logging.WARNING: curses.tparm(fg_color, 3),  # Yellow
            logging.ERROR: curses.tparm(fg_color, 1),    # Red
        }
        self._normal = curses.tigetstr("sgr0")
def enable_pretty_logging(options=None, logger=None):
    """Turns on formatted logging output as configured.

    This is called automatically by `tornado.options.parse_command_line`
    and `tornado.options.parse_config_file`.
    """
    if options is None:
        from tornado.options import options
    if options.logging is None or options.logging.lower() == 'none':
        return
    if logger is None:
        logger = logging.getLogger()
    logger.setLevel(getattr(logging, options.logging.upper()))
    if options.log_file_prefix:
        channel = logging.handlers.RotatingFileHandler(
            filename=options.log_file_prefix,
            maxBytes=options.log_file_max_size,
            backupCount=options.log_file_num_backups)
        channel.setFormatter(LogFormatter(color=False))
        logger.addHandler(channel)

    if (options.log_to_stderr or
            (options.log_to_stderr is None and not logger.handlers)):
        # Set up color if we are in a tty and curses is installed
        channel = logging.StreamHandler()
        channel.setFormatter(LogFormatter())
        logger.addHandler(channel)
def define_logging_options(options=None):
    """Add logging-related flags to ``options``.

    These options are present automatically on the default options instance;
    this method is only necessary if you have created your own `.OptionParser`.

    .. versionadded:: 4.2
        This function existed in prior versions but was broken and
        undocumented until 4.2.
    """
    if options is None:
        # late import to prevent cycle
        from tornado.options import options
    options.define("logging", default="info",
                   help=("Set the Python log level. If 'none', tornado won't touch the "
                         "logging configuration."),
                   metavar="debug|info|warning|error|none")
    options.define("log_to_stderr", type=bool, default=None,
                   help=("Send log output to stderr (colorized if possible). "
                         "By default use stderr if --log_file_prefix is not set and "
                         "no other logging is configured."))
    options.define("log_file_prefix", type=str, default=None, metavar="PATH",
                   help=("Path prefix for log files. "
                         "Note that if you are running multiple tornado processes, "
                         "log_file_prefix must be different for each of them (e.g. "
                         "include the port number)"))
    options.define("log_file_max_size", type=int, default=100 * 1000 * 1000,
                   help="max size of log files before rollover")
    options.define("log_file_num_backups", type=int, default=10,
                   help="number of log files to keep")
    options.add_parse_callback(lambda: enable_pretty_logging(options))
def __init__(self, color=True, *args, **kwargs):
    logging.Formatter.__init__(self, *args, **kwargs)
    self._color = color and _stderr_supports_color()
    if self._color:
        # The curses module has some str/bytes confusion in
        # python3. Until version 3.2.3, most methods return
        # bytes, but only accept strings. In addition, we want to
        # output these strings with the logging module, which
        # works with unicode strings. The explicit calls to
        # unicode() below are harmless in python2 but will do the
        # right conversion in python 3.
        fg_color = (curses.tigetstr("setaf") or
                    curses.tigetstr("setf") or "")
        if (3, 0) < sys.version_info < (3, 2, 3):
            fg_color = unicode_type(fg_color, "ascii")
        self._colors = {
            logging.DEBUG: unicode_type(curses.tparm(fg_color, 4),  # Blue
                                        "ascii"),
            logging.INFO: unicode_type(curses.tparm(fg_color, 2),  # Green
                                       "ascii"),
            logging.WARNING: unicode_type(curses.tparm(fg_color, 3),  # Yellow
                                          "ascii"),
            logging.ERROR: unicode_type(curses.tparm(fg_color, 1),  # Red
                                        "ascii"),
        }
        self._normal = unicode_type(curses.tigetstr("sgr0"), "ascii")
def enable_pretty_logging(options=None, logger=None):
    """Turns on formatted logging output as configured.

    This is called automatically by `tornado.options.parse_command_line`
    and `tornado.options.parse_config_file`.
    """
    if options is None:
        from tornado.options import options
    if options.logging == 'none':
        return
    if logger is None:
        logger = logging.getLogger()
    logger.setLevel(getattr(logging, options.logging.upper()))
    if options.log_file_prefix:
        channel = logging.handlers.RotatingFileHandler(
            filename=options.log_file_prefix,
            maxBytes=options.log_file_max_size,
            backupCount=options.log_file_num_backups)
        channel.setFormatter(LogFormatter(color=False))
        logger.addHandler(channel)

    if (options.log_to_stderr or
            (options.log_to_stderr is None and not logger.handlers)):
        # Set up color if we are in a tty and curses is installed
        channel = logging.StreamHandler()
        channel.setFormatter(LogFormatter())
        logger.addHandler(channel)
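The newer enable_pretty_logging variants above all accept an optional logger argument. As a short, hypothetical usage sketch, the formatted output can be applied to an application-specific logger instead of the root logger ("myapp" is an arbitrary example name):

    # Hypothetical sketch: pretty logging on a dedicated application logger.
    import logging
    from tornado.log import enable_pretty_logging
    from tornado.options import options

    options.logging = "debug"                # level read by enable_pretty_logging
    app_logger = logging.getLogger("myapp")  # example logger name
    enable_pretty_logging(logger=app_logger)
    app_logger.debug("colorized output when stderr is a tty")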
def setup_logging():
    """Setup logging for the application based on the Tornado options."""
    for name, logger in loggers.items():
        logger.setLevel(LOGGING_MAPPING.get(options.logging, logging.DEBUG))

        handler = logging.FileHandler(
            getattr(options, '{}_log_file_path'.format(name)))
        formatter = logging.Formatter(
            '%(asctime)s - %(name)s - %(levelname)s - %(message)s')
        handler.setFormatter(formatter)

        logger.addHandler(handler)
def get_app(self):
    options.access_log_file_path = 'test.access.log'
    options.application_log_file_path = 'test.application.log'
    options.logging = 'debug'

    log.setup_logging()

    return application.TailSocketApplication()
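The two snippets above reference loggers and LOGGING_MAPPING, which are not included in the extract. Purely as an assumption based on the option names they use ('access_log_file_path', 'application_log_file_path'), those helpers might look roughly like this:

    # Assumed shape of the helpers referenced above (not part of the extract).
    import logging

    LOGGING_MAPPING = {
        'debug': logging.DEBUG,
        'info': logging.INFO,
        'warning': logging.WARNING,
        'error': logging.ERROR,
    }
    loggers = {
        'access': logging.getLogger('myapp.access'),            # hypothetical names
        'application': logging.getLogger('myapp.application'),  # hypothetical names
    }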
def setup_logging(self):
    if options.debug and options.logging == 'info':
        options.logging = 'debug'
        enable_pretty_logging()
    else:
        logging.getLogger('tornado.access').addHandler(
            logging.NullHandler())
        logging.getLogger('tornado.access').propagate = False