我们从Python开源项目中,提取了以下50个代码示例,用于说明如何使用 logging._levelNames(logging 模块内部的级别名称映射字典,并非可调用函数)。
def log( cls, level, message, caller = None ): if not cls.logger: cls.instantiate( logLevel = app.config['LEVELOFLOG'] ) try: if level not in logging._levelNames: cls.log( "ERROR", 'Invalid file level \'%s\''%( level ) ) logLevel = logging._levelNames[level] if not caller: callers = Log.getCallers( inspect.stack() ) else: callers = caller message = '%s.%s - %s'%( callers[0], callers[1] , message ) cls.logger.log( logLevel, message ) except Exception, e: print 'Unable to record the log. Error: %s'%( e )
def setUp(self):
    """Build a one-site/one-filer/one-vfiler project (root aggregate only)."""
    parser = BaseOptions()
    parser.parseOptions(['dummyfile.xml', '--debug=%s' % logging._levelNames[log.level].lower()])
    self.defaults = RawConfigParser()
    loaded = self.defaults.read(TESTCONF)
    xml_text = """ <project name="testproj" code="01"> <site name="sitea" type="primary" location="testlab"> <filer name="filer1" type="filer"> <vfiler name="vftest01"> <aggregate type="root" name="aggr0"/> </vfiler> </filer> </site> </project> """
    root = etree.fromstring(xml_text)
    self.project = Project()
    self.project.configure_from_node(root, self.defaults, None)
    # Convenience handles used by the individual tests.
    self.sitea = self.project.get_sites()[0]
    self.filer1 = self.sitea.get_filers()[0]
    self.vfiler1 = self.filer1.get_vfilers()[0]
def setUp(self):
    """Build a project whose vfiler carries an extra aggregate and volume."""
    parser = BaseOptions()
    parser.parseOptions(['dummyfile.xml', '--debug=%s' % logging._levelNames[log.level].lower()])
    self.defaults = RawConfigParser()
    loaded = self.defaults.read(TESTCONF)
    xml_text = """ <project name="testproj" code="01"> <site name="sitea" type="primary" location="testlab"> <filer name="filer1" type="filer"> <vfiler name="vftest01"> <aggregate type="root" name="aggr0"/> <aggregate name="aggr01"> <volume name="blah"> </volume> </aggregate> </vfiler> </filer> </site> </project> """
    root = etree.fromstring(xml_text)
    self.project = Project()
    self.project.configure_from_node(root, self.defaults, None)
def setUp(self):
    """Configure a single Site with three storage VLANs and one Linux host."""
    parser = BaseOptions()
    parser.parseOptions(['dummyfile.xml', '--debug=%s' % logging._levelNames[log.level].lower()])
    self.defaults = RawConfigParser()
    loaded = self.defaults.read(TESTCONF)
    xml_text = """ <site name="primary" type="prod" location="testlab"> <vlan number="1453" type="storage"/> <vlan number="100" type="storage"/> <vlan number="300" type="storage"/> <host name="fred" platform="intel" operatingsystem="linux"/> </site> """
    root = etree.fromstring(xml_text)
    self.site = Site()
    self.site.configure_from_node(root, self.defaults, None)
    # The single host defined above, for direct inspection in tests.
    self.host = self.site.get_hosts()[0]
def setUp(self):
    """Build a demo project with one site, one project VLAN and one filer."""
    parser = BaseOptions()
    parser.parseOptions(['dummyfile.xml', '--debug=%s' % logging._levelNames[log.level].lower()])
    self.defaults = RawConfigParser()
    loaded = self.defaults.read(TESTCONF)
    xml_text = """ <project name="demo" code="3"> <site name="one" type="primary" location="somewhere"> <vlan number="3001" type="project"/> <filer name="testfiler1" type="filer"> </filer> </site> </project> """
    root = etree.fromstring(xml_text)
    self.project = Project()
    self.project.configure_from_node(root, self.defaults, None)
    self.site = self.project.get_sites()[0]
    self.filer = self.site.get_filers()[0]
def common_logger_config(self, logger, config, incremental=False):
    """
    Apply the settings shared by root and non-root loggers.

    Sets the level when one is named in *config*; unless *incremental*,
    replaces the logger's handlers and filters with those named in *config*.
    """
    level_name = config.get('level', None)
    if level_name is not None:
        logger.setLevel(logging._levelNames[level_name])
    if incremental:
        return
    # Full (non-incremental) configuration: start from a clean handler list.
    for existing in logger.handlers[:]:
        logger.removeHandler(existing)
    handler_names = config.get('handlers', None)
    if handler_names:
        self.add_handlers(logger, handler_names)
    filter_names = config.get('filters', None)
    if filter_names:
        self.add_filters(logger, filter_names)
def __call__(self, parser, namespace, values, option_string=None):
    """argparse Action hook: set a logger's level from "LEVEL[:LOGGER]".

    With no explicit logger name the configured main logger is used.
    An unknown level name raises argparse.ArgumentError.
    """
    values = values.split(':')
    if len(values) > 1:
        level, logger = values
    else:
        level, logger = values[0], self.main_logger
    logger = logging.getLogger(logger)
    try:
        logger.setLevel(logging._levelNames[level.upper()])
    except KeyError:
        msg = "invalid level choice: %s (choose from %s)" % \
            (level, parser.log_levels)
        raise argparse.ArgumentError(self, msg)
    super(LoggingAction, self).__call__(parser, namespace, values, option_string)
def _install_handlers(cp, formatters):
    """Install and return handlers.

    Reads the comma-separated handler names from [handlers] keys, builds each
    handler from its [handler_NAME] section (class, args, optional level and
    formatter), and returns a dict mapping handler name -> handler instance.
    NOTE(review): handler class and args are eval()'d against the logging
    module's namespace — safe only for trusted config files.
    """
    hlist = cp.get("handlers", "keys")
    if not len(hlist):
        return {}
    hlist = hlist.split(",")
    hlist = _strip_spaces(hlist)
    handlers = {}
    fixups = [] #for inter-handler references
    for hand in hlist:
        sectname = "handler_%s" % hand
        klass = cp.get(sectname, "class")
        opts = cp.options(sectname)
        if "formatter" in opts:
            fmt = cp.get(sectname, "formatter")
        else:
            fmt = ""
        try:
            # Resolve the class name inside the logging namespace first
            # (e.g. "StreamHandler"); fall back to a dotted-path import.
            klass = eval(klass, vars(logging))
        except (AttributeError, NameError):
            klass = _resolve(klass)
        # args is a Python tuple expression, e.g. "(sys.stdout,)".
        args = cp.get(sectname, "args")
        args = eval(args, vars(logging))
        h = klass(*args)
        if "level" in opts:
            level = cp.get(sectname, "level")
            h.setLevel(logging._levelNames[level])
        if len(fmt):
            h.setFormatter(formatters[fmt])
        if issubclass(klass, logging.handlers.MemoryHandler):
            if "target" in opts:
                target = cp.get(sectname,"target")
            else:
                target = ""
            if len(target): #the target handler may not be loaded yet, so keep for later...
                fixups.append((h, target))
        handlers[hand] = h
    #now all handlers are loaded, fixup inter-handler references...
    for h, t in fixups:
        h.setTarget(handlers[t])
    return handlers
def logging_level(self, value):
    """Set the underlying logger's level.

    ``None`` selects the configured default. String values are looked up
    (case-insensitively) in the level-name table; anything else must be
    convertible to ``int``. Raises ``ValueError`` for unrecognized input.
    """
    chosen = self._default_logging_level if value is None else value
    if isinstance(chosen, (bytes, unicode)):
        try:
            numeric = _levelNames[chosen.upper()]
        except KeyError:
            raise ValueError('Unrecognized logging level: {}'.format(chosen))
    else:
        try:
            numeric = int(chosen)
        except ValueError:
            raise ValueError('Unrecognized logging level: {}'.format(chosen))
    self._logger.setLevel(numeric)
def loglevel(raw):
    """Validate a log-level name and return it upper-cased.

    Raises ValueError if the (case-folded) name is not a level known to the
    logging module. Note: fixes the misspelled error message ('unkown').
    """
    raw = raw.upper()
    if raw not in logging._levelNames:
        raise ValueError('unknown log level')
    return raw
def _configure_loggers(self, config):
    """Build a dictConfig from the template and apply per-logger levels.

    When a log filename is configured, points the file handler at it
    (creating the directory if needed); otherwise drops the file handler.
    Every configured logger gets all remaining handlers attached.
    """
    loggers_config = config.logging.loggers
    logfile = config.logging.filename
    template = copy.deepcopy(LOGGER_CONFIG_TEMPLATE)
    if logfile:
        # Route the file handler at the configured path.
        template['handlers']['file']['filename'] = logfile
        logdir = os.path.dirname(logfile)
        if not os.path.exists(logdir):
            os.makedirs(logdir)
        self._log_file = logfile
    else:
        del template['handlers']['file']
    # Attach every available handler to each configured logger.
    loggers = {}
    for name in loggers_config:
        loggers[name] = dict(handlers=list(template['handlers'].keys()))
        self._all_loggers_names.append(name)
    template['loggers'] = loggers
    # Levels are applied directly on the logger objects.
    for name, level_name in loggers_config.iteritems():
        logging.getLogger(name).setLevel(logging._levelNames[level_name.upper()])
    dictconfig.dictConfig(template)
def __getattr__(self, attribute):
    """Proxy attribute access to the wrapped logger.

    Names matching a logging level (case-insensitive) return a callable
    bound to the task-id-aware log method; everything else is delegated
    to the underlying logger unchanged.
    """
    if attribute.upper() not in logging._levelNames:
        return getattr(self._logger, attribute)
    return partial(self._logger_with_task_id, _level=attribute)
def instantiate( cls, streamType = "SCREEN", logLevel = "INFO" ):
    """Create and configure the class-wide logger.

    Registers a custom VERBOSE level (5), validates *logLevel* against the
    logging module's level table, and attaches either a screen or file
    handler. On any failure the logger is reset to None.
    NOTE(review): the *streamType* parameter is immediately overwritten from
    app.config['STREAMTYPE'] below, so the argument is effectively ignored —
    confirm whether that is intentional.
    """
    try:
        # Register a VERBOSE level below DEBUG, plus convenience methods.
        logging.VERBOSE = 5
        logging.addLevelName(logging.VERBOSE, "VERBOSE")
        logging.Logger.verbose = lambda inst, msg, *args, **kwargs: inst.log(logging.VERBOSE, msg, *args, **kwargs)
        logging.verbose = lambda msg, *args, **kwargs: logging.log(logging.VERBOSE, msg, *args, **kwargs)
        cls.logger = logging.getLogger()
        if logLevel not in logging._levelNames:
            raise Exception( 'Invalid file level' )
        cls.logger.setLevel( logging._levelNames[logLevel] )
        # Overrides the streamType argument (see NOTE above).
        streamType = app.config['STREAMTYPE']
        if streamType == "SCREEN":
            stream = logging.StreamHandler()
        else:
            stream = logging.FileHandler( app.config['LOGFILE'] )
        formatter = logging.Formatter( '[%(levelname)-7s - %(asctime)s] %(message)s' )
        stream.setFormatter( formatter )
        cls.logger.addHandler( stream )
    except Exception, e:
        # Any configuration failure leaves the class without a logger.
        print( 'Unable to get/set log configurations. Error: %s'%( e ) )
        cls.logger = None
##
# Records a message in a file and/or displays it in the screen.
# @param level - String containing the name of the log message.
# @param message - String containing the message to be recorded.
#
def format(self, record):
    """Format *record*, temporarily swapping in a level-specific format.

    Each known level gets its own marker character between the timestamp
    and the message; the formatter's original format string is restored
    before returning so the instance can be reused.
    """
    # Keep the format configured when the formatter was instantiated.
    saved_fmt = self._fmt
    # Pick the marker for this record's level (order mirrors frequency;
    # the SUCCESS/START lookups are only evaluated when reached).
    marker = None
    if record.levelno == logging.DEBUG:
        marker = MyFormatter.debug_char
    elif record.levelno == logging.INFO:
        marker = MyFormatter.info_char
    elif record.levelno == logging._levelNames["SUCCESS"]:
        marker = MyFormatter.success_char
    elif record.levelno == logging._levelNames["START"]:
        marker = MyFormatter.start_char
    elif record.levelno == logging.WARNING:
        marker = MyFormatter.warning_char
    elif record.levelno == logging.ERROR:
        marker = MyFormatter.error_char
    if marker is not None:
        self._fmt = MyFormatter.timestamp + " " + marker + " " + MyFormatter.message
    # Delegate the actual formatting to the base class.
    result = logging.Formatter.format(self, record)
    # Restore the user-configured format.
    self._fmt = saved_fmt
    return result
# -----------------------------------------------------------------
def log_level(self, value):
    """Resolve *value* to a numeric logging level for the screen handler.

    Controls only the filtering done by pwntools' screen-logging handler;
    handlers logging elsewhere (e.g. to a file) are unaffected. Accepts an
    integer, or any level name the standard ``logging`` module defines
    (case-insensitive). Anything else raises ``AttributeError`` listing the
    permitted names. Default is ``INFO``.
    """
    # Plain integers (and int-convertible values) pass straight through.
    try:
        return int(value)
    except ValueError:
        pass
    # Otherwise treat it as a level name defined on the logging module.
    try:
        return getattr(logging, value.upper())
    except AttributeError:
        pass
    # Neither worked: report the valid string choices.
    permitted = sorted(filter(lambda x: isinstance(x,str), logging._levelNames))
    raise AttributeError('log_level must be an integer or one of %r' % permitted)
def setUp(self):
    """Parse baseline CLI options and load the shared test configuration."""
    parser = BaseOptions()
    parser.parseOptions(['dummyfile.xml', '--debug=%s' % logging._levelNames[log.level].lower()])
    self.defaults = RawConfigParser()
    loaded = self.defaults.read(TESTCONF)
def setUp(self):
    """Prepare an empty Project and a self-closing project XML snippet."""
    parser = BaseOptions()
    parser.parseOptions(['dummyfile.xml', '--debug=%s' % logging._levelNames[log.level].lower()])
    self.defaults = RawConfigParser()
    loaded = self.defaults.read(TESTCONF)
    xmldata = """<project name="testproj1" code="01" title="Test Project 1"/> """
    self.project = Project()
def setUp(self):
    """Load the test config (failing loudly if absent) and build a Site."""
    parser = BaseOptions()
    parser.parseOptions(['dummyfile.xml', '--debug=%s' % logging._levelNames[log.level].lower()])
    self.defaults = RawConfigParser()
    configfiles = self.defaults.read(TESTCONF)
    # Probe a required key so a bad config fails here rather than mid-test.
    self.defaults.get('global', 'dns_domain_name')
    if len(configfiles) == 0:
        raise ValueError("Cannot load configuration file: %s" % parser.options.configfile)
    self.sitea = Site()
    self.sitea.name = 'sitea'
    self.sitea.type = 'primary'
    self.sitea.location = 'testlab'
def setUp(self):
    """Build a project whose vfiler has a root aggregate plus one extra."""
    parser = BaseOptions()
    parser.parseOptions(['dummyfile.xml', '--debug=%s' % logging._levelNames[log.level].lower()])
    self.defaults = RawConfigParser()
    loaded = self.defaults.read(TESTCONF)
    xml_text = """ <project name="testproj" code="01"> <site name="sitea" type="primary" location="testlab"> <filer name="filer1" type="filer"> <vfiler name="vftest01"> <aggregate type="root" name="aggr0"/> <aggregate name="aggr01"/> </vfiler> </filer> </site> </project> """
    root = etree.fromstring(xml_text)
    self.project = Project()
    self.project.configure_from_node(root, self.defaults, None)
    # Convenience handles used by the individual tests.
    self.sitea = self.project.get_sites()[0]
    self.filer1 = self.sitea.get_filers()[0]
    self.vfiler1 = self.filer1.get_vfilers()[0]
    self.aggr1 = self.vfiler1.get_aggregates()[0]
def setUp(self):
    """Load the test configuration and create a bare primary Site."""
    parser = BaseOptions()
    parser.parseOptions(['dummyfile.xml', '--debug=%s' % logging._levelNames[log.level].lower()])
    self.defaults = RawConfigParser()
    loaded = self.defaults.read(TESTCONF)
    self.site = Site()
    self.site.name = 'sitea'
    self.site.type = 'primary'
    self.site.location = 'testlab'
def setUp(self):
    """Build a fuller project: three hosts, a project VLAN with network, and a vfiler with a primary IP."""
    parser = BaseOptions()
    parser.parseOptions(['dummyfile.xml', '--debug=%s' % logging._levelNames[log.level].lower()])
    self.defaults = RawConfigParser()
    loaded = self.defaults.read(TESTCONF)
    xml_text = """ <project name="testproj" code="01"> <site name="sitea" type="primary" location="testlab"> <host name="sitea_host01" operatingsystem="linux" /> <host name="sitea_host02" operatingsystem="windows" /> <host name="sitea_host03" operatingsystem="solaris" /> <vlan type="project" number="3001"> <network number="10.20.30.1/26" gateway="10.20.30.254"/> </vlan> <filer name="filer1" type="filer"> <vfiler name="vftest01" rootaggr="aggr0"> <ipaddress type="primary" ip="10.20.30.1"/> <aggregate name="aggr01"/> </vfiler> </filer> </site> </project> """
    root = etree.fromstring(xml_text)
    self.project = Project()
    self.project.configure_from_node(root, self.defaults, None)
    # Convenience handles used by the individual tests.
    self.sitea = self.project.get_sites()[0]
    self.filer1 = self.sitea.get_filers()[0]
    self.vfiler1 = self.filer1.get_vfilers()[0]
    self.aggr1 = self.vfiler1.get_aggregates()[0]
def setUp(self):
    """Configure a Project from an XML definition with no child elements."""
    parser = BaseOptions()
    parser.parseOptions(['dummyfile.xml', '--debug=%s' % logging._levelNames[log.level].lower()])
    self.defaults = RawConfigParser()
    loaded = self.defaults.read(TESTCONF)
    xml_text = """ <project name="testproj" code="01"> </project> """
    root = etree.fromstring(xml_text)
    self.project = Project()
    self.project.configure_from_node(root, self.defaults, None)
def setUp(self):
    """Prepare for a configuration parse test: defaults loaded, Project empty."""
    parser = BaseOptions()
    parser.parseOptions(['dummyfile.xml', '--debug=%s' % logging._levelNames[log.level].lower()])
    self.defaults = RawConfigParser()
    loaded = self.defaults.read(TESTCONF)
    self.project = Project()
def setUp(self):
    """Load the shared configuration and provide an in-memory output buffer."""
    parser = BaseOptions()
    parser.parseOptions(['dummyfile.xml', '--debug=%s' % logging._levelNames[log.level].lower()])
    self.defaults = RawConfigParser()
    loaded = self.defaults.read(TESTCONF)
    self.outfile = StringIO()
def setUp(self):
    """Build a project containing a single volume for qtree-related tests."""
    parser = BaseOptions()
    parser.parseOptions(['dummyfile.xml', '--debug=%s' % logging._levelNames[log.level].lower()])
    self.defaults = RawConfigParser()
    loaded = self.defaults.read(TESTCONF)
    # Probe a required key so a bad config fails here rather than mid-test.
    self.defaults.get('global', 'dns_domain_name')
    xml_text = """ <project name="test" code="qtree"> <site name="sitea" type="primary" location="testlab"> <filer name="testfiler1" type="filer"> <vfiler name="vfiler01" rootaggr="aggr0"> <aggregate name="aggr01"> <volume name="testvol1"/> </aggregate> </vfiler> </filer> </site> </project> """
    root = etree.fromstring(xml_text)
    self.proj = Project()
    self.proj.configure_from_node(root, self.defaults, None)
    self.volume = self.proj.get_volumes()[0]
def setUp(self):
    """Load the test configuration and create a bare primary test Site.

    Bug fix: the original assigned ``self.site.locaion`` (typo), so the
    site's location attribute was never actually set; sibling setUp blocks
    in this file all use ``.location``.
    """
    optparser = BaseOptions()
    optparser.parseOptions(['dummyfile.xml', '--debug=%s' % logging._levelNames[log.level].lower()])
    self.defaults = RawConfigParser()
    configfiles = self.defaults.read(TESTCONF)
    self.site = Site()
    self.site.name = "testsite"
    self.site.type = "primary"
    self.site.location = "testlab"
def _install_handlers(cp, formatters):
    """Install and return handlers.

    Mapping-style variant: reads the comma-separated handler names from
    cp["handlers"]["keys"], builds each handler from its cp["handler_NAME"]
    section (class, args, optional level/formatter/target), and returns a
    dict mapping handler name -> handler instance.
    NOTE(review): handler class and args are eval()'d against the logging
    module's namespace — safe only for trusted config files.
    """
    hlist = cp["handlers"]["keys"]
    if not len(hlist):
        return {}
    hlist = hlist.split(",")
    hlist = _strip_spaces(hlist)
    handlers = {}
    fixups = [] #for inter-handler references
    for hand in hlist:
        section = cp["handler_%s" % hand]
        klass = section["class"]
        fmt = section.get("formatter", "")
        try:
            # Resolve the class name inside the logging namespace first
            # (e.g. "StreamHandler"); fall back to a dotted-path import.
            klass = eval(klass, vars(logging))
        except (AttributeError, NameError):
            klass = _resolve(klass)
        # args is a Python tuple expression, e.g. "(sys.stdout,)".
        args = section["args"]
        args = eval(args, vars(logging))
        h = klass(*args)
        if "level" in section:
            level = section["level"]
            h.setLevel(logging._levelNames[level])
        if len(fmt):
            h.setFormatter(formatters[fmt])
        if issubclass(klass, logging.handlers.MemoryHandler):
            target = section.get("target", "")
            if len(target): #the target handler may not be loaded yet, so keep for later...
                fixups.append((h, target))
        handlers[hand] = h
    #now all handlers are loaded, fixup inter-handler references...
    for h, t in fixups:
        h.setTarget(handlers[t])
    return handlers
def main():
    """Entry point: parse CLI arguments, set up UTC logging, and stream RDS logs.

    Builds the RDS client and LogTail streamer from the command-line options,
    then streams log data until interrupted (or once, with --run_once).
    """
    parser = argparse.ArgumentParser(description='Stream logs from rds for a set of db instances.')
    parser.add_argument('--db_instance_ids', '-d', nargs='+', type=str, required=True, help='list of db instance ids')
    parser.add_argument('--minutes_in_the_past_to_start', '-m', type=int, default=0, help=('if logs have not been written to since this many minutes ago, ' 'ignore them'))
    parser.add_argument('--api_call_delay_seconds', '-a', type=float, default=1.0, help='time to wait before each API call')
    parser.add_argument('--log_state_file', '-s', type=str, default='log_state.json', help='file path for recording the state of log streaming')
    parser.add_argument('--retention_days', '-r', type=int, default=7, help='number of days to retain log metadata')
    parser.add_argument('--log_level', '-l', type=str, default='INFO', choices=['DEBUG', 'INFO', 'WARN', 'ERROR', 'CRITICAL'], help="log level for this script's logs")
    parser.add_argument('--log_filename', '-f', type=str, default='rds_tail_logs.log', help="log filename for this script's logs")
    parser.add_argument('--run_once', '-o', dest='run_once', action='store_true', help="stream all new logs from all db instances and then exit")
    parser.add_argument('--output_format', '-t', choices=['json', 'text'], default='json', help="output format")
    parser.add_argument('--aws_region_name', type=str, help="AWS region name")
    parser.add_argument('--aws_profile_name', default='default', help='AWS credentials profile name')
    args = parser.parse_args()
    # Force UTC before configuring logging so all timestamps agree.
    os.environ['TZ'] = 'UTC'
    time.tzset()
    logging.basicConfig(filename=args.log_filename, level=logging._levelNames[args.log_level], format='%(asctime)s %(message)s')
    logging.info('Starting rds log streaming with args: %s', args)
    rds = RDS(args.api_call_delay_seconds, args.aws_region_name, args.aws_profile_name)
    rds_tail_logs = LogTail(args.log_state_file, args.db_instance_ids, args.minutes_in_the_past_to_start, rds, args.retention_days, args.run_once, args.output_format)
    rds_tail_logs.stream()
def set_log_level(level):
    """Set the log-level of the root logger of the logging module.

    Args:
        level: an integer such as 30 (logging.WARN), or a level-name
            string such as 'WARN' (resolved via the logging level table).
    """
    numeric = logging._levelNames[level] if isinstance(level, str) else level
    # The root logger is obtained by calling getLogger with no name.
    logging.getLogger().setLevel(numeric)