The following code examples, extracted from open-source Python projects, illustrate how to use oslo_log.log.getLogger().
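Before the project examples, the canonical pattern is worth seeing on its own: register oslo_log's options on a ConfigOpts object, parse the configuration, call setup() once at startup, then obtain per-module loggers with getLogger(__name__). The sketch below is a minimal standalone illustration, not taken from any of the projects listed; the 'myapp' project name is an assumption.

# Minimal oslo_log sketch (illustrative; 'myapp' is a made-up project name).
import sys

from oslo_config import cfg
from oslo_log import log as logging

CONF = cfg.CONF

logging.register_options(CONF)        # register oslo_log's config options
CONF(sys.argv[1:], project='myapp')   # parse CLI args and config files
logging.setup(CONF, 'myapp')          # configure handlers and levels once
LOG = logging.getLogger(__name__)     # per-module logger
LOG.info('Logger is ready')

Note that oslo_log's getLogger() returns an adapter (KeywordArgumentAdapter) around a standard library logger rather than a bare Logger, which is why several of the examples below reach for the underlying object via the .logger attribute, for instance to add handlers or call setLevel().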
Example 1:

def get_process(proc_name):
    """Get process given string in process cmd line."""
    LOG = log.getLogger(__name__)
    proc = None
    try:
        for pr in psutil.process_iter():
            for args in pr.cmdline():
                if proc_name in args.split(" "):
                    proc = pr
                    return proc
    except BaseException:
        # pass
        LOG.error("Error fetching {%s} process..." % proc_name)
    return None
Example 2:

def stop_spark_submit_process():
    """Stop spark submit program."""
    LOG = log.getLogger(__name__)
    try:
        # get the driver proc
        pr = get_process(SPARK_SUBMIT_PROC_NAME)
        if pr:
            # terminate (SIGTERM) spark driver proc
            for cpr in pr.children(recursive=False):
                LOG.info("Terminate child pid {%s} ..." % str(cpr.pid))
                cpr.terminate()
            # terminate spark submit proc
            LOG.info("Terminate pid {%s} ..." % str(pr.pid))
            pr.terminate()
    except Exception as e:
        LOG.error("Error killing spark submit "
                  "process: got exception: {%s}" % str(e))
Example 3:

def error_trap(app_name):
    """Decorator trapping any error during application boot time.

    :param app_name: Application name
    :type app_name: str
    :return: _wrapper function
    """
    @six.wraps(error_trap)
    def _wrapper(func):
        @six.wraps(_wrapper)
        def _inner_wrapper(*args, **kwargs):
            try:
                return func(*args, **kwargs)
            except Exception:
                logger = log.getLogger(__name__)
                logger.exception(
                    'Failed to load application: \'{}\''.format(app_name))
                raise
        return _inner_wrapper
    return _wrapper
Example 4:

def setUp(self):
    super(ShellCommandTest, self).setUp()

    def get_auth_endpoint(bound_self, args):
        return ('test', {})

    self.useFixture(fixtures.MonkeyPatch(
        'karborclient.shell.KarborShell._get_endpoint_and_kwargs',
        get_auth_endpoint))
    self.client = mock.MagicMock()

    # To prevent log descriptors from being closed during
    # shell tests set a custom StreamHandler
    self.logger = log.getLogger(None).logger
    self.logger.level = logging.DEBUG
    self.color_handler = handlers.ColorHandler(sys.stdout)
    self.logger.addHandler(self.color_handler)
Example 5:

def main():
    config.parse_args(sys.argv)
    logging.setup(CONF, "masakari")
    log = logging.getLogger(__name__)
    objects.register_all()
    launcher = service.process_launcher()
    started = 0
    try:
        server = service.WSGIService("masakari_api", use_ssl=CONF.use_ssl)
        launcher.launch_service(server, workers=server.workers or 1)
        started += 1
    except exception.PasteAppNotFound as ex:
        log.warning("%s. ``enabled_apis`` includes bad values. "
                    "Fix to remove this warning.", six.text_type(ex))

    if started == 0:
        log.error('No APIs were started. '
                  'Check the enabled_apis config option.')
        sys.exit(1)

    launcher.wait()
Example 6:

def __init__(self, virtapi, read_only=False):
    super(IronicDriver, self).__init__(virtapi)
    global ironic
    if ironic is None:
        ironic = importutils.import_module('ironicclient')
        # NOTE(deva): work around a lack of symbols in the current version.
        if not hasattr(ironic, 'exc'):
            ironic.exc = importutils.import_module('ironicclient.exc')
        if not hasattr(ironic, 'client'):
            ironic.client = importutils.import_module(
                'ironicclient.client')

    self.firewall_driver = firewall.load_driver(
        default='nova.virt.firewall.NoopFirewallDriver')
    self.node_cache = {}
    self.node_cache_time = 0

    ironicclient_log_level = CONF.ironic.client_log_level
    if ironicclient_log_level:
        level = py_logging.getLevelName(ironicclient_log_level)
        logger = py_logging.getLogger('ironicclient')
        logger.setLevel(level)

    self.ironicclient = client_wrapper.IronicClientWrapper()
Example 7:

def test_default_logging(self):
    stdlog = self.useFixture(fixtures.StandardLogging())
    root = logging.getLogger()
    # there should be a null handler as well at DEBUG
    self.assertEqual(2, len(root.handlers), root.handlers)
    log = logging.getLogger(__name__)
    log.info("at info")
    log.debug("at debug")
    self.assertIn("at info", stdlog.logger.output)
    self.assertNotIn("at debug", stdlog.logger.output)

    # broken debug messages should still explode, even though we
    # aren't logging them in the regular handler
    self.assertRaises(TypeError, log.debug, "this is broken %s %s", "foo")

    # and, ensure that one of the terrible log messages isn't
    # output at info
    warn_log = logging.getLogger('migrate.versioning.api')
    warn_log.info("warn_log at info, should be skipped")
    warn_log.error("warn_log at error")
    self.assertIn("warn_log at error", stdlog.logger.output)
    self.assertNotIn("warn_log at info", stdlog.logger.output)
Example 8:

def shutdown_all_threads_and_die():
    """Shut down all threads and exit process.

    Hit it with a hammer to kill all threads and die.
    """
    LOG = log.getLogger(__name__)
    LOG.info('Monasca Transform service stopping...')
    os._exit(1)
Example 9:

def __init__(self, display=None):
    setup_log()
    self.log = logging.getLogger(__name__)
    self.node = None
    self.opts = {}

# NOTE(pas-ha) this method is required for Ansible>=2.4
# TODO(pas-ha) rewrite to support defining callback plugin options
# in ansible.cfg after we require Ansible >=2.4
Example 10:

def setUpClass(cls):
    cls.LOG = logging.getLogger(cls._get_full_case_name())
    super(BaseTestCase, cls).setUpClass()
Example 11:

def _load_config():
    # Don't load in global context, since we can't assume
    # these modules are accessible when distutils uses
    # this module
    from six.moves import configparser

    from oslo_config import cfg

    from oslo_log import log as logging

    global loaded, MONITORS_VENDOR, MONITORS_PRODUCT, MONITORS_PACKAGE
    if loaded:
        return

    loaded = True

    cfgfile = cfg.CONF.find_file("release")
    if cfgfile is None:
        return

    try:
        cfg = configparser.RawConfigParser()
        cfg.read(cfgfile)

        if cfg.has_option("Masakarimonitors", "vendor"):
            MONITORS_VENDOR = cfg.get("Masakarimonitors", "vendor")

        if cfg.has_option("Masakarimonitors", "product"):
            MONITORS_PRODUCT = cfg.get("Masakarimonitors", "product")

        if cfg.has_option("Masakarimonitors", "package"):
            MONITORS_PACKAGE = cfg.get("Masakarimonitors", "package")
    except Exception as ex:
        LOG = logging.getLogger(__name__)
        LOG.error("Failed to load %(cfgfile)s: %(ex)s",
                  {'cfgfile': cfgfile, 'ex': ex})
Example 12:

def _handle_mp_log_events(self, p, mp_log_q):
    while True:
        try:
            record = mp_log_q.get(timeout=1)
            if record is None:
                break
            logger = logging.getLogger(record.name).logger
            logger.handle(record)
        except queue.Empty:
            if not p.is_alive():
                break
Example 13:

def _setup_task_process(mp_log_q):
    # Setting up logging and cfg, needed since this is a new process
    cfg.CONF(sys.argv[1:], project='coriolis', version="1.0.0")
    utils.setup_logging()

    # Log events need to be handled in the parent process
    log_root = logging.getLogger(None).logger
    for handler in log_root.handlers:
        log_root.removeHandler(handler)
    log_root.addHandler(handlers.QueueHandler(mp_log_q))
Example 14:

def __init__(self):
    self.logger = logging.getLogger(__name__)

# Authentication
Example 15:

def __init__(self):
    self.logger = logging.getLogger(__name__)
Example 16:

def __init__(self):
    if not (os.path.exists(const.CONFIG_PATH)):
        set_default_for_default_log_levels()

    logging.setup(CONF, 'armada')
    self.logger = logging.getLogger(__name__)
Example 17:

def __init__(self, name, conf, threads=1000):
    os.umask(0o27)  # ensure files are created with the correct privileges
    self._logger = logging.getLogger("eventlet.wsgi.server")
    self._wsgi_logger = WritableLogger(self._logger)
    self.name = name
    self.threads = threads
    self.children = set()
    self.stale_children = set()
    self.running = True
    self.pgid = os.getpid()
    self.conf = conf
    try:
        os.setpgid(self.pgid, self.pgid)
    except OSError:
        self.pgid = 0
Example 18:

def load_paste_app(app_name=None):
    """Builds and returns a WSGI app from a paste config file.

    We assume the last config file specified in the supplied ConfigOpts
    object is the paste config file.

    :param app_name: name of the application to load
    :raises RuntimeError when config file cannot be located or application
        cannot be loaded from config file
    """
    if app_name is None:
        app_name = cfg.CONF.prog

    conf_file = _get_deployment_config_file()
    if conf_file is None:
        raise RuntimeError(_("Unable to locate config file [%s]") %
                           cfg.CONF.paste_deploy['api_paste_config'])

    try:
        app = wsgi.paste_deploy_app(conf_file, app_name, cfg.CONF)

        # Log the options used when starting if we're in debug mode...
        if cfg.CONF.debug:
            cfg.CONF.log_opt_values(logging.getLogger(app_name),
                                    sys_logging.DEBUG)

        return app
    except (LookupError, ImportError) as e:
        raise RuntimeError(_("Unable to load %(app_name)s from "
                             "configuration file %(conf_file)s."
                             "\nGot: %(e)r") % {'app_name': app_name,
                                                'conf_file': conf_file,
                                                'e': e})
Example 19:

def setup_logging(name, level):
    logging.setup(CONF, name)
    LOG = logging.getLogger(None)
    if level == 'INFO':
        LOG.logger.setLevel(logging.INFO)
    if level == 'DEBUG':
        LOG.logger.setLevel(logging.DEBUG)
    if level == 'WARNING':
        LOG.logger.setLevel(logging.WARNING)
    if level == 'ERROR':
        LOG.logger.setLevel(logging.ERROR)
Example 20:

def _setup_logging(self, debug):
    # Output the logs to command-line interface
    color_handler = handlers.ColorHandler(sys.stdout)
    logger_root = logging.getLogger(None).logger
    logger_root.level = logging.DEBUG if debug else logging.WARNING
    logger_root.addHandler(color_handler)

    # Set the logger level of special library
    logging.getLogger('iso8601') \
        .logger.setLevel(logging.WARNING)
    logging.getLogger('urllib3.connectionpool') \
        .logger.setLevel(logging.WARNING)
Example 21:

def get_logger(name):
    return logging.getLogger(name)
Example 22:

def main(manager='rock.extension_manager.ExtensionManager'):
    utils.register_all_options()
    utils.prepare_log(service_name='rock-mon')
    log = logging.getLogger(__name__)
    log.info('Start rock monitor.')
    mgr_class = importutils.import_class(manager)
    file_path = os.path.abspath(__file__)
    file_dir = os.path.dirname(file_path)
    ext_mgr = mgr_class(file_dir + '/extensions')
    ext_mgr.start_collect_data()
Example 23:

def main(manager='rock.rules.rule_manager.RuleManager'):
    utils.register_all_options()
    utils.prepare_log(service_name='rock-engine')
    log = logging.getLogger(__name__)
    log.info('Start rock engine')
    mgr_class = importutils.import_class(manager)
    mgr = mgr_class('/etc/rock/cases')
    from rock.tasks.check_and_run import check_and_run
    check_and_run()
    mgr.after_start()
Example 24:

def set_json_logger():
    json_logger = syslog.getLogger(__name__ + '_json_log')
    json_logger.setLevel(syslog.DEBUG)
    if not json_logger.handlers:
        log_dir = cfg.CONF.log_dir if cfg.CONF.log_dir \
            else '/var/log/neutron'
        log_file = log_dir + '/json.output'
        fh = syslog.handlers.WatchedFileHandler(log_file)
        formatter = syslog.Formatter(
            '%(asctime)s.%(msecs)d %(process)d %(message)s',
            '%Y-%m-%d %H:%M:%S')
        fh.setFormatter(formatter)
        json_logger.addHandler(fh)
    return json_logger
Example 25:

def _load_config():
    # Don't load in global context, since we can't assume
    # these modules are accessible when distutils uses
    # this module
    from six.moves import configparser

    from oslo_config import cfg

    from oslo_log import log as logging

    global loaded, MASAKARI_VENDOR, MASAKARI_PRODUCT, MASAKARI_PACKAGE
    if loaded:
        return

    loaded = True

    cfgfile = cfg.CONF.find_file("release")
    if cfgfile is None:
        return

    try:
        cfg = configparser.RawConfigParser()
        cfg.read(cfgfile)

        if cfg.has_option("Masakari", "vendor"):
            MASAKARI_VENDOR = cfg.get("Masakari", "vendor")

        if cfg.has_option("Masakari", "product"):
            MASAKARI_PRODUCT = cfg.get("Masakari", "product")

        if cfg.has_option("Masakari", "package"):
            MASAKARI_PACKAGE = cfg.get("Masakari", "package")
    except Exception as ex:
        LOG = logging.getLogger(__name__)
        LOG.error("Failed to load %(cfgfile)s: %(ex)s",
                  {'cfgfile': cfgfile, 'ex': ex})
Example 26:

def __init__(self, logger_name):
    self._logger_name = logger_name
    self._snatch_handler = SnatchHandler()
    self._logger = oslo_logging.getLogger(self._logger_name)
    self._previous_level = self._logger.logger.getEffectiveLevel()
Example 27:

def dump_log(self, name):
    log = logging.getLogger(name)
    if not self.log_file or not os.path.exists(self.log_file):
        return
    with open(self.log_file, 'r') as fptr:
        for line in fptr:
            log.info(line.strip())
Example 28:

def __init__(self, threads=1000, initialize_glance_store=False):
    os.umask(0o27)  # ensure files are created with the correct privileges
    self._logger = logging.getLogger("eventlet.wsgi.server")
    self.threads = threads
    self.children = set()
    self.stale_children = set()
    self.running = True
    self.initialize_glance_store = initialize_glance_store
    self.pgid = os.getpid()
    try:
        os.setpgid(self.pgid, self.pgid)
    except OSError:
        self.pgid = 0
Example 29:

def load_paste_app(app_name, flavor=None, conf_file=None):
    """Builds and returns a WSGI app from a paste config file.

    We assume the last config file specified in the supplied ConfigOpts
    object is the paste config file, if conf_file is None.

    :param app_name: name of the application to load
    :param flavor: name of the variant of the application to load
    :param conf_file: path to the paste config file
    :raises: RuntimeError when config file cannot be located or application
        cannot be loaded from config file
    """
    # append the deployment flavor to the application name,
    # in order to identify the appropriate paste pipeline
    app_name += _get_deployment_flavor(flavor)

    if not conf_file:
        conf_file = _get_deployment_config_file()

    logger = logging.getLogger(__name__)

    try:
        logger.debug("Loading %(app_name)s from %(conf_file)s",
                     {'conf_file': conf_file, 'app_name': app_name})

        app = deploy.loadapp("config:%s" % conf_file, name=app_name)

        # Log the options used when starting if we're in debug mode...
        if CONF.debug:
            CONF.log_opt_values(logger, logging.DEBUG)

        return app
    except (LookupError, ImportError) as e:
        msg = (_("Unable to load %(app_name)s from "
                 "configuration file %(conf_file)s."
                 "\nGot: %(e)r") % {'app_name': app_name,
                                    'conf_file': conf_file,
                                    'e': e})
        logger.error(msg)
        raise RuntimeError(msg)
Example 30:

def test_issue_request_exceed_maximum_redirects(self):
    logger = logging.getLogger('fortiosclient.request')
    with mock.patch.object(logger, 'info') as mock_log:
        (mysock, myresponse, myconn) = self.prep_issue_request()
        self.req.start()
        self.assertIsNone(self.req.join())
        self.assertTrue(self.client.acquire_connection.called)
        for args, kwargs in mock_log.call_args_list:
            if "Maximum redirects exceeded" not in args:
                continue
            self.assertTrue("Maximum redirects exceeded" in args)
        self.assertTrue(mock_log.called)
Example 31:

@contextlib.contextmanager
def intercept_log_messages():
    try:
        mylog = logging.getLogger('nova')
        stream = cStringIO()
        handler = logging.logging.StreamHandler(stream)
        handler.setFormatter(formatters.ContextFormatter())
        mylog.logger.addHandler(handler)
        yield stream
    finally:
        mylog.logger.removeHandler(handler)
Example 32:

def __init__(self, etcd_client, name, election_path, work_time,
             recovery_time=5, multi_worker_ok=False):
    """Participant in a leader election via etcd datastore.

    etcd_client: the client handle for dealing with etcd
    name: the category name - we elect one leader of this type
    election_path: the location where we conduct elections in etcd
    work_time: the typical time the leader spends doing work. It
        remains elected for this long without conducting another
        election.
    recovery_time: the time, after we're certain the leader has
        stopped doing work, that is the longest we want to wait
        before someone else takes over if the leader has died
        (Note that this means you can be without a working leader
        for work_time + recovery_time if the leader crashes just
        after winning the election)
    multi_worker_ok: True if you'd prefer to favour having at least
        one elected leader over having no more than one elected
        leader. Typically this will cause a second leader to start
        working even if the original still believes it's elected,
        and is useful if that's more likely to reduce pauses.
    """
    self.etcd_client = etcd_client
    self.name = name

    # A unique value that identifies each worker thread
    self.thread_id = str(uuid.uuid4())

    # Sleeping threads wake up after this time and
    # check if a master is alive and one of them will become the master
    # if the current master key has expired
    self.recovery_time = recovery_time

    # Threads hold the lock for this long because this is the most
    # work they will do.
    self.work_time = work_time

    self.master_key = election_path + "/master_%s" % self.name

    # We recommend you configure these log levels
    # etcd_log = logging.getLogger('etcd.client')
    # etcd_log.setLevel(logging.logging.WARNING)
    # LOG.setLevel(logging.logging.INFO)

    global elector_cleanup
    elector_cleanup.append(self)
Example 33:

def __init__(self, name, app, host='0.0.0.0', port=0, pool_size=None,
             protocol=eventlet.wsgi.HttpProtocol, backlog=128,
             use_ssl=False, max_url_len=None):
    """Initialize, but do not start, a WSGI server.

    :param name: Pretty name for logging.
    :param app: The WSGI application to serve.
    :param host: IP address to serve the application.
    :param port: Port number to serve the application.
    :param pool_size: Maximum number of eventlets to spawn concurrently.
    :param backlog: Maximum number of queued connections.
    :param max_url_len: Maximum length of permitted URLs.
    :returns: None
    :raises: masakari.exception.InvalidInput
    """
    # Allow operators to customize http requests max header line size.
    eventlet.wsgi.MAX_HEADER_LINE = CONF.wsgi.max_header_line
    self.name = name
    self.app = app
    self._server = None
    self._protocol = protocol
    self.pool_size = pool_size or self.default_pool_size
    self._pool = eventlet.GreenPool(self.pool_size)
    self._logger = logging.getLogger("masakari.%s.wsgi.server" % self.name)
    self._use_ssl = use_ssl
    self._max_url_len = max_url_len
    self.client_socket_timeout = CONF.wsgi.client_socket_timeout or None

    if backlog < 1:
        raise exception.InvalidInput(
            reason=_('The backlog must be more than 0'))

    bind_addr = (host, port)
    try:
        info = socket.getaddrinfo(bind_addr[0],
                                  bind_addr[1],
                                  socket.AF_UNSPEC,
                                  socket.SOCK_STREAM)[0]
        family = info[0]
        bind_addr = info[-1]
    except Exception:
        family = socket.AF_INET

    try:
        self._socket = eventlet.listen(bind_addr, family, backlog=backlog)
    except EnvironmentError:
        LOG.error("Could not bind to %(host)s:%(port)d",
                  {'host': host, 'port': port})
        raise

    (self.host, self.port) = self._socket.getsockname()[0:2]
    LOG.info("%(name)s listening on %(host)s:%(port)d",
             {'name': self.name, 'host': self.host, 'port': self.port})
Example 34:

def main():
    config.parse_args(sys.argv)
    logging.setup(CONF, "nova")
    LOG = logging.getLogger('nova.all')
    utils.monkey_patch()
    objects.register_all()
    launcher = service.process_launcher()

    # nova-api
    for api in CONF.enabled_apis:
        try:
            should_use_ssl = api in CONF.enabled_ssl_apis
            server = service.WSGIService(api, use_ssl=should_use_ssl)
            launcher.launch_service(server, workers=server.workers or 1)
        except (Exception, SystemExit):
            LOG.exception(_LE('Failed to load %s-api'), api)

    for mod in [xvp_proxy]:
        try:
            launcher.launch_service(mod.get_wsgi_server())
        except (Exception, SystemExit):
            LOG.exception(_LE('Failed to load %s'), mod.__name__)

    for binary in ['nova-compute', 'nova-network', 'nova-scheduler',
                   'nova-cert', 'nova-conductor']:
        # FIXME(sirp): Most service configs are defined in nova/service.py,
        # but conductor has set a new precedent of storing these configs in
        # nova/<service>/api.py.
        #
        # We should update the existing services to use this new approach
        # so we don't have to treat conductor differently here.
        if binary == 'nova-conductor':
            topic = CONF.conductor.topic
            manager = CONF.conductor.manager
        else:
            topic = None
            manager = None

        try:
            launcher.launch_service(service.Service.create(
                binary=binary, topic=topic, manager=manager))
        except (Exception, SystemExit):
            LOG.exception(_LE('Failed to load %s'), binary)

    launcher.wait()
def main(): """Parse environment and arguments and call the appropriate action.""" config.parse_args(sys.argv, default_config_files=jsonutils.loads(os.environ['CONFIG_FILE'])) logging.setup(CONF, "nova") global LOG LOG = logging.getLogger('nova.dhcpbridge') if CONF.action.name == 'old': # NOTE(sdague): old is the most frequent message sent, and # it's a noop. We should just exit immediately otherwise we # can stack up a bunch of requests in dnsmasq. A SIGHUP seems # to dump this list, so actions queued up get lost. return objects.register_all() if not CONF.conductor.use_local: block_db_access() objects_base.NovaObject.indirection_api = \ conductor_rpcapi.ConductorAPI() else: LOG.warning(_LW('Conductor local mode is deprecated and will ' 'be removed in a subsequent release')) if CONF.action.name in ['add', 'del']: LOG.debug("Called '%(action)s' for mac '%(mac)s' with IP '%(ip)s'", {"action": CONF.action.name, "mac": CONF.action.mac, "ip": CONF.action.ip}) CONF.action.func(CONF.action.mac, CONF.action.ip) else: try: network_id = int(os.environ.get('NETWORK_ID')) except TypeError: LOG.error(_LE("Environment variable 'NETWORK_ID' must be set.")) return(1) print(init_leases(network_id)) rpc.cleanup()
Example 36:

def __init__(self, name, app, host='0.0.0.0', port=0, pool_size=None,
             protocol=eventlet.wsgi.HttpProtocol, backlog=128,
             use_ssl=False, max_url_len=None):
    """Initialize, but do not start, a WSGI server.

    :param name: Pretty name for logging.
    :param app: The WSGI application to serve.
    :param host: IP address to serve the application.
    :param port: Port number to serve the application.
    :param pool_size: Maximum number of eventlets to spawn concurrently.
    :param backlog: Maximum number of queued connections.
    :param max_url_len: Maximum length of permitted URLs.
    :returns: None
    :raises: nova.exception.InvalidInput
    """
    # Allow operators to customize http requests max header line size.
    eventlet.wsgi.MAX_HEADER_LINE = CONF.wsgi.max_header_line
    self.name = name
    self.app = app
    self._server = None
    self._protocol = protocol
    self.pool_size = pool_size or self.default_pool_size
    self._pool = eventlet.GreenPool(self.pool_size)
    self._logger = logging.getLogger("nova.%s.wsgi.server" % self.name)
    self._use_ssl = use_ssl
    self._max_url_len = max_url_len
    self.client_socket_timeout = CONF.wsgi.client_socket_timeout or None

    if backlog < 1:
        raise exception.InvalidInput(
            reason=_('The backlog must be more than 0'))

    bind_addr = (host, port)
    # TODO(dims): eventlet's green dns/socket module does not actually
    # support IPv6 in getaddrinfo(). We need to get around this in the
    # future or monitor upstream for a fix
    try:
        info = socket.getaddrinfo(bind_addr[0],
                                  bind_addr[1],
                                  socket.AF_UNSPEC,
                                  socket.SOCK_STREAM)[0]
        family = info[0]
        bind_addr = info[-1]
    except Exception:
        family = socket.AF_INET

    try:
        self._socket = eventlet.listen(bind_addr, family, backlog=backlog)
    except EnvironmentError:
        LOG.error(_LE("Could not bind to %(host)s:%(port)s"),
                  {'host': host, 'port': port})
        raise

    (self.host, self.port) = self._socket.getsockname()[0:2]
    LOG.info(_LI("%(name)s listening on %(host)s:%(port)s"),
             {'name': self.name, 'host': self.host, 'port': self.port})