我们从Python开源项目中,提取了以下50个代码示例,用于说明如何使用logging.captureWarnings()。
def setuplogger(consolelevel, filename=None, filelevel=None):
    """Configure the root logger to log to the console at *consolelevel*.

    Optionally also log to *filename* at *filelevel*; when *filelevel* is
    omitted it defaults to *consolelevel*.
    """
    # Idiom fix: compare against None with `is`, not `==`/`!=`.
    if filelevel is None:
        filelevel = consolelevel

    # logging.captureWarnings() exists from Python 2.7 on; guard for 2.6.
    if sys.version.startswith("2.7"):
        logging.captureWarnings(True)

    rootlogger = logging.getLogger()
    # The root must be at least as verbose as the most verbose handler,
    # otherwise records are dropped before reaching the handlers.
    rootlogger.setLevel(min(consolelevel, filelevel))

    formatter = logging.Formatter(
        '%(asctime)s.%(msecs)03d [%(levelname)s] %(message)s',
        datefmt='%Y-%m-%dT%H:%M:%S')

    if filename is not None:
        filehandler = logging.FileHandler(filename)
        filehandler.setLevel(filelevel)
        filehandler.setFormatter(formatter)
        rootlogger.addHandler(filehandler)

    consolehandler = logging.StreamHandler()
    consolehandler.setLevel(consolelevel)
    consolehandler.setFormatter(formatter)
    rootlogger.addHandler(consolehandler)
def run():  # pragma: no cover
    """Run Markdown from the command line."""
    # Parse options and adjust logging level if necessary
    options, logging_level = parse_options()
    if not options:
        sys.exit(2)
    logger.setLevel(logging_level)
    console_handler = logging.StreamHandler()
    logger.addHandler(console_handler)
    if logging_level <= WARNING:
        # Ensure deprecation warnings get displayed: route the `warnings`
        # module into logging and mirror the 'py.warnings' logger to stderr.
        warnings.filterwarnings('default')
        logging.captureWarnings(True)
        warn_logger = logging.getLogger('py.warnings')
        warn_logger.addHandler(console_handler)
    # Run
    markdown.markdownFromFile(**options)
def configure_logging(logging_config, logging_settings):
    """Apply the default logging config, then the user-supplied one.

    `logging_config` is a dotted path to a configuration callable;
    `logging_settings` is the dict passed to it.
    """
    if not sys.warnoptions:
        # Route warnings through python logging
        logging.captureWarnings(True)
        # RemovedInNextVersionWarning is a subclass of DeprecationWarning which
        # is hidden by default, hence we force the "default" behavior
        warnings.simplefilter("default", RemovedInNextVersionWarning)
    if logging_config:
        # First find the logging configuration function ...
        logging_config_func = import_string(logging_config)
        logging.config.dictConfig(DEFAULT_LOGGING)
        # ... then invoke it with the logging settings
        if logging_settings:
            logging_config_func(logging_settings)
def Logger(name, **kargs):
    """ Create and return logger """
    # PathDirs resolves the directory layout; kargs are forwarded to it.
    path_dirs = PathDirs(**kargs)
    logging.captureWarnings(True)
    logger = logging.getLogger(name)
    logger.setLevel(logging.INFO)
    # WatchedFileHandler reopens vent.log if it is rotated externally.
    handler = logging.handlers.WatchedFileHandler(os.path.join(
        path_dirs.meta_dir, "vent.log"))
    handler.setLevel(logging.INFO)
    formatter = logging.Formatter('%(asctime)s - %(name)s:%(lineno)-4d - '
                                  '%(levelname)s - %(message)s')
    handler.setFormatter(formatter)
    # Avoid stacking duplicate handlers when called again for the same name.
    if not len(logger.handlers):
        logger.addHandler(handler)
    return logger
def setup_logging(verbosity_level, save_debug_log): logging.captureWarnings(True) # if config['logging']['config_file']: # # Logging config from file must be read before other handlers are # # added. If not, the other handlers will have no effect. # try: # path = config['logging']['config_file'] # logging.config.fileConfig(path, disable_existing_loggers=False) # except Exception as e: # # Catch everything as logging does not specify what can go wrong. # logger.error('Loading logging config %r failed. %s', path, e) setup_console_logging(verbosity_level) if save_debug_log: print('Here we would call setup_debug_logging_to_file(config)') # setup_debug_logging_to_file(config) _delayed_handler.release()
def test_warnings(self):
    """captureWarnings(True) must route warnings to the 'py.warnings'
    logger, while an explicit file passed to showwarning bypasses it."""
    with warnings.catch_warnings():
        logging.captureWarnings(True)
        try:
            warnings.filterwarnings("always", category=UserWarning)
            file = io.StringIO()
            h = logging.StreamHandler(file)
            logger = logging.getLogger("py.warnings")
            logger.addHandler(h)
            warnings.warn("I'm warning you...")
            logger.removeHandler(h)
            s = file.getvalue()
            h.close()
            self.assertTrue(s.find("UserWarning: I'm warning you...\n") > 0)
            #See if an explicit file uses the original implementation
            file = io.StringIO()
            warnings.showwarning("Explicit", UserWarning, "dummy.py", 42,
                                 file, "Dummy line")
            s = file.getvalue()
            file.close()
            self.assertEqual(s, "dummy.py:42: UserWarning: Explicit\n Dummy line\n")
        finally:
            # Always restore default warning handling for later tests.
            logging.captureWarnings(False)
def tweet_search(log, item, limit=50):
    """Search twitter for *item* and return up to *limit* results.

    Raises Exception if the search string exceeds 500 characters; auth
    failures are reported via twitter_auth_issue() and re-raised.
    """
    log.debug(" Searching twitter for %s", item)
    check_twitter_config()
    if len(item) > 500:
        log.error(" Search string too long")
        # Bug fix: the original passed the length as a second constructor
        # argument instead of %-formatting it into the message.
        raise Exception("Search string too long: %d" % len(item))
    logging.captureWarnings(True)
    # Temporarily silence this logger around the noisy Twython call.
    old_level = log.getEffectiveLevel()
    log.setLevel(logging.ERROR)
    twitter = Twython(APP_KEY, APP_SECRET, OAUTH_TOKEN, OAUTH_TOKEN_SECRET)
    try:
        result = twitter.search(q=item, count=limit)
    except TwythonAuthError as e:  # Py3-compatible except syntax
        twitter_auth_issue(e)
        raise
    log.setLevel(old_level)
    return result
def check_relationship(log, id):
    """Return (i_follow_them, they_follow_me) for twitter user *id*."""
    my_screen_name = get_screen_name(log)
    if my_screen_name == "Unknown":
        # Bug fix: the original `raise("...")` tried to raise a bare string,
        # which is a TypeError on Python 3 rather than a useful error.
        raise Exception("Couldn't get my own screen name")
    log.debug(" Checking relationship of %s with me (%s)", id, my_screen_name)
    check_twitter_config()
    logging.captureWarnings(True)
    # Temporarily silence this logger around the Twython call.
    old_level = log.getEffectiveLevel()
    log.setLevel(logging.ERROR)
    twitter = Twython(APP_KEY, APP_SECRET, OAUTH_TOKEN, OAUTH_TOKEN_SECRET)
    try:
        result = twitter.show_friendship(source_screen_name=my_screen_name,
                                         target_screen_name=id)
    except TwythonAuthError as e:  # Py3-compatible except syntax
        log.setLevel(old_level)
        log.exception(" Problem trying to check relationship")
        twitter_auth_issue(e)
        raise
    log.setLevel(old_level)
    return result["relationship"]["source"]["following"], result["relationship"]["source"]["followed_by"]
def follow_twitter_user(log, id):
    """Follow the twitter user whose screen name is *id*."""
    log.debug(" Following %s", id)
    check_twitter_config()
    logging.captureWarnings(True)
    # Temporarily silence this logger around the Twython call.
    old_level = log.getEffectiveLevel()
    log.setLevel(logging.ERROR)
    twitter = Twython(APP_KEY, APP_SECRET, OAUTH_TOKEN, OAUTH_TOKEN_SECRET)
    try:
        twitter.create_friendship(screen_name=id)
    except TwythonAuthError as e:  # Py3-compatible except syntax
        log.setLevel(old_level)
        log.exception(" Problem trying to follow twitter user")
        twitter_auth_issue(e)
        raise
    log.setLevel(old_level)
def unfollow_twitter_user(log, id):
    """Unfollow twitter user *id*.

    Auth errors are reported and re-raised; other errors are logged and
    swallowed (best-effort unfollow).
    """
    log.debug(" Unfollowing %s", id)
    check_twitter_config()
    logging.captureWarnings(True)
    # Temporarily silence this logger around the Twython call.
    old_level = log.getEffectiveLevel()
    log.setLevel(logging.ERROR)
    twitter = Twython(APP_KEY, APP_SECRET, OAUTH_TOKEN, OAUTH_TOKEN_SECRET)
    try:
        twitter.destroy_friendship(screen_name=id)
    except TwythonAuthError as e:  # Py3-compatible except syntax
        log.setLevel(old_level)
        log.exception("Error unfollowing %s", id)
        twitter_auth_issue(e)
        raise
    except:
        # Deliberate best-effort: log and fall through without re-raising.
        log.exception("Error unfollowing %s", id)
    log.setLevel(old_level)
def get_screen_name(log):
    """Return the authenticated user's screen name, caching it in MYSELF.

    Returns "Unknown" when credentials cannot be verified (non-auth error).
    """
    global MYSELF
    if not MYSELF or MYSELF == "Unknown":
        log.debug(" Getting current user screen name")
        check_twitter_config()
        logging.captureWarnings(True)
        # Temporarily silence this logger around the Twython call.
        old_level = log.getEffectiveLevel()
        log.setLevel(logging.ERROR)
        twitter = Twython(APP_KEY, APP_SECRET, OAUTH_TOKEN, OAUTH_TOKEN_SECRET)
        try:
            details = twitter.verify_credentials()
        except TwythonAuthError as e:  # Py3-compatible except syntax
            log.setLevel(old_level)
            log.exception(" Problem trying to get screen name")
            twitter_auth_issue(e)
            raise
        except:
            # Non-auth failure: fall back to "Unknown" below.
            log.exception(" Problem trying to get screen name")
            details = None
        log.setLevel(old_level)
        name = "Unknown"
        if details:
            name = details["screen_name"]
        MYSELF = name
    return MYSELF
def test_warnings(self):
    """Python 2 variant: captureWarnings(True) must route warnings to the
    'py.warnings' logger; an explicit file to showwarning bypasses it."""
    with warnings.catch_warnings():
        logging.captureWarnings(True)
        try:
            warnings.filterwarnings("always", category=UserWarning)
            file = cStringIO.StringIO()
            h = logging.StreamHandler(file)
            logger = logging.getLogger("py.warnings")
            logger.addHandler(h)
            warnings.warn("I'm warning you...")
            logger.removeHandler(h)
            s = file.getvalue()
            h.close()
            self.assertTrue(s.find("UserWarning: I'm warning you...\n") > 0)
            #See if an explicit file uses the original implementation
            file = cStringIO.StringIO()
            warnings.showwarning("Explicit", UserWarning, "dummy.py", 42,
                                 file, "Dummy line")
            s = file.getvalue()
            file.close()
            self.assertEqual(s, "dummy.py:42: UserWarning: Explicit\n Dummy line\n")
        finally:
            # Always restore default warning handling for later tests.
            logging.captureWarnings(False)
def setup_logger(name=None, level=None, formatter_opts=None):
    """Sets up pretty logging using LogFormatter."""
    opts = {} if formatter_opts is None else formatter_opts
    logging.captureWarnings(True)
    log = logging.getLogger(name)
    # Precedence: DEBUG env var > explicit level > INFO default.
    if 'DEBUG' in os.environ:
        effective_level = logging.DEBUG
    elif level is None:
        effective_level = logging.INFO
    else:
        effective_level = level
    log.setLevel(effective_level)
    stream_handler = logging.StreamHandler()
    stream_handler.setFormatter(LogFormatter(**opts))
    log.addHandler(stream_handler)
    return log
def _init_logger(debug=False, log_format=None):
    '''Initialize the logger

    :param debug: Whether to enable debug mode
    :param log_format: Optional format string; falls back to the
        project-wide default from `constants`.
    :return: An instantiated logging instance
    '''
    # Drop any handlers from a previous initialization.
    LOG.handlers = []
    log_level = logging.INFO
    if debug:
        log_level = logging.DEBUG
    if not log_format:
        # default log format
        log_format_string = constants.log_format_string
    else:
        log_format_string = log_format
    logging.captureWarnings(True)
    LOG.setLevel(log_level)
    handler = logging.StreamHandler(sys.stderr)
    handler.setFormatter(logging.Formatter(log_format_string))
    LOG.addHandler(handler)
    LOG.debug("logging initialized")
def test_warnings(self):
    """captureWarnings(True) routes warnings to 'py.warnings'; an explicit
    file argument to showwarning uses the original implementation."""
    with warnings.catch_warnings():
        logging.captureWarnings(True)
        # addCleanup guarantees capture is disabled even if assertions fail.
        self.addCleanup(logging.captureWarnings, False)
        warnings.filterwarnings("always", category=UserWarning)
        stream = io.StringIO()
        h = logging.StreamHandler(stream)
        logger = logging.getLogger("py.warnings")
        logger.addHandler(h)
        warnings.warn("I'm warning you...")
        logger.removeHandler(h)
        s = stream.getvalue()
        h.close()
        self.assertTrue(s.find("UserWarning: I'm warning you...\n") > 0)
        #See if an explicit file uses the original implementation
        a_file = io.StringIO()
        warnings.showwarning("Explicit", UserWarning, "dummy.py", 42,
                             a_file, "Dummy line")
        s = a_file.getvalue()
        a_file.close()
        self.assertEqual(s, "dummy.py:42: UserWarning: Explicit\n Dummy line\n")
def configure_logging(self):
    """Load logging.conf from the config directory when present,
    otherwise fall back to basicConfig; warnings go through logging."""
    logging_config_file = os.path.join(self.config_directory, 'logging.conf')
    if os.path.isfile(logging_config_file):
        # `config` here is presumably logging.config — confirm import alias.
        config.fileConfig(logging_config_file)
    else:
        logging.basicConfig()
    logging.captureWarnings(True)
def getStationNamesVersion():
    '''Return the version tag of 12306's station_name.js script.

    Scrapes the 12306 page and extracts the `station_version=` query
    parameter from the script's src URL; returns "" if not found.
    '''
    logging.captureWarnings(True)
    url = "https://kyfw.12306.cn/otn/"
    station_name_version = ""
    # NOTE(review): TLS certificate verification is disabled here.
    response = requests.get(url, verify=False)
    content = response.text.encode("UTF-8")
    soup = bs4.BeautifulSoup(content, "html.parser")
    scripts = soup.findAll("script")
    srcs = []
    # Collect the src attribute of every <script> tag on the page.
    for i in scripts:
        try:
            # Not every script tag has a src attribute; skip those without one.
            src = i['src']
            srcs.append(src)
        except:
            pass
    for i in srcs:
        # The station_name script embeds its version in the query string.
        if "station_name" in i:
            station_name_version = i.split("station_version=")[1]
    return station_name_version
def test_warnings(self):
    """captureWarnings(True) routes warnings to 'py.warnings'; an explicit
    file argument to showwarning uses the original implementation."""
    with warnings.catch_warnings():
        logging.captureWarnings(True)
        # addCleanup guarantees capture is disabled even if assertions fail.
        self.addCleanup(logging.captureWarnings, False)
        warnings.filterwarnings("always", category=UserWarning)
        stream = io.StringIO()
        h = logging.StreamHandler(stream)
        logger = logging.getLogger("py.warnings")
        logger.addHandler(h)
        warnings.warn("I'm warning you...")
        logger.removeHandler(h)
        s = stream.getvalue()
        h.close()
        self.assertGreater(s.find("UserWarning: I'm warning you...\n"), 0)
        #See if an explicit file uses the original implementation
        a_file = io.StringIO()
        warnings.showwarning("Explicit", UserWarning, "dummy.py", 42,
                             a_file, "Dummy line")
        s = a_file.getvalue()
        a_file.close()
        self.assertEqual(s, "dummy.py:42: UserWarning: Explicit\n Dummy line\n")
def configure_logging(loggingc, verbose=0, loglevel=3, logfile=None):
    """Apply a logging dictConfig, building one from defaults if needed.

    When *loggingc* is None, the default config is customized from
    *verbose* (console) and *loglevel*/*logfile* (file handler).
    """
    if loggingc is None:
        loggingc = deepcopy(LOGGING_DEFAULT_CONFIG)
        if verbose > 3:
            verbose = 3
        loggingc['handlers']['console']['level'] = LOGLEVELS[verbose]
        loggingc['handlers']['console_tqdm']['level'] = LOGLEVELS[verbose]
        if logfile is None or loglevel == 0:
            # No file logging: remove the file handler and route everything
            # through the tqdm-aware console handler.
            del loggingc['handlers']['file']
            loggingc['loggers']['yam']['handlers'] = ['console_tqdm']
            loggingc['loggers']['py.warnings']['handlers'] = ['console_tqdm']
        else:
            loggingc['handlers']['file']['level'] = LOGLEVELS[loglevel]
            loggingc['handlers']['file']['filename'] = logfile
    logging.config.dictConfig(loggingc)
    # Capture warnings only if the config opts in.
    logging.captureWarnings(loggingc.get('capture_warnings', False))
def configure_logging(full_format: bool=False, log_level: str=None) -> None:
    """
    Setup logging to go to console and application log file

    If full_format is True, then use the terribly verbose format of
    the application log file also for the console. And log at the DEBUG level.
    Otherwise, you can choose the log level by passing one in.
    """
    config = load_json('logging.json')
    if full_format:
        # Reuse the file formatter for the console and force DEBUG.
        config["formatters"]["console"] = dict(config["formatters"]["file"])
        config["handlers"]["console"]["level"] = logging.DEBUG
    elif log_level:
        config["handlers"]["console"]["level"] = log_level
    logging.config.dictConfig(config)
    # Ignored due to lack of stub in type checking library
    logging.captureWarnings(True)  # type: ignore
    logger.info("Starting log for %s with ETL ID %s",
                package_version(), etl.monitor.Monitor.etl_id)
    logger.info('Command line: "%s"', ' '.join(sys.argv))
    logger.debug("Current working directory: '%s'", os.getcwd())
    logger.info(get_release_info())
def configure_standard_logging(verbosity: int, mode: LoggingMode):
    """Configure the standard library's `logging` module.

    Get `logging` working with options consistent with Twisted. NOTE
    CAREFULLY that `django.utils.log.DEFAULT_LOGGING` may have been applied
    (though only if installed and if configured in this environment). Those
    settings and the settings this function applies must be mentally
    combined to understand the resultant behaviour.

    :param verbosity: See `get_logging_level`.
    :param mode: The mode in which to configure logging. See `LoggingMode`.
    """
    set_standard_verbosity(verbosity)
    # Make sure that `logging` is not configured to capture warnings.
    logging.captureWarnings(False)
    # If a logger is ever configured `propagate=False` but without handlers
    # `logging.Logger.callHandlers` will employ the `lastResort` handler in
    # order that the log is not lost. This goes to standard error by default.
    # Here we arrange for these situations to be logged more distinctively so
    # that they're easier to diagnose.
    logging.lastResort = (
        logging.StreamHandler(
            twistedModern.LoggingFile(
                logger=twistedModern.Logger("lost+found"),
                level=twistedModern.LogLevel.error)))
def predict(
        predictor: str,
        verbose: bool,
):
    """Run *predictor* over a serialized LabelPool read from stdin and
    write the serialized predictions to stdout."""
    # Logging setup.
    logging.captureWarnings(True)
    if verbose:
        logging.root.setLevel(logging.DEBUG)
    # Read labels.
    labels = read_binary()
    labels = acton.proto.wrappers.LabelPool.deserialise(labels)
    # Write predictions.
    proto = acton.acton.predict(labels=labels, predictor=predictor)
    write_binary(proto.proto.SerializeToString())


# acton-recommend
def recommend(
        diversity: float,
        recommendation_count: int,
        recommender: str,
        verbose: bool,
):
    """Read serialized predictions from stdin and write recommendations.

    The *diversity* parameter is accepted but not yet implemented.
    """
    logging.warning('Not implemented: diversity')
    # Logging setup.
    logging.captureWarnings(True)
    if verbose:
        logging.root.setLevel(logging.DEBUG)
    # Read the predictions protobuf.
    predictions = read_binary()
    predictions = acton.proto.wrappers.Predictions.deserialise(predictions)
    # Write the recommendations protobuf.
    proto = acton.acton.recommend(
        predictions=predictions,
        recommender=recommender,
        n_recommendations=recommendation_count)
    write_binary(proto.proto.SerializeToString())


# acton-label
def test_benchmark():
    """Benchmark reading/writing a file and output the time"""
    logging.captureWarnings(True)
    FINPUT = os.path.join(PWD, 'data', 'NSL_catalog_col.txt')
    FOUTPUT = "/tmp/eqclusterng-test-output.txt"
    import time
    t0 = time.time()
    t = BPTree.from_file(FINPUT)  # Init tree with events from a file
    t.grow()  # Populate B-P tree with events
    t.prune()  # Prune given cutoff (calculate n if none)
    t.output2file(FOUTPUT)  # Output to file to match MATLAB perf
    t1 = time.time()
    # Python 2 print statement; the trailing comma suppresses the newline.
    print " bench: {0}eq/{1:.6f}s ".format(len(t.P), t1-t0),
def configure_logging(logging_config, logging_settings):
    """Apply the default logging config, then the user-supplied one.

    Variant using a bare `dictConfig` import rather than
    `logging.config.dictConfig`.
    """
    if not sys.warnoptions:
        # Route warnings through python logging
        logging.captureWarnings(True)
        # RemovedInNextVersionWarning is a subclass of DeprecationWarning which
        # is hidden by default, hence we force the "default" behavior
        warnings.simplefilter("default", RemovedInNextVersionWarning)
    if logging_config:
        # First find the logging configuration function ...
        logging_config_func = import_string(logging_config)
        dictConfig(DEFAULT_LOGGING)
        # ... then invoke it with the logging settings
        if logging_settings:
            logging_config_func(logging_settings)
def tearDown(self):
    """Disable warning capture after each test so later tests see the
    default `warnings` behaviour."""
    # Be sure to reset the warning capture
    logging.captureWarnings(False)
    super(TestDaiquiri, self).tearDown()
def setup(level=logging.WARNING, outputs=[output.STDERR], program_name=None,
          capture_warnings=True):
    """Setup Python logging.

    This will setup basic handlers for Python logging.

    :param level: Root log level.
    :param outputs: Iterable of outputs to log to.
    :param program_name: The name of the program. Auto-detected if not set.
    :param capture_warnings: Capture warnings from the `warnings' module.
    """
    # NOTE(review): mutable default argument for `outputs`; safe only as
    # long as it is never mutated — confirm.
    root_logger = logging.getLogger(None)
    # Remove all handlers
    for handler in list(root_logger.handlers):
        root_logger.removeHandler(handler)
    # Add configured handlers
    for out in outputs:
        if isinstance(out, str):
            # Resolve string names to preconfigured outputs.
            out = output.preconfigured.get(out)
            if out is None:
                raise RuntimeError("Output {} is not available".format(out))
        out.add_to_logger(root_logger)
    root_logger.setLevel(level)
    program_logger = logging.getLogger(program_name)

    def logging_excepthook(exc_type, value, tb):
        # Route uncaught exceptions into the program logger.
        program_logger.critical(
            "".join(traceback.format_exception(exc_type, value, tb)))

    sys.excepthook = logging_excepthook
    if capture_warnings:
        logging.captureWarnings(True)
def capture_logging():
    """Route `warnings` output into the logging system.

    Mostly warnings caused by self-signed certs.
    """
    logging.captureWarnings(True)
def main():
    """Entry point"""
    # Load the packaged logging.ini before anything logs.
    logging.config.fileConfig(pkg_resources.resource_filename(
        'functest', 'ci/logging.ini'))
    logging.captureWarnings(True)
    LOGGER.info('Starting Functest server')
    api_add_resource()
    init_db()
    APP.run(host='0.0.0.0')
def main():
    """Entry point"""
    # Load the packaged logging.ini before anything logs.
    logging.config.fileConfig(pkg_resources.resource_filename(
        'functest', 'ci/logging.ini'))
    logging.captureWarnings(True)
    deployment = CheckDeployment()
    # Return the aggregate check result as the exit status.
    return deployment.check_all()
def main():
    """Entry point"""
    # Load the packaged logging.ini before anything logs.
    logging.config.fileConfig(pkg_resources.resource_filename(
        'functest', 'ci/logging.ini'))
    logging.captureWarnings(True)
    parser = RunTestsParser()
    args = parser.parse_args(sys.argv[1:])
    runner = Runner()
    # `.value` converts the runner's result enum to an exit status.
    return runner.main(**args).value
def main():
    """ main entry point for script """
    opts = getoptions()
    logging.basicConfig(format='%(asctime)s [%(levelname)s] %(message)s',
                        datefmt='%Y-%m-%dT%H:%M:%S',
                        level=opts['log'])
    # logging.captureWarnings() exists from Python 2.7 on; guard for 2.6.
    if sys.version.startswith("2.7"):
        logging.captureWarnings(True)
    config = Config(opts['config'])
    dwconfig = config.getsection("datawarehouse")
    dbif = DbHelper(dwconfig, 'modw_supremm.batchscripts')
    for resourcename, settings in config.resourceconfigs():
        # Process all resources unless a specific one was requested.
        if opts['resource'] in (None, resourcename, str(settings['resource_id'])):
            logging.debug("Processing %s (id=%s)", resourcename,
                          settings['resource_id'])
            if "script_dir" in settings:
                total = processfor(settings['resource_id'],
                                   settings['script_dir'], dbif,
                                   opts['deltadays'])
                logging.info("Processed %s files for %s", total, resourcename)
            else:
                logging.debug("Skip resource %s no script dir defined",
                              resourcename)
    dbif.postinsert()
def main():
    """ main entry point for script

    Loads pre-processors/plugins (optionally filtered by white/blacklist),
    summarizes the archives given on the command line, and prints the
    summary as indented JSON.
    """
    opts, args = getoptions()
    logging.basicConfig(format='%(asctime)s [%(levelname)s] %(message)s',
                        datefmt='%Y-%m-%dT%H:%M:%S',
                        level=opts['log'])
    # logging.captureWarnings() exists from Python 2.7 on; guard for 2.6.
    if sys.version.startswith("2.7"):
        logging.captureWarnings(True)
    preprocs = loadpreprocessors()
    plugins = loadplugins()
    # Whitelist takes precedence over blacklist.
    if len(opts['plugin_whitelist']) > 0:
        preprocs, plugins = filter_plugins(
            {"plugin_whitelist": opts['plugin_whitelist']}, preprocs, plugins)
    elif len(opts['plugin_blacklist']) > 0:
        preprocs, plugins = filter_plugins(
            {"plugin_blacklist": opts['plugin_blacklist']}, preprocs, plugins)
    logging.debug("Loaded %s preprocessors", len(preprocs))
    logging.debug("Loaded %s plugins", len(plugins))
    archivelist = args
    job = MockJob(archivelist)
    config = Config(confpath=opts['config'])
    preprocessors = [x(job) for x in preprocs]
    analytics = [x(job) for x in plugins]
    s = Summarize(preprocessors, analytics, job, config)
    s.process()
    result = s.get()
    # Fix: parenthesized form works as both a Py2 print statement target
    # and a Py3 print() call for a single argument.
    print(json.dumps(result, indent=4))
def __init__(self, capture_warnings=True, use_default_kvp=True, json=False):
    """Build the logging configuration from defaults.

    :param capture_warnings: Route `warnings` through logging.
    :param use_default_kvp: Install the default key/value-pair formatter.
    :param json: Switch the formatter to JSON output.
    """
    self._config = deepcopy(DEFAULT_LOGGING_CONF)
    if use_default_kvp:
        self.update_default_formatter(DEFAULT_KVP_FORMAT)
    if json:
        self.enable_json_formatter()
    # Custom level to suppress handlers.
    # Bug fix: logging.addLevelName takes (level, levelName); the original
    # passed the arguments reversed, corrupting the level/name mappings.
    logging.addLevelName(LEVEL_MAP['DISABLED'], 'DISABLED')
    logging.captureWarnings(capture_warnings)
def setup_logging(conf):
    """Configure oslo.log for deckhand and capture stdlib warnings.

    Here `logging` is oslo_log's setup module and `py_logging` is the
    standard library `logging` module.
    """
    # Add additional dependent libraries that have unhelp bug levels
    extra_log_level_defaults = []
    logging.set_defaults(default_log_levels=logging.get_default_log_levels() +
                         extra_log_level_defaults)
    logging.setup(conf, 'deckhand')
    py_logging.captureWarnings(True)
def verbosity(lvl):
    """Map a numeric verbosity level onto logger configuration.

    0 or less: no-op; 1: 'aptrepo' logger at INFO; 2: root at INFO;
    3+: root at DEBUG with a stream handler and warning capture.
    """
    if lvl < 1:
        return
    elif lvl == 1:
        log = logging.getLogger('aptrepo')
        log.setLevel(logging.INFO)
    elif lvl == 2:
        log = logging.getLogger()
        log.setLevel(logging.INFO)
    else:
        log = logging.getLogger()
        log.setLevel(logging.DEBUG)
        # NOTE(review): as written, only the highest verbosity attaches a
        # stream handler and captures warnings — confirm this is intended.
        log.addHandler(logging.StreamHandler())
        logging.captureWarnings(True)
def set_logger(log_file_path):
    """Return the package logger wired to a rotating file at *log_file_path*.

    Keeps up to ten 1 MiB backups and routes `warnings` through logging.
    """
    log = logging.getLogger(NAME)
    log.setLevel(logging.INFO)
    rotating = logging.handlers.RotatingFileHandler(
        log_file_path, maxBytes=1024 * 1024, backupCount=10)
    rotating.setFormatter(
        logging.Formatter("%(asctime)s %(levelname)-8s %(message)s"))
    log.addHandler(rotating)
    logging.captureWarnings(True)
    return log
def init_logger(self) -> None:
    """Create the per-platform log directory and wire up file, console
    widget and (in debug mode) stderr logging handlers."""
    try:
        log_path = QStandardPaths.writableLocation(QStandardPaths.AppConfigLocation)
    except AttributeError:
        # Presumably older Qt without AppConfigLocation — fall back to
        # hand-built per-platform config paths.
        if sys.platform == 'win32':
            log_path = os.path.join(QDir.homePath(), 'AppData', 'Local',
                                    qApp.applicationName().lower())
        elif sys.platform == 'darwin':
            log_path = os.path.join(QDir.homePath(), 'Library', 'Preferences',
                                    qApp.applicationName().lower())
        else:
            log_path = os.path.join(QDir.homePath(), '.config',
                                    qApp.applicationName().lower())
    os.makedirs(log_path, exist_ok=True)
    self.console = ConsoleWidget(self)
    self.consoleLogger = ConsoleHandler(self.console)
    handlers = [logging.handlers.RotatingFileHandler(
                    os.path.join(log_path,
                                 '%s.log' % qApp.applicationName().lower()),
                    maxBytes=1000000, backupCount=1),
                self.consoleLogger]
    if self.parser.isSet(self.debug_option) or self.verboseLogs:
        # noinspection PyTypeChecker
        handlers.append(logging.StreamHandler())
    logging.setLoggerClass(VideoLogger)
    logging.basicConfig(handlers=handlers,
                        format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',
                        datefmt='%Y-%m-%d %H:%M',
                        level=logging.INFO)
    logging.captureWarnings(capture=True)
    # Route uncaught exceptions into the log as well.
    sys.excepthook = MainWindow.log_uncaught_exceptions
def tweet_string(message, log, media=None):
    """Tweet *message*, optionally with an image, retrying up to 5 times.

    Auth errors abort immediately; other errors back off with a random
    sleep and retry. Failure after 5 attempts is logged as an error.
    """
    check_twitter_config()
    logging.captureWarnings(True)
    # Temporarily silence this logger around the Twython calls.
    old_level = log.getEffectiveLevel()
    log.setLevel(logging.ERROR)
    twitter = Twython(APP_KEY, APP_SECRET, OAUTH_TOKEN, OAUTH_TOKEN_SECRET)
    retries = 0
    while retries < 5:
        log.setLevel(logging.ERROR)
        try:
            if media:
                # Fix: close the media file instead of leaking the handle.
                with open(media, 'rb') as photo:
                    media_ids = twitter.upload_media(media=photo)
                twitter.update_status(status=message.encode('utf-8').strip(),
                                      media_ids=media_ids['media_id'])
            else:
                twitter.update_status(status=message.encode('utf-8').strip())
            break
        except TwythonAuthError as e:  # Py3-compatible except syntax
            log.setLevel(old_level)
            log.exception(" Problem trying to tweet string")
            twitter_auth_issue(e)
            return
        except:
            log.setLevel(old_level)
            log.exception(" Problem trying to tweet string")
            retries += 1
            # Randomized backoff that grows with the retry count.
            s = random.randrange(5, 10 * retries)
            log.debug(" sleeping %d seconds for retry", s)
            time.sleep(s)
    log.setLevel(old_level)
    if retries == 5:
        log.error("Couldn't tweet string: %s with media: %s", message, media)
def get_following(log, id):
    """Yield the screen names of accounts that user *id* follows.

    Pages through Twython's friends list 200 at a time, sleeping between
    pages to stay under the rate limit, and stops after 15 pages.
    """
    log.debug(" Getting people %s is following", id)
    check_twitter_config()
    logging.captureWarnings(True)
    # Temporarily silence this logger around the Twython calls.
    old_level = log.getEffectiveLevel()
    log.setLevel(logging.ERROR)
    twitter = Twython(APP_KEY, APP_SECRET, OAUTH_TOKEN, OAUTH_TOKEN_SECRET)
    log.setLevel(old_level)
    cursor = -1
    max_loops = 15
    while cursor != 0:
        try:
            log.setLevel(logging.ERROR)
            following = twitter.get_friends_list(screen_name=id,
                                                 cursor=cursor, count=200)
            log.setLevel(old_level)
        except TwythonAuthError as e:  # Py3-compatible except syntax
            log.exception(" Problem trying to get people following")
            twitter_auth_issue(e)
            raise
        for u in following["users"]:
            yield u["screen_name"]
        cursor = following["next_cursor"]
        if cursor:
            s = random.randint(55, 65)
            log.debug(" Sleeping %ds to avoid rate limit. Cursor: %s",
                      s, cursor)
            time.sleep(s)
        else:
            log.debug(" Normal query end")
        max_loops -= 1
        if max_loops <= 0:
            log.debug(" Killing search due to max loops")
            break
    log.setLevel(old_level)