The following 50 code examples, drawn from open source Python projects, illustrate how to use logging.shutdown().
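Before the project examples, here is a minimal sketch of the pattern most of them follow (a hypothetical script, not taken from any project below): configure logging, then arrange for logging.shutdown() to run at interpreter exit so every handler is flushed and closed.

import atexit
import logging

# Hypothetical minimal example: logging.shutdown() flushes and closes all
# registered handlers. Registering it with atexit ensures buffered records
# reach disk even when the program exits without an explicit cleanup path.
logging.basicConfig(filename='app.log', level=logging.INFO)
atexit.register(logging.shutdown)

logging.getLogger(__name__).info("work done")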
def _config(props, category=None, disable_existing_loggers=1):
    logging.shutdown()

    # critical section
    # patterned after code from logging.config
    logging._acquireLock()
    try:
        logging._handlers.clear()
        del logging._handlerList[:]
        # Handlers add themselves to logging._handlers
        handlers = _install_handlers(props)
        _install_loggers(props, handlers, category, disable_existing_loggers)
    except Exception as e:
        traceback.print_exc()
        raise e
    finally:
        logging._releaseLock()
def cancel(self):
    '''To exit cleanly, flush all write buffers, and stop all running timers.
    '''
    #self.stopflag = True
    #time.sleep(2.5)
    #self.queuetimer.cancel()
    self.finished.set()
    self.WriteToLogFile()
    self.FlushLogWriteBuffers("Flushing buffers prior to exiting")
    logging.shutdown()
    self.flushtimer.cancel()
    self.logrotatetimer.cancel()
    if self.settings['E-mail']['SMTP Send Email'] == True:
        self.emailtimer.cancel()
    if self.settings['Log Maintenance']['Delete Old Logs'] == True:
        self.oldlogtimer.cancel()
    #~ if self.settings['Timestamp']['Timestamp Enable'] == True:
    #~     self.timestamptimer.cancel()
    if self.settings['Zip']['Zip Enable'] == True:
        self.ziptimer.cancel()
def test_it(self, mock_register, mock_get, mock_except_hook, mock_sys):
    mock_sys.argv = ['--debug']
    mock_sys.version_info = sys.version_info
    self._call()

    mock_root_logger = mock_get()
    mock_root_logger.setLevel.assert_called_once_with(logging.DEBUG)
    self.assertEqual(mock_root_logger.addHandler.call_count, 2)

    MemoryHandler = logging.handlers.MemoryHandler
    memory_handler = None
    for call in mock_root_logger.addHandler.call_args_list:
        handler = call[0][0]
        if memory_handler is None and isinstance(handler, MemoryHandler):
            memory_handler = handler
        else:
            self.assertTrue(isinstance(handler, logging.StreamHandler))
    self.assertTrue(
        isinstance(memory_handler.target, logging.StreamHandler))

    mock_register.assert_called_once_with(logging.shutdown)
    mock_sys.excepthook(1, 2, 3)
    mock_except_hook.assert_called_once_with(
        memory_handler, 1, 2, 3, debug=True, log_path=mock.ANY)
def submit(self, fn, *args, **kwargs):
    if self._shutdown_lock.acquire():
        if self._shutdown:
            self._shutdown_lock.release()
            raise RuntimeError(
                'Cannot schedule new futures after shutdown')

        f = WSGIFuture(self.futures)
        w = _WorkItem(f, fn, args, kwargs)

        self._work_queue.put(w)
        self._adjust_thread_count()
        self._shutdown_lock.release()
        return f
    else:
        return False
def run():
    """Execute main loop."""
    try:
        setup()
        try:
            parser, args = make_argparser()
            mainloop(parser, args)
        except KeyboardInterrupt as exc:
            sys.stderr.flush()
            sys.exit(2)
        except IOError as exc:
            if exc.errno == errno.EPIPE:  # downstream is done with our output
                sys.stderr.flush()
                sys.exit(0)
            else:
                raise
    finally:
        logging.shutdown()
def unload(self):
    if self._current_reader:
        self._current_reader.get_source().close_connection()
        self._current_reader = None
    try:
        self.iface.mapCanvas().xyCoordinates.disconnect(self._handle_mouse_move)
        QgsMapLayerRegistry.instance().layersWillBeRemoved.disconnect(self._on_remove)
        self.iface.newProjectCreated.disconnect(self._on_project_change)
        self.iface.projectRead.disconnect(self._on_project_change)
        self._debouncer.stop()
        self._debouncer.shutdown()
        self.iface.layerToolBar().removeAction(self.toolButtonAction)
        self.iface.removePluginVectorMenu("&Vector Tiles Reader", self.about_action)
        self.iface.removePluginVectorMenu("&Vector Tiles Reader", self.open_connections_action)
        self.iface.removePluginVectorMenu("&Vector Tiles Reader", self.reload_action)
        self.iface.removePluginVectorMenu("&Vector Tiles Reader", self.clear_cache_action)
        self.iface.addLayerMenu().removeAction(self.open_connections_action)
        logging.shutdown()
    except:
        pass
def init_logger(filename, root, verbose):
    """Initialise logger."""
    logfile = os.path.join(root, filename + '.log')
    logging.shutdown()
    root_logger = logging.getLogger()
    for handler in list(root_logger.handlers):
        root_logger.removeHandler(handler)
        handler.flush()
        handler.close()
    for filt in list(root_logger.filters):
        # Filters have no flush()/close(); removing them is enough.
        root_logger.removeFilter(filt)
    logging.basicConfig(filename=logfile, level=logging.INFO, filemode='w',
                        format='%(levelname)s (%(asctime)-15s): %(message)s')
    stream_handler = logging.StreamHandler()
    stream_handler.setLevel(logging.INFO if verbose else logging.ERROR)
    stream_handler.setFormatter(
        logging.Formatter('%(levelname)s (%(asctime)-15s): %(message)s'))
    root_logger.addHandler(stream_handler)
    return logfile
def setUp(self):
    super(LoggerAdapterTest, self).setUp()
    old_handler_list = logging._handlerList[:]

    self.recording = RecordingHandler()
    self.logger = logging.root
    self.logger.addHandler(self.recording)
    self.addCleanup(self.logger.removeHandler, self.recording)
    self.addCleanup(self.recording.close)

    def cleanup():
        logging._handlerList[:] = old_handler_list

    self.addCleanup(cleanup)
    self.addCleanup(logging.shutdown)
    self.adapter = logging.LoggerAdapter(logger=self.logger, extra=None)
def main(args):
    """Main.

    TODO: validate ip address input
    """
    setup_logs()
    _logger.debug("Starting main()")
    ips = []
    args = parse_args(args)
    print("[*] Fuzzing port {} on IP address {}".format(args.port, args.ip))
    fuzz(args.ip, args.port, args.size, args.increment)
    _logger.debug("All done, shutting down.")
    logging.shutdown()
def main(args):
    """Main."""
    setup_logs()
    _logger.debug("Starting main()")
    args = parse_args(args)
    print("[*] Initiating attack on IP address {}:{}".format(args.ip, args.port))
    if args.test:
        pwn(args.ip, args.port, 'test')
    elif args.unique:
        pwn(args.ip, args.port, 'unique')
    elif args.chars:
        pwn(args.ip, args.port, 'chars')
    elif args.breakpoint:
        pwn(args.ip, args.port, 'break')
    else:
        pwn(args.ip, args.port, args.payload)
    _logger.debug("All done, shutting down.")
    logging.shutdown()
def run(self):
    """Loads plugins, and initiates polling schedules."""
    reactor.callWhenRunning(self.install_sighandlers)

    if self.options.netbox:
        self.setup_single_job()
    elif self.options.multiprocess:
        self.setup_multiprocess(self.options.multiprocess,
                                self.options.max_jobs)
    elif self.options.worker:
        self.setup_worker()
    else:
        self.setup_scheduling()

    reactor.suggestThreadPoolSize(self.options.threadpoolsize)
    reactor.addSystemEventTrigger("after", "shutdown", self.shutdown)
    reactor.run()
def monitor():
    logger.debug(u'??????...')
    get_config()
    try:
        content = handler()
        if content:
            send_email(content, message_type='comment')
    except Exception as e:
        logger.exception(e)
    else:
        global the_number_of_mistakes
        the_number_of_mistakes = -2
        if not content:
            logger.debug(u'????????')
    finally:
        # ??????????????????????
        logging.shutdown()
def run(self):
    while True:
        try:
            # Get orders
            commandToken = self.inputCommandQueue.get_nowait()
            if commandToken.shutdown:
                self._logInfo("Shutting down")
                self.closing = True
                self.pool.shutdown()
                self.pool.join()
                return  # Exit the thread.
            else:
                self._logError("Unknown command token")  # Unknown command, ignore.
        except Queue.Empty:
            #self._log("Nothing in queue")
            time.sleep(0.5)
def image_saver(config, imageName='webgobbler.bmp', generateSingleImage=False):
    '''Continuously generate new images (using the assembler_superpose)
    and save them to a file.

    config (an applicationConfig object): the program configuration
    imageName (string): name of the image to save (e.g. "toto.jpeg", "dudu.png"...)
    generateSingleImage (bool): if True, generate a single image and stop.
    '''
    log = logging.getLogger('image_saver')
    a = assembler_superpose(pool=imagePool(config=config), config=config)
    a.start()
    try:
        while True:
            log.info("Generating a new image to %s" % imageName)
            a.superposeB()  # Evolve current image
            a.saveImageTo(imageName)
            if generateSingleImage:
                break
            log.info("Will generate a new image in %d seconds." % config["program.every"])
            time.sleep(config["program.every"])
    finally:
        a.shutdown()
        a.join()
def autopilot(self):
    # Make sure the payload release is in the closed position
    self.lock_payload()

    # TODO use GPS Fix = 3D before allowing continue?
    # TODO check if safety switch activated?
    while not self.connection.location.global_relative_frame.alt and not self.stop():
        self.log("No GPS signal yet")
        time.sleep(1)

    while not self.stop():
        if not self.released:
            time.sleep(3)

    logging.shutdown()
    self.connection.close()
def run(self):
    args = self.mechanic.commandLine

    if args.verbose:
        self.logger.setLevel(logging.DEBUG)
    else:
        self.logger.setLevel(logging.INFO)

    if not self.mechanic.config.getLogFile() in ["", "/dev/stderr", "stderr"]:
        makeparentdirs(self.mechanic.config.getLogFile())
        self.logger.addHandler(logging.FileHandler(self.mechanic.config.getLogFile()))

    command = self.mechanic.commands.get(args.commandName)
    try:
        if command is not None:
            command.run(args)
        else:
            self.mechanic.defaultCommand.run(args)
    except MigrationFailedException as e:
        self.logger.error(e.message)
        return 2
    except FollowUpCommandFailedException as e:
        self.logger.error(e.message)
        return 3
    except MechanicException as e:
        self.logger.error(e.message)
        return 1
    except Exception as e:
        self.logger.error(e)
        return 1
    finally:
        logging.shutdown()

    # Success path: placed after the try/finally so the error returns
    # above are not overridden.
    return 0
def tearDown(self):
    if task_id() is not None:
        # We're in a child process, and probably got to this point
        # via an uncaught exception.  If we return now, both
        # processes will continue with the rest of the test suite.
        # Exit now so the parent process will restart the child
        # (since we don't have a clean way to signal failure to
        # the parent that won't restart)
        logging.error("aborting child process from tearDown")
        logging.shutdown()
        os._exit(1)
    # In the surviving process, clear the alarm we set earlier
    signal.alarm(0)
    super(ProcessTest, self).tearDown()
def stop(self, exit=True):
    for servers in (self.xmlrpcd, self.jsonrpcd, self.webdavd):
        for server in servers:
            server.stop()
            server.join()
    if exit:
        if self.options.pidfile:
            os.unlink(self.options.pidfile)
        logging.getLogger('server').info('stopped')
        logging.shutdown()
        sys.exit(0)
def __exit__(self, exc_type, exc_value, traceback):
    # Standard context-manager signature; the exception arguments are
    # ignored here.
    logging.shutdown()
    sys.exit()
def test():
    logger = logging.getLogger("")
    logger.setLevel(logging.DEBUG)
    logger.addHandler(BufferingSMTPHandler(MAILHOST, FROM, TO, SUBJECT, 10))
    for i in xrange(102):
        logger.info("Info index = %d", i)
    logging.shutdown()
def init_logger(min_level=logging.WARNING, path=None, max_bytes=10 << 100):
    global _instance
    logging.basicConfig(format='[%(asctime)-15s] (%(levelname)s) %(threadName)s: %(message)s')
    _instance = logging.getLogger()
    _instance.propagate = False
    _instance.setLevel(min_level)
    if path:
        handler = logging.handlers.RotatingFileHandler(path, 'a', maxBytes=max_bytes)
        _instance.addHandler(handler)
    atexit.register(logging.shutdown)
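A brief note on the atexit pattern in the example above: once registered, logging.shutdown() runs at interpreter exit and flushes any records still buffered in the RotatingFileHandler. A hypothetical caller might look like this (the module name log_util is assumed purely for illustration):

import logging
import log_util  # hypothetical module containing the init_logger() above

log_util.init_logger(min_level=logging.INFO, path='service.log')
logging.getLogger().info('service started')
# No explicit cleanup is needed: atexit invokes logging.shutdown() on exit.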
def errorCallback(self, msg, fail):
    if fail:
        self.logger.critical(msg)
        if self.verbose:
            self.dumpMemory()
        logging.shutdown()
        exit(-1)
    else:
        logging.warning(msg)

# <summary>
# Dumps important program memory in the event of critical failure
# </summary>
def __init__(self, sock_tuple, port, secure=False):
    self.client_addr, self.client_port = sock_tuple[1][:2]
    self.server_port = port
    self.socket = sock_tuple[0]
    self.start_time = time.time()
    self.ssl = has_ssl and isinstance(self.socket, ssl.SSLSocket)
    self.secure = secure

    if IS_JYTHON:
        # In Jython we must set TCP_NODELAY here since it does not
        # inherit from the listening socket.
        # See: http://bugs.jython.org/issue1309
        self.socket.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)

    self.socket.settimeout(SOCKET_TIMEOUT)

    self.shutdown = self.socket.shutdown
    self.fileno = self.socket.fileno
    self.setblocking = self.socket.setblocking
    self.recv = self.socket.recv
    self.send = self.socket.send
    self.makefile = self.socket.makefile

    if sys.platform == 'darwin':
        self.sendall = self._sendall_darwin
    else:
        self.sendall = self.socket.sendall
def stop(self, stoplogging=False):
    log.info('Stopping %s' % SERVER_SOFTWARE)

    self.startstop_lock.acquire()
    try:
        # Stop listeners
        for l in self.listeners:
            l.ready = False

        # Encourage a context switch
        time.sleep(0.01)

        for l in self.listeners:
            if l.isAlive():
                l.join()

        # Stop Monitor
        self._monitor.stop()
        if self._monitor.isAlive():
            self._monitor.join()

        # Stop Worker threads
        self._threadpool.stop()

        if stoplogging:
            logging.shutdown()
            msg = "Calling logging.shutdown() is now the responsibility " \
                  "of the application developer. Please update your " \
                  "applications to no longer call rocket.stop(True)"
            try:
                raise DeprecationWarning(msg)
            except ImportError:
                raise RuntimeError(msg)
    finally:
        self.startstop_lock.release()
def stop(self):
    self.alive = False

    if __debug__:
        log.debug("Stopping threads.")

    self.stop_server = True

    # Prompt the threads to die
    self.shrink(len(self.threads))

    # Stop futures initially
    if has_futures and self.app_info.get('futures'):
        if __debug__:
            log.debug("Future executor is present.  Python will not "
                      "exit until all jobs have finished.")
        self.app_info['executor'].shutdown(wait=False)

    # Give them the gun
    # active_threads = [t for t in self.threads if t.isAlive()]
    # while active_threads:
    #     t = active_threads.pop()
    #     t.kill()

    # Wait until they pull the trigger
    for t in self.threads:
        if t.isAlive():
            t.join()

    # Clean up the mess
    self.bring_out_your_dead()
def pre_arg_parse_setup():
    """Setup logging before command line arguments are parsed.

    Terminal logging is setup using
    `certbot.constants.QUIET_LOGGING_LEVEL` so Certbot is as quiet as
    possible. File logging is setup so that logging messages are
    buffered in memory. If Certbot exits before `post_arg_parse_setup`
    is called, these buffered messages are written to a temporary file.
    If Certbot doesn't exit, `post_arg_parse_setup` writes the messages
    to the normal log files.

    This function also sets `logging.shutdown` to be called on program
    exit which automatically flushes logging handlers and
    `sys.excepthook` to properly log/display fatal exceptions.
    """
    temp_handler = TempHandler()
    temp_handler.setFormatter(logging.Formatter(FILE_FMT))
    temp_handler.setLevel(logging.DEBUG)
    memory_handler = MemoryHandler(temp_handler)

    stream_handler = ColoredStreamHandler()
    stream_handler.setFormatter(logging.Formatter(CLI_FMT))
    stream_handler.setLevel(constants.QUIET_LOGGING_LEVEL)

    root_logger = logging.getLogger()
    root_logger.setLevel(logging.DEBUG)  # send all records to handlers
    root_logger.addHandler(memory_handler)
    root_logger.addHandler(stream_handler)

    # logging.shutdown will flush the memory handler because flush() and
    # close() are explicitly called
    util.atexit_register(logging.shutdown)
    sys.excepthook = functools.partial(
        pre_arg_parse_except_hook, memory_handler,
        debug='--debug' in sys.argv, log_path=temp_handler.path)
def flush(self, force=False):  # pylint: disable=arguments-differ
    """Flush the buffer if force=True.

    If force=False, this call is a noop.

    :param bool force: True if the buffer should be flushed.
    """
    # This method allows flush() calls in logging.shutdown to be a
    # noop so we can control when this handler is flushed.
    if force:
        if sys.version_info < (2, 7):  # pragma: no cover
            logging.handlers.MemoryHandler.flush(self)
        else:
            super(MemoryHandler, self).flush()
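To see why the force-only flush above matters: logging.shutdown() calls flush() with no arguments on every live handler, so a handler written this way ignores that call and only empties its buffer when something calls flush(force=True). A self-contained sketch of the same idea (the class name and setup here are illustrative, not Certbot's actual code):

import logging
import logging.handlers

class DeferredMemoryHandler(logging.handlers.MemoryHandler):
    """Buffers records; ignores every flush that is not explicitly forced."""

    def flush(self, force=False):
        # logging.shutdown() calls flush() with no arguments, which is a
        # noop here; only flush(force=True) forwards the buffer to target.
        if force:
            logging.handlers.MemoryHandler.flush(self)

target = logging.StreamHandler()
buffered = DeferredMemoryHandler(capacity=10000, target=target)
logging.getLogger().addHandler(buffered)
logging.getLogger().error("buffered, not printed yet")
buffered.flush(force=True)  # now the record reaches the stream handler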
def init_logger():
    """Reload the global logger."""
    global g_logger

    if g_logger is None:
        g_logger = logging.getLogger()
    else:
        logging.shutdown()
        g_logger.handlers = []

    g_logger.setLevel(logging.DEBUG)
def __beforeShutdownCallback(self):
    self.logger.debug("Running shutdown hooks")
    while len(self.__shutdownHooks) > 0:
        func, args, kwargs = self.__shutdownHooks.pop(0)
        if hasattr(func, "im_class"):
            # This is a bound method
            hookName = "%s.%s.%s" % (func.im_class.__module__,
                                     func.im_class.__name__,
                                     func.im_func.__name__)
        else:
            # This is an ordinary function
            hookName = "%s.%s" % (func.__module__, func.__name__)
        self.logger.debug("Calling shutdown hook: %s", hookName)
        func(*args, **kwargs)
def run(self):
    """Reads data from arecord and passes to processors."""
    self._arecord = subprocess.Popen(self._cmd, stdout=subprocess.PIPE)
    logger.info("started recording")

    # Check for race-condition when __exit__ is called at the same time as
    # the process is started by the background thread
    if self._closed:
        self._arecord.kill()
        return

    this_chunk = b''
    while True:
        input_data = self._arecord.stdout.read(self._chunk_bytes)
        if not input_data:
            break

        this_chunk += input_data
        if len(this_chunk) >= self._chunk_bytes:
            self._handle_chunk(this_chunk[:self._chunk_bytes])
            this_chunk = this_chunk[self._chunk_bytes:]

    if not self._closed:
        logger.error('Microphone recorder died unexpectedly, aborting...')
        # sys.exit doesn't work from background threads, so use os._exit as
        # an emergency measure.
        logging.shutdown()
        os._exit(1)  # pylint: disable=protected-access
def stop(self, stoplogging=False):
    log.info('Stopping %s' % SERVER_SOFTWARE)

    self.startstop_lock.acquire()
    try:
        # Stop listeners
        for l in self.listeners:
            l.ready = False

        # Encourage a context switch
        time.sleep(0.01)

        for l in self.listeners:
            if l.isAlive():
                l.join()

        # Stop Monitor
        self._monitor.stop()
        if self._monitor.isAlive():
            self._monitor.join()

        # Stop Worker threads
        self._threadpool.stop()

        if stoplogging:
            logging.shutdown()
            msg = "Calling logging.shutdown() is now the responsibility " \
                  "of the application developer. Please update your " \
                  "applications to no longer call rocket.stop(True)"
            try:
                import warnings
                # DeprecationWarning is a builtin exception; the warnings
                # module does not re-export it.
                raise DeprecationWarning(msg)
            except ImportError:
                raise RuntimeError(msg)
    finally:
        self.startstop_lock.release()
def index_bam(self):
    """
    Indexes a bam using samtools ('samtools index file.bam').

    Returns
    -------
    bool
        True if successful.

    Raises
    ------
    RuntimeError
        If return code from external call is not 0.
    """
    self.logger.info("Indexing bam file: {}".format(self.filepath))
    idx_start = time.time()
    rc = subprocess.call([self.samtools, "index", self.filepath])
    if rc == 0:
        self.logger.info("Indexing complete. Elapsed time: {} seconds".format(
            time.time() - idx_start))
        return True
    else:
        self.logger.error("Unable to index bamfile {}. Exiting".format(
            self.filepath))
        logging.shutdown()
        raise RuntimeError("Unable to index bamfile. Exiting")
def get_chrom_length(self, chrom):
    """
    Extract chromosome length from BAM header.

    Parameters
    ----------
    chrom : str
        The name of the chromosome or scaffold.

    Returns
    -------
    length : int
        The length (integer) of the chromosome/scaffold

    Raises
    ------
    RuntimeError
        If chromosome name not present in bam header
    """
    bamfile = pysam.AlignmentFile(self.filepath, "rb")
    lengths = dict(zip(bamfile.references, bamfile.lengths))
    try:
        lens = lengths[chrom]
        bamfile.close()
        return lens
    except KeyError:
        self.logger.error(
            "{} not present in bam header for {}. Exiting.".format(
                chrom, self.filepath))
        logging.shutdown()
        raise RuntimeError(
            "Chromosome name not present in bam header. Exiting")
def compress_vcf(self):
    """
    Compresses vcf file using bgzip.

    Returns
    -------
    bool
        True if successful

    Raises
    ------
    RuntimeError
        If return code from external call is not 0
    """
    self.logger.info("Compressing vcf file {}".format(self.filepath))
    bgzip_start = time.time()
    rc = subprocess.call([self.bgzip, "-f", self.filepath])
    if rc == 0:
        self.logger.info("Compression complete. Elapsed time: {} seconds".format(
            time.time() - bgzip_start))
        # bgzip replaces file.vcf with file.vcf.gz, so track the new path
        self.filepath = self.filepath + ".gz"
        return True
    else:
        self.logger.error("Unable to compress vcf file: {}. Exiting.".format(
            self.filepath))
        logging.shutdown()
        raise RuntimeError("Unable to compress vcf file. Exiting.")
def index_vcf(self):
    """
    Indexes vcf file using tabix.  If file does not end in .gz, will
    compress with bgzip (by calling self.compress_vcf).

    Note: Files MUST be compressed using bgzip.

    Returns
    -------
    bool
        True if successful.

    Raises
    ------
    RuntimeError
        If return code from external call is not 0.
    """
    self.logger.info("Indexing vcf file: {}".format(self.filepath))
    index_start = time.time()
    rc = subprocess.call([self.tabix, "-f", "-p", "vcf", self.filepath])
    if rc == 0:
        self.logger.info("Indexing complete. Elapsed time: {} seconds.".format(
            time.time() - index_start))
        return True
    else:
        self.logger.error("Unable to index vcf file: {}. Exiting".format(
            self.filepath))
        logging.shutdown()
        raise RuntimeError("Unable to index vcf file. Exiting.")
def index_bwa(self):
    """
    Index reference using bwa.

    Returns
    -------
    bool
        True if successful

    Raises
    ------
    RuntimeError
        If return code from external call is not 0
    """
    self.logger.info("Creating bwa indices for: {}".format(self.filepath))
    bwa_idx_start = time.time()
    rc = subprocess.call([self.bwa, "index", self.filepath])
    if rc == 0:
        self.logger.info(
            "BWA indexing complete. Elapsed time: {} seconds".format(
                time.time() - bwa_idx_start))
        return True
    else:
        self.logger.error(
            "Unable to create bwa indices for {}. Exiting".format(
                self.filepath))
        logging.shutdown()
        raise RuntimeError("Unable to create bwa indices. Exiting")
def get_chrom_length(self, chrom):
    """
    Extract chromosome length from fasta.

    Parameters
    ----------
    chrom : str
        The name of the chromosome or scaffold.

    Returns
    -------
    length : int
        The length (integer) of the chromosome/scaffold

    Raises
    ------
    RuntimeError
        If chromosome name not present in fasta
    """
    fastafile = pysam.FastaFile(self.filepath)
    lengths = dict(zip(fastafile.references, fastafile.lengths))
    try:
        lens = lengths[chrom]
        fastafile.close()
        return lens
    except KeyError:
        self.logger.error(
            "{} not present in {}. Exiting.".format(chrom, self.filepath))
        logging.shutdown()
        raise RuntimeError(
            "Chromosome name not present in fasta. Exiting")