The following 50 code examples, extracted from open source Python projects, illustrate how to use os.wait().
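Before the project examples, here is a minimal sketch of the basic pattern (not taken from any of the projects below): os.wait() blocks until a child process terminates and returns a (pid, status) pair, which the os.WIFEXITED/os.WIFSIGNALED/os.WEXITSTATUS helpers decode.

import os

pid = os.fork()
if pid == 0:
    # child: exit with a recognizable status
    os._exit(3)
else:
    # parent: os.wait() blocks until any child terminates
    child_pid, status = os.wait()
    if os.WIFEXITED(status):
        print(child_pid, 'exited with', os.WEXITSTATUS(status))  # -> 3
    elif os.WIFSIGNALED(status):
        print(child_pid, 'killed by signal', os.WTERMSIG(status))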
def wait_on_children(self):
    """Wait on children exit."""
    while self.running:
        try:
            pid, status = os.wait()
            if os.WIFEXITED(status) or os.WIFSIGNALED(status):
                self._remove_children(pid)
                self._verify_and_respawn_children(pid, status)
        except OSError as err:
            if err.errno not in (errno.EINTR, errno.ECHILD):
                raise
        except KeyboardInterrupt:
            LOG.info(_LI('Caught keyboard interrupt. Exiting.'))
            os.killpg(0, signal.SIGTERM)
            break
        except exception.SIGHUPInterrupt:
            self.reload()
            continue

    eventlet.greenio.shutdown_safe(self.sock)
    self.sock.close()
    LOG.debug('Exited')
def test_dummy_thread_after_fork(self):
    # Issue #14308: a dummy thread in the active list doesn't mess up
    # the after-fork mechanism.
    code = """if 1:
        import thread, threading, os, time

        def background_thread(evt):
            # Creates and registers the _DummyThread instance
            threading.current_thread()
            evt.set()
            time.sleep(10)

        evt = threading.Event()
        thread.start_new_thread(background_thread, (evt,))
        evt.wait()
        assert threading.active_count() == 2, threading.active_count()
        if os.fork() == 0:
            assert threading.active_count() == 1, threading.active_count()
            os._exit(0)
        else:
            os.wait()
    """
    _, out, err = assert_python_ok("-c", code)
    self.assertEqual(out, '')
    self.assertEqual(err, '')
def _run_and_join(self, script):
    script = """if 1:
        import sys, os, time, threading

        # a thread, which waits for the main program to terminate
        def joiningfunc(mainthread):
            mainthread.join()
            print 'end of thread'
    \n""" + script

    p = subprocess.Popen([sys.executable, "-c", script],
                         stdout=subprocess.PIPE)
    rc = p.wait()
    data = p.stdout.read().replace('\r', '')
    p.stdout.close()
    self.assertEqual(data, "end of main\nend of thread\n")
    self.assertFalse(rc == 2, "interpreter was blocked")
    self.assertTrue(rc == 0, "Unexpected error")
def test_reinit_tls_after_fork(self):
    # Issue #13817: fork() would deadlock in a multithreaded program with
    # the ad-hoc TLS implementation.

    def do_fork_and_wait():
        # just fork a child process and wait for it
        pid = os.fork()
        if pid > 0:
            os.waitpid(pid, 0)
        else:
            os._exit(0)

    # start a bunch of threads that will fork() child processes
    threads = []
    for i in range(16):
        t = threading.Thread(target=do_fork_and_wait)
        threads.append(t)
        t.start()

    for t in threads:
        t.join()
def test_dummy_thread_after_fork(self):
    # Issue #14308: a dummy thread in the active list doesn't mess up
    # the after-fork mechanism.
    code = """if 1:
        import _thread, threading, os, time

        def background_thread(evt):
            # Creates and registers the _DummyThread instance
            threading.current_thread()
            evt.set()
            time.sleep(10)

        evt = threading.Event()
        _thread.start_new_thread(background_thread, (evt,))
        evt.wait()
        assert threading.active_count() == 2, threading.active_count()
        if os.fork() == 0:
            assert threading.active_count() == 1, threading.active_count()
            os._exit(0)
        else:
            os.wait()
    """
    _, out, err = assert_python_ok("-c", code)
    self.assertEqual(out, b'')
    self.assertEqual(err, b'')
def write_multi_process(self, child_count):
    log.info("Generating pages using %d child processes", child_count)

    pages = list(self.site.pages.values())
    # From http://code.activestate.com/recipes/576785-partition-an-iterable-into-n-lists/
    chunks = [pages[i::child_count] for i in range(child_count)]

    print(len(pages))
    for c in chunks:
        print(len(c))

    import sys
    pids = set()
    for chunk in chunks:
        pid = os.fork()
        if pid == 0:
            self.write_pages(chunk)
            sys.exit(0)
        else:
            pids.add(pid)

    while pids:
        (pid, status) = os.wait()
        pids.discard(pid)
def _run_and_join(self, script):
    script = """if 1:
        import sys, os, time, threading

        # a thread, which waits for the main program to terminate
        def joiningfunc(mainthread):
            mainthread.join()
            print 'end of thread'
            # stdout is fully buffered because not a tty, we have to flush
            # before exit.
            sys.stdout.flush()
    \n""" + script

    p = subprocess.Popen([sys.executable, "-c", script],
                         stdout=subprocess.PIPE)
    rc = p.wait()
    data = p.stdout.read().replace('\r', '')
    p.stdout.close()
    self.assertEqual(data, "end of main\nend of thread\n")
    self.assertFalse(rc == 2, "interpreter was blocked")
    self.assertTrue(rc == 0, "Unexpected error")
def test_arp_ping(self):
    """test ARP ping - compare to the arping utility"""
    arp_ping = Ping(IFACE, ARP_NAME, TIMEOUT, False)
    for ip in list(ipaddress.ip_network(TEST_NETWORK).hosts())[:5]:
        try:
            # need arping installed
            with os.popen('arping -c {} -t {} {}'.format(COUNT, TIMEOUT, str(ip)), 'r'):
                # get exit code
                ec = os.wait()[1] & 0xFF00
                res = arp_ping.ping_host(str(ip))
        except PermissionException:
            print('Need root privileges')
        if res[STATUS_INDEX] == ONLINE:
            self.assertTrue(ec == 0)
        else:
            self.assertFalse(ec == 0)
def test_icmp_ping(self):
    """test ICMP ping - compare to the ping utility"""
    icmp_ping = Ping(IFACE, ICMP_NAME, TIMEOUT, False)
    for ip in list(ipaddress.ip_network(TEST_NETWORK).hosts())[:5]:
        try:
            # need ping installed
            with os.popen('ping -c {} -t {} {}'.format(COUNT, TIMEOUT, str(ip)), 'r'):
                # get exit code
                ec = os.wait()[1] & 0xFF00
                res = icmp_ping.ping_host(str(ip))
        except PermissionException:
            print('Need root privileges')
        if res[STATUS_INDEX] == ONLINE:
            self.assertTrue(ec == 0)
        else:
            self.assertFalse(ec == 0)
def grim_reaper(signum, frame):
    while True:
        try:
            # Why waitpid() instead of os.wait()?
            # os.wait() blocks until some child process terminates, which
            # would hang this handler if no child has exited yet.
            # waitpid() with the WNOHANG option returns immediately,
            # reporting pid == 0 when no terminated child is available,
            # so all finished children can be reaped without blocking.
            pid, status = os.waitpid(
                -1,          # wait for any child process
                os.WNOHANG   # do not block if no child has exited
            )
        except OSError:
            return

        if pid == 0:  # no more zombies
            return
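The comments above contrast blocking os.wait() with non-blocking os.waitpid(-1, os.WNOHANG). A minimal sketch of how such a reaper is typically wired up as a SIGCHLD handler; the registration and the fork below are illustrative assumptions, not code from the project above:

import os
import signal
import time

# assumes grim_reaper from the example above is in scope
signal.signal(signal.SIGCHLD, grim_reaper)

pid = os.fork()
if pid == 0:
    os._exit(0)      # child exits; parent receives SIGCHLD
else:
    time.sleep(0.1)  # handler runs and reaps the child without blocking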
def workers(master_host, master_port, relay_socket_path, num_workers):
    # Start the relay
    master_redis_cfg = {'host': master_host, 'port': master_port}
    relay_redis_cfg = {'unix_socket_path': relay_socket_path}
    if os.fork() == 0:
        RelayClient(master_redis_cfg, relay_redis_cfg).run()
        return

    # Start the workers
    noise = SharedNoiseTable()  # Workers share the same noise
    num_workers = num_workers if num_workers else os.cpu_count()
    logging.info('Spawning {} workers'.format(num_workers))
    for _ in range(num_workers):
        if os.fork() == 0:
            run_worker(relay_redis_cfg, noise=noise)
            return
    os.wait()
def _join_daemon(self):
    try:
        try:
            # Mac, UNIX
            os.wait()
        except AttributeError:
            # Windows
            try:
                pid = self.get_pid()
            except IOError:
                # Assume the subprocess deleted the pidfile on shutdown.
                pass
            else:
                os.waitpid(pid, 0)
    except OSError:
        x = sys.exc_info()[1]
        if x.args != (10, 'No child processes'):
            raise
def get_asynchronous_eventlet_pool(size=1000):
    """Return eventlet pool to caller.

    Also store pools created in global list, to wait on it
    after getting signal for graceful shutdown.

    :param size: eventlet pool size
    :returns: eventlet pool
    """
    global ASYNC_EVENTLET_THREAD_POOL_LIST

    pool = eventlet.GreenPool(size=size)
    # Add pool to global ASYNC_EVENTLET_THREAD_POOL_LIST
    ASYNC_EVENTLET_THREAD_POOL_LIST.append(pool)

    return pool
def wait_on_children(self):
    while self.running:
        try:
            pid, status = os.wait()
            if os.WIFEXITED(status) or os.WIFSIGNALED(status):
                self._remove_children(pid)
                self._verify_and_respawn_children(pid, status)
        except OSError as err:
            if err.errno not in (errno.EINTR, errno.ECHILD):
                raise
        except KeyboardInterrupt:
            LOG.info('Caught keyboard interrupt. Exiting.')
            break
        except glare_exc.SIGHUPInterrupt:
            self.reload()
            continue

    eventlet.greenio.shutdown_safe(self.sock)
    self.sock.close()
    LOG.debug('Exited')
def start_server():
    run_daemon()
    try:
        pid = os.fork()
    except OSError, e:
        logging.error('start http server is error.')
        os._exit(1)

    if pid == 0:
        start_server_in_subprocess()
        return

    while pid:
        # os.wait() blocks here until a child process exits
        ret = os.wait()
        logging.error('Child process already stop, try to start')
        try:
            pid = os.fork()
            if pid == 0:
                start_server_in_subprocess()
                return
        except OSError, e:
            logging.error('start http server is error.')
            os._exit(1)
def run():
    # we don't care to be notified of our children's exit statuses.
    # this prevents zombie processes from cluttering up the process
    # table when zopectl start/stop is used interactively.
    # DM 2004-11-26: from the Linux "execve(2)" manual page:
    #   Any signals set to be caught by the calling process are reset
    #   to their default behaviour.
    #   The SIGCHLD signal (when set to SIG_IGN) may or may not be reset
    #   to SIG_DFL.
    # If it is not reset, 'os.wait[pid]' can non-deterministically fail.
    # Thus, use a way such that "SIGCHLD" is definitely reset in children.
    # signal.signal(signal.SIGCHLD, signal.SIG_IGN)
    if not WIN and os.uname()[0] != 'Darwin':
        # On Windows the os.uname method does not exist.
        # On Mac OS X, setting up a signal handler causes waitpid to
        # raise EINTR, which is not preventable via the Python signal
        # handler API and can't be dealt with properly as we can't pass
        # the SA_RESTART to the signal API. Since Mac OS X doesn't
        # appear to clutter up the process table with zombies if
        # SIGCHILD is unset, just don't bother registering a SIGCHILD
        # signal handler at all.
        signal.signal(signal.SIGCHLD, _ignoreSIGCHLD)
    exitstatus = main()
    sys.exit(exitstatus)
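The comment block above warns that os.wait() can fail when SIGCHLD is set to SIG_IGN. A minimal sketch of that failure mode, assuming Linux semantics (an ignored SIGCHLD makes the kernel reap children automatically, so wait() eventually fails with ECHILD; as the comments note, this behavior is platform-dependent):

import errno
import os
import signal

signal.signal(signal.SIGCHLD, signal.SIG_IGN)  # children are auto-reaped

pid = os.fork()
if pid == 0:
    os._exit(0)

try:
    os.wait()
except OSError as e:
    # On Linux, wait() blocks until all children are gone, then fails
    assert e.errno == errno.ECHILD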
def _handleChildren(self):
    while not self._close.isSet():
        try:
            os.wait()
        except:
            time.sleep(0.5)
def watch(self):
    try:
        os.wait()
    except KeyboardInterrupt:
        # I put the capital B in KeyBoardInterrupt so I can
        # tell when the Watcher gets the SIGINT
        print 'KeyBoardInterrupt'
        self.kill()
    sys.exit()
def f(path):
    "one file object + forking"
    with lockpath.keeping(path) as file:
        if os.fork():
            os.wait()
        else:
            lockfile(file)
def F(path):
    "separate file objects + forking"
    with lockpath.keeping(path):
        if os.fork():
            os.wait()
        else:
            lockpath(path)
def wait_for_exit(self, raise_error=True):
    """Returns a `.Future` which resolves when the process exits.

    Usage::

        ret = yield proc.wait_for_exit()

    This is a coroutine-friendly alternative to `set_exit_callback`
    (and a replacement for the blocking `subprocess.Popen.wait`).

    By default, raises `subprocess.CalledProcessError` if the process
    has a non-zero exit status. Use ``wait_for_exit(raise_error=False)``
    to suppress this behavior and return the exit status without raising.

    .. versionadded:: 4.2
    """
    future = Future()

    def callback(ret):
        if ret != 0 and raise_error:
            # Unfortunately we don't have the original args any more.
            future.set_exception(CalledProcessError(ret, None))
        else:
            future.set_result(ret)

    self.set_exit_callback(callback)
    return future
def chunkify(self, gen):
    bs = self.bs
    chunks = self.chunks()
    chunk = chunks.next()
    pos = 0
    for src in gen:
        srctype = type(src)
        src = memoryview(src) if srctype in (str, buffer, bytearray, memoryview) else memoryview(str(src))
        slen = len(src)
        try:
            # fast append
            chunk.payload[pos:pos + slen] = src
            pos += slen
        except ValueError:
            # oops - too big - slice & dice
            soff = bs - pos
            # pad buffer out to end using first n bytes from src
            chunk.payload[pos:bs] = src[0:soff]
            yield chunk
            chunk = chunks.next()
            pos = 0
            # then carve off full blocks directly from src
            while soff + bs <= slen:
                chunk.payload[0:bs] = src[soff:soff+bs]
                yield chunk
                chunk = chunks.next()
                soff += bs
            # and stash the remainder
            pos = slen - soff
            chunk.payload[0:pos] = src[soff:soff+pos]
    if pos:
        yield chunk(pos)

# because every multiprocessing.Process().start() very helpfully
# does a waitpid(WNOHANG) across all known children, and I want
# to use os.wait() to catch exiting children
def test_negotiate(self, group=14):
    server = socket.socket()
    server.bind(('', 0))
    server.listen(1)
    port = server.getsockname()[1]

    pid = os.fork()
    # child process - aka, the server
    if pid == 0:
        sock, _ = server.accept()
        server.close()
    # parent - aka, the client
    else:
        sock = socket.socket()
        sock.connect(('', port))
        server.close()

    alice = pyDHE.new(group)
    local_key = alice.negotiate(sock)
    #sock.close()

    if pid == 0:
        sock.send(long_to_bytes(local_key))
        sock.close()
    else:
        os.wait()
        remote_key = bytes_to_long(sock.recv(1024))
        sock.close()
        self.assertEqual(local_key, remote_key, "keys do not match")
def send_await(self, msg, deadline=None):
    """Send `msg` and wait for a response with an optional timeout."""
    receiver = self.send_async(msg)
    response = receiver.get_data(deadline)
    IOLOG.debug('%r._send_await() -> %r', self, response)
    return response
def _setup_master(self, profiling, parent_id, context_id, in_fd, out_fd):
    if profiling:
        enable_profiling()
    self.broker = Broker()
    self.router = Router(self.broker)
    self.router.add_handler(self._on_shutdown_msg, SHUTDOWN)
    self.master = Context(self.router, 0, 'master')
    if parent_id == 0:
        self.parent = self.master
    else:
        self.parent = Context(self.router, parent_id, 'parent')

    self.channel = Receiver(self.router, CALL_FUNCTION)
    self.stream = Stream(self.router, parent_id)
    self.stream.name = 'parent'
    self.stream.accept(in_fd, out_fd)
    self.stream.receive_side.keep_alive = False

    listen(self.broker, 'shutdown', self._on_broker_shutdown)
    listen(self.broker, 'exit', self._on_broker_exit)

    os.close(in_fd)
    try:
        os.wait()  # Reap first stage.
    except OSError:
        pass  # No first stage exists (e.g. fakessh)
def wait(self):
    """Wait until all servers have completed running."""
    try:
        if self.children:
            self.wait_on_children()
        else:
            self.pool.waitall()
    except KeyboardInterrupt:
        pass
def __init__(self, job_state=None):
    Job.__init__(self)
    self.job_state = job_state
    self.procs = []
    self.pids = []  # pids in order
    self.pipe_status = []  # status in order
    self.status = -1  # for 'wait' jobs
def __init__(self):
    # pid -> Job instance
    # A pipeline that is backgrounded is always run in a SubProgramThunk?
    # So you can wait for it once?
    self.jobs = {}
def AllDone(self):
    """Test if all jobs are done.  Used by 'wait' builtin."""
    for job in self.jobs.itervalues():
        if job.State() != ProcessState.Done:
            return False
    return True
def __init__(self):
    self.callbacks = {}  # pid -> callback
    self.last_status = 127  # wait -n error code
def Wait(self):
    # This is a list of async jobs
    try:
        pid, status = os.wait()
    except OSError as e:
        if e.errno == errno.ECHILD:
            #log('WAIT ECHILD')
            return False  # nothing to wait for; caller should stop
        else:
            # What else can go wrong?
            raise

    #log('WAIT got %s %s', pid, status)

    # TODO: change status in more cases.
    if os.WIFSIGNALED(status):
        pass
    elif os.WIFEXITED(status):
        status = os.WEXITSTATUS(status)
        #log('exit status: %s', status)

    # This could happen via coding error.  But this may legitimately happen
    # if a grandchild outlives the child (its parent).  Then it is reparented
    # under this process, so we might receive notification of its exit, even
    # though we didn't start it.  We can't have any knowledge of such
    # processes, so print a warning.
    if pid not in self.callbacks:
        util.warn("PID %d stopped, but osh didn't start it", pid)
        return True  # caller should keep waiting

    callback = self.callbacks.pop(pid)
    callback(pid, status)
    self.last_status = status  # for wait -n

    return True  # caller should keep waiting
def test_1_join_on_shutdown(self):
    # The usual case: on exit, wait for a non-daemon thread
    script = """if 1:
        import os
        t = threading.Thread(target=joiningfunc,
                             args=(threading.current_thread(),))
        t.start()
        time.sleep(0.1)
        print 'end of main'
        """
    self._run_and_join(script)
def assertScriptHasOutput(self, script, expected_output):
    p = subprocess.Popen([sys.executable, "-c", script],
                         stdout=subprocess.PIPE)
    rc = p.wait()
    data = p.stdout.read().decode().replace('\r', '')
    self.assertEqual(rc, 0, "Unexpected error")
    self.assertEqual(data, expected_output)
def test_ident_of_no_threading_threads(self):
    # The ident still must work for the main thread and dummy threads.
    self.assertIsNotNone(threading.currentThread().ident)

    def f():
        ident.append(threading.currentThread().ident)
        done.set()

    done = threading.Event()
    ident = []
    thread.start_new_thread(f, ())
    done.wait()
    self.assertIsNotNone(ident[0])
    # Kill the "immortal" _DummyThread
    del threading._active[ident[0]]

# run with a small(ish) thread stack size (256kB)
def get_fs_type(path):
    cmd = ['/usr/bin/stat', '-f', '-L', '-c', '%T', path]
    p = subprocess.Popen(cmd, shell=False, stdout=subprocess.PIPE,
                         universal_newlines=True)
    p.wait()
    with p.stdout as f:
        return f.readline().strip()
def update_config_from_file(config_opts, config_file, uid_manager):
    config_file = os.path.realpath(config_file)
    r_pipe, w_pipe = os.pipe()
    if os.fork() == 0:
        try:
            os.close(r_pipe)
            if uid_manager and not all(getresuid()):
                uid_manager.dropPrivsForever()
            include(config_file, config_opts)
            with os.fdopen(w_pipe, 'wb') as writer:
                pickle.dump(config_opts, writer)
        except:
            import traceback
            etype, evalue, raw_tb = sys.exc_info()
            tb = traceback.extract_tb(raw_tb)
            tb = [entry for entry in tb if entry[0] == config_file]
            print('\n'.join(traceback.format_list(tb)), file=sys.stderr)
            print('\n'.join(traceback.format_exception_only(etype, evalue)),
                  file=sys.stderr)
            sys.exit(1)
        sys.exit(0)
    else:
        os.close(w_pipe)
        with os.fdopen(r_pipe, 'rb') as reader:
            while True:
                try:
                    new_config = reader.read()
                    break
                except OSError as e:
                    if e.errno != errno.EINTR:
                        raise
            _, ret = os.wait()
            if ret != 0:
                raise exception.ConfigError('Error in configuration')
            if new_config:
                config_opts.update(pickle.loads(new_config))
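The example above combines fork, a pipe, pickle, and os.wait() so the child can run risky code in isolation and report the result back. A stripped-down sketch of the same round trip, with a hypothetical payload standing in for the parsed configuration:

import os
import pickle

r_pipe, w_pipe = os.pipe()
if os.fork() == 0:
    os.close(r_pipe)
    with os.fdopen(w_pipe, 'wb') as writer:
        pickle.dump({'answer': 42}, writer)   # child ships its result
    os._exit(0)
else:
    os.close(w_pipe)
    with os.fdopen(r_pipe, 'rb') as reader:
        data = reader.read()
    _, status = os.wait()                     # reap the child
    if status == 0 and data:
        print(pickle.loads(data))             # -> {'answer': 42}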