The following code examples, extracted from open-source Python projects, illustrate how errno.EMFILE is used in practice.
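Before the project-specific examples, here is a minimal, self-contained sketch (not taken from any of the projects below) of how EMFILE typically surfaces: the per-process file-descriptor limit is lowered with the resource module (Unix-only), descriptors are opened until the OS refuses, and the resulting OSError is checked for errno.EMFILE. The soft-limit value of 16 is an arbitrary illustrative choice.

    import errno
    import os
    import resource

    # Lower the soft RLIMIT_NOFILE so the process runs out of descriptors
    # quickly (illustrative value; the original limit is restored below).
    soft, hard = resource.getrlimit(resource.RLIMIT_NOFILE)
    resource.setrlimit(resource.RLIMIT_NOFILE, (16, hard))

    fds = []
    try:
        while True:
            fds.append(os.open(os.devnull, os.O_RDONLY))
    except OSError as e:
        if e.errno == errno.EMFILE:
            print("hit the descriptor limit after %d opens" % len(fds))
        else:
            raise
    finally:
        for fd in fds:
            os.close(fd)
        resource.setrlimit(resource.RLIMIT_NOFILE, (soft, hard))

The same pattern, comparing OSError.errno against errno.EMFILE and then retrying, backing off, or re-raising, is what the extracted examples below implement in their respective projects.
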
async def test_serve_listeners_accept_capacity_error(autojump_clock, caplog):
    listener = MemoryListener()

    async def raise_EMFILE():
        raise OSError(errno.EMFILE, "out of file descriptors")

    listener.accept_hook = raise_EMFILE

    # It retries every 100 ms, so in 950 ms it will retry at 0, 100, ..., 900
    # = 10 times total
    with trio.move_on_after(0.950):
        await trio.serve_listeners(None, [listener])

    assert len(caplog.records) == 10
    for record in caplog.records:
        assert "retrying" in record.msg
        assert record.exc_info[1].errno == errno.EMFILE

def test_fork_error(self, monkeypatch):
    def critical(*args):
        called.times += 1
        called.args = args

    logger = Mock()
    called = MockUtils.Placeholder()
    logger.critical = critical
    accept_responses = [
        lambda: MockUtils.raise_(socket.error(errno.EMFILE, "fatal error message")),
        lambda: (MockSocket.create(), ('some-address', 7000))
    ]
    monkeypatch.setattr('relaax.server.rlx_server.rlx_port.log', logger)
    if sys.platform != 'win32':
        monkeypatch.setattr(os, 'fork', lambda: MockUtils.raise_(OSError('can\'t fork')))
    monkeypatch.setattr(socket, 'socket', lambda af, st: self.socket)
    self.socket.accept = lambda: accept_responses.pop()()
    try:
        RLXPort.listen(('localhost', 7000))
        assert False
    except Exception as e:
        if sys.platform != 'win32':
            assert called.args == ("Can't start child process ('some-address', 7000): can't fork",)
            assert called.times == 1
        assert str(e) == '[Errno %d] fatal error message' % errno.EMFILE

def test_urandom_failure(self):
    # Check urandom() failing when it is not able to open /dev/random.
    # We spawn a new process to make the test more robust (if getrlimit()
    # failed to restore the file descriptor limit after this, the whole
    # test suite would crash; this actually happened on the OS X Tiger
    # buildbot).
    code = """if 1:
        import errno
        import os
        import resource

        soft_limit, hard_limit = resource.getrlimit(resource.RLIMIT_NOFILE)
        resource.setrlimit(resource.RLIMIT_NOFILE, (1, hard_limit))
        try:
            os.urandom(16)
        except OSError as e:
            assert e.errno == errno.EMFILE, e.errno
        else:
            raise AssertionError("OSError not raised")
        """
    assert_python_ok('-c', code)

def _accept_connection(self, protocol_factory, sock,
                       sslcontext=None, server=None):
    try:
        conn, addr = sock.accept()
        if self._debug:
            logger.debug("%r got a new connection from %r: %r",
                         server, addr, conn)
        conn.setblocking(False)
    except (BlockingIOError, InterruptedError, ConnectionAbortedError):
        pass  # False alarm.
    except OSError as exc:
        # There's nowhere to send the error, so just log it.
        if exc.errno in (errno.EMFILE, errno.ENFILE,
                         errno.ENOBUFS, errno.ENOMEM):
            # Some platforms (e.g. Linux) keep reporting the FD as
            # ready, so we remove the read handler temporarily.
            # We'll try again in a while.
            self.call_exception_handler({
                'message': 'socket.accept() out of system resource',
                'exception': exc,
                'socket': sock,
            })
            self.remove_reader(sock.fileno())
            self.call_later(constants.ACCEPT_RETRY_DELAY,
                            self._start_serving,
                            protocol_factory, sock, sslcontext, server)
        else:
            raise  # The event loop will catch, log and ignore it.
    else:
        extra = {'peername': addr}
        accept = self._accept_connection2(protocol_factory, conn, extra,
                                          sslcontext, server)
        self.create_task(accept)

def test_accept_connection_exception(self, m_log):
    sock = mock.Mock()
    sock.fileno.return_value = 10
    sock.accept.side_effect = OSError(errno.EMFILE, 'Too many open files')
    self.loop.remove_reader = mock.Mock()
    self.loop.call_later = mock.Mock()

    self.loop._accept_connection(MyProto, sock)
    self.assertTrue(m_log.error.called)
    self.assertFalse(sock.close.called)
    self.loop.remove_reader.assert_called_with(10)
    self.loop.call_later.assert_called_with(
        constants.ACCEPT_RETRY_DELAY,
        # self.loop._start_serving
        mock.ANY,
        MyProto, sock, None, None)

def handle_accept_socket_exeption(cls, error):
    if error.errno in (errno.EWOULDBLOCK, errno.EAGAIN):
        # Try again
        return True  # continue accept loop
    elif error.errno == errno.EPERM:
        # Netfilter on Linux may have rejected the
        # connection, but we get told to try to accept()
        # anyway.
        return True  # continue accept loop
    elif error.errno in (errno.EMFILE, errno.ENOBUFS, errno.ENFILE,
                         errno.ENOMEM, errno.ECONNABORTED):
        # Linux gives EMFILE when a process is not allowed to
        # allocate any more file descriptors.  *BSD and Win32
        # give (WSA)ENOBUFS.  Linux can also give ENFILE if the
        # system is out of inodes, or ENOMEM if there is
        # insufficient memory to allocate a new dentry.
        # ECONNABORTED is documented as possible on all
        # relevant platforms (Linux, Windows, macOS, and the
        # BSDs) but occurs only on the BSDs.  It occurs when a
        # client sends a FIN or RST after the server sends a
        # SYN|ACK but before application code calls accept(2).
        # On Linux, calling accept(2) on such a listener
        # returns a connection that fails as though it were
        # terminated after being fully established.  This
        # appears to be an implementation choice (see
        # inet_accept in inet/ipv4/af_inet.c).  On macOS,
        # such a listener is not considered readable, so
        # accept(2) will never be called.  Calling accept(2) on
        # such a listener, however, does not return at all.
        log.error("Could not accept new connection (%s)" % error.strerror)
        return False  # break accept loop

def test_no_leaking(self):
    # Make sure we leak no resources
    if not mswindows:
        max_handles = 1026  # too much for most UNIX systems
    else:
        max_handles = 2050  # too much for (at least some) Windows setups
    handles = []
    tmpdir = tempfile.mkdtemp()
    try:
        for i in range(max_handles):
            try:
                tmpfile = os.path.join(tmpdir, support.TESTFN)
                handles.append(os.open(tmpfile, os.O_WRONLY|os.O_CREAT))
            except OSError as e:
                if e.errno != errno.EMFILE:
                    raise
                break
        else:
            self.skipTest("failed to reach the file descriptor limit "
                          "(tried %d)" % max_handles)
        # Close a couple of them (should be enough for a subprocess)
        for i in range(10):
            os.close(handles.pop())
        # Loop creating some subprocesses. If one of them leaks some fds,
        # the next loop iteration will fail by reaching the max fd limit.
        for i in range(15):
            p = subprocess.Popen([sys.executable, "-c",
                                  "import sys;"
                                  "sys.stdout.write(sys.stdin.read())"],
                                 stdin=subprocess.PIPE,
                                 stdout=subprocess.PIPE,
                                 stderr=subprocess.PIPE)
            data = p.communicate(b"lime")[0]
            self.assertEqual(data, b"lime")
    finally:
        for h in handles:
            os.close(h)
        shutil.rmtree(tmpdir)

def test_no_leaking(self):
    # Make sure we leak no resources
    if not mswindows:
        max_handles = 1026  # too much for most UNIX systems
    else:
        max_handles = 2050  # too much for (at least some) Windows setups
    handles = []
    try:
        for i in range(max_handles):
            try:
                handles.append(os.open(test_support.TESTFN,
                                       os.O_WRONLY | os.O_CREAT))
            except OSError as e:
                if e.errno != errno.EMFILE:
                    raise
                break
        else:
            self.skipTest("failed to reach the file descriptor limit "
                          "(tried %d)" % max_handles)
        # Close a couple of them (should be enough for a subprocess)
        for i in range(10):
            os.close(handles.pop())
        # Loop creating some subprocesses. If one of them leaks some fds,
        # the next loop iteration will fail by reaching the max fd limit.
        for i in range(15):
            p = subprocess.Popen([sys.executable, "-c",
                                  "import sys;"
                                  "sys.stdout.write(sys.stdin.read())"],
                                 stdin=subprocess.PIPE,
                                 stdout=subprocess.PIPE,
                                 stderr=subprocess.PIPE)
            data = p.communicate(b"lime")[0]
            self.assertEqual(data, b"lime")
    finally:
        for h in handles:
            os.close(h)
        test_support.unlink(test_support.TESTFN)

def test_fd_leak(self):
    # Since we're opening a lot of FDs, we must be careful to avoid leaks:
    # we both check that calling fwalk() a large number of times doesn't
    # yield EMFILE, and that the minimum allocated FD hasn't changed.
    minfd = os.dup(1)
    os.close(minfd)
    for i in range(256):
        for x in os.fwalk(support.TESTFN):
            pass
    newfd = os.dup(1)
    self.addCleanup(os.close, newfd)
    self.assertEqual(newfd, minfd)

def _raise_error():
    """
    Raises errors for inotify failures.
    """
    err = ctypes.get_errno()
    if err == errno.ENOSPC:
        raise OSError("inotify watch limit reached")
    elif err == errno.EMFILE:
        raise OSError("inotify instance limit reached")
    else:
        raise OSError(os.strerror(err))

def use_fds(self):
    fds = []
    try:
        while True:
            fds.append(os.open(getattr(os, "devnull", "/dev/null"),
                               os.O_RDONLY))
    except OSError as e:
        if e.errno != errno.EMFILE:
            raise e
        else:
            return fds

def _accept_connection(
        self, protocol_factory, sock,
        sslcontext=None, server=None, backlog=100):
    # This method is only called once for each event loop tick where the
    # listening socket has triggered an EVENT_READ. There may be multiple
    # connections waiting for an .accept() so it is called in a loop.
    # See https://bugs.python.org/issue27906 for more details.
    for _ in range(backlog):
        try:
            conn, addr = sock.accept()
            if self._debug:
                logger.debug("%r got a new connection from %r: %r",
                             server, addr, conn)
            conn.setblocking(False)
        except (BlockingIOError, InterruptedError, ConnectionAbortedError):
            # Early exit because the socket accept buffer is empty.
            return None
        except OSError as exc:
            # There's nowhere to send the error, so just log it.
            if exc.errno in (errno.EMFILE, errno.ENFILE,
                             errno.ENOBUFS, errno.ENOMEM):
                # Some platforms (e.g. Linux) keep reporting the FD as
                # ready, so we remove the read handler temporarily.
                # We'll try again in a while.
                self.call_exception_handler({
                    'message': 'socket.accept() out of system resource',
                    'exception': exc,
                    'socket': sock,
                })
                self._remove_reader(sock.fileno())
                self.call_later(constants.ACCEPT_RETRY_DELAY,
                                self._start_serving,
                                protocol_factory, sock, sslcontext, server,
                                backlog)
            else:
                raise  # The event loop will catch, log and ignore it.
        else:
            extra = {'peername': addr}
            accept = self._accept_connection2(protocol_factory, conn, extra,
                                              sslcontext, server)
            self.create_task(accept)

def _accept_connection(self, protocol_factory, sock,
                       sslcontext=None, server=None):
    try:
        conn, addr = sock.accept()
        if self._debug:
            logger.debug("%r got a new connection from %r: %r",
                         server, addr, conn)
        conn.setblocking(False)
    except (BlockingIOError, InterruptedError, ConnectionAbortedError):
        pass  # False alarm.
    except OSError as exc:
        # There's nowhere to send the error, so just log it.
        # TODO: Someone will want an error handler for this.
        if exc.errno in (errno.EMFILE, errno.ENFILE,
                         errno.ENOBUFS, errno.ENOMEM):
            # Some platforms (e.g. Linux) keep reporting the FD as
            # ready, so we remove the read handler temporarily.
            # We'll try again in a while.
            self.call_exception_handler({
                'message': 'socket.accept() out of system resource',
                'exception': exc,
                'socket': sock,
            })
            self.remove_reader(sock.fileno())
            self.call_later(constants.ACCEPT_RETRY_DELAY,
                            self._start_serving,
                            protocol_factory, sock, sslcontext, server)
        else:
            raise  # The event loop will catch, log and ignore it.
    else:
        if sslcontext:
            self._make_ssl_transport(
                conn, protocol_factory(), sslcontext, None,
                server_side=True, extra={'peername': addr}, server=server)
        else:
            self._make_socket_transport(
                conn, protocol_factory(), extra={'peername': addr},
                server=server)
        # It's now up to the protocol to handle the connection.

async def test_SocketListener_accept_errors():
    class FakeSocket(tsocket.SocketType):
        def __init__(self, events):
            self._events = iter(events)

        type = tsocket.SOCK_STREAM

        # Fool the check for SO_ACCEPTCONN in SocketListener.__init__
        def getsockopt(self, level, opt):
            return True

        def setsockopt(self, level, opt, value):
            pass

        # Fool the check for connection in SocketStream.__init__
        def getpeername(self):
            pass

        async def accept(self):
            await _core.checkpoint()
            event = next(self._events)
            if isinstance(event, BaseException):
                raise event
            else:
                return event, None

    fake_server_sock = FakeSocket([])

    fake_listen_sock = FakeSocket(
        [
            OSError(errno.ECONNABORTED, "Connection aborted"),
            OSError(errno.EPERM, "Permission denied"),
            OSError(errno.EPROTO, "Bad protocol"),
            fake_server_sock,
            OSError(errno.EMFILE, "Out of file descriptors"),
            OSError(errno.EFAULT, "attempt to write to read-only memory"),
            OSError(errno.ENOBUFS, "out of buffers"),
            fake_server_sock,
        ]
    )

    l = SocketListener(fake_listen_sock)

    with assert_checkpoints():
        s = await l.accept()
    assert s.socket is fake_server_sock

    for code in [errno.EMFILE, errno.EFAULT, errno.ENOBUFS]:
        with assert_checkpoints():
            with pytest.raises(OSError) as excinfo:
                await l.accept()
        assert excinfo.value.errno == code

    with assert_checkpoints():
        s = await l.accept()
    assert s.socket is fake_server_sock