我们从Python开源项目中,提取了以下50个代码示例,用于说明如何使用gevent.socket.error()。
def handle_read(self):
    """Pump the socket: keep receiving into the IO buffer until the
    connection errors out or the server closes it."""
    while True:
        try:
            chunk = self._socket.recv(self.in_buffer_size)
            self._iobuf.write(chunk)
        except socket.error as err:
            log.debug("Exception in read for %s: %s", self, err)
            self.defunct(err)
            return  # leave the read loop
        # An empty chunk means EOF; otherwise hand buffered bytes downstream.
        if chunk and self._iobuf.tell():
            self.process_io_buffer()
        else:
            log.debug("Connection %s closed by server", self)
            self.close()
            return
def _reconnect(self):
    """Try to re-establish the control connection once; on failure,
    schedule background retries via a reconnection handler.

    Raises: re-raises any non-NoHostAvailable error after logging it.
    """
    log.debug("[control connection] Attempting to reconnect")
    try:
        self._set_new_connection(self._reconnect_internal())
    except NoHostAvailable:
        # make a retry schedule (which includes backoff)
        schedule = self._cluster.reconnection_policy.new_schedule()
        with self._reconnection_lock:
            # cancel existing reconnection attempts
            if self._reconnection_handler:
                self._reconnection_handler.cancel()
            # when a connection is successfully made, _set_new_connection
            # will be called with the new connection and then our
            # _reconnection_handler will be cleared out
            self._reconnection_handler = _ControlReconnectionHandler(
                self, self._cluster.scheduler, schedule,
                self._get_and_set_reconnection_handler,
                new_handler=None)
            self._reconnection_handler.start()
    except Exception:
        log.debug("[control connection] error reconnecting", exc_info=True)
        raise
def add_errback(self, fn, *args, **kwargs):
    """
    Like :meth:`.add_callback()`, but handles error cases.
    An Exception instance will be passed as the first positional argument
    to `fn`.
    """
    # Register under the lock first, then decide whether to fire
    # immediately. Registration must happen even when we invoke right
    # away so that racing calls (e.g. start_fetching_next_page resetting
    # _final_exception) still see this errback.
    with self._callback_lock:
        self._errbacks.append((fn, args, kwargs))
        fire_immediately = bool(self._final_exception)
    if fire_immediately:
        fn(self._final_exception, *args, **kwargs)
    return self
def connect(self) -> bool:
    """
    Establish a long running connection to EPMD, will not return until
    the connection has been established.

    :return: True
    """
    while True:
        endpoint = (self.host_, self.port_)
        print("EPMD: Connecting %s:%d" % (self.host_, self.port_))
        try:
            self.sock_ = socket.create_connection(address=endpoint,
                                                  timeout=5.0)
        except socket.error as err:
            # retry forever, pausing between attempts
            print("EPMD: connection error:", err)
            gevent.sleep(5)
        else:
            break  # the connect loop
    print("EPMD: Socket connected")
    return True
def _read_alive2_reply(self) -> int: """ Read reply from ALIVE2 request, check the result code, read creation :return: Creation value if all is well, connection remains on. On error returns -1 """ # Reply will be [121,0,Creation:16] for OK, otherwise [121,Error] reply = self.sock_.recv(2) if not reply: print("EPMD: ALIVE2 Read error. Closed?", reply) return -1 if reply[1] == 0: cr = self.sock_.recv(2) (creation,) = struct.unpack(">H", cr) return creation print("EPMD: ALIVE2 returned error", reply[1]) return -1
def _parse_for_errors(soap_response): if soap_response.status == 500: response_data = soap_response.read() try: err_dom = parseString(response_data) err_code = _node_val(err_dom.getElementsByTagName('errorCode')[0]) err_msg = _node_val( err_dom.getElementsByTagName('errorDescription')[0] ) except Exception, err: logging.error("Unable to parse SOAP error: {0}, response: {1}".format(err, response_data)) return False logging.error('SOAP request error: {0} - {1}'.format(err_code, err_msg)) raise Exception( 'SOAP request error: {0} - {1}'.format(err_code, err_msg) ) return False else: return True
def _real_connect(self, addr, connect_ex):
    """Connect the underlying socket to *addr* and activate the SSL object.

    When *connect_ex* is true, uses ``socket.connect_ex`` and returns its
    error code; otherwise uses ``socket.connect`` and returns ``None``.
    On socket error the SSL object is discarded so the connect can be
    retried.  Client-side sockets only.
    """
    if self.server_side:
        raise ValueError("can't connect in server-side mode")
    # Here we assume that the socket is client-side, and not
    # connected at the time of the call. We connect it, then wrap it.
    if self._connected:
        raise ValueError("attempt to connect already-connected SSLSocket!")
    self._sslobj = self.context._wrap_socket(self._sock, False, self.server_hostname, ssl_sock=self)
    try:
        if connect_ex:
            rc = socket.connect_ex(self, addr)
        else:
            rc = None
            socket.connect(self, addr)
        if not rc:
            # NOTE(review): this variant marks the socket connected *before*
            # running the handshake; a sibling variant in this file flips
            # that order — presumably each mirrors the stdlib ssl version it
            # was copied from. Confirm against the matching CPython release.
            self._connected = True
            if self.do_handshake_on_connect:
                self.do_handshake()
        return rc
    except socket_error:
        self._sslobj = None
        raise
def _real_connect(self, addr, connect_ex):
    """Connect the underlying socket to *addr* and activate the SSL object.

    Same contract as the other ``_real_connect`` variants in this file;
    returns ``connect_ex``'s error code or ``None``.
    """
    if self.server_side:
        raise ValueError("can't connect in server-side mode")
    # Here we assume that the socket is client-side, and not
    # connected at the time of the call. We connect it, then wrap it.
    if self._connected:
        raise ValueError("attempt to connect already-connected SSLSocket!")
    self._sslobj = self.context._wrap_socket(self._sock, False, self.server_hostname)
    try:
        if connect_ex:
            rc = socket.connect_ex(self, addr)
        else:
            rc = None
            socket.connect(self, addr)
        if not rc:
            # NOTE(review): here the handshake runs *before* _connected is
            # set (opposite order from the ssl_sock= variant above) —
            # presumably mirrors a different stdlib release; confirm before
            # unifying.
            if self.do_handshake_on_connect:
                self.do_handshake()
            self._connected = True
        return rc
    except socket_error:
        self._sslobj = None
        raise
def update_environ(self):
    """
    Called before the first request is handled to fill in WSGI environment values.

    This includes getting the correct server name and port.
    """
    address = self.address
    if not isinstance(address, tuple):
        # e.g. a unix-domain path: no meaningful name/port
        self.environ.setdefault('SERVER_NAME', '')
        self.environ.setdefault('SERVER_PORT', '')
        return
    if 'SERVER_NAME' not in self.environ:
        try:
            name = socket.getfqdn(address[0])
        except socket.error:
            name = str(address[0])
        if PY3 and not isinstance(name, str):
            name = name.decode('ascii')
        self.environ['SERVER_NAME'] = name
    self.environ.setdefault('SERVER_PORT', str(address[1]))
def _real_connect(self, addr, connect_ex):
    """Connect the underlying socket to *addr* and activate the SSL object.

    Variant that wraps via ``self._context`` (note the underscore) and
    passes ``ssl_sock=self``; otherwise the same contract as the other
    ``_real_connect`` variants in this file.
    """
    if self.server_side:
        raise ValueError("can't connect in server-side mode")
    # Here we assume that the socket is client-side, and not
    # connected at the time of the call. We connect it, then wrap it.
    if self._connected:
        raise ValueError("attempt to connect already-connected SSLSocket!")
    self._sslobj = self._context._wrap_socket(self._sock, False, self.server_hostname, ssl_sock=self)
    try:
        if connect_ex:
            rc = socket.connect_ex(self, addr)
        else:
            rc = None
            socket.connect(self, addr)
        if not rc:
            # _connected is set before the handshake in this variant — see
            # NOTE on the sibling variants about the differing order.
            self._connected = True
            if self.do_handshake_on_connect:
                self.do_handshake()
        return rc
    except socket_error:
        self._sslobj = None
        raise
def _real_connect(self, addr, connect_ex):
    """Connect the underlying socket to *addr* and activate the SSL object.

    Python-3.6-style variant: re-wraps the low-level object in an
    ``SSLObject`` when a session is being resumed.
    """
    if self.server_side:
        raise ValueError("can't connect in server-side mode")
    # Here we assume that the socket is client-side, and not
    # connected at the time of the call. We connect it, then wrap it.
    if self._connected:
        raise ValueError("attempt to connect already-connected SSLSocket!")
    self._sslobj = self._context._wrap_socket(self._sock, False, self.server_hostname)
    if self._session is not None:  # 3.6
        # session resumption: carry the stored TLS session into the new object
        self._sslobj = SSLObject(self._sslobj, owner=self,
                                 session=self._session)
    try:
        if connect_ex:
            rc = socket.connect_ex(self, addr)
        else:
            rc = None
            socket.connect(self, addr)
        if not rc:
            # handshake first, then mark connected (matches the 3.6-era
            # ordering; sibling variants differ — see NOTE there)
            if self.do_handshake_on_connect:
                self.do_handshake()
            self._connected = True
        return rc
    except socket_error:
        self._sslobj = None
        raise
def update_environ(self):
    """
    Called before the first request is handled to fill in WSGI environment values.

    This includes getting the correct server name and port.
    """
    address = self.address
    if isinstance(address, tuple):
        host, port = address[0], address[1]
        if 'SERVER_NAME' not in self.environ:
            try:
                server_name = socket.getfqdn(host)
            except socket.error:
                server_name = str(host)
            if PY3 and not isinstance(server_name, str):
                server_name = server_name.decode('ascii')
            self.environ['SERVER_NAME'] = server_name
        self.environ.setdefault('SERVER_PORT', str(port))
    else:
        # non-tuple address (e.g. unix socket): leave name/port blank
        self.environ.setdefault('SERVER_NAME', '')
        self.environ.setdefault('SERVER_PORT', '')
def handle_slave_send(socket, address, req):
    """Queue a message forwarded from the master node and reply OK/FAIL.

    Marks the message as slave-bound, enqueues it via the registered
    send function, logs the outcome, and sends a msgpack'd 'OK'/'FAIL'
    back on *socket*.
    """
    message = req['data']
    message_id = message.get('message_id', '?')
    message['to_slave'] = True
    runtime = None  # fix: was unbound (NameError) when enqueue raised
    try:
        runtime = send_funcs['message_send_enqueue'](message)
        response = 'OK'
        access_logger.info('Message (ID %s) from master %s queued successfully', message_id, address)
    except Exception:
        response = 'FAIL'
        # fix: the original passed %s placeholders with no arguments,
        # which made the logging call itself error out
        logger.exception('Queueing message (ID %s) from master %s failed.', message_id, address)
        access_logger.error('Failed queueing message (ID %s) from master %s: %s', message_id, address, runtime)
        metrics.incr('slave_message_send_fail_cnt')
    socket.sendall(msgpack.packb(response))
def get_jvm_option(self, java_bin):
    """Return the JVM option string matching the locally installed JDK.

    Runs ``java -version`` through a shell pipeline to extract the
    ``major.minor.security`` triple, then chooses the MetaSpace options
    for 1.8+/9+ JDKs and the PermSpace options otherwise.
    Raises JvmCollectorExcept when the version cannot be parsed.
    """
    cmd = '%s -version 2>&1 | grep \' version \' | awk -F"[\\"_]" \'{print $2}\'' % java_bin
    proc = subprocess.Popen(cmd, shell=True, close_fds=True,
                            stdout=subprocess.PIPE)
    version_lines = proc.stdout.readlines()
    try:
        version = version_lines[0].strip('\n')
        major, minor, security = [int(part) for part in version.split('.')]
        if major > 1 or minor >= 8:
            self.logger.info("using jdk version: {0}, "
                             "set JVM option with MetaSpace parameter".format(version_lines))
            return JSTATUS['new_jvm_option']
        self.logger.info("using jdk version: {0}, "
                         "set JVM option with PermSpace parameter".format(version_lines))
        return JSTATUS['old_jvm_option']
    except Exception as msg:
        self.logger.error("failed to decide the java version from excute_data: {0}, "
                          "exception msg: {1}".format(version_lines, msg))
        self.logger.error(traceback.format_exc())
        raise JvmCollectorExcept('receive java version error')
def start_jstatus_process(self, java_bin):
    """Launch the jstatus daemon via nohup, then give it time to start.

    Raises JvmCollectorExcept when the launching shell exits non-zero.
    """
    self.jstatus_jvm_option = self.get_jvm_option(java_bin)
    base_cmd = '%s %s -jar %s -D %s -L %s -l %s -P %s start' % (
        java_bin,
        self.jstatus_jvm_option,
        self.jstatus_path,
        self.jstatus_data,
        self.jstatus_log_level,
        self.jstatus_log,
        self.jstatus_port
    )
    cmd = 'nohup %s &' % base_cmd
    popen = subprocess.Popen(cmd, shell=True, close_fds=True)
    popen.communicate()
    if popen.returncode:
        raise JvmCollectorExcept('start jstatus error')
    self.logger.info('start jstatus success')
    # give the daemon a moment to come up before callers poll it
    gevent.sleep(10)
def _parse_data(self, rvalue): try: rvalue = json.loads(rvalue) except ValueError, e: raise JvmCollectorExcept('return value should be json, bug found %s' % rvalue) # ??????????? if rvalue.get('code'): if "Can't match any instances with instance id" in rvalue.get('msg') and not self.already_reload: self.already_reload = True # ???????? self.reload_instance(self.inst_id) return 1, {} else: raise JvmCollectorExcept('jstatus return error, %s' % rvalue.get('msg')) return 0, self._parse_dimension_value(rvalue.get('data', {}))
def can_read(self, timeout=0.0):
    """
    Checks if there is data that can be read from the
    socket (if open). Returns True if there is data and False if not.

    It returns None if something very bad happens such as
    a dead connection (bad file descriptor), etc
    """
    # rs = Read Sockets
    # es = Error Sockets
    if self.socket is not None:
        try:
            rs, _, es = select([self.socket], [], [], timeout)
        except (SelectError, socket.error) as e:
            # modernized from Py2-only `except (...), e` / `e[0]`:
            # e.args[0] carries the errno on both Python 2 and 3
            if e.args[0] == errno.EBADF:
                # Bad File Descriptor... hmm
                self.close()
            return None
        if len(es) > 0:
            # Bad File Descriptor
            self.close()
            return None
        return len(rs) > 0
    # no socket or no connection
    return None
def can_write(self, timeout=0):
    """
    Checks if there is data that can be written to the
    socket (if open). Returns True if writing is possible and False
    if not.

    It returns None if something very bad happens such as
    a dead connection (bad file descriptor), etc
    """
    # ws = Write Sockets
    # es = Error Sockets
    if self.socket is not None:
        try:
            _, ws, es = select([], [self.socket], [], timeout)
        except (SelectError, socket.error) as e:
            # modernized from Py2-only `except (...), e` / `e[0]`:
            # e.args[0] carries the errno on both Python 2 and 3
            if e.args[0] == errno.EBADF:
                # Bad File Descriptor... hmm
                self.close()
            return None
        if len(es) > 0:
            # Bad File Descriptor
            self.close()
            return None
        return len(ws) > 0
    # no socket or no connection
    return None
def handle_write(self):
    """Drain the outgoing queue forever, sending each message; on a
    socket error, defunct the connection and stop."""
    while True:
        try:
            payload = self._write_queue.get()
            self._socket.sendall(payload)
        except socket.error as err:
            log.debug("Exception in send for %s: %s", self, err)
            self.defunct(err)
            return
def _on_up_future_completed(self, host, futures, results, lock, finished_future):
    """Callback invoked as each pool-creation future for *host* finishes.

    Collects results under *lock*; when the last future completes,
    either cleans up (on any failure) or marks the host up and notifies
    listeners.  Always clears the host's "handling node up" flag.
    """
    with lock:
        futures.discard(finished_future)
        try:
            results.append(finished_future.result())
        except Exception as exc:
            # store the exception itself so the checks below can see it
            results.append(exc)
        if futures:
            # more futures still outstanding; wait for the last one
            return
    try:
        # all futures have completed at this point
        for exc in [f for f in results if isinstance(f, Exception)]:
            log.error("Unexpected failure while marking node %s up:", host, exc_info=exc)
            self._cleanup_failed_on_up_handling(host)
            return
        if not all(results):
            log.debug("Connection pool could not be created, not marking node %s up", host)
            self._cleanup_failed_on_up_handling(host)
            return
        log.info("Connection pools established for node %s", host)
        # mark the host as up and notify all listeners
        host.set_up()
        for listener in self.listeners:
            listener.on_up(host)
    finally:
        with host.lock:
            host._currently_handling_node_up = False
    # see if there are any pools to add or remove now that the host is marked up
    for session in self.sessions:
        session.update_created_pools()
def _prepare_all_queries(self, host):
    """Re-prepare every known prepared statement against *host*.

    Best-effort: all errors are logged and swallowed so a failure here
    never prevents the host from being used.  The connection opened for
    this work is always closed.
    """
    if not self._prepared_statements or not self.reprepare_on_up:
        return
    log.debug("Preparing all known prepared statements against host %s", host)
    connection = None
    try:
        connection = self.connection_factory(host.address)
        statements = self._prepared_statements.values()
        # group by keyspace so we only switch keyspace once per group
        for keyspace, ks_statements in groupby(statements, lambda s: s.keyspace):
            if keyspace is not None:
                connection.set_keyspace_blocking(keyspace)
            # prepare 10 statements at a time
            ks_statements = list(ks_statements)
            chunks = []
            for i in range(0, len(ks_statements), 10):
                chunks.append(ks_statements[i:i + 10])
            for ks_chunk in chunks:
                messages = [PrepareMessage(query=s.query_string) for s in ks_chunk]
                # TODO: make this timeout configurable somehow?
                responses = connection.wait_for_responses(*messages, timeout=5.0, fail_on_error=False)
                for success, response in responses:
                    if not success:
                        log.debug("Got unexpected response when preparing "
                                  "statement on host %s: %r", host, response)
        log.debug("Done preparing all known prepared statements against host %s", host)
    except OperationTimedOut as timeout:
        log.warning("Timed out trying to prepare all statements on host %s: %s", host, timeout)
    except (ConnectionException, socket.error) as exc:
        log.warning("Error trying to prepare all statements on host %s: %r", host, exc)
    except Exception:
        log.exception("Error trying to prepare all statements on host %s", host)
    finally:
        if connection:
            connection.close()
def execute_async(self, query, parameters=None, trace=False, custom_payload=None,
                  timeout=_NOT_SET, execution_profile=EXEC_PROFILE_DEFAULT,
                  paging_state=None):
    """
    Execute *query* without blocking and return a
    :class:`~.ResponseFuture`.

    Attach callbacks/errbacks to the returned future for asynchronous
    delivery, or call :meth:`~.ResponseFuture.result()` on it to block
    for the rows.  Parameters have the same meaning as in
    :meth:`Session.execute`.

    Example usage::

        >>> future = session.execute_async("SELECT * FROM mycf")
        >>> future.add_callbacks(on_rows, on_error)

    Blocking wait::

        >>> future = session.execute_async("SELECT * FROM mycf")
        >>> try:
        ...     results = future.result()
        ... except Exception:
        ...     log.exception("Operation failed:")
    """
    response_future = self._create_response_future(
        query, parameters, trace, custom_payload, timeout,
        execution_profile, paging_state)
    response_future._protocol_handler = self.client_protocol_handler
    self._on_request(response_future)
    response_future.send_request()
    return response_future
def prepare_on_all_hosts(self, query, excluded_host):
    """
    Prepare the given query on all hosts, excluding ``excluded_host``.
    Intended for internal use only.

    Failures are logged and ignored: a statement can always be
    re-prepared lazily when it is actually used on a host.
    """
    futures = []
    for host in tuple(self._pools.keys()):
        if host != excluded_host and host.is_up:
            future = ResponseFuture(self, PrepareMessage(query=query), None, self.default_timeout)
            # we don't care about errors preparing against specific hosts,
            # since we can always prepare them as needed when the prepared
            # statement is used. Just log errors and continue on.
            try:
                request_id = future._query(host)
            except Exception:
                log.exception("Error preparing query for host %s:", host)
                continue
            if request_id is None:
                # the error has already been logged by ResponseFuture
                log.debug("Failed to prepare query for host %s: %r",
                          host, future._errors.get(host))
                continue
            futures.append((host, future))
    # second pass: wait for all the in-flight prepares to finish
    for host, future in futures:
        try:
            future.result()
        except Exception:
            log.exception("Error preparing query for host %s:", host)
def result(self):
    """Block until the request completes, then return its result set.

    Re-raises the stored exception when the request failed.  The wait is
    bounded by the client-side timeout given to the Session execution
    functions; exceeding it raises :exc:`cassandra.OperationTimedOut`
    (server-side coordinator timeouts are governed separately — see
    :class:`.policies.RetryPolicy`).

    Example usage::

        >>> future = session.execute_async("SELECT * FROM mycf")
        >>> # do other stuff...

        >>> try:
        ...     rows = future.result()
        ...     for row in rows:
        ...         ... # process results
        ... except Exception:
        ...     log.exception("Operation failed:")
    """
    self._event.wait()
    if self._final_result is _NOT_SET:
        raise self._final_exception
    return ResultSet(self, self._final_result)
def add_callbacks(self, callback, errback,
                  callback_args=(), callback_kwargs=None,
                  errback_args=(), errback_kwargs=None):
    """Register a success callback and an error callback in one call.

    Convenience wrapper around :meth:`.add_callback()` and
    :meth:`.add_errback()`; the ``*_args``/``*_kwargs`` pairs are
    forwarded to the respective registration.

    Example usage::

        >>> session = cluster.connect()
        >>> query = "SELECT * FROM mycf"
        >>> future = session.execute_async(query)

        >>> def log_results(results, level='debug'):
        ...     for row in results:
        ...         log.log(level, "Result: %s", row)

        >>> def log_error(exc, query):
        ...     log.error("Query '%s' failed: %s", query, exc)

        >>> future.add_callbacks(
        ...     callback=log_results, callback_kwargs={'level': 'info'},
        ...     errback=log_error, errback_args=(query,))
    """
    cb_kwargs = callback_kwargs or {}
    eb_kwargs = errback_kwargs or {}
    self.add_callback(callback, *callback_args, **cb_kwargs)
    self.add_errback(errback, *errback_args, **eb_kwargs)
def new_chat_channel(conn, address):
    """ New chat channel for a given connection """
    participants.add(conn)
    data = conn.recv(1024)
    user = ''
    while data:
        print("Chat:", data.strip())
        # Decode and format the message ONCE per received chunk.  The
        # original decoded inside the per-participant loop, so with two or
        # more peers the second iteration called .decode() on an already
        # decoded str and crashed; it also crashed on messages containing
        # more than one ':' (unbounded split).
        text = data.decode('utf-8')
        user, msg = text.split(':', 1)
        if msg != '<handshake>':
            data_s = '\n#[' + user + ']>>> says ' + msg
        else:
            data_s = '(User %s connected)\n' % user
        payload = bytearray(data_s, 'utf-8')
        for p in participants:
            if p is not conn:
                try:
                    p.send(payload)
                except socket.error as e:
                    # ignore broken pipes, they just mean the participant
                    # closed its connection already (errno 32 == EPIPE;
                    # e.args[0] works on both Python 2 and 3)
                    if e.args[0] != 32:
                        raise
        data = conn.recv(1024)
    participants.remove(conn)
    print("Participant %s left chat." % user)
def _m_search_ssdp(local_ip):
    """
    Broadcast a UDP SSDP M-SEARCH packet and return response.

    Returns the raw response bytes from the gateway, or False when no
    reply arrives before the socket timeout.
    """
    search_target = "urn:schemas-upnp-org:device:InternetGatewayDevice:1"
    ssdp_request = ''.join(
        ['M-SEARCH * HTTP/1.1\r\n',
         'HOST: 239.255.255.250:1900\r\n',
         'MAN: "ssdp:discover"\r\n',
         'MX: 2\r\n',
         'ST: {0}\r\n'.format(search_target),
         '\r\n']
    )
    sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    sock.bind((local_ip, 10000))
    # encode explicitly so the payload is bytes on both Python 2 and 3
    # (sending a str fails on Python 3)
    sock.sendto(ssdp_request.encode('ascii'), ('239.255.255.250', 1900))
    if local_ip == "127.0.0.1":
        sock.settimeout(1)
    else:
        sock.settimeout(5)
    try:
        return sock.recv(2048)
    except socket.error as err:  # was Py2-only `except socket.error, err`
        # no reply from IGD, possibly no IGD on LAN
        logging.debug("UDP SSDP M-SEARCH send error using ip %s: %s" % (local_ip, err))
        return False
def run_application(self):
    """Register the client, emit SSE headers, and stream events until the
    peer disconnects.

    Broken-pipe errors (errno 32) are swallowed — they just mean the
    browser closed the event stream; the client is always removed from
    the server's set on exit.
    """
    if self.status and not self.headers_sent:
        self.write('')
    client = Client(self.client_address, self)
    self.server.clients.add(client)
    try:
        # print() call form parses on both Python 2 and 3 (the original
        # used the Python-2-only print statement)
        print("[%s] Client connected (%s clients total)" % (
            self.environ['REMOTE_ADDR'], len(self.server.clients)))
        origin = self.environ.get('HTTP_ORIGIN', '')
        self.start_response("200 OK", [
            ('Content-Type', 'text/event-stream'),
            ('Cache-Control', 'no-cache'),
            ('Connection', 'keep-alive'),
            ('Access-Control-Allow-Origin', origin),
        ])
        self.result = client.stream()
        self.process_result()
        super(EventSourceHandler, self).run_application()
    except socket.error as exc:
        if exc.errno != 32:
            raise
    finally:
        self.server.clients.remove(client)
def handle(self):
    """Serve requests on this connection until done, then tear it down.

    The loop delegates to handle_one_request(): None means stop, True
    means keep-alive (next request), and a (status, body) tuple is the
    final response after which the connection closes.  The finally block
    drains and closes the socket unconditionally.
    """
    try:
        while self.socket is not None:
            self.time_start = time.time()
            self.time_finish = 0
            result = self.handle_one_request()
            if result is None:
                break
            if result is True:
                continue
            self.status, response_body = result
            self.socket.sendall(response_body)
            if self.time_finish == 0:
                self.time_finish = time.time()
            self.log_request()
            break
    finally:
        if self.socket is not None:
            try:
                # read out request data to prevent error: [Errno 104] Connection reset by peer
                try:
                    self.socket._sock.recv(16384)
                finally:
                    self.socket._sock.close()  # do not rely on garbage collection
                    self.socket.close()
            except socket.error:
                pass
        # drop references so a lingering handler can't reuse them
        self.__dict__.pop('socket', None)
        self.__dict__.pop('rfile', None)
def _sendall(self, data): try: self.socket.sendall(data) except socket.error, ex: self.status = 'socket error: %s' % ex if self.code > 0: self.code = -self.code raise self.response_length += len(data)
def update_environ(self):
    """Fill in SERVER_NAME/SERVER_PORT in the WSGI environ from the
    listening address; blank for non-tuple (e.g. unix-socket) addresses."""
    address = self.address
    if not isinstance(address, tuple):
        self.environ.setdefault('SERVER_NAME', '')
        self.environ.setdefault('SERVER_PORT', '')
        return
    if 'SERVER_NAME' not in self.environ:
        try:
            resolved = socket.getfqdn(address[0])
        except socket.error:
            resolved = str(address[0])
        self.environ['SERVER_NAME'] = resolved
    self.environ.setdefault('SERVER_PORT', str(address[1]))
def __init__(self, sock, keyfile=None, certfile=None,
             server_side=False, cert_reqs=CERT_NONE,
             ssl_version=PROTOCOL_SSLv23, ca_certs=None,
             do_handshake_on_connect=True,
             suppress_ragged_eofs=True, ciphers=None):
    """Wrap *sock* in an SSL socket (gevent pre-2.7.9 sslwrap variant).

    If the socket is already connected, the SSL object is created
    immediately and the handshake optionally run; otherwise SSL setup is
    deferred until connect.
    """
    socket.__init__(self, _sock=sock)
    if certfile and not keyfile:
        keyfile = certfile
    # see if it's connected
    try:
        socket.getpeername(self)
    except socket_error as e:  # was Py2-only `except socket_error, e`
        # e.args[0] carries the errno on both Python 2 and 3
        if e.args[0] != errno.ENOTCONN:
            raise
        # no, no connection yet
        self._sslobj = None
    else:
        # yes, create the SSL object
        if ciphers is None:
            self._sslobj = _ssl.sslwrap(self._sock, server_side,
                                        keyfile, certfile,
                                        cert_reqs, ssl_version, ca_certs)
        else:
            self._sslobj = _ssl.sslwrap(self._sock, server_side,
                                        keyfile, certfile,
                                        cert_reqs, ssl_version, ca_certs,
                                        ciphers)
        if do_handshake_on_connect:
            self.do_handshake()
    self.keyfile = keyfile
    self.certfile = certfile
    self.cert_reqs = cert_reqs
    self.ssl_version = ssl_version
    self.ca_certs = ca_certs
    self.ciphers = ciphers
    self.do_handshake_on_connect = do_handshake_on_connect
    self.suppress_ragged_eofs = suppress_ragged_eofs
    self._makefile_refs = 0
def handle(self):
    """Serve requests on this connection until done, then tear it down.

    Same request loop as the other ``handle`` variant in this file
    (None => stop, True => keep-alive, tuple => final response), but the
    teardown closes the raw socket without draining it first and also
    drops the cached ``_wfile``.
    """
    try:
        while self.socket is not None:
            self.time_start = time.time()
            self.time_finish = 0
            result = self.handle_one_request()
            if result is None:
                break
            if result is True:
                continue
            self.status, response_body = result
            self.socket.sendall(response_body)
            if self.time_finish == 0:
                self.time_finish = time.time()
            self.log_request()
            break
    finally:
        if self.socket is not None:
            try:
                self.socket._sock.close()  # do not rely on garbage collection
                self.socket.close()
            except socket.error:
                pass
        self.__dict__.pop('socket', None)
        self.__dict__.pop('rfile', None)
        self.__dict__.pop('_wfile', None)  # XXX remove once wfile property is gone
def read(self, len=1024):
    """Read up to LEN bytes and return them.
    Return zero-length string on EOF.

    Cooperatively blocks (via wait_read/wait_write) when the SSL layer
    reports WANT_READ/WANT_WRITE and a timeout is set.
    """
    while True:
        try:
            return self._sslobj.read(len)
        except SSLError as ex:  # was Py2-only `except SSLError, ex`
            if ex.args[0] == SSL_ERROR_EOF and self.suppress_ragged_eofs:
                return ''
            elif ex.args[0] == SSL_ERROR_WANT_READ:
                if self.timeout == 0.0:
                    raise
                sys.exc_clear()  # NOTE(review): Python-2-only call; this code predates Py3
                try:
                    wait_read(self.fileno(), timeout=self.timeout,
                              timeout_exc=_SSLErrorReadTimeout,
                              event=self._read_event)
                except socket_error as wait_ex:
                    # wait_ex.args[0] carries the errno on both Py2 and Py3
                    if wait_ex.args[0] == EBADF:
                        return ''
                    raise
            elif ex.args[0] == SSL_ERROR_WANT_WRITE:
                if self.timeout == 0.0:
                    raise
                sys.exc_clear()
                try:
                    # note: using _SSLErrorReadTimeout rather than _SSLErrorWriteTimeout below is intentional
                    wait_write(self.fileno(), timeout=self.timeout,
                               timeout_exc=_SSLErrorReadTimeout,
                               event=self._write_event)
                except socket_error as wait_ex:
                    if wait_ex.args[0] == EBADF:
                        return ''
                    raise
            else:
                raise
def write(self, data):
    """Write DATA to the underlying SSL channel.  Returns
    number of bytes of DATA actually transmitted.

    Cooperatively blocks (via wait_read/wait_write) when the SSL layer
    reports WANT_READ/WANT_WRITE and a timeout is set.
    """
    while True:
        try:
            return self._sslobj.write(data)
        except SSLError as ex:  # was Py2-only `except SSLError, ex`
            if ex.args[0] == SSL_ERROR_WANT_READ:
                if self.timeout == 0.0:
                    raise
                sys.exc_clear()  # NOTE(review): Python-2-only call; this code predates Py3
                try:
                    wait_read(self.fileno(), timeout=self.timeout,
                              timeout_exc=_SSLErrorWriteTimeout,
                              event=self._read_event)
                except socket_error as wait_ex:
                    # wait_ex.args[0] carries the errno on both Py2 and Py3
                    if wait_ex.args[0] == EBADF:
                        return 0
                    raise
            elif ex.args[0] == SSL_ERROR_WANT_WRITE:
                if self.timeout == 0.0:
                    raise
                sys.exc_clear()
                try:
                    wait_write(self.fileno(), timeout=self.timeout,
                               timeout_exc=_SSLErrorWriteTimeout,
                               event=self._write_event)
                except socket_error as wait_ex:
                    if wait_ex.args[0] == EBADF:
                        return 0
                    raise
            else:
                raise