The following 50 code examples, extracted from open source Python projects, illustrate how to use asyncio.Protocol().
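Before the extracted examples, here is a minimal, self-contained sketch (not taken from any of the projects below) of the typical asyncio.Protocol pattern: subclass asyncio.Protocol, implement the connection_made() and data_received() callbacks, and hand the class to loop.create_server() as a protocol factory. The EchoProtocol name and the 127.0.0.1:8888 address are illustrative only.

import asyncio

class EchoProtocol(asyncio.Protocol):
    """Echo every received chunk back to the peer."""

    def connection_made(self, transport):
        # Called once per connection; keep the transport to write responses.
        self.transport = transport

    def data_received(self, data):
        # Called whenever bytes arrive on the socket.
        self.transport.write(data)

    def connection_lost(self, exc):
        # exc is None on clean EOF, otherwise the exception that occurred.
        self.transport = None

async def main():
    loop = asyncio.get_running_loop()
    # create_server() calls the factory (here, the class itself) once per client.
    server = await loop.create_server(EchoProtocol, '127.0.0.1', 8888)
    async with server:
        await server.serve_forever()

if __name__ == '__main__':
    asyncio.run(main())

The examples below show the same callbacks in real projects, as well as the places where asyncio.Protocol() is used directly as a do-nothing protocol (for example with connect_write_pipe() or in the asyncio test suite).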
def task():
    rfd, wfd = os.pipe()
    args = [sys.executable, '-c', code, str(rfd)]

    proc = yield from asyncio.create_subprocess_exec(
        *args,
        pass_fds={rfd},
        stdout=subprocess.PIPE)

    pipe = open(wfd, 'wb', 0)
    transport, _ = yield from loop.connect_write_pipe(asyncio.Protocol,
                                                      pipe)
    transport.write(b'data')

    stdout, stderr = yield from proc.communicate()
    print("stdout = %r" % stdout.decode())
    transport.close()

def test_empty(self):
    f = mock.Mock()
    p = asyncio.Protocol()
    self.assertIsNone(p.connection_made(f))
    self.assertIsNone(p.connection_lost(f))
    self.assertIsNone(p.data_received(f))
    self.assertIsNone(p.eof_received())

    dp = asyncio.DatagramProtocol()
    self.assertIsNone(dp.connection_made(f))
    self.assertIsNone(dp.connection_lost(f))
    self.assertIsNone(dp.error_received(f))
    self.assertIsNone(dp.datagram_received(f, f))

    sp = asyncio.SubprocessProtocol()
    self.assertIsNone(sp.connection_made(f))
    self.assertIsNone(sp.connection_lost(f))
    self.assertIsNone(sp.pipe_data_received(1, f))
    self.assertIsNone(sp.pipe_connection_lost(1, f))
    self.assertIsNone(sp.process_exited())

def test_make_ssl_transport(self):
    m = mock.Mock()
    self.loop.add_reader = mock.Mock()
    self.loop.add_reader._is_coroutine = False
    self.loop.add_writer = mock.Mock()
    self.loop.remove_reader = mock.Mock()
    self.loop.remove_writer = mock.Mock()
    waiter = asyncio.Future(loop=self.loop)
    with test_utils.disable_logger():
        transport = self.loop._make_ssl_transport(
            m, asyncio.Protocol(), m, waiter)

        # execute the handshake while the logger is disabled
        # to ignore SSL handshake failure
        test_utils.run_briefly(self.loop)

    # Sanity check
    class_name = transport.__class__.__name__
    self.assertIn("ssl", class_name.lower())
    self.assertIn("transport", class_name.lower())

    transport.close()
    # execute pending callbacks to close the socket transport
    test_utils.run_briefly(self.loop)

def connection_made(self, transport):
    """
    Protocol connection made
    """
    if self._verbose:
        print('Connection made')

    self._transport = transport
    sock = self._transport.get_extra_info('socket')
    # sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM, socket.IPPROTO_UDP)
    sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
    sock.settimeout(2)
    sock.setsockopt(socket.IPPROTO_IP, socket.IP_MULTICAST_TTL, 2)
    sock.bind(('', self._upnp.ssdp_port))

    tmpl = ('M-SEARCH * HTTP/1.1',
            'Host: ' + self._upnp.ssdp_host + ':' + str(self._upnp.ssdp_port),
            'Man: "ssdp:discover"',
            'ST: {}'.format(self._search_target),
            # 'ST: ssdp:all',
            'MX: 3',
            '',
            '')
    msg = "\r\n".join(tmpl).encode('ascii')
    self._transport.sendto(msg, (self._upnp.ssdp_host, self._upnp.ssdp_port))

def __init__(self, proto):
    """
    A :pep:`3156` ready websocket handler that works well
    in a coroutine-aware loop such as the one provided by
    the asyncio module.

    The provided `proto` instance is a :class:`asyncio.Protocol`
    subclass instance that will be used internally to read and
    write from the underlying transport.

    Because the base :class:`ws4py.websocket.WebSocket` class
    is still coupled a bit to the socket interface, we have to
    override a little more than necessary to play nice with the
    :pep:`3156` interface. Hopefully, some day this will be
    cleaned out.
    """
    _WebSocket.__init__(self, None)
    self.started = False
    self.proto = proto

def msg_received(self, msg):
    if msg['MessageType'] != 'Result':
        return

    status = msg['Number']
    if status == 0:
        self.waiter.set_result(None)
        # ensure no further data is received until the protocol
        # is switched to the application protocol.
        self.transport.pause_reading()
    elif status == 2:
        self.waiter.set_exception(DeviceNotConnected(
            "Device %s is not currently connected" % (self.device_id,)))
    elif status == 3:
        self.waiter.set_exception(ConnectionRefused(
            "Connection refused to device_id=%s port=%d" % (self.device_id, self.port)))
    else:
        self.waiter.set_exception(ConnectionFailed(
            "Protocol error connecting to device %s" % (self.device_id,)))

async def connect_to_device(self, protocol_factory, device_id, port):
    '''Open a TCP connection to a port on a device.

    Args:
        protocol_factory (callable): A callable that returns an
            asyncio.Protocol implementation
        device_id (int): The id of the device to connect to
        port (int): The port to connect to on the target device

    Returns:
        (dict, asyncio.Transport, asyncio.Protocol): The device information,
            connected transport and protocol.

    Raises:
        A USBMuxError subclass instance such as ConnectionRefused
    '''
    waiter = asyncio.Future(loop=self.loop)
    connector = USBMuxConnector(device_id, port, waiter)

    transport, switcher = await self._connect_transport(
        lambda: _ProtoSwitcher(self.loop, connector))

    # wait for the connection to succeed or fail
    await waiter

    app_protocol = switcher.switch_protocol(protocol_factory)
    device_info = self.attached.get(device_id) or {}
    return device_info, transport, app_protocol

async def __aenter__(self):
    transports = self._transports
    write_buff = self._write_buff

    class SocksPrimitiveProtocol(asyncio.Protocol):
        _transport = None

        def connection_made(self, transport):
            self._transport = transport
            transports.append(transport)

        def data_received(self, data):
            self._transport.write(write_buff)

    def factory():
        return SocksPrimitiveProtocol()

    self._srv = await self._loop.create_server(
        factory, '127.0.0.1', self.port)
    return self

def setUp(self):
    self.loop = self.new_test_loop()
    self.protocol = test_utils.make_test_protocol(asyncio.Protocol)
    self.pipe = mock.Mock(spec_set=io.RawIOBase)
    self.pipe.fileno.return_value = 5

    blocking_patcher = mock.patch('asyncio.unix_events._set_nonblocking')
    blocking_patcher.start()
    self.addCleanup(blocking_patcher.stop)

    fstat_patcher = mock.patch('os.fstat')
    m_fstat = fstat_patcher.start()
    st = mock.Mock()
    st.st_mode = stat.S_IFIFO
    m_fstat.return_value = st
    self.addCleanup(fstat_patcher.stop)

def datagram_received(self, data, addr):
    """
    Opens a read or write connection to remote host by scheduling
    an asyncio.Protocol.
    """
    logging.debug('received: {}'.format(data.decode()))

    first_packet = self.packet_factory.from_bytes(data)
    protocol = self.select_protocol(first_packet)
    file_handler_cls = self.select_file_handler(first_packet)

    connect = self.loop.create_datagram_endpoint(
        lambda: protocol(data, file_handler_cls, addr, self.extra_opts),
        local_addr=(self.host_interface, 0, ))

    self.loop.create_task(connect)

async def check_port_open(port, loop, delay=1):
    # the "s = socket.socket; s.bind" approach sometimes says a port is in use when it's not
    # this approach replicates aiohttp so should always give the same answer
    for i in range(5, 0, -1):
        try:
            server = await loop.create_server(asyncio.Protocol(), host=HOST, port=port)
        except OSError as e:
            if e.errno != 98:  # pragma: no cover
                raise
            dft_logger.warning('port %d is already in use, waiting %d...', port, i)
            await asyncio.sleep(delay, loop=loop)
        else:
            server.close()
            await server.wait_closed()
            return
    raise AiohttpDevException('The port {} is already is use'.format(port))

def data_received(self, buf):
    self._deserializer.update(buf)
    for packet in self._deserializer.nextPackets():
        if isinstance(packet, AnnounceLinkPacket):
            self._switch.registerLink(packet.address, self)
        elif isinstance(packet, WirePacket):
            destinations = self._switch.getOutboundLinks(
                packet.source, packet.sourcePort,
                packet.destination, packet.destinationPort)
            for destinationProtocol in destinations:
                # The Switching Protocol supports higher-layer processing.
                # So, if there's a higher protocol, pass the un-modified packet
                # Otherwise, serialize for transport
                packetBytes = packet.__serialize__()
                destinationProtocol.transport.write(packetBytes)
        else:
            self._switch.handleExtensionPacket(protocol, packet)
            # errReporter.error("Unexpected message received", exception=NetworkError.UnexpectedPacket(packet))

def connection_made(self, transport):
    '''
    override asyncio.Protocol
    '''
    self._connected = True
    self.transport = transport
    self.remote_ip, self.port = transport.get_extra_info('peername')[:2]

    logging.debug(
        'Connection made (address: {} port: {})'
        .format(self.remote_ip, self.port))

    self.auth_future = self.send_package(
        protomap.CPROTO_REQ_AUTH,
        data=(self._username, self._password, self._dbname),
        timeout=10)

    self._password = None
    self.on_connection_made()

def connection_lost(self, exc):
    '''
    override asyncio.Protocol
    '''
    self._connected = False

    logging.debug(
        'Connection lost (address: {} port: {})'
        .format(self.remote_ip, self.port))

    for pid, (future, task) in self._requests.items():
        task.cancel()
        if future.cancelled():
            continue
        future.set_exception(ConnectionError(
            'Connection is lost before we had an answer on package id: {}.'
            .format(pid)))

    self.on_connection_lost(exc)

def data_received(self, data):
    '''
    override asyncio.Protocol
    '''
    self._buffered_data.extend(data)
    while self._buffered_data:
        size = len(self._buffered_data)
        if self._data_package is None:
            if size < DataPackage.struct_datapackage.size:
                return None
            self._data_package = DataPackage(self._buffered_data)

        if size < self._data_package.length:
            return None

        try:
            self._data_package.extract_data_from(self._buffered_data)
        except KeyError as e:
            logging.error('Unsupported package received: {}'.format(e))
        except Exception as e:
            logging.exception(e)
            # empty the byte-array to recover from this error
            self._buffered_data.clear()
        else:
            self._on_package_received()
        self._data_package = None

def connection_made(self, transport): """Called when asyncio.Protocol establishes the network connection.""" self.log.info('Connection established to PLM') self.transport = transport # self.transport.set_write_buffer_limits(128) # limit = self.transport.get_write_buffer_size() # self.log.debug('Write buffer size is %d', limit) self.get_plm_info() self.load_all_link_database()
def data_received(self, data): """Called when asyncio.Protocol detects received data from network.""" self.log.debug('Received %d bytes from PLM: %s', len(data), binascii.hexlify(data)) self._buffer.extend(data) self._peel_messages_from_buffer() for message in self._recv_queue: self._process_message(message) self._recv_queue.remove(message)
def connection_lost(self, exc): """Called when asyncio.Protocol loses the network connection.""" if exc is None: self.log.warning('eof from modem?') else: self.log.warning('Lost connection to modem: %s', exc) self.transport = None if self._connection_lost_callback: self._connection_lost_callback()
def _retry_for_result(result):
    if isinstance(result, tuple):
        return (not isinstance(result[0], Transport) or
                not isinstance(result[1], asyncio.Protocol))
    return True

def connect_write_pipe(file):
    loop = asyncio.get_event_loop()
    transport, _ = yield from loop.connect_write_pipe(asyncio.Protocol, file)
    return transport

#
# Wrap a readable pipe in a stream
#

def setUp(self):
    self.loop = self.new_test_loop()
    self.addCleanup(self.loop.close)
    self.proactor = mock.Mock()
    self.loop._proactor = self.proactor
    self.protocol = test_utils.make_test_protocol(asyncio.Protocol)
    self.sock = mock.Mock(socket.socket)

def test_make_socket_transport(self):
    tr = self.loop._make_socket_transport(self.sock, asyncio.Protocol())
    self.assertIsInstance(tr, _ProactorSocketTransport)
    close_transport(tr)

def _basetest_create_connection(self, connection_fut, check_sockname=True):
    tr, pr = self.loop.run_until_complete(connection_fut)
    self.assertIsInstance(tr, asyncio.Transport)
    self.assertIsInstance(pr, asyncio.Protocol)
    self.assertIs(pr.transport, tr)
    if check_sockname:
        self.assertIsNotNone(tr.get_extra_info('sockname'))
    self.loop.run_until_complete(pr.done)
    self.assertGreater(pr.nbytes, 0)
    tr.close()

def _basetest_create_ssl_connection(self, connection_fut,
                                    check_sockname=True):
    tr, pr = self.loop.run_until_complete(connection_fut)
    self.assertIsInstance(tr, asyncio.Transport)
    self.assertIsInstance(pr, asyncio.Protocol)
    self.assertTrue('ssl' in tr.__class__.__name__.lower())
    if check_sockname:
        self.assertIsNotNone(tr.get_extra_info('sockname'))
    self.loop.run_until_complete(pr.done)
    self.assertGreater(pr.nbytes, 0)
    tr.close()

def test_make_socket_transport(self):
    m = mock.Mock()
    self.loop.add_reader = mock.Mock()
    self.loop.add_reader._is_coroutine = False
    transport = self.loop._make_socket_transport(m, asyncio.Protocol())
    self.assertIsInstance(transport, _SelectorSocketTransport)
    close_transport(transport)

def setUp(self):
    self.loop = self.new_test_loop()
    self.protocol = test_utils.make_test_protocol(asyncio.Protocol)
    self.sock = mock.Mock(socket.socket)
    self.sock.fileno.return_value = 7

def setUp(self):
    self.loop = self.new_test_loop()
    self.protocol = test_utils.make_test_protocol(asyncio.Protocol)
    self.sock = mock.Mock(socket.socket)
    self.sock.fileno.return_value = 7
    self.sslsock = mock.Mock()
    self.sslsock.fileno.return_value = 1
    self.sslcontext = mock.Mock()
    self.sslcontext.wrap_socket.return_value = self.sslsock

def ssl_protocol(self, waiter=None):
    sslcontext = test_utils.dummy_ssl_context()
    app_proto = asyncio.Protocol()
    proto = sslproto.SSLProtocol(self.loop, app_proto, sslcontext, waiter)
    self.addCleanup(proto._app_transport.close)
    return proto

def test_close(self):
    a, b = self.loop._socketpair()
    trans = self.loop._make_socket_transport(a, asyncio.Protocol())
    f = asyncio.async(self.loop.sock_recv(b, 100))
    trans.close()
    self.loop.run_until_complete(f)
    self.assertEqual(f.result(), b'')
    b.close()

def _test_pipe(self):
    ADDRESS = r'\\.\pipe\_test_pipe-%s' % os.getpid()

    with self.assertRaises(FileNotFoundError):
        yield from self.loop.create_pipe_connection(
            asyncio.Protocol, ADDRESS)

    [server] = yield from self.loop.start_serving_pipe(
        UpperProto, ADDRESS)
    self.assertIsInstance(server, windows_events.PipeServer)

    clients = []
    for i in range(5):
        stream_reader = asyncio.StreamReader(loop=self.loop)
        protocol = asyncio.StreamReaderProtocol(stream_reader,
                                                loop=self.loop)
        trans, proto = yield from self.loop.create_pipe_connection(
            lambda: protocol, ADDRESS)
        self.assertIsInstance(trans, asyncio.Transport)
        self.assertEqual(protocol, proto)
        clients.append((stream_reader, trans))

    for i, (r, w) in enumerate(clients):
        w.write('lower-{}\n'.format(i).encode())

    for i, (r, w) in enumerate(clients):
        response = yield from r.readline()
        self.assertEqual(response, 'LOWER-{}\n'.format(i).encode())
        w.close()

    server.close()

    with self.assertRaises(FileNotFoundError):
        yield from self.loop.create_pipe_connection(
            asyncio.Protocol, ADDRESS)

    return 'done'

def test_create_connection_multiple_errors(self, m_socket):

    class MyProto(asyncio.Protocol):
        pass

    @asyncio.coroutine
    def getaddrinfo(*args, **kw):
        yield from []
        return [(2, 1, 6, '', ('107.6.106.82', 80)),
                (2, 1, 6, '', ('107.6.106.82', 80))]

    def getaddrinfo_task(*args, **kwds):
        return asyncio.Task(getaddrinfo(*args, **kwds), loop=self.loop)

    idx = -1
    errors = ['err1', 'err2']

    def _socket(*args, **kw):
        nonlocal idx, errors
        idx += 1
        raise OSError(errors[idx])

    m_socket.socket = _socket

    self.loop.getaddrinfo = getaddrinfo_task

    coro = self.loop.create_connection(MyProto, 'example.com', 80)
    with self.assertRaises(OSError) as cm:
        self.loop.run_until_complete(coro)

    self.assertEqual(str(cm.exception), 'Multiple exceptions: err1, err2')

def __init__(self, consumer, loop=None, state=None):
    self.consumer = consumer
    self.loop = loop or asyncio.get_event_loop()
    self.request_parser = httptools.HttpRequestParser(self)

    if state is None:
        state = {'total_requests': 0}
    self.state = state

    self.base_message = {
        'channel': 'http.request'
    }
    self.base_channels = {
        'reply': ReplyChannel(self)
    }

    self.transport = None
    self.message = None
    self.channels = None
    self.headers = None
    self.upgrade = None

    self.read_paused = False
    self.write_paused = False

    self.buffer_size = 0
    self.high_water_limit = HIGH_WATER_LIMIT
    self.low_water_limit = LOW_WATER_LIMIT

    self.active_request = None
    self.max_pipelined_requests = MAX_PIPELINED_REQUESTS
    self.pipeline_queue = collections.deque()

# The asyncio.Protocol hooks...

async def connect_to_first_device(self, protocol_factory, port,
                                  timeout=DEFAULT_MAX_WAIT,
                                  include=None, exclude=None):
    '''Open a TCP connection to the first device that has the requested port open.

    Args:
        protocol_factory (callable): A callable that returns an
            asyncio.Protocol implementation.
        port (int): The port to connect to on the target device.
        timeout (float): The maximum amount of time to wait for a suitable
            device to be connected.

    Returns:
        (dict, asyncio.Transport, asyncio.Protocol): The device information,
            connected transport and protocol.

    Raises:
        asyncio.TimeoutError if no devices with the requested port become
        available in the specified time.
    '''
    with self.attach_watcher(include_existing=True) as watcher:
        timeout = Timeout(timeout)
        while not timeout.expired:
            action, device_id, info = await watcher.wait_for_next(timeout.remaining)
            if action != ACTION_ATTACHED:
                continue
            if exclude is not None and device_id in exclude:
                continue
            if include is not None and device_id not in include:
                continue
            try:
                return await self.connect_to_device(protocol_factory, device_id, port)
            except USBMuxError:
                pass

    raise asyncio.TimeoutError("No available devices")

def __init__(self, loop: asyncio.BaseEventLoop, dispatcher: AbstractDispatcher, *,
             request_timeout: int=15):
    """
    This is a specialized HTTP protocol meant to be a low-latency API Gateway.
    For the HTTP dispatcher this means that data in/out will be streamed each
    direction, other dispatchers may require full request/response bodies be
    read into memory.

    :param loop: event loop
    :param dispatcher: dispatcher strategy, should implement the methods
        defined in the AbstractDispatcher
    :param request_timeout: Max length of a request cycle in secs (def: 15s)
    """
    self.dispatcher = dispatcher
    self.loop = loop
    self.request_timeout = request_timeout

    self.parser = None     # httptools.HttpRequestParser
    self.transport = None  # type: Optional[asyncio.Transport]
    self.reader = None     # type: Optional[asyncio.StreamReader]
    self.timeout = None    # type: Optional[Task]

    # request info
    self.url = None        # type: Optional[str]
    self.headers = None    # type: Optional[List[Tuple[bytes, bytes]]]

# ===========================
# asyncio.Protocol callbacks
# ===========================

async def test_base_negotiate_with_app_proto(loop):
    waiter = asyncio.Future(loop=loop)
    proto = make_base(loop, waiter=waiter,
                      ap_factory=lambda: asyncio.Protocol())
    proto.socks_request = make_mocked_coro((None, None))

    await proto.negotiate(None, None)
    await waiter
    assert waiter.done()

def sendto(self, data, addr=None):
    assert addr is None
    self.control_protocol.send_payload(
        Protocol.PACKET_NUMBERS[Mumble_pb2.UDPTunnel],
        data)

def test_create_connection_sock(self):
    with test_utils.run_test_server() as httpd:
        sock = None
        infos = self.loop.run_until_complete(
            self.loop.getaddrinfo(
                *httpd.address, type=socket.SOCK_STREAM))
        for family, type, proto, cname, address in infos:
            try:
                sock = socket.socket(family=family, type=type, proto=proto)
                sock.setblocking(False)
                self.loop.run_until_complete(
                    self.loop.sock_connect(sock, address))
            except:
                pass
            else:
                break
        else:
            assert False, 'Can not create socket.'

        f = self.loop.create_connection(
            lambda: MyProto(loop=self.loop), sock=sock)
        tr, pr = self.loop.run_until_complete(f)
        self.assertIsInstance(tr, asyncio.Transport)
        self.assertIsInstance(pr, asyncio.Protocol)
        self.loop.run_until_complete(pr.done)
        self.assertGreater(pr.nbytes, 0)
        tr.close()

def _basetest_create_ssl_connection(self, connection_fut,
                                    check_sockname=True,
                                    peername=None):
    tr, pr = self.loop.run_until_complete(connection_fut)
    self.assertIsInstance(tr, asyncio.Transport)
    self.assertIsInstance(pr, asyncio.Protocol)
    self.assertTrue('ssl' in tr.__class__.__name__.lower())
    self.check_ssl_extra_info(tr, check_sockname, peername)
    self.loop.run_until_complete(pr.done)
    self.assertGreater(pr.nbytes, 0)
    tr.close()

def test_make_socket_transport(self):
    m = mock.Mock()
    self.loop.add_reader = mock.Mock()
    self.loop.add_reader._is_coroutine = False
    transport = self.loop._make_socket_transport(m, asyncio.Protocol())
    self.assertIsInstance(transport, _SelectorSocketTransport)

    # Calling repr() must not fail when the event loop is closed
    self.loop.close()
    repr(transport)

    close_transport(transport)

def setUp(self):
    self.loop = self.new_test_loop()
    self.protocol = test_utils.make_test_protocol(asyncio.Protocol)
    self.sock = mock.Mock(socket.socket)
    self.sock_fd = self.sock.fileno.return_value = 7

def test_close(self):
    a, b = self.loop._socketpair()
    trans = self.loop._make_socket_transport(a, asyncio.Protocol())
    f = asyncio.ensure_future(self.loop.sock_recv(b, 100))
    trans.close()
    self.loop.run_until_complete(f)
    self.assertEqual(f.result(), b'')
    b.close()