The following 50 code examples, extracted from open-source Python projects, illustrate how to use asyncio.FIRST_COMPLETED.
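Before the project examples, the sketch below shows the basic pattern they all share: schedule several awaitables, wait for whichever finishes first with return_when=asyncio.FIRST_COMPLETED, consume its result, and cancel the leftovers. It is a minimal, hypothetical illustration; the coroutine names fetch_primary and fetch_fallback are placeholders and do not come from any of the projects below.

import asyncio

async def fetch_primary():
    await asyncio.sleep(0.2)   # stand-in for slower I/O
    return 'primary'

async def fetch_fallback():
    await asyncio.sleep(0.1)   # stand-in for faster I/O
    return 'fallback'

async def first_result():
    tasks = [asyncio.ensure_future(fetch_primary()),
             asyncio.ensure_future(fetch_fallback())]
    # Returns as soon as one task finishes; the other stays pending.
    done, pending = await asyncio.wait(tasks, return_when=asyncio.FIRST_COMPLETED)
    for task in pending:
        task.cancel()
    # Let the cancellations be processed so no "pending task" warnings remain.
    await asyncio.gather(*pending, return_exceptions=True)
    return done.pop().result()

print(asyncio.get_event_loop().run_until_complete(first_result()))  # -> 'fallback'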
async def daemon():
    try_init_cgroup()
    async with VJ4Session(config['server_url']) as session:
        while True:
            try:
                await session.login_if_needed(config['uname'], config['password'])
                done, pending = await wait([do_judge(session), do_noop(session)],
                                           return_when=FIRST_COMPLETED)
                for task in pending:
                    task.cancel()
                await gather(*done)
            except Exception as e:
                logger.exception(e)
            logger.info('Retrying after %d seconds', RETRY_DELAY_SEC)
            await sleep(RETRY_DELAY_SEC)
async def package_handler(self):
    """ Co-routine that handles the packages coming out of the decoder. """
    dlog.debug(self.log_prefix + "Package handler started")
    while self.running:
        decoder_future = asyncio.ensure_future(self.decoder.get())
        await asyncio.wait((decoder_future, self.closing_semaphore.acquire()),
                           return_when=asyncio.FIRST_COMPLETED)
        if decoder_future.done():
            package = decoder_future.result()
            dlog.debug(self.log_prefix + "Received '{}({})'".format(
                self.__command_dictionary[package[0]].func.__name__, package[1]
            ))
            await self.__command_dictionary[package[0]](*package[1])
        else:
            break
    dlog.debug(self.log_prefix + "Package handler stopped")
async def await_until_closing(self, coro):
    """
    Wait for some task to complete but abort as soon as the instance is
    being closed.

    :param coro: The coroutine or future-like object to wait for.
    """
    wait_task = asyncio.ensure_future(self.wait_closing(), loop=self.loop)
    coro_task = asyncio.ensure_future(coro, loop=self.loop)
    try:
        done, pending = await asyncio.wait(
            [wait_task, coro_task],
            return_when=asyncio.FIRST_COMPLETED,
            loop=self.loop,
        )
    finally:
        wait_task.cancel()
        coro_task.cancel()

    # It could be that the previous instructions cancelled coro_task if it
    # wasn't done yet.
    return await coro_task
async def connect(**kwargs):
    logger.info("Connected")
    irc_client.send("NICK", nick=get_config("mainserver.irc.irc.user.nick"))
    irc_client.send("USER", user=get_config("mainserver.irc.irc.user.name"),
                    realname=get_config("mainserver.irc.irc.user.realname"))
    done, pending = await asyncio.wait(
        [irc_client.wait("RPL_ENDOFMOTD"),
         irc_client.wait("ERR_NOMOTD")],
        loop=irc_client.loop,
        return_when=asyncio.FIRST_COMPLETED
    )
    for future in pending:
        future.cancel()
    irc_client.send('JOIN', channel=get_config("mainserver.irc.irc.channel"))
async def wait_for_first_response(tasks, converters):
    """given a list of unawaited tasks and non-coro result parsers to be
    called on the results, this function returns the 1st result that is
    returned and converted

    if it is possible for 2 tasks to complete at the same time,
    only the 1st result determined by asyncio.wait will be returned

    returns None if none successfully complete
    returns 1st error raised if any occur (probably)
    """
    primed = [wait_for_result(t, c) for t, c in zip(tasks, converters)]
    done, pending = await asyncio.wait(primed, return_when=asyncio.FIRST_COMPLETED)
    for p in pending:
        p.cancel()
    try:
        return done.pop().result()
    except NotImplementedError as e:
        raise e
    except:
        return None
async def _wait(self, work):
    work_task = asyncio.ensure_future(work, loop=self.loop)
    stop_event_task = asyncio.ensure_future(self.stop_event.wait(), loop=self.loop)
    try:
        await asyncio.wait(
            [work_task, stop_event_task],
            return_when=asyncio.FIRST_COMPLETED,
            loop=self.loop)
        if self.stop_event.is_set():
            raise StopServer()
        else:
            return work_task.result()
    finally:
        if not work_task.done():
            work_task.cancel()
        if not stop_event_task.done():
            stop_event_task.cancel()
async def handle(self):
    self.proxy_to_backend_task = asyncio.ensure_future(
        self.proxy_to_backend(), loop=self.loop)
    self.proxy_from_backend_task = asyncio.ensure_future(
        self.proxy_from_backend(), loop=self.loop)
    try:
        await asyncio.wait(
            [self.proxy_to_backend_task, self.proxy_from_backend_task],
            loop=self.loop,
            return_when=asyncio.FIRST_COMPLETED)
    finally:
        # Asyncio fails to properly remove the readers and writers
        # when the task doing recv() or send() is cancelled, so
        # we must remove the readers and writers manually before
        # closing the sockets.
        self.loop.remove_reader(self.client_sock.fileno())
        self.loop.remove_writer(self.client_sock.fileno())
        self.loop.remove_reader(self.backend_sock.fileno())
        self.loop.remove_writer(self.backend_sock.fileno())
        self.client_sock.close()
        self.backend_sock.close()
async def _read(self, sock, n):
    read_task = asyncio.ensure_future(
        self.loop.sock_recv(sock, n), loop=self.loop)
    conn_event_task = asyncio.ensure_future(
        self.connectivity_loss.wait(), loop=self.loop)
    try:
        await asyncio.wait(
            [read_task, conn_event_task],
            return_when=asyncio.FIRST_COMPLETED,
            loop=self.loop)
        if self.connectivity_loss.is_set():
            return None
        else:
            return read_task.result()
    finally:
        if not read_task.done():
            read_task.cancel()
        if not conn_event_task.done():
            conn_event_task.cancel()
async def _write(self, sock, data):
    write_task = asyncio.ensure_future(
        self.loop.sock_sendall(sock, data), loop=self.loop)
    conn_event_task = asyncio.ensure_future(
        self.connectivity_loss.wait(), loop=self.loop)
    try:
        await asyncio.wait(
            [write_task, conn_event_task],
            return_when=asyncio.FIRST_COMPLETED,
            loop=self.loop)
        if self.connectivity_loss.is_set():
            return None
        else:
            return write_task.result()
    finally:
        if not write_task.done():
            write_task.cancel()
        if not conn_event_task.done():
            conn_event_task.cancel()
async def _transfer(self, rbd_read_operations: List[Tuple[int, int, bool]], parallel: int):
    log.info('Transferring image with %d parallel stream(s).', parallel)
    self.total = len(rbd_read_operations)
    self.prev_report_time = time.monotonic()
    for (offset, length, exists) in rbd_read_operations:
        while len(self._transfers) >= parallel:
            await self._wait_for_transfers(asyncio.FIRST_COMPLETED)
        # ensure_future is required since we need .cancel() method..
        self._transfers.add(asyncio.ensure_future(self._transfer_chunk(offset, length, exists)))
    log.debug('Iteration loop complete.')
    if self._transfers:
        log.debug('Waiting for the tail transfers.')
        await self._wait_for_transfers(asyncio.ALL_COMPLETED)
    log.debug('Flushing QCOW2 image')
    await self.nbd_client.flush()
def get_comments(self, item):
    """Nasty piece of work - BFS without history"""
    new_items = [item]
    pending = []
    results = []
    while new_items or pending:
        tasks = chain(map(self.async_get_item, new_items), pending)
        done, pending = self.loop.run_until_complete(
            asyncio.wait(tasks, return_when=asyncio.FIRST_COMPLETED))
        new_items = []
        for task in done:
            r = task.result()
            if r["type"] == "comment" and "text" in r:
                results.append((r["id"], r["text"]))
            if "kids" in r:
                new_items += r["kids"]
    return results
async def run_with_interrupt(task, event, loop=None):
    """
    Awaits a task while allowing it to be interrupted by an `asyncio.Event`.

    If the task finishes without the event becoming set, the results of the
    task will be returned. If the event becomes set, the task will be
    cancelled and ``None`` will be returned.

    :param task: Task to run
    :param event: An `asyncio.Event` which, if set, will interrupt `task`
        and cause it to be cancelled.
    :param loop: Optional event loop to use other than the default.
    """
    loop = loop or asyncio.get_event_loop()
    event_task = loop.create_task(event.wait())
    done, pending = await asyncio.wait([task, event_task],
                                       loop=loop,
                                       return_when=asyncio.FIRST_COMPLETED)
    for f in pending:
        f.cancel()
    exception = [f.exception() for f in done
                 if f is not event_task and f.exception()]
    if exception:
        raise exception[0]
    result = [f.result() for f in done
              if f is not event_task]
    if result:
        return result[0]
    else:
        return None
async def handler(websocket, path):
    global connected
    print('{} new connection from {}'.format(time.time(), websocket.remote_address[0]))
    connected.add(websocket)
    consumer_task = asyncio.ensure_future(consumer_handler(websocket))
    producer_task = asyncio.ensure_future(producer_handler(websocket))
    done, pending = await asyncio.wait(
        [consumer_task, producer_task],
        return_when=asyncio.FIRST_COMPLETED,
    )
    for task in pending:
        task.cancel()
async def _routing(self):
    while True:
        self._listener_task = ensure_future(self._recv())
        done, pending = await wait(
            [self._routing_on, self._listener_task, self._send_error],
            return_when=asyncio.FIRST_COMPLETED,
            timeout=self._ensure_alive_timeout)
        if self._routing_on in done:
            self._routing_on = None
            return
        elif self._send_error in done:
            logger.info("WS> Client disconnected while sending: {}".format(
                self._send_error.exception()))
            if not await self._on_disconnected():
                break
        elif self._listener_task in done:
            try:
                message = self._listener_task.result()
                await self._router(message)
            except ProtocolError:
                raise
            except CancelledError as ex:
                pass
            except Exception as ex:
                logger.info("WS> Client disconnected while receiving: {}".format(ex))
                if not await self._on_disconnected():
                    break
        elif self._listener_task in pending:
            logger.info("WS> Client timeout")
            self._listener_task.cancel()
            if not await self._on_disconnected():
                break
async def run_with_reloader(loop, coroutine, cleanup=None, *args, **kwargs):
    """ Run coroutine with reloader """

    clear_screen()
    print("?? Running in debug mode with live reloading")
    print(" (don't forget to disable it for production)")

    # Create watcher
    handler = Handler(loop)
    watcher = Observer()

    # Setup
    path = realpath(os.getcwd())
    watcher.schedule(handler, path=path, recursive=True)
    watcher.start()
    print(" (watching {})".format(path))

    # Run watcher and coroutine together
    done, pending = await asyncio.wait([coroutine, handler.changed],
                                       return_when=asyncio.FIRST_COMPLETED)

    # Cleanup
    cleanup and cleanup()
    watcher.stop()

    for fut in done:
        # If change event, then reload
        if isinstance(fut.result(), Event):
            print("Reloading...")
            reload()
async def wshandler(request):
    ws = web.WebSocketResponse()
    await ws.prepare(request)

    recv_task = None
    tick_task = None
    while 1:
        if not recv_task:
            recv_task = asyncio.ensure_future(ws.receive())
        if not tick_task:
            await tick.acquire()
            tick_task = asyncio.ensure_future(tick.wait())

        done, pending = await asyncio.wait(
            [recv_task, tick_task],
            return_when=asyncio.FIRST_COMPLETED)

        if recv_task in done:
            msg = recv_task.result()
            if msg.tp == web.MsgType.text:
                print("Got message %s" % msg.data)
                ws.send_str("Pressed key code: {}".format(msg.data))
            elif msg.tp == web.MsgType.close or \
                    msg.tp == web.MsgType.error:
                break
            recv_task = None

        if tick_task in done:
            ws.send_str("game loop ticks")
            tick.release()
            tick_task = None

    return ws
async def _on_page_load_event_fired(self, obj: Dict, *, format: str) -> None:
    if format in ('mhtml', 'pdf'):
        await self._scroll_to_bottom()
    done, pending = await asyncio.wait([
        self._evaluate_prerender_ready(),
        self._wait_responses_ready(),
    ], return_when=asyncio.FIRST_COMPLETED)
    for task in pending:
        task.cancel()
    for task in done:
        task.result()  # To trigger exception if any
    status_code = await self.get_status_code()
    if status_code == 304:
        status_code = 200
    if format == 'html':
        html = await self.get_html()
        self._render_future.set_result((html, status_code))
    elif format == 'mhtml':
        self._render_future.set_result((bytes(self._mhtml), status_code))
    elif format == 'pdf':
        data = await self.print_to_pdf()
        self._render_future.set_result((data, status_code))
    elif format == 'jpeg' or format == 'png':
        data = await self.screenshot(format)
        self._render_future.set_result((data, status_code))
def test_wait_first_completed(self):

    def gen():
        when = yield
        self.assertAlmostEqual(10.0, when)
        when = yield 0
        self.assertAlmostEqual(0.1, when)
        yield 0.1

    loop = self.new_test_loop(gen)

    a = asyncio.Task(asyncio.sleep(10.0, loop=loop), loop=loop)
    b = asyncio.Task(asyncio.sleep(0.1, loop=loop), loop=loop)
    task = asyncio.Task(
        asyncio.wait([b, a], return_when=asyncio.FIRST_COMPLETED,
                     loop=loop),
        loop=loop)

    done, pending = loop.run_until_complete(task)
    self.assertEqual({b}, done)
    self.assertEqual({a}, pending)
    self.assertFalse(a.done())
    self.assertTrue(b.done())
    self.assertIsNone(b.result())
    self.assertAlmostEqual(0.1, loop.time())

    # move forward to close generator
    loop.advance_time(10)
    loop.run_until_complete(asyncio.wait([a, b], loop=loop))
def test_wait_really_done(self):
    # there is possibility that some tasks in the pending list
    # became done but their callbacks haven't all been called yet

    @asyncio.coroutine
    def coro1():
        yield

    @asyncio.coroutine
    def coro2():
        yield
        yield

    a = asyncio.Task(coro1(), loop=self.loop)
    b = asyncio.Task(coro2(), loop=self.loop)
    task = asyncio.Task(
        asyncio.wait([b, a], return_when=asyncio.FIRST_COMPLETED,
                     loop=self.loop),
        loop=self.loop)

    done, pending = self.loop.run_until_complete(task)
    self.assertEqual({a, b}, done)
    self.assertTrue(a.done())
    self.assertIsNone(a.result())
    self.assertTrue(b.done())
    self.assertIsNone(b.result())
def handler(self, websocket, path):
    print(" **** debug client connected", websocket.remote_address)
    consumer_task = asyncio.get_event_loop().create_task(
        self.instruction_handler(websocket))
    producer_task = asyncio.get_event_loop().create_task(
        self.uplink_handler(websocket))
    done, pending = yield from asyncio.wait(
        [consumer_task, producer_task],
        return_when=asyncio.FIRST_COMPLETED,
    )
    for task in pending:
        task.cancel()
    print(" **** debug client disconnected", websocket.remote_address)
def run():
    yield from asyncio.wait(
        [db(), sleeper()],
        return_when=asyncio.FIRST_COMPLETED)
def run():
    yield from asyncio.wait(
        [get(), sleeper()],
        return_when=asyncio.FIRST_COMPLETED)
def run():
    yield from asyncio.wait([db(), sleeper()],
                            return_when=asyncio.FIRST_COMPLETED)
async def wait_for_announcement(self) -> Tuple[LESPeer, les.HeadInfo]:
    """Wait for a new announcement from any of our connected peers.

    Returns a tuple containing the LESPeer on which the announcement was
    received and the announcement info.

    Raises StopRequested when LightChain.stop() has been called.
    """
    should_stop = False

    async def wait_for_stop_event():
        nonlocal should_stop
        await self._should_stop.wait()
        should_stop = True

    # Wait for either a new announcement or the _should_stop event.
    done, pending = await asyncio.wait(
        [self._announcement_queue.get(), wait_for_stop_event()],
        return_when=asyncio.FIRST_COMPLETED)
    # The asyncio.wait() call above may return both tasks as done, but never
    # both as pending, although to be future-proof (in case more than 2 tasks
    # are passed in to wait()), we iterate over all pending tasks and cancel
    # all of them.
    for task in pending:
        task.cancel()
    if should_stop:
        raise StopRequested()
    return done.pop().result()
async def recv_multipart(self):
    """
    Read from all the associated sockets.

    :returns: A list of tuples (socket, frames) for each socket that
        returned a result.
    """
    if not self._sockets:
        return []

    results = []

    async def recv_and_store(socket):
        frames = await socket.recv_multipart()
        results.append((socket, frames))

    tasks = [
        asyncio.ensure_future(recv_and_store(socket), loop=self.loop)
        for socket in self._sockets
    ]
    try:
        await asyncio.wait(
            tasks,
            return_when=asyncio.FIRST_COMPLETED,
            loop=self.loop,
        )
    finally:
        for task in tasks:
            task.cancel()

    return results
async def run_forever(self):
    with listener_queue_lock:
        listener_queues.append(self.queue)
    print("Connection opened.")
    handshake_verified = await self.negotiate_connection()
    if not handshake_verified:
        return

    # set of user ids that the client is interested in getting updates for.
    interests = set()
    listener_task = asyncio.ensure_future(self.websocket.recv())
    producer_task = asyncio.ensure_future(self.producer())
    while True:
        done, pending = await asyncio.wait(
            [listener_task, producer_task],
            return_when=asyncio.FIRST_COMPLETED)
        if listener_task in done:
            message = listener_task.result()
            await self.handle_user_request(message)
            listener_task = asyncio.ensure_future(self.websocket.recv())
        if producer_task in done:
            message = producer_task.result()
            await self.handle_queue_message(message)
            producer_task = asyncio.ensure_future(self.producer())
async def _collect(self):
    """Collect tasks that complete in the background."""

    # Main collection loop.
    while True:
        # Block until we have at least one task in the pool or until we are
        # told to shut down, which ever comes first.
        while not self._done and not self._pool:
            async with self._lock:
                self._idle.notify()
                await self._cond.wait()

        # Build a wait set containing all the tasks, plus a watch for
        # new tasks.
        async with self._lock:
            pending = set(task for task in self._pool)
            async with EnsureDone(self._cond.wait(), loop=self._loop) as watch:
                pending.add(watch)
                # NOTE: `self._cond.wait()` releases the lock until we're
                #       notified, so we don't hold the lock while we block
                #       on `asyncio.wait()`.
                self._busy.notify()
                done, pending = await asyncio.wait(
                    pending,
                    return_when=asyncio.FIRST_COMPLETED,
                )
                for task in done:
                    if task is watch:
                        continue
                    self._pool.remove(task)
                    # Log exceptions for crashed tasks.
                    try:
                        task.result()
                    except asyncio.CancelledError:
                        pass
                    except Exception:
                        logging.exception('Task crashed!')
async def fetch_data(self):
    if self.not_done:
        self.done, self.not_done = await asyncio.wait(
            self.not_done, return_when=asyncio.FIRST_COMPLETED)
    if not self.done:
        return None
    return self.done.pop().result()
def process(self):
    # This task is never restarted.
    self.dcss_task = ensure_future(self.dcss_manager.start())

    while True:
        # Discord manager initial setup or it is reconnecting.
        if not self.discord_manager or not self.discord_manager.shutdown:
            # Let the current task finish.
            if self.discord_task and not self.discord_task.done():
                yield from self.discord_task

            # We re-instantiate the manager and create a new websocket.
            self.discord_manager = DiscordManager(self.conf.discord,
                                                  self.dcss_manager)
            self.discord_task = ensure_future(self.discord_manager.start())

        yield from asyncio.wait([self.dcss_task, self.discord_task],
                                return_when=asyncio.FIRST_COMPLETED)

        # We are shutting down the bot.
        if self.dcss_task.done():
            if self.discord_task and not self.discord_task.done():
                yield from self.discord_task
            return
async def _select(pipes, futures, *, loop=None):
    futures = [asyncio.ensure_future(f, loop=loop) for f in futures]
    _, pending = await asyncio.wait([p.wait_empty() for p in pipes] + list(futures),
                                    loop=loop,
                                    return_when=asyncio.FIRST_COMPLETED)
    for p in pending:
        if p not in futures:
            p.cancel()
            try:
                await p
            except Exception:
                pass
    return [p for p in pipes if not p.is_empty()], [f for f in futures if f.done()]
async def consume(self):
    """Consume the queue and post messages in Discord."""
    while not self.ws_running.done():
        task = asyncio.ensure_future(self.get())
        done, pending = await asyncio.wait(
            [task, self.ws_running],
            return_when=asyncio.FIRST_COMPLETED)
        if task in done:
            data = task.result()
            f = asyncio.ensure_future(self.send_message(self.channel_id, {
                "embed": {
                    "title": ("{data[repository][owner_name]}/"
                              "{data[repository][name]} "
                              "{data[status_message]}"
                              ).format(data=data),
                    "type": "rich",
                    "description": ("{data[author_name]} {data[type]} "
                                    "<{data[compare_url]}>"
                                    ).format(data=data),
                    "url": data['build_url']
                }
            }))
            self.futures.append(f)
        else:
            task.cancel()
            break
async def __anext__(self):
    if self._stopped.is_set():
        raise StopAsyncIteration()
    chunk_task = asyncio.ensure_future(self._next_chunk())
    stop_task = asyncio.ensure_future(self._stopped.wait())
    try:
        done, pending = await asyncio.wait(
            [chunk_task, stop_task],
            return_when=asyncio.FIRST_COMPLETED
        )
        for task in pending:
            task.cancel()
        if chunk_task.done():
            try:
                return chunk_task.result()
            except StopAsyncIteration:
                self.end()
                raise
        else:
            raise StopAsyncIteration()
    finally:
        chunk_task.cancel()
        stop_task.cancel()
async def run():
    async with websockets.connect(SMOGON_WEBSOCKET_URI) as websocket:
        await _connect(websocket)
        while True:
            while len(pool) < MAX_PARALLEL_GAMES:
                # Preload connection for battle so that it receives messages
                conn = await _open_connection()

                # Start Smogon battle search
                await websocket.send('|/utm %s' % SMOGON_TEAM)
                await websocket.send('|/search gen71v1')

                # Wait for battle initialization
                msg = ''
                while not '|init|battle' in msg:
                    msg = await websocket.recv()

                # Room id is in the first line of the message
                # of the form >roomid
                m = re.match('>(.+?)\n', msg)
                roomid = m.group(1)

                # Start battle handler with preloaded connection
                bh = BattleHandler(roomid, SMOGON_USERNAME)
                pool.add(asyncio.ensure_future(_battle(conn, bh)))

            # Wait for battle handlers to complete
            done, _ = await asyncio.wait(pool, return_when=asyncio.FIRST_COMPLETED)

            # Remove done from pool
            for d in done:
                pool.remove(d)
async def send_http_request(expr, count=None, attributes=None, critical=True):
    params = {'expr': expr, 'subscription-key': subscription_key}
    if count:
        params['count'] = count
    if attributes:
        params['attributes'] = ','.join(attributes)

    async def shoot():
        async with client_session.get(bop_url, params=params) as resp:
            # logger.info('sending HTTP request: %s' % urllib.parse.unquote(resp.url))
            json = await resp.json()
            if 'entities' in json:
                return json['entities']
            else:
                logger.error('invalid response from server')
                return []

    if critical:
        done, pending = await asyncio.wait([shoot()] * 3,
                                           return_when=asyncio.FIRST_COMPLETED)
    else:
        done, pending = await asyncio.wait([shoot()], timeout=io_time_limit)
    for future in pending:
        future.cancel()
    done = list(done)
    if done:
        return done[0].result()
    return []
def call(self, method, *params):
    msg_id = self.new_id()
    msg = {"id": msg_id, "method": method, "params": params}
    data = "%s\n" % json.dumps(msg)
    print('< %s' % data[:200] + (data[200:] and "...\n"), end='')
    self.writer.write(data.encode())
    try:
        #r = asyncio.ensure_future(self.notifier.wait_for(msg_id))
        r = asyncio.async(self.notifier.wait_for(msg_id))
        yield from asyncio.wait([r, self.notifier.task], timeout=30,
                                return_when=asyncio.FIRST_COMPLETED)
        if self.notifier.task.done():
            raise self.notifier.task.exception()
        data = r.result()
        log = '> %s' % data
        print(log[:100] + (log[100:] and '...'))
    except TimeoutError:
        raise Exception("Request to server timed out.")
    return data
async def setup(self):
    """
    Setup stuff that's async
    """
    task = asyncio.ensure_future(self.client.connect())

    # Wait for hangups to either finish connecting or raise an exception.
    on_connect = asyncio.Future()
    self.client.on_connect.add_observer(lambda: on_connect.set_result(None))
    done, _ = await asyncio.wait(
        (on_connect, task),
        return_when=asyncio.FIRST_COMPLETED
    )
    await asyncio.gather(*done)

    await asyncio.ensure_future(self.get_users_conversations())
    # self.conversation_list.on_event.add_observer(self.on_event)
async def receive_packet(self):
    if self.is_connected:
        if self.open:
            self.log.debug("Receiving at {}".format(self.name))
            # First come first serve receiving
            done, pending = await asyncio.wait([conn.receive() for conn in self.connections],
                                               return_when=asyncio.FIRST_COMPLETED)
            # The first packet is taken
            packet = done.pop().result()
            # Cancel all other tasks
            [task.cancel() for task in pending]
            self.log.debug("Received {} from {}".format(packet, self.name))
            # if self._iip:
            #     await self.close()
            if packet.is_eos:
                await self.close()
                stop_message = "Stopping because {} was received".format(packet)
                self.log.info(stop_message)
                raise StopAsyncIteration(stop_message)
            else:
                return packet
        else:
            raise StopAsyncIteration("stopp")
    else:
        e = PortDisconnectedError(self, 'disc')
        self.log.error(e)
        raise e
async def _next(self):
    if self._async_iter is None:
        # An edge condition
        self._q.put((None, True))
        return
    if self._next_future is None:
        self._next_future = asyncio.ensure_future(self._async_iter.__anext__(),
                                                  loop=self._loop)
    try:
        done, _ = await asyncio.wait([self._stop_future, self._next_future],
                                     loop=self._loop,
                                     return_when=asyncio.FIRST_COMPLETED)
        if self._stop_future in done:
            self._q.put((await self._stop_future, True))
            self._next_future.cancel()
            try:
                await self._next_future
            except CancelledError:
                pass
            finally:
                self._next_future = None
        else:
            nf = self._next_future
            self._next_future = None
            self._q.put((await nf, False))
    except StopAsyncIteration:
        self._q.put((None, True))
    except Exception as exc:
        self._q.put((exc, True))
async def test_rpc(bus: lightbus.BusNode, dummy_api):
    """Full rpc call integration test"""

    async def co_call_rpc():
        asyncio.sleep(0.1)
        return await bus.my.dummy.my_proc.call_async(field='Hello! ??')

    async def co_consume_rpcs():
        return await bus.bus_client.consume_rpcs(apis=[dummy_api])

    (call_task, ), (consume_task, ) = await asyncio.wait(
        [co_call_rpc(), co_consume_rpcs()],
        return_when=asyncio.FIRST_COMPLETED)
    consume_task.cancel()
    assert call_task.result() == 'value: Hello! ??'
async def test_rpc_timeout(bus: lightbus.BusNode, dummy_api):
    """Full rpc call integration test"""

    async def co_call_rpc():
        asyncio.sleep(0.1)
        return await bus.my.dummy.sudden_death.call_async()

    async def co_consume_rpcs():
        return await bus.bus_client.consume_rpcs(apis=[dummy_api])

    (call_task, ), (consume_task, ) = await asyncio.wait(
        [co_call_rpc(), co_consume_rpcs()],
        return_when=asyncio.FIRST_COMPLETED)
    consume_task.cancel()
    with pytest.raises(LightbusTimeout):
        call_task.result()
async def ping_handler(self):
    ping_interval = self.redis_settings['ping_interval']
    ping_timeout = self.redis_settings['ping_timeout']
    if not ping_interval or not ping_timeout:
        return
    latency = 0
    ping = wait = None
    try:
        while True:
            # Sleep before pings
            await asyncio.sleep(ping_interval - latency)
            self.shark.log.debug('redis ping')
            start_time = time.time()
            ping = self.redis.ping()
            wait = asyncio.ensure_future(asyncio.sleep(ping_timeout))
            done, pending = await asyncio.wait(
                [ping, wait], return_when=asyncio.FIRST_COMPLETED)
            if ping in pending:
                # Ping timeout
                ping.cancel()
                self.shark.log.warn('redis ping timeout')
                break
            latency = time.time() - start_time
            self.shark.log.debug('redis pong', latency=round(latency, 3))
    except asyncio.CancelledError:
        # Cancelled by stop()
        if ping:
            ping.cancel()
        if wait:
            wait.cancel()
        if not self._stop:
            self.shark.log.exception('unhandled exception in ping handler')
    except Exception:
        self.shark.log.exception('unhandled exception in ping handler')
    finally:
        await self.stop()
def connector(bot, dispatcher, NICK, CHANNELS, PASSWORD=None):

    @bot.on('client_connect')
    async def connect(**kwargs):
        bot.send('USER', user=NICK, realname=NICK)
        if PASSWORD:
            bot.send('PASS', password=PASSWORD)
        bot.send('NICK', nick=NICK)

        # Don't try to join channels until the server has
        # sent the MOTD, or signaled that there's no MOTD.
        done, pending = await asyncio.wait(
            [bot.wait("RPL_ENDOFMOTD"),
             bot.wait("ERR_NOMOTD")],
            loop=bot.loop,
            return_when=asyncio.FIRST_COMPLETED
        )

        # Cancel whichever waiter's event didn't come in.
        for future in pending:
            future.cancel()

        for channel in CHANNELS:
            bot.send('JOIN', channel=channel)

    @bot.on('client_disconnect')
    async def reconnect(**kwargs):
        # Wait a second so we don't flood
        await asyncio.sleep(5, loop=bot.loop)

        # Schedule a connection when the loop's next available
        bot.loop.create_task(bot.connect())

        # Wait until client_connect has triggered
        await bot.wait("client_connect")

    @bot.on('ping')
    def keepalive(message, **kwargs):
        bot.send('PONG', message=message)

    @bot.on('privmsg')
    def message(host, target, message, **kwargs):
        if host == NICK:
            # don't process messages from the bot itself
            return
        if target == NICK:
            # private message
            dispatcher.handle_private_message(host, message)
        else:
            # channel message
            dispatcher.handle_channel_message(host, target, message)
async def mine_loop(self):
    print("Fetching current challenge")
    current_challenge = await self.get_challenge()
    while True:
        new_challenge = False
        while not new_challenge:
            recv_task = asyncio.ensure_future(
                self.wait_for_new_challenge())
            mine_task = asyncio.ensure_future(
                self.solve_challenge(current_challenge))
            done, pending = await asyncio.wait([recv_task, mine_task],
                                               return_when=asyncio.FIRST_COMPLETED)
            if mine_task in done:
                solution = mine_task.result()
                result = await self.submit(solution[1])
                if result is not None:
                    # we got a new challenge, right after the submission
                    recv_task.cancel()
                    current_challenge = result
                    new_challenge = True
                else:
                    await asyncio.wait([recv_task],
                                       return_when=asyncio.FIRST_COMPLETED)
                    challenge = recv_task.result()
                    if challenge is not None:
                        new_challenge = True
                        current_challenge = challenge
                        continue
                    else:
                        self.solving_thread.alive = False
                        mine_task.cancel()
            if recv_task in done:
                self.solving_thread.alive = False
                mine_task.cancel()
                challenge = recv_task.result()
                if challenge is not None:
                    new_challenge = True
                    current_challenge = challenge
                else:
                    recv_task.cancel()
            asyncio.sleep(1)
async def download(self):
    if self._state == DOWNLOADING:
        return
    self._state = DOWNLOADING

    # Create a downloader for each bytes range.
    for i, bytes_range in enumerate(self._bytes_ranges):
        filename = '{idx:03}_{range[0]!s}-{range[1]!s}.tmp'.format(idx=i, range=bytes_range)
        buffer_file_path = os.path.join(self._download_dir, filename)
        self._downloaders.append(
            RangeDownloader(self._url, bytes_range, buffer_file_path,
                            loop=self._loop, chunk_size=self._chunk_size))

    # Start first single downloader for fast first part response to a client.
    self._start_next_downloaders(1)

    # Waiting for all downloads to complete.
    try:
        while self._state is DOWNLOADING and self._downloads:
            done, self._downloads = await wait(self._downloads, loop=self._loop,
                                               return_when=asyncio.FIRST_COMPLETED)
            for dd in done:  # type: Future
                # Cancel downloading if any of completed downloads is not downloaded.
                if dd.result() is not DOWNLOADED:
                    raise CancelledError()
            self._start_next_downloaders()
            # Notify all readers.
            async with self._state_condition:
                self._state_condition.notify_all()
    except Exception as ex:
        self._debug('Download failed. Error: {!r}.'.format(ex))
        self.cancel()
        # Notify all readers.
        async with self._state_condition:
            self._state_condition.notify_all()
        raise DownloadError(ex)
    else:
        # OK. All done.
        self._state = DOWNLOADED