The following 50 code examples, extracted from open-source Python projects, illustrate how to use tornado.gen.sleep().
def test_notify_n_with_timeout(self):
    """A timed-out waiter must not count against notify(n)."""
    # Register callbacks 0, 1, 2, and 3.  Callback 1 has a timeout.
    # Wait for that timeout to expire, then do notify(2) and make
    # sure everyone runs.  Verifies that a timed-out callback does
    # not count against the 'n' argument to notify().
    c = locks.Condition()
    self.record_done(c.wait(), 0)
    self.record_done(c.wait(timedelta(seconds=0.01)), 1)
    self.record_done(c.wait(), 2)
    self.record_done(c.wait(), 3)

    # Wait for callback 1 to time out.
    yield gen.sleep(0.02)
    self.assertEqual(['timeout'], self.history)

    c.notify(2)
    yield gen.sleep(0.01)
    # Waiter 1 already timed out, so notify(2) wakes waiters 0 and 2.
    # BUG FIX: the original repeated this identical assertion twice in a
    # row; the duplicate added no coverage and has been removed.
    self.assertEqual(['timeout', 0, 2], self.history)

    c.notify()
    self.assertEqual(['timeout', 0, 2, 3], self.history)
def test_garbage_collection(self):
    """Timed-out waiters are occasionally purged from the semaphore queue."""
    sem = locks.Semaphore(value=0)
    doomed = [sem.acquire(timedelta(seconds=0.01)) for _ in range(101)]
    survivor = sem.acquire()
    self.assertEqual(102, len(sem._waiters))

    # Letting the first 101 waiters time out triggers a collection pass.
    yield gen.sleep(0.02)
    self.assertEqual(1, len(sem._waiters))

    # The waiter without a timeout must still be pending.
    self.assertFalse(survivor.done())
    sem.release()
    self.assertTrue(survivor.done())

    # Retrieve every timeout so no "Future exception was never
    # retrieved" warning is emitted.
    for timed_out in doomed:
        self.assertRaises(TimeoutError, timed_out.result)
def test_context_manager_contended(self):
    """Contended acquisitions are granted strictly in request order."""
    sem = locks.Semaphore()
    history = []

    @gen.coroutine
    def f(index):
        # Hold the semaphore briefly so the second coroutine must wait.
        with (yield sem.acquire()):
            history.append('acquired %d' % index)
            yield gen.sleep(0.01)
        history.append('release %d' % index)

    yield [f(i) for i in range(2)]

    # Each worker fully acquires and releases before the next one runs.
    expected_history = [step % i
                        for i in range(2)
                        for step in ('acquired %d', 'release %d')]
    self.assertEqual(expected_history, history)
def test_task_done(self):
    """join() unblocks only once task_done() has been called per item."""
    q = self.queue_class()
    for item in range(100):
        q.put_nowait(item)
    self.accumulator = 0

    @gen.coroutine
    def worker():
        # Loops forever; the test ends when q.join() below resolves.
        while True:
            value = yield q.get()
            self.accumulator += value
            q.task_done()
            yield gen.sleep(random() * 0.01)

    # Two coroutines share the work.
    worker()
    worker()
    yield q.join()
    self.assertEqual(sum(range(100)), self.accumulator)
def test_streaming_until_close_future(self):
    """The streaming callback sees each chunk as the server writes it."""
    server, client = self.make_iostream_pair()
    try:
        chunks = []

        @gen.coroutine
        def reader():
            yield client.read_until_close(streaming_callback=chunks.append)

        @gen.coroutine
        def writer():
            # Two separate writes with a pause, so they arrive as
            # distinct chunks rather than one coalesced read.
            yield server.write(b"1234")
            yield gen.sleep(0.01)
            yield server.write(b"5678")
            server.close()

        @gen.coroutine
        def both():
            yield [reader(), writer()]

        self.io_loop.run_sync(both)
        self.assertEqual(chunks, [b"1234", b"5678"])
    finally:
        server.close()
        client.close()
def test_garbage_collection(self):
    """Timed-out waiters are occasionally purged from the condition queue."""
    c = locks.Condition()
    for _ in range(101):
        c.wait(timedelta(seconds=0.01))
    survivor = c.wait()
    self.assertEqual(102, len(c._waiters))

    # Letting the first 101 waiters time out triggers a collection pass.
    yield gen.sleep(0.02)
    self.assertEqual(1, len(c._waiters))

    # The waiter without a timeout must still be pending.
    self.assertFalse(survivor.done())
    c.notify()
    self.assertTrue(survivor.done())
def test_bad_subscription(io_loop, mocker):
    """A subscription that cannot reach its master stays CLOSED."""
    framework_info = {}
    handler = mocker.Mock()
    # Same mock handles every event type we care about.
    handlers = {event: handler
                for event in (Event.SUBSCRIBED, Event.HEARTBEAT,
                              Event.OFFERS, Event.SHUTDOWN)}
    sub = Subscription(framework_info, 'zk://localhost:2181',
                       '/api/v1/scheduler', handlers,
                       timeout=1, loop=io_loop)
    assert sub.state.current_state == States.CLOSED

    yield sub.start()
    # Give it ample time to (fail to) connect.
    yield gen.sleep(5)
    assert sub.state.current_state == States.CLOSED
def enforce(self, request=None):
    """Record one attempt for *request*, then wait before the next try.

    Raises FailedRetry when the configured try limit is reached or the
    sleep policy returns a negative wait time; returns immediately on
    the first attempt or when no wait is required.
    """
    key = id(request)
    self.timings[key].append(time.time())
    tries = len(self.timings[key])

    # First attempt: nothing to enforce yet.
    if tries == 1:
        return
    if self.try_limit is not None and tries >= self.try_limit:
        raise FailedRetry

    # The sleep policy derives the delay from the attempt timestamps.
    wait_time = self.sleep_func(self.timings[key])
    if wait_time is None or wait_time == 0:
        return
    if wait_time < 0:
        raise FailedRetry

    log.debug("Waiting %d seconds until next try.", wait_time)
    yield gen.sleep(wait_time)
def patch(self, udid):
    # Long-poll endpoint (Python 2 source: note the print statement).
    # The first PATCH for a given id parks here until another request
    # stores a matching result in self.results, or the timeout expires.
    data = tornado.escape.json_decode(self.request.body)
    # NOTE(review): 'id' shadows the builtin; kept as-is to avoid a code change.
    id = data['id']
    timeout = float(data.get('timeout', 20.0))
    print 'Timeout:', timeout
    result = self.results.get(id)
    if result is None:
        # No result yet: register this handler as the waiter and sleep.
        self.results[id] = self
        yield gen.sleep(timeout)
        if self.results.get(id) == self:
            # Still us after the sleep: nobody delivered a result; give up.
            del(self.results[id])
            self.write('null')
            self.finish()
    else:
        # A result was already waiting: deliver it and clear the slot.
        self.write(json.dumps(result))
        self.results.pop(id, None)
def proxy(self, port, path):
    """Proxy a request to rsession, waiting for it to finish starting first.

    Polls the 'starting' flag up to 5 times with capped exponential
    backoff, spawns the rsession process if none exists, then delegates
    to the superclass proxy.
    """
    if not path.startswith('/'):
        path = '/' + path

    # if we're in 'starting' let's wait a while
    for i in range(5):
        if not self.state.get('starting', False):
            break
        # BUG FIX: the original used max(1.4 ** i, 5), which always waits
        # the full 5 seconds (1.4**4 < 5).  min() gives the intended
        # exponential backoff capped at 5 seconds.
        wait_time = min(1.4 ** i, 5)
        self.log.debug('Waiting {} before checking if rstudio is up'.format(wait_time))
        yield gen.sleep(wait_time)
    else:
        # BUG FIX: HTTPError takes the status code as its first positional
        # argument; the original passed the message positionally *and*
        # status_code=500, which raises TypeError instead of a 500.
        raise web.HTTPError(500, 'could not start rsession in time')

    # FIXME: try to not start multiple processes at a time with some locking here
    if 'proc' not in self.state:
        self.log.info('No existing process rsession process found')
        yield self.start_process()

    return (yield super().proxy(self.port, path))
def call(self, fn, *args, **kwargs):
    """Run *fn* under this retry policy as a Tornado coroutine.

    Repeatedly asks self.iter() what to do next — attempt the call,
    sleep, or stop — and yields the terminal outcome via gen.Return.
    """
    self.begin(fn)

    result = NO_RESULT
    exc_info = None
    start_time = now()
    while True:
        # The strategy decides the next action from the last attempt's
        # result/exception and the elapsed time since start.
        do = self.iter(result=result, exc_info=exc_info, start_time=start_time)
        if isinstance(do, DoAttempt):
            try:
                result = yield fn(*args, **kwargs)
                # Success: clear any stale exception before re-consulting iter().
                exc_info = None
                continue
            except Exception:
                # Failure: stash the exception for the next iter() decision.
                result = NO_RESULT
                exc_info = sys.exc_info()
                continue
        elif isinstance(do, DoSleep):
            # Reset attempt state, then wait the prescribed delay.
            result = NO_RESULT
            exc_info = None
            yield self.sleep(do)
        else:
            # Terminal outcome object: hand it back to the caller.
            raise gen.Return(do)
async def keep_alive(self):
    """Constantly emit keepalive events

    So that intermediate proxies don't terminate an idle connection
    """
    self._keepalive = True
    while True:
        await gen.sleep(self.KEEPALIVE_INTERVAL)
        # Re-check after the sleep: another coroutine may have cleared
        # the flag to ask us to stop.
        if not self._keepalive:
            return
        try:
            # lines that start with : are comments
            # and should be ignored by event consumers
            self.write(':keepalive\n\n')
            await self.flush()
        except StreamClosedError:
            # Client went away; nothing left to keep alive.
            return
def match_game(self): multiprocessing.set_start_method('spawn') # TODO restart runners for running games? logger.info('Matchmaker started') while True: wait = gen.sleep(5) starttime = ioloop.IOLoop.current().time() players = yield gamequeue.find().sort([('_id', 1)]).limit(10).to_list(length=10) while len(players) >= 2: random.shuffle(players) p0, p1, players = players[0], players[1], players[2:] p0['token'], p1['token'] = create_token(), create_token() queue_ids = [p0.pop('_id'), p1.pop('_id')] game = {'player0': p0, 'player1': p1, 'turn': 0, 'status': 'new'} insert_result = yield games.insert_one(game) game_idstr = str(insert_result.inserted_id) runner_name = 'runner-%s' % game_idstr logger.info('Launching Process "%s"', runner_name) p = multiprocessing.Process(target=Runner.start_game, args=(game_idstr,), name=runner_name, daemon=True) p.start() # TODO keep track of spawned runner processes yield gamequeue.delete_many({'_id': {'$in': queue_ids}}) endtime = ioloop.IOLoop.current().time() logger.debug('MatchMaker ran for %.3fms', 1000 * (endtime - starttime)) yield wait
def test_server_push(io_loop, host, port):
    """Issue one HTTP/2 GET and report server-push throughput."""
    client = H2Client(io_loop=io_loop)
    yield client.connect(host, port)
    print("Sending HTTP/2 request with Server Push")
    started = time.time()
    response = yield client.get_request("/")
    # Give pushed streams a moment to finish arriving.
    yield gen.sleep(1)
    total_time = client.last_time_data_recvd - started
    print(
        "Pushes Received: {}\nTotal data received: {} bytes\nDuration: {}s\nRate: {} Bps\n".format(
            len(client.pushes),
            client.data_received_size,
            total_time,
            client.data_received_size / total_time,
        )
    )
    client.close_connection()
def mock_coro_factory(return_value=None, side_effect=None):
    """Build a coroutine test double.

    The returned coroutine records whether it was called and with what
    arguments (via .called / .call_args), pauses briefly, optionally
    raises or invokes *side_effect*, and resolves to .return_value.
    """
    @gen.coroutine
    def fake(*args, **kwargs):
        fake.called = True
        fake.call_args = (args, kwargs)
        yield gen.sleep(0.1)
        if side_effect:
            # An Exception instance is raised; anything else is invoked.
            if isinstance(side_effect, Exception):
                raise side_effect
            side_effect(*args, **kwargs)
        return fake.return_value

    fake.called = False
    fake.call_args = ([], {})
    fake.return_value = return_value
    return fake
def test_reverse_proxy_operations(self):
    """register()/unregister() issue POST/DELETE through the proxy API."""
    captured = None

    @gen.coroutine
    def fake_api_request(self, *args, **kwargs):
        # Record the call after a short async pause, mimicking the real API.
        nonlocal captured
        yield gen.sleep(0.1)
        captured = dict(args=args, kwargs=kwargs)

    reverse_proxy = ReverseProxy(
        endpoint_url="http://fake/api",
        api_token="token")
    reverse_proxy._reverse_proxy = Mock(spec=orm.Proxy)
    reverse_proxy._reverse_proxy.api_request = fake_api_request

    yield reverse_proxy.register("/hello/from/me/",
                                 "http://localhost:12312/")
    self.assertEqual(captured["kwargs"]["method"], "POST")

    yield reverse_proxy.unregister("/hello/from/me/")
    self.assertEqual(captured["kwargs"]["method"], "DELETE")