We extracted the following 50 code examples from open-source Python projects to illustrate how to use asyncio.gather().
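Before the collected examples, a minimal sketch of the core pattern they all share may help: asyncio.gather() takes several awaitables, runs them concurrently on the event loop, and returns their results in the order the awaitables were passed (not the order they finish). The coroutine below (fetch_square) and its sleep-based "work" are illustrative placeholders, not taken from any of the quoted projects.

import asyncio

async def fetch_square(n):
    # Stand-in for real I/O work (an HTTP call, a DB query, ...).
    await asyncio.sleep(0.1)
    return n * n

async def main():
    # gather() schedules all three coroutines concurrently and returns
    # their results in argument order, not completion order.
    results = await asyncio.gather(fetch_square(1), fetch_square(2), fetch_square(3))
    print(results)  # [1, 4, 9]

asyncio.run(main())

Note that several of the examples below date from older asyncio versions: the loop= argument to gather() was deprecated in Python 3.8 and removed in 3.10, and asyncio.Task.all_tasks(), @asyncio.coroutine, and yield-from-style coroutines are likewise legacy APIs.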
async def get_dashboard_data(self, player):
    """
    Get player fact numbers, such as the number of finishes, the number of top-3 records, etc.

    :param player: Player instance.
    :type player: pyplanet.apps.core.maniaplanet.models.Player
    :return: dictionary with results.
    """
    # Combine several calls.
    finishes, top_3, records = await asyncio.gather(
        self.get_num_finishes(player),
        self.get_num_top_3(player),
        self.get_num_records(player),
    )
    return dict(
        numbers=dict(
            finishes=finishes,
            top_3=top_3,
            records=records,
        ),
    )
async def get_apps(self, prefetch_values=True):
    """
    Get all the app labels + names for all the settings we can find in our registry.
    Returns a dict with the app label as key and count, name and settings as values.

    :param prefetch_values: Prefetch the values in this call. Defaults to True.
    :return: Dictionary with app entries.
    """
    apps = dict()
    if prefetch_values:
        await asyncio.gather(*[
            s.get_value(refresh=True) for s in self.recursive_settings
        ])
    for setting in self.recursive_settings:
        if setting.app_label not in apps:
            apps[setting.app_label] = dict(
                count=0,
                name=self._instance.apps.apps[setting.app_label].name,
                app=self._instance.apps.apps[setting.app_label],
                settings=list()
            )
        apps[setting.app_label]['count'] += 1
        apps[setting.app_label]['settings'].append(setting)
    return apps
async def get_categories(self, prefetch_values=True):
    """
    Get all the categories we have registered.
    Returns a dict with the category label as key and count, name and settings as values.

    :param prefetch_values: Prefetch the values in this call. Defaults to True.
    :return: Dictionary with category entries.
    """
    cats = dict()
    if prefetch_values:
        await asyncio.gather(*[
            s.get_value(refresh=True) for s in self.recursive_settings
        ])
    for setting in self.recursive_settings:
        if setting.category not in cats:
            cats[setting.category] = dict(
                count=0,
                name=setting.category,
                settings=list()
            )
        cats[setting.category]['count'] += 1
        cats[setting.category]['settings'].append(setting)
    return cats
async def save(self, player, action, values, *args, **kwargs):
    raw_value = values['setting_value_field']

    try:
        await self.setting.set_value(raw_value)
    except SerializationException as e:
        await self.parent.app.instance.chat(
            '$fa0Error with saving setting: {}'.format(str(e)),
            player
        )
    except Exception as e:
        await self.parent.app.instance.chat(
            '$fa0Error with saving setting: {}'.format(str(e)),
            player
        )
    finally:
        await asyncio.gather(
            self.parent.app.instance.chat(
                '$fa0Setting has been saved \'{}\''.format(self.setting.key),
                player
            ),
            self.hide([player.login])
        )
        self.response_future.set_result(self.setting)
        self.response_future.done()
async def set_limit(self, new_limit, disable_announce=False):
    min_limit, max_limit, announce = await asyncio.gather(
        self.setting_min_points.get_value(),
        self.setting_max_points.get_value(),
        self.setting_announce_points_change.get_value(),
    )

    if min_limit > 0 and new_limit < min_limit:
        new_limit = min_limit
    if max_limit > 0 and new_limit > max_limit:
        new_limit = max_limit
    if new_limit <= 0 or not new_limit:
        return

    await self.instance.mode_manager.update_settings({
        'S_MapPointsLimit': int(new_limit)
    })

    if announce and not disable_announce:
        await self.instance.chat(
            '$ff0The points limit has been changed to $fff{}$z$s$ff0.'.format(new_limit)
        )
async def podium_start(self, **kwargs):
    if len(self.jukebox) == 0:
        return
    next = self.jukebox.pop(0)
    message = '$fa0The next map will be $fff{}$z$s$fa0 as requested by $fff{}$z$s$fa0.'.format(
        next['map'].name, next['player'].nickname
    )

    # Try to set the map; if not successful, the map may have been removed while juked!
    try:
        await asyncio.gather(
            self.instance.chat(message),
            self.instance.map_manager.set_next_map(next['map'])
        )
    except Fault as e:
        # It's removed from the server.
        if 'Map not in the selection' in e.faultString or 'Map unknown' in e.faultString:
            await self.instance.chat(
                '$fa0Setting the next map has been canceled because the map is not on the server anymore!'
            )
            # Retry the next map(s).
            await self.podium_start()
        else:
            raise
async def write_map_list(self, player, data, **kwargs):
    setting = settings.MAP_MATCHSETTINGS
    if isinstance(setting, dict) and self.instance.process_name in setting:
        setting = setting[self.instance.process_name]
    if not isinstance(setting, str):
        setting = None

    if not setting and not data.file:
        message = '$ff0Default match settings file not configured in your settings!'
        return await self.instance.chat(message, player)
    if data.file:
        file_name = data.file
    else:
        file_name = setting.format(server_login=self.instance.game.server_player_login)

    file_path = 'MatchSettings/{}'.format(file_name)
    message = '$ff0Match Settings has been saved to the file: {}'.format(file_name)
    await self.instance.map_manager.save_matchsettings(file_path)

    # Send message + reload all maps in memory.
    await asyncio.gather(
        self.instance.chat(message, player),
        self.instance.map_manager.update_list(full_update=True)
    )
async def shuffle(self, player, data, **kwargs):
    setting = settings.MAP_MATCHSETTINGS
    if isinstance(setting, dict) and self.instance.process_name in setting:
        setting = setting[self.instance.process_name]
    if not isinstance(setting, str):
        setting = None

    if not setting:
        message = '$ff0Default match settings file not configured in your settings!'
        return await self.instance.chat(message, player)

    try:
        await self.instance.map_manager.load_matchsettings('MatchSettings/{}'.format(setting))
        message = '$ff0Map list has been shuffled and reloaded from disk!'
    except:
        message = '$ff0Could not shuffle and reload map list.'

    # Send message + reload all maps in memory.
    await asyncio.gather(
        self.instance.chat(message, player),
        self.instance.map_manager.update_list(full_update=True)
    )
async def warn_player(self, player, data, **kwargs):
    try:
        warn_player = await self.instance.player_manager.get_player(login=data.login, lock=False)
        await asyncio.gather(
            show_alert(
                warn_player,
                'You have just been warned! Ask the present admin for further information '
                'and / or potential consequences.',
                size='sm', buttons=None
            ),
            self.instance.chat('$ff0Admin $fff{}$z$s$ff0 has warned $fff{}$z$s$ff0.'.format(
                player.nickname, warn_player.nickname))
        )
    except PlayerNotFound:
        message = '$i$f00Unknown login!'
        await self.instance.chat(message, player.login)
        return
async def querySelectorAll(self, selector: str) -> List['ElementHandle']:
    """Get all elements which match `selector`."""
    remoteObject = await self._rawEvaluate(
        'selector => Array.from(document.querySelectorAll(selector))',
        selector,
    )
    response = await self._client.send('Runtime.getProperties', {
        'objectId': remoteObject.get('objectId', ''),
        'ownProperties': True,
    })
    properties = response.get('result', {})
    result: List[ElementHandle] = []
    releasePromises = [helper.releaseObject(self._client, remoteObject)]
    for prop in properties:
        value = prop.get('value', {})
        if prop.get('enumerable') and value.get('subtype') == 'node':
            result.append(ElementHandle(self._client, value,
                                        self._mouse, self._touchscreen))
        else:
            releasePromises.append(
                helper.releaseObject(self._client, value))
    await asyncio.gather(*releasePromises)
    return result

#: Alias to querySelector
def run(self):
    size = self.queue.qsize()
    print('[*] qsize: {}'.format(size))
    print('[*] test_wildcard_dns_record')
    self.test_wildcard_dns_record()
    for i in range(size):
        task = asyncio.ensure_future(self.dns_query())
        self.tasks.append(task)
    try:
        responses = asyncio.gather(*self.tasks)
        result = self.loop.run_until_complete(responses)
        result = list(filter(lambda r: r is not None, result))
        print('[+] Found {} subdomain'.format(len(result)))
    except Exception as e:
        print(e)
def test_wildcard_dns_record(self):
    global wildcard_dns_record
    ip_dic = {}
    genrandstr = lambda i: ''.join(random.choices(string.ascii_lowercase + string.digits, k=i))
    tasks = [asyncio.ensure_future(self.resolver.query(genrandstr(20) + '.' + self.domain, 'A'))
             for _ in range(6)]
    reqs = asyncio.gather(*tasks)
    result = self.loop.run_until_complete(reqs)
    for r in result:
        if ip_dic.get(r.ip[0]):
            ip_dic[r.ip[0]] += 1
            if ip_dic[r.ip[0]] > 3:
                wildcard_dns_record = r.ip[0]
                print(f'[*] Found wildcard dns record:{wildcard_dns_record}')
                return
        else:
            ip_dic[r.ip[0]] = 1
def _create_data(self):
    # Here we create the following data:
    # 3 instances of MainDocument, named d0, d1 and d2.
    # 2 of these instances have references, one has not.
    r = self.refdoc()
    yield from r.save()
    to_list_field = ['string0', 'string1', 'string2']
    futures = []
    for i in range(3):
        d = self.maindoc(docname='d%s' % i)
        d.docint = i
        d.list_field = to_list_field[:i + 1]
        if i < 2:
            d.ref = r
        f = d.save()
        futures.append(f)

    yield from asyncio.gather(*futures)
def scrape_archives(url, scrape_function, min_date, max_date, user_agent, min_timedelta=None, concurrency=5):
    """
    Scrape the archives of the given URL.
    The min_date and max_date parameters allow restricting the archives to a given period.
    A minimum time delta between two archives can be specified with the min_timedelta parameter.
    The concurrency parameter limits the number of concurrent connections to the web archive.
    """
    # Get the list of archives available for the given url
    archive_timestamps = list_archive_timestamps(url, min_date, max_date, user_agent)

    # Filter the timestamps to have a minimum timedelta between each timestamp
    if min_timedelta and len(archive_timestamps):
        archive_timestamps = timedelta_filter(archive_timestamps, min_timedelta)

    loop = asyncio.get_event_loop()

    # Scrape each archive asynchronously and gather the results
    scraping_task = loop.create_task(
        run_scraping(url, archive_timestamps, scrape_function, concurrency, user_agent))
    try:
        loop.run_until_complete(scraping_task)
    finally:
        loop.close()

    return scraping_task.result()
async def run_scraping(url, timestamps, scrape_function, concurrency, user_agent):
    """
    Run the scraping function asynchronously on the given archives.
    The concurrency parameter limits the number of concurrent connections to the web archive.
    """
    # Use a semaphore to limit the number of concurrent connections to the internet archive
    sem = asyncio.Semaphore(concurrency)

    # Use one session to benefit from connection pooling
    async with aiohttp.ClientSession(headers={'User-Agent': user_agent}) as session:
        # Create scraping coroutines for each archive
        coroutines = [scrape_archive(session, url, timestamp, scrape_function, sem)
                      for timestamp in timestamps]

        # Wait for coroutines to finish and gather the results
        results = await asyncio.gather(*coroutines)

    # Compile each valid scraping result in a dictionary
    return {timestamp: result for timestamp, result in results if result is not None}
def run(self):
    missing_contextes = [
        c
        for spec in self.current.job_specs.values()
        for c in self.current.jobs[spec.name].list_contexts(spec)
        if c not in self.current.statuses
    ]

    loop = asyncio.get_event_loop()
    tasks = [
        loop.create_task(
            self.current.last_commit.maybe_update_status(
                dict(
                    context=context,
                    description='Backed',
                    state='pending',
                )
            )
        )
        for context in missing_contextes
    ]
    yield from asyncio.gather(*tasks)
async def test_release_notify(connection_pool):
    connection_pool._max_conns = 2
    connection_pool._max_times_acquired = 2

    conn1 = await connection_pool.acquire()
    conn2 = await connection_pool.acquire()
    conn3 = await connection_pool.acquire()
    conn4 = await connection_pool.acquire()

    async def release(conn):
        conn.release()

    results = await asyncio.gather(
        *[connection_pool.acquire(), release(conn4)])
    conn4 = results[0]
    assert conn4 is conn2

    await connection_pool.close()
def clear_loop(self):
    def silence_gathered(future):
        try:
            future.result()
        finally:
            print("stopping loop...")
            self.loop.stop()
            print("loop stopped!")

    # cancel lingering tasks
    pending = asyncio.Task.all_tasks(loop=self.loop)
    if pending:
        gathered = asyncio.gather(*pending, loop=self.loop)
        gathered.add_done_callback(silence_gathered)
        gathered.cancel()
    else:
        self.loop.stop()
async def req_hist_data_async(self, *req_list: [object]):
    """Concurrently download historical market data for multiple requests."""
    ibparms_list = (self._hist_data_req_to_args(req) for req in req_list)
    bars_list = await asyncio.gather(*(
        self.reqHistoricalDataAsync(*ibparms) for ibparms in ibparms_list))
    df_list = [ib_insync.util.df(bars) for bars in bars_list]
    xchg_tz_list = await asyncio.gather(*(
        self.hist_data_req_timezone(req) for req in req_list))
    blk_list = []
    for req, df, xchg_tz in zip(req_list, df_list, xchg_tz_list):
        _logger.debug(df.iloc[:3])
        if req.BarSize[-1] in ('d', 'W', 'M'):  # not intraday
            dl_tz = xchg_tz  # dates without timezone, init with xchg_tz.
        else:
            dl_tz = pytz.UTC
        blk = MarketDataBlock(df, symbol=req.Symbol, datatype=req.DataType,
                              barsize=req.BarSize, tz=dl_tz)
        blk.tz_convert(xchg_tz)
        blk_list.append(blk)
    return blk_list
async def polling(self, last_update=None):
    payload = {}

    if last_update:
        # The `offset` param prevents getting duplicate updates
        # from the Telegram API:
        # https://core.telegram.org/bots/api#getupdates
        payload['offset'] = last_update + 1

    updates = await self.api.get_updates(**payload)

    # If the polling request returned at least one update, use its ID
    # to define the offset.
    if len(updates.get('result', [])):
        last_update = updates['result'][-1]['update_id']

    # Handle each new message, send its responses and then request
    # updates again.
    tasks = [self.message_handler(msg) for msg in updates['result']]
    await asyncio.gather(*tasks)

    asyncio.ensure_future(self.polling(last_update))
def main():
    asyncio.set_event_loop(None)

    if args.iocp:
        from asyncio.windows_events import ProactorEventLoop
        loop = ProactorEventLoop()
    else:
        loop = asyncio.new_event_loop()

    sslctx = None
    if args.tls:
        sslctx = test_utils.dummy_ssl_context()

    cache = CacheClient(args.host, args.port, sslctx=sslctx, loop=loop)
    try:
        loop.run_until_complete(
            asyncio.gather(
                *[testing(i, cache, loop) for i in range(args.ntasks)],
                loop=loop))
    finally:
        loop.close()
def test_gather_shield(self):
    child1 = asyncio.Future(loop=self.loop)
    child2 = asyncio.Future(loop=self.loop)
    inner1 = asyncio.shield(child1, loop=self.loop)
    inner2 = asyncio.shield(child2, loop=self.loop)
    parent = asyncio.gather(inner1, inner2, loop=self.loop)
    test_utils.run_briefly(self.loop)
    parent.cancel()
    # This should cancel inner1 and inner2 but not child1 and child2.
    test_utils.run_briefly(self.loop)
    self.assertIsInstance(parent.exception(), asyncio.CancelledError)
    self.assertTrue(inner1.cancelled())
    self.assertTrue(inner2.cancelled())
    child1.set_result(1)
    child2.set_result(2)
    test_utils.run_briefly(self.loop)
def test_one_exception(self):
    a, b, c, d, e = [asyncio.Future(loop=self.one_loop)
                     for i in range(5)]
    fut = asyncio.gather(*self.wrap_futures(a, b, c, d, e))
    cb = test_utils.MockCallback()
    fut.add_done_callback(cb)
    exc = ZeroDivisionError()
    a.set_result(1)
    b.set_exception(exc)
    self._run_loop(self.one_loop)
    self.assertTrue(fut.done())
    cb.assert_called_once_with(fut)
    self.assertIs(fut.exception(), exc)
    # Does nothing
    c.set_result(3)
    d.cancel()
    e.set_exception(RuntimeError())
    e.exception()
def test_one_cancellation(self):
    a, b, c, d, e = [asyncio.Future(loop=self.one_loop)
                     for i in range(5)]
    fut = asyncio.gather(a, b, c, d, e)
    cb = test_utils.MockCallback()
    fut.add_done_callback(cb)
    a.set_result(1)
    b.cancel()
    self._run_loop(self.one_loop)
    self.assertTrue(fut.done())
    cb.assert_called_once_with(fut)
    self.assertFalse(fut.cancelled())
    self.assertIsInstance(fut.exception(), asyncio.CancelledError)
    # Does nothing
    c.set_result(3)
    d.cancel()
    e.set_exception(RuntimeError())
    e.exception()
def test_result_exception_one_cancellation(self):
    a, b, c, d, e, f = [asyncio.Future(loop=self.one_loop)
                        for i in range(6)]
    fut = asyncio.gather(a, b, c, d, e, f, return_exceptions=True)
    cb = test_utils.MockCallback()
    fut.add_done_callback(cb)
    a.set_result(1)
    zde = ZeroDivisionError()
    b.set_exception(zde)
    c.cancel()
    self._run_loop(self.one_loop)
    self.assertFalse(fut.done())
    d.set_result(3)
    e.cancel()
    rte = RuntimeError()
    f.set_exception(rte)
    res = self.one_loop.run_until_complete(fut)
    self.assertIsInstance(res[2], asyncio.CancelledError)
    self.assertIsInstance(res[4], asyncio.CancelledError)
    res[2] = res[4] = None
    self.assertEqual(res, [1, zde, None, 3, None, rte])
    cb.assert_called_once_with(fut)
def test_exception_marking(self):
    # Test for the first line marked "Mark exception retrieved."

    @asyncio.coroutine
    def inner(f):
        yield from f
        raise RuntimeError('should not be ignored')

    a = asyncio.Future(loop=self.one_loop)
    b = asyncio.Future(loop=self.one_loop)

    @asyncio.coroutine
    def outer():
        yield from asyncio.gather(inner(a), inner(b), loop=self.one_loop)

    f = asyncio.async(outer(), loop=self.one_loop)
    test_utils.run_briefly(self.one_loop)
    a.set_result(None)
    test_utils.run_briefly(self.one_loop)
    b.set_result(None)
    test_utils.run_briefly(self.one_loop)
    self.assertIsInstance(f.exception(), RuntimeError)
def main():
    async def job1():
        job1.counter += 1
        if job1.counter == 10:
            future1.cancel()
            return
        print('job1:', job1.counter, datetime.datetime.now())

    async def job2():
        job2.counter += 1
        if job2.counter == 10:
            future2.cancel()
            return
        print('job2:', job2.counter, datetime.datetime.now())

    job1.counter = 0
    job2.counter = -10
    loop = asyncio.get_event_loop()
    future1 = aschedule.every(job1, seconds=1)
    future2 = aschedule.every(job2, seconds=1)
    loop.run_until_complete(asyncio.gather(future1, future2, return_exceptions=True))
def test_every_param_loop(self):
    asyncio.set_event_loop(None)
    # scheduled executions 1, 3, 5, 7, 9
    schedule = self.schedule_manager.every(self.get_coroutine, timedelta(seconds=2),
                                           datetime.now() + timedelta(seconds=1),
                                           loop=self.loop)
    # will be cancelled at
    cancel_in_seconds = 10

    async def cancel_schedule():
        await asyncio.sleep(cancel_in_seconds, loop=self.loop)
        self.schedule_manager.cancel(schedule, running_jobs=True)

    try:
        self.loop.run_until_complete(
            asyncio.gather(cancel_schedule(), schedule.future, loop=self.loop))
    except asyncio.CancelledError:
        pass
    # making sure that all running jobs and the schedule are cancelled
    self.loop.run_until_complete(asyncio.sleep(10, loop=self.loop))
    self.assertEqual(5, self.count)
    asyncio.set_event_loop(self.loop)
def test_once_at_param_coroutine(self):
    start = self.loop.time()
    times = []
    cancel_in_seconds = 10

    async def record_times():
        times.append(round(self.loop.time() - start))

    async def cancel_schedule():
        await asyncio.sleep(cancel_in_seconds)
        # should report an error that the given schedule doesn't belong to this schedule manager
        # as the schedule is completed before this arises
        with self.assertRaises(aschedule.AScheduleException):
            self.schedule_manager.cancel(schedule, running_jobs=True)

    schedule = self.schedule_manager.once_at(record_times, datetime.now() + timedelta(seconds=5))
    self.loop.run_until_complete(
        asyncio.gather(cancel_schedule(), schedule.future))
    # making sure that all running jobs and the schedule are cancelled
    self.loop.run_until_complete(asyncio.sleep(10))
    self.assertEqual([5], times)
def test_once_at_param_loop(self):
    asyncio.set_event_loop(None)
    start = self.loop.time()
    times = []
    cancel_in_seconds = 10

    async def record_times():
        times.append(round(self.loop.time() - start))

    async def cancel_schedule():
        await asyncio.sleep(cancel_in_seconds, loop=self.loop)
        # should report an error that the given schedule doesn't belong to this schedule manager
        # as the schedule is completed before this arises
        with self.assertRaises(aschedule.AScheduleException):
            self.schedule_manager.cancel(schedule, running_jobs=True)

    schedule = self.schedule_manager.once_at(record_times,
                                             datetime.now() + timedelta(seconds=5),
                                             loop=self.loop)
    self.loop.run_until_complete(
        asyncio.gather(cancel_schedule(), schedule.future, loop=self.loop))
    # making sure that all running jobs and the schedule are cancelled
    self.loop.run_until_complete(asyncio.sleep(10, loop=self.loop))
    self.assertEqual([5], times)
    asyncio.set_event_loop(self.loop)
async def req_handle():
    ua = UserAgent()

    def do_req(u):
        return requests.get(u, headers={'user-agent': ua.random})

    with concurrent.futures.ThreadPoolExecutor(max_workers=max_threads) as executor:
        loop = asyncio.get_event_loop()
        futures = [
            loop.run_in_executor(executor, do_req, HOST + w)
            for w in words
        ]
        for response in await asyncio.gather(*futures):
            if response.status_code < 400:
                if response.url[-1] == '/':
                    print("--DIR: %s - %i" % (response.url, response.status_code))
                else:
                    print("%s - %i (%i bytes)" % (response.url, response.status_code, len(response.content)))
        pass
async def _setup(self):
    if self._session is None:
        logger.debug("Creating session")
        self._session = aiohttp.ClientSession()

    # this will allow requests to be made starting from this point
    self.setup.early.set_result(True)

    init_tasks = self.init_tasks
    if callable(init_tasks):
        init_tasks = init_tasks()

    if init_tasks:
        logger.debug("Starting init tasks")
        await asyncio.gather(*init_tasks)

    self.setup.set_result(True)
def test_call_complicated(self):
    async def call_basic():
        print("Calling...")

        class _Args:
            dbus_signature = "a{sv}"
            dbus_value = {"d": 10, "c": "a1234"}

        value = await adbus.client.call(
            self.service,
            "adbus.test",
            "/adbus/test/Tests1",
            "adbus.test",
            "ComplicatedArgs",
            args=[_Args],
            timeout_ms=6000
        )
        print(f"Returned {value}")

    self.loop.run_until_complete(asyncio.gather(call_basic(), ))
def broadcastToMiners(self, block):
    self.log.info("broadcasting block to miners: " + repr(block))
    coros = [peer.sendMinerBlock(block)
             for id, peer in self.peers.items() if peer.role == 'M']
    f = gather(*coros, loop=self.loop)
    loop = self.loop
    if loop.is_running():
        future = asyncio.run_coroutine_threadsafe(f, loop)
        res = future.result(30)
    else:
        res = loop.run_until_complete(f)
    return res

# send block to peers
def create_connection(event_loop):
    conns = []

    @asyncio.coroutine
    def f(*args, **kwargs):
        kwargs.setdefault('loop', event_loop)
        conn = yield from aiossdb.create_connection(*args, **kwargs)
        conns.append(conn)
        return conn

    try:
        yield f
    finally:
        waiters = []
        while conns:
            conn = conns.pop()
            conn.close()
            waiters.append(conn.wait_closed())
        if waiters:
            event_loop.run_until_complete(asyncio.gather(*waiters, loop=event_loop))
def create_connection_pool(event_loop):
    pools = []

    @asyncio.coroutine
    def f(*args, **kwargs):
        kwargs.setdefault('loop', event_loop)
        pool = yield from aiossdb.create_pool(*args, **kwargs)
        pools.append(pool)
        return pool

    try:
        yield f
    finally:
        waiters = []
        while pools:
            conn = pools.pop()
            conn.close()
            waiters.append(conn.wait_closed())
        if waiters:
            event_loop.run_until_complete(asyncio.gather(*waiters, loop=event_loop))
def run(self): """This is run for each step in a sweep.""" for dig in self.digitizers: dig.acquire() await asyncio.sleep(0.75) if not self.cw_mode: for awg in self.awgs: awg.run() # Wait for all of the acquisitions to complete timeout = 10 try: await asyncio.gather(*[dig.wait_for_acquisition(timeout) for dig in self.digitizers]) except Exception as e: logger.error("Received exception %s in run loop. Bailing", repr(e)) self.shutdown() sys.exit(0) for dig in self.digitizers: dig.stop() if not self.cw_mode: for awg in self.awgs: awg.stop()
async def daemon():
    try_init_cgroup()

    async with VJ4Session(config['server_url']) as session:
        while True:
            try:
                await session.login_if_needed(config['uname'], config['password'])
                done, pending = await wait([do_judge(session), do_noop(session)],
                                           return_when=FIRST_COMPLETED)
                for task in pending:
                    task.cancel()
                await gather(*done)
            except Exception as e:
                logger.exception(e)
            logger.info('Retrying after %d seconds', RETRY_DELAY_SEC)
            await sleep(RETRY_DELAY_SEC)
def test_on_event(client):
    """Test event handler registration and removal."""
    mock_calls = []

    def callback(event, data):
        """Mock callback."""
        mock_calls.append((event, data))

    remove = client.on('location:updated', callback)
    client._handle_event('location:updated', 'mock_data_1')
    tasks = asyncio.Task.all_tasks(client.loop)
    client.loop.run_until_complete(asyncio.gather(*tasks, loop=client.loop))
    assert len(mock_calls) == 1
    assert mock_calls[0] == ('location:updated', 'mock_data_1')

    mock_calls = []
    remove()
    client._handle_event('location:updated', 'mock_data_1')
    tasks = asyncio.Task.all_tasks(client.loop)
    client.loop.run_until_complete(asyncio.gather(*tasks, loop=client.loop))
    assert len(mock_calls) == 0
async def _update_user_reputation(database_config, push_urls, signing_key, reviewee_id):
    con = await asyncpg.connect(**database_config)
    score, count, avg, _ = await calculate_user_reputation(con, reviewee_id)
    await con.close()
    body = json.dumps({
        "toshi_id": reviewee_id,
        "review_count": count,
        "average_rating": avg,
        "reputation_score": score
    })
    address = private_key_to_address(signing_key)
    futs = []
    for push_url in push_urls:
        futs.append(do_push(push_url, body, address, signing_key, reviewee_id))
    await asyncio.gather(*futs)
def data_model_csv():
    path_to = join(settings.DATA_PATH, "incoming")
    filenames = multi_filenames(path_to_history=path_to, csv=True)
    cnt = len(filenames)
    batch_size = int(cnt / settings.CPUS)
    diff = cnt - (settings.CPUS * batch_size)

    def start_loop(loop, filenames):
        set_event_loop(loop)
        loop.run_until_complete(gather(*[make_initial_file(filename=filename)
            for filename in filenames], return_exceptions=True))

    processes = []
    for cpu in range(settings.CPUS):
        if (cpu + 1) == settings.CPUS:
            p = Process(target=start_loop, args=(new_event_loop(),
                filenames[cpu * batch_size:(cpu + 1) * batch_size + diff]))
        else:
            p = Process(target=start_loop, args=(new_event_loop(),
                filenames[cpu * batch_size:(cpu + 1) * batch_size]))
        processes.append(p)
        p.start()

    for p in processes:
        p.join()
def read_failing(filenames, path_to, loop, list_failing):
    def start_loop(loop, filenames):
        set_event_loop(loop)
        loop.run_until_complete(gather(*[gather_bad_file(filename=filename,
            path_to=path_to, list_failing=list_failing) for filename in filenames]))

    cnt = len(filenames)
    batch_size = int(cnt / settings.CPUS)
    diff = cnt - (settings.CPUS * batch_size)

    processes = []
    for cpu in range(settings.CPUS):
        if (cpu + 1) == settings.CPUS:
            p = Process(target=start_loop, args=(new_event_loop(),
                filenames[cpu * batch_size:(cpu + 1) * batch_size + diff]))
        else:
            p = Process(target=start_loop, args=(new_event_loop(),
                filenames[cpu * batch_size:(cpu + 1) * batch_size]))
        processes.append(p)
        p.start()

    for p in processes:
        p.join()

    return list_failing
def data_checker(loop):
    list_failing = []
    p = join(settings.DATA_PATH, "incoming")
    filenames = multi_filenames(path_to_history=p, csv=True)

    list_failing = read_failing(filenames=filenames, path_to=p,
        loop=loop, list_failing=list_failing)
    if settings.SHOW_DEBUG:
        print("Failing symbols: {}\n".format(list_failing))

    cnt = len(list_failing)
    print("Failing number: {}\n".format(cnt))

    if (cnt > 0) & (cnt < 10):
        subject = "Failing datafiles: {}".format(cnt)
        message = "{0}\n\n".format(list_failing)
        sender = settings.DEFAULT_FROM_EMAIL
        send_mail(subject, message, sender, settings.NOTIFICATIONS_EMAILS)

        loop.run_until_complete(gather(*[clean_failed_file(path_to=p,
            file_name=file_name) for file_name in list_failing],
            return_exceptions=True))
async def bootstrap(self):
    try:
        self.log.warning('Starting bootstrap phase 1.')
        await self.bootstrap_one()
    except CancelledError:
        raise
    except Exception:
        self.log.exception('An exception occurred during bootstrap phase 1.')

    try:
        self.log.warning('Starting bootstrap phase 2.')
        await self.bootstrap_two()
    except CancelledError:
        raise
    except Exception:
        self.log.exception('An exception occurred during bootstrap phase 2.')

    self.log.warning('Starting bootstrap phase 3.')
    unknowns = list(spawns.unknown)
    shuffle(unknowns)
    tasks = (self.try_again(point) for point in unknowns)
    await gather(*tasks, loop=LOOP)
    self.log.warning('Finished bootstrapping.')
async def bootstrap_one(self):
    async def visit_release(worker, num, *args):
        async with self.coroutine_semaphore:
            async with worker.busy:
                point = get_start_coords(num, *args)
                self.log.warning('start_coords: {}', point)
                self.visits += await worker.bootstrap_visit(point)

    if bounds.multi:
        areas = [poly.polygon.area for poly in bounds.polygons]
        area_sum = sum(areas)
        percentages = [area / area_sum for area in areas]
        tasks = []
        for i, workers in enumerate(percentage_split(
                self.workers, percentages)):
            grid = best_factors(len(workers))
            tasks.extend(visit_release(w, n, grid, bounds.polygons[i])
                         for n, w in enumerate(workers))
    else:
        tasks = (visit_release(w, n) for n, w in enumerate(self.workers))
    await gather(*tasks, loop=LOOP)
async def on_record_change(self, e):
    rdoc = e['value']
    for key, value in self.query.items():
        if rdoc[key] != value:
            return
    if rdoc['tid']:
        show_status, tdoc = await self.rdoc_contest_visible(rdoc)
        if not show_status:
            return
    # TODO(iceboy): projection.
    udoc, pdoc = await asyncio.gather(user.get_by_uid(rdoc['uid']),
                                      problem.get(rdoc['domain_id'], rdoc['pid']))
    # check permission for visibility: hidden problem
    if pdoc.get('hidden', False) and (pdoc['domain_id'] != self.domain_id
                                      or not self.has_perm(builtin.PERM_VIEW_PROBLEM_HIDDEN)):
        pdoc = None
    self.send(html=self.render_html('record_main_tr.html', rdoc=rdoc, udoc=udoc, pdoc=pdoc))
async def post_change_mail(self, *, current_password: str, mail: str):
    validator.check_mail(mail)
    udoc, mail_holder_udoc = await asyncio.gather(
        user.check_password_by_uid(self.user['_id'], current_password),
        user.get_by_mail(mail))
    # TODO(twd2): raise other errors.
    if not udoc:
        raise error.CurrentPasswordError(self.user['uname'])
    if mail_holder_udoc:
        raise error.UserAlreadyExistError(mail)
    rid, _ = await token.add(token.TYPE_CHANGEMAIL,
                             options.changemail_token_expire_seconds,
                             uid=udoc['_id'], mail=mail)
    await self.send_mail(mail, 'Change Email', 'user_changemail_mail.html',
                         url=self.reverse_url('user_changemail_with_code', code=rid),
                         uname=udoc['uname'])
    self.render('user_changemail_mail_sent.html')
async def _post_judge(handler, rdoc):
    accept = rdoc['status'] == constant.record.STATUS_ACCEPTED
    bus.publish_throttle('record_change', rdoc, rdoc['_id'])
    post_coros = list()
    # TODO(twd2): ignore no-effect statuses like system error, ...
    if rdoc['type'] == constant.record.TYPE_SUBMISSION:
        if accept:
            post_coros.append(_send_ac_mail(handler, rdoc))
        if rdoc['tid']:
            post_coros.append(contest.update_status(rdoc['domain_id'], rdoc['tid'], rdoc['uid'],
                                                    rdoc['_id'], rdoc['pid'], accept, rdoc['score']))
        if not rdoc.get('rejudged'):
            if await problem.update_status(rdoc['domain_id'], rdoc['pid'], rdoc['uid'],
                                           rdoc['_id'], rdoc['status']):
                if accept:
                    # TODO(twd2): enqueue rdoc['pid'] to recalculate rp.
                    await problem.inc(rdoc['domain_id'], rdoc['pid'], 'num_accept', 1)
                    post_coros.append(domain.inc_user(rdoc['domain_id'], rdoc['uid'], num_accept=1))
        else:
            # TODO(twd2): enqueue rdoc['pid'] to recalculate rp.
            await job.record.user_in_problem(rdoc['uid'], rdoc['domain_id'], rdoc['pid'])
            post_coros.append(job.difficulty.update_problem(rdoc['domain_id'], rdoc['pid']))
    await asyncio.gather(*post_coros)
async def gather_map(map, coro, *args, **kwargs):
    _keys = []
    _coros = []
    for key, value in map:
        _keys.append(key)
        _coros.append(coro(value, *args, **kwargs))
    _results = await asyncio.gather(*_coros)
    return zip(_keys, _results)
def _atexit_handler():
    """Wait for completion of the event loop."""
    loop = asyncio.get_event_loop()
    pending = asyncio.Task.all_tasks()
    loop.run_until_complete(asyncio.gather(*pending))
    loop.close()