We extracted the following 49 code examples from open-source Python projects to illustrate how to use asyncio.get_event_loop().
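Before the project samples, here is a minimal self-contained sketch of the pattern most of them share: obtain the loop, schedule a coroutine, and drive it to completion. The greet coroutine is invented for illustration; note that on Python 3.10+ this module-level use of get_event_loop() is deprecated in favor of asyncio.run().

import asyncio

async def greet():
    # Stand-in coroutine; any awaitable works here.
    await asyncio.sleep(0.1)
    return 'hello'

# Creates a new loop in the main thread if none is set yet.
loop = asyncio.get_event_loop()
try:
    result = loop.run_until_complete(greet())
    print(result)  # 'hello'
finally:
    loop.close()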
def kas(argv):
    """ The main entry point of kas. """
    create_logger()

    parser = kas_get_argparser()
    args = parser.parse_args(argv)

    if args.debug:
        logging.getLogger().setLevel(logging.DEBUG)

    logging.info('%s %s started', os.path.basename(sys.argv[0]), __version__)

    loop = asyncio.get_event_loop()
    for sig in (signal.SIGINT, signal.SIGTERM):
        loop.add_signal_handler(sig, interruption)
    atexit.register(_atexit_handler)

    for plugin in getattr(kasplugin, 'plugins', []):
        if plugin().run(args):
            return

    parser.print_help()
def waitFor(self, selectorOrFunctionOrTimeout: Union[str, int, float],
            options: dict = None, **kwargs: Any) -> Awaitable:
    """Wait until `selectorOrFunctionOrTimeout`."""
    if options is None:
        options = dict()
    options.update(kwargs)
    if isinstance(selectorOrFunctionOrTimeout, (int, float)):
        fut: Awaitable[None] = asyncio.ensure_future(
            asyncio.sleep(selectorOrFunctionOrTimeout))
        return fut
    if not isinstance(selectorOrFunctionOrTimeout, str):
        fut = asyncio.get_event_loop().create_future()
        fut.set_exception(TypeError(
            'Unsupported target type: ' + str(type(selectorOrFunctionOrTimeout))
        ))
        return fut
    if ('=>' in selectorOrFunctionOrTimeout or
            selectorOrFunctionOrTimeout.strip().startswith('function')):
        return self.waitForFunction(selectorOrFunctionOrTimeout, options)
    return self.waitForSelector(selectorOrFunctionOrTimeout, options)
def toTaskWCb(func):
    """Decorator factory: wrap coroutine function ``func`` so that calling
    the wrapped version schedules it as a task on the event loop and
    attaches ``callback`` as its done-callback.
    """
    def makeUp(callback):
        def makeUps(*args, **kwargs):
            eventLoop = asyncio.get_event_loop()
            future = eventLoop.create_task(func(*args, **kwargs))
            future.add_done_callback(callback)
            return future
        return makeUps
    return makeUp
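A brief usage sketch for the decorator factory above. The fetch coroutine and on_done callback are hypothetical; the call chain follows the nesting toTaskWCb(func)(callback)(*args):

import asyncio

async def fetch(url):
    # Hypothetical coroutine standing in for real work.
    await asyncio.sleep(0.1)
    return url

def on_done(task):
    # Hypothetical done-callback.
    print('finished:', task.result())

loop = asyncio.get_event_loop()
task = toTaskWCb(fetch)(on_done)('http://example.com')
loop.run_until_complete(task)  # prints: finished: http://example.com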
def repos_fetch(config, repos):
    """
    Fetches the list of repositories to the kas_work_dir.
    """
    tasks = []
    for repo in repos:
        if not hasattr(asyncio, 'ensure_future'):
            # Fallback for Python < 3.4.4. Note: the 'asyncio.async' spelling
            # only parses on Python < 3.7, where 'async' is not yet a keyword.
            # pylint: disable=no-member,deprecated-method
            task = asyncio.async(_repo_fetch_async(config, repo))
        else:
            task = asyncio.ensure_future(_repo_fetch_async(config, repo))
        tasks.append(task)

    loop = asyncio.get_event_loop()
    loop.run_until_complete(asyncio.wait(tasks))

    for task in tasks:
        if task.result():
            sys.exit(task.result())
def __init__(
    self,
    loop=None,
    max_frame_size=None,
    bakery_client=None,
    jujudata=None,
):
    '''Initialize a connector that will use the given parameters
    by default when making a new connection'''
    self.max_frame_size = max_frame_size
    self.loop = loop or asyncio.get_event_loop()
    self.bakery_client = bakery_client
    self._connection = None
    self.controller_name = None
    self.model_name = None
    self.jujudata = jujudata or FileJujuData()
def __init__(self, client=None, download_strategy=None, request_strategy=None):
    if not client:
        # Get the event loop and initialize a client session if not provided
        self.loop = asyncio.get_event_loop()
        self.client = aiohttp.ClientSession(loop=self.loop)
    else:
        # Or grab the event loop from the client session
        self.loop = client._loop
        self.client = client

    # Configuration objects managing download and request strategies
    self._download_strategy = download_strategy or DownloadStrategy()  # chunk_size, home, skip_cached
    self._request_strategy = request_strategy or Lenient()  # concurrent, max_attempts, timeout

    # Bounded semaphore guards how many requests can run concurrently
    self._main_semaphore = asyncio.BoundedSemaphore(self._request_strategy.concurrent)
def defcallback(task):
    """
    Not-so-simple-callback: log what happened to STDOUT
    and schedule any task dependency.
    """
    if task.cancelled():
        print(f'[task {id(task)}] was cancelled :-(')
    elif task.exception() is not None:
        ex = task.exception()
        ex_type = ex.__class__.__name__
        print(f'[task {id(task)}] raised "{ex_type}({ex})"')
    elif task.done():
        print(f'[task {id(task)}] returned {task.result()}')
        if hasattr(task, 'children'):
            loop = asyncio.get_event_loop()
            for coroutine in task.children:
                print(f'[task {id(task)}] scheduling child coroutine')
                task = loop.create_task(coroutine)
                task.add_done_callback(defcallback)
    else:
        print(f'[task {id(task)}]: we do not know what happened :-\\')
async def send(self, method: str, params: dict = None) -> dict:
    """Send message to the connected session."""
    self._lastId += 1
    _id = self._lastId
    msg = json.dumps(dict(id=_id, method=method, params=params))

    callback = asyncio.get_event_loop().create_future()
    self._callbacks[_id] = callback
    callback.method: str = method  # type: ignore

    if not self._connection:
        raise NetworkError('Connection closed.')
    await self._connection.send('Target.sendMessageToTarget', {
        'sessionId': self._sessionId,
        'message': msg,
    })
    return await callback
def test_make_call(self):
    connection = AsyncConnection(
        TEST_USERNAME, TEST_SECRET, EMARSYS_URI
    )
    with aioresponses() as m:
        m.get(
            urljoin(EMARSYS_URI, 'api/v2/settings'),
            status=200,
            payload=EMARSYS_SETTINGS_RESPONSE
        )
        coroutine = connection.make_call('GET', 'api/v2/settings')
        loop = asyncio.get_event_loop()
        response = loop.run_until_complete(coroutine)
        assert response == EMARSYS_SETTINGS_RESPONSE
def monitor():
    """Wrapper to call console with a loop."""
    devicelist = (
        {
            "address": "3c4fc5",
            "cat": 0x05,
            "subcat": 0x0b,
            "firmware": 0x00
        },
        {
            "address": "43af9b",
            "cat": 0x02,
            "subcat": 0x1a,
            "firmware": 0x00
        }
    )
    log = logging.getLogger(__name__)
    loop = asyncio.get_event_loop()
    # Was asyncio.async(...) in the original; that spelling no longer
    # parses on Python 3.7+, where 'async' is a keyword.
    asyncio.ensure_future(console(loop, log, devicelist))
    loop.run_forever()
def StartChunkRequesting(self):
    """Start running chunk selection algorithm"""
    if self._chunk_selction_handle is not None:
        # Error in program logic somewhere
        raise Exception

    # Schedule the execution of selection alg
    if self.vod:
        self._chunk_selction_handle = asyncio.get_event_loop().call_later(
            1 / self._selection_rps, self.greedy_chunk_request)
        return

    if self.live and not self.live_src:
        self._chunk_selction_handle = asyncio.get_event_loop().call_later(
            1 / self._selection_rps, self.greedy_chunk_request)
    else:
        self._chunk_selction_handle = asyncio.get_event_loop().call_later(
            1 / self._selection_rps, self.ChunkRequest)
def main():
    logging.basicConfig(level=logging.INFO, format='%(asctime)s %(message)s')
    logging.info("LEDBAT TEST SINK starting")

    loop = asyncio.get_event_loop()
    listen = loop.create_datagram_endpoint(PeerProtocol, local_addr=("0.0.0.0", 6778))
    transport, protocol = loop.run_until_complete(listen)

    if os.name == 'nt':
        def wakeup():
            # Call again later
            loop.call_later(0.5, wakeup)
        loop.call_later(0.5, wakeup)

    try:
        loop.run_forever()
    except KeyboardInterrupt:
        pass
def __init__(self, args):
    self._peer_addr = (args.target_ip, 6778)
    self._transport = None
    self._send_handle = None
    self._stat_handle = None
    self._ledbat = LEDBAT()
    self._loop = asyncio.get_event_loop()

    self._next_id = 1
    self._in_flight = set()
    self._ret_control = collections.deque(5*[None], 5)

    self._start_time = None
    self._int_time = None

    self._sent_data = 0
    self._int_data = 0
    self._num_retrans = 0
    self._int_retrans = 0
    self._delays = collections.deque(10*[None], 10)
def main(args):
    logging.basicConfig(level=logging.INFO, format='%(asctime)s %(message)s')
    logging.info("LEDBAT TEST SOURCE starting. Target: {}".format(args.target_ip))

    loop = asyncio.get_event_loop()
    listen = loop.create_datagram_endpoint(lambda: PeerProtocol(args),
                                           local_addr=("0.0.0.0", 6778))
    transport, protocol = loop.run_until_complete(listen)

    if os.name == 'nt':
        def wakeup():
            # Call again later
            loop.call_later(0.5, wakeup)
        loop.call_later(0.5, wakeup)

    try:
        loop.run_forever()
    except KeyboardInterrupt:
        pass
def HandleRequest(self, msg_request):
    """Handle incoming REQUEST message"""
    for x in range(msg_request.start_chunk, msg_request.end_chunk + 1):
        # Ignore requests for discarded chunks
        if x <= self._swarm._last_discarded_id:
            continue
        self.set_requested.add(x)
        # TODO: We might want a more intelligent ACK mechanism than this,
        # but this works well for now
        self.set_sent.discard(x)

    if self._logger.isEnabledFor(logging.DEBUG):
        logging.debug("FROM > {0} > REQUEST: {1}".format(self._peer_num, msg_request))

    # Try to send some data
    if self._sending_handle is None:
        self._sending_handle = asyncio.get_event_loop().call_soon(self.SendRequestedChunks)
def rconCmd(cmd):
    host = "localhost"
    port = 27015
    try:
        with open("/tmp/factorioRcon", "r") as phraseFile:
            phrase = phraseFile.readline().strip()
        cmd = ' '.join(cmd)
        loop = asyncio.get_event_loop()
        conn = RconConnection(host, port, phrase)
        resp = loop.run_until_complete(conn.exec_command(cmd))
        print(resp, end='')
    except FileNotFoundError:
        print("Cannot find the rcon password. Is the server running?")
        sys.exit(1)
def __init__(self, hostname="localhost", port="8080", processor=None):
    self.hostname = hostname
    self.port = port
    self.processor = processor

    factory = WebSocketServerFactory(u"ws://" + hostname + u":" + str(port))
    protocol = EventProtocol
    protocol.processor = processor
    protocol.app = self
    factory.protocol = protocol

    self.loop = asyncio.get_event_loop()
    self.server = self.loop.create_server(factory, '0.0.0.0', port)
async def loop(self):
    """
    Return bot's main loop as coroutine. Use with asyncio.

    :Example:
    >>> loop = asyncio.get_event_loop()
    >>> loop.run_until_complete(bot.loop())

    or

    >>> loop = asyncio.get_event_loop()
    >>> loop.create_task(bot.loop())
    """
    self._running = True
    while self._running:
        updates = await self.api_call(
            'getUpdates',
            offset=self._offset + 1,
            timeout=self.api_timeout
        )
        self._process_updates(updates)
def run_webhook(self, webhook_url, **options):
    """
    Convenience method for running bots in webhook mode

    :Example:
    >>> if __name__ == '__main__':
    >>>     bot.run_webhook(webhook_url="https://yourserver.com/webhooktoken")

    Additional documentation on https://core.telegram.org/bots/api#setwebhook
    """
    loop = asyncio.get_event_loop()
    loop.run_until_complete(self.set_webhook(webhook_url, **options))
    if webhook_url:
        url = urlparse(webhook_url)
        app = self.create_webhook_app(url.path, loop)
        host = os.environ.get('HOST', '0.0.0.0')
        port = int(os.environ.get('PORT', 0)) or url.port
        web.run_app(app, host=host, port=port)
def start(self):
    """
    Called by SocketShark to initialize the server
    and prepare & run SocketShark.
    """
    async def serve(websocket, path):
        client = Client(self.shark, websocket)
        await client.consumer_handler()

    config = self.shark.config
    loop = asyncio.get_event_loop()
    loop.run_until_complete(self.shark.prepare())
    ssl_context = self.shark.get_ssl_context()
    start_server = websockets.serve(serve, config['WS_HOST'],
                                    config['WS_PORT'], ssl=ssl_context)
    self.server = loop.run_until_complete(start_server)
    self.shark.signal_ready()
    loop.run_until_complete(self.shark.run())
    loop.run_forever()
    loop.run_until_complete(self.shutdown())
    self.shark.signal_shutdown()
def scrape_archives(url, scrape_function, min_date, max_date, user_agent,
                    min_timedelta=None, concurrency=5):
    """
    Scrape the archives of the given URL.

    The min_date and max_date parameters restrict the archives to a given
    period. A minimum time delta between two archives can be specified with
    the min_timedelta parameter. The concurrency parameter limits the number
    of concurrent connections to the web archive.
    """
    # Get the list of archives available for the given url
    archive_timestamps = list_archive_timestamps(url, min_date, max_date, user_agent)

    # Filter the timestamps to keep a minimum timedelta between each timestamp
    if min_timedelta and len(archive_timestamps):
        archive_timestamps = timedelta_filter(archive_timestamps, min_timedelta)

    loop = asyncio.get_event_loop()
    # Scrape each archive asynchronously and gather the results
    scraping_task = loop.create_task(run_scraping(url, archive_timestamps,
                                                  scrape_function, concurrency,
                                                  user_agent))
    try:
        loop.run_until_complete(scraping_task)
    finally:
        loop.close()
    return scraping_task.result()
def run(self):
    missing_contextes = [
        c
        for spec in self.current.job_specs.values()
        for c in self.current.jobs[spec.name].list_contexts(spec)
        if c not in self.current.statuses
    ]

    loop = asyncio.get_event_loop()
    tasks = [
        loop.create_task(
            self.current.last_commit.maybe_update_status(
                dict(
                    context=context,
                    description='Backed',
                    state='pending',
                )
            )
        )
        for context in missing_contextes
    ]
    yield from asyncio.gather(*tasks)
def __init__(self, name='GenericService', spec=None, plugins=None, config=None,
             parser=None, serializer=None, base_path='', loop=None, logger=None):
    self._plugins = []
    self.logger = logger or logging.getLogger('serviceClient.{}'.format(name))
    self.name = name
    self.spec = spec or {}
    self.add_plugins(plugins or [])
    self.config = config or {}
    self.parser = parser or (lambda x, *args, **kwargs: x)
    self.serializer = serializer or (lambda x, *args, **kwargs: x)
    self.base_path = base_path
    self.loop = loop or get_event_loop()
    self.connector = TCPConnector(loop=self.loop, **self.config.get('connector', {}))
    self.session = ClientSession(connector=self.connector, loop=self.loop,
                                 response_class=self.create_response,
                                 **self.config.get('session', {}))
def __init__(self, ip, port, repository: Repository):
    self._ip = ip
    self._port = port
    self._loop = asyncio.get_event_loop()
    self._client_protocols = {}
    self._service_protocols = {}
    self._repository = repository
    self._tcp_pingers = {}
    self._http_pingers = {}
    self.logger = logging.getLogger()
    try:
        config = json_file_to_dict('./config.json')
        self._ssl_context = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
        self._ssl_context.load_cert_chain(config['SSL_CERTIFICATE'], config['SSL_KEY'])
    except:
        self._ssl_context = None
def periodic_stats_logger(cls):
    logd = defaultdict(lambda: 0)
    logd['hostname'] = cls.hostname
    logd['service_name'] = cls.name

    for key, value in cls.http_stats.items():
        logd[key] += value
        logd['http_' + key] = value

    for key, value in cls.tcp_stats.items():
        logd[key] += value
        logd['tcp_' + key] = value

    _logger = logging.getLogger('stats')
    _logger.info(dict(logd))
    asyncio.get_event_loop().call_later(120, cls.periodic_stats_logger)
def __init__(self, handler, interval, timeout, loop=None, max_failures=5):
    """
    Asyncio based pinger

    :param handler: Pinger uses it to send a ping and inform when timeout
        occurs. Must implement send_ping() and on_timeout() methods
    :param int interval: time interval between ping after a pong
    :param loop: Optional event loop
    """
    self._handler = handler
    self._interval = interval
    self._timeout = timeout
    self._loop = loop or asyncio.get_event_loop()
    self._timer = None
    self._failures = 0
    self._max_failures = max_failures
    self.logger = logging.getLogger()
def __init__(self, *callbacks):
    self._logger = Log.get('uchroma.devicemanager')

    self._devices = OrderedDict()
    self._monitor = False
    self._udev_context = Context()
    self._udev_observer = None
    self._callbacks = []

    if callbacks is not None:
        self._callbacks.extend(callbacks)

    self._loop = asyncio.get_event_loop()

    self.device_added = Signal()
    self.device_removed = Signal()

    self.discover()
async def _close_input_devices(self):
    if not hasattr(self, '_opened') or not self._opened:
        return

    self._opened = False

    for event_device in self._event_devices:
        asyncio.get_event_loop().remove_reader(event_device.fileno())
        event_device.close()

    tasks = []
    for task in self._tasks:
        if not task.done():
            task.cancel()
            tasks.append(task)

    await asyncio.wait(tasks, return_when=futures.ALL_COMPLETED)
    self._event_devices.clear()
def ensure_future(coro, loop=None):
    """
    Wrapper for asyncio.ensure_future which dumps exceptions
    """
    if loop is None:
        loop = asyncio.get_event_loop()
    fut = asyncio.ensure_future(coro, loop=loop)

    def exception_logging_done_cb(fut):
        try:
            e = fut.exception()
        except asyncio.CancelledError:
            return
        if e is not None:
            loop.call_exception_handler({
                'message': 'Unhandled exception in async future',
                'future': fut,
                'exception': e,
            })

    fut.add_done_callback(exception_logging_done_cb)
    return fut
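A minimal sketch of how the wrapper above might be used, under the pre-3.10 asyncio API these examples assume (the failing coroutine boom is invented for illustration). The done-callback routes the exception through the loop's exception handler, which logs it by default, instead of letting it vanish with the future:

import asyncio

async def boom():
    # Hypothetical coroutine that fails.
    raise RuntimeError('oops')

loop = asyncio.get_event_loop()
fut = ensure_future(boom(), loop=loop)
try:
    loop.run_until_complete(fut)
except RuntimeError:
    pass  # already reported via the loop's exception handler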
def animate(self, start: float, end: float, done_cb=None):
    """
    Executes the given callback over the period of max_time
    at the given FPS, to animate from start to end.

    This can be used for things like brightness levels.

    :param start: Starting value
    :param end: Ending value
    """
    if asyncio.get_event_loop().is_running():
        if self._task is not None:
            self._task.cancel()
        self._task = ensure_future(self._animate(start, end))
        if done_cb is not None:
            self._task.add_done_callback(done_cb)
    else:
        self._callback(end)
def get_treasure_map(self, policy_group):
    dht_key = policy_group.treasure_map_dht_key()

    ursula_coro = self.server.get(dht_key)
    event_loop = asyncio.get_event_loop()
    packed_encrypted_treasure_map = event_loop.run_until_complete(ursula_coro)

    _signature_for_ursula, pubkey_sig_alice, hrac, encrypted_treasure_map = dht_value_splitter(
        packed_encrypted_treasure_map[5::], msgpack_remainder=True)
    verified, cleartext = self.verify_from(self.alice, encrypted_treasure_map,
                                           signature_is_on_cleartext=True,
                                           decrypt=True)
    alices_signature, packed_node_list = BytestringSplitter(Signature)(
        cleartext, return_remainder=True)

    if not verified:
        return NOT_FROM_ALICE
    else:
        from nkms.policy.models import TreasureMap
        self.treasure_maps[policy_group.hrac] = TreasureMap(msgpack.loads(packed_node_list))
        return self.treasure_maps[policy_group.hrac]
def test_vladimir_illegal_interface_key_does_not_propagate(ursulas):
    """
    Although Ursulas propagate each other's interface information,
    as demonstrated above, they do not propagate interface information
    for Vladimir, an Evil Ursula.
    """
    vladimir = ursulas[0]
    ursula = ursulas[1]

    # Ursula hasn't seen any illegal keys.
    assert ursula.server.protocol.illegal_keys_seen == []

    # Vladimir does almost everything right....
    value = vladimir.interface_dht_value()

    # Except he sets an illegal key for his interface.
    illegal_key = "Not allowed to set arbitrary key for this."

    setter = vladimir.server.set(key=illegal_key, value=value)
    loop = asyncio.get_event_loop()
    loop.run_until_complete(setter)

    # Now Ursula has seen an illegal key.
    assert digest(illegal_key) in ursula.server.protocol.illegal_keys_seen
def main(args):
    if args.debug:
        logger = logging.getLogger('asyncio')
        logger.setLevel(logging.DEBUG)
        logger.addHandler(logging.StreamHandler())

    example = Example()
    example.prepare(args.path)

    loop = asyncio.get_event_loop()
    if args.debug:
        loop.set_debug(True)

    setup_signal_handlers(loop, example)
    example.run(loop, args.events)

    try:
        loop.run_forever()
    finally:
        loop.close()
def create_task(self, coro_obj):
    '''
    wrapper for creating a task that can be used for waiting until
    a task has started.

    :param coro_obj: coroutine object to schedule
    :param loop: event loop
    :returns: a two element tuple where the first element is the task
        object. Awaiting on this will return when the coroutine object is
        done executing. The second element is a future that becomes done
        when the coroutine object is started.

    .. note:: must only be called from within the thread where the event
        loop resides
    '''
    loop = asyncio.get_event_loop()

    async def task_wrapper(coro_obj, launched):
        launched.set_result(True)
        await coro_obj

    launched = loop.create_future()
    return loop.create_task(task_wrapper(coro_obj, launched)), launched
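A short usage sketch for the wrapper above; here scheduler stands for an instance of the class defining create_task, and worker is a hypothetical coroutine. The second tuple element lets a caller block until the coroutine has actually begun executing:

import asyncio

async def worker():
    # Hypothetical coroutine.
    await asyncio.sleep(1)

async def main(scheduler):
    task, launched = scheduler.create_task(worker())
    await launched   # resolves as soon as worker() begins executing
    print('worker is running')
    await task       # resolves once worker() has finished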
async def register_producer(self, *, exchange_name, exchange_type):
    '''
    Register a producer on the channel by providing information to the
    broker about the exchange the channel is going to use.

    :param exchange_name: name of the exchange
    :type exchange_name: str
    :param exchange_type: Type of the exchange. Accepted values are
        "direct", "topic" or "fanout"
    :type exchange_type: str
    :returns: None
    '''
    loop = asyncio.get_event_loop()
    func = partial(self._register_producer, exchange_name=exchange_name,
                   exchange_type=exchange_type)
    await loop.run_in_executor(None, func)
def __init__(self, *, host, port):
    '''
    Add the broker to the registry. Each broker is given a unique name of
    "host_port" in the registry.

    :param host: the hostname of the broker you wish to connect to
    :type host: str
    :param port: the port of the broker you wish to connect to
    :type port: int
    '''
    self.host = host
    self.port = port
    self.name = "{}_{}".format(self.host, self.port)
    broker_registry[self.name] = self
    self.loop = asyncio.get_event_loop()
def default(self, line, *args, **kwargs):
    async def query():
        data = await self.request.query(line)
        if self.print_full_response:
            print('*' * 80)
            print('Full Response: \n')
            pprint.pprint(data)
            print('*' * 80)
            print('\n')
        table = Tabulate(data)
        print(table.draw())

    asyncio.get_event_loop().run_until_complete(query())
def test_query_hist_data(self):
    async def run(loop, query_parms, blk):
        engine = await aiosa.create_engine(
            user=self.db_info['user'], db=self.db_info['db'],
            host=self.db_info['host'], password=self.db_info['password'],
            loop=loop)

        # Insert and Query
        await insert_hist_data(engine, query_parms[0], blk)
        blk = await query_hist_data(engine, *query_parms)

        engine.close()
        await engine.wait_closed()
        return blk

    # Execute and verify query
    self._clear_db()
    init_db(self.db_info)
    blk_source = MarketDataBlock(testdata_query_hist_data[0])
    query_parms = testdata_query_hist_data[1]
    loop = asyncio.get_event_loop()
    blk = loop.run_until_complete(run(loop, query_parms, blk_source))
    assert_frame_equal(blk.df, blk_source.df.loc(axis=0)[
        :, :, :, query_parms[-2]:query_parms[-1]])
def tool_start(func, parser=None, include_stage=False):
    if not parser:
        parser = OptionParser()
    parser.add_option('-s', '--stage', dest="stage", default='dev',
                      choices=['live', 'dev'])
    (options, args) = parser.parse_args()

    loop = asyncio.get_event_loop()
    print("Configuring database connections...")
    loop.run_until_complete(prepare_configs(None, app, loop))
    conf = app.configs[options.stage]

    opts = vars(options)
    if not include_stage:
        opts.pop('stage')

    print("Starting...")
    loop.run_until_complete(func(conf, **opts))
    app.http.close()
def aAsync(func, *args, **kwargs):
    """
    Wrap a blocking call into an awaitable future, e.g.

        future = aAsync(requests.get, 'http://www.xxx.com', headers=headers)
        data = yield from future
    """
    # run_in_executor does not accept **kwargs, so bundle the positional
    # and keyword arguments into a closure and unpack them inside, then
    # hand the closure to run_in_executor(None, makeUp, args, kwargs).
    def makeUp(args, kwargs):
        return func(*args, **kwargs)

    eventLoop = asyncio.get_event_loop()
    future = eventLoop.run_in_executor(None, makeUp, args, kwargs)
    return future
def __init__(self, auth_url, username, tenant, loop=None, log=None,
             cafile=None, token_renew_delay=3300):
    self.auth_url = auth_url
    self.username = username
    self.tenant = tenant
    self.log = log
    self.token_renew_delay = token_renew_delay
    self.loop = loop or asyncio.get_event_loop()
    self.headers = {"content-type": "application/json",
                    "accept": "application/json"}

    if cafile:
        sslcontext = ssl.create_default_context(cafile=cafile)
        conn = aiohttp.TCPConnector(ssl_context=sslcontext)
        self.session = aiohttp.ClientSession(connector=conn, loop=self.loop)
    else:
        self.session = aiohttp.ClientSession(loop=self.loop)
def __init__(self, dbcon: DBConnection,
             notification_manager: NotificationManager,
             max_concurrent_jobs: int, *,
             debug_mode: bool = False,
             loop: asyncio.AbstractEventLoop = None) -> None:
    self.loop = loop or asyncio.get_event_loop()
    self.dbcon = dbcon
    self.notification_manager = notification_manager
    self.max_concurrent_jobs = max_concurrent_jobs
    self.debug_mode = debug_mode

    if debug_mode:
        log.debug('Debug mode active, all monitors will be started immediately')

    self.monitor_defs = {}  # type: Dict[int, ActiveMonitorDef]
    self.monitors = {}  # type: Dict[int, ActiveMonitor]
    self.num_running_jobs = 0

    stats.set('total_jobs_run', 0, 'ACT_MON')
    stats.set('cur_running_jobs', 0, 'ACT_MON')
    stats.set('num_monitors', 0, 'ACT_MON')
    stats.set('jobs_deferred', 0, 'ACT_MON')
    stats.set('checks_up', 0, 'ACT_MON')
    stats.set('checks_down', 0, 'ACT_MON')
    stats.set('checks_unknown', 0, 'ACT_MON')
def setup(config={}):
    """Setup a node."""
    config = Config(config=config)
    start_logger()
    logger = logging.getLogger(__name__)
    loop = asyncio.get_event_loop()
    orchestrator = Orchestrator()

    coro = loop.create_datagram_endpoint(lambda: PeerProtocol(orchestrator),
                                         local_addr=config.address)
    transport, _ = loop.run_until_complete(coro)
    orchestrator.peer_transport = transport

    coro = loop.create_server(lambda: ClientProtocol(orchestrator), *config.address)
    server = loop.run_until_complete(coro)
    logger.info('Serving on %s', config.address)
    return server
def main():
    """Wait for all checkboxes to get checked off"""
    try:
        github_access_token = os.environ['GITHUB_ACCESS_TOKEN']
    except KeyError:
        raise Exception("Missing GITHUB_ACCESS_TOKEN")

    parser = argparse.ArgumentParser()
    parser.add_argument("repo")
    parser.add_argument("--org", default="mitodl")
    args = parser.parse_args()

    if "." in args.repo or "/" in args.repo:
        raise Exception("repo is just the repo name, not a URL or directory (ie 'micromasters')")

    loop = asyncio.get_event_loop()
    loop.run_until_complete(wait_for_checkboxes(github_access_token, args.org, args.repo))
    loop.close()
def _atexit_handler():
    """ Wait for completion of the event loop """
    loop = asyncio.get_event_loop()
    # asyncio.Task.all_tasks() was deprecated in Python 3.7 and removed
    # in 3.9; asyncio.all_tasks() is the modern spelling.
    pending = asyncio.Task.all_tasks()
    loop.run_until_complete(asyncio.gather(*pending))
    loop.close()
def run_cmd(cmd, cwd, env=None, fail=True, shell=False, liveupdate=True):
    """ Runs a command synchronously. """
    # pylint: disable=too-many-arguments
    loop = asyncio.get_event_loop()
    (ret, output) = loop.run_until_complete(
        run_cmd_async(cmd, cwd, env, fail, shell, liveupdate))
    if ret and fail:
        sys.exit(ret)
    return (ret, output)
def __init__(self, process_name):
    """
    The actual instance of the controller.

    :param process_name: EnvironmentProcess class specific for this process.
    :type process_name: str
    """
    # Initiate all the core components.
    self.process_name = process_name
    self.loop = asyncio.get_event_loop()
    self.game = Game

    self.gbx = GbxClient.create_from_settings(self, settings.DEDICATED[self.process_name])
    self.db = Database.create_from_settings(self, settings.DATABASES[self.process_name])
    self.storage = Storage.create_from_settings(self, settings.STORAGE[self.process_name])
    self.signals = SignalManager
    self.ui_manager = GlobalUIManager(self)
    self.apps = Apps(self)

    # Contrib components.
    self.map_manager = MapManager(self)
    self.player_manager = PlayerManager(self)
    self.permission_manager = PermissionManager(self)
    self.command_manager = CommandManager(self)
    self.setting_manager = GlobalSettingManager(self)
    self.mode_manager = ModeManager(self)
    self.chat_manager = self.chat = ChatManager(self)

    # Populate apps.
    self.apps.populate(settings.MANDATORY_APPS, in_order=True)
    try:
        self.apps.populate(settings.APPS[self.process_name])
    except KeyError as e:
        raise ImproperlyConfigured(
            'One of the pool names doesn\'t reflect into the APPS setting! You must '
            'declare the apps per pool! ({})'.format(str(e))
        )
def __init__(self, host, port, event_pool=None, user=None, password=None,
             api_version='2013-04-16', instance=None):
    """
    Initiate the GbxRemote client.

    :param host: Host of the dedicated server.
    :param port: Port of the dedicated XML-RPC server.
    :param event_pool: Asyncio pool to execute the handling on.
    :param user: User to authenticate with, in most cases this is 'SuperAdmin'
    :param password: Password to authenticate with.
    :param api_version: API Version to use. In most cases you won't override
        the default because version changes should be abstracted by the
        other core components.
    :param instance: Instance of the app.
    :type host: str
    :type port: str int
    :type event_pool: asyncio.BaseEventPool
    :type user: str
    :type password: str
    :type api_version: str
    :type instance: pyplanet.core.instance.Instance
    """
    self.host = host
    self.port = port
    self.user = user
    self.password = password
    self.api_version = api_version
    self.instance = instance

    self.dedicated_version = None
    self.dedicated_build = None

    self.event_loop = event_pool or asyncio.get_event_loop()
    self.gbx_methods = list()

    self.handlers = dict()
    self.handler_nr = 0x80000000
    self.script_handlers = dict()

    self.reader = None
    self.writer = None
    self.loop_task = None
def __init__(self, argv):
    self.argv = argv or sys.argv[:]
    self.prog_name = os.path.basename(self.argv[0])
    self.version = version
    self.loop = asyncio.get_event_loop()
    self.settings_exception = None
    self.commands = find_commands(__path__[0])