The following 25 code examples, extracted from open-source Python projects, illustrate how to use asyncio.subprocess.
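Before the extracted snippets, here is a minimal self-contained sketch of the common pattern most of them follow: launch a child process with asyncio.create_subprocess_exec, capture its output through asyncio.subprocess.PIPE, and await its completion. The echo command and the main coroutine are illustrative choices, not taken from any of the projects below; asyncio.run requires Python 3.7+.

import asyncio

async def main() -> None:
    # Start "echo hello" with stdout captured through a pipe.
    proc = await asyncio.create_subprocess_exec(
        "echo", "hello",
        stdout=asyncio.subprocess.PIPE,
    )
    # communicate() waits for the process to exit and returns (stdout, stderr).
    stdout, _ = await proc.communicate()
    print(proc.returncode, stdout.decode().strip())

asyncio.run(main())

The snippets that follow show the same building blocks (create_subprocess_exec/create_subprocess_shell, PIPE/DEVNULL, wait, communicate, readline) in real project contexts.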
async def execute(self, code: str, message: Message) -> CodeOutput:
    dmepath = await self.make_project(code)
    proc = await create_subprocess_exec("DreamMaker", dmepath,
                                        stdout=asyncio.subprocess.PIPE)  # type: asyncio.Process
    await proc.wait()
    data = await proc.stdout.read()  # type: bytes
    compile_log = data.decode("ascii")  # type: str
    out = CodeOutput()  # type: CodeOutput
    out.compile_output = compile_log
    if proc.returncode:
        out.state = CodeHandlerState.failed_compile
        return out
    else:
        out.state = CodeHandlerState.compiled
    proc = await create_subprocess_exec("DreamDaemon", dmepath + "b",
                                        stdout=asyncio.subprocess.PIPE,
                                        stderr=asyncio.subprocess.PIPE)
    await proc.wait()
    data = await proc.stderr.read() + await proc.stdout.read()
    log = data.decode("ascii")  # type: str
    out.output = log
    return out
async def call_method(self, method, arg_signature, args,
                      return_signature, returns):
    cmd = 'busctl --user -- call '
    cmd += f'{service_name} {object_path} {object_interface} {method}'
    cmd += f' "{arg_signature}"'
    for i in args:
        cmd += f' {i}'

    create = asyncio.create_subprocess_shell(
        cmd, stdout=asyncio.subprocess.PIPE
    )
    proc = await create

    # Read one line of output
    data = await proc.stdout.readline()
    line = data.decode('ascii').rstrip()
    self.assertEqual(line, f'{return_signature} {returns}')
    await proc.wait()
async def run_df():
    print('in run_df')
    buffer = bytearray()

    create = asyncio.create_subprocess_exec(
        'df', '-hl',
        stdout=asyncio.subprocess.PIPE,
    )
    print('launching process')
    proc = await create
    print('process started {}'.format(proc.pid))

    while True:
        line = await proc.stdout.readline()
        print('read {!r}'.format(line))
        if not line:
            print('no more output from command')
            break
        buffer.extend(line)

    print('waiting for process to complete')
    await proc.wait()

    return_code = proc.returncode
    print('return code {}'.format(return_code))
    if not return_code:
        cmd_output = bytes(buffer).decode()
        results = _parse_results(cmd_output)
    else:
        results = []

    return (return_code, results)
def get_optional_nbd_args() -> List[str]:
    global _nbd_args
    if _nbd_args is not None:
        return _nbd_args
    log.debug('Detecting which optional features are supported in qemu-nbd.')
    help = subprocess.check_output(['qemu-nbd', '--help'],
                                   stderr=subprocess.STDOUT)
    base_args = []  # type: List[str]
    if b'--discard=' in help:
        log.info('Discard is supported by qemu-nbd.')
        base_args.append('--discard=unmap')
        if b'--detect-zeroes' in help:
            log.info('Zeroes detection is supported by qemu-nbd.')
            base_args.append('--detect-zeroes=unmap')
        else:
            log.warning('Zeroes detection is not supported by qemu-nbd.')
    else:
        log.warning('Discard is not supported by qemu-nbd.')
    return base_args
async def convert_pdf_to_jpg(user_path, the_hash) -> str:
    tex_logger.debug("Converting to jpg...")
    os.chdir(user_path)
    conversion_process = await asyncio.create_subprocess_exec(
        *["gs", "-o", "{}.jpg".format(the_hash), "-sDEVICE=jpeg",
          "-dJPEGQ=100", "-r1000", "the_latex.pdf"],
        stdout=asyncio.subprocess.DEVNULL)
    try:
        await conversion_process.wait()
    except asyncio.CancelledError:
        tex_logger.debug("Conversion cancelled.\n")
        conversion_process.kill()
        raise
    os.chdir(os.path.dirname(user_path))
    tex_logger.debug(" Converted.\n")
    return os.path.join(user_path, "{}.jpg".format(the_hash))
def __init__(self, config: JobConfig,
             retry_state: Optional[JobRetryState]) -> None:
    self.config = config
    self.proc = None  # type: Optional[asyncio.subprocess.Process]
    self.retcode = None  # type: Optional[int]
    self._stderr_reader = None  # type: Optional[StreamReader]
    self._stdout_reader = None  # type: Optional[StreamReader]
    self.stderr = None  # type: Optional[str]
    self.stdout = None  # type: Optional[str]
    self.stderr_discarded = 0
    self.stdout_discarded = 0
    self.execution_deadline = None  # type: Optional[float]
    self.retry_state = retry_state
    self.env = None  # type: Optional[Dict[str, str]]
    statsd_config = self.config.statsd
    if statsd_config is not None:
        self.statsd_writer = StatsdJobMetricWriter(
            host=statsd_config['host'],
            port=statsd_config['port'],
            prefix=statsd_config['prefix'],
            job=self,
        )  # type: Optional[StatsdJobMetricWriter]
    else:
        self.statsd_writer = None
async def to_upper(input):
    print('in to_upper')

    create = asyncio.create_subprocess_exec(
        'tr', '[:lower:]', '[:upper:]',
        stdout=asyncio.subprocess.PIPE,
        stdin=asyncio.subprocess.PIPE,
    )
    print('launching process')
    proc = await create
    print('pid {}'.format(proc.pid))

    print('communicating with process')
    stdout, stderr = await proc.communicate(input.encode())

    print('waiting for process to complete')
    await proc.wait()

    return_code = proc.returncode
    print('return code {}'.format(return_code))
    if not return_code:
        results = bytes(stdout).decode()
    else:
        results = ''

    return (return_code, results)
def setUpClass(cls):
    cls.server = subprocess.Popen(
        [
            'python', '-m', 'unittest',
            'tests.test_server.Test.test_method_wait'
        ]
    )
    time.sleep(3)
    cls.loop = asyncio.get_event_loop()
    cls.service = adbus.Service(bus='session')
def max_timeout(context, proc, timeout):
    """Make sure the proc pid's process and process group are killed.

    First, kill the process group (-pid) and then the pid.

    Args:
        context (scriptworker.context.Context): the scriptworker context.
        proc (subprocess.Process): the subprocess proc.  This is compared
            against context.proc to make sure we're killing the right pid.
        timeout (int): Used for the log message.

    """
    # We may be called with proc1. proc1 may finish, and proc2 may start
    # before this function is called. Make sure we're still running the
    # proc we were called with.
    if proc != context.proc:
        return
    pid = context.proc.pid
    log.warning("Exceeded timeout of {} seconds: {}".format(timeout, pid))
    loop = asyncio.get_event_loop()
    loop.run_until_complete(asyncio.wait([
        asyncio.ensure_future(kill(-pid)),
        asyncio.ensure_future(kill(pid))
    ]))


# claim_work {{{1
def check_ownertrust(context, gpg_home=None):
    """In theory, this will repair a broken trustdb.

    Rebuild the trustdb via --import-ownertrust if not.

    Args:
        context (scriptworker.context.Context): the scriptworker context.
        gpg_home (str, optional): override the gpg_home with a different
            gnupg home directory here.  Defaults to None.

    """
    gpg_home = guess_gpg_home(context, gpg_home=gpg_home)
    gpg_path = guess_gpg_path(context)
    subprocess.check_call([gpg_path] + gpg_default_args(gpg_home) +
                          ["--check-trustdb"])
async def verify_signed_tag(context, tag, exec_function=subprocess.check_call):
    """Verify ``git_key_repo_dir`` is at the valid signed ``tag``.

    Args:
        context (scriptworker.context.Context): the scriptworker context.
        tag (str): the tag to verify.

    Raises:
        ScriptWorkerGPGException: if we're not updated to ``tag``

    """
    path = context.config['git_key_repo_dir']
    try:
        exec_function(["git", "tag", "-v", tag], cwd=path)
    except subprocess.CalledProcessError as exc:
        raise ScriptWorkerGPGException(
            "Can't verify tag {} signature at {}!".format(tag, path)
        )
    tag_revision = await get_git_revision(path, tag)
    head_revision = await get_git_revision(path, "HEAD")
    if tag_revision != head_revision:
        raise ScriptWorkerGPGException(
            "{}: Tag {} revision {} != current revision {}!".format(
                path, tag, tag_revision, head_revision
            )
        )


# last_good_git_revision_file functions {{{1
async def update(message: discord.Message):
    """ Update the bot by running `git pull`. """
    await client.say(message, "```diff\n{}```".format(
        await utils.subprocess("git", "pull")))
async def reset(message: discord.Message):
    """ **RESET THE HEAD** before updating.  This removes all local changes
    done to the repository (excluding the .gitignore files). """
    confirmed = await utils.confirm(
        message, "Are you sure you want to remove all local changes?")
    assert confirmed, "Aborted."

    await utils.subprocess("git", "reset", "--hard")
    await update(message)
def process(self, job: Job) -> Awaitable:
    self.log.debug(f'creating subprocess for {job}')
    return asyncio.create_subprocess_exec(
        job.exe, *job.args,
        stdin=PIPE,
        stdout=PIPE,
        stderr=PIPE,
        cwd=str(job.cwd),
        loop=self.loop,
        **job.kw,
    )
async def _kill_subprocess(self, proc):
    # type: (Optional[Process]) -> None
    """Helper method; send SIGTERM/SIGKILL to a subprocess.

    This method first sends SIGTERM to the subprocess.  If the process hasn't
    terminated after a given timeout, it sends SIGKILL.

    Parameters
    ----------
    proc : Optional[Process]
        the process to attempt to terminate.  If None, this method does
        nothing.
    """
    if proc is not None:
        if proc.returncode is None:
            try:
                proc.terminate()
                try:
                    await asyncio.shield(
                        asyncio.wait_for(proc.wait(), self._cancel_timeout))
                except CancelledError:
                    pass
                if proc.returncode is None:
                    proc.kill()
                    try:
                        await asyncio.shield(
                            asyncio.wait_for(proc.wait(), self._cancel_timeout))
                    except CancelledError:
                        pass
            except ProcessLookupError:
                pass
def batch_subprocess(self, proc_info_list):
    # type: (Sequence[ProcInfo]) -> Optional[Sequence[Union[int, Exception]]]
    """Run all given subprocesses in parallel.

    Parameters
    ----------
    proc_info_list : Sequence[ProcInfo]
        a list of process information.  Each element is a tuple of:

        args : Union[str, Sequence[str]]
            command to run, as string or list of string arguments.
        log : str
            log file name.
        env : Optional[Dict[str, str]]
            environment variable dictionary.  None to inherit from parent.
        cwd : Optional[str]
            working directory path.  None to inherit from parent.

    Returns
    -------
    results : Optional[Sequence[Union[int, Exception]]]
        if user cancelled the subprocesses, None is returned.  Otherwise, a
        list of subprocess return codes or exceptions are returned.
    """
    num_proc = len(proc_info_list)
    if num_proc == 0:
        return []

    coro_list = [self.async_new_subprocess(args, log, env, cwd)
                 for args, log, env, cwd in proc_info_list]

    return batch_async_task(coro_list)
def batch_subprocess_flow(self, proc_info_list):
    # type: (Sequence[Sequence[FlowInfo]]) -> Optional[Sequence[Union[int, Exception]]]
    """Run all given subprocess flows in parallel.

    Parameters
    ----------
    proc_info_list : Sequence[Sequence[FlowInfo]]
        a list of process flow information.  Each element is a sequence of
        tuples of:

        args : Union[str, Sequence[str]]
            command to run, as string or list of string arguments.
        log : str
            log file name.
        env : Optional[Dict[str, str]]
            environment variable dictionary.  None to inherit from parent.
        cwd : Optional[str]
            working directory path.  None to inherit from parent.
        vfun : Sequence[Callable[[Optional[int], str], Any]]
            a function to validate if it is ok to execute the next process.
            The output of the last function is returned.  The first argument
            is the return code, the second argument is the log file name.

    Returns
    -------
    results : Optional[Sequence[Any]]
        if user cancelled the subprocess flows, None is returned.  Otherwise,
        a list of flow return values or exceptions are returned.
    """
    num_proc = len(proc_info_list)
    if num_proc == 0:
        return []

    coro_list = [self.async_new_subprocess_flow(flow_info)
                 for flow_info in proc_info_list]

    return batch_async_task(coro_list)
async def create_pdf(user_path) -> None:
    tex_logger.debug("Creating pdf... ")
    os.chdir(user_path)
    pdflatex_process = await asyncio.create_subprocess_exec(
        *["pdflatex", "-no-shell-escape", "the_latex.tex"],
        stdout=asyncio.subprocess.DEVNULL)
    try:
        await pdflatex_process.wait()
    except asyncio.CancelledError:
        tex_logger.debug("PDF generation cancelled.")
        pdflatex_process.kill()
        raise
    os.chdir(os.path.dirname(user_path))
    tex_logger.debug("Created.\n")
async def copy_to_server(local_path, the_remote_path) -> None:
    tex_logger.debug("Copying to server... ")
    scp_process = \
        await asyncio.create_subprocess_exec(
            *["scp", local_path,
              "{}@{}:{}".format(username, host, the_remote_path)],
            stdout=asyncio.subprocess.DEVNULL)
    try:
        await scp_process.wait()
    except asyncio.CancelledError:
        tex_logger.debug("Cancelled while uploading result to server.\n")
        scp_process.kill()
        raise
    tex_logger.debug("Copied.\n")
async def get_width_and_height(user_path, the_hash) -> Tuple[int, int]:
    tex_logger.debug("Measuring result...")
    os.chdir(user_path)
    measurer_process = await asyncio.create_subprocess_exec(
        *["identify", "-format", r"%wx%h", "{}.jpg".format(the_hash)],
        stdout=asyncio.subprocess.PIPE)
    try:
        stdout_bytes, stderr_bytes = await measurer_process.communicate()
    except asyncio.CancelledError:
        tex_logger.debug("Cancelled while measuring dimensions\n")
        measurer_process.kill()
        return
    regex = re.match(r"(\d+)x(\d+)", stdout_bytes.decode())
    os.chdir(os.path.dirname(user_path))
    tex_logger.debug(" Got {}x{}\n".format(regex.group(1), regex.group(2)))
    return regex.group(1), regex.group(2)
def sign_key(context, target_fingerprint, signing_key=None,
             exportable=False, gpg_home=None):
    """Sign the ``target_fingerprint`` key with ``signing_key`` or default key.

    This signs the target key with the signing key, which adds to the web of
    trust.

    Due to pexpect async issues, this function is once more synchronous.

    Args:
        context (scriptworker.context.Context): the scriptworker context.
        target_fingerprint (str): the fingerprint of the key to sign.
        signing_key (str, optional): the fingerprint of the signing key to
            sign with.  If not set, this defaults to the default-key in the
            gpg.conf.  Defaults to None.
        exportable (bool, optional): whether the signature should be
            exportable.  Defaults to False.
        gpg_home (str, optional): override the gpg_home with a different
            gnupg home directory here.  Defaults to None.

    Raises:
        ScriptWorkerGPGException: on a failed signature.

    """
    args = []
    gpg_path = guess_gpg_path(context)
    gpg_home = guess_gpg_home(context, gpg_home=gpg_home)
    message = "Signing key {} in {}".format(target_fingerprint, gpg_home)
    if signing_key:
        args.extend(['-u', signing_key])
        message += " with {}...".format(signing_key)
    log.info(message)
    if exportable:
        args.append("--sign-key")
    else:
        args.append("--lsign-key")
    args.append(target_fingerprint)
    cmd_args = gpg_default_args(gpg_home) + args
    log.debug(subprocess.list2cmdline([gpg_path] + cmd_args))
    child = pexpect.spawn(gpg_path, cmd_args,
                          timeout=context.config['sign_key_timeout'])
    try:
        while True:
            index = child.expect([
                pexpect.EOF,
                b".*Really sign\? \(y/N\) ",
                b".*Really sign all user IDs\? \(y/N\) ",
            ])
            if index == 0:
                break
            child.sendline(b'y')
    except (pexpect.exceptions.TIMEOUT):
        raise ScriptWorkerGPGException(
            "Failed signing {}! Timeout".format(target_fingerprint)
        )
    child.close()
    if child.exitstatus != 0 or child.signalstatus is not None:
        raise ScriptWorkerGPGException(
            "Failed signing {}! exit {} signal {}".format(
                target_fingerprint, child.exitstatus, child.signalstatus
            )
        )


# ownertrust {{{1
def update_ownertrust(context, my_fingerprint, trusted_fingerprints=None,
                      gpg_home=None):
    """Trust my key ultimately; trusted_fingerprints fully.

    Args:
        context (scriptworker.context.Context): the scriptworker context.
        my_fingerprint (str): the fingerprint of the key we want to specify
            as ultimately trusted.
        trusted_fingerprints (list, optional): the list of fingerprints that
            we want to mark as fully trusted.  These need to be signed by the
            my_fingerprint key before they are trusted.
        gpg_home (str, optional): override the gpg_home with a different
            gnupg home directory here.  Defaults to None.

    Raises:
        ScriptWorkerGPGException: if there is an error.

    """
    gpg_home = guess_gpg_home(context, gpg_home=gpg_home)
    log.info("Updating ownertrust in {}...".format(gpg_home))
    ownertrust = []
    trusted_fingerprints = list(set(trusted_fingerprints or []))
    gpg_path = guess_gpg_path(context)
    trustdb = os.path.join(gpg_home, "trustdb.gpg")
    rm(trustdb)
    # trust my_fingerprint ultimately
    ownertrust.append("{}:6\n".format(my_fingerprint))
    # Trust trusted_fingerprints fully.  Once they are signed by my key, any
    # key they sign will be valid.  Only do this for root/intermediate keys
    # that are intended to sign other keys.
    for fingerprint in trusted_fingerprints:
        if fingerprint != my_fingerprint:
            ownertrust.append("{}:5\n".format(fingerprint))
    log.debug(pprint.pformat(ownertrust))
    ownertrust = ''.join(ownertrust).encode('utf-8')
    cmd = [gpg_path] + gpg_default_args(gpg_home) + ["--import-ownertrust"]
    p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stdin=subprocess.PIPE,
                         stderr=subprocess.STDOUT)
    stdout = p.communicate(input=ownertrust)[0] or b''
    if p.returncode:
        raise ScriptWorkerGPGException(
            "gpg update_ownertrust error!\n{}".format(stdout.decode('utf-8')))
    verify_ownertrust(
        context, my_fingerprint, trusted_fingerprints=trusted_fingerprints,
        gpg_home=gpg_home
    )
def verify_ownertrust(context, my_fingerprint, trusted_fingerprints=None,
                      gpg_home=None):
    """Verify the ownertrust is exactly as expected.

    Args:
        context (scriptworker.context.Context): the scriptworker context.
        my_fingerprint (str): the fingerprint of the key we specified as
            ultimately trusted.
        trusted_fingerprints (list, optional): the list of fingerprints that
            we marked as fully trusted.
        gpg_home (str, optional): override the gpg_home with a different
            gnupg home directory here.  Defaults to None.

    Raises:
        ScriptWorkerGPGException: if there is an error.

    """
    gpg_home = guess_gpg_home(context, gpg_home=gpg_home)
    gpg_path = guess_gpg_path(context)
    expected = ['{}:6:'.format(my_fingerprint)]
    for fp in trusted_fingerprints:
        if fp != my_fingerprint:
            expected.append('{}:5:'.format(fp))
    expected = set(expected)
    real = []
    output = subprocess.check_output(
        [gpg_path] + gpg_default_args(gpg_home) + ["--export-ownertrust"],
        stderr=subprocess.STDOUT
    ).decode('utf-8')
    for line in output.split('\n'):
        if line and not line.startswith('#'):
            real.append(line)
    real = set(real)
    messages = []
    extra = real.difference(expected)
    missing = expected.difference(real)
    if extra:
        messages.append("Extra trust lines!\n{}".format(extra))
    if missing:
        messages.append("Missing trust lines!\n{}".format(missing))
    if messages:
        raise ScriptWorkerGPGException(
            "{}\n{}".format('\n'.join(messages), output)
        )


# data signatures and verification {{{1
async def async_new_subprocess(self, args, log, env=None, cwd=None):
    # type: (Union[str, Sequence[str]], str, Optional[Dict[str, str]], Optional[str]) -> Optional[int]
    """A coroutine which starts a subprocess.

    If this coroutine is cancelled, it will shut down the subprocess
    gracefully using SIGTERM/SIGKILL, then raise CancelledError.

    Parameters
    ----------
    args : Union[str, Sequence[str]]
        command to run, as string or sequence of strings.
    log : str
        the log file name.
    env : Optional[Dict[str, str]]
        an optional dictionary of environment variables.  None to inherit
        from parent.
    cwd : Optional[str]
        the working directory.  None to inherit from parent.

    Returns
    -------
    retcode : Optional[int]
        the return code of the subprocess.
    """
    if isinstance(args, str):
        args = [args]

    # get log file name, make directory if necessary
    log = os.path.abspath(log)
    if os.path.isdir(log):
        raise ValueError('log file %s is a directory.' % log)
    os.makedirs(os.path.dirname(log), exist_ok=True)

    async with self._semaphore:
        proc = None
        with open(log, 'w') as logf:
            logf.write('command: %s\n' % (' '.join(args)))
            logf.flush()
            try:
                proc = await asyncio.create_subprocess_exec(
                    *args, stdout=logf, stderr=subprocess.STDOUT,
                    env=env, cwd=cwd)
                retcode = await proc.wait()
                return retcode
            except CancelledError as err:
                await self._kill_subprocess(proc)
                raise err
async def start(self) -> None:
    if self.proc is not None:
        raise RuntimeError("process already running")
    kwargs = {}  # type: Dict[str, Any]
    if isinstance(self.config.command, list):
        create = asyncio.create_subprocess_exec
        cmd = self.config.command
    else:
        if self.config.shell:
            create = asyncio.create_subprocess_exec
            cmd = [self.config.shell, '-c', self.config.command]
        else:
            create = asyncio.create_subprocess_shell
            cmd = [self.config.command]
    if self.config.environment:
        env = dict(os.environ)
        for envvar in self.config.environment:
            env[envvar['key']] = envvar['value']
        self.env = env
        kwargs['env'] = env
    logger.debug("%s: will execute argv %r", self.config.name, cmd)
    if self.config.captureStderr:
        kwargs['stderr'] = asyncio.subprocess.PIPE
    if self.config.captureStdout:
        kwargs['stdout'] = asyncio.subprocess.PIPE
    if self.config.executionTimeout:
        self.execution_deadline = (time.perf_counter() +
                                   self.config.executionTimeout)
    self.proc = await create(*cmd, **kwargs)
    await self._on_start()
    if self.config.captureStderr:
        assert self.proc.stderr is not None
        self._stderr_reader = \
            StreamReader(self.config.name, 'stderr', self.proc.stderr,
                         self.config.saveLimit)
    if self.config.captureStdout:
        assert self.proc.stdout is not None
        self._stdout_reader = \
            StreamReader(self.config.name, 'stdout', self.proc.stdout,
                         self.config.saveLimit)