The following code examples, extracted from open-source Python projects, illustrate how to use subprocess.CalledProcessError().
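All of the examples below share one core pattern, sketched here with a deliberately failing placeholder command: subprocess.check_call(), subprocess.check_output(), and subprocess.run(..., check=True) raise subprocess.CalledProcessError when the child process exits with a non-zero status, and the exception carries the failed command, its exit code, and any captured output.

import subprocess

try:
    # check_output() raises CalledProcessError on any non-zero exit status;
    # stderr=subprocess.STDOUT folds stderr into the captured output.
    output = subprocess.check_output(['ls', 'no-such-dir'],
                                     stderr=subprocess.STDOUT)
except subprocess.CalledProcessError as e:
    # e.cmd is the command, e.returncode the exit status, e.output the
    # captured output ('ls no-such-dir' is just a placeholder that fails).
    print('Command {} failed with code {}: {}'.format(
        e.cmd, e.returncode, e.output))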
def service_running(service_name):
    """Determine whether a system service is running"""
    if init_is_systemd():
        return service('is-active', service_name)
    else:
        try:
            output = subprocess.check_output(
                ['service', service_name, 'status'],
                stderr=subprocess.STDOUT).decode('UTF-8')
        except subprocess.CalledProcessError:
            return False
        else:
            # This works for upstart scripts where the 'service' command
            # returns a consistent string to represent running 'start/running'
            if ("start/running" in output or "is running" in output or
                    "up and running" in output):
                return True
            # Check System V scripts init script return codes
            if service_name in systemv_services_running():
                return True
            return False

def run_command(args, wait=False):
    try:
        if wait:
            p = subprocess.Popen(args, stdout=subprocess.PIPE)
            p.wait()
        else:
            p = subprocess.Popen(args, stdin=None, stdout=None,
                                 stderr=None, close_fds=True)
        (result, error) = p.communicate()
    except subprocess.CalledProcessError as e:
        sys.stderr.write(
            "common::run_command() : [ERROR]: output = %s, error code = %s\n"
            % (e.output, e.returncode))
    return result

def is_crm_leader(resource, retry=False):
    """
    Returns True if the charm calling this is the elected corosync leader,
    as returned by calling the external "crm" command.

    We allow this operation to be retried to avoid the possibility of getting
    a false negative. See LP #1396246 for more info.
    """
    if resource == DC_RESOURCE_NAME:
        return is_crm_dc()
    cmd = ['crm', 'resource', 'show', resource]
    try:
        status = subprocess.check_output(cmd, stderr=subprocess.STDOUT)
        if not isinstance(status, six.text_type):
            status = six.text_type(status, "utf-8")
    except subprocess.CalledProcessError:
        status = None

    if status and get_unit_hostname() in status:
        return True

    if status and "resource %s is NOT running" % (resource) in status:
        raise CRMResourceNotFound("CRM resource %s not found" % (resource))

    return False

def import_key(keyid):
    key = keyid.strip()
    if (key.startswith('-----BEGIN PGP PUBLIC KEY BLOCK-----') and
            key.endswith('-----END PGP PUBLIC KEY BLOCK-----')):
        juju_log("PGP key found (looks like ASCII Armor format)", level=DEBUG)
        juju_log("Importing ASCII Armor PGP key", level=DEBUG)
        with tempfile.NamedTemporaryFile() as keyfile:
            with open(keyfile.name, 'w') as fd:
                fd.write(key)
                fd.write("\n")

            cmd = ['apt-key', 'add', keyfile.name]
            try:
                subprocess.check_call(cmd)
            except subprocess.CalledProcessError:
                error_out("Error importing PGP key '%s'" % key)
    else:
        juju_log("PGP key found (looks like Radix64 format)", level=DEBUG)
        juju_log("Importing PGP key from keyserver", level=DEBUG)
        cmd = ['apt-key', 'adv', '--keyserver',
               'hkp://keyserver.ubuntu.com:80', '--recv-keys', key]
        try:
            subprocess.check_call(cmd)
        except subprocess.CalledProcessError:
            error_out("Error importing PGP key '%s'" % key)

def _git_update_requirements(venv, package_dir, reqs_dir):
    """
    Update from global requirements.

    Update an OpenStack git directory's requirements.txt and
    test-requirements.txt from global-requirements.txt.
    """
    orig_dir = os.getcwd()
    os.chdir(reqs_dir)
    python = os.path.join(venv, 'bin/python')
    cmd = [python, 'update.py', package_dir]
    try:
        subprocess.check_call(cmd)
    except subprocess.CalledProcessError:
        package = os.path.basename(package_dir)
        error_out("Error updating {} from "
                  "global-requirements.txt".format(package))
    os.chdir(orig_dir)

def _clean_check(cmd, target):
    """
    Run the command to download target.

    If the command fails, clean up before re-raising the error.
    """
    try:
        subprocess.check_call(cmd)
    except subprocess.CalledProcessError:
        if os.access(target, os.F_OK):
            os.unlink(target)
        raise

def run_command(command, wait=False):
    try:
        if wait:
            p = subprocess.Popen([command], stdout=subprocess.PIPE, shell=True)
            p.wait()
        else:
            p = subprocess.Popen([command], shell=True, stdin=None,
                                 stdout=None, stderr=None, close_fds=True)
        (result, error) = p.communicate()
    except subprocess.CalledProcessError as e:
        sys.stderr.write(
            "common::run_command() : [ERROR]: output = %s, error code = %s\n"
            % (e.output, e.returncode))
    return result

def install_python(version, arch, home):
    print("Installing Python", version, "for", arch, "bit architecture to", home)
    if exists(home):
        return
    path = download_python(version, arch)
    print("Installing", path, "to", home)
    success = False
    for cmd in INSTALL_CMD[version]:
        cmd = [part.format(home=home, path=path) for part in cmd]
        print("Running:", " ".join(cmd))
        try:
            check_call(cmd)
        except CalledProcessError as exc:
            print("Failed command", cmd, "with:", exc)
            if exists("install.log"):
                with open("install.log") as fh:
                    print(fh.read())
        else:
            success = True
    if success:
        print("Installation complete!")
    else:
        print("Installation failed")

def stopScheduler():
    try:
        subprocess.check_call(["sudo", "service", "supervisor", "stop"])
    except subprocess.CalledProcessError as e:
        print "ERROR: couldn't stop the scheduler (supervisor): {reason}".format(reason=e)
        exit(-1)

    try:
        subprocess.check_call(["sudo", "service", "rabbitmq-server", "stop"])
    except subprocess.CalledProcessError as e:
        print "ERROR: couldn't stop the scheduler (rabbitmq): {reason}".format(reason=e)
        exit(-1)

    print "Scheduler stopped successfully!"

def fstab_mount(mountpoint):
    """Mount filesystem using fstab"""
    cmd_args = ['mount', mountpoint]
    try:
        subprocess.check_output(cmd_args)
    except subprocess.CalledProcessError as e:
        log('Error mounting {}\n{}'.format(mountpoint, e.output))
        return False
    return True

def relation_get(attribute=None, unit=None, rid=None):
    """Get relation information"""
    _args = ['relation-get', '--format=json']
    if rid:
        _args.append('-r')
        _args.append(rid)
    _args.append(attribute or '-')
    if unit:
        _args.append(unit)
    try:
        return json.loads(subprocess.check_output(_args).decode('UTF-8'))
    except ValueError:
        return None
    except CalledProcessError as e:
        if e.returncode == 2:
            return None
        raise

def is_crm_dc():
    """
    Determine leadership by querying the pacemaker Designated Controller
    """
    cmd = ['crm', 'status']
    try:
        status = subprocess.check_output(cmd, stderr=subprocess.STDOUT)
        if not isinstance(status, six.text_type):
            status = six.text_type(status, "utf-8")
    except subprocess.CalledProcessError as ex:
        raise CRMDCNotFound(str(ex))

    current_dc = ''
    for line in status.split('\n'):
        if line.startswith('Current DC'):
            # Current DC: juju-lytrusty-machine-2 (168108163) - partition with quorum
            current_dc = line.split(':')[1].split()[0]
    if current_dc == get_unit_hostname():
        return True
    elif current_dc == 'NONE':
        raise CRMDCNotFound('Current DC: NONE')

    return False

def run(daemon):
    if daemon:
        pid_file = './sensor21.pid'
        if os.path.isfile(pid_file):
            pid = int(open(pid_file).read())
            os.remove(pid_file)
            try:
                p = psutil.Process(pid)
                p.terminate()
            except:
                pass
        try:
            p = subprocess.Popen(['python3', 'sensor21-server.py'])
            open(pid_file, 'w').write(str(p.pid))
        except subprocess.CalledProcessError:
            raise ValueError("error starting sensor21-server.py daemon")
    else:
        print("Server running...")
        app.run(host='::', port=5002)

def del_addr(linkname, address):
    try:
        subprocess.run(['ip', 'address', 'del', address, 'dev', str(linkname)],
                       stdout=subprocess.PIPE, stderr=subprocess.STDOUT,
                       shell=False, check=True)
        return [True, str(linkname)]
    except subprocess.CalledProcessError as suberror:
        return [False, "delete address failed : %s" % suberror.stdout.decode('utf-8')]

# ovs-vsctl list-br
# ovs-vsctl br-exists <Bridge>
# ovs-vsctl add-br <Bridge>
# ovs-vsctl del-br <Bridge>
# ovs-vsctl list-ports <Bridge>
# ovs-vsctl del-port <Bridge> <Port>
# ovs-vsctl add-port <Bridge> <Port> -- set interface <Port> type=gre options:remote_ip=<RemoteIP>
# ovs-vsctl add-port <Bridge> <Port> tag=<ID> -- set interface <Port> type=internal
# ovs-vsctl port-to-br <Port>
# ovs-vsctl set Port <Port> tag=<ID>
# ovs-vsctl clear Port <Port> tag

def try_initialize_swauth():
    if is_leader() and config('auth-type') == 'swauth':
        if leader_get('swauth-init') is not True:
            try:
                admin_key = config('swauth-admin-key')
                if admin_key == '' or admin_key is None:
                    admin_key = leader_get('swauth-admin-key')
                    if admin_key is None:
                        admin_key = uuid.uuid4()
                leader_set({'swauth-admin-key': admin_key})

                bind_port = config('bind-port')
                bind_port = determine_api_port(bind_port, singlenode_mode=True)
                subprocess.check_call([
                    'swauth-prep',
                    '-A', 'http://localhost:{}/auth'.format(bind_port),
                    '-K', admin_key])
                leader_set({'swauth-init': True})
            except subprocess.CalledProcessError:
                log("had a problem initializing swauth!")

def test_failure(self):
    """Ensure that action_fail is called on failure."""
    self.config.return_value = "swauth"
    self.action_get.return_value = "test"
    self.determine_api_port.return_value = 8070
    self.CalledProcessError = ValueError
    self.check_call.side_effect = subprocess.CalledProcessError(0, "hi", "no")
    actions.add_user.add_user()
    self.leader_get.assert_called_with("swauth-admin-key")
    calls = [call("account"), call("username"), call("password")]
    self.action_get.assert_has_calls(calls)
    self.action_set.assert_not_called()
    self.action_fail.assert_called_once_with(
        'Adding user test failed with: "Command \'hi\' returned non-zero '
        'exit status 0"')

def ensure_compliance(self):
    """Ensures that the modules are not loaded."""
    if not self.modules:
        return

    try:
        loaded_modules = self._get_loaded_modules()
        non_compliant_modules = []
        for module in self.modules:
            if module in loaded_modules:
                log("Module '%s' is enabled but should not be." % (module),
                    level=INFO)
                non_compliant_modules.append(module)

        if len(non_compliant_modules) == 0:
            return

        for module in non_compliant_modules:
            self._disable_module(module)
        self._restart_apache()
    except subprocess.CalledProcessError as e:
        log('Error occurred auditing apache module compliance. '
            'This may have been already reported. '
            'Output is: %s' % e.output, level=ERROR)

def test_RTagsDaemonStartClean(self):
    try:
        os.chdir("clean")
    except OSError:
        print("Test Error: Couldn't cd into 'clean' test directory.")
        raise
    self.assertFalse(self.cmake_build_info["build_dir"].is_dir())
    self.plugin.setup_rtags_daemon()
    try:
        rtags_daemon_status = subprocess.check_output(
            self.cmake_cmd_info["rtags_status"])
    except subprocess.CalledProcessError as e:
        print(e.output)
    self.assertTrue(
        len("*********************************\nfileids\n*********************************\n*********************************\nheadererrors\n*********************************\n*********************************\ninfo\n*********************************\nRunning a release build\nsocketFile: /Users/phillipbonhomme/.rdm\ndataDir: /Users/phillipbonhomme/.cache/rtags/\noptions: 0x14jobCount: 4\nrpVisitFileTimeout: 60000\nrpIndexDataMessageTimeout: 60000\nrpConnectTimeout: 0\nrpConnectTimeout: 0\ndefaultArguments: List<String>(-ferror-limit=50, -Wall, -fspell-checking, -Wno-unknown-warning-option\")\nincludePaths: List<Source::Include>(\")\ndefines: List<Source::Define>(-DRTAGS=\")\nignoredCompilers: Set<Path>(\")\n*********************************\njobs\n*********************************\n"
            ) <= len(str(rtags_daemon_status)))

def test_RTagsDaemonStartDirty(self):
    try:
        os.chdir("dirty")
    except OSError:
        print("Test Error: Couldn't cd into 'dirty' test directory.")
        raise
    self.assertTrue(self.cmake_build_info["build_dir"].is_dir())
    self.plugin.setup_rtags_daemon()
    try:
        rtags_daemon_status = subprocess.check_output(
            self.cmake_cmd_info["rtags_status"])
    except subprocess.CalledProcessError as e:
        print(e.output)
    self.assertTrue(
        len("*********************************\nfileids\n*********************************\n*********************************\nheadererrors\n*********************************\n*********************************\ninfo\n*********************************\nRunning a release build\nsocketFile: /Users/phillipbonhomme/.rdm\ndataDir: /Users/phillipbonhomme/.cache/rtags/\noptions: 0x14jobCount: 4\nrpVisitFileTimeout: 60000\nrpIndexDataMessageTimeout: 60000\nrpConnectTimeout: 0\nrpConnectTimeout: 0\ndefaultArguments: List<String>(-ferror-limit=50, -Wall, -fspell-checking, -Wno-unknown-warning-option\")\nincludePaths: List<Source::Include>(\")\ndefines: List<Source::Define>(-DRTAGS=\")\nignoredCompilers: Set<Path>(\")\n*********************************\njobs\n*********************************\n"
            ) <= len(str(rtags_daemon_status)))

def test_RTagsClientStartDirty(self):
    try:
        os.chdir("dirty")
    except OSError:
        print("Test Error: Couldn't cd into 'dirty' test directory.")
        raise
    self.assertTrue(self.cmake_build_info["build_dir"].is_dir())
    self.assertTrue(self.cmake_build_info["comp_data_cmake"].is_file())
    self.plugin.setup_rtags_daemon()
    self.plugin.connect_rtags_client()
    try:
        rtags_client_status = subprocess.check_output(
            self.cmake_cmd_info["rtags_file_status"] +
            [str(src_info["cpp"])])
    except subprocess.CalledProcessError as e:
        print(e.output)
    self.assertTrue(str(rtags_client_status).find("managed"))

    try:
        rtags_client_status = subprocess.check_output(
            self.cmake_cmd_info["rtags_file_status"] +
            [str(src_info["test_cpp"])])
    except subprocess.CalledProcessError as e:
        print(e.output)
    self.assertTrue(str(rtags_client_status).find("managed"))

def test_RTagsClientSetFile(self):
    try:
        os.chdir("dirty")
    except OSError:
        print("Test Error: Couldn't cd into 'dirty' test directory.")
        raise
    self.assertTrue(self.cmake_build_info["build_dir"].is_dir())
    self.assertTrue(self.cmake_build_info["comp_data_cmake"].is_file())
    self.plugin.setup_rtags_daemon()
    self.plugin.connect_rtags_client()
    self.plugin.rtags_set_file([str(src_info["cpp"])])
    try:
        rtags_client_status = subprocess.check_output(
            self.cmake_cmd_info["rtags_file_status"] +
            [str(src_info["cpp"])])
    except subprocess.CalledProcessError as e:
        print(e.output)
    self.assertTrue(str(rtags_client_status).find("managed"))

def test_RTagsClientUpdateBuffers(self):
    try:
        os.chdir("dirty")
    except OSError:
        print("Test Error: Couldn't cd into 'dirty' test directory.")
        raise
    self.assertTrue(self.cmake_build_info["build_dir"].is_dir())
    self.assertTrue(self.cmake_build_info["comp_data_cmake"].is_file())
    self.plugin.setup_rtags_daemon()
    self.plugin.connect_rtags_client()
    self.plugin.update_rtags_buffers(
        [str(src_info["test_cpp"]), str(src_info["cpp"])])
    try:
        rtags_client_status = subprocess.check_output(
            self.cmake_cmd_info["rtags_buffers"])
    except subprocess.CalledProcessError as e:
        print(e.output)
    filepath = os.getcwd() + str(src_info["test_cpp"])
    self.assertTrue(str(rtags_client_status).find(filepath))

def run_cmake(self):
    print("Running CMake")
    build_dir_cmd_out = subprocess.call(
        ["mkdir", "build"],
        stdout=subprocess.DEVNULL,
        stderr=subprocess.DEVNULL)
    if build_dir_cmd_out != 0:
        print("Can't setup CMake build directory.")
        return

    if self.cmake_build_info["build_dir"].is_dir():
        try:
            subprocess.check_output(
                self.cmake_cmd_info["cmake_cmd"],
                cwd=str(self.cmake_build_info["build_dir"]))
        except subprocess.CalledProcessError as e:
            print(e.output)
        if not self.cmake_build_info["comp_data_cmake"].is_file():
            print("Couldn't setup CMake Project")
            return
    else:
        print("Couldn't setup CMake Project")
        return

def _PrintLogs(pod_name_prefix, job_name):
    """Prints pod logs.

    If a pod has been restarted, prints logs from previous run. Otherwise,
    prints the logs from current run. We print logs for pods selected based on
    pod_name_prefix and job_name.

    Args:
      pod_name_prefix: value of 'name-prefix' selector.
      job_name: value of 'job' selector.
    """
    for pod_name in _GetPodNames(pod_name_prefix, job_name):
        try:
            # Get previous logs.
            logs_command = [_KUBECTL, 'logs', '-p', pod_name]
            logging.info('Command to get logs: %s', ' '.join(logs_command))
            output = subprocess.check_output(logs_command, universal_newlines=True)
        except subprocess.CalledProcessError:
            # We couldn't get previous logs, so we will try to get current logs.
            logs_command = [_KUBECTL, 'logs', pod_name]
            logging.info('Command to get logs: %s', ' '.join(logs_command))
            output = subprocess.check_output(logs_command, universal_newlines=True)
        print('%s logs:' % pod_name)
        print(output)

def process_extract(extract):
    extract_file = os.path.join(target_dir, extract.extract + '.mbtiles')
    print('Create extract {}'.format(extract_file))

    # Instead of patching, copy over the patch source as target and
    # write directly to it (since that works concurrently).
    patch_src = args['--patch-from']
    if patch_src:
        print('Use patch from {} as base'.format(patch_src))
        shutil.copyfile(patch_src, extract_file)

    try:
        create_extract(extract, source_file, extract_file)
    except subprocess.CalledProcessError as e:
        # Failing extracts should not interrupt the entire process
        print(e, file=sys.stderr)
        return

    print('Update metadata {}'.format(extract_file))
    update_metadata(extract_file, extract.metadata(extract_file))

def get_main_git_path():
    """Gets the remote URL of the setup repository.

    Returns:
        string: remote URL of the setup-repository.
    """
    try:
        repository_basepath = subprocess.check_output(
            'git config --get remote.origin.url'.split(' '))
    except subprocess.CalledProcessError as err:
        setup_dir_path = os.path.dirname(os.path.realpath(__file__))
        err_msg = '''\'{}\' is not a git repository.
Did you download a .zip file from GitHub?
Use \'git clone https://github.com/foxBMS/foxBMS-setup\' to download
the foxBMS-setup repository.'''.format(setup_dir_path)
        logging.error(err_msg)
        sys.exit(1)
    repository_basepath, repository_name = repository_basepath.rsplit('/', 1)
    return repository_basepath, repository_name

def callGit(self, workspacePath, *args):
    cmdLine = ['git']
    cmdLine.extend(args)
    try:
        output = subprocess.check_output(
            cmdLine,
            cwd=os.path.join(os.getcwd(), workspacePath, self.__dir),
            universal_newlines=True,
            stderr=subprocess.DEVNULL)
    except subprocess.CalledProcessError as e:
        raise BuildError("git error:\n Directory: '{}'\n Command: '{}'\n'{}'".format(
            os.path.join(workspacePath, self.__dir),
            " ".join(cmdLine), e.output.rstrip()))
    return output

# Get GitSCM status. The purpose of this function is to return the status of
# the given directory.
#
# Return values:
#  - error: The SCM is in an error state. Use this if git returned an error code.
#  - dirty: SCM is dirty. Could be: modified files, switched to another
#           branch/tag/commit/repo, unpushed commits.
#  - clean: Same branch/tag/commit as specified in the recipe and no local changes.
#  - empty: Directory does not exist.
#
# This function is called when building with --clean-checkout. 'error' and
# 'dirty' SCMs are moved to the attic, while empty and clean directories are not.

def _scanDir(self, workspace, dir):
    self.__dir = dir
    dir = os.path.join(workspace, dir)
    try:
        remotes = subprocess.check_output(["git", "remote", "-v"],
                                          cwd=dir,
                                          universal_newlines=True).split("\n")
        remotes = (r[:-8].split("\t") for r in remotes if r.endswith("(fetch)"))
        self.__remotes = {remote: url for (remote, url) in remotes}

        self.__commit = subprocess.check_output(
            ["git", "rev-parse", "HEAD"],
            cwd=dir, universal_newlines=True).strip()
        self.__description = subprocess.check_output(
            ["git", "describe", "--always", "--dirty"],
            cwd=dir, universal_newlines=True).strip()
        self.__dirty = subprocess.call(
            ["git", "diff-index", "--quiet", "HEAD", "--"], cwd=dir) != 0
    except subprocess.CalledProcessError as e:
        raise BuildError("Git audit failed: " + str(e))
    except OSError as e:
        raise BuildError("Error calling git: " + str(e))

def callSubversion(self, workspacePath, *args):
    cmdLine = ['svn']
    cmdLine.extend(args)
    try:
        output = subprocess.check_output(cmdLine, cwd=workspacePath,
                                         universal_newlines=True,
                                         stderr=subprocess.DEVNULL)
    except subprocess.CalledProcessError as e:
        raise BuildError("svn error:\n Directory: '{}'\n Command: '{}'\n'{}'".format(
            os.path.join(workspacePath, self.__dir),
            " ".join(cmdLine), e.output.rstrip()))
    return output

# Get SvnSCM status. The purpose of this function is to return the status of
# the given directory.
#
# Return values:
#  - error: The SCM is in an error state. Use this if the svn call returns an
#           error code.
#  - dirty: SCM is dirty. Could be: modified files, switched to another URL or
#           revision.
#  - clean: Same URL and revision as specified in the recipe and no local changes.
#  - empty: Directory does not exist.
#
# This function is called when building with --clean-checkout. 'error' and
# 'dirty' SCMs are moved to the attic, while empty and clean directories are not.

def cli_call(arg_list, expect_success=True, env=os.environ.copy()):
    """Execute a CLI command in a subprocess and return the results.

    Args:
        arg_list: a list of command arguments
        expect_success: use False to return even if an error occurred
            when executing the command
        env: environment variables to pass to the subprocess

    Returns:
        (string, string, int): output message, error message, return code
    """
    p = subprocess.Popen(arg_list, stdout=subprocess.PIPE,
                         stderr=subprocess.PIPE, env=env)
    output, error = p.communicate()
    if p.returncode != 0:
        if output:
            print("Output:\n" + str(output))
        if error:
            print("Error Message:\n" + str(error))
        if expect_success:
            raise subprocess.CalledProcessError(p.returncode, arg_list, output)
    return output, error, p.returncode

def gunzip_sqlitecurve(sqlitecurve):
    '''This just uncompresses the sqlitecurve in gzip format.

    FIXME: this doesn't work with gzip < 1.6 or non-GNU gzip (probably).
    '''
    # -k to keep the input .gz just in case something explodes
    cmd = 'gunzip -k %s' % sqlitecurve
    try:
        procout = subprocess.check_output(cmd, shell=True)
        return sqlitecurve.replace('.gz', '')
    except subprocess.CalledProcessError:
        LOGERROR('could not uncompress %s' % sqlitecurve)
        return None


###############################################
## DECIDE WHICH COMPRESSION FUNCTIONS TO USE ##
###############################################

def available_vms(self, vms=None):
    """
    List all VMs regardless of state, filtering if requested via the <vms>
    parameter provided by the CLI.
    """
    try:
        possible_vms = [vm for vm in self.v.status()]
    except CalledProcessError, e:
        # TODO: Exception handling here assumes Vagrantfile is missing.
        # Vagrant seems to return 1 for many different errors, and finding
        # documentation for specific return codes has proven difficult.
        raise VagrantfileNotFound
    if vms:
        wanted_vms = [vm for vm in possible_vms if vm.name in vms]
        possible_vms = wanted_vms
    return possible_vms

def destroy_vms(self):
    """
    Destroy target VMs. Operates on all available VMs if none are specified.
    """
    for vm in self.vms:
        # Vagrant will return 1 if VM to be destroyed does not exist.
        if vm.state != "not_created":
            self.v.destroy(vm_name=vm.name)

        # Destroy a second time because the vagrant-digitalocean plugin
        # doesn't clean up after itself:
        # https://github.com/smdahlen/vagrant-digitalocean/issues/194
        if vm.provider == "digital_ocean":
            try:
                self.v.destroy(vm_name=vm.name)
            except CalledProcessError:
                pass

def test_error_bad_path(tmpdir):
    """Test handling of bad paths.

    :param tmpdir: pytest fixture.
    """
    with pytest.raises(CalledProcessError) as exc:
        pytest.run(tmpdir, ['sphinx-versioning', '-N', '-c', 'unknown', 'build', '.', str(tmpdir)])
    assert 'Directory "unknown" does not exist.' in exc.value.output

    tmpdir.ensure('is_file')
    with pytest.raises(CalledProcessError) as exc:
        pytest.run(tmpdir, ['sphinx-versioning', '-N', '-c', 'is_file', 'build', '.', str(tmpdir)])
    assert 'Directory "is_file" is a file.' in exc.value.output

    with pytest.raises(CalledProcessError) as exc:
        pytest.run(tmpdir, ['sphinx-versioning', '-N', 'build', '.', str(tmpdir)])
    assert 'Failed to find local git repository root in {}.'.format(repr(str(tmpdir))) in exc.value.output

    repo = tmpdir.ensure_dir('repo')
    pytest.run(repo, ['git', 'init'])
    empty = tmpdir.ensure_dir('empty1857')
    with pytest.raises(CalledProcessError) as exc:
        pytest.run(repo, ['sphinx-versioning', '-N', '-g', str(empty), 'build', '.', str(tmpdir)])
    assert 'Failed to find local git repository root in' in exc.value.output
    assert 'empty1857' in exc.value.output

def test_new_branch_tags(tmpdir, local_light, fail):
    """Test with new branches and tags unknown to local repo.

    :param tmpdir: pytest fixture.
    :param local_light: conftest fixture.
    :param bool fail: Fail by not fetching.
    """
    remotes = [r for r in list_remote(str(local_light)) if r[1] == 'ob_at']

    # Fail.
    sha = remotes[0][0]
    target = tmpdir.ensure_dir('exported', sha)
    if fail:
        with pytest.raises(CalledProcessError):
            export(str(local_light), sha, str(target))
        return

    # Fetch.
    fetch_commits(str(local_light), remotes)

    # Export.
    export(str(local_light), sha, str(target))
    files = [f.relto(target) for f in target.listdir()]
    assert files == ['README']
    assert target.join('README').read() == 'new'

def get_root(directory):
    """Get root directory of the local git repo from any subdirectory within it.

    :raise GitError: If git command fails (dir not a git repo?).

    :param str directory: Subdirectory in the local repo.

    :return: Root directory of repository.
    :rtype: str
    """
    command = ['git', 'rev-parse', '--show-toplevel']
    try:
        output = run_command(directory, command, env_var=False)
    except CalledProcessError as exc:
        raise GitError('Failed to find local git repository root in {}.'.format(repr(directory)), exc.output)
    if IS_WINDOWS:
        output = output.replace('/', '\\')
    return output.strip()

def fetch_commits(local_root, remotes):
    """Fetch from origin.

    :raise CalledProcessError: Unhandled git command failure.

    :param str local_root: Local path to git root directory.
    :param iter remotes: Output of list_remote().
    """
    # Fetch all known branches.
    command = ['git', 'fetch', 'origin']
    run_command(local_root, command)

    # Fetch new branches/tags.
    for sha, name, kind in remotes:
        try:
            run_command(local_root, ['git', 'reflog', sha])
        except CalledProcessError:
            run_command(local_root, command + ['refs/{0}/{1}'.format(kind, name)])
            run_command(local_root, ['git', 'reflog', sha])

def version_getter(config):
    """Get tag associated with HEAD; fall back to SHA1.

    If HEAD is tagged, return the tag name; otherwise fall back to
    HEAD's short SHA1 hash.

    .. note:: Only annotated tags are considered.

    TODO: Support non-annotated tags?
    """
    try:
        check_output(['git', 'rev-parse', '--is-inside-work-tree'], stderr=DEVNULL)
    except CalledProcessError:
        return None
    encoding = getpreferredencoding(do_setlocale=False)
    try:
        version = check_output(['git', 'describe', '--exact-match'], stderr=DEVNULL)
    except CalledProcessError:
        version = check_output(['git', 'rev-parse', '--short', 'HEAD'])
    version = version.decode(encoding).strip()
    return version

def my_thread():
    global files, path, timeout, options
    myname = threading.currentThread().getName()
    while files:
        # create command to run
        nextfile = files.pop()
        # print name of thread and command being run
        print('Thread {0} starts processing {1}'.format(myname, nextfile))
        f = path + nextfile + options
        try:
            # timeout interrupts a frozen command; shell=True doesn't open a console
            subprocess.check_call(args=f, shell=True, timeout=timeout)
        except subprocess.TimeoutExpired:
            print('Thread {0}: processing {1} took too long'.format(myname, nextfile))
        except subprocess.CalledProcessError as e:
            print('Thread {0}: processing {1} returned error {2}: {3}'.format(
                myname, nextfile, e.returncode, e.output))
        except Exception as e:
            print('Thread {0}: processing {1} returned error {2}'.format(
                myname, nextfile, type(e).__name__))
    print('thread {0} stopped'.format(myname))

def invoke(command, success_codes=(0,)):
    try:
        output = subprocess.check_output(command, stderr=subprocess.STDOUT)
        status = 0
    except subprocess.CalledProcessError as error:
        output = error.output
        status = error.returncode
    output = output.decode('utf-8')
    if status not in success_codes:
        raise Exception(
            'Command %r returned exit code %d and output: """%s""".' % (
                command, status, output))
    return status, output

def upgrade_charm():
    if is_leader():
        # if we are upgrading, then the old version might have used the
        # HEAT_PATH/encryption-key. So we grab the key from that, and put it
        # in leader settings to ensure that the key remains the same during
        # an upgrade.
        encryption_path = os.path.join(HEAT_PATH, 'encryption-key')
        if os.path.isfile(encryption_path):
            with open(encryption_path, 'r') as f:
                encryption_key = f.read()
            try:
                leader_set({'heat-auth-encryption-key': encryption_key})
            except subprocess.CalledProcessError as e:
                log("upgrade: leader_set: heat-auth-encryption-key failed,"
                    " didn't delete the existing file: {}.\n"
                    "Error was: {}".format(encryption_path, str(e)),
                    level=WARNING)
            else:
                # now we just delete the file
                os.remove(encryption_path)
    leader_elected()

def __init__(self, fqArchiveUrl, filtersDir, outputPrefix, outputUrl, diskSize,
             diskType, logsPath, container, scriptUrl, tag, cores, mem,
             preemptible):
    super(PipelineStep, self).__init__()

    fqFileName = os.path.basename(fqArchiveUrl)
    fqInputs = "{fqArchive}:{fqFileName}".format(fqArchive=fqArchiveUrl,
                                                 fqFileName=fqFileName)

    try:
        filtersDirContents = subprocess.check_output(["gsutil", "ls", filtersDir])
    except subprocess.CalledProcessError as e:
        print "ERROR: couldn't get a listing of filter files! -- {reason}".format(reason=e)
        exit(-1)

    bfInputs = [x for x in filtersDirContents.split('\n')
                if re.match('^.*\.bf$', x) or re.match('^.*\.txt', x)]
    bfInputs.append(fqInputs)

    inputs = ",".join(["{url}:{filename}".format(url=x, filename=os.path.basename(x))
                       for x in bfInputs])
    outputs = "{outputPrefix}*:{outDir}".format(outputPrefix=outputPrefix,
                                                outDir=outputUrl)

    env = "INPUT_FILE={fqFileName},OUTPUT_PREFIX={outputPrefix},FILTERS_LIST={filtersList}".format(
        fqFileName=fqFileName,
        outputPrefix=outputPrefix,
        filtersList=','.join([os.path.basename(x) for x in bfInputs
                              if re.match('^.*\.bf$', x)]))

    self._step = PipelineSchema("biobloomcategorizer",
                                self._pipelinesConfig,
                                logsPath,
                                container,
                                scriptUrl=scriptUrl,
                                cores=cores,
                                mem=mem,
                                diskSize=diskSize,
                                diskType=diskType,
                                inputs=inputs,
                                outputs=outputs,
                                env=env,
                                tag=tag,
                                preemptible=preemptible)

def getJobLogs(args, config):
    # TODO: reimplement
    pipelineDbUtils = PipelineDbUtils(config)

    jobInfo = pipelineDbUtils.getJobInfo(
        select=["stdout_log", "stderr_log", "gcs_log_path"],
        where={"job_id": args.jobId})

    with open(os.devnull, 'w') as fnull:
        if args.stdout:
            try:
                stdoutLogFile = subprocess.check_output(
                    ["gsutil", "cat",
                     os.path.join(jobInfo[0].gcs_log_path, jobInfo[0].stdout_log)],
                    stderr=fnull)
            except subprocess.CalledProcessError as e:
                print "ERROR: couldn't get the stdout log : {reason}".format(reason=e)
                exit(-1)

            print "STDOUT:\n"
            print stdoutLogFile
            print "---------\n"

        if args.stderr:
            try:
                stderrLogFile = subprocess.check_output(
                    ["gsutil", "-q", "cat",
                     os.path.join(jobInfo[0].gcs_log_path, jobInfo[0].stderr_log)],
                    stderr=fnull)
            except subprocess.CalledProcessError as e:
                print "ERROR: couldn't get the stderr log : {reason}".format(reason=e)
                exit(-1)

            print "STDERR:\n"
            print stderrLogFile
            print "---------\n"

    pipelineDbUtils.closeConnection()