我们从Python开源项目中,提取了以下32个代码示例,用于说明如何使用sh.ErrorReturnCode_1()。
def test_execute(self, training_command):
    """Run the training command; the done-callback must fire exactly once."""
    finished = []

    def done_callback(cmd, success, exit_code):
        finished.append(True)

    training_command._done_callback = done_callback
    if DRY_RUN:
        # Point the command at a missing config file so the run fails
        # with exit code 1.
        training_command._config_filepath = NOT_EXISTING_PATH
        running_command = training_command.execute()
        with pytest.raises(sh.ErrorReturnCode_1):
            running_command.wait()
    else:
        running_command = training_command.execute()
        running_command.wait()
        assert len(finished) == 1 and finished[0]
def hdd_info(uuid):
    """Return a dict of key/value metadata for the given VirtualBox HDD.

    :param uuid: str - UUID (or path) of the hard disk
    :return: dict[str, str]
    :raises HDDNotFound: if VirtualBox does not know the disk
    """
    try:
        info = {}
        for line in VBoxManage('showhdinfo', uuid, _iter=True):
            if not line.strip():
                continue
            # Split on the FIRST ':' only -- values such as Windows-style
            # paths ("C:\...") legitimately contain colons, and a bare
            # split(':') would raise "too many values to unpack" on them.
            key, val = line.strip().split(':', 1)
            val = val.strip()
            info[key] = val
        return info
    except ErrorReturnCode_1 as e:
        # VBoxManage exits 1 for several failures; inspect stderr to
        # distinguish "not found" from everything else.
        if 'VBOX_E_OBJECT_NOT_FOUND' in e.stderr:
            raise HDDNotFound(uuid)
        # something else happened, just let it go
        raise
def apply(self, desc, filename, force=False):
    """Simple apply with secrets support."""
    changes = self.apply_secrets(desc, filename)
    action = "unknown"
    try:
        if not force and not len(changes):
            # No secret changes and no force: a plain apply suffices.
            action = "apply_file"
            self.apply_file(filename)
        else:
            LOG.info(
                'Secret changes detected: %s -- Replacing pod', changes
            )
            action = "replace_path"
            self.replace_path(filename, force=force)
    except sh.ErrorReturnCode_1:
        # Last resort: delete the resource when present, then recreate it.
        LOG.error('%s failed (forcing)', action)
        if self.exists(desc.metadata.name):
            self.delete_path(filename)
        self.create_path(filename)
    return changes
def vm_ip(name, id):
    """
    Return a running VMs IP for the given VM name and interface id, returns
    None if not running or the id does not exists

    :param name: str
    :param id: int
    :return: None|str
    """
    prop = '/VirtualBox/GuestInfo/Net/%d/V4/IP' % (id)
    try:
        value = str(VBoxManage('guestproperty', 'get', name, prop))
    except ErrorReturnCode_1 as err:
        # if the VM was not found
        if 'VBOX_E_OBJECT_NOT_FOUND' in err.stderr:
            raise VMNotFound(name)
        # something else happened, just let it go
        raise
    if value == 'No value set!':
        return None
    # Output looks like "Value: <ip>"; drop the 7-character prefix.
    return value[7:].strip()
def vm_start(name, headless=True):
    """
    Start or resume a VM in headmode by default

    :param name: str
    :param headless: bool
    :return: None
    """
    mode = 'headless' if headless else 'gui'
    try:
        VBoxManage('startvm', name, '--type', mode)
    except ErrorReturnCode_1 as err:
        # Exit code 1 covers several failure modes; disambiguate via stderr.
        if 'VBOX_E_OBJECT_NOT_FOUND' in err.stderr:
            raise VMNotFound(name)
        if 'VBOX_E_INVALID_OBJECT_STATE' in err.stderr:
            raise InvalidState(err.stderr.split('\n')[0][17:])
        # something else happened, just let it go
        raise
def vm_suspend(name):
    """
    Save the state of a running VM, raises an InvalidState exception if the
    VM is not in a state where it can be saved

    :param name: str
    :return: None
    """
    try:
        VBoxManage('controlvm', name, 'savestate')
    except ErrorReturnCode_1 as err:
        # Exit code 1 covers several failure modes; disambiguate via stderr.
        if 'VBOX_E_OBJECT_NOT_FOUND' in err.stderr:
            raise VMNotFound(name)
        if 'Machine in invalid state' in err.stderr:
            raise InvalidState(err.stderr[17:])
        # something else happened, just let it go
        raise
def move_files(src_path, dst_path, *files):
    """Move files from a source path to a destination path.

    :param src_path: absolute or relative source path
    :param dst_path: absolute or relative destination path
    :param files: tuples with the following format (source_filename, destination_filename)
    """
    src_path, dst_path = __expand_folders(src_path, dst_path)
    for entry in files:
        # Each entry is either (src, dst) or a single name used for both.
        if isinstance(entry, tuple):
            src, dst = entry
        elif isinstance(entry, string_types):
            src = dst = entry
        else:
            continue
        src, dst = join(src_path, src), join(dst_path, dst)
        if src == dst:
            continue
        try:
            sh.mv(src, dst)
        except sh.ErrorReturnCode_1:
            # Best-effort move: missing sources are ignored.
            pass
def move_folder(src_path, dst_path, new_folder_name=None):
    """Move a folder to a destination path, optionally renaming it.
    If it fails, it does it silently.

    :param src_path: absolute or relative source path
    :param dst_path: absolute or relative destination root path
    :param new_folder_name: new name for the source path's folder
    """
    src_path, dst_path = __expand_folders(src_path, dst_path)
    if new_folder_name is not None:
        dst_path = join(dst_path, new_folder_name).rstrip("/")
    if src_path == dst_path:
        return
    try:
        sh.mv(src_path, dst_path)
    except sh.ErrorReturnCode_1:
        # Best-effort move: failures are deliberately swallowed.
        pass
def strip_libraries(self, arch):
    """Strip every .so under the dist's private/ and libs/ directories."""
    info('Stripping libraries')
    if self.ctx.python_recipe.from_crystax:
        info('Python was loaded from CrystaX, skipping strip')
        return
    env = arch.get_env()
    strip = which('arm-linux-androideabi-strip', env['PATH'])
    if strip is None:
        warning("Can't find strip in PATH...")
        return
    strip = sh.Command(strip)
    filens = shprint(sh.find,
                     join(self.dist_dir, 'private'),
                     join(self.dist_dir, 'libs'),
                     '-iname', '*.so', _env=env).stdout.decode('utf-8')
    logger.info('Stripping libraries in private dir')
    for filen in filens.split('\n'):
        try:
            strip(filen, _env=env)
        except sh.ErrorReturnCode_1:
            # Some matches (or the trailing empty line) can't be stripped;
            # that's not fatal.
            logger.debug('Failed to strip ' + filen)
def create(self, requirements):
    """Create a btrfs subvolume and size it per *requirements*.

    Returns True on success, False otherwise.
    """
    if not isinstance(requirements['reserved_size'], Number):
        return False
    try:
        self._btrfs('subvolume', 'create',
                    self._get_path(requirements['id']))
        self._set_quota(requirements['id'], requirements['reserved_size'])
    except sh.ErrorReturnCode_1 as err:
        print(self._err('create', err.stderr, err.full_cmd))
        return False
    else:
        return True
def _set_quota(self, id, quota):
    """Apply an exclusive qgroup limit to the subvolume; True on success."""
    try:
        self._btrfs('qgroup', 'limit', '-e',
                    self._get_quota(quota), self._get_path(id))
    except sh.ErrorReturnCode_1 as err:
        print(self._err('resize', err.stderr, err.full_cmd))
        return False
    else:
        return True
def clone(self, id, parent_id, quota):
    """Snapshot *parent_id* into *id* and apply *quota*; True on success."""
    try:
        self._btrfs('subvolume', 'snapshot',
                    self._get_path(parent_id), self._get_path(id))
        self._set_quota(id, quota)
    except sh.ErrorReturnCode_1 as err:
        print(self._err('clone', err.stderr, err.full_cmd))
        return False
    else:
        return True
def remove(self, id):
    """Delete the subvolume backing *id*; True on success."""
    try:
        self._btrfs('subvolume', 'delete', self._get_path(id))
    except sh.ErrorReturnCode_1 as err:
        print(self._err('remove', err.stderr, err.full_cmd))
        return False
    else:
        return True
def get_all(self):
    """Return the ids of every subvolume directly under the base path."""
    ids = []
    try:
        listing = self._btrfs('subvolume', 'list', '-o', self._base_path)
        for line in listing.strip().splitlines():
            path = line.strip().split()[-1]
            try:
                id = None
                # Seems like output may vary, the path can be absolute or
                # relative so a check is needed
                if '/' not in path:
                    id = path
                elif self._base_path in path:
                    prefix = '{}/'.format(self._base_path)
                    id = path[path.index(prefix):].replace(prefix, '')
                if id:
                    ids.append(id)
            except ValueError:
                pass
    except sh.ErrorReturnCode_1 as err:
        print(self._err('get_all', err.stderr, err.full_cmd))
        return []
    return ids
def from_dict(self, configkey, literal_dict):
    """Turn a dict into a configmap.

    Each key of *literal_dict* becomes a file in a scratch directory which
    is then handed to `kubectl create configmap --from-file`. The create is
    retried with exponential backoff for up to ``max_timeout`` seconds.

    :param configkey: name of the configmap to create
    :param literal_dict: mapping of filename -> file contents
    """
    tdir = tempfile.mkdtemp()
    try:
        for key in literal_dict:
            with open(os.path.join(tdir, key), 'w') as h:
                h.write(literal_dict[key])
        max_timeout = 120
        retry_delay = 1
        success = False
        start = time.time()
        now = time.time()
        while success is False and now < start + max_timeout:
            try:
                self.kubectl.create.configmap(
                    configkey,
                    "--from-file={}".format(tdir),
                    '--context={}'.format(self.config.context),
                    '--namespace={}'.format(self.config.namespace)
                )
                success = True
            except sh.ErrorReturnCode_1 as err:
                LOG.error(
                    "Error creating configmap %s (%s remaining)",
                    err,
                    (start + max_timeout) - now
                )
                # Clamp to >= 0: near the deadline the remaining budget can
                # go negative and time.sleep() would raise ValueError.
                time.sleep(
                    max(0, min(
                        retry_delay,
                        (max_timeout - (time.time() - start))
                    ))
                )
                retry_delay = retry_delay * 2
            now = time.time()
    finally:
        # Always remove the scratch dir -- previously it leaked whenever
        # an exception escaped the retry loop.
        shutil.rmtree(tdir)
def create(self, namespace):
    """Create the given namespace.

    :param namespace: name of the namespace we want to create
    :returns: True if the create succeeded, False otherwise
    """
    response = self._post(
        "/namespaces",
        data={
            "kind": "Namespace",
            "apiVersion": "v1",
            "metadata": {
                "name": namespace,
            }
        }
    )
    if response['status'] == "Failure":
        # I would rather raise.. but want to stay backward
        # compatible for a little while.
        # raise KubeError(response)
        return False
    self.config.set_namespace(namespace)
    account = ServiceAccount(self.config)
    if not account.exists("default"):
        # this will (but not always) fail
        try:
            account.create("default")
        except sh.ErrorReturnCode_1 as err:
            LOG.error(err)
            LOG.error('(ignoring)')
    return True
def run(self, resume=1):
    """Execute ansible-playbook using information gathered from config.

    Args:
        resume (int): Used as list index - 1 from which to resume workflow.
    """
    # list index to start working on (for support of --resume)
    try:
        start_at = int(resume) - 1
    except ValueError:
        # generally if passed a non-int
        start_at = 0
    cmds = self._config.playbook_cmds
    kwargs = {
        '_out': self._print_stdout,
        '_err': self._print_stderr,
        '_env': self._config.env
    }
    for counter, cmd in enumerate(cmds):
        # skip execution until we reach our --resume index; a list slice
        # doesn't work here since we need the full list to produce a
        # resume index on failure
        if counter < start_at:
            continue
        try:
            sh.ansible_playbook(*cmd, **kwargs)
        except (sh.ErrorReturnCode, sh.ErrorReturnCode_1):
            msg = ('An error was encountered during playbook execution. '
                   'Please resolve manually and then use the following '
                   'command to resume execution of this script:\n\n')
            cmd = self._construct_resume_cli(counter + 1)
            print(colorama.Fore.RED + msg + cmd)
            sys.exit(1)
def inrepo(self):
    """Set self.repo True when the package name exists in the sync repos."""
    try:
        results = pacman("-Ssq", self.name)
    except sh.ErrorReturnCode_1:
        # pacman -Ssq exits 1 when the search matches nothing.
        return
    if self.name in results.split("\n"):
        self.repo = True
def build(self):
    """Build this package's AUR deps, then the package itself via makepkg."""
    for dep in self.aurdeps:
        try:
            dep.build()
        except BuildError:
            print("could not build dependency %s" % (dep.name))
            return
    print("Building", self.name)
    os.chdir(self.path)
    logfile = "/var/log/aur_repo/%s.log" % self.name
    try:
        results = sh.makepkg("-d", "--noconfirm", _err=logfile)
    except sh.ErrorReturnCode_1:
        # makepkg exits 1 when the package was already built -- that is
        # fine; anything else in the log means a real failure.
        with open(logfile) as log:
            if "A package has already been built" in log.read():
                print("%s is up to date" % (self.name))
                return
        raise BuildError
    except sh.ErrorReturnCode_2:
        print("Error building %s, see /var/log/aur_repo/%s.log" % (self.name, self.name))
        raise BuildError
    # Locate the built artifact from makepkg's log output.
    for line in open(logfile).read().split("\n"):
        if "Finished making" in line:
            tmp = line[line.find(":") + 1:].split()[1]
            self.pkg = sh.glob("%s/*%s*" % (self.path, tmp))[0]
    self.add()
def _update_project(project):
    """Rebase the project onto its upstream (when configured) and push."""
    with cd(project.folder()):
        info('We will update the project %s' % click.style(project.name(), bold=True))
        repo = Repo(project.folder())
        remotes = [remote.name for remote in repo.remotes]
        updated_from_upstream = False
        if 'upstream' not in remotes:
            click.secho('warning: your repository has no configured upstream, '
                        'skipping update', fg='yellow')
        else:
            # Only rebase when the active branch actually exists upstream.
            out = git('branch', '-a')
            remote_branch_name = 'remotes/upstream/' + repo.active_branch.name
            rebase = any(remote_branch_name in line
                         for line in out.split('\n'))
            if rebase:
                try:
                    vgit('pull', '--rebase', 'upstream', repo.active_branch)
                except ErrorReturnCode_1:
                    fatal('error: unable to update the project')
                updated_from_upstream = True
                if 'origin' in remotes:
                    vgit('push', 'origin', repo.active_branch)
        if 'origin' in remotes and not updated_from_upstream:
            vgit('pull', 'origin', repo.active_branch)
        success('The project %s has been updated' % click.style(project.name(), bold=True))
def hdd_detach(uuid, controller_name, port, device):
    """Detach the medium at (controller, port, device) from VM *uuid*."""
    try:
        VBoxManage('storageattach', uuid, '--storagectl', controller_name,
                   '--port', port, '--device', device, '--type', 'hdd',
                   '--medium', 'none')
    except ErrorReturnCode_1 as err:
        # if the VM was not found
        if 'VBOX_E_OBJECT_NOT_FOUND' in err.stderr:
            raise HDDNotFound(uuid)
        # something else happened, just let it go
        raise
def hdd_clone(uuid, new_location, existing=False):
    """Clone hard disk *uuid* to *new_location*.

    When *existing* is True the target image must already exist.
    """
    args = ['clonehd', uuid, new_location]
    if existing:
        args.append('--existing')
    try:
        VBoxManage(*args)
    except ErrorReturnCode_1 as err:
        # if the VM was not found
        if 'VBOX_E_OBJECT_NOT_FOUND' in err.stderr:
            raise HDDNotFound(uuid)
        # something else happened, just let it go
        raise
def hdd_close(uuid, delete=False):
    """Close medium *uuid*, optionally deleting its backing storage."""
    args = ['closemedium', 'disk', uuid]
    if delete:
        args.append('--delete')
    try:
        VBoxManage(*args)
    except ErrorReturnCode_1 as err:
        # if the VM was not found
        if 'VBOX_E_OBJECT_NOT_FOUND' in err.stderr:
            raise HDDNotFound(uuid)
        # something else happened, just let it go
        raise
def vm_network(name):
    """
    Return IP, Mac, Netmask, Broadcast and Status about every interfaces of a
    running VM

    :param name: str
    :return: list[dict[str,str]]
    :raises VMNotFound: if VirtualBox does not know the VM
    """
    try:
        networks = []
        count = int(str(VBoxManage('guestproperty', 'get', name,
                                   '/VirtualBox/GuestInfo/Net/Count'))[7:])
        mappings = {
            'ip': '/VirtualBox/GuestInfo/Net/%d/V4/IP',
            'mac': '/VirtualBox/GuestInfo/Net/%d/MAC',
            'netmask': '/VirtualBox/GuestInfo/Net/%d/V4/Netmask',
            'status': '/VirtualBox/GuestInfo/Net/%d/Status',
            'broadcast': '/VirtualBox/GuestInfo/Net/%d/V4/Broadcast'
        }
        for i in range(count):
            network = {}
            # .items() instead of the Python-2-only .iteritems(); loop names
            # also no longer shadow the builtins `map` and `property`.
            for field, prop_template in mappings.items():
                prop = VBoxManage('guestproperty', 'get', name,
                                  prop_template % i)
                # Output looks like "Value: <data>"; drop the 7-char prefix.
                network[field] = str(prop)[7:].strip()
            networks.append(network)
        return networks
    except ErrorReturnCode_1 as e:
        # if the VM was not found
        if 'VBOX_E_OBJECT_NOT_FOUND' in e.stderr:
            raise VMNotFound(name)
        # something else happened, just let it go
        raise
def remove_files(path, *files):
    """Remove the specified files, failing silently for missing ones.

    :param path: absolute or relative source path
    :param files: filenames of files to be removed
    """
    path = __expand_folders(path)
    for fname in files:
        try:
            sh.rm(join(path, fname))
        except sh.ErrorReturnCode_1:
            # Nonexistent file: nothing to do.
            pass
def remove_folder(path):
    """Remove an entire folder, failing silently if it does not exist.

    :param path: absolute or relative source path
    """
    path = __expand_folders(path)
    try:
        sh.rm('-r', path)
    except sh.ErrorReturnCode_1:
        # Nonexistent folder: nothing to do.
        pass
def start_test(self, repo, ref):
    """Grab the per-repo lock, check out *ref* and report status to GitHub.

    Returns the lock token so a later task can release the lock.
    """
    base_repo = redis.get("source:" + repo).decode("utf-8")
    lock = redis.lock(base_repo, timeout=60 * 60)
    log_key = "log:" + repo + "/" + ref
    log_url = "https://rosie-ci.ngrok.io/log/" + repo + "/" + ref
    print("grabbing lock " + base_repo)
    # Retry the task in 10 seconds if the lock can't be grabbed.
    if not lock.acquire(blocking=False):
        if self.request.retries == 24:
            set_status(repo, ref, "error", log_url, "Hit max retries. Please ping the owner.")
        raise self.retry(countdown=30, max_retries=25)
    print("Lock grabbed " + base_repo)
    redis.set("owner-" + base_repo, log_url)
    set_status(repo, ref, "pending", log_url, "Commencing Rosie test.")
    repo_path = cwd + "/repos/" + base_repo
    os.chdir(repo_path)
    try:
        redis.append(log_key, git.checkout(ref))
    except sh.ErrorReturnCode_128 as e:
        print("error 128")
        detail = e.full_cmd + "\n" + e.stdout.decode('utf-8') + "\n" + e.stderr.decode('utf-8')
        redis.append(log_key, detail)
        final_status(repo, ref, "error", "Git error in Rosie.")
    except sh.ErrorReturnCode_1 as e:
        print("error 1")
        detail = e.full_cmd + "\n" + e.stdout.decode('utf-8') + "\n" + e.stderr.decode('utf-8')
        redis.append(log_key, detail)
        final_status(repo, ref, "error", "Git checkout error in Rosie.")
    print("test started " + log_url)
    return lock.local.token.decode("utf-8")
def username_exists(username):
    """Return True when *username* is a known account on this system."""
    try:
        sh.id(username)
    except sh.ErrorReturnCode_1:
        # `id` exits 1 for unknown users.
        return False
    return True
def apply(self, desc, filename, force=False):
    """Read, Merge then Apply."""
    changes = self.apply_secrets(desc, filename)
    if self.exists(desc.metadata.name):
        # pull from the server
        remote = munch.Munch()
        try:
            remote = self.get(desc.metadata.name)
        except Exception as err:
            LOG.error(
                '%s failure to retrieve existing resource %s',
                err, filename
            )
        # merge our file on top of it, then write the merged doc to disk
        remote.update(desc)
        with open(filename, 'w') as h:
            h.write(remote.toJSON())
    try:
        if not force and not len(changes):
            self.apply_file(filename)
        else:
            if changes:
                LOG.info(
                    'Secret changes detected: %s -- Replacing pod',
                    changes
                )
            if desc.kind in ['Deployment']:
                # even with force=true replacing a deployment doesn't
                # cleanup and redeploy pods.
                self.delete_path(filename)
                self.create_path(filename)
            else:
                self.replace_path(filename, force=force)
    except sh.ErrorReturnCode_1:
        LOG.error('apply_file failed (forcing)')
        if self.exists(desc.metadata.name):
            self.delete_path(filename)
        self.create_path(filename)
    return changes
def getDeps(self):
    """Resolve make- and run-time dependencies listed in the PKGBUILD.

    AUR dependencies are queued in self.aurdeps; repo dependencies are
    installed with pacman when missing. Raises BuildError when an install
    fails.
    """
    pkgbuild = open(os.path.join(self.path, "PKGBUILD"),
                    errors="surrogateescape").read()

    def listed(field):
        # Collect package names from the `field=( ... )` array, stripping
        # any quoting.
        names = []
        hits = re.findall("^" + field + ".*?=\\((.*?)\\)\\s*$",
                          pkgbuild, re.MULTILINE | re.DOTALL)
        if hits:
            names.extend(" ".join(hits).replace("'", "").replace('"', '').split())
        return names

    for dep in listed("makedepends"):
        tmp = Package(dep, self.buildPath, self.repoPath, self.repoName)
        if not tmp.aur and not tmp.repo:
            print("Could not find make dependency %s" % (dep))
        # NOTE: unlike the run-time loop below, an unknown make dependency
        # still falls through to the pacman check.
        if tmp.aur:
            self.aurdeps.append(tmp)
        else:
            try:
                pacman("-Qi", dep)
            except sh.ErrorReturnCode_1:
                try:
                    print("Installing make dependency %s" % (dep))
                    results = sudo.pacman("--noconfirm", "-S", dep)
                except sh.ErrorReturnCode_1:
                    print("Could not install make dependency %s" % (dep))
                    raise BuildError

    for dep in listed("depends"):
        tmp = Package(dep, self.buildPath, self.repoPath, self.repoName)
        if not tmp.aur and not tmp.repo:
            print("Could not find dependency %s" % (dep))
        elif tmp.aur:
            self.aurdeps.append(tmp)
        else:
            try:
                pacman("-Qi", dep)
            except sh.ErrorReturnCode_1:
                try:
                    print("Installing dependency %s" % (dep))
                    results = sudo.pacman("--noconfirm", "-S", dep)
                except sh.ErrorReturnCode_1:
                    print("Could not install dependency %s" % (dep))
                    raise BuildError
def build_cython_components(self, arch):
    """Run setup.py build_ext, cythonizing by hand when the first pass fails."""
    info('Cythonizing anything necessary in {}'.format(self.name))
    env = self.get_recipe_env(arch)

    if self.ctx.python_recipe.from_crystax:
        # CrystaX python: extend PYTHONPATH with the host's site-packages.
        command = sh.Command('python{}'.format(self.ctx.python_recipe.version))
        site_packages_dirs = command(
            '-c', 'import site; print("\\n".join(site.getsitepackages()))')
        site_packages_dirs = site_packages_dirs.stdout.decode('utf-8').split('\n')
        if 'PYTHONPATH' in env:
            env['PYTHONPATH'] = env['PYTHONPATH'] + ':{}'.format(':'.join(site_packages_dirs))
        else:
            env['PYTHONPATH'] = ':'.join(site_packages_dirs)

    with current_directory(self.get_build_dir(arch.arch)):
        hostpython = sh.Command(self.ctx.hostpython)
        shprint(hostpython, '-c', 'import sys; print(sys.path)', _env=env)
        print('cwd is', realpath(curdir))
        info('Trying first build of {} to get cython files: this is '
             'expected to fail'.format(self.name))
        manually_cythonise = False
        try:
            shprint(hostpython, 'setup.py', 'build_ext', '-v',
                    _env=env, *self.setup_extra_args)
        except sh.ErrorReturnCode_1:
            print()
            info('{} first build failed (as expected)'.format(self.name))
            manually_cythonise = True

        if manually_cythonise:
            self.cythonize_build(env=env)
            shprint(hostpython, 'setup.py', 'build_ext', '-v',
                    _env=env, _tail=20, _critical=True,
                    *self.setup_extra_args)
        else:
            info('First build appeared to complete correctly, skipping manual'
                 'cythonising.')

        if 'python2' in self.ctx.recipe_build_order:
            info('Stripping object files')
            build_lib = glob.glob('./build/lib*')
            shprint(sh.find, build_lib[0], '-name', '*.o', '-exec',
                    env['STRIP'], '{}', ';', _env=env)

        if 'python3crystax' in self.ctx.recipe_build_order:
            info('Stripping object files')
            shprint(sh.find, '.', '-iname', '*.so', '-exec',
                    '/usr/bin/echo', '{}', ';', _env=env)
            shprint(sh.find, '.', '-iname', '*.so', '-exec',
                    env['STRIP'].split(' ')[0], '--strip-unneeded',
                    # '/usr/bin/strip', '--strip-unneeded',
                    '{}', ';', _env=env)
def srcstat(env):
    """Print a table of source packages with their VCS kind and version."""
    if not env.workPath:
        secho ( 'ERROR: No ipbb work area detected', fg='red' )
        return

    secho ( "Packages", fg='blue' )
    sources = env.getSources()
    if not sources:
        return

    table = Texttable(max_width=0)
    table.set_deco(Texttable.HEADER | Texttable.BORDER)
    table.set_chars(['-', '|', '+', '-'])
    table.header(['name', 'kind', 'version'])

    for src in sources:
        src_dir = join(env.src, src)
        kind, version = "unknown", None

        # Check if a git repository
        if exists(join( src_dir, '.git')):
            with DirSentry(src_dir) as _:
                kind = 'git'
                try:
                    # lBranch = sh.git('symbolic-ref','--short', 'HEAD').strip()
                    version = sh.git('symbolic-ref', 'HEAD').split('/')[-1].strip()
                except sh.ErrorReturnCode_128:
                    # Detached HEAD: fall back to the abbreviated commit hash.
                    version = sh.git('rev-parse', '--short', 'HEAD').strip()+'...'
                try:
                    sh.git('diff', '--no-ext-diff', '--quiet').strip()
                except sh.ErrorReturnCode_1:
                    # Unstaged modifications present.
                    version += '*'
                try:
                    sh.git('diff', '--no-ext-diff', '--cached', '--quiet').strip()
                except sh.ErrorReturnCode_1:
                    # Staged but uncommitted modifications present.
                    version += '+'
        elif exists(join( src_dir, '.svn')):
            with DirSentry(src_dir) as _:
                kind = 'svn'
                raw_info = sh.svn('info')
                svn_info = { entry[0]:entry[1].strip() for entry in ( line.split(':',1) for line in raw_info.split('\n') if line )}
                version = svn_info['URL'].replace( svn_info['Repository Root']+'/', '' )
                if len(sh.svn('status','-q')):
                    version += '*'

        table.add_row([src, kind, version])
    echo ( table.draw() )
# ------------------------------------------------------------------------------