Python subprocess module: check_output() code examples

The following code examples, extracted from open-source Python projects, show how to use subprocess.check_output().
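
As a minimal sketch of the pattern the excerpts below share (Python 3; the command shown is illustrative and not taken from any of the projects):

import subprocess

try:
    # check_output() runs the command, waits for it to finish, and returns
    # its standard output; a non-zero exit status raises CalledProcessError.
    output = subprocess.check_output(
        ['uname', '-r'],                # argument list, no shell involved
        stderr=subprocess.STDOUT,       # fold stderr into the captured output
        universal_newlines=True,        # decode bytes to str
    )
    print(output.strip())
except FileNotFoundError:
    print('command not found')
except subprocess.CalledProcessError as e:
    print('command failed with status {}: {}'.format(e.returncode, e.output))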

Project: charm-plumgrid-gateway    Author: openstack
def configure_analyst_opsvm():
    '''
    Configures Analyst for OPSVM
    '''
    if not service_running('plumgrid'):
        restart_pg()
    opsvm_ip = pg_gw_context._pg_dir_context()['opsvm_ip']
    NS_ENTER = ('/opt/local/bin/nsenter -t $(ps ho pid --ppid $(cat '
                '/var/run/libvirt/lxc/plumgrid.pid)) -m -n -u -i -p ')
    sigmund_stop = NS_ENTER + '/usr/bin/service plumgrid-sigmund stop'
    sigmund_status = NS_ENTER \
        + '/usr/bin/service plumgrid-sigmund status'
    sigmund_autoboot = NS_ENTER \
        + '/usr/bin/sigmund-configure --ip {0} --start --autoboot' \
        .format(opsvm_ip)
    try:
        status = subprocess.check_output(sigmund_status, shell=True)
        if 'start/running' in status:
            if subprocess.call(sigmund_stop, shell=True):
                log('plumgrid-sigmund couldn\'t be stopped!')
                return
        subprocess.check_call(sigmund_autoboot, shell=True)
        status = subprocess.check_output(sigmund_status, shell=True)
    except:
        log('plumgrid-sigmund couldn\'t be started!')
Project: charm-plumgrid-gateway    Author: openstack
def service_running(service_name):
    """Determine whether a system service is running"""
    if init_is_systemd():
        return service('is-active', service_name)
    else:
        try:
            output = subprocess.check_output(
                ['service', service_name, 'status'],
                stderr=subprocess.STDOUT).decode('UTF-8')
        except subprocess.CalledProcessError:
            return False
        else:
            # This works for upstart scripts where the 'service' command
            # returns a consistent string to represent running 'start/running'
            if ("start/running" in output or "is running" in output or
                    "up and running" in output):
                return True
            # Check System V scripts init script return codes
            if service_name in systemv_services_running():
                return True
            return False
Project: charm-plumgrid-gateway    Author: openstack
def hugepage_support(user, group='hugetlb', nr_hugepages=256,
                     max_map_count=65536, mnt_point='/run/hugepages/kvm',
                     pagesize='2MB', mount=True, set_shmmax=False):
    """Enable hugepages on system.

    Args:
    user (str)  -- Username to allow access to hugepages to
    group (str) -- Group name to own hugepages
    nr_hugepages (int) -- Number of pages to reserve
    max_map_count (int) -- Number of Virtual Memory Areas a process can own
    mnt_point (str) -- Directory to mount hugepages on
    pagesize (str) -- Size of hugepages
    mount (bool) -- Whether to mount hugepages
    set_shmmax (bool) -- Whether to raise kernel.shmmax if required
    """
    group_info = add_group(group)
    gid = group_info.gr_gid
    add_user_to_group(user, group)
    if max_map_count < 2 * nr_hugepages:
        max_map_count = 2 * nr_hugepages
    sysctl_settings = {
        'vm.nr_hugepages': nr_hugepages,
        'vm.max_map_count': max_map_count,
        'vm.hugetlb_shm_group': gid,
    }
    if set_shmmax:
        shmmax_current = int(check_output(['sysctl', '-n', 'kernel.shmmax']))
        shmmax_minsize = bytes_from_string(pagesize) * nr_hugepages
        if shmmax_minsize > shmmax_current:
            sysctl_settings['kernel.shmmax'] = shmmax_minsize
    sysctl.create(yaml.dump(sysctl_settings), '/etc/sysctl.d/10-hugepage.conf')
    mkdir(mnt_point, owner='root', group='root', perms=0o755, force=False)
    lfstab = fstab.Fstab()
    fstab_entry = lfstab.get_entry_by_attr('mountpoint', mnt_point)
    if fstab_entry:
        lfstab.remove_entry(fstab_entry)
    entry = lfstab.Entry('nodev', mnt_point, 'hugetlbfs',
                         'mode=1770,gid={},pagesize={}'.format(gid, pagesize), 0, 0)
    lfstab.add_entry(entry)
    if mount:
        fstab_mount(mnt_point)
Project: charm-plumgrid-gateway    Author: openstack
def is_crm_leader(resource, retry=False):
    """
    Returns True if the charm calling this is the elected corosync leader,
    as returned by calling the external "crm" command.

    We allow this operation to be retried to avoid the possibility of getting a
    false negative. See LP #1396246 for more info.
    """
    if resource == DC_RESOURCE_NAME:
        return is_crm_dc()
    cmd = ['crm', 'resource', 'show', resource]
    try:
        status = subprocess.check_output(cmd, stderr=subprocess.STDOUT)
        if not isinstance(status, six.text_type):
            status = six.text_type(status, "utf-8")
    except subprocess.CalledProcessError:
        status = None

    if status and get_unit_hostname() in status:
        return True

    if status and "resource %s is NOT running" % (resource) in status:
        raise CRMResourceNotFound("CRM resource %s not found" % (resource))

    return False
Project: charm-swift-proxy    Author: openstack
def config(scope=None):
    """Juju charm configuration"""
    config_cmd_line = ['config-get']
    if scope is not None:
        config_cmd_line.append(scope)
    else:
        config_cmd_line.append('--all')
    config_cmd_line.append('--format=json')
    try:
        config_data = json.loads(
            subprocess.check_output(config_cmd_line).decode('UTF-8'))
        if scope is not None:
            return config_data
        return Config(config_data)
    except ValueError:
        return None
Project: data_pipeline    Author: Yelp
def test_get_container_ip(containers):
    """
    Asserts that when an existing container is queried for its IP, it returns
    the IP address of the specified project and service.
    """
    actual_ip = Containers.get_container_ip_address(
        project=containers.project,
        service=ZOOKEEPER,
        timeout_seconds=5
    )
    container_id = Containers.get_container_info(
        project=containers.project,
        service=ZOOKEEPER
    )['Id']
    command = "docker inspect --format '{{{{ .NetworkSettings.IPAddress }}}}' {container_id}" \
        .format(container_id=container_id)
    expected_ip = subprocess.check_output([command], shell=True)

    assert expected_ip.rstrip() == actual_ip
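
The same value can be obtained without shell=True by passing an argument list, which sidesteps shell quoting of the Go template (a sketch, not part of the original test; container_id as above):

# Hypothetical no-shell variant: each argument is passed verbatim, so the
# template needs no shell escaping and no brace doubling for str.format().
expected_ip = subprocess.check_output(
    ['docker', 'inspect', '--format', '{{ .NetworkSettings.IPAddress }}',
     container_id]
)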
Project: charm-plumgrid-gateway    Author: openstack
def fstab_mount(mountpoint):
    """Mount filesystem using fstab"""
    cmd_args = ['mount', mountpoint]
    try:
        subprocess.check_output(cmd_args)
    except subprocess.CalledProcessError as e:
        log('Error mounting {}\n{}'.format(mountpoint, e.output))
        return False
    return True
Project: charm-plumgrid-gateway    Author: openstack
def relation_get(attribute=None, unit=None, rid=None):
    """Get relation information"""
    _args = ['relation-get', '--format=json']
    if rid:
        _args.append('-r')
        _args.append(rid)
    _args.append(attribute or '-')
    if unit:
        _args.append(unit)
    try:
        return json.loads(subprocess.check_output(_args).decode('UTF-8'))
    except ValueError:
        return None
    except CalledProcessError as e:
        if e.returncode == 2:
            return None
        raise
Project: charm-plumgrid-gateway    Author: openstack
def service(name, action):
    """
    Open/close access to a service

    :param name: could be a service name defined in `/etc/services` or a port
                 number.
    :param action: `open` or `close`
    """
    if action == 'open':
        subprocess.check_output(['ufw', 'allow', str(name)],
                                universal_newlines=True)
    elif action == 'close':
        subprocess.check_output(['ufw', 'delete', 'allow', str(name)],
                                universal_newlines=True)
    else:
        raise UFWError(("'{}' not supported, use 'open' "
                        "or 'close'").format(action))
Project: charm-plumgrid-gateway    Author: openstack
def run_action(self, unit_sentry, action,
                   _check_output=subprocess.check_output,
                   params=None):
        """Run the named action on a given unit sentry.

        params is a dict of parameters to pass to the action.
        The _check_output parameter is used for dependency injection in tests.

        @return action_id.
        """
        unit_id = unit_sentry.info["unit_name"]
        command = ["juju", "action", "do", "--format=json", unit_id, action]
        if params is not None:
            for key, value in params.iteritems():
                command.append("{}={}".format(key, value))
        self.log.info("Running command: %s\n" % " ".join(command))
        output = _check_output(command, universal_newlines=True)
        data = json.loads(output)
        action_id = data[u'Action queued with id']
        return action_id
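
A hedged sketch of why _check_output is injectable: a test can pass a stub and avoid spawning the juju CLI (the names deployment and unit_sentry are illustrative):

def fake_check_output(command, universal_newlines=True):
    # Return the JSON shape run_action() expects, without running juju.
    return '{"Action queued with id": "1234"}'

action_id = deployment.run_action(unit_sentry, 'pause',
                                  _check_output=fake_check_output)
assert action_id == '1234'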
Project: python-driver    Author: bblfsh
def generate2():
    """
    Call an external Python 2 program to retrieve the AST symbols of that
    language version
    :return:
    """
    import subprocess as sp
    import tempfile, shutil, sys, traceback

    tempdir = tempfile.mkdtemp()
    tempfile = os.path.join(tempdir, "py2_ast_code.py")

    py2_proc_out = ""
    try:
        with open(tempfile, 'w') as py2code:
            py2code.write(generate_str + WRITESYMS_CODE)

        py2_proc_out = sp.check_output(["python2", tempfile]).decode()
    finally:
        try:
            shutil.rmtree(tempdir)
        except:
            print("Warning: error trying to delete the temporal directory:", file=sys.stderr)
            print(traceback.format_exc(), file=sys.stderr)
    return set(py2_proc_out.splitlines())
Project: Pillage    Author: kostrin
def bruteforce(self, ip_address, port, userList, passList):
        print "INFO: Performing hydra msSql scan against " + ip_address 
        hydraCmd = "hydra -L %s -P %s -f -e n -o pillageResults/%s_msSqlhydra.txt -u %s -s %s mssql" % (userList, passList, ip_address, ip_address, port)
        creds={}
        try:
            results = subprocess.check_output(hydraCmd, shell=True)
            resultarr = results.split("\n")
            for result in resultarr:
                if "login:" in result:
                    print "[*] Valid msSql credentials found: " + result
                    resultList=result.split()
                    self.username=resultList[4]
                    if resultList[6]:
                        self.password=resultList[6]
                    else:
                        self.password=''

        except:
            print "INFO: No valid msSql credentials found"
Project: Pillage    Author: kostrin
def webEnum(self, args):
        print "INFO: Performing nmap http script scan for {}:{}".format(args[0],args[1])
        nmapSCAN = "nmap -sV -Pn -vv -p {} --script='(http* or ssl*) and not (dos or fuzzer or brute)' -oN {}_http.nmap {}".format(args[1],args[0],args[0])
        subprocess.check_output(nmapSCAN, shell=True)

        print "INFO: Performing nikto scan on {}:{}".format(args[0],args[1])
        script="nikto -host {} -port {} -C all >> {}_nikto_{}.txt".format(args[0],args[1],args[0],args[1])
        subprocess.check_output(script, shell=True)

        '''
        print "INFO: Performing dirb scan on {}:{}".format(args[0],args[1])
        dirbList="/usr/share/wordlists/dirbuster/directory-list-2.3-small.txt"
        script="dirb {}://{}:{} {} -S -w >> {}_dirb_{}.txt".format(args[2],args[0],args[1],dirbList,args[0],args[1])
        subprocess.call(script, shell=True)
        '''
        print "INFO: Finished http module for {}:{}".format(args[0],args[1])
Project: Pillage    Author: kostrin
def smbEnum(self, args):
        print "INFO: Performing nmap smb script scan for {}:{}".format(args[0],args[1])
        nmapSCAN = "nmap -sV -Pn -vv -p {} --script='(smb*) and not (brute or broadcast or dos or external or fuzzer)' --script-args=unsafe=1 -oN {}_smb.nmap {}".format(args[1],args[0],args[0])
        subprocess.check_output(nmapSCAN, shell=True)

        print "INFO: Performing ntbscan for {}:{}".format(args[0],args[1])
        nbtSCAN = "nbtscan -r -v -h {} >> {}_smbNbt.txt".format(args[0],args[0])
        subprocess.check_output(nbtSCAN, shell=True)

        print "INFO: Performing enum4Linux scan for {}:{}".format(args[0],args[1])
        try:
            enumSCAN = "enum4linux -a -M -v {} >> {}_smbEnum.txt".format(args[0],args[0])
            subprocess.check_output(enumSCAN, shell=True)
        except:
            print "ERROR: enum4Linux scan FAILED for {}:{}".format(args[0],args[1])

        print "INFO: Finished smb module for {}:{}".format(args[0],args[1])
Project: py_find_1st    Author: roebel
def compiler_is_clang(comp) :
    print("check for clang compiler ...", end=' ')
    try:
        cc_output = subprocess.check_output(comp+['--version'],
                                            stderr = subprocess.STDOUT, shell=False)
    except OSError as ex:
        print("compiler test call failed with error {0:d} msg: {1}".format(ex.errno, ex.strerror))
        print("no")
        return False

    ret = re.search(b'clang', cc_output) is not None
    if ret :
        print("yes")
    else:
        print("no")
    return ret
Project: chitin    Author: SamStudio8
def check_integrity(self):
        from subprocess import check_output
        reads = 0
        try:
            p = check_output("samtools view -c %s" % self.path, shell=True)
            reads = int(p.split("\n")[0].strip())
        except Exception as e:
            print e
            pass

        has_index = False
        has_indate_index = None
        if os.path.exists(self.path + ".bai"):
            has_index = True
            if os.path.getmtime(self.path) <= os.path.getmtime(self.path + ".bai"):
                has_indate_index = True
            else:
                has_indate_index = False

        return {
            ("has_reads", "has 0 reads"): reads > 0,
            ("has_index", "has no BAI"): has_index,
            ("has_indate_index", "has a BAI older than itself"): has_indate_index,
        }
Project: charm-swift-proxy    Author: openstack
def status_get():
    """Retrieve the previously set juju workload state and message

    If the status-get command is not found then assume this is juju < 1.23 and
    return 'unknown', ""

    """
    cmd = ['status-get', "--format=json", "--include-data"]
    try:
        raw_status = subprocess.check_output(cmd)
    except OSError as e:
        if e.errno == errno.ENOENT:
            return ('unknown', "")
        else:
            raise
    else:
        status = json.loads(raw_status.decode("UTF-8"))
        return (status["status"], status["message"])
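
A hedged usage sketch of the fallback behaviour (the handling shown is illustrative):

state, message = status_get()
if state == 'unknown':
    # status-get does not exist on juju < 1.23, so there is nothing to report.
    pass
else:
    log('workload status: {} ({})'.format(state, message))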
Project: charm-swift-proxy    Author: openstack
def relation_get(attribute=None, unit=None, rid=None):
    """Get relation information"""
    _args = ['relation-get', '--format=json']
    if rid:
        _args.append('-r')
        _args.append(rid)
    _args.append(attribute or '-')
    if unit:
        _args.append(unit)
    try:
        return json.loads(subprocess.check_output(_args).decode('UTF-8'))
    except ValueError:
        return None
    except CalledProcessError as e:
        if e.returncode == 2:
            return None
        raise
Project: charm-swift-proxy    Author: openstack
def is_crm_dc():
    """
    Determine leadership by querying the pacemaker Designated Controller
    """
    cmd = ['crm', 'status']
    try:
        status = subprocess.check_output(cmd, stderr=subprocess.STDOUT)
        if not isinstance(status, six.text_type):
            status = six.text_type(status, "utf-8")
    except subprocess.CalledProcessError as ex:
        raise CRMDCNotFound(str(ex))

    current_dc = ''
    for line in status.split('\n'):
        if line.startswith('Current DC'):
            # Current DC: juju-lytrusty-machine-2 (168108163) - partition with quorum
            current_dc = line.split(':')[1].split()[0]
    if current_dc == get_unit_hostname():
        return True
    elif current_dc == 'NONE':
        raise CRMDCNotFound('Current DC: NONE')

    return False
Project: cellranger    Author: 10XGenomics
def get_unmapped_read_count_from_indexed_bam(bam_file_name):
    """
    Get number of unmapped reads from an indexed BAM file.

    Args:
        bam_file_name (str): Name of indexed BAM file.

    Returns:
        int: number of unmapped reads in the BAM

    Note:
        BAM must be indexed for lookup using samtools.

    """

    index_output = subprocess.check_output('samtools idxstats %s' % bam_file_name, shell=True)
    return int(index_output.strip().split('\n')[-1].split()[-1])
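
The same count can be read without a shell by passing the arguments as a list and decoding the output (a sketch, assuming samtools is on PATH; bam_file_name as above):

# samtools idxstats prints one line per reference plus a final "*" line;
# the last field of that final line is the unmapped read count.
index_output = subprocess.check_output(
    ['samtools', 'idxstats', bam_file_name]).decode()
unmapped = int(index_output.strip().split('\n')[-1].split()[-1])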
Project: cellranger    Author: 10XGenomics
def get_bcl2fastq_v2(hostname):
    try:
        subprocess.check_call(["which", "bcl2fastq"])
        # Restore the LD_LIBRARY_PATH set aside by sourceme.bash/shell10x.
        # Required for some installations of bcl2fastq.
        new_environ = dict(os.environ)
        new_environ['LD_LIBRARY_PATH'] = os.environ.get('_TENX_LD_LIBRARY_PATH', '')
        output = subprocess.check_output(["bcl2fastq", "--version"], env=new_environ, stderr=subprocess.STDOUT)
        match = None
        for l in output.split("\n"):
            match = re.match("bcl2fastq v([0-9.]+)", l)
            if match is not None:
                return (match.groups()[0], None)

        return (None, "bcl2fastq version not recognized -- please check the output of bcl2fastq --version")
    except subprocess.CalledProcessError:
        msg = "On machine: %s, bcl2fastq not found on PATH." % hostname
        return (None, msg)
Project: acbs    Author: AOSC-Dev
def parse_ab3_defines(defines_file):  # , pkg_name):
    try:
        fp = open(defines_file, 'rt')
        abd_cont = fp.read()
        fp.close()
    except:
        print('[E] Failed to load autobuild defines file! Do you have read permission?')
        return False
    script = "ARCH={}\n".format(
        get_arch_name()) + abd_cont + gen_laundry_list(['PKGNAME', 'PKGDEP', 'BUILDDEP'])
    try:
        # Better to be replaced by subprocess.Popen
        abd_out = subprocess.check_output(script, shell=True)
    except:
        print('[E] Malformed Autobuild defines file found! Couldn\'t continue!')
        return False
    abd_fp = io.StringIO('[wrap]\n' + abd_out.decode('utf-8'))
    abd_config = RawConfigParser()
    abd_config.read_file(abd_fp)
    abd_config_dict = {}
    for i in abd_config['wrap']:
        abd_config_dict[i.upper()] = abd_config['wrap'][i]
    return abd_config_dict
Project: cmake.nvim    Author: phillipbonhomme
def test_RTagsDaemonStartClean(self):
        try:
            os.chdir("clean")
        except OSError:
            print("Test Error: Couldn't cd into 'dirty' test directory.")
            raise
        self.assertFalse(self.cmake_build_info["build_dir"].is_dir())
        self.plugin.setup_rtags_daemon()
        try:
            rtags_daemon_status = subprocess.check_output(
                self.cmake_cmd_info["rtags_status"])
        except subprocess.CalledProcessError as e:
            print(e.output)
        self.assertTrue(
            len("*********************************\nfileids\n*********************************\n*********************************\nheadererrors\n*********************************\n*********************************\ninfo\n*********************************\nRunning a release build\nsocketFile: /Users/phillipbonhomme/.rdm\ndataDir: /Users/phillipbonhomme/.cache/rtags/\noptions: 0x14jobCount: 4\nrpVisitFileTimeout: 60000\nrpIndexDataMessageTimeout: 60000\nrpConnectTimeout: 0\nrpConnectTimeout: 0\ndefaultArguments: List<String>(-ferror-limit=50, -Wall, -fspell-checking, -Wno-unknown-warning-option\")\nincludePaths: List<Source::Include>(\")\ndefines: List<Source::Define>(-DRTAGS=\")\nignoredCompilers: Set<Path>(\")\n*********************************\njobs\n*********************************\n"
                ) <= len(str(rtags_daemon_status)))
Project: cmake.nvim    Author: phillipbonhomme
def test_RTagsDaemonStartDirty(self):
        try:
            os.chdir("dirty")
        except OSError:
            print("Test Error: Couldn't cd into 'dirty' test directory.")
            raise
        self.assertTrue(self.cmake_build_info["build_dir"].is_dir())
        self.plugin.setup_rtags_daemon()
        try:
            rtags_daemon_status = subprocess.check_output(
                self.cmake_cmd_info["rtags_status"])
        except subprocess.CalledProcessError as e:
            print(e.output)
        self.assertTrue(
            len("*********************************\nfileids\n*********************************\n*********************************\nheadererrors\n*********************************\n*********************************\ninfo\n*********************************\nRunning a release build\nsocketFile: /Users/phillipbonhomme/.rdm\ndataDir: /Users/phillipbonhomme/.cache/rtags/\noptions: 0x14jobCount: 4\nrpVisitFileTimeout: 60000\nrpIndexDataMessageTimeout: 60000\nrpConnectTimeout: 0\nrpConnectTimeout: 0\ndefaultArguments: List<String>(-ferror-limit=50, -Wall, -fspell-checking, -Wno-unknown-warning-option\")\nincludePaths: List<Source::Include>(\")\ndefines: List<Source::Define>(-DRTAGS=\")\nignoredCompilers: Set<Path>(\")\n*********************************\njobs\n*********************************\n"
                ) <= len(str(rtags_daemon_status)))
Project: cmake.nvim    Author: phillipbonhomme
def test_RTagsClientStartDirty(self):
        try:
            os.chdir("dirty")
        except OSError:
            print("Test Error: Couldn't cd into 'dirty' test directory.")
            raise
        self.assertTrue(self.cmake_build_info["build_dir"].is_dir())
        self.assertTrue(self.cmake_build_info["comp_data_cmake"].is_file())
        self.plugin.setup_rtags_daemon()
        self.plugin.connect_rtags_client()
        try:
            rtags_client_status = subprocess.check_output(
                self.cmake_cmd_info["rtags_file_status"] +
                [str(src_info["cpp"])])
        except subprocess.CalledProcessError as e:
            print(e.output)
        self.assertTrue(str(rtags_client_status).find("managed"))
        try:
            rtags_client_status = subprocess.check_output(
                self.cmake_cmd_info["rtags_file_status"] +
                [str(src_info["test_cpp"])])
        except subprocess.CalledProcessError as e:
            print(e.output)
        self.assertTrue(str(rtags_client_status).find("managed"))
Project: cmake.nvim    Author: phillipbonhomme
def test_RTagsClientSetFile(self):
        try:
            os.chdir("dirty")
        except OSError:
            print("Test Error: Couldn't cd into 'dirty' test directory.")
            raise
        self.assertTrue(self.cmake_build_info["build_dir"].is_dir())
        self.assertTrue(self.cmake_build_info["comp_data_cmake"].is_file())
        self.plugin.setup_rtags_daemon()
        self.plugin.connect_rtags_client()
        self.plugin.rtags_set_file([str(src_info["cpp"])])
        try:
            rtags_client_status = subprocess.check_output(
                self.cmake_cmd_info["rtags_file_status"] +
                [str(src_info["cpp"])])
        except subprocess.CalledProcessError as e:
            print(e.output)
        self.assertTrue(str(rtags_client_status).find("managed"))
Project: cmake.nvim    Author: phillipbonhomme
def test_RTagsClientUpdateBuffers(self):
        try:
            os.chdir("dirty")
        except OSError:
            print("Test Error: Couldn't cd into 'dirty' test directory.")
            raise
        self.assertTrue(self.cmake_build_info["build_dir"].is_dir())
        self.assertTrue(self.cmake_build_info["comp_data_cmake"].is_file())
        self.plugin.setup_rtags_daemon()
        self.plugin.connect_rtags_client()
        self.plugin.update_rtags_buffers(
            [str(src_info["test_cpp"]),
             str(src_info["cpp"])])
        try:
            rtags_client_status = subprocess.check_output(
                self.cmake_cmd_info["rtags_buffers"])
        except subprocess.CalledProcessError as e:
            print(e.output)
        filepath = os.getcwd() + str(src_info["test_cpp"])
        self.assertTrue(str(rtags_client_status).find(filepath))
Project: cmake.nvim    Author: phillipbonhomme
def run_cmake(self):
        print("Running CMake")
        build_dir_cmd_out = subprocess.call(
            ["mkdir", "build"],
            stdout=subprocess.DEVNULL,
            stderr=subprocess.DEVNULL)
        if build_dir_cmd_out != 0:
            print("Can\'t setup CMake build directory.")
            return

        if self.cmake_build_info["build_dir"].is_dir():
            try:
                subprocess.check_output(
                    self.cmake_cmd_info["cmake_cmd"],
                    cwd=str(self.cmake_build_info["build_dir"]))
            except subprocess.CalledProcessError as e:
                print(e.output)
            if not self.cmake_build_info["comp_data_cmake"].is_file():
                print("Couldn't setup CMake Project")
                return
        else:
            print("Couldn't setup CMake Project")
            return
Project: genomedisco    Author: kundajelab
def run_script(script_name,running_mode):
    subp.check_output(['bash','-c','chmod 755 '+script_name])
    if running_mode=='NA':
        output=subp.check_output(['bash','-c',script_name])
        if output!='':
            print output
    if running_mode=='write_script':
        pass
    if running_mode=='sge':
        memo='3G'
        output=subp.check_output(['bash','-c','qsub -l h_vmem='+memo+' -o '+script_name+'.o -e '+script_name+'.e '+script_name])
    #TODO: if you choose slurm, then you need to change the settings and provide a file with settings
    if running_mode=='slurm':
        memo='50G'
        partition='akundaje'
        output=subp.check_output(['bash','-c','sbatch --mem '+memo+' -o '+script_name+'.o -e '+script_name+'.e'+' -p '+partition+' '+script_name])
Project: genomedisco    Author: kundajelab
def QuASAR_rep_wrapper(outdir,parameters,samplename1,samplename2,running_mode):
    script_comparison_file=outdir+'/scripts/QuASAR-Rep/'+samplename1+'.vs.'+samplename2+'/'+samplename1+'.vs.'+samplename2+'.QuASAR-Rep.sh'
    subp.check_output(['bash','-c','mkdir -p '+os.path.dirname(script_comparison_file)])
    script_comparison=open(script_comparison_file,'w')
    script_comparison.write("#!/bin/sh"+'\n')
    script_comparison.write('. '+bashrc_file+'\n')
    outpath=outdir+'/results/reproducibility/'+samplename1+'.vs.'+samplename2+'/QuASAR-Rep/'+samplename1+'.vs.'+samplename2+'.QuASAR-Rep.scores.txt'
    subp.check_output(['bash','-c','mkdir -p '+os.path.dirname(outpath)])
    quasar_data=outdir+'/data/forQuASAR'
    quasar_transform1=quasar_data+'/'+samplename1+'.quasar_transform'
    quasar_transform2=quasar_data+'/'+samplename2+'.quasar_transform'
    script_comparison.write('${mypython} '+os.path.dirname(os.path.dirname(os.path.abspath(os.path.dirname(os.path.realpath(__file__)))))+"/hifive/bin/find_quasar_replicate_score"+' '+quasar_transform1+' '+quasar_transform2+' '+outpath+'\n') 
    script_comparison.write('${mypython} '+os.path.abspath(os.path.dirname(os.path.realpath(__file__)))+"/plot_quasar_scatter.py"+' '+quasar_transform1+' '+quasar_transform2+' '+outpath+'\n')
    #split the scores by chromosomes
    script_comparison.write('${mypython} '+os.path.abspath(os.path.dirname(os.path.realpath(__file__)))+"/quasar_split_by_chromosomes.py"+' '+outpath+'\n')
    script_comparison.close()
    run_script(script_comparison_file,running_mode)
Project: genomedisco    Author: kundajelab
def HiCSpector_wrapper(outdir,parameters,concise_analysis,samplename1,samplename2,chromo,running_mode,f1,f2,nodefile):
    script_comparison_file=outdir+'/scripts/HiC-spector/'+samplename1+'.'+samplename2+'/'+chromo+'.'+samplename1+'.'+samplename2+'.sh'

    subp.check_output(['bash','-c','mkdir -p '+os.path.dirname(script_comparison_file)])
    script_comparison=open(script_comparison_file,'w')
    script_comparison.write("#!/bin/sh"+'\n')
    script_comparison.write('. '+bashrc_file+'\n')
    if os.path.isfile(f1) and os.path.getsize(f1)>20:
        if os.path.isfile(f2) and os.path.getsize(f2)>20:
            outpath=outdir+'/results/reproducibility/'+samplename1+'.vs.'+samplename2+'/HiC-Spector/'+chromo+'.'+samplename1+'.vs.'+samplename2+'.scores.txt'
            subp.check_output(['bash','-c','mkdir -p '+os.path.dirname(outpath)])
            script_comparison.write("$mypython -W ignore "+os.path.abspath(os.path.dirname(os.path.dirname(os.path.realpath(__file__))))+"/reproducibility_analysis/hic-spector_wrapper.py --m1 "+f1+" --m2 "+f2+" --out "+outpath+".printout --node_file "+nodefile+" --num_evec "+parameters['HiC-Spector']['n']+"\n")
            script_comparison.write("cat "+outpath+".printout | tail -n1 | cut -f2 | awk '{print \""+samplename1+"\\t"+samplename2+"\\t\"$3}' > "+outpath+'\n')
            script_comparison.write("rm "+outpath+".printout"+'\n')
            script_comparison.close()
            run_script(script_comparison_file,running_mode)
Project: genomedisco    Author: kundajelab
def GenomeDISCO_wrapper(outdir,parameters,concise_analysis,samplename1,samplename2,chromo,running_mode,f1,f2,nodefile):
    script_comparison_file=outdir+'/scripts/GenomeDISCO/'+samplename1+'.'+samplename2+'/'+chromo+'.'+samplename1+'.'+samplename2+'.sh'                     

    subp.check_output(['bash','-c','mkdir -p '+os.path.dirname(script_comparison_file)])              
    script_comparison=open(script_comparison_file,'w')                                                
    script_comparison.write("#!/bin/sh"+'\n')                                                         
    script_comparison.write('. '+bashrc_file+'\n')                                               
    if os.path.isfile(f1) and os.path.getsize(f1)>20:                                                 
        if os.path.isfile(f2) and os.path.getsize(f2)>20:                                             
            concise_analysis_text=''                                                                  
            if concise_analysis:                                                                      
                concise_analysis_text=' --concise_analysis'                                           
            #get the sample that goes for subsampling
            subsampling=parameters['GenomeDISCO']['subsampling']
            if parameters['GenomeDISCO']['subsampling']!='NA' and parameters['GenomeDISCO']['subsampling']!='lowest':
                subsampling_sample=parameters['GenomeDISCO']['subsampling']
                subsampling=outdir+'/data/edges/'+subsampling_sample+'/'+subsampling_sample+'.'+chromo+'.gz'

            outpath=outdir+'/results/reproducibility/'+samplename1+'.vs.'+samplename2+'/GenomeDISCO/'
            subp.check_output(['bash','-c','mkdir -p '+outpath])                                      
            script_comparison.write("$mypython -W ignore "+os.path.abspath(os.path.dirname(os.path.dirname(os.path.realpath(__file__)))+"/genomedisco/compute_reproducibility.py")+" --m1 "+f1+" --m2 "+f2+" --m1name "+samplename1+" --m2name "+samplename2+" --node_file "+nodefile+" --outdir "+outpath+" --outpref "+chromo+" --m_subsample "+subsampling+" --approximation 10000000 --norm "+parameters['GenomeDISCO']['norm']+" --method RandomWalks "+" --tmin "+parameters['GenomeDISCO']['tmin']+" --tmax "+parameters['GenomeDISCO']['tmax']+concise_analysis_text+'\n')                                               
            script_comparison.close()                                                                 
            run_script(script_comparison_file,running_mode)
Project: benchmarks    Author: tensorflow
def _GetMostRecentDockerImageFromGcloud(docker_image):
  """Get most recent <docker_image>:tag for this docker_image.

  Args:
    docker_image: (string) docker image on Google Cloud.

  Returns:
    docker_image:tag if at least one tag was found for docker_image.
    Otherwise, returns None.
  """
  tag = subprocess.check_output(
      ['gcloud', 'container', 'images', 'list-tags',
       docker_image, '--limit=1', '--format=value(tags[0])'])
  tag = tag.strip()
  if not tag:
    return None 
  return '%s:%s' % (docker_image, tag)
Project: benchmarks    Author: tensorflow
def _GetPodNames(pod_name_prefix, job_name=None):
  """Get pod names based on the pod_name_prefix and job_name.

  Args:
    pod_name_prefix: value of 'name-prefix' selector.
    job_name: value of 'job' selector. If None, pod names will be
      selected only based on 'name-prefix' selector.

  Returns:
    List of pod names.
  """
  pod_list_command = [
      _KUBECTL, 'get', 'pods', '-o', 'name', '-a',
      '-l', _GetJobSelector(pod_name_prefix, job_name)]
  logging.info('Command to get pod names: %s', ' '.join(pod_list_command))
  output = subprocess.check_output(pod_list_command, universal_newlines=True)
  pod_names = [name for name in output.strip().split('\n') if name]
  logging.info('Pod names: "%s"', ','.join(pod_names))
  return pod_names
Project: benchmarks    Author: tensorflow
def _PrintLogs(pod_name_prefix, job_name):
  """Prints pod logs.

  If a pod has been restarted, prints logs from previous run. Otherwise,
  prints the logs from current run. We print logs for pods selected
  based on pod_name_prefix and job_name.

  Args:
    pod_name_prefix: value of 'name-prefix' selector.
    job_name: value of 'job' selector.
  """
  for pod_name in _GetPodNames(pod_name_prefix, job_name):
    try:
      # Get previous logs.
      logs_command = [_KUBECTL, 'logs', '-p', pod_name]
      logging.info('Command to get logs: %s', ' '.join(logs_command))
      output = subprocess.check_output(logs_command, universal_newlines=True)
    except subprocess.CalledProcessError:
      # We couldn't get previous logs, so we will try to get current logs.
      logs_command = [_KUBECTL, 'logs', pod_name]
      logging.info('Command to get logs: %s', ' '.join(logs_command))
      output = subprocess.check_output(logs_command, universal_newlines=True)
    print('%s logs:' % pod_name)
    print(output)
Project: ISB-CGC-pipelines    Author: isb-cgc
def __init__(self, fqArchiveUrl, filtersDir, outputPrefix, outputUrl, diskSize, diskType, logsPath, container, scriptUrl, tag, cores, mem, preemptible):
        super(PipelineStep, self).__init__()

        fqFileName = os.path.basename(fqArchiveUrl)
        fqInputs = "{fqArchive}:{fqFileName}".format(fqArchive=fqArchiveUrl, fqFileName=fqFileName)

        try:
            filtersDirContents = subprocess.check_output(["gsutil", "ls", filtersDir])

        except subprocess.CalledProcessError as e:
            print "ERROR: couldn't get a listing of filter files!  -- {reason}".format(reason=e)
            exit(-1)

        bfInputs = [x for x in filtersDirContents.split('\n') if re.match('^.*\.bf$', x) or re.match('^.*\.txt', x)]
        bfInputs.append(fqInputs)

        inputs = ",".join(["{url}:{filename}".format(url=x, filename=os.path.basename(x)) for x in bfInputs])
        outputs = "{outputPrefix}*:{outDir}".format(outputPrefix=outputPrefix, outDir=outputUrl)
        env = "INPUT_FILE={fqFileName},OUTPUT_PREFIX={outputPrefix},FILTERS_LIST={filtersList}".format(fqFileName=fqFileName, outputPrefix=outputPrefix, filtersList=','.join([os.path.basename(x) for x in bfInputs if re.match('^.*\.bf$', x)]))

        self._step = PipelineSchema("biobloomcategorizer",
                                                   self._pipelinesConfig,
                                                   logsPath,
                                                   container,
                                                   scriptUrl=scriptUrl,
                                                   cores=cores,
                                                   mem=mem,
                                                   diskSize=diskSize,
                                                   diskType=diskType,
                                                   inputs=inputs,
                                                   outputs=outputs,
                                                   env=env,
                                                   tag=tag,
                                                   preemptible=preemptible)
Project: ISB-CGC-pipelines    Author: isb-cgc
def getJobLogs(args, config):  # TODO: reimplement
        pipelineDbUtils = PipelineDbUtils(config)

        jobInfo = pipelineDbUtils.getJobInfo(select=["stdout_log", "stderr_log", "gcs_log_path"],
                                             where={"job_id": args.jobId})

        with open(os.devnull, 'w') as fnull:
            if args.stdout:
                try:
                    stdoutLogFile = subprocess.check_output(
                        ["gsutil", "cat", os.path.join(jobInfo[0].gcs_log_path, jobInfo[0].stdout_log)], stderr=fnull)
                except subprocess.CalledProcessError as e:
                    print "ERROR: couldn't get the stdout log : {reason}".format(reason=e)
                    exit(-1)

                print "STDOUT:\n"
                print stdoutLogFile
                print "---------\n"

            if args.stderr:
                try:
                    stderrLogFile = subprocess.check_output(
                        ["gsutil", "-q", "cat", os.path.join(jobInfo[0].gcs_log_path, jobInfo[0].stderr_log)],
                        stderr=fnull)
                except subprocess.CalledProcessError as e:
                    print "ERROR: couldn't get the stderr log : {reason}".format(reason=e)
                    exit(-1)

                print "STDERR:\n"
                print stderrLogFile
                print "---------\n"

        pipelineDbUtils.closeConnection()
Project: ISB-CGC-pipelines    Author: isb-cgc
def calculateDiskSize(inputFile=None, inputFileSize=None, analysisId=None, scalingFactor=None, roundToNearestGbInterval=None):
        if inputFile is not None:
            fileSize = int(subprocess.check_output(["gsutil", "du", inputFile]).split(' ')[0])

        elif inputFileSize is not None:
            fileSize = inputFileSize

        elif analysisId is not None:
            analysisDetail = DataUtils.getAnalysisDetail(analysisId)

            if len(analysisDetail["result_set"]["results"]) > 0:
                files = analysisDetail["result_set"]["results"][0]["files"]
                fileSize = sum([int(x["filesize"]) for x in files])
            else:
                print "ERROR: no files found for analysis ID {a}!".format(a=analysisId)
                exit(-1)

        if scalingFactor is not None:
            scalingFactor = int(scalingFactor)
        else:
            scalingFactor = 1

        if roundToNearestGbInterval is not None:
            roundTo = float(roundToNearestGbInterval) * 1000000000
        else:
            roundTo = 1

        return int(math.ceil(scalingFactor * fileSize/roundTo)*roundTo)/1000000000
Project: ISB-CGC-pipelines    Author: isb-cgc
def calculateDiskSize(tokenFile=None, fileUuid=None, inputFiles=None, inputFileSize=None, scalingFactor=None, roundToNearestGbInterval=None):
        if inputFiles is not None:
            fileSize = 0
            for f in inputFiles:
                fileSize += int(subprocess.check_output(["gsutil", "du", f]).split(' ')[0])

        elif fileUuid is not None:
            fileSize = GDCDataUtils.getFilesize(fileUuid, tokenFile)

        elif inputFileSize is not None:
            fileSize = inputFileSize

        else:
            raise DataUtilsError("Couldn't determine disk size! Please provide a path to an existing file in GCS or a file uuid from the GDC.")

        if scalingFactor is not None:
            scalingFactor = int(scalingFactor)
        else:
            scalingFactor = 1

        if roundToNearestGbInterval is not None:
            roundTo = float(roundToNearestGbInterval) * 1000000000

        else:
            roundTo = 1

        return int(math.ceil(scalingFactor * fileSize / roundTo) * roundTo) / 1000000000
Project: docker-utils    Author: a-ba
def docker_version():
    out = subprocess.check_output(["docker", "-v"])
    mo = re.match(br"Docker version (\d+)\.(\d+)\.(\d+)", out)
    if mo:
        return tuple(map(int, mo.groups()))
    die("unable to parse a version number from the output of 'docker -v'")
Project: openhealthalgorithms    Author: openhealthalgorithms
def version():
    __version = '0.2.1'
    __tag = 'b'
    if path.exists('.git'):
        __tag = 'git'
        __build = subprocess.check_output('git rev-list HEAD --count'.split()).decode().strip()
    else:
        __build = __tag
    return '%s.%s.%s' % (__version, __tag, __build)
Project: charm-plumgrid-gateway    Author: openstack
def get_cidr_from_iface(interface):
    '''
    Determines Network CIDR from interface.
    '''
    if not interface:
        return None
    apt_install('ohai')
    try:
        os_info = subprocess.check_output(['ohai', '-l', 'fatal'])
    except OSError:
        log('Unable to get operating system information')
        return None
    try:
        os_info_json = json.loads(os_info)
    except ValueError:
        log('Unable to determine network')
        return None
    device = os_info_json['network']['interfaces'].get(interface)
    if device is not None:
        if device.get('routes'):
            routes = device['routes']
            for net in routes:
                if 'scope' in net:
                    return net.get('destination')
        else:
            return None
    else:
        return None
Project: charm-plumgrid-gateway    Author: openstack
def systemv_services_running():
    output = subprocess.check_output(
        ['service', '--status-all'],
        stderr=subprocess.STDOUT).decode('UTF-8')
    return [row.split()[-1] for row in output.split('\n') if '[ + ]' in row]
Project: charm-plumgrid-gateway    Author: openstack
def rsync(from_path, to_path, flags='-r', options=None):
    """Replicate the contents of a path"""
    options = options or ['--delete', '--executability']
    cmd = ['/usr/bin/rsync', flags]
    cmd.extend(options)
    cmd.append(from_path)
    cmd.append(to_path)
    log(" ".join(cmd))
    return subprocess.check_output(cmd).decode('UTF-8').strip()
Project: charm-plumgrid-gateway    Author: openstack
def mount(device, mountpoint, options=None, persist=False, filesystem="ext3"):
    """Mount a filesystem at a particular mountpoint"""
    cmd_args = ['mount']
    if options is not None:
        cmd_args.extend(['-o', options])
    cmd_args.extend([device, mountpoint])
    try:
        subprocess.check_output(cmd_args)
    except subprocess.CalledProcessError as e:
        log('Error mounting {} at {}\n{}'.format(device, mountpoint, e.output))
        return False

    if persist:
        return fstab_add(device, mountpoint, filesystem, options=options)
    return True
Project: charm-plumgrid-gateway    Author: openstack
def umount(mountpoint, persist=False):
    """Unmount a filesystem"""
    cmd_args = ['umount', mountpoint]
    try:
        subprocess.check_output(cmd_args)
    except subprocess.CalledProcessError as e:
        log('Error unmounting {}\n{}'.format(mountpoint, e.output))
        return False

    if persist:
        return fstab_remove(mountpoint)
    return True
Project: charm-plumgrid-gateway    Author: openstack
def list_nics(nic_type=None):
    """Return a list of nics of given type(s)"""
    if isinstance(nic_type, six.string_types):
        int_types = [nic_type]
    else:
        int_types = nic_type

    interfaces = []
    if nic_type:
        for int_type in int_types:
            cmd = ['ip', 'addr', 'show', 'label', int_type + '*']
            ip_output = subprocess.check_output(cmd).decode('UTF-8')
            ip_output = ip_output.split('\n')
            ip_output = (line for line in ip_output if line)
            for line in ip_output:
                if line.split()[1].startswith(int_type):
                    matched = re.search('.*: (' + int_type +
                                        r'[0-9]+\.[0-9]+)@.*', line)
                    if matched:
                        iface = matched.groups()[0]
                    else:
                        iface = line.split()[1].replace(":", "")

                    if iface not in interfaces:
                        interfaces.append(iface)
    else:
        cmd = ['ip', 'a']
        ip_output = subprocess.check_output(cmd).decode('UTF-8').split('\n')
        ip_output = (line.strip() for line in ip_output if line)

        key = re.compile('^[0-9]+:\s+(.+):')
        for line in ip_output:
            matched = re.search(key, line)
            if matched:
                iface = matched.group(1)
                iface = iface.partition("@")[0]
                if iface not in interfaces:
                    interfaces.append(iface)

    return interfaces
Project: charm-plumgrid-gateway    Author: openstack
def get_nic_hwaddr(nic):
    """Return the Media Access Control (MAC) for a network interface."""
    cmd = ['ip', '-o', '-0', 'addr', 'show', nic]
    ip_output = subprocess.check_output(cmd).decode('UTF-8')
    hwaddr = ""
    words = ip_output.split()
    if 'link/ether' in words:
        hwaddr = words[words.index('link/ether') + 1]
    return hwaddr
Project: charm-plumgrid-gateway    Author: openstack
def lsmod():
    """Shows what kernel modules are currently loaded"""
    return check_output(['lsmod'],
                        universal_newlines=True)