The following 50 code examples, extracted from open-source Python projects, illustrate how to use subprocess.call().
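As a quick orientation before the project examples: subprocess.call() runs a command, waits for it to finish, and returns its exit code (0 means success). Below is a minimal sketch of the two calling conventions the examples rely on; ls, echo, true, and /tmp are placeholder commands and paths, not taken from any project here.

import subprocess

# Preferred form: the command is a list of arguments, executed directly
# without a shell. Returns the process's exit code.
ret = subprocess.call(['ls', '-l', '/tmp'])
print('exit code:', ret)

# shell=True form: the command is a single string interpreted by the shell.
# Needed for pipes, redirection, or wildcards, but unsafe with untrusted input.
ret = subprocess.call('ls -l /tmp | wc -l', shell=True)

# Related helpers that appear in the examples below:
# check_call() raises subprocess.CalledProcessError on a non-zero exit code;
# check_output() additionally captures and returns the command's stdout.
subprocess.check_call(['true'])
out = subprocess.check_output(['echo', 'hello'])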
def configure_analyst_opsvm():
    '''
    Configures Analyst for OPSVM
    '''
    if not service_running('plumgrid'):
        restart_pg()
    opsvm_ip = pg_gw_context._pg_dir_context()['opsvm_ip']
    NS_ENTER = ('/opt/local/bin/nsenter -t $(ps ho pid --ppid $(cat '
                '/var/run/libvirt/lxc/plumgrid.pid)) -m -n -u -i -p ')
    sigmund_stop = NS_ENTER + '/usr/bin/service plumgrid-sigmund stop'
    sigmund_status = NS_ENTER \
        + '/usr/bin/service plumgrid-sigmund status'
    sigmund_autoboot = NS_ENTER \
        + '/usr/bin/sigmund-configure --ip {0} --start --autoboot' \
        .format(opsvm_ip)
    try:
        status = subprocess.check_output(sigmund_status, shell=True)
        if 'start/running' in status:
            if subprocess.call(sigmund_stop, shell=True):
                log('plumgrid-sigmund couldn\'t be stopped!')
                return
        subprocess.check_call(sigmund_autoboot, shell=True)
        status = subprocess.check_output(sigmund_status, shell=True)
    except:
        log('plumgrid-sigmund couldn\'t be started!')
def cmp_pkgrevno(package, revno, pkgcache=None):
    """Compare supplied revno with the revno of the installed package

    *  1 => Installed revno is greater than supplied arg
    *  0 => Installed revno is the same as supplied arg
    * -1 => Installed revno is less than supplied arg

    This function imports apt_cache function from charmhelpers.fetch if
    the pkgcache argument is None. Be sure to add charmhelpers.fetch if
    you call this function, or pass an apt_pkg.Cache() instance.
    """
    import apt_pkg
    if not pkgcache:
        from charmhelpers.fetch import apt_cache
        pkgcache = apt_cache()
    pkg = pkgcache[package]
    return apt_pkg.version_compare(pkg.current_ver.ver_str, revno)
def log(message, level=None):
    """Write a message to the juju log"""
    command = ['juju-log']
    if level:
        command += ['-l', level]
    if not isinstance(message, six.string_types):
        message = repr(message)
    command += [message]
    # Missing juju-log should not cause failures in unit tests
    # Send log output to stderr
    try:
        subprocess.call(command)
    except OSError as e:
        if e.errno == errno.ENOENT:
            if level:
                message = "{}: {}".format(level, message)
            message = "juju-log: {}".format(message)
            print(message, file=sys.stderr)
        else:
            raise
def __call__(self, *args, **kwargs): """Trap ``SIGTERM`` and call wrapped function.""" self._caught_signal = None # Register handler for SIGTERM, then call `self.func` self.old_signal_handler = signal.getsignal(signal.SIGTERM) signal.signal(signal.SIGTERM, self.signal_handler) self.func(*args, **kwargs) # Restore old signal handler signal.signal(signal.SIGTERM, self.old_signal_handler) # Handle any signal caught during execution if self._caught_signal is not None: signum, frame = self._caught_signal if callable(self.old_signal_handler): self.old_signal_handler(signum, frame) elif self.old_signal_handler == signal.SIG_DFL: sys.exit(0)
def install_update():
    """If a newer release is available, download and install it.

    :returns: ``True`` if an update is installed, else ``False``
    """
    update_data = wf().cached_data('__workflow_update_status', max_age=0)

    if not update_data or not update_data.get('available'):
        wf().logger.info('No update available')
        return False

    local_file = download_workflow(update_data['download_url'])

    wf().logger.info('Installing updated workflow ...')
    subprocess.call(['open', local_file])

    update_data['available'] = False
    wf().cache_data('__workflow_update_status', update_data)
    return True
def getAudio(freq, audio_files=None):
    files = os.listdir(DATA_DIR)
    # match files with a .mkv or .avi extension
    p = re.compile(r'.*\.(mkv|avi)$')
    files = [f for f in files if p.match(f)]
    if audio_files:
        files = [f for f in files if os.path.splitext(f)[0] in audio_files]
    audio_dirs = []
    for f in files:
        name, extension = os.path.splitext(f)
        command = "ffmpeg -i {0}{1}{2} -ab 160k -ac 2 -ar {3} -vn {0}{1}_{3}.wav".format(
            DATA_DIR, name, extension, freq)
        audio_dirs.append(DATA_DIR + name + '_' + str(freq) + '.wav')
        subprocess.call(command, shell=True)
    return audio_dirs

# Convert timestamp to seconds
def webEnum(self, args):
    print "INFO: Performing nmap http script scan for {}:{}".format(args[0], args[1])
    nmapSCAN = "nmap -sV -Pn -vv -p {} --script='(http* or ssl*) and not (dos or fuzzer or brute)' -oN {}_http.nmap {}".format(args[1], args[0], args[0])
    subprocess.check_output(nmapSCAN, shell=True)
    print "INFO: Performing nikto scan on {}:{}".format(args[0], args[1])
    script = "nikto -host {} -port {} -C all >> {}_nikto_{}.txt".format(args[0], args[1], args[0], args[1])
    subprocess.check_output(script, shell=True)
    '''
    print "INFO: Performing dirb scan on {}:{}".format(args[0],args[1])
    dirbList="/usr/share/wordlists/dirbuster/directory-list-2.3-small.txt"
    script="dirb {}://{}:{} {} -S -w >> {}_dirb_{}.txt".format(args[2],args[0],args[1],dirbList,args[0],args[1])
    subprocess.call(script, shell=True)
    '''
    print "INFO: Finished http module for {}:{}".format(args[0], args[1])
def convert_mp4(video_dir, audio_dir):
    '''
    Args:
        1. video_dir: Directory for all video files
        2. audio_dir: Directory where all converted files will be stored.
    '''
    # Get all file names
    video_file_names = sorted(glob.glob(video_dir + "*.mp4"))
    # Extract actual names of file, also remove any extensions
    video_names = map(lambda x: x.split('/')[-1].split(".")[0], video_file_names)

    # Command for converting video to audio
    command = "ffmpeg -i " + video_dir + "{0}.mp4 -ab 96k -ar 44100 -vn " + audio_dir + "{0}.wav"

    for name in video_names:
        subprocess.call(command.format(name), shell=True)
def delete_container(self, lxc_name):
    logger.info("delete container:%s" % lxc_name)
    if self.imgmgr.deleteFS(lxc_name):
        Container_Collector.billing_increment(lxc_name)
        self.historymgr.log(lxc_name, "Delete")
        logger.info("delete container %s success" % lxc_name)
        return [True, "delete container success"]
    else:
        logger.info("delete container %s failed" % lxc_name)
        return [False, "delete container failed"]
    #status = subprocess.call([self.libpath+"/lxc_control.sh", "delete", lxc_name])
    #if int(status) == 1:
    #    logger.error("delete container %s failed" % lxc_name)
    #    return [False, "delete container failed"]
    #else:
    #    logger.info("delete container %s success" % lxc_name)
    #    return [True, "delete container success"]

# start container, if running, restart it
def recover_container(self, lxc_name):
    logger.info("recover container:%s" % lxc_name)
    #status = subprocess.call([self.libpath+"/lxc_control.sh", "status", lxc_name])
    [success, status] = self.container_status(lxc_name)
    if not success:
        return [False, status]
    self.imgmgr.checkFS(lxc_name)
    if status == 'stopped':
        logger.info("%s stopped, recover it to running" % lxc_name)
        if self.start_container(lxc_name)[0]:
            self.historymgr.log(lxc_name, "Recover")
            if self.start_services(lxc_name)[0]:
                logger.info("%s recover success" % lxc_name)
                return [True, "recover success"]
            else:
                logger.error("%s recover failed with services not start" % lxc_name)
                return [False, "recover failed for services not start"]
        else:
            logger.error("%s recover failed for container starting failed" % lxc_name)
            return [False, "recover failed for container starting failed"]
    else:
        logger.info("%s recover success" % lxc_name)
        return [True, "recover success"]
def service(action, service_name, **kwargs):
    """Control a system service.

    :param action: the action to take on the service
    :param service_name: the name of the service to perform the action on
    :param **kwargs: additional params to be passed to the service command in
                     the form of key=value.
    """
    if init_is_systemd():
        cmd = ['systemctl', action, service_name]
    else:
        cmd = ['service', service_name, action]
        for key, value in six.iteritems(kwargs):
            parameter = '%s=%s' % (key, value)
            cmd.append(parameter)
    return subprocess.call(cmd) == 0
def load_previous(self, path=None):
    """Load previous copy of config from disk.

    In normal usage you don't need to call this method directly - it
    is called automatically at object initialization.

    :param path:
        File path from which to load the previous config. If `None`,
        config is loaded from the default location. If `path` is
        specified, subsequent `save()` calls will write to the same
        path.
    """
    self.path = path or self.path
    with open(self.path) as f:
        self._prev_dict = json.load(f)
    for k, v in copy.deepcopy(self._prev_dict).items():
        if k not in self:
            self[k] = v
def _run_apt_command(cmd, fatal=False):
    """Run an apt command with optional retries.

    :param: cmd: str: The apt command to run.
    :param: fatal: bool: Whether the command's output should be checked and
        retried.
    """
    # Provide DEBIAN_FRONTEND=noninteractive if not present in the environment.
    cmd_env = {
        'DEBIAN_FRONTEND': os.environ.get('DEBIAN_FRONTEND', 'noninteractive')}

    if fatal:
        _run_with_retries(
            cmd, cmd_env=cmd_env, retry_exitcodes=(1, APT_NO_LOCK,),
            retry_message="Couldn't acquire DPKG lock")
    else:
        env = os.environ.copy()
        env.update(cmd_env)
        subprocess.call(cmd, env=env)
def download_mnist(dirpath):
    data_dir = os.path.join(dirpath, 'mnist')
    if os.path.exists(data_dir):
        print('Found MNIST - skip')
        return
    else:
        os.mkdir(data_dir)
    url_base = 'http://yann.lecun.com/exdb/mnist/'
    file_names = ['train-images-idx3-ubyte.gz',
                  'train-labels-idx1-ubyte.gz',
                  't10k-images-idx3-ubyte.gz',
                  't10k-labels-idx1-ubyte.gz']
    for file_name in file_names:
        url = (url_base + file_name).format(**locals())
        print(url)
        out_path = os.path.join(data_dir, file_name)
        cmd = ['curl', url, '-o', out_path]
        print('Downloading ', file_name)
        subprocess.call(cmd)
        cmd = ['gzip', '-d', out_path]
        print('Decompressing ', file_name)
        subprocess.call(cmd)
def run(self):
    _install.run(self)
    print("Installing udev rules...")
    if not os.path.isdir("/etc/udev/rules.d"):
        print("WARNING: udev rules have not been installed "
              "(/etc/udev/rules.d is not a directory)")
        return
    try:
        shutil.copy("./rivalcfg/data/99-steelseries-rival.rules",
                    "/etc/udev/rules.d/")
    except IOError:
        print("WARNING: udev rules have not been installed (permission denied)")
        return
    try:
        subprocess.call(["udevadm", "trigger"])
    except OSError:
        print("WARNING: unable to update udev rules, please run the "
              "'udevadm trigger' command")
        return
    print("Done!")
def set_data_field(record, field_name, field_val):
    assert(len(record.samples) == 1)
    new_format = record.FORMAT
    new_fields = new_format.split(':')
    if not(field_name in new_fields):
        new_fields = new_fields + [field_name]
        new_format = ':'.join(new_fields)

    sample_call = get_record_sample_call(record)
    data = sample_call.data
    data_dict = data._asdict()
    data_dict[field_name] = field_val

    new_sample_vals = []
    for field in new_fields:
        new_sample_vals.append(data_dict[field])

    # Note - the old way of passing the fields to pyVCF is memory intensive
    # because a fresh type is allocated for each call to make_calldata_tuple
    #data_instantiator = vcf.model.make_calldata_tuple(new_fields)
    #data = data_instantiator(*new_sample_vals)
    data = FakeNamedTuple(new_fields, new_sample_vals)

    sample_call.data = data
    record.samples[0] = sample_call
    record.FORMAT = new_format
def combine_vcfs(output_filename, input_vcf_filenames):
    tmp_filename = output_filename + ".tmp"

    for (i, fn) in enumerate(input_vcf_filenames):
        if i == 0:
            args = 'cat ' + fn
            subprocess.check_call(args + " > " + tmp_filename, shell=True)
        else:
            args = 'grep -v "^#" ' + fn
            ret = subprocess.call(args + " >> " + tmp_filename, shell=True)
            if ret == 2:
                raise Exception("grep call failed: " + args)

    # Sort and index the files
    tk_tabix.sort_vcf(tmp_filename, output_filename)
    tk_tabix.index_vcf(output_filename)
    os.remove(tmp_filename)
def run_cmake(self):
    print("Running CMake")
    build_dir_cmd_out = subprocess.call(
        ["mkdir", "build"],
        stdout=subprocess.DEVNULL,
        stderr=subprocess.DEVNULL)
    if build_dir_cmd_out != 0:
        print("Can't setup CMake build directory.")
        return

    if self.cmake_build_info["build_dir"].is_dir():
        try:
            subprocess.check_output(
                self.cmake_cmd_info["cmake_cmd"],
                cwd=str(self.cmake_build_info["build_dir"]))
        except subprocess.CalledProcessError as e:
            print(e.output)
            if not self.cmake_build_info["comp_data_cmake"].is_file():
                print("Couldn't setup CMake Project")
                return
    else:
        print("Couldn't setup CMake Project")
        return
def DeletePods(pod_name, yaml_file):
    """Deletes pods based on the given kubernetes config.

    Args:
      pod_name: 'name-prefix' selector for the pods.
      yaml_file: kubernetes yaml config.

    Raises:
      TimeoutError: if jobs didn't terminate for a long time.
    """
    command = [_KUBECTL, 'delete', '--filename=%s' % yaml_file]
    logging.info('Deleting pods: %s', ' '.join(command))
    subprocess.call(command)

    def CheckPodsAreTerminated():
        return not _GetPodNames(pod_name)

    if not _WaitUntil(100, CheckPodsAreTerminated):
        raise TimeoutError(
            'Timed out waiting for %s pod to terminate.' % pod_name)
def call_script(model_path, config, proto, eval_script, num_process=10, normalize=True):
    '''
    open pipe and call the script
    '''
    try:
        subprocess.call(
            " python {} {} --config={} "
            " --proto={} "
            " -p {} {}".format(
                eval_script, model_path, config, proto,
                num_process, ' -n ' if normalize else ''),
            shell=True)
    except:
        traceback.print_exc(file=sys.stdout)
        print 'error in call_bleu_script()'
        print model_path
def draw(self, graph, highlight=None):
    gv = ''
    gv += 'digraph G{} {{\n'.format(self._n)
    gv += ' layout=neato;\n'
    gv += self._ranks
    for s, d in graph.edges_iter():
        gv += ' "{}" -> "{}"'.format(s, d)
        if (s, d) == highlight:
            gv += '[color=red,penwidth=3]'
        gv += ';\n'
    gv += '}'

    fname = self.WSDIR + '/{0:04d}'.format(self._n)
    with open(fname + '.dot', 'w') as f:
        print(gv, file=f)

    cmd = ''
    cmd += '/usr/local/bin/neato'
    cmd += ' -Tpng {f}.dot -o{f}.png'.format(f=fname)
    subprocess.call(cmd, shell=True)

    self._n += 1
def isDaemonAlive(hostAndPort="{0}:{1}".format(IPFSAPI_IP, IPFSAPI_PORT)):
    """Ensure that the IPFS daemon is running via HTTP before proceeding"""
    client = ipfsapi.Client(IPFSAPI_IP, IPFSAPI_PORT)
    try:
        # OSError if ipfs not installed, redundant of below
        # subprocess.call(['ipfs', '--version'], stdout=open(devnull, 'wb'))
        # ConnectionError/AttributeError if IPFS daemon not running
        client.id()
        return True
    except (ConnectionError, exceptions.AttributeError):
        logError("Daemon is not running at http://" + hostAndPort)
        return False
    except OSError:
        logError("IPFS is likely not installed. "
                 "See https://ipfs.io/docs/install/")
        sys.exit()
    except:
        logError('Unknown error in retrieving daemon status')
        logError(sys.exc_info()[0])
def grab(bbox=None):
    if sys.platform == "darwin":
        f, file = tempfile.mkstemp('.png')
        os.close(f)
        subprocess.call(['screencapture', '-x', file])
        im = Image.open(file)
        im.load()
        os.unlink(file)
    else:
        size, data = grabber()
        im = Image.frombytes(
            "RGB", size, data,
            # RGB, 32-bit line padding, origo in lower left corner
            "raw", "BGR", (size[0]*3 + 3) & -4, -1
        )
    if bbox:
        im = im.crop(bbox)
    return im
def publish_cv(dryrun):
    print "Running Content View Publish..."

    # Set the initial state
    good_publish = False

    if not dryrun:
        rc = subprocess.call(['/usr/local/bin/publish_content_views', '-q', '-a'])
    else:
        msg = "Dry run - not actually performing publish"
        helpers.log_msg(msg, 'WARNING')
        rc = subprocess.call(['/usr/local/bin/publish_content_views', '-q', '-a', '-d'])

    if rc == 0:
        good_publish = True

    return good_publish
def promote_cv(dryrun, lifecycle):
    print "Running Content View Promotion to " + lifecycle + "..."

    # Set the initial state
    good_promote = False

    if not dryrun:
        rc = subprocess.call(['/usr/local/bin/promote_content_views', '-q', '-e', lifecycle])
    else:
        msg = "Dry run - not actually performing promotion"
        helpers.log_msg(msg, 'WARNING')
        rc = subprocess.call(['/usr/local/bin/promote_content_views', '-q', '-d', '-e', lifecycle])

    if rc == 0:
        good_promote = True

    return good_promote
def postModule(moduleTar, moduleInputDir, pfserver, pfmodpath, pfuser, pftoken):
    """ Function to push puppet modules using curl to Artifactory repository """
    # Remove module's extension (.tar.gz)
    puppetModuleNameNoExt = splitext(moduleTar)[0]
    # Remove the path from the module
    puppetModuleName = puppetModuleNameNoExt.split('/')[-1]
    # Split the module name into the required parts
    puppetModuleNameList = puppetModuleName.split('-')
    author = puppetModuleNameList[0]
    moduleName = puppetModuleNameList[1]
    version = puppetModuleNameList[2]
    url = "http://" + pfserver + pfmodpath + "/" + author + "/" + moduleName + "/" + moduleTar
    fileName = moduleInputDir + "/" + moduleTar
    # Put the files using curl (need to clean this up)
    authtoken = pfuser + ":" + pftoken
    subprocess.call(['curl', '-u', authtoken, '-XPUT', url, '-T', fileName])
def _setup_headers(self, bap):
    "pass type information from IDA to BAP"
    # this is very fragile, and may break in case
    # if we have several BAP instances, especially
    # when they are running on different binaries.
    # Will leave it as it is until issue #588 is
    # resolved in the upstream
    with self.tmpfile("h") as out:
        ida.output_types(out)
        subprocess.call([bap, '--api-add', 'c:"{0}"'.format(out.name)])

    def cleanup():
        subprocess.call([bap, '--api-remove',
                         'c:{0}'.format(os.path.basename(out.name))])

    self.on_cleanup(cleanup)
def graceful_exit(tmpdir, keep_data_files=False, proc=None, pkill_cmd=None):
    # kill process if any, but keep it in a try so it doesn't prevent directory clean-up
    try:
        if proc:
            proc.terminate()
            log.debug("Sent terminate to powstream process %s" % proc.pid)
    except:
        pass

    # if they are still not down, force them down
    try:
        if pkill_cmd:
            time.sleep(2)
            call([pkill_cmd, '-9', '-f', tmpdir], shell=False)
    except:
        pass

    # clean directory
    try:
        cleanup_dir(tmpdir, keep_data_files=keep_data_files)
    except:
        pass

##
# Removes a data file
def __render(self):
    """
    Build the argument list for the ``register_task_definition()`` call.

    :rtype: dict
    """
    r = {}
    r['family'] = self.family
    r['networkMode'] = self.networkMode
    if self.taskRoleArn:
        r['taskRoleArn'] = self.taskRoleArn
    r['containerDefinitions'] = [c.render() for c in self.containers]
    volumes = self.__get_volumes()
    if volumes:
        r['volumes'] = volumes
    return r
def _openDownloadFile(self, buildId, suffix):
    (tmpFd, tmpName) = mkstemp()
    url = self._makeUrl(buildId, suffix)
    try:
        os.close(tmpFd)
        env = {k: v for (k, v) in os.environ.items() if k in self.__whiteList}
        env["BOB_LOCAL_ARTIFACT"] = tmpName
        env["BOB_REMOTE_ARTIFACT"] = url
        ret = subprocess.call(["/bin/bash", "-ec", self.__downloadCmd],
                              stdin=subprocess.DEVNULL, stdout=subprocess.DEVNULL,
                              cwd="/tmp", env=env)
        if ret == 0:
            ret = tmpName
            tmpName = None
            return CustomDownloader(ret)
        else:
            raise ArtifactDownloadError("failed (exit {})".format(ret))
    finally:
        if tmpName is not None:
            os.unlink(tmpName)
def doHelp(availableCommands, argv, bobRoot):
    parser = argparse.ArgumentParser(prog="bob help",
                                     description="Display help information about command.")
    # Help without a command parameter gets handled by the main argument parser
    # in pym/bob/scripts.py.
    parser.add_argument('command', help="Command to get help for")
    args = parser.parse_args(argv)

    if args.command in availableCommands:
        manPage = "bob-" + args.command
        manSection = "1"
    else:
        manPage = "bob" + args.command
        manSection = "7"

    inSourceLoc = os.path.join(bobRoot, "doc", "_build", "man",
                               manPage + "." + manSection)
    if os.path.isfile(inSourceLoc):
        ret = subprocess.call(["man", inSourceLoc])
    else:
        ret = subprocess.call(["man", manSection, manPage])

    sys.exit(ret)
def _scanDir(self, workspace, dir):
    self.__dir = dir
    dir = os.path.join(workspace, dir)
    try:
        remotes = subprocess.check_output(["git", "remote", "-v"],
                                          cwd=dir, universal_newlines=True).split("\n")
        remotes = (r[:-8].split("\t") for r in remotes if r.endswith("(fetch)"))
        self.__remotes = {remote: url for (remote, url) in remotes}

        self.__commit = subprocess.check_output(["git", "rev-parse", "HEAD"],
                                                cwd=dir, universal_newlines=True).strip()
        self.__description = subprocess.check_output(
            ["git", "describe", "--always", "--dirty"],
            cwd=dir, universal_newlines=True).strip()
        self.__dirty = subprocess.call(["git", "diff-index", "--quiet", "HEAD", "--"],
                                       cwd=dir) != 0
    except subprocess.CalledProcessError as e:
        raise BuildError("Git audit failed: " + str(e))
    except OSError as e:
        raise BuildError("Error calling git: " + str(e))
def get_nlcd_fn(datadir=None):
    """Calls external shell script `get_nlcd.sh` to fetch:

    2011 Land Use Land Cover (nlcd) grids, 30 m

    http://www.mrlc.gov/nlcd11_leg.php
    """
    if datadir is None:
        datadir = iolib.get_datadir()
    #This is original filename, which requires ~17 GB
    #nlcd_fn = os.path.join(datadir, 'nlcd_2011_landcover_2011_edition_2014_10_10/nlcd_2011_landcover_2011_edition_2014_10_10.img')
    #get_nlcd.sh now creates a compressed GTiff, which is 1.1 GB
    nlcd_fn = os.path.join(datadir, 'nlcd_2011_landcover_2011_edition_2014_10_10/nlcd_2011_landcover_2011_edition_2014_10_10.tif')
    if not os.path.exists(nlcd_fn):
        cmd = ['get_nlcd.sh',]
        subprocess.call(cmd)
    return nlcd_fn
def get_bareground_fn(datadir=None):
    """Calls external shell script `get_bareground.sh` to fetch:

    ~2010 global bare ground, 30 m

    Note: unzipped file size is 64 GB! Original products are uncompressed,
    and tiles are available globally (including empty data over ocean).
    The shell script will compress all downloaded tiles using lossless LZW
    compression.

    http://landcover.usgs.gov/glc/BareGroundDescriptionAndDownloads.php
    """
    if datadir is None:
        datadir = iolib.get_datadir()
    bg_fn = os.path.join(datadir, 'bare2010/bare2010.vrt')
    if not os.path.exists(bg_fn):
        cmd = ['get_bareground.sh',]
        subprocess.call(cmd)
    return bg_fn

#Download latest global RGI glacier db
def get_glacier_poly(datadir=None):
    """Calls external shell script `get_rgi.sh` to fetch:

    Randolph Glacier Inventory (RGI) glacier outline shapefiles

    Full RGI database: rgi50.zip is 410 MB

    The shell script will unzip and merge regional shp into single global shp

    http://www.glims.org/RGI/
    """
    if datadir is None:
        datadir = iolib.get_datadir()
    #rgi_fn = os.path.join(datadir, 'rgi50/regions/rgi50_merge.shp')
    #Update to rgi60, should have this returned from get_rgi.sh
    rgi_fn = os.path.join(datadir, 'rgi60/regions/rgi60_merge.shp')
    if not os.path.exists(rgi_fn):
        cmd = ['get_rgi.sh',]
        subprocess.call(cmd)
    return rgi_fn

#Update glacier polygons
def safeInstall():
    FACTORIOPATH = getFactorioPath()

    try:
        if not os.path.isdir("%s" % (FACTORIOPATH)):
            if os.access("%s/.." % (FACTORIOPATH), os.W_OK):
                os.mkdir(FACTORIOPATH, 0o777)
            else:
                subprocess.call(['sudo', 'mkdir', '-p', FACTORIOPATH])
                subprocess.call(['sudo', 'chown', getpass.getuser(), FACTORIOPATH])

            os.mkdir(os.path.join(FACTORIOPATH, "saves"))
            os.mkdir(os.path.join(FACTORIOPATH, "config"))
            with open("%s/.bashrc" % (os.path.expanduser("~")), "r+") as bashrc:
                lines = bashrc.read()
                if lines.find("eval \"$(_FACTOTUM_COMPLETE=source factotum)\"\n") == -1:
                    bashrc.write("eval \"$(_FACTOTUM_COMPLETE=source factotum)\"\n")
                    print("You'll want to restart your shell for command autocompletion. Tab is your friend.")
            updateFactorio()
    except IOError as e:
        print("Cannot make %s. Please check permissions. Error %s" % (FACTORIOPATH, e))
        sys.exit(1)
def send_pkts_and_capture(port_interface_mapping, port_packet_list):
    '''sends packets to P4 and captures by sniffing'''
    queue = Queue.Queue()
    thd = threading.Thread(name="sniff_thread",
                           target=lambda: sniff_record(queue, port_interface_mapping))
    thd.start()

    # give the sniffer time to start, so packets are only sent once the sniff call is active
    time.sleep(1)
    for x in port_packet_list:
        port_num = x['port']
        iface_name = port_interface_mapping['port2intf'][port_num]
        sendp(x['packet'], iface=iface_name)
    thd.join()
    pack = queue.get(True)
    Packet_list = []
    for p in pack:
        eth = p.sniffed_on
        port_no = port_interface_mapping['intf_port_names'][eth]
        Packet_list.append({'port': port_no, 'packet': p})
    return Packet_list
def __call__(self, *args, **kwargs):
    self._caught_signal = None
    # Register handler for SIGTERM, then call `self.func`
    self.old_signal_handler = signal.getsignal(signal.SIGTERM)
    signal.signal(signal.SIGTERM, self.signal_handler)

    self.func(*args, **kwargs)

    # Restore old signal handler
    signal.signal(signal.SIGTERM, self.old_signal_handler)

    # Handle any signal caught during execution
    if self._caught_signal is not None:
        signum, frame = self._caught_signal
        if callable(self.old_signal_handler):
            self.old_signal_handler(signum, frame)
        elif self.old_signal_handler == signal.SIG_DFL:
            sys.exit(0)
def editPipeline(args, config):
    pipelineDbUtils = PipelineDbUtils(config)

    request = json.loads(pipelineDbUtils.getJobInfo(select=["request"],
                                                    where={"job_id": args.jobId})[0].request)

    _, tmp = mkstemp()
    with open(tmp, 'w') as f:
        f.write("{data}".format(data=json.dumps(request, indent=4)))

    if "EDITOR" in os.environ.keys():
        editor = os.environ["EDITOR"]
    else:
        editor = "/usr/bin/nano"

    if subprocess.call([editor, tmp]) == 0:
        with open(tmp, 'r') as f:
            request = json.load(f)
        pipelineDbUtils.updateJob(args.jobId, keyName="job_id",
                                  setValues={"request": json.dumps(request)})
    else:
        print "ERROR: there was a problem editing the request"
        exit(-1)
def service(action, service_name):
    """Control a system service"""
    if init_is_systemd():
        cmd = ['systemctl', action, service_name]
    else:
        cmd = ['service', service_name, action]
    return subprocess.call(cmd) == 0
def restart_on_change_helper(lambda_f, restart_map, stopstart=False,
                             restart_functions=None):
    """Helper function to perform the restart_on_change function.

    This is provided for decorators to restart services if files described
    in the restart_map have changed after an invocation of lambda_f().

    @param lambda_f: function to call.
    @param restart_map: {file: [service, ...]}
    @param stopstart: whether to stop, start or restart a service
    @param restart_functions: nonstandard functions to use to restart services
                              {svc: func, ...}
    @returns result of lambda_f()
    """
    if restart_functions is None:
        restart_functions = {}
    checksums = {path: path_hash(path) for path in restart_map}
    r = lambda_f()
    # create a list of lists of the services to restart
    restarts = [restart_map[path]
                for path in restart_map
                if path_hash(path) != checksums[path]]
    # create a flat list of ordered services without duplicates from lists
    services_list = list(OrderedDict.fromkeys(itertools.chain(*restarts)))
    if services_list:
        actions = ('stop', 'start') if stopstart else ('restart',)
        for service_name in services_list:
            if service_name in restart_functions:
                restart_functions[service_name](service_name)
            else:
                for action in actions:
                    service(action, service_name)
    return r
def relation_set(relation_id=None, relation_settings=None, **kwargs):
    """Set relation information for the current unit"""
    relation_settings = relation_settings if relation_settings else {}
    relation_cmd_line = ['relation-set']
    accepts_file = "--file" in subprocess.check_output(
        relation_cmd_line + ["--help"], universal_newlines=True)
    if relation_id is not None:
        relation_cmd_line.extend(('-r', relation_id))
    settings = relation_settings.copy()
    settings.update(kwargs)
    for key, value in settings.items():
        # Force value to be a string: it always should, but some call
        # sites pass in things like dicts or numbers.
        if value is not None:
            settings[key] = "{}".format(value)
    if accepts_file:
        # --file was introduced in Juju 1.23.2. Use it by default if
        # available, since otherwise we'll break if the relation data is
        # too big. Ideally we should tell relation-set to read the data from
        # stdin, but that feature is broken in 1.23.2: Bug #1454678.
        with tempfile.NamedTemporaryFile(delete=False) as settings_file:
            settings_file.write(yaml.safe_dump(settings).encode("utf-8"))
        subprocess.check_call(
            relation_cmd_line + ["--file", settings_file.name])
        os.remove(settings_file.name)
    else:
        for key, value in settings.items():
            if value is None:
                relation_cmd_line.append('{}='.format(key))
            else:
                relation_cmd_line.append('{}={}'.format(key, value))
        subprocess.check_call(relation_cmd_line)
    # Flush cache of any relation-gets for local unit
    flush(local_unit())
def status_set(workload_state, message):
    """Set the workload state with a message

    Use status-set to set the workload state with a message which is visible
    to the user via juju status. If the status-set command is not found then
    assume this is juju < 1.23 and juju-log the message instead.

    workload_state -- valid juju workload state.
    message        -- status update message
    """
    valid_states = ['maintenance', 'blocked', 'waiting', 'active']
    if workload_state not in valid_states:
        raise ValueError(
            '{!r} is not a valid workload state'.format(workload_state)
        )
    cmd = ['status-set', workload_state, message]
    try:
        ret = subprocess.call(cmd)
        if ret == 0:
            return
    except OSError as e:
        if e.errno != errno.ENOENT:
            raise
    log_message = 'status-set failed: {} {}'.format(workload_state, message)
    log(log_message, level='INFO')
def port_has_listener(address, port):
    """
    Returns True if the address:port is open and being listened to,
    else False.

    @param address: an IP address or hostname
    @param port: integer port

    Note calls 'nc' via a subprocess
    """
    cmd = ['nc', '-z', address, str(port)]
    result = subprocess.call(cmd)
    return not(bool(result))
def disable_ipv6():
    """Disable ufw IPv6 support in /etc/default/ufw"""
    exit_code = subprocess.call(['sed', '-i', 's/IPV6=.*/IPV6=no/g',
                                 '/etc/default/ufw'])
    if exit_code == 0:
        hookenv.log('IPv6 support in ufw disabled', level='INFO')
    else:
        hookenv.log("Couldn't disable IPv6 support in ufw", level="ERROR")
        raise UFWError("Couldn't disable IPv6 support in ufw")
def apt_mark(packages, mark, fatal=False):
    """Flag one or more packages using apt-mark"""
    log("Marking {} as {}".format(packages, mark))
    cmd = ['apt-mark', mark]
    if isinstance(packages, six.string_types):
        cmd.append(packages)
    else:
        cmd.extend(packages)

    if fatal:
        subprocess.check_call(cmd, universal_newlines=True)
    else:
        subprocess.call(cmd, universal_newlines=True)
def _python_cmd(*args):
    """
    Return True if the command succeeded.
    """
    args = (sys.executable,) + args
    return subprocess.call(args) == 0
def when_i_am_elected_leader(self, event):
    """Callback when this host gets elected leader."""

    # set running state
    self.previously_running = True

    self.LOG.info("Monasca Transform service running on %s "
                  "has been elected leader" % str(self.my_host_name))

    if CONF.service.spark_python_files:
        pyfiles = (" --py-files %s" % CONF.service.spark_python_files)
    else:
        pyfiles = ''

    event_logging_dest = ''
    if (CONF.service.spark_event_logging_enabled and
            CONF.service.spark_event_logging_dest):
        event_logging_dest = (
            "--conf spark.eventLog.dir="
            "file://%s" % CONF.service.spark_event_logging_dest)

    # Build the command to start the Spark driver
    spark_cmd = "".join((
        "export SPARK_HOME=",
        CONF.service.spark_home,
        " && ",
        "spark-submit --master ",
        CONF.service.spark_master_list,
        " --conf spark.eventLog.enabled=",
        CONF.service.spark_event_logging_enabled,
        event_logging_dest,
        " --jars " + CONF.service.spark_jars_list,
        pyfiles,
        " " + CONF.service.spark_driver))

    # Start the Spark driver
    # (specify shell=True in order to
    # correctly handle wildcards in the spark_cmd)
    subprocess.call(spark_cmd, shell=True)