我们从Python开源项目中,提取了以下50个代码示例,用于说明如何使用argparse.ArgumentDefaultsHelpFormatter()。
def generate_argparser():
    """Build the command-line parser for the node-data processing script.

    Returns:
        argparse.ArgumentParser: parser showing defaults in its help text.
    """
    parser = argparse.ArgumentParser(
        description=__doc__,
        formatter_class=argparse.ArgumentDefaultsHelpFormatter,
        epilog=LICENSE)
    # BUG FIX: the implicitly concatenated help text was missing a space
    # ("...one or moreRESULT.node.score.csv files").
    parser.add_argument('-d', '--nodedata', required=True, nargs=1,
                        help=("file containing paths of one or more "
                              "RESULT.node.score.csv files"))
    parser.add_argument('-t', '--tree', required=True, type=open, nargs=1,
                        help="tree file in Newick format")
    parser.add_argument('-o', '--out', required=True, nargs=1,
                        help="new output files prefix")
    parser.add_argument("-v", "--verbose", action="store_true")
    # These args are hidden to pass through to the treedata object
    parser.add_argument("-c", "--clade", nargs=1, help=argparse.SUPPRESS)
    parser.add_argument("-s", "--startk", type=int, default=0,
                        help=argparse.SUPPRESS)
    parser.add_argument("-p", "--stopk", type=int, help=argparse.SUPPRESS)
    return parser
def generate_argparser():
    """Generate the argparse ArgumentParser for this script.

    (FIX: corrected the "argparsr" typo in the original docstring.)

    Returns:
        argparse.ArgumentParser: parser showing defaults in its help text.
    """
    parser = argparse.ArgumentParser(
        description=__doc__,
        formatter_class=argparse.ArgumentDefaultsHelpFormatter,
        epilog=LICENSE)
    parser.add_argument('-t', '--tree', type=open, nargs=1,
                        help="input tree in newick format")
    parser.add_argument('-d', '--data', type=os.path.abspath, nargs=1,
                        help=("CSV output from quartet_sampling"
                              " (RESULT.node.score.csv)"))
    # Hidden pass-through options (not shown in --help).
    parser.add_argument("-c", "--clade", nargs=1, help=argparse.SUPPRESS)
    parser.add_argument("-v", "--verbose", action="store_true",
                        help="verbose screen output")
    parser.add_argument("-s", "--startk", type=int, default=0,
                        help=argparse.SUPPRESS)
    parser.add_argument("-p", "--stopk", type=int, help=argparse.SUPPRESS)
    return parser
def generate_argparser():
    """Build the command-line parser for the statistics script.

    Returns:
        argparse.ArgumentParser: parser showing defaults in its help text.
    """
    parser = argparse.ArgumentParser(
        description=__doc__,
        formatter_class=argparse.ArgumentDefaultsHelpFormatter,
        epilog=LICENSE)
    # BUG FIX: two implicitly concatenated help strings were missing a space
    # ("...output fromquartet_sampling.py" and "comma-separatedlist...").
    parser.add_argument('-d', '--data', type=os.path.abspath, nargs=1,
                        required=True,
                        help=("RESULT.node.score.csv file output from "
                              "quartet_sampling.py"))
    parser.add_argument("-c", "--clade", nargs=1,
                        help=("specify a clade using a comma-separated "
                              "list of 2+ descendant taxa"))
    parser.add_argument("-v", "--verbose", action="store_true",
                        help="verbose screen output")
    parser.add_argument("-s", "--startk", type=int, default=0,
                        help="starting branch numerical index")
    parser.add_argument("-p", "--stopk", type=int,
                        help="stopping branch numerical index")
    parser.add_argument("-o", "--out", type=os.path.abspath, nargs=1,
                        help="output file path for statistics")
    return parser
def get_arguments():
    """Parse command-line options for FAST5-to-FASTQ extraction.

    Returns:
        argparse.Namespace with ``dir`` normalised to an absolute path.
    """
    ap = argparse.ArgumentParser(
        description='FAST5 to FASTQ',
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    ap.add_argument('dir', type=str,
                    help='directory of FAST5 reads to extract (will be searched recursively)')
    # Filtering thresholds; zero / None values disable the corresponding filter.
    ap.add_argument('--min_length', type=int, default=0,
                    help='Exclude reads shorter than this length (in bp)')
    ap.add_argument('--min_mean_qual', type=float, default=0.0,
                    help='Exclude reads with a mean qscore less than this value')
    ap.add_argument('--min_qual_window', type=float, default=0.0,
                    help='Exclude reads where their mean qscore in a sliding window drops below this value')
    ap.add_argument('--window_size', type=int, default=50,
                    help='The size of the sliding window used for --min_qual_window')
    ap.add_argument('--target_bases', type=int, default=None,
                    help='If set, exclude the worst reads (as judged by their minimum qscore in a sliding window) such that only this many bases remain')
    opts = ap.parse_args()
    opts.dir = os.path.abspath(opts.dir)
    return opts
def add_pipeline(subparsers):
    """Pipeline subcommands."""
    # Parent "pipeline" command; with no subcommand it just prints its help.
    parent = subparsers.add_parser(
        'pipeline',
        help=add_pipeline.__doc__,
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parent.set_defaults(func=parent.print_help)
    children = parent.add_subparsers(title='Pipelines')
    # "app": prepare the full application pipeline.
    app_parser = children.add_parser(
        'app',
        help=runner.prepare_app_pipeline.__doc__,
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    app_parser.set_defaults(func=runner.prepare_app_pipeline)
    # "onetime": prepare a one-off pipeline; also gets the environment options.
    onetime_parser = children.add_parser(
        'onetime',
        help=runner.prepare_onetime_pipeline.__doc__,
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    onetime_parser.set_defaults(func=runner.prepare_onetime_pipeline)
    add_env(onetime_parser)
def parse_args(args):
    """Parse cfpp command-line options.

    Args:
        args: list of argument strings, or None to use ``sys.argv[1:]``.

    Returns:
        argparse.Namespace of parsed options.
    """
    argv = sys.argv[1:] if args is None else args
    parser = argparse.ArgumentParser(
        prog='cfpp',
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument('filename', metavar='filename')
    parser.add_argument('-s', '--search-path', action='append', default=['.'],
                        help='List of paths to search when reading files '
                             'referenced from the CloudFormation template. '
                             'The current working directory is always '
                             'searched first, regardless of this '
                             'setting. Specify this option once for each '
                             'directory to add to the search path.')
    parser.add_argument('--version', action='version', version=VERSION,
                        help='Display version number and exit.')
    return parser.parse_args(argv)
def command_line_arguments(command_line_parameters):
    """Parse the program options"""
    # Build the command-line parser (defaults are shown in --help).
    ap = argparse.ArgumentParser(
        description=__doc__,
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    ap.add_argument('-a', '--algorithm', metavar='x', nargs='+', required=True,
                    help='Biometric recognition; registered algorithms are: %s' % bob.bio.base.resource_keys('algorithm'))
    ap.add_argument('-e', '--extractor', metavar='x', nargs='+', required=True,
                    help='Feature extraction; registered feature extractors are: %s' % bob.bio.base.resource_keys('extractor'))
    ap.add_argument('-P', '--projector-file', metavar='FILE',
                    help='The pre-trained projector file, if the algorithm performs projection')
    ap.add_argument('-E', '--enroller-file', metavar='FILE',
                    help='The pre-trained enroller file, if the extractor requires enroller training')
    ap.add_argument('-m', '--model-files', metavar='MODEL', nargs='+', required=True,
                    help="A list of enrolled model files")
    ap.add_argument('-p', '--probe-files', metavar='PROBE', nargs='+', required=True,
                    help="A list of extracted feature files used as probes")
    # register bob's shared verbosity option
    bob.core.log.add_command_line_option(ap)
    opts = ap.parse_args(command_line_parameters)
    # apply the requested logging verbosity
    bob.core.log.set_verbosity_level(logger, opts.verbose)
    return opts
def command_line_arguments(command_line_parameters):
    """Parse the program options"""
    # Build the command-line parser (defaults are shown in --help).
    ap = argparse.ArgumentParser(
        description=__doc__,
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    ap.add_argument('-e', '--extractor', metavar='x', nargs='+', required=True,
                    help='Feature extraction; registered feature extractors are: %s' % bob.bio.base.resource_keys('extractor'))
    ap.add_argument('-E', '--extractor-file', metavar='FILE',
                    help="The pre-trained extractor file, if the extractor requires training")
    ap.add_argument('-p', '--preprocessor', metavar='x', nargs='+', required=True,
                    help='Data preprocessing; registered preprocessors are: %s' % bob.bio.base.resource_keys('preprocessor'))
    ap.add_argument('-i', '--input-file', metavar='PREPROCESSED', required=True,
                    help="The preprocessed data file to read.")
    ap.add_argument('-o', '--output-file', metavar='FEATURE', default='extracted.hdf5',
                    help="The file to write the extracted features into (should be of type HDF5)")
    # register bob's shared verbosity option
    bob.core.log.add_command_line_option(ap)
    opts = ap.parse_args(command_line_parameters)
    # apply the requested logging verbosity
    bob.core.log.set_verbosity_level(logger, opts.verbose)
    return opts
def command_line_arguments(command_line_parameters):
    """Parse the program options"""
    # Build the command-line parser (defaults are shown in --help).
    ap = argparse.ArgumentParser(
        description=__doc__,
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    ap.add_argument('-p', '--preprocessor', metavar='x', nargs='+', required=True,
                    help='Data preprocessing; registered preprocessors are: %s' % bob.bio.base.resource_keys('preprocessor'))
    ap.add_argument('-i', '--input-file', metavar='FILE', required=True,
                    help="The data file to be preprocessed.")
    # parser.add_argument('-a', '--annotations', nargs='+', help = "Key=value-pairs for the annotations")
    ap.add_argument('-a', '--annotation-file', metavar='FILE',
                    help="The annotation file for the given data file, if applicable and/or available; currently the only supported format is the 'named' annotation format.")
    ap.add_argument('-o', '--output-file', metavar='PREPROCESSED', default='preprocessed.hdf5',
                    help="Write the preprocessed data into this file (should be of type HDF5)")
    ap.add_argument('-c', '--convert-as-image', metavar='IMAGE',
                    help="Write the preprocessed data into this image file, converting it to an image, if possible")
    # register bob's shared verbosity option
    bob.core.log.add_command_line_option(ap)
    opts = ap.parse_args(command_line_parameters)
    # apply the requested logging verbosity
    bob.core.log.set_verbosity_level(logger, opts.verbose)
    return opts
def databases(command_line_parameters=None):
    """Print the data (and annotation) directories of all registered databases."""
    import argparse
    # Default replacement file lives in the user's home directory.
    default_file = "%s/.bob_bio_databases.txt" % os.environ["HOME"]
    ap = argparse.ArgumentParser(
        description="Prints a list of directories for registered databases",
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    ap.add_argument('-D', '--database-directories-file', metavar='FILE',
                    default=default_file,
                    help='The file, where database directories are stored (to avoid changing the database configurations)')
    opts = ap.parse_args(command_line_parameters)
    # get registered databases
    directories = bob.bio.base.utils.resources.database_directories(
        replacements=opts.database_directories_file)
    # print directories for all databases
    for name in sorted(directories):
        print("\n%s:" % name)
        print("Original data: %s" % directories[name][0])
        if len(directories[name]) > 1:
            print("Annotations: %s" % directories[name][1])
def main():
    """Convert a NAF document to FoLiA, writing to a file or to stdout."""
    ap = argparse.ArgumentParser(
        description="NAF to FoLiA convertor",
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    ap.add_argument('naffile', nargs='?', help='Path to a NAF input document')
    ap.add_argument('foliafile', nargs='?', help='Path to a FoLiA output document')
    ap.add_argument('--id', type=str,
                    help="Document ID for the FoLiA document (will be derived from the filename if not set)",
                    action='store', default="", required=False)
    opts = ap.parse_args()
    # No input file given: show usage and exit with the conventional code 2.
    if not opts.naffile:
        ap.print_help()
        sys.exit(2)
    doc = naf2folia(opts.naffile, opts.id)
    if opts.foliafile:
        doc.save(opts.foliafile)
    else:
        print(doc.xmlstring())
def main(argv=None):
    """Entry point: mirror SPUDS EMTF archives into XML and PNG files."""
    if argv is None:
        argv = sys.argv
    ap = ArgumentParser('Prepare EMTF material from SPUDS.',
                        formatter_class=ArgumentDefaultsHelpFormatter,
                        epilog=EPILOG)
    ap.add_argument('destination_path', type=str,
                    help='root path to store XML and PNG files')
    ap.add_argument('zip_path', type=str,
                    help='root path where SPUDS EMTF .zip files are to be found')
    ap.add_argument('expected', type=int,
                    help='number of EMTF records expected across the .zip files')
    opts = ap.parse_args(argv[1:])
    mirror(opts.destination_path, opts.zip_path, opts.expected)
def main(argv=None):
    """Entry point: fetch USArray MT data from IRIS and store it as HDF5."""
    if argv is None:
        argv = sys.argv
    ap = ArgumentParser('Fetch USArray MT data from IRIS.',
                        formatter_class=ArgumentDefaultsHelpFormatter)
    ap.add_argument('output_fname', type=str, help='name of output HDF5 file')
    ap.add_argument('station_id', type=str, help='USArray station identifier')
    ap.add_argument('dt1', type=dt_parser, help='start date and time (UTC)')
    ap.add_argument('dt2', type=dt_parser, help='end date and time (UTC)')
    opts = ap.parse_args(argv[1:])
    # Fetch the time-series and persist it under the 'iris' key.
    frame = fetch(opts.station_id, opts.dt1, opts.dt2)
    frame.to_hdf(opts.output_fname, 'iris')
def main(argv=None):
    """Entry point: convert IAGA2002 magnetometer files into one HDF file."""
    if argv is None:
        argv = sys.argv
    ap = ArgumentParser('Convert IAGA2002 magnetometer data to HDF.',
                        formatter_class=ArgumentDefaultsHelpFormatter)
    ap.add_argument('hdf_fname', type=str, help='output HDF file name')
    ap.add_argument('iaga2002_fnames', type=str, metavar='iaga2002_fname',
                    nargs='*', help='input IAGA2002 file (in time order)')
    ap.add_argument('--he', action='store_true',
                    help='include data in HE coordinate')
    opts = ap.parse_args(argv[1:])
    iaga2hdf(opts.hdf_fname, opts.iaga2002_fnames, he=opts.he)
def main(argv=None):
    """Entry point: convert FGM format files into one HDF file."""
    if argv is None:
        argv = sys.argv
    ap = ArgumentParser('Convert FGM format data to HDF.',
                        formatter_class=ArgumentDefaultsHelpFormatter)
    ap.add_argument('hdf_fname', type=str, help='output HDF file name')
    ap.add_argument('fgm_fnames', type=str, metavar='fgm_fname', nargs='*',
                    help='input FGM file (in time order)')
    opts = ap.parse_args(argv[1:])
    fgm2hdf(opts.hdf_fname, opts.fgm_fnames)
def main(argv=None):
    """Entry point: split a SuperMAG CSV into daily IAGA2002 files."""
    if argv is None:
        argv = sys.argv
    ap = ArgumentParser('Convert SuperMAG CSV format data to daily IAGA2002 format.',
                        formatter_class=ArgumentDefaultsHelpFormatter)
    ap.add_argument('output_path', type=str,
                    help='path to store daily IAGA2002 format files')
    ap.add_argument('csv_fname', type=str, help='SuperMAG CSV file')
    ap.add_argument('--nez', action='store_true',
                    help='store (raw) HEZ components (aligned to local magnetic field) instead of XYZ components')
    opts = ap.parse_args(argv[1:])
    # Read the CSV into per-station frames, then write them out day by day.
    frames = read_sm_csv(opts.csv_fname)
    df_map2iaga(opts.output_path, frames, nez=opts.nez)
def main(argv=None):
    """Entry point: convert CARISMA FGM files to IAGA2002 format."""
    if argv is None:
        argv = sys.argv
    ap = ArgumentParser('Convert FGM format data (CARISMA) to IAGA2002 format.',
                        formatter_class=ArgumentDefaultsHelpFormatter)
    ap.add_argument('output_path', type=str,
                    help='path to store daily IAGA2002 format files')
    ap.add_argument('fgm_fnames', type=str, nargs='+', metavar='fgm_fname',
                    help='FGM format file')
    opts = ap.parse_args(argv[1:])
    # Convert each input file and log the source -> destination mapping.
    for src in opts.fgm_fnames:
        dst = fgm2iaga(opts.output_path, src)
        logger.info('{} -> {}'.format(src, dst))
def main(argv=None):
    """Entry point: model the E-field from B-field data via a 3-D transfer function."""
    if argv is None:
        argv = sys.argv
    ap = ArgumentParser('Compute E-field from B-field using 3-D transfer function model.',
                        formatter_class=ArgumentDefaultsHelpFormatter)
    ap.add_argument('output_mat_fname', type=str,
                    help='output, containing modeled E-field, in .mat format')
    ap.add_argument('input_iaga2002_fname', type=str,
                    help='input IAGA2002 magnetometer data file')
    ap.add_argument('xml_fname', type=str,
                    help='EM transfer function XML file')
    opts = ap.parse_args(argv[1:])
    process(opts.output_mat_fname, opts.input_iaga2002_fname, opts.xml_fname)
def main(argv=None):
    """Entry point: model the E-field from B-field data via a USGS 1-D model.

    FIX: the 'model' help text read "process use the given ..."; corrected
    the grammar to "process using the given ...".
    """
    if argv is None:
        argv = sys.argv
    ap = ArgumentParser('Compute E-field from B-field using USGS 1-D model.',
                        formatter_class=ArgumentDefaultsHelpFormatter)
    ap.add_argument('output_mat_fname', type=str,
                    help='output, containing modeled E-field, in .mat format')
    ap.add_argument('input_iaga2002_fname', type=str,
                    help='input IAGA2002 magnetometer data file')
    ap.add_argument('model', type=str, choices=sorted(USGS_MODEL_MAP),
                    help='process using the given 1-D conductivity model')
    opts = ap.parse_args(argv[1:])
    process(opts.output_mat_fname, opts.input_iaga2002_fname, opts.model)
def main(argv=None):
    """Entry point: convert an ACE HDF4 record into a pandas HDF5 record."""
    if argv is None:
        argv = sys.argv
    ap = ArgumentParser('Convert ACE HDF4 file to pandas HDF5 record.',
                        formatter_class=ArgumentDefaultsHelpFormatter)
    ap.add_argument('hdf5_fname', type=str, help='output HDF5 file')
    ap.add_argument('hdf4_fname', type=str, help='input ACE HDF4 data record')
    opts = ap.parse_args(argv[1:])
    hdf4to5(opts.hdf5_fname, opts.hdf4_fname)
def main():
    """Run a single server interaction chosen on the command line."""
    ap = argparse.ArgumentParser(
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    ap.add_argument('task', choices=['read', 'post', 'news', 'alerts'],
                    help='Type of server interaction')
    ap.add_argument('--name', default='', help='Name of user')
    ap.add_argument('--msg', default='', help='Optional message')
    ap.add_argument('--score', default=0, type=int, help='Achieved score')
    opts = ap.parse_args()
    if opts.task == 'post':
        # Posting needs the extra user data; the other tasks take no arguments.
        post_score(username=opts.name, score=opts.score, message=opts.msg)
    else:
        # choices= guarantees the task is one of these keys.
        {'read': read_highscore,
         'news': read_news,
         'alerts': read_alerts}[opts.task]()
def main():
    """Render Makefile.template from a template directory for the given user."""
    ap = argparse.ArgumentParser(
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    ap.add_argument("template_dir",
                    help="the absolute path to the template directory")
    ap.add_argument("username",
                    help="your username on the VM in the cloud-platform")
    opts = ap.parse_args()
    env = Environment(loader=FileSystemLoader(opts.template_dir),
                      trim_blocks=True)
    rendered = env.get_template("Makefile.template").render(
        username=str(opts.username))
    with open("Makefile", "w") as fh:
        fh.write(rendered)
def opt_parser():
    """Build and return the fontmerger command-line parser."""
    p = ArgumentParser(prog='fontmerger',
                       formatter_class=ArgumentDefaultsHelpFormatter)
    # Positional: fonts that other glyph sets get merged into.
    p.add_argument('base_fonts', metavar='BASE_FONT', nargs='*', default=[],
                   help='target fonts')
    p.add_argument('-V', '--version', dest='show_version', action='store_true',
                   default=False, help='show version')
    p.add_argument('-v', '--verbose', dest='verbose', action='store_true',
                   default=False, help='verbose mode')
    p.add_argument('-c', '--config', dest='config', default='./fonts.json',
                   help='a configuration file which define font merge context by JSON format')
    p.add_argument('-x', '--ext-fonts', dest='ext_fonts', metavar='EXT_FONT_ID',
                   nargs='*', default=[],
                   help='a list of font identifier that merging fonts')
    p.add_argument('-i', '--info', dest='info', action='store_true',
                   default=False, help='show base font information')
    p.add_argument('-o', '--output', dest='outputdir', metavar='OUTPUT_DIR',
                   default='./', help='output directory')
    p.add_argument('-l', '--list', dest='list_fonts', action='store_true',
                   default=False, help='show available additional fonts')
    p.add_argument('-p', '--preview', dest='preview_fonts', action='store_true',
                   default=False, help='preview fonts')
    p.add_argument('--all', dest='all', action='store_true', default=False,
                   help='extend all fonts')
    p.add_argument('--suffix', dest='suffix', help='font name suffix')
    p.add_argument('--debug', dest='debug', action='store_true', default=False,
                   help='debug mode')
    return p
def parse_args():
    """Build the ArgumentParser for the ARG-tracking prototype.

    Note: despite the name, this returns the parser itself; the caller
    invokes ``.parse_args()`` on it.
    """
    desc = "Prototype implementation of ARG tracking and regular garbage collection."
    p = argparse.ArgumentParser(
        description=desc,
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    p.add_argument('--popsize', '-N', type=int, help="Diploid population size")
    p.add_argument('--theta', '-T', type=float, help="4Nu")
    p.add_argument('--rho', '-R', type=float, help="4Nr")
    p.add_argument('--pdel', default=0.0, type=float,
                   help="Ratio of deleterious mutations to neutral mutations.")
    p.add_argument('--nsam', '-n', type=int,
                   help="Sample size (in chromosomes).")
    p.add_argument('--seed', '-S', type=int, help="RNG seed")
    p.add_argument('--gc', '-G', type=int, help="GC interval")
    p.add_argument('--neutral', action='store_true',
                   help="Simulate no selection")
    p.add_argument('--neutral_mutations', action='store_true',
                   help="Simulate neutral mutations. If False, ARG is tracked instead and neutral mutations dropped down on the sample afterwards.")
    return p
def parse_cli():
    """Parse command-line interface arguments for the nodout -> VTK tool."""
    import argparse
    ap = argparse.ArgumentParser(
        description="Generate disp.dat data from an ls-dyna nodout file.",
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    ap.add_argument("--nodout", help="ASCII file containing nodout data",
                    default="nodout")
    ap.add_argument("--vtkout", help="VTK filename (without extension)",
                    default="disp")
    return ap.parse_args()
def parse_cli():
    """Parse command-line interface arguments for the nodout -> disp.dat tool."""
    from argparse import ArgumentParser, ArgumentDefaultsHelpFormatter
    ap = ArgumentParser(
        description="Generate disp.dat data from an ls-dyna nodout file.",
        formatter_class=ArgumentDefaultsHelpFormatter)
    ap.add_argument("--nodout", help="ASCII file containing nodout data",
                    default="nodout")
    ap.add_argument("--dispout",
                    help="name of the binary displacement output file",
                    default="disp.dat")
    ap.add_argument("--legacynodes",
                    help="repeat saving node IDs for each timestep",
                    action="store_true")
    return ap.parse_args()
def build_parser():
    """Build argument parser."""
    ap = argparse.ArgumentParser(
        description=__doc__,
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    # Required args
    ap.add_argument("--user_input_yml", "-u", required=True,
                    help=".yml file with user-defined inputs")
    # Optional overrides of values inside user_input_yml
    ap.add_argument("--out_dir", "-o", default=None,
                    help="output directory that overrides what's provided in user_input_yml")
    ap.add_argument("--psp_on_clue_yml", "-p", default=None,
                    help="path to local YML file that overrides what's provided in user_input_yml")
    ap.add_argument("--verbose", "-v", action="store_true", default=False,
                    help="whether to increase the # of messages reported")
    return ap
def build_parser():
    """Build the gct-to-pw argument parser."""
    ap = argparse.ArgumentParser(
        description=__doc__,
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    # Required args
    ap.add_argument("gct_file_path", type=str, help="filepath to gct file")
    ap.add_argument("out_pw_file_path", type=str,
                    help="filepath to output pw file")
    # Optional args
    ap.add_argument("-plate_field", type=str, default="det_plate",
                    help="metadata field name specifying the plate")
    ap.add_argument("-well_field", type=str, default="det_well",
                    help="metadata field name specifying the well")
    return ap
def build_parser():
    """Build argument parser."""
    ap = argparse.ArgumentParser(
        description=__doc__,
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    # Positional inputs and outputs
    ap.add_argument("in_gct_path", type=str, help="full path to input gct")
    ap.add_argument("out_dir", type=str, help="where to save output")
    ap.add_argument("out_prefix", type=str,
                    help="prefix for naming output figure and its title")
    ap.add_argument("target_id", type=str,
                    help="which row of connectivity matrix to extract")
    # Optional plotting controls
    ap.add_argument("-queries_to_highlight", "-qth", nargs="*", type=str,
                    default=None, help="which queries to highlight")
    ap.add_argument("-conn_metric", type=str, default="KS test statistic",
                    help="connectivity metric to use for plot labeling")
    return ap
def build_parser():
    """Build argument parser."""
    ap = argparse.ArgumentParser(
        description=__doc__,
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    # Required args
    ap.add_argument("--in_gct_path", "-i", required=True,
                    help="path to input gct file")
    # Optional args
    ap.add_argument("--in_gct2_path", "-i2", help="path to second gct file")
    ap.add_argument("--out_name", "-o", default="steep_output.gct",
                    help="what to name the output similarity file")
    ap.add_argument("--similarity_metric", "-s", default="spearman",
                    choices=["spearman", "pearson"],
                    help="similarity metric to use for comparing columns")
    ap.add_argument("--verbose", "-v", action="store_true", default=False,
                    help="whether to increase the # of messages reported")
    return ap
def main():
    """Download an archived reference APK for patch-size measurement."""
    ap = argparse.ArgumentParser(
        description='Utility for downloading archived APKs used for measuring '
                    'per-milestone patch size growth.',
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    ap.add_argument('--download-path', default=DEFAULT_DOWNLOAD_PATH,
                    help='Directory to store downloaded APKs.')
    ap.add_argument('--milestone', default=CURRENT_MILESTONE,
                    help='Download reference APK for this milestone.')
    ap.add_argument('--apk', default=DEFAULT_APK, help='APK name.')
    ap.add_argument('--builder', default=DEFAULT_BUILDER, help='Builder name.')
    ap.add_argument('--bucket', default=DEFAULT_BUCKET,
                    help='Google storage bucket where APK is stored.')
    opts = ap.parse_args()
    MaybeDownloadApk(opts.builder, opts.milestone, opts.apk,
                     opts.download_path, opts.bucket)
def main():
    """SDK Navigator Example

    Demonstrates Navigator by echoing input values from wheels and buttons.

    Uses the intera_interface.Navigator class to demonstrate an example of
    using the register_callback feature. Shows Navigator input of the arm
    for 10 seconds.
    """
    ap = argparse.ArgumentParser(
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    ap.add_argument("-n", "--navigator", dest="nav_name", default="right",
                    choices=["right", "head"],
                    help='Navigator on which to run example')
    # rospy.myargv strips ROS remapping arguments before parsing.
    opts = ap.parse_args(rospy.myargv()[1:])
    rospy.init_node('sdk_navigator', anonymous=True)
    echo_input(opts.nav_name)
    return 0
def __init__(self, title=None, *args, **kwargs):
    """
    Args:
        title: Title of the app
    """
    self.title = title
    # Show argument defaults in help unless the caller supplied an
    # explicit formatter_class.
    kwargs.setdefault("formatter_class",
                      argparse.ArgumentDefaultsHelpFormatter)  # SortedHelpFormatter
    # -h/--help handling is managed elsewhere, so disable argparse's built-in.
    kwargs["add_help"] = False
    super(CLIArgParser, self).__init__(*args, **kwargs)
def main():
    """atx command-line entry point: dispatch to subcommand handlers."""
    root = argparse.ArgumentParser(
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    root.add_argument("-u", "--udid", required=False, help="iPhone udid")
    subcommands = root.add_subparsers()

    @contextmanager
    def add_parser(name):
        # Every subparser shares the defaults-showing help formatter.
        yield subcommands.add_parser(
            name, formatter_class=argparse.ArgumentDefaultsHelpFormatter)

    with add_parser('developer') as sub:
        sub.set_defaults(func=load_main('atx.cmds.iosdeveloper'))
    with add_parser('screencap') as sub:
        sub.add_argument('-o', '--output', default='screenshot.png',
                         help='take iPhone screenshot')
        sub.add_argument('-r', '--rotate', type=int,
                         choices=[0, 90, 180, 270], default=0,
                         help='screen rotation')
        sub.set_defaults(func=_screencap)
    opts = root.parse_args()
    opts.func(opts)
def options(self):
    """Read CLI options and store them on this instance.

    Side effects:
        Sets ``self.refresh_token``, ``self.org_id``, ``self.sddc_id`` and
        ``self.interval_sec`` (coerced to int) from the parsed arguments.
    """
    ap = argparse.ArgumentParser(
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    ap.add_argument('refresh_token', help='VMware Cloud API refresh token')
    ap.add_argument('org_id', help='Organization identifier.')
    ap.add_argument('sddc_id', help='Sddc Identifier.')
    ap.add_argument('-i', '--interval_sec', default=60,
                    help='Task pulling interval in sec')
    parsed = ap.parse_args()
    self.refresh_token = parsed.refresh_token
    self.org_id = parsed.org_id
    self.sddc_id = parsed.sddc_id
    # The default is already an int, but a user-supplied value arrives as str.
    self.interval_sec = int(parsed.interval_sec)
def create_assembler_parser():
    """Create and return the ArgumentParser for the 2-pass assembler."""
    ap = argparse.ArgumentParser(
        description='A 2 pass assembler.',
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    ap.add_argument("file", help="file to be assembled.")
    ap.add_argument('-o', '--outfile', default=None, required=False,
                    help='output file')
    return ap
def make_parser():
    """Build the origen command-line parser."""
    ap = argparse.ArgumentParser(
        'origen', formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    ap.add_argument('xs_tape9', metavar='xs-tape9',
                    help="""path to the cross section TAPE9 file. If the path is not absolute, defaults to looking in {LIBS_DIR}""".format(LIBS_DIR=LIBS_DIR))
    ap.add_argument('time', help='the time in sec', type=float)
    ap.add_argument('--phi', help='the neutron flux in [n/cm^2/sec]',
                    type=float, default=4e14)
    ap.add_argument('--nuclide', help="The initial starting nuclide.",
                    default="U235")
    ap.add_argument('--decay-tape9', help="path to the decay TAPE9 file.",
                    default=decay_TAPE9)
    ap.add_argument('--origen', help="Path to the origen executable",
                    default=ORIGEN)
    # Paired negative flags: store_false flips the corresponding dest.
    ap.add_argument('--no-run-origen', action='store_false', dest='run_origen',
                    help="Don't run origen")
    ap.add_argument('--no-run-cram', action='store_false', dest='run_cram',
                    help="Don't run cram")
    ap.add_argument('--hdf5-file', default='data/results.hdf5',
                    help="""hdf5 file to write results to""")
    return ap
def make_parser():
    """Build the tape9sparse command-line parser."""
    ap = argparse.ArgumentParser(
        'tape9sparse', formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    ap.add_argument('tape9s', nargs='+',
                    help="""Paths to the TAPE9 files. If a path is a directory, a set of default libraries will be gathered from that directory (transmutagen.origen_all.ALL_LIBS)""")
    ap.add_argument('--phi', help='the neutron flux in [n/cm^2/sec]',
                    type=float, default=4e14)
    ap.add_argument('-f', '--format', help='The sparse matrix format',
                    default='csr', dest='format')
    ap.add_argument('-d', '--decay', help='path to the decay file, if needed',
                    default='decay.lib', dest='decaylib')
    # Paired flags sharing one dest; fission is included by default.
    ap.add_argument('--include-fission', action='store_true', default=True,
                    dest='include_fission',
                    help='Include fission reactions in the matrix.')
    ap.add_argument('--no-include-fission', action='store_false',
                    dest='include_fission',
                    help="Don't include fission reactions in the matrix.")
    ap.add_argument('--alpha-as-He4', action='store_true', default=False,
                    help="Alpha reactions go to He4")
    ap.add_argument('-o', '--output-dir', default=None,
                    help='The directory to write the output files to, in npz format.')
    return ap
def test1():
    """ IntsRanges micro-test """
    def new_parser():
        ap = argparse.ArgumentParser(
            formatter_class=argparse.ArgumentDefaultsHelpFormatter)
        ap.add_argument("-r", "--ranges", default=[1, 3, 5], action=IntsRanges,
                        help="specify inclusive integer ranges")
        return ap
    # (argv, expected resulting ranges list) pairs
    cases = [
        ([], [1, 3, 5]),
        (['-r', '1'], [1]),
        (['-r', '1', '-r', '3-5'], [1, 3, 4, 5]),
        (['-r', '1-5-2', '-r', '5-7'], [1, 3, 5, 6, 7]),
        (['-r', '0-100-5'], list(range(0, 101, 5))),
    ]
    for argv, expected in cases:
        assert new_parser().parse_args(argv).ranges == expected
def main():
    """List course notebooks, or open one for the given user."""
    ap = ArgumentParser(formatter_class=ArgumentDefaultsHelpFormatter)
    ap.add_argument("-l", "--list", default=False, action='store_true',
                    help="when given, lists known notebooks and does *not* open anything")
    ap.add_argument("-c", "--course-gitdir", default=default_course_gitdir,
                    help="""location of a git repo where to fetch notebooks; needed in order to generate relevant URLs""")
    ap.add_argument("-i", "--index", default=0, type=int,
                    help="index in the list of known notebooks - run with -l to see list")
    ap.add_argument("-u", "--user", default='student-0001',
                    help="username for opening that notebook")
    ap.add_argument("-s", "--sleep", default=3, type=int,
                    help="delay in seconds to sleep between actions")
    opts = ap.parse_args()
    course, notebooks = list_notebooks(opts.course_gitdir)
    if opts.list:
        # NOTE(review): list(notebooks) consumes the iterable and discards the
        # result — presumably list_notebooks prints as a side effect; confirm.
        list(notebooks)
    else:
        run(opts.user, course, notebooks, opts.index, opts.sleep)
def main():
    """Spell-correct words read from standard input, emitting JSON or report output."""
    parser = argparse.ArgumentParser(description="", formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    # Option definitions live in setup_argparser (defined elsewhere in this module).
    setup_argparser(parser)
    args = parser.parse_args()
    # Forward every CLI option straight into the Corrector constructor.
    corrector = Corrector(**vars(args))
    print("Reading from standard input (if interactively invoked, type ctrl-D when done):",file=sys.stderr)
    # readinput tokenises stdin; mask marks which words to correct —
    # presumably per args.tok/args.blocksize; confirm against readinput.
    testwords, mask, _ = readinput(sys.stdin.readlines(), args.tok, args.blocksize)
    if args.json:
        print("[")
    for results in corrector.correct(testwords, mask):
        if args.json:
            corrector.output_json(results)
            # NOTE(review): a comma is printed after *every* element, leaving a
            # trailing comma before the closing "]" — strict JSON parsers will
            # reject this output; confirm the intended format.
            print(",")
        elif args.output:
            if args.report:
                corrector.output_report(results)
            corrector.output(results)
    if args.json:
        print("]")
def main(argv=None):
    '''
    Handles command line arguments and gets things started.

    :param argv: List of arguments, as if specified on the command-line.
                 If None, ``sys.argv[1:]`` is used instead.
    :type argv: list of str
    '''
    # Get command line arguments
    ap = argparse.ArgumentParser(
        description="Takes one or more file paths and reports their detected encodings",
        formatter_class=argparse.ArgumentDefaultsHelpFormatter,
        conflict_handler='resolve')
    # Zero or more input files, opened in binary mode; defaults to stdin.
    ap.add_argument('input',
                    help='File whose encoding we would like to determine.',
                    type=argparse.FileType('rb'), nargs='*',
                    default=[sys.stdin])
    ap.add_argument('--version', action='version',
                    version='%(prog)s {0}'.format(__version__))
    opts = ap.parse_args(argv)
    for stream in opts.input:
        if stream.isatty():
            print("You are running chardetect interactively. Press " +
                  "CTRL-D twice at the start of a blank line to signal the " +
                  "end of your input. If you want help, run chardetect " +
                  "--help\n", file=sys.stderr)
        print(description_of(stream, stream.name))
def get_arguments():
    """Parse command-line options for the FAST5 integrity checker.

    Returns:
        argparse.Namespace with ``dir`` normalised to an absolute path.
    """
    ap = argparse.ArgumentParser(
        description='FAST5 integrity check (prints bad fast5 files to stdout)',
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    ap.add_argument('dir', type=str,
                    help='directory of FAST5 reads to check (will be searched recursively)')
    opts = ap.parse_args()
    opts.dir = os.path.abspath(opts.dir)
    return opts
def get_arguments():
    """Parse and sanity-check command-line options for the FASTQ filter tool.

    Exits with an error if the input file is missing or no filter is active.
    """
    ap = argparse.ArgumentParser(
        description='FASTQ filter tool',
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    ap.add_argument('input_fastq', type=str,
                    help='FASTQ file of reads to be filtered (can be gzipped)')
    # Filtering thresholds; zero / None values disable the corresponding filter.
    ap.add_argument('--min_length', type=int, default=0,
                    help='Exclude reads shorter than this length (in bp)')
    ap.add_argument('--min_mean_qual', type=float, default=0.0,
                    help='Exclude reads with a mean qscore less than this value')
    ap.add_argument('--min_qual_window', type=float, default=0.0,
                    help='Exclude reads where their mean qscore in a sliding window drops below this value')
    ap.add_argument('--window_size', type=int, default=50,
                    help='The size of the sliding window used for --min_qual_window')
    ap.add_argument('--target_bases', type=int, default=None,
                    help='If set, exclude the worst reads (as judged by their minimum qscore in a sliding window) such that only this many bases remain')
    opts = ap.parse_args()
    opts.input_fastq = os.path.abspath(opts.input_fastq)
    if not os.path.isfile(opts.input_fastq):
        sys.exit('Error: could not find ' + opts.input_fastq)
    # Refuse to run as a no-op copy: at least one filter must be active.
    no_filters = (opts.min_length == 0 and opts.min_mean_qual == 0.0 and
                  opts.min_qual_window == 0.0 and opts.target_bases is None)
    if no_filters:
        sys.exit('Error: no filters were used so this tool refuses to run (because the output\n'
                 ' FASTQ would be identical to the input FASTQ). Please use one of the\n'
                 ' following filters: --min_length, --min_mean_qual, --min_qual_window\n'
                 ' or --target_bases.')
    return opts
def add_rebuild(subparsers):
    """Rebuild Pipeline subcommands."""
    sub = subparsers.add_parser(
        'rebuild',
        help=runner.rebuild_pipelines.__doc__,
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    sub.set_defaults(func=runner.rebuild_pipelines)
    sub.add_argument('-a', '--all', action='store_true',
                     help='Rebuild all Pipelines')
    # Falls back to $REBUILD_PROJECT when no project argument is given.
    sub.add_argument('project', nargs='?', default=os.getenv('REBUILD_PROJECT'),
                     help='Project to rebuild, overrides $REBUILD_PROJECT')
def add_autoscaling(subparsers):
    """Auto Scaling Group Policy subcommands."""
    sub = subparsers.add_parser(
        'autoscaling',
        help=runner.create_scaling_policy.__doc__,
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    sub.set_defaults(func=runner.create_scaling_policy)
def add_validate(subparsers):
    """Validate Spinnaker setup."""
    # Parent "validate" command; with no subcommand it just prints its help.
    parent = subparsers.add_parser(
        'validate',
        help=add_validate.__doc__,
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parent.set_defaults(func=parent.print_help)
    testers = parent.add_subparsers(title='Testers')
    # "all": run every validation check.
    all_parser = testers.add_parser(
        'all',
        help=validate.validate_all.__doc__,
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    all_parser.set_defaults(func=validate.validate_all)
    # "gate": validate only the Gate service.
    gate_parser = testers.add_parser(
        'gate',
        help=validate.validate_gate.__doc__,
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    gate_parser.set_defaults(func=validate.validate_gate)