The following 50 code examples, extracted from open-source Python projects, illustrate how to use os.path.splitext().
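Before the project examples, here is a minimal sketch (the paths are invented purely for illustration) of what os.path.splitext() itself returns: a (root, ext) pair in which ext is everything from the final dot of the last path component onward, and is empty when that component has no dot or only a leading dot.

from os.path import splitext

# The extension is split off the last path component only.
print(splitext('archive.tar.gz'))            # -> ('archive.tar', '.gz'), only the last suffix
print(splitext('/tmp/reports/summary.txt'))  # -> ('/tmp/reports/summary', '.txt')
print(splitext('.bashrc'))                   # -> ('.bashrc', ''), a leading dot is not an extension
print(splitext('notes'))                     # -> ('notes', ''), no extension at all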
def load_data(self):
    # work in the parent of the pages directory, because we
    # want the filenames to begin "pages/...".
    chdir(dirname(self.setup.pages_dir))
    rel = relpath(self.setup.pages_dir)
    for root, dirs, files in walk(rel):
        for filename in files:
            start, ext = splitext(filename)
            if ext in self.setup.data_extensions:
                #yield root, dirs, filename
                loader = self.setup.data_loaders.get(ext)
                path = join(root, filename)
                if not loader:
                    raise SetupError("Identified data file '%s' by type '%s' but no loader found" % (filename, ext))
                data_key = join(root, start)
                loaded_dict = loader.loadf(path)
                self.data[data_key] = loaded_dict
                #self.setup.log.debug("data key [%s] ->" % (data_key, ), root, filename, ); pprint.pprint(loaded_dict, sys.stdout)
    #pprint.pprint(self.data, sys.stdout)
    #print("XXXXX data:", self.data)
def get_all_pages(self):
    # work in the parent of the pages directory, because we
    # want the filenames to begin "pages/...".
    chdir(dirname(self.setup.pages_dir))
    rel = relpath(self.setup.pages_dir)
    for root, dirs, files in walk(rel):  # self.config.pages_dir):
        # examples:
        #
        # root='pages'             root='pages/categories'
        # dirs=['categories']      dirs=[]
        # files=['index.html']     files=['list.html']
        #
        # self.setup.log.debug("\nTEMPLATE ROOT: %s" % root)
        # self.setup.log.debug("TEMPLATE DIRS: %s" % dirs)
        # self.setup.log.debug("TEMPLATE FILENAMES: %s" % files)
        #
        # dir_context = global_context.new_child(data_tree[root])
        for filename in files:
            start, ext = splitext(filename)
            if ext in self.setup.template_extensions:
                # if filename.endswith(".html"):  # TODO: should this filter be required at all?
                yield Page(self.setup, filename, join(root, filename))
def prompt_pick_backup(message):
    """Prompts the user to pick an existing database, and returns the
    selected choice database ID and its metadata"""
    # First load all the saved databases (splitting extension and path)
    saved_db = [path.splitext(path.split(f)[1])[0] for f in glob('backups/*.tlo')]
    # Then prompt the user
    print('Available backups databases:')
    for i, db_id in enumerate(saved_db):
        metadata = get_metadata(db_id)
        print('{}. {}, ID: {}'.format(i + 1, metadata.get('peer_name', '???'), db_id))
    db_id = saved_db[get_integer(message, 1, len(saved_db)) - 1]
    return db_id, get_metadata(db_id)
def rinexobs(obsfn, writeh5=None, maxtimes=None):
    stem, ext = splitext(expanduser(obsfn))
    if ext[-1].lower() == 'o':  # raw text file
        with open(obsfn, 'r') as f:
            t = time.time()
            lines = f.read().splitlines(True)
            lines.append('')
            header, version, headlines, obstimes, sats, svset = scan(lines)
            print('{} is a RINEX {} file, {} kB.'.format(obsfn, version, getsize(obsfn) / 1000.0))
            data = processBlocks(lines, header, obstimes, svset, headlines, sats)
            print("finished in {0:.2f} seconds".format(time.time() - t))
        #%% save to disk (optional)
        if writeh5:
            h5fn = stem + '.h5'
            print('saving OBS data to {}'.format(h5fn))
            data.to_hdf(h5fn, key='OBS', mode='a', complevel=6, append=False)
    elif ext.lower() == '.h5':
        data = read_hdf(obsfn, key='OBS')
        # NOTE: the original referenced an undefined name 'blocks' here; 'data' appears intended
        print('loaded OBS data from {} to {}'.format(data.items[0], data.items[-1]))
    return data

# this will scan the document for the header info and for the line on
# which each block starts
def rasterize(self, vector_filename, feature_filename):
    """
    Burn features from a vector image onto a raster image.

    Args:
        vector_filename (str): filename of the vector image
        feature_filename (str): filename of the raster image
    """
    logging.info("Burning features from vector file: '%s' to raster file: '%s'" % (vector_filename, feature_filename))

    # assume the layer has the same name as the image
    layer_name = splitext(basename(vector_filename))[0]

    # convert vector features into nonzero pixels of the output file
    returncode = call(['gdal_rasterize', '-burn', '1', '-l', layer_name, vector_filename, feature_filename])

    # detect errors
    if returncode != 0:
        raise RuntimeError('Could not rasterize vector.')
def split_folder_and_path(filepath):
    """Split a file path into its folder, filename, and extension

    Args:
        path (str): Path to a file

    Returns:
        tuple: of (folder, filename (without extension), extension)

    """
    dirname = op.dirname(filepath)
    filename = op.basename(filepath)
    splitext = op.splitext(filename)
    filename_without_extension = splitext[0]
    extension = splitext[1]

    return dirname, filename_without_extension, extension
def get_icon(image, size):
    """
    Generate a GdkPixbuf image
    :param image: icon name or image path
    :return: GdkPixbux Image
    """
    directory = path.join(env.get("DATA_DIR"), "applications", "images") + "/"
    theme = Gtk.IconTheme.get_default()
    if theme.has_icon(path.splitext(image)[0]):
        icon = theme.load_icon(path.splitext(image)[0], size, 0)
    elif path.exists(directory + image):
        icon = GdkPixbuf.Pixbuf.new_from_file(directory + image)
    elif path.exists(image):
        icon = GdkPixbuf.Pixbuf.new_from_file(image)
    else:
        icon = theme.load_icon("image-missing", size, 0)
    if icon.get_width() != size or icon.get_height() != size:
        icon = icon.scale_simple(size, size, GdkPixbuf.InterpType.BILINEAR)
    return icon
def find_ancestor_cmd_path(self, cmd, cwd):
    """Recursively check for command binary in ancestors' node_modules/.bin directories."""
    node_modules_bin = path.normpath(path.join(cwd, 'node_modules/.bin/'))
    binary = path.join(node_modules_bin, cmd)

    if sublime.platform() == 'windows' and path.splitext(binary)[1] != '.cmd':
        binary += '.cmd'

    if binary and access(binary, X_OK):
        return binary

    parent = path.normpath(path.join(cwd, '../'))
    if parent == '/' or parent == cwd:
        return None

    return self.find_ancestor_cmd_path(cmd, parent)
def print_images(base_output_name, image_formats, dpi, path=None, transparent=False):
    file_base = opath.splitext(opath.basename(base_output_name))[0]
    for fmt in image_formats:
        if path:
            out_name = path
        else:
            out_name = "{}.{}".format(file_base, fmt)
        try:
            if fmt == 'png':
                plt.savefig(out_name, dpi=dpi, transparent=transparent)
            else:
                plt.savefig(out_name, format=fmt, transparent=transparent)
        except PermissionError:
            # thanks to https://github.com/wdecoster for the suggestion
            print("""You don't have permission to save pauvre plots to this directory. Try changing the directory and running the script again!""")
def dated_path(obj, file_data):
    try:
        prefix = getattr(obj, 'model_name')
    except BaseException:
        prefix = "undefined"

    parts = op.splitext(file_data.filename)
    rand = random.getrandbits(16)
    filename = u"{name}_{rand}{ext}".format(
        rand=rand, name=parts[0], ext=parts[1]
    )
    filename = secure_filename(filename)
    today = date.today()
    path = u"{prefix}/{t.year}/{t.month}/{filename}".format(
        prefix=prefix, t=today, filename=filename
    )
    return path
def init(o):
    if o.cfdna == None and o.gdna == None:
        raise Exception("At least one of --cfdna and --gdna should be specified")

    if o.cfdna != None:
        o.cfdna = AlignmentFile(o.cfdna, "rb")
        if not o.cfdna.has_index():
            raise Exception("Index not found, use `samtools index` to generate")

    if o.gdna != None:
        o.gdna = AlignmentFile(o.gdna, "rb")
        if not o.gdna.has_index():
            raise Exception("Index not found, use `samtools index` to generate")

    if o.output == None:
        basename, extname = splitext(o.query)
        o.output = basename + "_MrBam" + extname
def _parse_file(self, filename, file_url):
    """
    Attempts to parse a file with the loaded plugins
    Returns set of endpoints
    """
    file_set = set()
    with open(filename, 'r') as plug_in:
        lines = plug_in.readlines()

    ext = path.splitext(filename)[1].upper()
    if ext in self.plugins.keys() and self._ext_test(ext):
        for plug in self.plugins.get(ext):
            if plug.enabled:
                res = plug.run(lines)
                if len(res) > 0:
                    for i in res:
                        i = file_url + i
                        file_set.add(i)
    elif ext == '.TXT' and self._ext_test(ext):
        for i in lines:
            i = file_url + i
            file_set.add(i.strip())

    return file_set
def __getDownloadLink(self, link):
    if link == 'SproutCore.xml':
        data = requests.get('http://docs.sproutcore.com/feeds/' + link).text
        e = xml.etree.ElementTree.fromstring(data)
        version = e.findall('version')[0].text
        for atype in e.findall('url'):
            return {'url': atype.text, 'version': version}

    server = self.serverManager.getDownloadServer(self.localServer)
    data = requests.get(server.url + link).text
    e = xml.etree.ElementTree.fromstring(data)
    version = e.findall('version')[0].text
    for atype in e.findall('url'):
        if not self.localServer == None:
            disassembled = urlparse(atype.text)
            filename, file_ext = splitext(basename(disassembled.path))
            url = self.localServer
            if not url[-1] == '/':
                url = url + '/'
            url = url + filename + file_ext
            return {'url': url, 'version': version}
        if atype.text.find(server.url) >= 0:
            return {'url': atype.text, 'version': version}
def set_output_image_fn(pet_fn, predict_dir, verbose=1):
    '''
    set output directory for subject and create filename for image slices.
    output images are saved according to <predict_dir>/<subject name>/...png

    args:
        pet_fn -- filename of pet image on which prediction was based
        predict_dir -- output directory for predicted images
        verbose -- print output filename if 2 or greater, 0 by default

    return:
        image_fn -- output filename for slices
    '''
    pet_basename = splitext(basename(pet_fn))[0]
    name = [f for f in pet_basename.split('_') if 'sub' in f.split('-')][0]
    image_fn = predict_dir + os.sep + pet_basename + '_predict.png'
    if verbose >= 2:
        print('Saving to:', image_fn)
    return image_fn
def test_modules_name(self):
    # it checks that __all__ includes all the .py files in dmd folder
    import pydmd
    package = pydmd

    f_aux = []
    for (__, __, filenames) in walk('pydmd'):
        f_aux.extend(filenames)

    f = []
    for i in f_aux:
        file_name, file_ext = path.splitext(i)
        if file_name != '__init__' and file_ext == '.py':
            f.append(file_name)

    assert (sorted(package.__all__) == sorted(f))
def export_annotations(self):
    """Export annotations to a CSV file.

    The resulting CSV file has three columns. The first column contains
    the annotation type, the second column contains the onset (in s), and
    the third column contains the duration (in s). The first line is a
    header containing the column names.
    """
    fname = QFileDialog.getSaveFileName(self, "Export annotations", filter="*.csv")[0]
    if fname:
        name, ext = splitext(split(fname)[-1])
        ext = ext if ext else ".csv"  # automatically add extension
        fname = join(split(fname)[0], name + ext)
        anns = data.current.raw.annotations
        with open(fname, "w") as f:
            f.write("type,onset,duration\n")
            for a in zip(anns.description, anns.onset, anns.duration):
                f.write(",".join([a[0], str(a[1]), str(a[2])]))
                f.write("\n")
def trash_move(src, dst, topdir=None):
    filename = op.basename(src)
    filespath = op.join(dst, FILES_DIR)
    infopath = op.join(dst, INFO_DIR)
    base_name, ext = op.splitext(filename)

    counter = 0
    destname = filename
    while op.exists(op.join(filespath, destname)) or op.exists(op.join(infopath, destname + INFO_SUFFIX)):
        counter += 1
        destname = '%s %s%s' % (base_name, counter, ext)

    check_create(filespath)
    check_create(infopath)

    os.rename(src, op.join(filespath, destname))
    f = open(op.join(infopath, destname + INFO_SUFFIX), 'w')
    f.write(info_for(src, topdir))
    f.close()
def _load_from_id(self, data_id):
    img_file = osp.join(self.dataset_dir, data_id + '.jpg')
    img = scipy.misc.imread(img_file)
    # generate label from mask files
    lbl = np.zeros(img.shape[:2], dtype=np.int32)
    # shelf bin mask file
    shelf_bin_mask_file = osp.join(self.dataset_dir, data_id + '.pbm')
    shelf_bin_mask = scipy.misc.imread(shelf_bin_mask_file, mode='L')
    lbl[shelf_bin_mask < 127] = -1
    # object mask files
    mask_glob = osp.join(self.dataset_dir, data_id + '_*.pbm')
    for mask_file in glob.glob(mask_glob):
        mask_id = osp.splitext(osp.basename(mask_file))[0]
        mask = scipy.misc.imread(mask_file, mode='L')
        lbl_name = mask_id[len(data_id + '_'):]
        lbl_id = self.label_names.index(lbl_name)
        lbl[mask > 127] = lbl_id
    return img, lbl
def list_action():
    """Display existing profiles, with indices."""
    parser = OptionParser(version=mp.__version__)
    parser.disable_interspersed_args()
    (options, args) = parser.parse_args()
    if len(args) > 0:
        print("This command takes no argument.")
        sys.exit(1)
    filenames = get_profile_filenames("all")
    for n, filename in enumerate(filenames):
        ts = osp.splitext(filename)[0].split('_')[-1]
        print("{index} {filename} {hour}:{min}:{sec} {day}/{month}/{year}"
              .format(index=n, filename=filename,
                      year=ts[:4], month=ts[4:6], day=ts[6:8],
                      hour=ts[8:10], min=ts[10:12], sec=ts[12:14]))
def execute(self, context):
    from os.path import basename, splitext
    filepath = self.filepath

    # change the menu title to the most recently chosen option
    preset_class = getattr(bpy.types, self.menu_idname)
    preset_class.bl_label = bpy.path.display_name(basename(filepath))

    ext = splitext(filepath)[1].lower()

    # execute the preset using script.python_file_run
    if ext == ".py":
        bpy.ops.script.python_file_run(filepath=filepath)
    elif ext == ".xml":
        import rna_xml
        rna_xml.xml_file_run(context, filepath, preset_class.preset_xml_map)
    else:
        self.report({'ERROR'}, "unknown filetype: %r" % ext)
        return {'CANCELLED'}

    return {'FINISHED'}
def file_to_feature(file, extension='.h5', tree_leaves=5):
    r"""
    Parameters
    ----------
    file
    extension
    tree_leaves: `int`, number of sub-directories that define the feature name
        For example, if file='./a/b/c/d/e/f/g.mp4' and tree_leaves=4
        then feature_name = 'c_d_e_f_g' + extension

    Returns
    -------

    """
    base, ext = path.splitext(file)
    leaves = []
    for _ in range(tree_leaves):
        base, leaf = path.split(base)
        leaves.append(leaf)

    feature_name = '_'.join(leaves[::-1]) + extension
    return feature_name
def _check_if_pyc(fname):
    """Return True if the extension is .pyc, False if .py
    and None if otherwise"""
    from imp import find_module
    from os.path import realpath, dirname, basename, splitext

    # Normalize the file-path for the find_module()
    filepath = realpath(fname)
    dirpath = dirname(filepath)
    module_name = splitext(basename(filepath))[0]

    # Validate and fetch
    try:
        fileobj, fullpath, (_, _, pytype) = find_module(module_name, [dirpath])
    except ImportError:
        raise IOError("Cannot find config file. "
                      "Path maybe incorrect! : {0}".format(filepath))
    return pytype, fileobj, fullpath
def savefile(name, *arg, **kwargs):
    """Save a file without carrying of extension.

    arg: for .npy extension
    kwargs: for .pickle or .mat extensions
    """
    name = _safetySave(name)
    fileName, fileExt = splitext(name)
    # Pickle :
    if fileExt == '.pickle':
        with open(name, 'wb') as f:
            pickle.dump(kwargs, f)
    # Matlab :
    elif fileExt == '.mat':
        data = savemat(name, kwargs)
    # Numpy (single array) :
    elif fileExt == '.npy':
        data = np.save(name, arg)
def loadfile(name):
    """Load a file without carrying of extension. The function return
    a dictionnary data.
    """
    fileName, fileExt = splitext(name)
    # Pickle :
    if fileExt == '.pickle':
        with open(name, "rb") as f:
            data = pickle.load(f)
    # Matlab :
    elif fileExt == '.mat':
        data = loadmat(name)
    # Numpy (single array)
    elif fileExt == '.npy':
        data = np.load(name)

    return data
def main(argv=None):
    if len(sys.argv) <= 2:
        print('Usage ./%s --exp=<exp_name> --ec2_settings=<relative_path_to_ec2_settings_file>' % sys.argv[0])
        sys.exit(0)

    import importlib
    import os.path as osp
    import shutil

    module_name = 'sandbox.rocky.tf.launchers.%s' % (
        osp.splitext(FLAGS.ec2_settings)[0].replace('/', '.'))
    mod = importlib.import_module(module_name)

    dst_py = osp.join(osp.dirname(FLAGS.ec2_settings), FLAGS.exp + '.py')
    try:
        shutil.copy(FLAGS.ec2_settings, dst_py)
    except shutil.SameFileError as e:
        print(e)

    if type(mod.params) != list:
        mod.params = [mod.params]

    if hasattr(mod, 'base_params'):
        base_params = mod.base_params
    else:
        base_params = dict()

    N = 0
    for params in mod.params:
        ps = base_params.copy()
        ps.update(params)
        N += execute(params=ps, mode="ec2")
    print('Launched %d jobs.' % N)
def test_files_exist(self):
    """Test if all notebooks are processed correctly"""
    raw_dir = osp.join(self.src_dir, 'raw_examples')
    for f in find_files(raw_dir, 'example_*.ipynb'):
        base = osp.splitext(f)[0].replace(
            raw_dir, osp.join(self.src_dir, 'examples'))
        self.assertTrue(osp.exists(base + '.ipynb'),
                        msg=base + '.ipynb is missing')
        self.assertTrue(osp.exists(base + '.rst'),
                        msg=base + '.rst is missing')
        self.assertTrue(osp.exists(base + '.py'),
                        msg=base + '.py is missing')
        html = osp.splitext(
            f.replace(raw_dir, osp.join(self.out_dir, 'examples')))[0] + '.html'
        self.assertTrue(osp.exists(html), msg=html + ' is missing!')
def exists_in_db(self, filename):
    """
    Returns True if there exists a file in the DB with the same library
    name and hash as the given file
    """
    [name, _] = splitext(basename(filename))[0].split('__')
    if not exists(join(self.path, name, self.METADATA_FILENAME)):
        return False
    newHash = ReferenceDB.get_file_hash(filename)
    metadata = self.read_metadata(name)
    # Check all versions of the library, as identified by the filename
    for version in metadata:
        if metadata[version]['file_hash'] == newHash:
            return True
    return False
def saveQcFileAs():
    exitFullscreen()
    currentvideofile = mp.path
    if currentvideofile and not currentqcfile:
        basename = path.basename(currentvideofile)
        basename = "[QC]_{}_{}.txt".format(path.splitext(basename)[0], qcauthor)
        if sys.platform.startswith("win32"):
            trtable = str.maketrans('\\/:*?"<>|', "_________")
            basename = basename.translate(trtable)
        dirname = path.dirname(currentvideofile)
        defpath = path.join(dirname, basename)
    elif currentqcfile:
        defpath = currentqcfile
    else:
        defpath = "[QC]_UNNAMED_{}.txt".format(qcauthor)
    filename = QFileDialog.getSaveFileName(
        mainwindow,
        _("Save QC Document"),
        defpath,
        _("QC documents (*.txt);;All files (*.*)"),
    )[0]
    if filename:
        writeQcFile(filename)
def create_thumbnail(infile, thumbfile,
                     width=300, height=300,
                     cx=0.5, cy=0.5, border=4):
    baseout, extout = op.splitext(thumbfile)

    im = image.imread(infile)
    rows, cols = im.shape[:2]
    x0 = int(cx * cols - .5 * width)
    y0 = int(cy * rows - .5 * height)
    xslice = slice(x0, x0 + width)
    yslice = slice(y0, y0 + height)
    thumb = im[yslice, xslice]
    thumb[:border, :, :3] = thumb[-border:, :, :3] = 0
    thumb[:, :border, :3] = thumb[:, -border:, :3] = 0

    dpi = 100
    fig = plt.figure(figsize=(width / dpi, height / dpi), dpi=dpi)

    ax = fig.add_axes([0, 0, 1, 1], aspect='auto',
                      frameon=False, xticks=[], yticks=[])
    ax.imshow(thumb, aspect='auto', resample=True,
              interpolation='bilinear')
    fig.savefig(thumbfile, dpi=dpi)
    return fig
def get_embedded_train_val_split(layer, model_name=MODEL_NAME):
    assert layer in LAYER_RESULT_FUNCS.keys()
    model_path = join(MODELS_DIR, model_name)
    model_name_no_ext, _ = splitext(model_name)
    embedded_data_dir = join(
        DATA_DIR, 'embedding_{:s}'.format(model_name_no_ext))
    train_val_split_file = join(
        embedded_data_dir, 'train_val_split_{:s}.npz'.format(layer))
    if isfile(train_val_split_file):
        split = np.load(train_val_split_file)
        return split['arr_0'], split['arr_1'],\
            split['arr_2'], split['arr_3'],\
            split['arr_4'], split['arr_5']
    else:
        return _create_embedded_train_val_split(
            layer, model_path, train_val_split_file)
def convert(fq_in_path, fq_out_path, skip_ascii=False):
    print 'reading: {}...'.format(fq_in_path),
    with open(fq_in_path) as f:
        data = f.read()
    if data.startswith('solid'):
        if skip_ascii:
            print ' skipped because in ASCII format'
            return
        try:
            solid = read_ascii_file(StringIO(data))
        except ascii.SyntaxError:
            print ' FAILED to convert!'
            return
    else:
        try:
            solid = read_binary_file(StringIO(data))
        except binary.FormatError:
            print ' FAILED to convert!'
            return
    fq_out_base, ext = splitext(fq_out_path)
    fq_out_path = fq_out_base + '.raw'
    with open(fq_out_path, 'w') as f:
        for facet in solid.facets:
            f.write(
                ' '.join(' '.join(map(str, vertex)) for vertex in facet.vertices) + '\n'
            )
    print ' converted in: {}'.format(fq_out_path)
def trash_move(src, dst, topdir=None):
    filename = op.basename(src)
    filespath = op.join(dst, FILES_DIR)
    infopath = op.join(dst, INFO_DIR)
    base_name, ext = op.splitext(filename)

    counter = 0
    destname = filename
    while op.exists(op.join(filespath, destname)) or op.exists(op.join(infopath, destname + INFO_SUFFIX)):
        counter += 1
        destname = '%s %s%s' % (base_name, counter, ext)

    check_create(filespath)
    check_create(infopath)

    try:
        os.rename(src, op.join(filespath, destname))
    except:
        shutil.move(src, op.join(filespath, destname))
    f = open(op.join(infopath, destname + INFO_SUFFIX), 'w')
    f.write(info_for(src, topdir))
    f.close()
def main():
    args = check_argv()

    mat_syl_fn = path.join("thetaOscillator", args.lang + "_" + args.subset + "_bounds_t.mat")
    landmarks_dir = path.join("landmarks", args.lang + "_" + args.subset)
    landmarks_fn = path.join(landmarks_dir, "landmarks.unsup_syl.pkl")
    if not path.isdir(landmarks_dir):
        os.makedirs(landmarks_dir)

    print("Reading: " + mat_syl_fn)
    mat = scipy.io.loadmat(mat_syl_fn)

    n_wavs = mat["wav_files"].shape[0]
    landmarks = {}
    for i_wav in xrange(n_wavs):
        wav_label = path.splitext(path.split(str(mat["wav_files"][i_wav][0][0]))[-1])[0]
        bounds = [int(round(float(i[0]) * 100.0)) for i in mat["bounds_t"][i_wav][0]]
        landmarks[wav_label] = bounds[1:]  # remove first (0) landmark

    print("Writing: " + landmarks_fn)
    with open(landmarks_fn, "wb") as f:
        pickle.dump(landmarks, f, -1)
def main():
    args = check_argv()

    print datetime.datetime.now()

    print "Reading HTK features from directory:", args.htk_dir
    npz_dict = {}
    n_feat_files = 0
    for feat_fn in glob.glob(path.join(args.htk_dir, "*." + args.extension)):
        hlist_output = shell("HList -r " + feat_fn)
        features = [
            [float(i) for i in line.split(" ") if i != ""]
            for line in hlist_output.split("\n") if line != ""
            ]
        key = path.splitext(path.split(feat_fn)[-1])[0]
        npz_dict[key] = np.array(features)
        n_feat_files += 1
    print "Read", n_feat_files, "feature files"

    print "Writing Numpy archive:", args.npz_fn
    np.savez(args.npz_fn, **npz_dict)

    print datetime.datetime.now()
def main():
    args = check_argv()

    print("Reading:", args.mat_fn)
    mat = tables.open_file(args.mat_fn)

    n_audio = mat.root.files_train[0].shape[0]
    print("No. audio files:", n_audio)

    filenames = []
    for i_audio in xrange(n_audio):
        filenames.append("".join([chr(i[0]) for i in mat.root.files_train[0][i_audio][0]]))
    audio_keys = [path.splitext(path.split(i)[-1])[0] for i in filenames]

    features_dict = {}
    for i_audio in xrange(n_audio):
        features = mat.root.F_train_iter[0][i_audio][0]
        features_dict[audio_keys[i_audio].replace("_", "-")] = features.T

    print("Writing:", args.npz_fn)
    np.savez(args.npz_fn, **features_dict)
def get_fasta_filenames(self, *args):
    """Return all FASTA file names as a list."""
    ret = []
    for fn in args:
        ext = op.splitext(fn)[1].upper()
        if ext in [".XML"]:
            try:
                fns = ContigSet(fn).toExternalFiles()
                ret.extend(fns)
            except IOError:
                raise IOError("Could not open %s as ContigSet." % fn)
        else:
            ret.append(fn)

    for fn in ret:
        if op.splitext(fn)[1].upper() not in [".FA", ".FASTA"]:
            raise IOError("%s input must be FASTA or ContigSet." % self.__class__.__name__)
    return ret
def initialize_options(self):
    """Find all files of all locales.
    """
    self.paths = []
    self.data_path = '/mimesis/data'
    self.separators = (',', ':')
    self.data_dir = here + self.data_path
    self.before_total = 0
    self.after_total = 0

    for root, _, files in os.walk(self.data_dir):
        for file in sorted(files):
            if splitext(file)[1] == '.json':
                self.paths.append(
                    join(
                        relpath(root, self.data_dir),
                        file,
                    )
                )
def _run_exodus(*args):
    '''Run a Exodus using runpy
    '''
    fullpath = path.join(config.addonsdir, config.exodus['id'], config.exodus['entryfile'])
    entrypath, entryfile = path.split(fullpath)
    if entrypath not in sys.path:
        sys.path.insert(0, entrypath)
    module, _ = path.splitext(entryfile)

    # Exodus assumes thread names start at 1
    # and gets provider names from the thread name
    c = threading._counter = count().next
    c()  # consume 0

    sys.argv = list(args)

    # note: cannot use __import__ because Exodus spawns threads that do import
    # so that would cause deadlock
    old_modules = set(sys.modules.keys())
    # unload the newly added modules after runpy
    runpy.run_module(module)
    for k in set(sys.modules.keys()) - old_modules:
        del sys.modules[k]
def parrec2nii(PAR_file, cfg, compress=True):
    """ Converts par/rec files to nifti.gz. """
    base_dir = op.dirname(PAR_file)
    base_name = op.join(base_dir, op.splitext(PAR_file)[0])
    ni_name = base_name + '.nii.gz'

    REC_file = '%s.REC' % op.splitext(PAR_file)[0]

    if op.isfile(ni_name):
        [os.remove(f) for f in [REC_file] + [PAR_file]]
        return 0

    cmd = _construct_conversion_cmd(base_name, PAR_file, compress)
    with open(os.devnull, 'w') as devnull:
        subprocess.call(cmd, stdout=devnull)

    _rename_b0_files(base_dir=base_dir)
    [os.remove(f) for f in [REC_file] + [PAR_file]]
def convert_phy(f):
    try:
        # Try to skip 5 rows (new version)
        df = pd.read_csv(f, delim_whitespace=True, skiprows=5, header=0, low_memory=False)
        test = df['gx']
    except KeyError:
        # Else skip 4 rows (old version)
        df = pd.read_csv(f, delim_whitespace=True, skiprows=4, header=0, low_memory=False)

    gradients = ['gx', 'gy', 'gz']
    gradient_signal = np.array([df[g] for g in gradients]).sum(axis=0)
    gradient_signal[np.isnan(gradient_signal)] = 0
    gradient_signal = (gradient_signal - gradient_signal.mean()) / gradient_signal.std()

    fn = op.join(op.dirname(f), op.splitext(op.basename(f))[0])
    df.to_csv(fn + '.tsv.gz', sep='\t', index=None, compression='gzip')
    os.remove(f)
def meta_autodetect_platform(cls):
    """
    Dark magic to autodetect the platform for built-in shellcodes.

    User-defined shellcodes must define *arch* and *os*.
    """
    abspath = path.abspath
    join = path.join
    split = path.split
    splitext = path.splitext
    sep = path.sep
    module = cls.__module__
    if module != '__main__':
        tokens = cls.__module__.split('.')
        if len(tokens) < 2 or tokens[0] != base_package or \
                tokens[1] == base_file:
            return
        tokens.insert(-1, 'any')
        tokens = tokens[1:3]
    else:
        module = abspath(sys.modules[module].__file__)
        if not module.startswith(base_dir):
            return
        tokens = module.split(sep)
        tokens = tokens[len(base_dir.split(sep)):-1]
        while len(tokens) < 2:
            tokens.append('any')
    cls.arch, cls.os = tokens
def deobsfucate(self, filebase=None):
    """
    Scans through the segments, articles and content associated with an
    NZB-File and sets up the filenames defined in the segments to it's
    best effort

    filebase: provide a fallback filename base (the part of the file
              before the extension) to build on if we can't detect the
              file on our own.
    """
    # The name from the meta tag
    _name = self.meta.get('name', '').decode(self.encoding).strip()
    if not _name:
        if filebase:
            # Use base provided as a base
            _name = filebase
        else:
            # If we can't get a name from the meta tag to start with, then
            # we use the NZB-Filename itself as a backup
            _name = splitext(basename(self.path()))[0]

    for segment in self.segments:
        filename = segment.deobsfucate(_name)
        if filename:
            # Update
            segment.filename = filename
def execute(self, context):
    if not bpy.data.is_saved:
        self.report({'WARNING'}, "Save your file first")
        return {'CANCELLED'}

    script_file = os.path.realpath(__file__)
    addon_directory = os.path.dirname(script_file)

    # audio
    if bpy.context.scene.render.ffmpeg.audio_codec == 'NONE':
        bpy.context.scene.render.ffmpeg.audio_codec = 'AAC'
        bpy.context.scene.render.ffmpeg.audio_bitrate = 192

    # video
    if self.preset == 'youtube':
        bpy.ops.script.python_file_run(filepath=os.path.join(addon_directory, 'render_presets', 'youtube_1080.py'))
    elif self.preset == 'twitter':
        bpy.ops.script.python_file_run(filepath=os.path.join(addon_directory, 'render_presets', 'twitter_720p.py'))

    from os.path import splitext, dirname
    path = bpy.data.filepath

    exported_file_name = 'video'
    if self.name_pattern == 'blender':
        exported_file_name = splitext(bpy.path.basename(path))[0]
    elif self.name_pattern == 'folder':
        exported_file_name = dirname(path).rsplit(sep="\\", maxsplit=1)[-1]
    elif self.name_pattern == 'scene':
        exported_file_name = bpy.context.scene.name

    bpy.context.scene.render.filepath = "//" + exported_file_name + '.mp4'

    if self.auto_render:
        bpy.ops.render.render({'dict': "override"}, 'INVOKE_DEFAULT', animation=True)

    return {"FINISHED"}
def walk_data_files(self, path):
    for root, dirs, files in walk(path):
        for filename in files:
            start, ext = splitext(filename)
            if ext in self.setup.data_extensions:
                # if filename.endswith(".yml") or filename.endswith('.json') or filename.endswith('ini'):
                yield root, dirs, filename
def iter_files(root, exts=None, recursive=False):
    """
    Iterate over file paths within root filtered by specified extensions.

    :param compat.string_types root: Root folder to start collecting files
    :param iterable exts: Restrict results to given file extensions
    :param bool recursive: Wether to walk the complete directory tree
    :rtype collections.Iterable[str]: absolute file paths with given extensions
    """
    if exts is not None:
        exts = set((x.lower() for x in exts))

    def matches(e):
        return (exts is None) or (e in exts)

    if recursive is False:
        for entry in compat.scandir(root):
            if compat.has_scandir:
                ext = splitext(entry.name)[-1].lstrip('.').lower()
                if entry.is_file() and matches(ext):
                    yield entry.path
            else:
                ext = splitext(entry)[-1].lstrip('.').lower()
                if not compat.isdir(entry) and matches(ext):
                    yield join(root, entry)
    else:
        for root, folders, files in compat.walk(root):
            for f in files:
                ext = splitext(f)[-1].lstrip('.').lower()
                if matches(ext):
                    yield join(root, f)
def load_modules(directory):
    files = glob.glob("{}/*.py".format(directory))
    return [imp.load_source(splitext(basename(module))[0], module)
            for module in files]
def getArgs():
    """
    Parses command line arguments and returns them to the caller
    """
    __version__ = 'v0.2.5'
    parser = argparse.ArgumentParser()
    parser._action_groups.pop()

    required = parser.add_argument_group('Required arguments')
    required.add_argument('-i', '--input', required=True, metavar='<FILE>', help='Input file in FASTA format')

    optional = parser.add_argument_group('Optional arguments')
    optional.add_argument('-o', '--output', type=argparse.FileType('w'), metavar='<FILE>', default=sys.stdout, help='Output file name. Default: Input file name + _perf.tsv')
    optional.add_argument('-a', '--analyse', action='store_true', default=False, help='Generate a summary HTML report.')

    cutoff_group = optional.add_mutually_exclusive_group()
    cutoff_group.add_argument('-l', '--min-length', type=int, metavar='<INT>', help='Minimum length cutoff of repeat')
    cutoff_group.add_argument('-u', '--min-units', metavar='INT or FILE', help="Minimum number of repeating units to be considered. Can be an integer or a file specifying cutoffs for different motif sizes.")

    optional.add_argument('-rep', '--repeats', type=argparse.FileType('r'), metavar='<FILE>', help='File with list of repeats (Not allowed with -m and/or -M)')
    optional.add_argument('-m', '--min-motif-size', type=int, metavar='<INT>', help='Minimum size of a repeat motif in bp (Not allowed with -rep)')
    optional.add_argument('-M', '--max-motif-size', type=int, metavar='<INT>', help='Maximum size of a repeat motif in bp (Not allowed with -rep)')
    optional.add_argument('-s', '--min-seq-length', type=int, metavar='<INT>', default=0, help='Minimum size of sequence length for consideration (in bp)')
    optional.add_argument('-S', '--max-seq-length', type=float, metavar='<FLOAT>', default=inf, help='Maximum size of sequence length for consideration (in bp)')

    seqid_group = optional.add_mutually_exclusive_group()
    seqid_group.add_argument('-f', '--filter-seq-ids', metavar='<FILE>')
    seqid_group.add_argument('-F', '--target-seq-ids', metavar='<FILE>')

    optional.add_argument('--version', action='version', version='PERF ' + __version__)

    args = parser.parse_args()

    if args.repeats and (args.min_motif_size or args.max_motif_size):
        parser.error("-rep is not allowed with -m/-M")
    if args.repeats is None:
        if args.min_motif_size is None:
            args.min_motif_size = 1
        if args.max_motif_size is None:
            args.max_motif_size = 6
    if args.output.name == "<stdout>":
        args.output = open(splitext(args.input)[0] + '_perf.tsv', 'w')

    return args