The following code examples, extracted from open-source Python projects, illustrate how to use os.path.relpath().
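Before the project examples, here is a minimal sketch of the function itself: os.path.relpath(path, start=os.curdir) computes a relative path string from start to path. It operates purely on the strings, so the paths need not exist; the concrete paths below are made up for illustration, and the output shown is POSIX-style.

import os.path

# relpath() works on path strings only; nothing is checked on disk.
print(os.path.relpath('/var/log/app/today.log', '/var/log'))  # app/today.log
print(os.path.relpath('/var/log', '/var/log/app'))            # ..
print(os.path.relpath('/etc/hosts', '/var/log'))              # ../../etc/hosts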
def load_data(self):
    # work in the parent of the pages directory, because we
    # want the filenames to begin "pages/...".
    chdir(dirname(self.setup.pages_dir))
    rel = relpath(self.setup.pages_dir)
    for root, dirs, files in walk(rel):
        for filename in files:
            start, ext = splitext(filename)
            if ext in self.setup.data_extensions:
                #yield root, dirs, filename
                loader = self.setup.data_loaders.get(ext)
                path = join(root, filename)
                if not loader:
                    raise SetupError("Identified data file '%s' by type '%s' but no loader found" % (filename, ext))
                data_key = join(root, start)
                loaded_dict = loader.loadf(path)
                self.data[data_key] = loaded_dict
                #self.setup.log.debug("data key [%s] ->" % (data_key, ), root, filename, ); pprint.pprint(loaded_dict, sys.stdout)
    #pprint.pprint(self.data, sys.stdout)
    #print("XXXXX data:", self.data)
def get_all_pages(self):
    # work in the parent of the pages directory, because we
    # want the filenames to begin "pages/...".
    chdir(dirname(self.setup.pages_dir))
    rel = relpath(self.setup.pages_dir)
    for root, dirs, files in walk(rel):  # self.config.pages_dir):
        # examples:
        #
        # root='pages'           root='pages/categories'
        # dirs=['categories']    dirs=[]
        # files=['index.html']   files=['list.html']
        #
        # self.setup.log.debug("\nTEMPLATE ROOT: %s" % root)
        # self.setup.log.debug("TEMPLATE DIRS: %s" % dirs)
        # self.setup.log.debug("TEMPLATE FILENAMES: %s" % files)
        #
        #dir_context = global_context.new_child(data_tree[root])
        for filename in files:
            start, ext = splitext(filename)
            if ext in self.setup.template_extensions:
                # if filename.endswith(".html"):  # TODO: should this filter be required at all?
                yield Page(self.setup, filename, join(root, filename))
def add_arguments(self, parser):
    default_fixtures_dir = relpath(settings.DEVFIXTURE_DIR)
    default_backup_dir = relpath(settings.DEVFIXTURE_BACKUP_DIR)
    parser.add_argument('action', choices=['create', 'restore'])
    parser.add_argument(
        '-d', '--fixtures-dir', default=default_fixtures_dir,
        help='Fixtures directory. [default: %(default)s]'
    )
    parser.add_argument(
        '-b', '--backup-dir', default=default_backup_dir,
        help='Backup directory. [default: %(default)s]'
    )
    parser.add_argument(
        '-f', '--fixture-file', required=False,
        help='File to use to create/restore from. Note that if you use this, some of the auto detection '
             'features will not function properly'
    )
def create_index(startpath):
    parcels = [relpath(x, startpath) for x in glob(join(startpath, '*.parcel'))]
    html = """<html>
    <head>
        <title>Parcels</title>
    </head>
    <body>
"""
    for parcel in parcels:
        html += '        <p><a href="{}">{}</a></p>\n'.format(parcel, parcel)
    html += """
        <p><a href="manifest.json">manifest.json</a></p>
    </body>
</html>
"""
    return html
def run(self):
    testpath = 'src/test'
    buildlink = 'build/lib/test'

    if isdir(dirname(buildlink)):
        if islink(buildlink):
            os.unlink(buildlink)
        os.symlink(relpath(testpath, dirname(buildlink)), buildlink)
        testpath = buildlink

    try:
        os.environ['EPYTHON'] = 'python{}.{}'.format(
            sys.version_info.major, sys.version_info.minor)
        subprocess.check_call(
            ['py.test', '-v', testpath, '-s',
             '--cov-report=html', '--cov-report=term']
            + (['-k', self.match] if self.match else [])
            + ['--cov={}'.format(p)
               for p in find_packages(dirname(testpath), exclude=['test'])])
    finally:
        if islink(buildlink):
            os.unlink(buildlink)
def _write_git_file_and_module_config(cls, working_tree_dir, module_abspath):
    """Writes a .git file containing a (preferably) relative path to the actual git module repository.
    It is an error if the module_abspath cannot be made into a relative path, relative to the working_tree_dir

    :note: will overwrite existing files !
    :note: as we rewrite both the git file as well as the module configuration, we might fail on the
        configuration and will not roll back changes done to the git file. This should be a non-issue,
        but may easily be fixed if it becomes one
    :param working_tree_dir: directory to write the .git file into
    :param module_abspath: absolute path to the bare repository
    """
    git_file = osp.join(working_tree_dir, '.git')
    rela_path = osp.relpath(module_abspath, start=working_tree_dir)
    if is_win:
        if osp.isfile(git_file):
            os.remove(git_file)
    with open(git_file, 'wb') as fp:
        fp.write(("gitdir: %s" % rela_path).encode(defenc))

    with GitConfigParser(osp.join(module_abspath, 'config'),
                         read_only=False, merge_includes=False) as writer:
        writer.set_value('core', 'worktree',
                         to_native_path_linux(osp.relpath(working_tree_dir, start=module_abspath)))

#{ Edit Interface
def test_module2_recursive_es6(self):
    from calmjs.testing import module2
    calmjs_base_dir = abspath(join(
        indexer.modpath_pkg_resources(indexer)[0], pardir))
    results = {
        k: relpath(v, calmjs_base_dir)
        for k, v in indexer.mapper(module2, globber='recursive').items()
    }
    self.assertEqual(results, {
        'calmjs/testing/module2/index':
            to_os_sep_path('calmjs/testing/module2/index.js'),
        'calmjs/testing/module2/helper':
            to_os_sep_path('calmjs/testing/module2/helper.js'),
        'calmjs/testing/module2/mod/helper':
            to_os_sep_path('calmjs/testing/module2/mod/helper.js'),
    })
def test_module2_callables(self):
    from calmjs.testing import module2
    calmjs_base_dir = abspath(join(
        indexer.modpath_pkg_resources(indexer)[0], pardir))
    results = {
        k: relpath(v, calmjs_base_dir)
        for k, v in indexer.mapper(
            module2,
            globber=indexer.globber_recursive,
            modname=indexer.modname_python,
            modpath=indexer.modpath_pkg_resources,
        ).items()
    }
    self.assertEqual(results, {
        'calmjs.testing.module2.index':
            to_os_sep_path('calmjs/testing/module2/index.js'),
        'calmjs.testing.module2.helper':
            to_os_sep_path('calmjs/testing/module2/helper.js'),
        'calmjs.testing.module2.mod.helper':
            to_os_sep_path('calmjs/testing/module2/mod/helper.js'),
    })
def relpath(path, start=os.path.curdir):
    """Return a relative version of a path"""
    from os.path import sep, curdir, join, abspath, commonprefix, \
        pardir

    if not path:
        raise ValueError("no path specified")

    start_list = abspath(start).split(sep)
    path_list = abspath(path).split(sep)

    # Work out how much of the filepath is shared by start and path.
    i = len(commonprefix([start_list, path_list]))

    rel_list = [pardir] * (len(start_list) - i) + path_list[i:]
    if not rel_list:
        return curdir
    return join(*rel_list)
def find_files(self, root):
    a4js_paths = super().find_files(path.join(
        settings.BASE_DIR, 'node_modules', 'adhocracy4', 'adhocracy4'
    ))
    a4_paths = super().find_files(get_module_dir('adhocracy4'))
    mbjs_paths = super().find_files(path.join(
        settings.BASE_DIR, 'node_modules', 'a4-meinberlin', 'meinberlin'
    ))
    mb_paths = super().find_files(get_module_dir('meinberlin'))

    liqd_product_paths = super().find_files(
        path.relpath(get_module_dir('liqd_product'))
    )

    return a4js_paths + a4_paths + \
        mbjs_paths + mb_paths + \
        liqd_product_paths
def relpath(path, start=os.path.curdir):
    """Return a relative version of a path"""
    if not path:
        raise ValueError("no path specified")

    start_list = os.path.abspath(start).split(os.path.sep)
    path_list = os.path.abspath(path).split(os.path.sep)

    # Work out how much of the filepath is shared by start and path.
    i = len(os.path.commonprefix([start_list, path_list]))

    rel_list = [os.path.pardir] * (len(start_list) - i) + path_list[i:]
    if not rel_list:
        return os.path.curdir
    return os.path.join(*rel_list)
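The backport above mirrors the standard library's semantics. A quick sanity check on POSIX-style paths (the directories are made up, purely illustrative; on Windows the separators would differ):

import os.path

# These hold for os.path.relpath on POSIX, and equally for the
# pure-Python backport above:
assert os.path.relpath('/a/b/c', '/a') == 'b/c'
assert os.path.relpath('/a/b', '/a/b') == '.'    # identical paths -> curdir
assert os.path.relpath('/a/b', '/a/c') == '../b'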
def get_url_transform(input_root, soft_dep):
    '''
    This meta function returns a url transformation function
    which maps absolute urls to relative urls.
    '''
    def url_transform(url):
        res = urlparse(url)
        # Do nothing if url points to the www
        if res.netloc:
            return url
        if res.path.startswith('/'):
            url = res.path.lstrip('/')
            soft_dep.append(url)
            url = path.relpath(url, input_root)
        else:
            soft_dep.append(path.join(input_root, res.path))
        return url
    return url_transform
def start_pool():
    pool = Pool(processes=int(configs["threads"]))
    logging.info("Plugin math_check_online @ total formulas to check: {}".
                 format(len(formulas)))
    pool.map(check_math, formulas)

    # saving results
    logging.info("GOOD FORMULAS: {} --- BAD FORMULAS: {}".format(
        len(formulas) - bad_formulas.qsize(), bad_formulas.qsize()))
    log_matherrors_file_path = path.relpath("debug/math_errors.txt")
    with open(log_matherrors_file_path, "w") as f:
        f.write("Math Errors Tree Log: \n")
        f.write("---------------------\n")
        ids = []
        while not bad_formulas.empty():
            form = bad_formulas.get()
            ids.append(form[1])
        output = tree_explorer.print_tree_to_blocks(ids)
        f.write(output + "\n\n")
def __async_gather_candidates(self, context, timeout):
    outs, errs = context['__proc'].communicate(timeout=timeout)
    if errs:
        self.error_message(context, errs)
    context['is_async'] = not context['__proc'].eof()
    if context['__proc'].eof():
        context['__proc'] = None

    candidates = []
    for line in outs:
        result = util.parse_jump_line(context['path'], line)
        if not result:
            continue
        path = relpath(result[0], start=context['path'])
        candidates.append(_candidate(result, path))
    return candidates
def __async_gather_candidates(self, context, timeout):
    outs, errs = context['__proc'].communicate(timeout=timeout)
    if errs:
        self.error_message(context, errs)
    context['is_async'] = not context['__proc'].eof()
    if context['__proc'].eof():
        context['__proc'] = None
    if not outs:
        return []

    if isabs(outs[0]):
        candidates = [{
            'word': relpath(x, start=context['__directory']),
            'action__path': x,
        } for x in outs if x != '']
    else:
        candidates = [{
            'word': x,
            'action__path': join(context['__directory'], x),
        } for x in outs if x != '']

    context['__current_candidates'] += candidates
    if (len(context['__current_candidates']) >=
            self.vars['min_cache_files']):
        self.__cache[context['__directory']] = context[
            '__current_candidates']
    return candidates
def should_ignore(path):
    log.debug('should ignore %s ?' % path)
    items = relpath(path, cfg.watch_dir).split('/')
    log.debug('items: %s' % items)
    subdirs = items[0:-1]
    log.debug('subdirs: %s' % subdirs)
    if path == cfg.cfg_path:
        log.debug('CONFIG CHANGED - RELOAD')
        cfg.read()
    if is_excluded_subdir(subdirs):
        return True
    file_name = items[-1]
    log.debug('file_name: %s' % file_name)
    if is_excluded_file(file_name):
        return True
    log.debug("don't ignore")
    return False
def _gather_files(input_root, output_dir, skip_dirs, wiki_only):
    if not exists(output_dir):
        mkdir(output_dir)

    all_files = []
    for root, dirs, filenames in walk(input_root):
        if skip_dirs:
            output = join(output_dir, relpath(root, input_root))
            if exists(output):
                continue
        path = relpath(root, input_root)
        normalized_path = normalize_wiki_filename(path)
        if not exists(join(output_dir, normalized_path)):
            mkdir(join(output_dir, normalized_path))
        all_files += [join(path, x) for x in filenames]

    if wiki_only:
        all_files = [x for x in all_files if "wikipedia/" in x]
    return all_files
def enable_snapshot_loading_after_initialization(trainer, *, continue_last=None, continue_from=None):
    assert continue_last is None or continue_from is None

    def load_snapshot_on_initialization_after(trainer):
        snapshot_dir = get_snapshot_dir()
        fpath = None
        if continue_last:
            fpath = osp.join(snapshot_dir, 'last_epoch' + __snapshot_ext__)
        if continue_from:
            fpath = osp.join(snapshot_dir, 'epoch_{}'.format(continue_from) + __snapshot_ext__)
        if fpath:
            if load_snapshot_file(trainer, fpath):
                fpath_real = osp.relpath(osp.realpath(fpath), osp.dirname(fpath))
                logger.info('Restored snapshot from {} (aka. {}), continue={}.'.format(
                    fpath, fpath_real, continue_last, continue_from))
                trainer.runtime['restore_snapshot'] = fpath

    trainer.register_event('initialization:after', load_snapshot_on_initialization_after, priority=25)
def copy_dependencies(binary_path, lib_path):
    relative_path = path.relpath(lib_path, path.dirname(binary_path)) + "/"

    # Update binary libraries
    binary_dependencies = [lib for lib in set(otool(binary_path))
                           if not lib.startswith("@rpath")]
    change_non_system_libraries_path(binary_dependencies, relative_path, binary_path)

    # Update dependencies libraries
    need_checked = binary_dependencies
    checked = set()
    while need_checked:
        checking = set(need_checked)
        need_checked = set()
        for f in checking:
            # No need to check these for their dylibs
            if is_system_library(f):
                continue
            need_relinked = set(otool(f))
            new_path = path.join(lib_path, path.basename(f))
            if not path.exists(new_path):
                shutil.copyfile(f, new_path)
            change_non_system_libraries_path(need_relinked, relative_path, new_path)
            need_checked.update(need_relinked)
        checked.update(checking)
        need_checked.difference_update(checked)
def process_remote_include(repo, revision, path, from_heading, to_heading,
                           options, sources_dir, target_dir):
    """Replace a remote include statement with the file content.

    This involves cloning or updating the git repository with the file
    and processing the include as a regular local one.
    """
    repo_path = gitutils.sync_repo(repo, target_dir, revision)

    return process_local_include(
        ospa.relpath(ospa.join(repo_path, path), sources_dir),
        from_heading, to_heading, options, sources_dir, target_dir
    )
def getXMLNode(self, xmlNode):
    # Prepare Attributes
    self.attributes["id"] = self.id
    if self.guid:
        self.attributes['guid'] = self.guid

    # Generate Node
    nodeInputDataset = ET.SubElement(xmlNode, self.type, self.attributes)
    nodeInputDatasetName = ET.SubElement(nodeInputDataset, "Name")
    nodeInputDatasetName.text = self.name
    if self.relpath:
        nodeInputDatasetPath = ET.SubElement(nodeInputDataset, "Path")
        nodeInputDatasetPath.text = self.relpath
    if self.metadata:
        nodeInputDatasetMetaData = ET.SubElement(nodeInputDataset, "MetaData")
        for metaName, metaValue in self.metadata.iteritems():
            nodeInputDatasetMeta = ET.SubElement(nodeInputDatasetMetaData, "Meta", {"name": metaName})
            nodeInputDatasetMeta.text = metaValue
    return nodeInputDataset
def getXMLNode(self, xmlNode):
    # Prepare Attributes
    attributes = {}
    attributes["id"] = self.id
    if self.guid:
        attributes['Guid'] = self.guid

    # Generate Node
    nodeInputDataset = ET.SubElement(xmlNode, self.type, attributes)
    nodeInputDatasetName = ET.SubElement(nodeInputDataset, "Name")
    nodeInputDatasetName.text = self.name
    nodeInputDatasetPath = ET.SubElement(nodeInputDataset, "Path")
    nodeInputDatasetPath.text = self.relpath
    if self.metadata:
        nodeInputDatasetMetaData = ET.SubElement(nodeInputDataset, "MetaData")
        for metaName, metaValue in self.metadata.iteritems():
            nodeInputDatasetMeta = ET.SubElement(nodeInputDatasetMetaData, "Meta", {"name": metaName})
            nodeInputDatasetMeta.text = metaValue
    return xmlNode
def test_supports_gzip(self):
    filename = relpath(__file__)
    with HTTPServerFixture() as httpd:
        url = urljoin(httpd.url, filename)
        headers = {'Accept-Encoding': 'gzip, deflate'}
        request = Request(url, None, headers=headers)
        with closing(urlopen(request)) as http_in:
            http_headers = http_in.info()
            http_data_in = http_in.read()
    self.assertEqual('gzip', http_headers['Content-Encoding'])
    with open(filename, "rb") as file_in:
        file_data_in = file_in.read()
    http_data_decompressed = self.ungzip(http_data_in)
    self.assertEqual(
        file_data_in, http_data_decompressed,
        "The content of %s differs from %s." % (url, filename))
def do_POST(self):  # pylint: disable=invalid-name
    form = cgi.FieldStorage(
        fp=self.rfile,
        headers=self.headers,
        environ={
            'REQUEST_METHOD': 'POST',
            'CONTENT_TYPE': self.headers['Content-Type']})
    uploaded_filename = form['file'].filename
    dest_path = self.get_unused_filename(uploaded_filename)
    self.log_message('Writing "{}" to: {}'
                     .format(uploaded_filename, dest_path))
    with open(dest_path, 'wb') as output:
        stream_copy(form['file'].file, output)
    # editor.reload_files()
    dest_filename = path.relpath(dest_path, self.request_abspath)
    rename_message = ' (renamed to {})'.format(dest_filename) \
        if uploaded_filename != dest_filename \
        else ''
    message = '{} uploaded{}.'.format(uploaded_filename, rename_message)
    self.write_response(
        alert_message=message)
def watch(file, from_keymap=False):
    global CMD_CACHE
    conf = loadConfig(file)
    if not conf:
        return
    if not from_keymap and CMD_CACHE.get(file):
        return CMD_CACHE.get(file)
    else:
        ref = []
        build = []
        rel_file = path.relpath(file, conf.dir)
        for listener in conf.listeners:
            if from_keymap:
                if listener.watch("BUILD"):
                    build.append(Cmd.parse(file, listener))
                    continue
            if listener.watch("NOBUILD"):
                continue
            if listener.watch(rel_file):
                ref.append(Cmd.parse(file, listener))
        if len(ref):
            CMD_CACHE[file] = ref
        if len(ref) or len(build):
            return ref + build
def path_is_under(path, under):
    relpath = os.path.relpath(path, under).split('/')
    return not relpath[:1] == ['..']
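A quick illustration of the helper above (a sketch with made-up paths; note that it splits on a literal '/', so it assumes POSIX-style separators):

# Assuming the path_is_under() definition above:
print(path_is_under('/srv/app/static/css', '/srv/app'))  # True  -> relpath is 'static/css'
print(path_is_under('/srv/other', '/srv/app'))           # False -> relpath is '../other'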
def security_check(path):
    if not os.path.exists(path):
        return 'aactivator: File does not exist: ' + path
    insecure_path = insecure(path)
    if insecure_path is not None:
        return (
            'aactivator: Cowardly refusing to source {0} because writeable by others: {1}'
            .format(relpath(path), relpath(insecure_path))
        )
def path_shorten(self, abspath):
    return path.relpath(abspath, self.internal["@dir"])
def findByValue(self, value, project_path, omit_files=None):
    result = {}
    regex_for_files = self.buildRegexForFiles(value)
    regex_for_extra_files = self.buildRegexForExtraFiles(value)
    ignored_paths = [
        path.normpath(epath) for epath in self.getSetting("ignore", [])
    ]

    for dirpath, dirnames, filenames in walk(project_path, topdown=True):
        relative_dir = path.relpath(dirpath, project_path)

        # Change excluding folders
        dirnames[:] = [dirname for dirname in dirnames
                       if path.normpath(path.join(relative_dir, dirname))
                       not in ignored_paths]

        for filename in filenames:
            if omit_files and path.join(relative_dir, filename) in omit_files:
                continue

            # Find files with name equal to the value
            if self.matchFilePathWithRegex(
                filename, regex_for_files, dirpath=relative_dir
            ):
                if "files" not in result:
                    result["files"] = []
                result["files"].append(path.join(relative_dir, filename))
            elif self.matchFilePathWithRegex(
                filename, regex_for_extra_files, dirpath=relative_dir,
                is_extra=True
            ):
                if "extra_files" not in result:
                    result["extra_files"] = []
                result["extra_files"].append(path.join(relative_dir, filename))
        pass

    return result
def parseOptionItem(self, option, view_relpath):
    if not option:
        return option

    # Make every path relative to view file
    if option["key"] not in ["modules", "module_exports", "module_files"]:
        option["value"] = self.parsePath(
            path.normpath(
                path.relpath(option["value"], view_relpath)
            )
        )
    return option
def relpath(target, base=os.curdir):
    """
    Return a relative path to the target from either the current dir or an
    optional base dir. Base can be a directory specified either as absolute
    or relative to current dir.
    """
    if not os.path.exists(target):
        raise OSError('Target does not exist: ' + target)
    if not os.path.isdir(base):
        raise OSError('Base is not a directory or does not exist: ' + base)

    base_list = (os.path.abspath(base)).split(os.sep)
    target_list = (os.path.abspath(target)).split(os.sep)

    # On the windows platform the target may be on a completely
    # different drive from the base.
    if os.name in ['nt', 'dos', 'os2'] and base_list[0] != target_list[0]:
        raise OSError('Target is on a different drive to base. Target: '
                      + target_list[0].upper() + ', base: ' + base_list[0].upper())

    # Starting from the filepath root, work out how much of the
    # filepath is shared by base and target.
    for i in range(min(len(base_list), len(target_list))):
        if base_list[i] != target_list[i]:
            break
    else:
        # If we broke out of the loop, i is pointing to the first
        # differing path elements. If we didn't break out of the
        # loop, i is pointing to identical path elements.
        # Increment i so that in all cases it points to the first
        # differing path elements.
        i += 1

    rel_list = [os.pardir] * (len(base_list) - i) + target_list[i:]
    return os.path.join(*rel_list)
def _get_name_from_path(self, path):
    if path == self._top_level_dir:
        return '.'
    path = _jython_aware_splitext(os.path.normpath(path))

    _relpath = relpath(path, self._top_level_dir)
    assert not os.path.isabs(_relpath), "Path must be within the project"
    assert not _relpath.startswith('..'), "Path must be within the project"

    name = _relpath.replace(os.path.sep, '.')
    return name
def iter_files(folder):
    """Recursively iterate all files in a given root directory."""
    for root, dirnames, filenames in walk(folder):
        for f in filenames:
            f = join(root, f)
            yield (f, relpath(f, folder))
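Run against a hypothetical layout (a sketch assuming a data/ tree containing a.txt and sub/b.txt), the generator above yields pairs of the full path and the path relative to the root:

# Assuming the iter_files() definition above and the made-up tree:
#   data/a.txt
#   data/sub/b.txt
for full, rel in iter_files('data'):
    print(full, '->', rel)
# data/a.txt -> a.txt
# data/sub/b.txt -> sub/b.txt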
def run(self, force=None):
    """Copy files from source to destination."""
    force = force or {}
    for fsrc, relpath in self:
        fdst = join(self.dstdir, relpath)
        fdstdir = dirname(fdst)
        if not exists(fdstdir):
            makedirs(fdstdir)
        self._copyfile(fsrc, fdst, force=relpath in force)
def test_array_info(self):
    variables, coords = self._from_dataset_test_variables
    variables['v4'] = variables['v3'].copy()
    ds = xr.Dataset(variables, coords)
    fname = osp.relpath(bt.get_file('test-t2m-u-v.nc'), '.')
    ds2 = xr.open_dataset(fname)
    l = ds.psy.create_list(
        name=[['v1', ['v3', 'v4']], ['v1', 'v2']], prefer_list=True)
    l.extend(ds2.psy.create_list(name=['t2m'], x=0, t=1), new_name=True)
    self.assertEqual(l.array_info(engine='netCDF4'), OrderedDict([
        # first list containing an array with two variables
        ('arr0', OrderedDict([
            ('arr0', {'dims': {'t': slice(None), 'x': slice(None)},
                      'attrs': OrderedDict(), 'store': (None, None),
                      'name': 'v1', 'fname': None}),
            ('arr1', {'dims': {'y': slice(None)}, 'attrs': OrderedDict(),
                      'store': (None, None), 'name': [['v3', 'v4']],
                      'fname': None}),
            ('attrs', OrderedDict())])),
        # second list with two arrays containing each one variable
        ('arr1', OrderedDict([
            ('arr0', {'dims': {'t': slice(None), 'x': slice(None)},
                      'attrs': OrderedDict(), 'store': (None, None),
                      'name': 'v1', 'fname': None}),
            ('arr1', {'dims': {'y': slice(None), 'x': slice(None)},
                      'attrs': OrderedDict(), 'store': (None, None),
                      'name': 'v2', 'fname': None}),
            ('attrs', OrderedDict())])),
        # last array from real dataset
        ('arr2', {'dims': {'z': slice(None), 'y': slice(None),
                           't': 1, 'x': 0},
                  'attrs': ds2.t2m.attrs,
                  'store': ('xarray.backends.netCDF4_', 'NetCDF4DataStore'),
                  'name': 't2m', 'fname': fname}),
        ('attrs', OrderedDict())]))
    return l
def info_for(src, topdir):
    # ...it MUST not include a ".." directory, and for files not "under" that
    # directory, absolute pathnames must be used. [2]
    if topdir is None or not is_parent(topdir, src):
        src = op.abspath(src)
    else:
        src = op.relpath(src, topdir)

    info = "[Trash Info]\n"
    info += "Path=" + quote(src) + "\n"
    info += "DeletionDate=" + format_date(datetime.now()) + "\n"
    return info
def is_subdir(path: str, directory: str) -> bool:
    if path is None:
        return False
    path = realpath(path)
    directory = realpath(directory)
    relative = relpath(path, directory)
    if relative.startswith(os.pardir):
        return False
    return True
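A brief demonstration with made-up paths (a sketch; a stricter variant would test against os.pardir + os.sep, since startswith(os.pardir) also rejects a child whose name merely begins with '..'):

# Assuming the is_subdir() definition above:
print(is_subdir('/opt/app/logs', '/opt/app'))  # True  (relpath is 'logs')
print(is_subdir('/opt/other', '/opt/app'))     # False (relpath is '../other')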
def copylib(src_path, dest_dir):
    """Graft a shared library from the system into the wheel and update the
    relevant links.

    1) Copy the file from src_path to dest_dir/
    2) Rename the shared object from soname to soname.<unique>
    3) If the library has a RUNPATH/RPATH, update that to point to its new
       location.
    """
    # Copy a shared library from the system (src_path) into the wheel;
    # if the library has a RUNPATH/RPATH to its current location on the
    # system, we also update that to point to its new location.
    with open(src_path, 'rb') as f:
        shorthash = hashfile(f)[:8]

    src_name = os.path.basename(src_path)
    base, ext = src_name.split('.', 1)
    if not base.endswith('-%s' % shorthash):
        new_soname = '%s-%s.%s' % (base, shorthash, ext)
    else:
        new_soname = src_name

    dest_path = os.path.join(dest_dir, new_soname)
    if os.path.exists(dest_path):
        return new_soname, dest_path

    print('Grafting: %s -> %s' % (src_path, dest_path))
    rpaths = elf_read_rpaths(src_path)
    shutil.copy2(src_path, dest_path)

    verify_patchelf()
    check_call(['patchelf', '--set-soname', new_soname, dest_path])

    for rp in itertools.chain(rpaths['rpaths'], rpaths['runpaths']):
        if is_subdir(rp, os.path.dirname(src_path)):
            patchelf_set_rpath(dest_path, pjoin(
                dirname(dest_path), relpath(rp, dirname(src_path))))
            break

    return new_soname, dest_path
def patchelf_set_rpath(fn, libdir):
    rpath = pjoin('$ORIGIN', relpath(libdir, dirname(fn)))
    print('Setting RPATH: %s to "%s"' % (fn, rpath))
    check_call(['patchelf', '--force-rpath', '--set-rpath', rpath, fn])
def init():
    variables["exto"][0] = relpath(getpath.tmp(), getpath.main_module())
    variables["dict"][0] = relpath(getpath.db() + "dazzlepod.txt", getpath.main_module())
def init():
    variables["output"][0] = relpath(getpath.db() + "wordlist", getpath.main_module())
def test_module1_loader_es6(self):
    from calmjs.testing import module1
    calmjs_base_dir = abspath(join(
        indexer.modpath_pkg_resources(indexer)[0], pardir))
    results = {
        k: relpath(v, calmjs_base_dir)
        for k, v in indexer.mapper_es6(module1).items()
    }
    self.assertEqual(results, {
        'calmjs/testing/module1/hello':
            to_os_sep_path('calmjs/testing/module1/hello.js'),
    })
def test_module1_loader_python(self):
    from calmjs.testing import module1
    calmjs_base_dir = abspath(join(
        indexer.modpath_pkg_resources(indexer)[0], pardir))
    results = {
        k: relpath(v, calmjs_base_dir)
        for k, v in indexer.mapper_python(module1).items()
    }
    self.assertEqual(results, {
        'calmjs.testing.module1.hello':
            to_os_sep_path('calmjs/testing/module1/hello.js'),
    })
def test_module2_recursive_es6_legacy(self):
    # ensure legacy behavior is maintained, where a single argument
    # is accepted by the modpath function.

    def modpath_last(module):
        return indexer.modpath_last(module)

    from calmjs.testing import module2
    calmjs_base_dir = abspath(join(
        indexer.modpath_pkg_resources(indexer)[0], pardir))

    with pretty_logging(stream=StringIO()) as fd:
        with warnings.catch_warnings(record=True) as w:
            warnings.simplefilter('always')
            results = {
                k: relpath(v, calmjs_base_dir)
                for k, v in indexer.mapper(
                    module2, modpath=modpath_last, globber='recursive',
                ).items()
            }

    self.assertIn(
        "method will need to accept entry_point argument by calmjs-",
        str(w[-1].message)
    )
    self.assertIn(
        "method will need to accept entry_point argument by calmjs-",
        fd.getvalue()
    )
    self.assertEqual(results, {
        'calmjs/testing/module2/index':
            to_os_sep_path('calmjs/testing/module2/index.js'),
        'calmjs/testing/module2/helper':
            to_os_sep_path('calmjs/testing/module2/helper.js'),
        'calmjs/testing/module2/mod/helper':
            to_os_sep_path('calmjs/testing/module2/mod/helper.js'),
    })
def list_files(data_dir):
    output = []
    for root, _, filenames in walk(data_dir):
        for filename in filenames:
            if not filename.startswith('.'):
                output.append(path.join(root, filename))
    output = [path.relpath(p, path.dirname(data_dir)) for p in output]
    output.append('meta.json')
    return output
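Note that relativizing against path.dirname(data_dir) keeps the directory's own name as the first path component. A sketch with a hypothetical tree (output order follows the directory walk and may vary):

# Assuming the list_files() definition above and the made-up tree:
#   model/weights.bin
#   model/vocab/tokens.txt
print(list_files('model'))
# ['model/weights.bin', 'model/vocab/tokens.txt', 'meta.json']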
def get_thumb_path(self, base_dir):
    """Get the relative path to the thumbnail of this notebook"""
    return os.path.relpath(self.thumb_file, base_dir)
def test_linkgalleries(self):
    """Test the directive"""
    self.src_dir2 = self.src_dir
    self.out_dir2 = self.out_dir
    os.environ['LINKGALLERYTO'] = self.out_dir
    fname = osp.join(
        self.out_dir, 'examples', 'example_mpl_test.html')
    self.assertTrue(osp.exists(fname), msg=fname + ' is missing!')
    thumbnail = osp.join(
        self.out_dir, '_images',
        'gallery_' + self.src_dir.replace(os.path.sep, '_').lower() +
        '_examples_example_mpl_test.ipynb_thumb.png')
    self.assertTrue(osp.exists(thumbnail), msg=thumbnail + ' is missing!')

    # create a setup with the links
    self.setUp()
    html_path = osp.join(self.out_dir, 'index.html')
    self.assertTrue(osp.exists(html_path), msg=html_path + ' is missing!')
    with open(html_path) as f:
        html = f.read()
    self.assertIn(thumbnail, html)

    # test with new thumbnail to test the linkgalleries with its own
    # project thumbnails
    thumbnails = glob.glob(osp.join(
        self.out_dir, '_images',
        'gallery_' + self.src_dir.replace(os.path.sep, '_').lower() +
        '_examples_example_mpl_test.ipynb_thumb*.png'))
    self.assertTrue(thumbnails)  # check that some thumbnails are found
    self.assertTrue(any(osp.relpath(f, self.out_dir) in html
                        for f in thumbnails),
                    msg='None of %s found in %s' % (thumbnails, html))
def relpath(path, start=os.path.curdir):
    """Return a relative version of a path"""
    from os.path import sep, curdir, join, abspath, commonprefix, \
        pardir, splitunc

    if not path:
        raise ValueError("no path specified")

    start_list = abspath(start).split(sep)
    path_list = abspath(path).split(sep)
    if start_list[0].lower() != path_list[0].lower():
        unc_path, rest = splitunc(path)
        unc_start, rest = splitunc(start)
        if bool(unc_path) ^ bool(unc_start):
            raise ValueError("Cannot mix UNC and non-UNC paths (%s and %s)"
                             % (path, start))
        else:
            raise ValueError("path is on drive %s, start on drive %s"
                             % (path_list[0], start_list[0]))

    # Work out how much of the filepath is shared by start and path.
    for i in range(min(len(start_list), len(path_list))):
        if start_list[i].lower() != path_list[i].lower():
            break
    else:
        i += 1

    rel_list = [pardir] * (len(start_list) - i) + path_list[i:]
    if not rel_list:
        return curdir
    return join(*rel_list)