我们从Python开源项目中,提取了以下50个代码示例,用于说明如何使用pkg_resources.WorkingSet()。
def test_runtime_main_with_broken_runtime(self):
    """A broken runtime entry point is reported on stderr, not stdout."""
    stub_stdouts(self)
    working_set = mocks.WorkingSet({'calmjs.runtime': [
        'broken = calmjs.tests.test_runtime:broken',
    ]})
    with self.assertRaises(SystemExit):
        runtime.main(
            ['-vvd', '-h'],
            runtime_cls=lambda: runtime.Runtime(working_set=working_set)
        )
    out = sys.stdout.getvalue()
    err = sys.stderr.getvalue()
    # with the -vvd flags the failure details, traceback included, are
    # expected on stderr only
    self.assertIn('broken', err)
    self.assertIn('Traceback', err)
    self.assertIn('a fake import error', err)
    self.assertNotIn('broken', out)
def test_runtime_group_not_runtime_reported(self):
    """An entry point that is not a BaseRuntime is rejected with a log."""
    stub_stdouts(self)
    make_dummy_dist(self, ((
        'entry_points.txt',
        '[calmjs.runtime]\n'
        'bs = calmjs.testing.module3.runtime:fake_bootstrap\n'
    ),), 'example.package', '1.0')
    working_set = pkg_resources.WorkingSet([self._calmjs_testing_tmpdir])
    with self.assertRaises(SystemExit):
        runtime.main(
            ['-h'],
            runtime_cls=lambda: runtime.Runtime(working_set=working_set)
        )
    self.assertIn(
        "'calmjs.runtime' entry point "
        "'bs = calmjs.testing.module3.runtime:fake_bootstrap' from "
        "'example.package 1.0' invalid for instance of "
        "'calmjs.runtime.Runtime': target not an instance of "
        "'calmjs.runtime.BaseRuntime' or its subclass; not registering "
        "invalid entry point",
        sys.stderr.getvalue()
    )
def test_wrong_registry_type(self):
    """A non-loaderplugin registry in the spec is rejected with a warning."""
    advice = AdviceRegistry('adv', _working_set=WorkingSet({}))
    registries = {'adv': advice}
    stub_item_attr_value(
        self, calmjs_toolchain, 'get_registry', registries.get)
    # referencing the wrong registry type by name in the spec
    spec = {'calmjs_loaderplugin_registry_name': 'adv'}
    with pretty_logging(stream=StringIO()) as s:
        registry = spec_update_loaderplugin_registry(spec)
    self.assertIn(
        "object referenced in spec is not a valid", s.getvalue())
    self.assertIsNot(registry, advice)
    self.assertTrue(isinstance(registry, BaseLoaderPluginRegistry))
    # referencing the wrong registry type as the provided default
    spec = {}
    with pretty_logging(stream=StringIO()) as s:
        registry = spec_update_loaderplugin_registry(spec, default='adv')
    self.assertIn(
        "provided default is not a valid loaderplugin registry",
        s.getvalue())
    self.assertIsNot(registry, advice)
    self.assertTrue(isinstance(registry, BaseLoaderPluginRegistry))
def test_resolve_and_order(self):
    """Spec registry name should resolve to the stubbed registry object."""
    fake = LoaderPluginRegistry('fake_registry', _working_set=WorkingSet({
        'fake_registry': [
            'foo = calmjs.tests.test_toolchain:MockLPHandler']}))
    registries = {'fake_registry': fake}
    stub_item_attr_value(
        self, calmjs_toolchain, 'get_registry', registries.get)
    spec = {'calmjs_loaderplugin_registry_name': 'fake_registry'}
    with pretty_logging(stream=StringIO()) as s:
        registry = spec_update_loaderplugin_registry(spec)
    self.assertIn(
        "using loaderplugin registry 'fake_registry'", s.getvalue())
    self.assertIs(registry, fake)
    spec = {
        'calmjs_loaderplugin_registry_name': 'fake_registry',
        'calmjs_loaderplugin_registry': BaseLoaderPluginRegistry('raw'),
    }
    # NOTE(review): the example appears truncated here -- the second
    # spec set up above is never exercised; confirm against the
    # upstream test suite.
def test_standard_toolchain_process(self):
    """Registered advice for a package is applied to the spec."""
    make_dummy_dist(self, ((
        'entry_points.txt',
        '[calmjs.toolchain.advice]\n'
        'calmjs.toolchain:Toolchain = calmjs.tests.test_toolchain:dummy\n'
    ),), 'example.package', '1.0')
    working_set = pkg_resources.WorkingSet([self._calmjs_testing_tmpdir])
    reg = AdviceRegistry(CALMJS_TOOLCHAIN_ADVICE, _working_set=working_set)
    toolchain = Toolchain()
    spec = Spec()
    with pretty_logging(stream=StringIO()) as s:
        reg.process_toolchain_spec_package(
            toolchain, spec, 'example.package')
    self.assertEqual(spec, {'dummy': ['dummy']})
    self.assertIn(
        "found advice setup steps registered for package/requirement "
        "'example.package' for toolchain 'calmjs.toolchain:Toolchain'",
        s.getvalue(),
    )
def test_standard_toolchain_advice_extras(self):
    """Requirement extras in the package string end up in spec['extras']."""
    make_dummy_dist(self, ((
        'entry_points.txt',
        '[calmjs.toolchain.advice]\n'
        'calmjs.toolchain:NullToolchain = '
        'calmjs.tests.test_toolchain:dummy\n'
    ),), 'example.package', '1.0')
    working_set = pkg_resources.WorkingSet([self._calmjs_testing_tmpdir])
    reg = AdviceRegistry(CALMJS_TOOLCHAIN_ADVICE, _working_set=working_set)
    toolchain = NullToolchain()
    spec = Spec()
    with pretty_logging(stream=StringIO()) as s:
        reg.process_toolchain_spec_package(
            toolchain, spec, 'example.package[a,bc,d]')
    self.assertEqual(spec['extras'], ['a', 'bc', 'd'])
    self.assertIn(
        "found advice setup steps registered for package/requirement "
        "'example.package[a,bc,d]' for toolchain ", s.getvalue()
    )
def test_toolchain_spec_prepare_loaderplugins_standard(self):
    """Sourcepath maps merge into one map plus a loaders mapping."""
    reg = LoaderPluginRegistry('simple', _working_set=WorkingSet({
        'simple': [
            'foo = calmjs.tests.test_toolchain:MockLPHandler',
            'bar = calmjs.tests.test_toolchain:MockLPHandler',
        ],
    }))
    spec = Spec(
        calmjs_loaderplugin_registry=reg,
        loaderplugin_sourcepath_maps={
            'foo': {'foo!thing': 'thing'},
            'bar': {'bar!thing': 'thing'},
        },
    )
    toolchain_spec_prepare_loaderplugins(
        self.toolchain, spec, 'loaderplugin', 'loaders')
    self.assertEqual({
        'foo!thing': 'thing',
        'bar!thing': 'thing',
    }, spec['loaderplugin_sourcepath'])
    self.assertEqual({
        'foo': 'foo',
        'bar': 'bar',
    }, spec['loaders'])
def test_mismatched_ns(self):
    """Entry point resolution with a non-matching namespace declaration."""
    # mismatch includes a package that doesn't actually have the
    # directory created
    d_egg_root = join(mkdtemp(self), 'dummyns')
    make_dummy_dist(self, ((
        'namespace_packages.txt',
        'not_ns\n',
    ), (
        'entry_points.txt',
        '[dummyns]\n'
        'dummyns = dummyns:attr\n',
    ),), 'dummyns', '1.0', working_dir=d_egg_root)
    working_set = pkg_resources.WorkingSet([
        d_egg_root,
        self.ds_egg_root,
    ])
    stub_item_attr_value(self, pkg_resources, 'working_set', working_set)
    dummyns_ep = next(working_set.iter_entry_points('dummyns'))
    p = indexer.resource_filename_mod_entry_point('dummyns', dummyns_ep)
    self.assertEqual(normcase(p), normcase(self.dummyns_path))
def test_not_namespace(self):
    """Resolution falls back when the dist declares no namespace packages."""
    d_egg_root = join(mkdtemp(self), 'dummyns')
    make_dummy_dist(self, ((
        'entry_points.txt',
        '[dummyns]\n'
        'dummyns = dummyns:attr\n',
    ),), 'dummyns', '1.0', working_dir=d_egg_root)
    working_set = pkg_resources.WorkingSet([
        d_egg_root,
        self.ds_egg_root,
    ])
    stub_item_attr_value(self, pkg_resources, 'working_set', working_set)
    # create the module directory, but note there is no
    # namespace_packages.txt metadata in the dist above
    moddir = join(d_egg_root, 'dummyns')
    os.makedirs(moddir)
    dummyns_ep = next(working_set.iter_entry_points('dummyns'))
    p = indexer.resource_filename_mod_entry_point('dummyns', dummyns_ep)
    self.assertEqual(normcase(p), normcase(self.dummyns_path))
def test_nested_namespace(self):
    """Nested namespace entry points resolve to an existing directory."""
    self.called = None

    def _exists(p):
        # record the path the indexer probed before delegating to the
        # real exists()
        self.called = p
        return exists(p)

    working_set = pkg_resources.WorkingSet([
        self.ds_egg_root,
    ])
    stub_item_attr_value(self, pkg_resources, 'working_set', working_set)
    stub_item_attr_value(self, indexer, 'exists', _exists)
    dummyns_ep = next(working_set.iter_entry_points('dummyns.submod'))
    p = indexer.resource_filename_mod_entry_point(
        'dummyns.submod', dummyns_ep)
    self.assertEqual(p, self.called)
    with open(join(p, 'data.txt')) as fd:
        data = fd.read()
    self.assertEqual(data, self.nested_data)
def setUp(self):
    """Prepare a dummy 'foo' dist and stub the default working set."""
    remember_cwd(self)
    app = make_dummy_dist(self, (
        ('requires.txt', '\n'.join([])),
        ('package.json', json.dumps({
            'dependencies': {'jquery': '~1.11.0'},
        })),
    ), 'foo', '1.9.0')
    working_set = WorkingSet()
    working_set.add(app, self._calmjs_testing_tmpdir)
    # Stub out the flatten_egginfo_json calls with one that uses our
    # custom working_set here.
    stub_item_attr_value(self, dist, 'default_working_set', working_set)
    # Quiet stdout from distutils logs
    stub_stdouts(self)
    # Force auto-detected interactive mode to True, because this is
    # typically executed within an interactive context.
    stub_check_interactive(self, True)
def tests_flatten_egginfo_json_missing_deps(self):
    """
    Missing dependencies should not cause a hard failure.
    """
    make_dummy_dist(self, (
        ('requires.txt', '\n'.join([
            'uilib>=1.0',
        ])),
    ), 'app', '2.0')
    working_set = pkg_resources.WorkingSet([self._calmjs_testing_tmpdir])
    # Python dependency acquisition failures should fail hard.
    with self.assertRaises(pkg_resources.DistributionNotFound):
        calmjs_dist.flatten_egginfo_json(['app'], working_set=working_set)
def _namespace_package_path(fqname, pathnames, path=None):
    """
    Return the __path__ for the python package in *fqname*.

    This function uses setuptools metadata to extract information
    about namespace packages from installed eggs: for every installed
    distribution declaring *fqname* as a namespace package, the
    matching subdirectory of that distribution is appended to a copy
    of *pathnames*.

    ``path`` is forwarded to ``pkg_resources.WorkingSet`` to control
    which entries are scanned (``None`` means the default sys.path).
    """
    result = list(pathnames)
    for dist in pkg_resources.WorkingSet(path):
        if not dist.has_metadata('namespace_packages.txt'):
            continue
        declared = dist.get_metadata('namespace_packages.txt').splitlines()
        if fqname not in declared:
            continue
        candidate = os.path.join(dist.location, *fqname.split('.'))
        # avoid introducing duplicate entries into the package path
        if candidate not in result:
            result.append(candidate)
    return result
def get_all_entry_points():
    """
    Get all entry points related to ``ros2cli`` and any of its extensions.

    :returns: mapping of group names to dictionaries which map entry
      point names to ``(Distribution, EntryPoint)`` tuples
    :rtype: dict
    """
    extension_points = get_entry_points(EXTENSION_POINT_GROUP_NAME)
    entry_points = defaultdict(dict)
    working_set = WorkingSet()
    for dist in sorted(working_set):
        entry_map = dist.get_entry_map()
        for group_name in entry_map.keys():
            # skip groups which are not registered as extension points
            if group_name not in extension_points:
                continue
            group = entry_map[group_name]
            for entry_point_name, entry_point in group.items():
                entry_points[group_name][entry_point_name] = \
                    (dist, entry_point)
    return entry_points
def check(request):
    """Return name/version dicts for every installed distribution.

    The ``request`` argument is accepted for interface compatibility
    with the check framework and is not used.
    """
    packages = []
    for distribution in WorkingSet():
        packages.append({
            'name': distribution.project_name,
            'version': distribution.version,
        })
    return packages
def check(request):
    """Report the installed version of the configured package.

    Reads ``package_name`` from ``settings.HEARTBEAT``, looks it up in
    the current working set, and returns a dict with the found
    distribution's name and version, or an ``error`` key when no
    distribution matches.

    Raises ``ImproperlyConfigured`` when the setting is missing.
    """
    package_name = settings.HEARTBEAT.get('package_name')
    if not package_name:
        raise ImproperlyConfigured(
            'Missing package_name key from heartbeat configuration')
    sys_path_distros = WorkingSet()
    package_req = Requirement.parse(package_name)
    distro = sys_path_distros.find(package_req)
    if not distro:
        return dict(error='no distribution found for {}'.format(package_name))
    return dict(name=distro.project_name, version=distro.version)
def _directory_import(self): """ Import astropy_helpers from the given path, which will be added to sys.path. Must return True if the import succeeded, and False otherwise. """ # Return True on success, False on failure but download is allowed, and # otherwise raise SystemExit path = os.path.abspath(self.path) # Use an empty WorkingSet rather than the man # pkg_resources.working_set, since on older versions of setuptools this # will invoke a VersionConflict when trying to install an upgrade ws = pkg_resources.WorkingSet([]) ws.add_entry(path) dist = ws.by_key.get(DIST_NAME) if dist is None: # We didn't find an egg-info/dist-info in the given path, but if a # setup.py exists we can generate it setup_py = os.path.join(path, 'setup.py') if os.path.isfile(setup_py): with _silence(): run_setup(os.path.join(path, 'setup.py'), ['egg_info']) for dist in pkg_resources.find_distributions(path, True): # There should be only one... return dist return dist
def with_requires(*requirements):
    """Run a test case only when given requirements are satisfied.

    .. admonition:: Example

       This test case runs only when `numpy>=1.10` is installed.

       >>> from cupy import testing
       ... class Test(unittest.TestCase):
       ...     @testing.with_requires('numpy>=1.10')
       ...     def test_for_numpy_1_10(self):
       ...         pass

    Args:
        requirements: A list of string representing requirement
            condition to run a given test case.
    """
    msg = 'requires: {}'.format(','.join(requirements))
    try:
        # probe the current environment; a ResolutionError means at
        # least one requirement is unsatisfied
        pkg_resources.WorkingSet().require(*requirements)
    except pkg_resources.ResolutionError:
        skip = True
    else:
        skip = False
    return unittest.skipIf(skip, msg)
def test_runtime_entry_point_broken_at_main(self):
    # try the above, but do this through main
    stub_stdouts(self)
    ep = pkg_resources.EntryPoint.parse('broken = some.broken:instance')
    # force the entry point load to fail with an ImportError
    ep.load = fake_error(ImportError)
    working_set = mocks.WorkingSet({'calmjs.runtime': [ep]})
    with self.assertRaises(SystemExit):
        runtime.main(
            ['-h'],
            runtime_cls=lambda: runtime.Runtime(working_set=working_set)
        )
    out = sys.stdout.getvalue()
    err = sys.stderr.getvalue()
    # the broken entry point is reported on stderr, not in the help
    self.assertNotIn('broken', out)
    self.assertIn('broken', err)
def test_runtime_entry_point_preparse_warning(self):
    """Deprecation warning is suppressed at the default verbosity."""
    # see next test for the condition for warning to appear.
    stub_stdouts(self)
    working_set = mocks.WorkingSet({'calmjs.runtime': [
        'deprecated = calmjs.tests.test_runtime:deprecated',
    ]})
    with self.assertRaises(SystemExit):
        runtime.main(
            ['deprecated'],
            runtime_cls=lambda: runtime.Runtime(working_set=working_set)
        )
    err = sys.stderr.getvalue()
    self.assertNotIn('Traceback', err)
    self.assertNotIn('this runtime is deprecated', err)
def test_runtime_entry_point_preparse_warning_verbose_logged(self):
    """With -v the deprecation message appears, but not its origin."""
    stub_stdouts(self)
    working_set = mocks.WorkingSet({'calmjs.runtime': [
        'deprecated = calmjs.tests.test_runtime:deprecated',
    ]})
    with self.assertRaises(SystemExit):
        # use the verbose flag to increase the log level
        runtime.main(
            ['-v', 'deprecated'],
            runtime_cls=lambda: runtime.Runtime(working_set=working_set)
        )
    err = sys.stderr.getvalue()
    self.assertNotIn('Traceback', err)
    self.assertIn('this runtime is deprecated', err)
    self.assertNotIn('DeprecationWarning triggered at', err)
def test_runtime_entry_point_preparse_warning_verbose_debug_logged(self):
    """With -vvv both the deprecation message and its origin appear."""
    stub_stdouts(self)
    working_set = mocks.WorkingSet({'calmjs.runtime': [
        'deprecated = calmjs.tests.test_runtime:deprecated',
    ]})
    with self.assertRaises(SystemExit):
        # use the verbose flag to increase the log level
        runtime.main(
            ['-vvv', 'deprecated'],
            runtime_cls=lambda: runtime.Runtime(working_set=working_set)
        )
    err = sys.stderr.getvalue()
    self.assertNotIn('Traceback', err)
    self.assertIn('this runtime is deprecated', err)
    self.assertIn('DeprecationWarning triggered at', err)
def test_spec_debugged_via_cmdline(self):
    """A single -d flag sets the resulting spec's debug level to 1."""
    stub_item_attr_value(
        self, mocks, 'dummy',
        runtime.ToolchainRuntime(toolchain.NullToolchain()),
    )
    working_set = mocks.WorkingSet({
        'calmjs.runtime': [
            'tool = calmjs.testing.mocks:dummy',
        ],
    })
    rt = runtime.Runtime(working_set=working_set, prog='calmjs')
    result = rt(['tool', '--export-target', 'dummy', '-d'])
    self.assertEqual(result['debug'], 1)
def test_spec_optional_advice(self):
    """Optional advice packages are applied; failures are logged."""
    from calmjs.registry import _inst as root_registry
    key = toolchain.CALMJS_TOOLCHAIN_ADVICE
    stub_stdouts(self)

    def cleanup_fake_registry():
        # pop out the fake advice registry that was added.
        root_registry.records.pop(key, None)

    self.addCleanup(cleanup_fake_registry)
    make_dummy_dist(self, ((
        'entry_points.txt',
        '[calmjs.toolchain.advice]\n'
        'calmjs.toolchain:Toolchain = calmjs.tests.test_toolchain:bad\n'
        'calmjs.toolchain:NullToolchain = '
        'calmjs.tests.test_toolchain:dummy\n'
    ),), 'example.package', '1.0')
    working_set = pkg_resources.WorkingSet([self._calmjs_testing_tmpdir])
    root_registry.records[key] = toolchain.AdviceRegistry(
        key, _working_set=working_set)
    tc = toolchain.NullToolchain()
    rt = runtime.ToolchainRuntime(tc)
    result = rt([
        '--export-target', 'dummy',
        '--optional-advice', 'example.package',
        '-vv',
    ])
    self.assertEqual(result['dummy'], ['dummy', 'bad'])
    err = sys.stderr.getvalue()
    self.assertIn('prepare spec with optional advices from packages', err)
    self.assertIn('example.package', err)
    self.assertIn('failure encountered while setting up advices', err)
    # Doing it normally should not result in that optional key.
    result = rt(['--export-target', 'dummy'])
    self.assertNotIn('dummy', result)
def test_spec_optional_advice_extras(self):
    """Extras on an optional advice package propagate to spec['extras']."""
    from calmjs.registry import _inst as root_registry
    key = toolchain.CALMJS_TOOLCHAIN_ADVICE
    stub_stdouts(self)

    def cleanup_fake_registry():
        # pop out the fake advice registry that was added.
        root_registry.records.pop(key, None)

    self.addCleanup(cleanup_fake_registry)
    make_dummy_dist(self, ((
        'entry_points.txt',
        '[calmjs.toolchain.advice]\n'
        'calmjs.toolchain:NullToolchain = '
        'calmjs.tests.test_toolchain:dummy\n'
    ),), 'example.package', '1.0')
    working_set = pkg_resources.WorkingSet([self._calmjs_testing_tmpdir])
    root_registry.records[key] = toolchain.AdviceRegistry(
        key, _working_set=working_set)
    tc = toolchain.NullToolchain()
    rt = runtime.ToolchainRuntime(tc)
    result = rt([
        '--export-target', 'dummy',
        '--optional-advice', 'example.package[extra1,extra2]',
        '-vv',
    ])
    self.assertEqual(result['extras'], ['extra1', 'extra2'])
def test_spec_deferred_addition(self):
    """
    This turns out to be critical - the advices provided by the
    packages should NOT be added immediately, as it is executed before
    a number of very important advices were added by the toolchain
    itself.
    """
    from calmjs.registry import _inst as root_registry
    key = toolchain.CALMJS_TOOLCHAIN_ADVICE
    stub_stdouts(self)

    def cleanup_fake_registry():
        # pop out the fake advice registry that was added.
        root_registry.records.pop(key, None)

    self.addCleanup(cleanup_fake_registry)
    make_dummy_dist(self, ((
        'entry_points.txt',
        '[calmjs.toolchain.advice]\n'
        'calmjs.toolchain:NullToolchain = '
        'calmjs.testing.spec:advice_order\n'
    ),), 'example.package', '1.0')
    working_set = pkg_resources.WorkingSet([self._calmjs_testing_tmpdir])
    root_registry.records[key] = toolchain.AdviceRegistry(
        key, _working_set=working_set)
    tc = toolchain.NullToolchain()
    rt = runtime.ToolchainRuntime(tc)
    result = rt([
        '--export-target', 'dummy',
        '--optional-advice', 'example.package',
    ])
    # a clean stderr means the ordering check in advice_order passed
    self.assertEqual(sys.stderr.getvalue(), '')
    self.assertIsNotNone(result)
def test_spec_debugged_via_cmdline_target_exists_export_cancel(self):
    """Cancelling an overwrite at debug level 2 logs the advice trace."""
    stub_item_attr_value(
        self, mocks, 'dummy',
        runtime.ToolchainRuntime(toolchain.NullToolchain()),
    )
    working_set = mocks.WorkingSet({
        'calmjs.runtime': [
            'tool = calmjs.testing.mocks:dummy',
        ],
    })
    tmpdir = mkdtemp(self)
    target = join(tmpdir, 'target')
    # pre-create the export target so the overwrite prompt triggers
    open(target, 'w').close()
    rt = runtime.Runtime(working_set=working_set, prog='calmjs')
    stub_stdouts(self)
    stub_stdin(self, u'n\n')
    stub_check_interactive(self, True)
    result = rt(['tool', '--export-target', target, '-dd', '-vv'])
    self.assertEqual(result['debug'], 2)
    # This is an integration test of sort for the debug advice output
    self.assertIn("advise 'cleanup' invoked by", sys.stderr.getvalue())
    self.assertIn("toolchain.py", sys.stderr.getvalue())
    self.assertIn(
        'advise(AFTER_PREPARE, self.check_export_target_exists, spec)',
        sys.stderr.getvalue(),
    )
def test_root_runtime_errors_ignored(self):
    """Unimportable entry points are skipped; valid ones still listed."""
    stub_stdouts(self)
    working_set = mocks.WorkingSet({'calmjs.runtime': [
        'foo = calmjs.nosuchmodule:no.where',
        'bar = calmjs.npm:npm',
        'npm = calmjs.npm:npm.runtime',
    ]})
    rt = runtime.Runtime(working_set=working_set)
    with self.assertRaises(SystemExit):
        rt(['-h'])
    out = sys.stdout.getvalue()
    self.assertNotIn('foo', out)
    self.assertIn('npm', out)
def test_npm_description(self):
    """The npm subcommand help shows its registered description."""
    stub_stdouts(self)
    working_set = mocks.WorkingSet({'calmjs.runtime': [
        'npm = calmjs.npm:npm.runtime',
    ]})
    rt = runtime.Runtime(working_set=working_set)
    with self.assertRaises(SystemExit):
        rt(['npm', '-h'])
    out = sys.stdout.getvalue()
    self.assertIn('npm support for the calmjs framework', out)
def test_root_runtime_bad_names(self):
    """Entry points with malformed names are logged and omitted."""
    working_set = mocks.WorkingSet({'calmjs.runtime': [
        'bad name = calmjs.npm:npm.runtime',
        'bad.name = calmjs.npm:npm.runtime',
        'badname:likethis = calmjs.npm:npm.runtime',
    ]})
    stderr = mocks.StringIO()
    with pretty_logging(
            logger='calmjs.runtime', level=DEBUG, stream=stderr):
        rt = runtime.Runtime(working_set=working_set)
        rt.argparser
    err = stderr.getvalue()
    self.assertIn("bad 'calmjs.runtime' entry point", err)
    stub_stdouts(self)
    with self.assertRaises(SystemExit):
        rt(['-h'])
    out = sys.stdout.getvalue()
    # this results in unnatural argparsing situation
    self.assertNotIn('bad name', out)
    # reserved for disambiguation
    self.assertNotIn('bad.name', out)
    self.assertNotIn('badname:likethis', out)
    # command listing naturally not available.
    self.assertNotIn('npm', out)
def setup_dupe_runtime(self):
    """Build a working set with duplicated runtime entry point names."""
    from calmjs.testing import utils
    from calmjs.npm import npm
    utils.foo_runtime = runtime.PackageManagerRuntime(npm.cli_driver)
    utils.runtime_foo = runtime.PackageManagerRuntime(npm.cli_driver)

    def cleanup():
        del utils.foo_runtime
        del utils.runtime_foo

    self.addCleanup(cleanup)
    # four dists that all register 'bar' (and some 'baz') so name
    # collisions happen across packages
    make_dummy_dist(self, ((
        'entry_points.txt',
        '[calmjs.runtime]\n'
        'bar = calmjs.testing.utils:foo_runtime\n'
    ),), 'example1.foo', '1.0')
    make_dummy_dist(self, ((
        'entry_points.txt',
        '[calmjs.runtime]\n'
        'bar = calmjs.testing.utils:foo_runtime\n'
    ),), 'example2.foo', '1.0')
    make_dummy_dist(self, ((
        'entry_points.txt',
        '[calmjs.runtime]\n'
        'bar = calmjs.testing.utils:runtime_foo\n'
        'baz = calmjs.testing.utils:runtime_foo\n'
    ),), 'example3.foo', '1.0')
    make_dummy_dist(self, ((
        'entry_points.txt',
        '[calmjs.runtime]\n'
        'bar = calmjs.testing.utils:runtime_foo\n'
        'baz = calmjs.testing.utils:runtime_foo\n'
    ),), 'example4.foo', '1.0')
    return pkg_resources.WorkingSet([self._calmjs_testing_tmpdir])
def test_duplication_and_runtime_not_recursion(self):
    """
    Make sure it explodes normally if standard runtime error.
    """
    from calmjs.testing import utils

    class BadAtInit(runtime.DriverRuntime):
        def init_argparser(self, argparser):
            if argparser is not self.argparser:
                raise RuntimeError('A fake explosion')

    def cleanup():
        del utils.badatinit

    self.addCleanup(cleanup)
    stub_stdouts(self)
    # create a dummy dist
    make_dummy_dist(self, ((
        'entry_points.txt',
        '[calmjs.runtime]\n'
        'badatinit = calmjs.testing.utils:badatinit\n'
    ),), 'example.badsimple', '1.0')
    working_set = pkg_resources.WorkingSet([self._calmjs_testing_tmpdir])
    utils.badatinit = BadAtInit(None)
    # and here lies the crimson magician, all out of hp.
    with pretty_logging(
            logger='calmjs.runtime', stream=mocks.StringIO()) as s:
        runtime.Runtime(working_set=working_set).argparser
    self.assertIn(
        "cannot register entry_point "
        "'badatinit = calmjs.testing.utils:badatinit' from "
        "'example.badsimple 1.0' ", s.getvalue()
    )
def test_runtime_recursion_that_is_totally_our_fault(self):
    """
    If stuff does blow up, don't blame the wrong party if we can help it.
    """
    from calmjs.testing import utils
    stub_stdouts(self)

    # We kind of have to punt this, so punt it with a stupid
    # override using an EntryPoint that explodes.
    class TrulyBadAtInit(runtime.Runtime):
        def init_argparser(self, argparser):
            raise RuntimeError('maximum recursion depth exceeded')

    def cleanup():
        del utils.trulybad

    self.addCleanup(cleanup)
    make_dummy_dist(self, ((
        'entry_points.txt',
        '[calmjs.runtime]\n'
        'trulybad = calmjs.testing.utils:trulybad\n'
    ),), 'example.badsimple', '1.0')
    working_set = pkg_resources.WorkingSet([self._calmjs_testing_tmpdir])
    utils.trulybad = TrulyBadAtInit(None)
    with pretty_logging(
            logger='calmjs.runtime', stream=mocks.StringIO()) as s:
        runtime.Runtime(working_set=working_set).argparser
    self.assertIn("maximum recursion depth exceeded", s.getvalue())
def setup_runtime(self):
    """Return a Runtime wired to a mock working set with one command."""
    # create a working set with our custom runtime entry point
    # TODO should really improve the test case to provide custom
    # runtime instances separate from actual data.
    working_set = mocks.WorkingSet({
        'calmjs.runtime': [
            'cmd = calmjs.npm:npm.runtime',
        ],
    })
    return runtime.Runtime(working_set=working_set, prog='calmjs')

# for the test, we use the -u flag for the unknown tests as it is
# unknown to bootstrap and target parser. Next two are using known
# flag to before, then after.
def setup_runtime(self):
    """Set up a cwd plus dummy dists; return (cwd, manager runtime)."""
    stub_stdouts(self)
    remember_cwd(self)
    cwd = mkdtemp(self)
    os.chdir(cwd)
    make_dummy_dist(self, (
        ('requirements.json', json.dumps({
            'name': 'calmpy.pip',
            'require': {
                'setuptools': '25.1.6',
            },
        })),
    ), 'calmpy.pip', '2.0')
    make_dummy_dist(self, (
        ('requires.txt', '[dev]\ncalmpy.pip'),
    ), 'site', '1.0')
    working_set = pkg_resources.WorkingSet([self._calmjs_testing_tmpdir])
    # Stub out the underlying data needed for the cli for the tests
    # to test against our custom data for reproducibility.
    stub_item_attr_value(self, dist, 'default_working_set', working_set)
    stub_check_interactive(self, True)
    driver = cli.PackageManagerDriver(
        pkg_manager_bin='mgr', pkgdef_filename='requirements.json',
        dep_keys=('require',),
    )
    return cwd, runtime.PackageManagerRuntime(driver)

# do note: the runtime is not registered to the root runtime
# directly, but this is a good enough emulation on how this would
# behave under real circumstances, as each of these runtime can and
# should be able to operate as independent entities.
def test_calmjs_main_console_version_broken(self):
    """-V with an empty working set exits 0 and prints placeholders."""
    stub_stdouts(self)
    working_set = pkg_resources.WorkingSet([mkdtemp(self)])
    stub_item_attr_value(self, runtime, 'default_working_set', working_set)
    stub_item_attr_value(
        self, calmjs_argparse, 'default_working_set', working_set)
    # make sure the bad case doesn't just blow up...
    with self.assertRaises(SystemExit) as e:
        runtime.main(['-V'])
    self.assertEqual(e.exception.args[0], 0)
    self.assertIn('? ? from ?', sys.stdout.getvalue())
def test_get_package_advices(self):
    """All advice entry points for a package can be retrieved by name."""
    make_dummy_dist(self, ((
        'entry_points.txt',
        '[calmjs.toolchain.advice]\n'
        'calmjs.toolchain:Toolchain = calmjs.tests.test_toolchain:dummy\n'
        'calmjs.toolchain:Alt = calmjs.tests.test_toolchain:dummy\n'
    ),), 'example.package', '1.0')
    working_set = pkg_resources.WorkingSet([self._calmjs_testing_tmpdir])
    reg = AdviceRegistry(CALMJS_TOOLCHAIN_ADVICE, _working_set=working_set)
    self.assertEqual(sorted(reg.get('example.package').keys()), [
        'calmjs.toolchain:Alt', 'calmjs.toolchain:Toolchain',
    ])
def test_not_toolchain_process(self):
    """Passing a non-toolchain object returns None and logs an error."""
    working_set = pkg_resources.WorkingSet([])
    reg = AdviceRegistry(CALMJS_TOOLCHAIN_ADVICE, _working_set=working_set)
    with pretty_logging(stream=StringIO()) as s:
        self.assertIsNone(
            reg.process_toolchain_spec_package(object(), Spec(), 'calmjs'))
    self.assertIn(
        "must call process_toolchain_spec_package with a toolchain",
        s.getvalue(),
    )
def test_standard_toolchain_process_nothing(self):
    """Processing with no registered advice logs and leaves spec alone."""
    working_set = pkg_resources.WorkingSet([])
    reg = AdviceRegistry(CALMJS_TOOLCHAIN_ADVICE, _working_set=working_set)
    toolchain = Toolchain()
    spec = Spec()
    with pretty_logging(stream=StringIO()) as s:
        reg.process_toolchain_spec_package(toolchain, spec, 'calmjs')
    self.assertIn(
        "no advice setup steps registered for package/requirement "
        "'calmjs'", s.getvalue(),
    )
def test_standard_toolchain_failure_process(self):
    """A failing advice step logs an error but partial setup remains."""
    make_dummy_dist(self, ((
        'entry_points.txt',
        '[calmjs.toolchain.advice]\n'
        'calmjs.toolchain:Toolchain = calmjs.tests.test_toolchain:bad\n'
        'calmjs.toolchain:NullToolchain = '
        'calmjs.tests.test_toolchain:dummy\n'
    ),), 'example.package', '1.0')
    working_set = pkg_resources.WorkingSet([self._calmjs_testing_tmpdir])
    reg = AdviceRegistry(CALMJS_TOOLCHAIN_ADVICE, _working_set=working_set)
    toolchain = NullToolchain()
    spec = Spec()
    with pretty_logging(stream=StringIO()) as s:
        reg.process_toolchain_spec_package(
            toolchain, spec, 'example.package')
    err = s.getvalue()
    # inheritance applies.
    self.assertIn(
        "found advice setup steps registered for package/requirement "
        "'example.package' for toolchain 'calmjs.toolchain:NullToolchain'",
        err,
    )
    self.assertIn("ERROR", err)
    self.assertIn(
        "failure encountered while setting up advices through entry_point",
        err)
    # partial execution will be done, so do test stuff.
    self.assertEqual(spec, {'dummy': ['dummy', 'bad']})
def test_toolchain_compile_loaderplugin_entry_registered(self):
    """
    A rough standalone test for handling of loader plugins.
    """
    reg = LoaderPluginRegistry('simple', _working_set=WorkingSet({
        'simple': [
            'foo = calmjs.tests.test_toolchain:MockLPHandler',
            'bar = calmjs.tests.test_toolchain:MockLPHandler',
        ],
    }))
    src_dir = mkdtemp(self)
    src = join(src_dir, 'target.txt')
    spec = Spec(calmjs_loaderplugin_registry=reg)
    with pretty_logging(stream=StringIO()) as s:
        bar_results = self.toolchain.compile_loaderplugin_entry(spec, (
            'bar!target.txt', src, 'bar!target.txt', 'bar!target.txt'))
        foo_results = self.toolchain.compile_loaderplugin_entry(spec, (
            'foo!target.txt', src, 'foo!target.txt', 'foo!target.txt'))
    # no warnings or errors should have been logged
    self.assertEqual('', s.getvalue())
    self.assertEqual((
        {'foo!target.txt': 'foo!target.txt'},
        {'foo!target.txt': 'foo!target.txt'},
        ['foo!target.txt'],
    ), foo_results)
    self.assertEqual((
        {'bar!target.txt': 'bar!target.txt'},
        {'bar!target.txt': 'bar!target.txt'},
        ['bar!target.txt'],
    ), bar_results)
    # recursive lookups are generally not needed, if the target
    # supplied _is_ the target.
def test_toolchain_spec_prepare_loaderplugins_missing(self):
    """Entries without a registered handler are dropped with a warning."""
    reg = LoaderPluginRegistry('simple', _working_set=WorkingSet({
        'simple': [
            'foo = calmjs.tests.test_toolchain:MockLPHandler',
            'bar = calmjs.tests.test_toolchain:MockLPHandler',
        ],
    }))
    spec = Spec(
        calmjs_loaderplugin_registry=reg,
        loaderplugin_sourcepath_maps={
            'foo': {'foo!thing': 'thing'},
            'missing': {'missing!thing': 'thing'},
            'bar': {'bar!thing': 'thing'},
        },
    )
    with pretty_logging(stream=StringIO()) as s:
        toolchain_spec_prepare_loaderplugins(
            self.toolchain, spec, 'loaderplugin', 'loaders')
    self.assertEqual({
        'foo!thing': 'thing',
        'bar!thing': 'thing',
    }, spec['loaderplugin_sourcepath'])
    self.assertEqual({
        'foo': 'foo',
        'bar': 'bar',
    }, spec['loaders'])
    self.assertIn(
        "loaderplugin handler for 'missing' not found in loaderplugin "
        "registry 'simple'", s.getvalue())
    self.assertIn(
        "will not be compiled into the build target: ['missing!thing']",
        s.getvalue())
def test_not_defined(self):
    """Resolution with no entry point falls back to the default path."""
    working_set = pkg_resources.WorkingSet([
        self.ds_egg_root,
    ])
    stub_item_attr_value(self, pkg_resources, 'working_set', working_set)
    p = indexer.resource_filename_mod_entry_point('dummyns', None)
    self.assertEqual(normcase(p), normcase(self.dummyns_path))
def test_mismatched(self):
    """An entry point resolving to a missing directory is logged."""
    # mismatch includes a package that doesn't actually have the
    # directory created
    d_egg_root = join(mkdtemp(self), 'dummyns')
    make_dummy_dist(self, ((
        'namespace_packages.txt',
        'dummyns\n',
    ), (
        'entry_points.txt',
        '[dummyns]\n'
        'dummyns = dummyns:attr\n',
    ),), 'dummyns', '1.0', working_dir=d_egg_root)
    working_set = pkg_resources.WorkingSet([
        d_egg_root,
        self.ds_egg_root,
    ])
    stub_item_attr_value(self, pkg_resources, 'working_set', working_set)
    dummyns_ep = next(working_set.iter_entry_points('dummyns'))
    with pretty_logging(stream=StringIO()) as fd:
        p = indexer.resource_filename_mod_entry_point(
            'dummyns', dummyns_ep)
    self.assertIn(
        "'dummyns' resolved by entry_point 'dummyns = dummyns:attr' leads "
        "to no path", fd.getvalue()
    )
    self.assertEqual(normcase(p), normcase(self.dummyns_path))
def test_standard(self):
    """A properly declared namespace package resolves to its moddir."""
    d_egg_root = join(mkdtemp(self), 'dummyns')
    make_dummy_dist(self, ((
        'namespace_packages.txt',
        'dummyns\n',
    ), (
        'entry_points.txt',
        '[dummyns]\n'
        'dummyns = dummyns:attr\n',
    ),), 'dummyns', '1.0', working_dir=d_egg_root)
    working_set = pkg_resources.WorkingSet([
        d_egg_root,
        self.ds_egg_root,
    ])
    stub_item_attr_value(self, pkg_resources, 'working_set', working_set)
    moddir = join(d_egg_root, 'dummyns')
    os.makedirs(moddir)
    # make this also a proper thing
    with open(join(moddir, '__init__.py'), 'w') as fd:
        fd.write('')
    dummyns_ep = next(working_set.iter_entry_points('dummyns'))
    p = indexer.resource_filename_mod_entry_point('dummyns', dummyns_ep)
    # finally, this should work.
    self.assertEqual(normcase(p), normcase(moddir))
def test_yarn_install_package_json_overwrite_interactive(self):
    """Interactive 'y' answer lets yarn_install overwrite package.json."""
    # Testing the implied init call
    stub_mod_call(self, cli)
    stub_stdin(self, 'y\n')
    stub_stdouts(self)
    tmpdir = mkdtemp(self)
    os.chdir(tmpdir)
    # All the pre-made setup.
    app = make_dummy_dist(self, (
        ('requires.txt', '\n'.join([])),
        ('package.json', json.dumps({
            'dependencies': {'jquery': '~1.11.0'},
        })),
    ), 'foo', '1.9.0')
    working_set = WorkingSet()
    working_set.add(app, self._calmjs_testing_tmpdir)
    stub_item_attr_value(self, dist, 'default_working_set', working_set)
    # We are going to have a fake package.json
    with open(join(tmpdir, 'package.json'), 'w') as fd:
        json.dump({}, fd)
    # This is faked.
    yarn.yarn_install('foo', overwrite=True)
    with open(join(tmpdir, 'package.json')) as fd:
        config = json.load(fd)
    # Overwritten
    self.assertEqual(config, {
        'dependencies': {'jquery': '~1.11.0'},
        'devDependencies': {},
        'name': 'foo',
    })
    # No log level set.
    self.assertEqual(sys.stdout.getvalue(), '')
    self.assertEqual(sys.stderr.getvalue(), '')