我们从Python开源项目中,提取了以下18个代码示例,用于说明如何使用collections.abc.Sized()。
def test_Sized(self):
    # Objects that lack __len__ must not be recognised as Sized,
    # neither by isinstance() nor by issubclass() on their type.
    not_sized = [None, 42, 3.14, 1j, (lambda: (yield))(), (x for x in [])]
    for obj in not_sized:
        self.assertNotIsInstance(obj, Sized)
        self.assertFalse(issubclass(type(obj), Sized), repr(type(obj)))
    # All built-in containers (including dict views) implement __len__.
    sized = [
        bytes(), str(), tuple(), list(), set(), frozenset(), dict(),
        dict().keys(), dict().items(), dict().values(),
    ]
    for obj in sized:
        self.assertIsInstance(obj, Sized)
        self.assertTrue(issubclass(type(obj), Sized), repr(type(obj)))
    # The ABC itself exposes exactly one abstract method: __len__.
    self.validate_abstract_methods(Sized, '__len__')
    self.validate_isinstance(Sized, '__len__')
def get_collection_sizes(obj, collections: Optional[Tuple]=None,
                         get_only_non_empty=False):
    """Report the byte size and item count of each collection attribute of *obj*.

    Walks ``dir(obj)`` and, for every attribute that is an instance of one of
    the types in *collections* (defaults to list/dict/set/deque/abc.Sized),
    records ``(attribute_name, len, asizeof_detail)``.  With
    *get_only_non_empty* set, empty collections are skipped.
    """
    # Deferred import: pympler is an optional profiling dependency.
    from pympler import asizeof

    wanted = collections or (list, dict, set, deque, abc.Sized)
    if not isinstance(wanted, tuple):
        # isinstance() requires a type or tuple of types.
        wanted = tuple(wanted)

    sizes = []
    for name in dir(obj):
        value = getattr(obj, name)
        if not isinstance(value, wanted):
            continue
        if get_only_non_empty and len(value) == 0:
            continue
        sizes.append((name, len(value), asizeof.asizeof(value, detail=1)))
    return sizes
def preduce(func, iterable, args = tuple(), kwargs = dict(), processes = 1):
    """
    Parallel application of the reduce function, with keyword arguments.

    Parameters
    ----------
    func : callable
        Function to be applied to every element of `iterable`.
    iterable : iterable
        Iterable of items to be reduced. Generators are consumed.
    args : tuple
        Positional arguments of `function`.
    kwargs : dictionary, optional
        Keyword arguments of `function`.
    processes : int or None, optional
        Number of processes to use. If `None`, maximal number of processes
        is used. Default is one.

    Returns
    -------
    reduced : object

    Notes
    -----
    If `processes` is 1, `preduce` is equivalent to functools.reduce with the
    added benefit of using `args` and `kwargs`, but `initializer` is not supported.
    """
    bound = partial(func, *args, **kwargs)

    # Serial fast path: plain functools.reduce, no pool overhead.
    if processes == 1:
        return reduce(bound, iterable)

    with Pool(processes) as pool:
        # Size the chunks to the worker count when the length is known.
        chunksize = 1
        if isinstance(iterable, Sized):
            chunksize = max(1, int(len(iterable) / pool._processes))

        # Some reductions are order-sensitive: reduce each chunk in order,
        # then fold the per-chunk partial results together.
        chunk_results = pool.imap(partial(reduce, bound),
                                  tuple(chunked(iterable, chunksize)))
        return reduce(bound, chunk_results)
def _is_list(obj): return (isinstance(obj, Sized) and isinstance(obj, Iterable) and not isinstance(obj, (Set, Mapping)))
def ensure_flatlist(x):
    """Flattens a multi-list x to a single index list.

    If the first element of ``x`` is itself sized (a nested list/tuple/...),
    that first element is returned; otherwise ``x`` is returned unchanged.
    An empty sequence is returned as-is.

    Parameters
    ----------
    x : sequence
        Possibly nested index list.

    Returns
    -------
    sequence
        A flat, single index list.
    """
    # Guard against empty input: the unguarded x[0] used to raise IndexError.
    if len(x) == 0:
        return x
    if isinstance(x[0], Sized):
        return x[0]
    return x
def is_singleton(x):
    """Checks if x is list-like.

    Returns True when *x* has no notion of length (no ``__len__``),
    i.e. it is a scalar rather than a container.
    """
    has_length = isinstance(x, Sized)
    return not has_length
def test_direct_subclassing(self):
    # Each one-method ABC must be directly subclassable, and the
    # subclass relationship must not leak to unrelated types like int.
    single_method_abcs = (Hashable, Iterable, Iterator, Sized, Container,
                          Callable)
    for base in single_method_abcs:
        class Sub(base):
            pass
        self.assertTrue(issubclass(Sub, base))
        self.assertFalse(issubclass(int, Sub))
def test_registration(self):
    # register() must turn an unrelated class into a virtual subclass
    # of each one-method ABC.
    for base in (Hashable, Iterable, Iterator, Sized, Container, Callable):
        class Plain:
            __hash__ = None  # Make sure it isn't hashable by default
        self.assertFalse(issubclass(Plain, base), base.__name__)
        base.register(Plain)
        self.assertTrue(issubclass(Plain, base))
def pmap(func, iterable, args = tuple(), kwargs = dict(), processes = 1, ntotal = None):
    """
    Parallel application of a function with keyword arguments.

    Parameters
    ----------
    func : callable
        Function to be applied to every element of `iterable`.
    iterable : iterable
        Iterable of items to be mapped.
    args : tuple, optional
        Positional arguments of `function`.
    kwargs : dictionary, optional
        Keyword arguments of `function`.
    processes : int or None, optional
        Number of processes to use. If `None`, maximal number of processes
        is used. Default is one.
    ntotal : int or None, optional
        If the length of `iterable` is known, but passing `iterable` as a list
        would take too much memory, the total length `ntotal` can be specified.
        This allows for `pmap` to chunk better.

    Yields
    ------
    Mapped values.

    See Also
    --------
    pmap_unordered : parallel map that does not preserve order

    Notes
    -----
    If `processes` is 1, `pmap` reduces to `map`, with the added benefit of
    of using `kwargs`
    """
    bound = partial(func, *args, **kwargs)

    # Serial fast path: identical to the built-in map.
    if processes == 1:
        yield from map(bound, iterable)
        return

    with Pool(processes) as pool:
        # Pick a chunk size from whichever length information is available.
        if isinstance(iterable, Sized):
            chunksize = max(1, int(len(iterable) / pool._processes))
        elif ntotal is not None:
            chunksize = max(1, int(ntotal / pool._processes))
        else:
            chunksize = 1

        yield from pool.imap(func = bound, iterable = iterable,
                             chunksize = chunksize)
def pmap_unordered(func, iterable, args = tuple(), kwargs = dict(), processes = 1, ntotal = None):
    """
    Parallel application of a function with keyword arguments in no particular
    order. This can reduce memory usage because results are not accumulated so
    that the order is preserved.

    Parameters
    ----------
    func : callable
        Function to be applied to every element of `iterable`.
    iterable : iterable
        Iterable of items to be mapped.
    args : tuple, optional
        Positional arguments of `function`.
    kwargs : dictionary, optional
        Keyword arguments of `function`.
    processes : int or None, optional
        Number of processes to use. If `None`, maximal number of processes
        is used. Default is one.
    ntotal : int or None, optional
        If the length of `iterable` is known, but passing `iterable` as a list
        would take too much memory, the total length `ntotal` can be specified.
        This allows for `pmap` to chunk better.

    Yields
    ------
    Mapped values.

    See Also
    --------
    pmap : parallel map that preserves order

    Notes
    -----
    If `processes` is 1, `pmap_unordered` reduces to `map`, with the added
    benefit of of using `kwargs`
    """
    bound = partial(func, *args, **kwargs)

    # Serial fast path: identical to the built-in map (order preserved).
    if processes == 1:
        yield from map(bound, iterable)
        return

    with Pool(processes) as pool:
        # Pick a chunk size from whichever length information is available.
        if isinstance(iterable, Sized):
            chunksize = max(1, int(len(iterable) / pool._processes))
        elif ntotal is not None:
            chunksize = max(1, int(ntotal / pool._processes))
        else:
            chunksize = 1

        yield from pool.imap_unordered(func = bound, iterable = iterable,
                                       chunksize = chunksize)
def test_EmptyMapping():
    sentinel = object()

    # 'Constructing' the singleton just hands it back, and it must be
    # passable to dict().
    assert EmptyMapping() is EmptyMapping
    assert dict(EmptyMapping) == {}

    # Lookups fail in every form.
    assert 'x' not in EmptyMapping
    with pytest.raises(KeyError):
        EmptyMapping['x']
    with pytest.raises(KeyError):
        del EmptyMapping['x']

    # Assignment is accepted but silently discarded.
    EmptyMapping['x'] = 4
    assert 'x' not in EmptyMapping
    with pytest.raises(KeyError):
        EmptyMapping['x']

    # Every iteration surface is empty.
    check_empty_iterable(EmptyMapping, 'EmptyMapping')
    check_empty_iterable(EmptyMapping.keys(), 'keys()')
    check_empty_iterable(EmptyMapping.values(), 'values()')
    check_empty_iterable(EmptyMapping.items(), 'items()', item=('x', 'y'))

    # dict-style accessors fall back to their defaults.
    assert EmptyMapping.get('x') is None
    assert EmptyMapping.setdefault('x') is None
    assert EmptyMapping.get('x', sentinel) is sentinel
    assert EmptyMapping.setdefault('x', sentinel) is sentinel
    assert EmptyMapping.pop('x', sentinel) is sentinel
    with pytest.raises(KeyError):
        EmptyMapping.popitem()
    with pytest.raises(KeyError):
        EmptyMapping.pop('x')

    # Truthiness and length agree: empty.
    assert not EmptyMapping
    assert len(EmptyMapping) == 0

    # update() accepts anything, does nothing, and returns None...
    assert EmptyMapping.update({1: 23, 'test': 34, }) is None
    assert EmptyMapping.update(other=5, a=1, b=3) is None
    # ...but, like dict, rejects more than one positional mapping.
    with pytest.raises(TypeError):
        EmptyMapping.update({3: 4}, {1: 2})

    # The ABC machinery recognises it as a (mutable) mapping.
    from collections import abc
    assert isinstance(EmptyMapping, abc.Container)
    assert isinstance(EmptyMapping, abc.Sized)
    assert isinstance(EmptyMapping, abc.Mapping)
    assert isinstance(EmptyMapping, abc.MutableMapping)
def __init__(self, iterable=None):
    r"""Create a new, empty Multiset object. And if given, initialize with elements
    from input iterable. Or, initialize from a mapping of elements to their
    multiplicity.

    Example:

    >>> ms = Multiset()                 # a new, empty multiset
    >>> ms = Multiset('abc')            # a new multiset from an iterable
    >>> ms = Multiset({'a': 4, 'b': 2}) # a new multiset from a mapping

    Args:
        iterable: An optional iterable of elements or mapping of elements to
            multiplicity to initialize the multiset from.
    """
    if isinstance(iterable, BaseMultiset):
        # Copy-construction from another multiset: duplicate the element
        # counter and reuse the cached total; no re-counting needed.
        self._elements = iterable._elements.copy()
        self._total = iterable._total
    else:
        # _elements maps element -> multiplicity; defaultdict(int) lets the
        # counting loops below use `+= 1` without key checks.
        self._elements = _elements = defaultdict(int)
        _total = 0
        if iterable is not None:
            # NOTE(review): the branch ORDER matters. _sequence_types and
            # _iter_types are project-defined type tuples (not visible in
            # this chunk) checked before the generic Mapping/Sized ABCs,
            # presumably as fast paths for common concrete types — confirm
            # against their definitions.
            if isinstance(iterable, _sequence_types):
                # Sequence fast path: count each element, then take the
                # total from len() in one call.
                for element in iterable:
                    _elements[element] += 1
                _total = len(iterable)
            elif isinstance(iterable, dict):
                # Plain dict of element -> multiplicity; non-positive
                # multiplicities are dropped.
                for element, multiplicity in iterable.items():
                    if multiplicity > 0:
                        _elements[element] = multiplicity
                        _total += multiplicity
            elif isinstance(iterable, _iter_types):
                # Iterator-like types: single pass, counting as we go
                # (len() is not available here).
                for element in iterable:
                    _elements[element] += 1
                    _total += 1
            elif isinstance(iterable, Mapping):
                # Any other mapping ABC: same handling as the dict branch.
                for element, multiplicity in iterable.items():
                    if multiplicity > 0:
                        _elements[element] = multiplicity
                        _total += multiplicity
            elif isinstance(iterable, Sized):
                # Sized but not a recognised sequence/mapping: count
                # elements, then take the total from len().
                for element in iterable:
                    _elements[element] += 1
                _total = len(iterable)
            else:
                # Fallback for arbitrary iterables (e.g. generators):
                # single consuming pass, counting manually.
                for element in iterable:
                    _elements[element] += 1
                    _total += 1
        self._total = _total