We have extracted the following 50 code examples from open source Python projects to illustrate how to use collections.Iterator().
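Note that collections.Iterator is an alias of collections.abc.Iterator; the alias was deprecated in Python 3.3 and removed in Python 3.10, so new code should import from collections.abc. A minimal sketch of the isinstance check that all of the examples below rely on (the function name is illustrative, not taken from any of the projects):

from collections.abc import Iterator  # `collections.Iterator` on Python < 3.10

def is_lazy(obj):
    # True only for objects that are consumed by iterating over them.
    return isinstance(obj, Iterator)

print(is_lazy(iter([1, 2, 3])))  # True  (list_iterator)
print(is_lazy(x for x in []))    # True  (generator)
print(is_lazy([1, 2, 3]))        # False (a list is iterable, not an iterator)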
def axis_iter(self, axes=0):
    """Returns an iterator yielding Sub-MPArrays of ``self`` by iterating
    over the specified physical axes.

    **Example:** If ``self`` represents a bipartite (i.e. length 2)
    array with 2 physical dimensions on each site ``A[(k,l), (m,n)]``,
    ``self.axis_iter(0)`` is equivalent to::

        (A[(k, :), (m, :)] for m in range(...) for k in range(...))

    :param axes: Iterable or int specifying the physical axes to
        iterate over (default 0 for each site)
    :returns: Iterator over :class:`.MPArray`

    """
    if not isinstance(axes, collections.Iterable):
        axes = it.repeat(axes, len(self))

    ltens_iter = it.product(*(iter(np.rollaxis(lten, i + 1))
                              for i, lten in zip(axes, self.lt)))
    return (MPArray(ltens) for ltens in ltens_iter)


##########################
#  Algebraic operations  #
##########################
def _extract_factors(tens, ndims):
    """Iteratively extract the leftmost MPO tensor with the given number
    of legs via a QR decomposition

    :param np.ndarray tens: Full tensor to be factorized
    :param ndims: Number of physical legs per site or iterator over the
        number of physical legs
    :returns: List of local tensors with the given number of legs,
        yielding a factorization of tens

    """
    current = next(ndims) if isinstance(ndims, collections.Iterator) else ndims
    if tens.ndim == current + 2:
        return [tens]
    elif tens.ndim < current + 2:
        raise AssertionError("Number of remaining legs insufficient.")
    else:
        unitary, rest = qr(tens.reshape((np.prod(tens.shape[:current + 1]), -1)))

        unitary = unitary.reshape(tens.shape[:current + 1] + rest.shape[:1])
        rest = rest.reshape(rest.shape[:1] + tens.shape[current + 1:])

        return [unitary] + _extract_factors(rest, ndims)
def _ltens_to_array(ltens):
    """Computes the full array representation from an iterator yielding the
    local tensors. Note that it does not get rid of virtual legs.

    :param ltens: Iterator over local tensors
    :returns: numpy.ndarray representing the contracted MPA

    """
    ltens = ltens if isinstance(ltens, collections.Iterator) else iter(ltens)
    res = first = next(ltens)
    for tens in ltens:
        res = matdot(res, tens)
    if res is first:
        # Always return a writable array, even if len(ltens) == 1.
        res = res.copy()
    return res


################################################
#  Helper methods for variational compression  #
################################################
def _class___init__(self, iterator):
    if self.partial_cls is None:
        raise MagicTypeError(
            'Iterator should be specified.'
        )
    if not isinstance(iterator, self.main_cls):
        raise MagicTypeError(
            'require Iterator.',
            iterator=iterator,
        )

    if isinstance(self.partial_cls, tuple):
        # Iterator[T, ...]. Checking on:
        # 1. the number of elements in the iterator.
        # 2. the type of each element.
        self.case = self.ITERATOR_CASE_LENGTH
        self._type_idx = 0
    else:
        # Iterator[T]. Check only the type of element. There's no
        # limitation on the length of iterator.
        self.case = self.ITERATOR_CASE_NO_LENGTH

    self.iterator = iterator
def for_json(obj):
    if hasattr(obj, 'for_json'):
        return obj.for_json()
    elif isinstance(obj, datetime):
        return obj.isoformat()
    elif isinstance(obj, collections.Mapping):
        # This includes all types in debian.deb822.
        return {str(k): obj[k] for k in obj}
    elif isinstance(obj, (collections.Iterator, tuple, set, frozenset)):
        ### TODO: Sort sets and frozensets?
        return list(obj)
    else:
        try:
            data = vars(obj).copy()
        except TypeError:
            return repr(obj)
        else:
            data["__class__"] = type(obj).__name__
            return data
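A plausible way to use a converter like for_json is as the default= hook of json.dumps, which invokes it for every object the encoder cannot serialize natively. This usage is an assumption about the surrounding project, not shown in the source:

import json
from datetime import datetime

# default= is called for each non-serializable object, so datetimes and
# iterators nested anywhere in the payload are converted on the fly.
payload = {"generated": datetime(2021, 5, 1), "values": iter([1, 2, 3])}
print(json.dumps(payload, default=for_json))
# {"generated": "2021-05-01T00:00:00", "values": [1, 2, 3]}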
def test_Iterator(self):
    non_samples = [None, 42, 3.14, 1j, b"", "", (), [], {}, set()]
    for x in non_samples:
        self.assertNotIsInstance(x, Iterator)
        self.assertFalse(issubclass(type(x), Iterator), repr(type(x)))
    samples = [iter(bytes()), iter(str()),
               iter(tuple()), iter(list()), iter(dict()),
               iter(set()), iter(frozenset()),
               iter(dict().keys()), iter(dict().items()),
               iter(dict().values()),
               (lambda: (yield))(),
               (x for x in []),
               ]
    for x in samples:
        self.assertIsInstance(x, Iterator)
        self.assertTrue(issubclass(type(x), Iterator), repr(type(x)))
    self.validate_abstract_methods(Iterator, '__next__', '__iter__')

    # Issue 10565
    class NextOnly:
        def __next__(self):
            yield 1
            raise StopIteration
    self.assertNotIsInstance(NextOnly(), Iterator)
def iteritems(self):
    """
    Iterator over (column name, Series) pairs.

    See also
    --------
    iterrows : Iterate over DataFrame rows as (index, Series) pairs.
    itertuples : Iterate over DataFrame rows as namedtuples of the values.

    """
    if self.columns.is_unique and hasattr(self, '_item_cache'):
        for k in self.columns:
            yield k, self._get_item_cache(k)
    else:
        for i, k in enumerate(self.columns):
            yield k, self._ixs(i, axis=1)
def __delitem__(self, key):
    if self.read_only:
        return
    query = """DELETE FROM {tb} WHERE {cond};"""
    if isinstance(key, (tuple, list, Iterator, np.ndarray)):
        key = [str(k) for k in key]
    else:
        key = [str(key)]
    # ====== check if key in cache ====== #
    db_key = []
    for k in key:
        if k in self.current_cache:
            del self.current_cache[k]
        else:
            db_key.append(k)
    # ====== remove key from db ====== #
    # NOTE: each key must be quoted individually, i.e. IN ("k1", "k2");
    # joining with ', ' alone would match the whole list as one string.
    self.cursor.execute(
        query.format(tb=self._current_table,
                     cond='key IN ("%s")' % '", "'.join(db_key)))
    self.connection.commit()
def test_glob_common(self):
    def _check(glob, expected):
        self.assertEqual(set(glob), {P(BASE, q) for q in expected})
    P = self.cls
    p = P(BASE)
    it = p.glob("fileA")
    self.assertIsInstance(it, collections.Iterator)
    _check(it, ["fileA"])
    _check(p.glob("fileB"), [])
    _check(p.glob("dir*/file*"), ["dirB/fileB", "dirC/fileC"])
    if symlink_skip_reason:
        _check(p.glob("*A"), ['dirA', 'fileA'])
    else:
        _check(p.glob("*A"), ['dirA', 'fileA', 'linkA'])
    if symlink_skip_reason:
        _check(p.glob("*B/*"), ['dirB/fileB'])
    else:
        _check(p.glob("*B/*"), ['dirB/fileB', 'dirB/linkD',
                                'linkB/fileB', 'linkB/linkD'])
    if symlink_skip_reason:
        _check(p.glob("*/fileB"), ['dirB/fileB'])
    else:
        _check(p.glob("*/fileB"), ['dirB/fileB', 'linkB/fileB'])
def test_rglob_common(self):
    def _check(glob, expected):
        self.assertEqual(set(glob), {P(BASE, q) for q in expected})
    P = self.cls
    p = P(BASE)
    it = p.rglob("fileA")
    self.assertIsInstance(it, collections.Iterator)
    _check(it, ["fileA"])
    _check(p.rglob("fileB"), ["dirB/fileB"])
    _check(p.rglob("*/fileA"), [])
    if symlink_skip_reason:
        _check(p.rglob("*/fileB"), ["dirB/fileB"])
    else:
        _check(p.rglob("*/fileB"), ["dirB/fileB", "dirB/linkD/fileB",
                                    "linkB/fileB", "dirA/linkC/fileB"])
    _check(p.rglob("file*"), ["fileA", "dirB/fileB",
                              "dirC/fileC", "dirC/dirD/fileD"])
    p = P(BASE, "dirC")
    _check(p.rglob("file*"), ["dirC/fileC", "dirC/dirD/fileD"])
    _check(p.rglob("*/*"), ["dirC/dirD/fileD"])
def test_rglob_common(self):
    def _check(glob, expected):
        self.assertEqual(set(glob), {P(BASE, q) for q in expected})
    P = self.cls
    p = P(BASE)
    it = p.rglob("fileA")
    self.assertIsInstance(it, collections.Iterator)
    # XXX cannot test because of symlink loops in the test setup
    #_check(it, ["fileA"])
    #_check(p.rglob("fileB"), ["dirB/fileB"])
    #_check(p.rglob("*/fileA"), [""])
    #_check(p.rglob("*/fileB"), ["dirB/fileB"])
    #_check(p.rglob("file*"), ["fileA", "dirB/fileB"])
    # No symlink loops here
    p = P(BASE, "dirC")
    _check(p.rglob("file*"), ["dirC/fileC", "dirC/dirD/fileD"])
    _check(p.rglob("*/*"), ["dirC/dirD/fileD"])
def first_element(obj):
    """
    Return the first element of `obj`

    Parameters
    ----------
    obj : iterable
        Should not be an iterator

    Returns
    -------
    out : object
        First element of `obj`. Raises a :class:`StopIteration`
        exception if `obj` is empty.
    """
    if isinstance(obj, Iterator):
        raise RuntimeError(
            "Cannot get the first element of an iterator")
    return next(iter(obj))
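A short usage sketch (assuming first_element and its Iterator import are in scope): the guard exists because calling next() on an iterator would silently consume its first element, whereas a list is left untouched.

print(first_element([10, 20, 30]))  # 10; the list is not consumed

gen = (n for n in range(3))
try:
    first_element(gen)  # rejected: peeking would eat the generator's first item
except RuntimeError as exc:
    print(exc)          # Cannot get the first element of an iterator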
def split_strategy(self, many_records) -> collections.Iterator:
    pass
def sumup(mpas, weights=None):
    """Returns the sum of the MPArrays in ``mpas``. Same as

    .. code-block:: python

        functools.reduce(mp.MPArray.__add__, mpas)

    but should be faster as we can get rid of intermediate allocations.

    :param mpas: Iterator over :class:`~MPArray`
    :returns: Sum of ``mpas``

    """
    mpas = list(mpas)
    length = len(mpas[0])
    assert all(len(mpa) == length for mpa in mpas)

    if length == 1:
        if weights is None:
            return MPArray([sum(mpa.lt[0] for mpa in mpas)])
        else:
            return MPArray([sum(w * mpa.lt[0]
                                for w, mpa in zip(weights, mpas))])

    ltensiter = [iter(mpa.lt) for mpa in mpas]
    if weights is None:
        ltens = [np.concatenate([next(lt) for lt in ltensiter], axis=-1)]
    else:
        ltens = [np.concatenate([w * next(lt)
                                 for w, lt in zip(weights, ltensiter)], axis=-1)]
    ltens += [_local_add([next(lt) for lt in ltensiter])
              for _ in range(length - 2)]
    ltens += [np.concatenate([next(lt) for lt in ltensiter], axis=0)]

    return MPArray(ltens)
def _metaclass_check_getitem_type_decl(cls, type_decl):
    # 1. Iterator[T, ...]
    if isinstance(type_decl, tuple):
        for T in type_decl:
            if nontype_object(T):
                return False
        return True
    # 2. Iterator[T]
    elif type_object(type_decl):
        return True
    else:
        return False
def _metaclass_check_instance(cls, instance):
    if cls.partial_cls or not check_type_of_instance(cls, instance):
        return False
    else:
        # is Iterator and not Iterator[...].
        return True
def _class___iter__(self):
    return Iterator[self.partial_cls](iter(self.iterable))
def wrapper_for_deferred_checking(self):
    if not issubclass(self.type_, BasicMagicType):
        return None
    if issubclass(self.type_.main_cls, (abc.Iterator, abc.Callable)) and\
            self.type_.partial_cls:
        return self.type_
    else:
        return None
def test_gen_speckle(self):
    c = Specie("Cu")
    t = Specie("Ti")
    sites = [[[c, c], [t, t]], [[c, t], [t, c]]]
    self.sg = SitesGrid(sites)
    gen = SitesGrid.gen_speckle(Specie("Cu"), (2, 2, 2), Specie("Ti"), 4)
    from collections import Iterator
    self.assertIsInstance(gen, Iterator)
    self.assertIn(self.sg, gen)
    self.assertEqual(next(gen).to_array().sum(), 204)
    self.assertEqual(next(gen).to_array().sum(), 204)
def test_gen_speckle(self):
    c = Specie("Cu")
    t = Specie("Ti")
    sites = [[[c, c], [t, t]], [[c, t], [t, c]]]
    sg = SitesGrid(sites)
    gen = CStru.gen_speckle(self.m, Specie("Cu"), (2, 2, 2), Specie("Ti"), 4)
    from collections import Iterator
    self.assertIsInstance(gen, Iterator)
    self.assertIn(CStru(self.m, sg), gen)
    self.assertEqual(next(gen).get_array().sum(), 204)
    self.assertEqual(next(gen).get_array().sum(), 204)
def run(expressions, args, namespace={}):
    func = exec if args.exec else eval
    for expr in expressions:
        if args.exception_handler:
            exception, handler = tuple(
                i.strip() for i in args.exception_handler.split(':', maxsplit=1))
            try:
                value = func(expr, namespace)
            except __builtins__[exception]:
                try:
                    value = func(handler, namespace)
                except Exception as e:
                    value = handle_errors(e, args)
                    continue
            except Exception as e:
                value = handle_errors(e, args)
                continue
        else:
            try:
                value = func(expr, namespace)
            except Exception as e:
                value = handle_errors(e, args)
                continue
        if not args.exec:
            namespace.update(x=value)
        if not (args.quiet or args.exec):
            if args.join is not None and isinstance(value, collections.Iterable):
                print(ast.literal_eval("'''" + args.join.replace("'", r"\'") + "'''")
                      .join(map(str, value)))
            elif value is None:
                pass
            elif isinstance(value, collections.Iterator):
                for i in value:
                    print_obj(i)
            else:
                indent = None if (args.loop or args.force_oneline_json) else 2
                print_obj(value, indent)
def test_06_iterator(self):
    """calling __iter__() should return an iterator"""
    self.assertIsInstance(self.source.__iter__(), Iterator)
def test_direct_subclassing(self):
    for B in Hashable, Iterable, Iterator, Sized, Container, Callable:
        class C(B):
            pass
        self.assertTrue(issubclass(C, B))
        self.assertFalse(issubclass(int, C))
def test_registration(self):
    for B in Hashable, Iterable, Iterator, Sized, Container, Callable:
        class C:
            __hash__ = None  # Make sure it isn't hashable by default
        self.assertFalse(issubclass(C, B), B.__name__)
        B.register(C)
        self.assertTrue(issubclass(C, B))
def _partially_evaluate(self, addr, simplify=False):
    """
    Return part of the lazy array.
    """
    if self.is_homogeneous:
        if simplify:
            base_val = self.base_value
        else:
            base_val = self._homogeneous_array(addr) * self.base_value
    elif isinstance(self.base_value, (int, long, numpy.integer, float, bool)):
        base_val = self._homogeneous_array(addr) * self.base_value
    elif isinstance(self.base_value, numpy.ndarray):
        base_val = self.base_value[addr]
    elif have_scipy and sparse.issparse(self.base_value):  # For sparse matrices larr[2, :]
        base_val = self.base_value[addr]
    elif callable(self.base_value):
        indices = self._array_indices(addr)
        base_val = self.base_value(*indices)
        if isinstance(base_val, numpy.ndarray) and base_val.shape == (1,):
            base_val = base_val[0]
    elif hasattr(self.base_value, "lazily_evaluate"):
        base_val = self.base_value.lazily_evaluate(addr, shape=self._shape)
    elif isinstance(self.base_value, VectorizedIterable):
        partial_shape = self._partial_shape(addr)
        if partial_shape:
            n = reduce(operator.mul, partial_shape)
        else:
            n = 1
        # note that the array contents will depend on the order of access
        # to elements
        base_val = self.base_value.next(n)
        if n == 1:
            base_val = base_val[0]
        elif partial_shape and base_val.shape != partial_shape:
            base_val = base_val.reshape(partial_shape)
    elif isinstance(self.base_value, collections.Iterator):
        raise NotImplementedError("coming soon...")
    else:
        raise ValueError("invalid base value for array (%s)" % self.base_value)
    return self._apply_operations(base_val, addr, simplify=simplify)
def evaluate(self, simplify=False, empty_val=0):
    """
    Return the lazy array as a real NumPy array.

    If the array is homogeneous and ``simplify`` is ``True``, return a
    single numerical value.
    """
    # need to catch the situation where a generator-based larray is
    # evaluated a second time
    if self.is_homogeneous:
        if simplify:
            x = self.base_value
        else:
            x = self.base_value * numpy.ones(self._shape, dtype=self.dtype)
    elif isinstance(self.base_value, (int, long, numpy.integer, float, bool,
                                      numpy.bool_)):
        x = self.base_value * numpy.ones(self._shape, dtype=self.dtype)
    elif isinstance(self.base_value, numpy.ndarray):
        x = self.base_value
    elif callable(self.base_value):
        x = numpy.array(numpy.fromfunction(self.base_value, shape=self._shape,
                                           dtype=int),
                        dtype=self.dtype)
    elif hasattr(self.base_value, "lazily_evaluate"):
        x = self.base_value.lazily_evaluate(shape=self._shape)
    elif isinstance(self.base_value, VectorizedIterable):
        x = self.base_value.next(self.size)
        if x.shape != self._shape:
            x = x.reshape(self._shape)
    elif have_scipy and sparse.issparse(self.base_value):  # For sparse matrices
        if empty_val != 0:
            x = self.base_value.toarray((sparse.csc_matrix))
            x = numpy.where(x, x, numpy.nan)
        else:
            x = self.base_value.toarray((sparse.csc_matrix))
    elif isinstance(self.base_value, collections.Iterator):
        x = numpy.fromiter(self.base_value, dtype=self.dtype or float,
                           count=self.size)
        if x.shape != self._shape:
            x = x.reshape(self._shape)
    else:
        raise ValueError("invalid base value for array")
    return self._apply_operations(x, simplify=simplify)
def test_Iterator(self):
    non_samples = [None, 42, 3.14, 1j, "".encode('ascii'), "",
                   (), [], {}, set()]
    for x in non_samples:
        self.assertNotIsInstance(x, Iterator)
        self.assertFalse(issubclass(type(x), Iterator), repr(type(x)))
    samples = [iter(str()),
               iter(tuple()), iter(list()), iter(dict()),
               iter(set()), iter(frozenset()),
               iter(dict().keys()), iter(dict().items()),
               iter(dict().values()),
               (lambda: (yield))(),
               (x for x in []),
               ]
    for x in samples:
        self.assertIsInstance(x, Iterator)
        self.assertTrue(issubclass(type(x), Iterator), repr(type(x)))
    self.validate_abstract_methods(Iterator, 'next', '__iter__')

    # Issue 10565
    class NextOnly:
        def __next__(self):
            yield 1
            raise StopIteration
    self.assertNotIsInstance(NextOnly(), Iterator)

    class NextOnlyNew(object):
        def __next__(self):
            yield 1
            raise StopIteration
    self.assertNotIsInstance(NextOnlyNew(), Iterator)
def test_registration(self):
    for B in Hashable, Iterable, Iterator, Sized, Container, Callable:
        class C:
            __metaclass__ = type
            __hash__ = None  # Make sure it isn't hashable by default
        self.assertFalse(issubclass(C, B), B.__name__)
        B.register(C)
        self.assertTrue(issubclass(C, B))
def test_range(self):
    self.assertTrue(isinstance(range(0), Sequence))
    self.assertTrue(isinstance(reversed(range(0)), Iterator))
def set_header_devpi_serial(response, tx):
    if isinstance(response._app_iter, collections.Iterator):
        return
    if tx.commit_serial is not None:
        serial = tx.commit_serial
    else:
        serial = tx.at_serial
    response.headers[str("X-DEVPI-SERIAL")] = str(serial)
def to_params(expr, **kwargs):
    if isinstance(expr, MesosDelayed):
        return expr._params
    if isinstance(expr, (Iterator, list, tuple, set)):
        params = [to_params(e) for e in expr]
        return flat_unique(params)
    if isinstance(expr, dict):
        params = [to_params(e) for e in expr.values()]
        return flat_unique(params)
    return []
def summary(x, axis=None, shorten=False):
    if isinstance(x, Iterator):
        x = list(x)
    if isinstance(x, (tuple, list)):
        x = np.array(x)
    mean, std = np.mean(x, axis=axis), np.std(x, axis=axis)
    median = np.median(x, axis=axis)
    qu1, qu3 = np.percentile(x, [25, 75], axis=axis)
    min_, max_ = np.min(x, axis=axis), np.max(x, axis=axis)
    samples = ', '.join([str(i)
                         for i in np.random.choice(x.ravel(), size=8,
                                                   replace=False).tolist()])
    s = ""
    if not shorten:
        s += "***** Summary *****\n"
        s += "    Min : %s\n" % str(min_)
        s += "1st Qu. : %s\n" % str(qu1)
        s += " Median : %s\n" % str(median)
        s += "   Mean : %.8f\n" % mean
        s += "3rd Qu. : %s\n" % str(qu3)
        s += "    Max : %s\n" % str(max_)
        s += "-------------------\n"
        s += "    Std : %.8f\n" % std
        s += "#Samples : %d\n" % len(x)
        s += "Samples : %s\n" % samples
    else:
        s += "{#:%d|min:%s|qu1:%s|med:%s|mea:%.8f|qu3:%s|max:%s|std:%.8f}" %\
            (len(x), str(min_), str(qu1), str(median), mean,
             str(qu3), str(max_), std)
    return s
def iter_chunk(it, n):
    """ Chunk an iterator into small chunks of size `n`

    Note: this can be used to slice data into mini batches
    """
    if not isinstance(it, Iterator):
        it = iter(it)
    obj = list(islice(it, n))
    while obj:
        yield obj
        obj = list(islice(it, n))
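For example (assuming iter_chunk and its islice import are available), a range can be sliced into mini batches of three, with the final, shorter batch still yielded:

for batch in iter_chunk(range(7), n=3):
    print(batch)
# [0, 1, 2]
# [3, 4, 5]
# [6]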
def flatten_list(x, level=None):
    """
    Parameters
    ----------
    level: int, or None
        how deep the function goes into the elements of x to search for
        lists to flatten. If None is given, flatten all lists found.

    Example
    -------
    >>> l = [1, 2, 3, [4], [[5], [6]], [[7], [[8], [9]]]]
    >>> print(flatten_list(l, level=1))
    >>> # [1, 2, 3, 4, [5], [6], [7], [[8], [9]]]
    >>> print(flatten_list(l, level=2))
    >>> # [1, 2, 3, 4, 5, 6, 7, [8], [9]]
    >>> print(flatten_list(l, level=None))
    >>> # [1, 2, 3, 4, 5, 6, 7, 8, 9]
    """
    if isinstance(x, Iterator):
        x = list(x)
    if level is None:
        level = 10e8
    if not isinstance(x, (tuple, list)):
        return [x]
    if any(isinstance(i, (tuple, list)) for i in x):
        _ = []
        for i in x:
            if isinstance(i, (tuple, list)) and level > 0:
                _ += flatten_list(i, level - 1)
            else:
                _.append(i)
        return _
    return x


# ===========================================================================
# Python
# ===========================================================================
def _validate_texts(self, texts):
    """ Validate the input to `fit` and `transform` """
    if not isinstance(texts, Iterable) and \
            not isinstance(texts, Iterator) and \
            not is_string(texts):
        raise ValueError('texts must be an iterator, generator or a string.')
    if is_string(texts):
        texts = (texts,)
    # convert to unicode
    texts = (t.decode('utf-8') for t in texts)
    return texts

# ==================== properties ==================== #
def test_iterators(self):
    self.assertIsInstance(django_load.iterload('menus'), Iterator)
    toolbars = get_cms_setting('TOOLBARS')
    self.assertIsInstance(django_load.iterload_objects(toolbars), Iterator)
def _streamResponse(self, data, client):
    if isinstance(data, collections.Iterator) or inspect.isgenerator(data):
        if config.ITER_STREAMING:
            if type(data) in self.__lazy_dict_iterator_types:
                raise errors.PyroError("won't serialize or stream lazy dict iterators, convert to list yourself")
            stream_id = str(uuid.uuid4())
            self.streaming_responses[stream_id] = (client, time.time(), 0, data)
            return True, stream_id
        return True, None
    return False, data
def pysub(arg, line, num):
    """substitutes the return value of a python statement for an arg"""
    namespace.update(l=line)
    value = eval(arg, namespace)
    # return multiple args if the return value is a list, tuple or iterator
    if isinstance(value, (list, tuple, collections.Iterator)):
        return value
    return [str(value)]
def test_propclass_propertyNames():
    p = Properties({"key": "value", "apple": "zebra", "foo": "bar"})
    names = p.propertyNames()
    assert isinstance(names, collections.Iterator)
    assert sorted(names) == ["apple", "foo", "key"]