The following 50 code examples, extracted from open-source Python projects, illustrate how to use itertools.repeat().
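itertools.repeat(obj[, times]) yields the same object over and over: forever if times is omitted, otherwise exactly times times. As a quick orientation before the project examples, here is a minimal sketch of both call forms (adapted from the standard library documentation):

import itertools

# With a `times` argument, repeat() yields the object exactly that often.
print(list(itertools.repeat(10, 3)))        # [10, 10, 10]

# Without `times`, it yields the object forever. The infinite stream is
# typically consumed lazily by zip() or map(), never materialized directly.
print(list(map(pow, range(5), itertools.repeat(2))))  # [0, 1, 4, 9, 16]
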
def _hash_comparison(self):
    """
    Return a comparison of actual and expected hash values.

    Example::

        Expected sha256 abcdeabcdeabcdeabcdeabcdeabcdeabcdeabcdeabcde
                     or 123451234512345123451234512345123451234512345
             Got        bcdefbcdefbcdefbcdefbcdefbcdefbcdefbcdefbcdef

    """
    def hash_then_or(hash_name):
        # For now, all the decent hashes have 6-char names, so we can get
        # away with hard-coding space literals.
        return chain([hash_name], repeat('    or'))

    lines = []
    for hash_name, expecteds in iteritems(self.allowed):
        prefix = hash_then_or(hash_name)
        lines.extend(('        Expected %s %s' % (next(prefix), e))
                     for e in expecteds)
        lines.append('             Got        %s\n' %
                     self.gots[hash_name].hexdigest())
        prefix = '    or'
    return '\n'.join(lines)

def elements(self):
    '''Iterator over elements repeating each as many times as its count.

    >>> c = Counter('ABCABC')
    >>> sorted(c.elements())
    ['A', 'A', 'B', 'B', 'C', 'C']

    # Knuth's example for prime factors of 1836:  2**2 * 3**3 * 17**1
    >>> prime_factors = Counter({2: 2, 3: 3, 17: 1})
    >>> product = 1
    >>> for factor in prime_factors.elements():     # loop over factors
    ...     product *= factor                       # and multiply them
    >>> product
    1836

    Note, if an element's count has been set to zero or is a negative
    number, elements() will ignore it.

    '''
    # Emulate Bag.do from Smalltalk and Multiset.begin from C++.
    return _chain.from_iterable(_starmap(_repeat, self.items()))

# Override dict methods where necessary

def attach_pipeline(self, pipeline, name, chunksize=None):
    """
    Register a pipeline to be computed at the start of each day.
    """
    if self._pipelines:
        raise NotImplementedError("Multiple pipelines are not supported.")
    if chunksize is None:
        # Make the first chunk smaller to get more immediate results:
        # (one week, then every half year)
        chunks = iter(chain([5], repeat(126)))
    else:
        chunks = iter(repeat(int(chunksize)))
    self._pipelines[name] = pipeline, chunks

    # Return the pipeline to allow expressions like
    # p = attach_pipeline(Pipeline(), 'name')
    return pipeline

def elements(self):
    '''Iterator over elements repeating each as many times as its count.

    >>> c = Counter('ABCABC')
    >>> sorted(c.elements())
    ['A', 'A', 'B', 'B', 'C', 'C']

    # Knuth's example for prime factors of 1836:  2**2 * 3**3 * 17**1
    >>> prime_factors = Counter({2: 2, 3: 3, 17: 1})
    >>> product = 1
    >>> for factor in prime_factors.elements():     # loop over factors
    ...     product *= factor                       # and multiply them
    >>> product
    1836

    Note, if an element's count has been set to zero or is a negative
    number, elements() will ignore it.

    '''
    # Emulate Bag.do from Smalltalk and Multiset.begin from C++.
    return _chain.from_iterable(_starmap(_repeat, self.iteritems()))

# Override dict methods where necessary

def _group_lengths(grouping):
    """Convert a localeconv-style grouping into a (possibly infinite)
    iterable of integers representing group lengths.

    """
    # The result from localeconv()['grouping'], and the input to this
    # function, should be a list of integers in one of the
    # following three forms:
    #
    #   (1) an empty list, or
    #   (2) a nonempty list of positive integers + [0], or
    #   (3) a list of positive integers + [locale.CHAR_MAX].

    from itertools import chain, repeat
    if not grouping:
        return []
    elif grouping[-1] == 0 and len(grouping) >= 2:
        return chain(grouping[:-1], repeat(grouping[-2]))
    elif grouping[-1] == _locale.CHAR_MAX:
        return grouping[:-1]
    else:
        raise ValueError('unrecognised format for grouping')

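A quick illustration of why repeat() appears in form (2) above: the final positive group size is repeated indefinitely. A minimal sketch, assuming the _group_lengths definition from the example is in scope (the [3, 0] grouping means "groups of three digits", as in 1,234,567):

import itertools

# The infinite stream must be sliced, never fully materialized.
lengths = _group_lengths([3, 0])
print(list(itertools.islice(lengths, 5)))  # [3, 3, 3, 3, 3]
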
def timeit(self, number=default_number):
    """Time 'number' executions of the main statement.

    To be precise, this executes the setup statement once, and
    then returns the time it takes to execute the main statement
    a number of times, as a float measured in seconds.  The
    argument is the number of times through the loop, defaulting
    to one million.  The main statement, the setup statement and
    the timer function to be used are passed to the constructor.
    """
    if itertools:
        it = itertools.repeat(None, number)
    else:
        it = [None] * number
    gcold = gc.isenabled()
    gc.disable()
    timing = self.inner(it, self.timer)
    if gcold:
        gc.enable()
    return timing

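timeit uses itertools.repeat(None, number) rather than range(number) because it yields the same object on every pass, so the timing loop itself allocates nothing per iteration; the standard library's itertools documentation recommends this as the cheapest way to "loop n times". A minimal sketch of the idiom:

import itertools

# Loop exactly n times without creating n distinct loop-index objects.
count = 0
for _ in itertools.repeat(None, 1000):
    count += 1
print(count)  # 1000
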
def repeat(self, repeat=default_repeat, number=default_number):
    """Call timeit() a few times.

    This is a convenience function that calls the timeit()
    repeatedly, returning a list of results.  The first argument
    specifies how many times to call timeit(), defaulting to 3;
    the second argument specifies the number argument, defaulting
    to one million.

    Note: it's tempting to calculate mean and standard deviation
    from the result vector and report these.  However, this is not
    very useful.  In a typical case, the lowest value gives a
    lower bound for how fast your machine can run the given code
    snippet; higher values in the result vector are typically not
    caused by variability in Python's speed, but by other
    processes interfering with your timing accuracy.  So the min()
    of the result is probably the only number you should be
    interested in.  After that, you should look at the entire
    vector and apply common sense rather than statistics.
    """
    r = []
    for i in range(repeat):
        t = self.timeit(number)
        r.append(t)
    return r

def _readBitwiseImageData(bitmapObject, name, attrs, content, ttFont):
    bitDepth = safeEval(attrs['bitDepth'])
    metrics = SmallGlyphMetrics()
    metrics.width = safeEval(attrs['width'])
    metrics.height = safeEval(attrs['height'])

    # A dict for mapping from ASCII to binary. All characters are considered
    # a '1' except space, period and '0' which maps to '0'.
    binaryConv = {' ': '0', '.': '0', '0': '0'}

    dataRows = []
    for element in content:
        if not isinstance(element, tuple):
            continue
        name, attr, content = element
        if name == 'row':
            mapParams = zip(attr['value'], itertools.repeat('1'))
            rowData = strjoin(itertools.starmap(binaryConv.get, mapParams))
            dataRows.append(_binary2data(rowData))

    bitmapObject.setRows(dataRows, bitDepth=bitDepth, metrics=metrics,
                         reverseBytes=True)

def for_genre(genre, num):
    pool = ThreadPool(multiprocessing.cpu_count() - 1)
    nums = list(range(1, num))
    results = pool.starmap(soupit, zip(nums, itertools.repeat(genre)))
    pool.close()
    pool.join()

    # Build up the list of urls with the results of all the sub-processes
    # that succeeded, in a single list.
    new_results = []
    for j in results:
        if j:
            for i in j:
                new_results.append(i)

    pool = ThreadPool(multiprocessing.cpu_count() - 1)
    pool.starmap(dwnld, zip(enumerate(new_results), itertools.repeat(genre)))
    pool.close()
    pool.join()

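The zip(items, itertools.repeat(constant)) pattern above is the standard way to hand a fixed extra argument to every starmap() call; several later examples (save_images, restore_required_config_elements, main) use the same trick. A minimal, self-contained sketch with a hypothetical worker function and tag:

import itertools
from multiprocessing.pool import ThreadPool

def label(value, tag):
    # Hypothetical worker: combine each value with the constant tag.
    return '%s:%s' % (tag, value)

with ThreadPool(2) as pool:
    results = pool.starmap(label, zip([1, 2, 3], itertools.repeat('jazz')))
print(results)  # ['jazz:1', 'jazz:2', 'jazz:3']
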
def delete_lines(self, count=None):
    """Deletes the indicated # of lines, starting at line with cursor.

    As lines are deleted, lines displayed below cursor move up. Lines
    added to bottom of screen have spaces with same character
    attributes as last line moved up.

    :param int count: number of lines to delete.
    """
    count = count or 1
    top, bottom = self.margins

    # If the cursor is outside the scrolling margins, do nothing.
    if top <= self.cursor.y <= bottom:
        #                v -- +1 to include the bottom margin.
        for _ in range(min(bottom - self.cursor.y + 1, count)):
            self.buffer.pop(self.cursor.y)
            self.buffer.insert(bottom, list(
                repeat(self.cursor.attrs, self.columns)))

        self.carriage_return()

def matchPreviousLiteral(expr):
    """Helper to define an expression that is indirectly defined from
       the tokens matched in a previous expression, that is, it looks
       for a 'repeat' of a previous expression.  For example::
           first = Word(nums)
           second = matchPreviousLiteral(first)
           matchExpr = first + ":" + second
       will match C{"1:1"}, but not C{"1:2"}.  Because this matches a
       previous literal, will also match the leading C{"1:1"} in C{"1:10"}.
       If this is not desired, use C{matchPreviousExpr}.
       Do *not* use with packrat parsing enabled.
    """
    rep = Forward()
    def copyTokenToRepeater(s, l, t):
        if t:
            if len(t) == 1:
                rep << t[0]
            else:
                # flatten t tokens
                tflat = _flatten(t.asList())
                rep << And([Literal(tt) for tt in tflat])
        else:
            rep << Empty()
    expr.addParseAction(copyTokenToRepeater, callDuringTry=True)
    return rep

def matchPreviousExpr(expr):
    """Helper to define an expression that is indirectly defined from
       the tokens matched in a previous expression, that is, it looks
       for a 'repeat' of a previous expression.  For example::
           first = Word(nums)
           second = matchPreviousExpr(first)
           matchExpr = first + ":" + second
       will match C{"1:1"}, but not C{"1:2"}.  Because this matches by
       expressions, will *not* match the leading C{"1:1"} in C{"1:10"};
       the expressions are evaluated first, and then compared, so
       C{"1"} is compared with C{"10"}.
       Do *not* use with packrat parsing enabled.
    """
    rep = Forward()
    e2 = expr.copy()
    rep <<= e2
    def copyTokenToRepeater(s, l, t):
        matchTokens = _flatten(t.asList())
        def mustMatchTheseTokens(s, l, t):
            theseTokens = _flatten(t.asList())
            if theseTokens != matchTokens:
                raise ParseException("", 0, "")
        rep.setParseAction(mustMatchTheseTokens, callDuringTry=True)
    expr.addParseAction(copyTokenToRepeater, callDuringTry=True)
    return rep

def eye(sites, ldim):
    """Returns a MPA representing the identity matrix

    :param sites: Number of sites
    :param ldim: Int-like local dimension or iterable of local dimensions
    :returns: Representation of the identity matrix as MPA

    >>> I = eye(4, 2)
    >>> I.ranks, I.shape
    ((1, 1, 1), ((2, 2), (2, 2), (2, 2), (2, 2)))

    >>> I = eye(3, (3, 4, 5))
    >>> I.shape
    ((3, 3), (4, 4), (5, 5))

    """
    if isinstance(ldim, collections.Iterable):
        ldim = tuple(ldim)
        assert len(ldim) == sites
    else:
        ldim = it.repeat(ldim, sites)
    return mp.MPArray.from_kron(map(np.eye, ldim))

def axis_iter(self, axes=0):
    """Returns an iterator yielding Sub-MPArrays of ``self`` by iterating
    over the specified physical axes.

    **Example:** If ``self`` represents a bipartite (i.e. length 2)
    array with 2 physical dimensions on each site
    ``A[(k,l), (m,n)]``, ``self.axis_iter(0)`` is equivalent to::

        (A[(k, :), (m, :)] for m in range(...) for k in range(...))

    :param axes: Iterable or int specifying the physical axes to
        iterate over (default 0 for each site)
    :returns: Iterator over :class:`.MPArray`

    """
    if not isinstance(axes, collections.Iterable):
        axes = it.repeat(axes, len(self))

    ltens_iter = it.product(*(iter(np.rollaxis(lten, i + 1))
                              for i, lten in zip(axes, self.lt)))
    return (MPArray(ltens) for ltens in ltens_iter)

##########################
#  Algebraic operations  #
##########################

def reshape(self, newshapes):
    """Reshape physical legs in place.

    Use :py:attr:`~shape` to obtain the shape of the physical legs.

    :param newshapes: A single new shape or a list of new shapes.
        Alternatively, you can pass 'prune' to get rid of all legs
        of dimension 1.
    :returns: Reshaped MPA

    .. todo:: Why is this here? What's wrong with the prune function?

    """
    if newshapes == 'prune':
        newshapes = (tuple(s for s in pdim if s > 1) for pdim in self.shape)

    newshapes = tuple(newshapes)
    if not isinstance(newshapes[0], collections.Iterable):
        newshapes = it.repeat(newshapes, times=len(self))

    ltens = [_local_reshape(lten, newshape)
             for lten, newshape in zip(self._lt, newshapes)]
    return MPArray(LocalTensors(ltens, cform=self.canonical_form))

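eye(), axis_iter() and reshape() above all share the same normalization idiom: a scalar parameter is broadcast to one value per site with repeat(x, times=n), while an iterable is used as given. A minimal standalone sketch of that pattern (the normalize_per_site name is hypothetical, not part of the library):

import collections.abc
import itertools

def normalize_per_site(value, nr_sites):
    # Hypothetical helper: accept either one value for all sites or an
    # explicit per-site iterable; always return a length-nr_sites tuple.
    if isinstance(value, collections.abc.Iterable):
        result = tuple(value)
        assert len(result) == nr_sites
        return result
    return tuple(itertools.repeat(value, nr_sites))

print(normalize_per_site(2, 4))          # (2, 2, 2, 2)
print(normalize_per_site((3, 4, 5), 3))  # (3, 4, 5)
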
def _get_povm(name, nr_sites, local_dim, local_width):
    if name == 'global':
        return povm.pauli_mpps(local_width, local_dim).repeat(nr_sites)
    elif name == 'splitpauli':
        return povm.pauli_mpps(local_width, local_dim).block(nr_sites)
    elif name == 'pauli':
        return povm.pauli_mpp(local_width, local_dim).block(nr_sites)
    elif name == "all-y":
        return povm.MPPovmList([povm.MPPovm.from_local_povm(
            povm.pauli_parts(local_dim)[1], nr_sites)])
    elif name == "local-x":
        return povm.MPPovmList([
            povm.MPPovm.from_local_povm(
                povm.pauli_parts(local_dim)[0], local_width)
            .embed(nr_sites, 0, local_dim)
        ])
    else:
        raise ValueError('Unknown MP-POVM list {!r}'.format(name))

def timeit(self, number=default_number):
    """Time 'number' executions of the main statement.

    To be precise, this executes the setup statement once, and
    then returns the time it takes to execute the main statement
    a number of times, as a float measured in seconds.  The
    argument is the number of times through the loop, defaulting
    to one million.  The main statement, the setup statement and
    the timer function to be used are passed to the constructor.
    """
    if itertools:
        it = itertools.repeat(None, number)
    else:
        it = [None] * number
    gcold = gc.isenabled()
    gc.disable()
    try:
        timing = self.inner(it, self.timer)
    finally:
        if gcold:
            gc.enable()
    return timing

def tearDown(self):
    if (self.src_before and self.optimizations and self.src_check != ()):
        if not isinstance(self.src_before, (list, tuple)):
            self.src_before = (self.src_before, )
        if not isinstance(self.src_check, (list, tuple)):
            self.src_check = repeat(self.src_check, times=len(self.src_before))
        if not isinstance(self.optimizations, (list, tuple)):
            self.optimizations = (self.optimizations, )

        for src_before, src_check in zip(self.src_before, self.src_check):
            src_after = src_before
            for optimization in self.optimizations:
                src_after = self.optimize(src_after, optimization)
            self.assertEqual(src_check, src_after)
    else:
        self.assertTrue(False, msg='Not all required parameters were specified')

    self.src_before = ()
    self.optimizations = ()
    self.src_check = ()

def populate_obj(self, obj, name):
    values = getattr(obj, name, None)
    try:
        ivalues = iter(values)
    except TypeError:
        ivalues = iter([])

    candidates = itertools.chain(ivalues, itertools.repeat(None))
    _fake = type(str('_fake'), (object, ), {})
    output = []
    for field, data in izip(self.entries, candidates):
        fake_obj = _fake()
        fake_obj.data = data
        field.populate_obj(fake_obj, 'data')
        output.append(fake_obj.data)

    setattr(obj, name, output)

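The chain(ivalues, repeat(None)) expression above is a compact way to right-pad a possibly-too-short sequence: zip() stops at the shorter input, so padding the data stream with an endless supply of None guarantees every form entry receives a value. The tokenize() example further down uses the same trick with repeat(b"") to feed empty lines forever once the real input is exhausted. A minimal sketch with made-up field and data lists:

from itertools import chain, repeat

fields = ['name', 'email', 'phone', 'fax']
data = ['Ada', 'ada@example.com']

# zip() stops when `fields` runs out, so the endless None padding
# ensures every field gets paired with something.
padded = chain(data, repeat(None))
print(list(zip(fields, padded)))
# [('name', 'Ada'), ('email', 'ada@example.com'), ('phone', None), ('fax', None)]
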
def make_subset(coocc_features, x_axis, y_axis):
    logsource = np.log(coocc_features.ix[x_axis][y_axis] + 1)
    x_sorted = logsource.ix[logsource.sum(axis=1).sort_values(ascending=False).index]
    y_sorted = x_sorted.T.ix[x_sorted.T.sum(axis=1).sort_values(ascending=False).index]
    logsource = y_sorted.T.ix[:25, :25]

    n_cols = len(logsource.columns)
    n_rows = len(logsource.index)

    df = pd.DataFrame()
    df["x"] = list(itertools.chain.from_iterable(
        list(itertools.repeat(i, times=n_cols)) for i in logsource.index))
    df["y"] = list(itertools.chain.from_iterable(
        list(itertools.repeat(logsource.stack().index.levels[1].values,
                              times=n_rows))))
    df["counts"] = logsource.stack().values
    df["raw"] = df["counts"].map(np.exp) - 1
    df.sort_values("counts", ascending=False, inplace=True)

    new_axis_factors = logsource.index.values.tolist()
    return df, new_axis_factors, new_axis_factors

def save_images(nifti_files, anat, roi_dict, out_dir, **kwargs):
    '''Saves multiple nifti images using multiprocessing.

    Uses `multiprocessing`.

    Args:
        nifti_files (list): list of nifti file paths.
        anat (nipy.core.api.image.image.Image): anatomical image.
        roi_dict (dict): dictionary of cluster dictionaries.
        out_dir (str): output directory path.
        **kwargs: extra keyword arguments.

    '''
    p = mp.Pool(30)
    idx = [int(f.split('/')[-1].split('.')[0]) for f in nifti_files]
    args_iter = itertools.izip(nifti_files,
                               itertools.repeat(anat),
                               [roi_dict[i] for i in idx],
                               [path.join(out_dir, '%d.png' % i) for i in idx],
                               idx)
    p.map(save_helper, args_iter)
    p.close()
    p.join()

def restore_required_config_elements(config, renewalparams):
    """Sets non-plugin specific values in config from renewalparams

    :param configuration.NamespaceConfig config: configuration for the
        current lineage
    :param configobj.Section renewalparams: parameters from the renewal
        configuration file that defines this lineage

    """
    required_items = itertools.chain(
        (("pref_challs", _restore_pref_challs),),
        six.moves.zip(BOOL_CONFIG_ITEMS, itertools.repeat(_restore_bool)),
        six.moves.zip(INT_CONFIG_ITEMS, itertools.repeat(_restore_int)),
        six.moves.zip(STR_CONFIG_ITEMS, itertools.repeat(_restore_str)))
    for item_name, restore_func in required_items:
        if item_name in renewalparams and not cli.set_by_cli(item_name):
            value = restore_func(item_name, renewalparams[item_name])
            setattr(config, item_name, value)

def tokenize(readline):
    """
    The tokenize() generator requires one argument, readline, which
    must be a callable object which provides the same interface as the
    readline() method of built-in file objects.  Each call to the function
    should return one line of input as bytes.  Alternatively, readline
    can be a callable function terminating with StopIteration:
        readline = open(myfile, 'rb').__next__  # Example of alternate readline

    The generator produces 5-tuples with these members: the token type; the
    token string; a 2-tuple (srow, scol) of ints specifying the row and
    column where the token begins in the source; a 2-tuple (erow, ecol) of
    ints specifying the row and column where the token ends in the source;
    and the line on which the token was found.  The line passed is the
    logical line; continuation lines are included.

    The first token sequence will always be an ENCODING token
    which tells you which encoding was used to decode the bytes stream.
    """
    # This import is here to avoid problems when the itertools module is not
    # built yet and tokenize is imported.
    from itertools import chain, repeat
    encoding, consumed = detect_encoding(readline)
    rl_gen = iter(readline, b"")
    empty = repeat(b"")
    return _tokenize(chain(consumed, rl_gen, empty).__next__, encoding)

def __iter__(self):
    r"""Returns an iterator that yields a tuple of all the available data
    for each frame.

    Called whenever code iterates over a tracklet object, e.g. in

        for translation, rotation, state, occlusion, truncation, \
                amtOcclusion, amtBorders, absoluteFrameNumber in trackletObj:
            ...do something...

    or

        trackDataIter = iter(trackletObj)
    """
    if self.amtOccs is None:
        return zip(self.trans, self.rots, self.states, self.occs, self.truncs,
                   itertools.repeat(None), itertools.repeat(None),
                   range(self.firstFrame, self.firstFrame + self.nFrames))
    else:
        return zip(self.trans, self.rots, self.states, self.occs, self.truncs,
                   self.amtOccs, self.amtBorders,
                   range(self.firstFrame, self.firstFrame + self.nFrames))
# end: class Tracklet

def main():
    input_dir, output_dir = getDirs()
    table_list = listFiles(input_dir)
    concurrency = cpu_count()
    print 'Using {0:d} Processes'.format(concurrency)
    pool = Pool(concurrency)

    # Perform the passed-in write action (function) for each csv row.
    time_capture = TimeCapture(time.time())
    results = pool.map(
        multiprocess,
        izip(repeat(output_dir),
             [copy.deepcopy(time_capture) for i in range(len(table_list))],
             table_list,
             repeat(write)))
    time_capture.end(1)

    pool.close()
    pool.join()
    print 'Finished Successfully!'
    displayResults(results, time_capture.total_time)