我们从Python开源项目中,提取了以下50个代码示例,用于说明如何使用collections.Iterable()。
def default(self, obj):
    """Fallback serializer for objects the stock ``json.JSONEncoder`` rejects.

    Tries, in order: an object-provided ``__json__`` hook, datetimes
    (ISO-8601 string), mapping-likes (coerced to ``dict``), generic
    iterables (coerced to ``list``), and finally plain objects (public,
    non-callable attributes collected into a dict).  Falls through to the
    base class, which raises ``TypeError``.
    """
    # collections.Iterable was removed from the collections namespace in
    # Python 3.10; use collections.abc instead.
    from collections.abc import Iterable

    if hasattr(obj, '__json__'):
        return obj.__json__()
    elif isinstance(obj, datetime):
        return obj.isoformat()
    elif hasattr(obj, '__getitem__') and hasattr(obj, 'keys'):
        # Check mapping-likes BEFORE the generic Iterable branch: mappings
        # are iterable, so the old order could never reach this case and
        # silently serialized dict-likes as a list of their keys.
        return dict(obj)
    elif isinstance(obj, Iterable):
        return list(obj)
    elif hasattr(obj, '__dict__'):
        # Generic object: expose public, non-callable attributes.
        return {member: getattr(obj, member)
                for member in dir(obj)
                if not member.startswith('_')
                and not callable(getattr(obj, member))}
    return json.JSONEncoder.default(self, obj)
def __init__( self, exprs, savelist = False ):
    # Build a ParseExpression from a string, a generator, an iterable of
    # expressions/strings, or a single expression object.
    # NOTE(review): Python 2 code — `basestring` and `collections.Iterable`
    # do not exist on Python 3 (the latter was removed in 3.10).
    super(ParseExpression,self).__init__(savelist)
    # Generators are consumed once; materialize before the type checks below.
    if isinstance( exprs, _generatorType ):
        exprs = list(exprs)
    if isinstance( exprs, basestring ):
        # A bare string becomes a single Literal-wrapped expression.
        self.exprs = [ ParserElement._literalStringClass( exprs ) ]
    elif isinstance( exprs, collections.Iterable ):
        exprs = list(exprs)
        # if sequence of strings provided, wrap with Literal
        if all(isinstance(expr, basestring) for expr in exprs):
            exprs = map(ParserElement._literalStringClass, exprs)
        self.exprs = list(exprs)
    else:
        # Last resort: try to treat it as a sequence, else wrap the single
        # expression object in a one-element list.
        try:
            self.exprs = list( exprs )
        except TypeError:
            self.exprs = [ exprs ]
    self.callPreparse = False
def query_or(cls, query, *values_list, **annotations):
    """Filter ``cls`` objects by ``query`` and return annotated value dicts.

    ``query`` may be a single Q object or an iterable of Q objects, which
    are OR-ed together.  Remaining keyword arguments are treated as
    ``alias=source_field`` annotations.  Pass ``pop_annotations=True`` to
    strip the source field names from every result row afterwards.
    """
    pop_annotations = annotations.pop('pop_annotations', False)

    # The original field names double as the keys removed from each row
    # when pop_annotations is requested.
    annotated_keys = annotations.values()
    annotations = {alias: F(source) for alias, source in annotations.items()}

    if isinstance(query, Iterable):
        query = reduce(or_, query)

    result = cls.objects.filter(query).values(*values_list).annotate(**annotations)

    if pop_annotations:
        for row in result:
            for source in annotated_keys:
                row.pop(source)

    return result
def to_players(self, *players):
    """
    Set the destination of the chat message.

    :param players: Player instance(s) or player login string(s). Can be a list, or a single entry.
    :return: Self reference.
    :rtype: pyplanet.contrib.chat.query.ChatQuery
    """
    # collections.Iterable / collections.Sized were removed from the
    # collections namespace in Python 3.10; use collections.abc instead.
    from collections.abc import Iterable, Sized

    # Unpack list in unpacked list if given.
    if len(players) == 1 and isinstance(players[0], Iterable):
        players = players[0]

    # Replace logins.
    if isinstance(players, Player):
        self._logins = set()
        self._logins.add(players.login)
    elif isinstance(players, str):
        self._logins = set()
        self._logins.add(players)
    elif isinstance(players, Iterable) and isinstance(players, Sized):
        self._logins = set()
        self.add_to(players)

    return self
def add_to(self, *players):
    """
    Add new recipient(s) to the to list.

    :param players: Player login string(s) or player instance(s).
    :return: Self reference.
    :rtype: pyplanet.contrib.chat.query.ChatQuery
    """
    # collections.Iterable was removed from the collections namespace in
    # Python 3.10; use collections.abc instead.
    from collections.abc import Iterable

    # Unpack list in unpacked list if given.
    if len(players) == 1 and isinstance(players[0], Iterable):
        players = players[0]

    # Check if we already have a login set; create it otherwise.
    if not isinstance(self._logins, set):
        self._logins = set()

    for obj in players:
        if isinstance(obj, Player):
            self._logins.add(obj.login)
        elif isinstance(obj, str):
            self._logins.add(obj)

    return self
def parseXML(xmlSnippet): """Parses a snippet of XML. Input can be either a single string (unicode or UTF-8 bytes), or a a sequence of strings. The result is in the same format that would be returned by XMLReader, but the parser imposes no constraints on the root element so it can be called on small snippets of TTX files. """ # To support snippets with multiple elements, we add a fake root. reader = TestXMLReader_() xml = b"<root>" if isinstance(xmlSnippet, bytes): xml += xmlSnippet elif isinstance(xmlSnippet, unicode): xml += tobytes(xmlSnippet, 'utf-8') elif isinstance(xmlSnippet, collections.Iterable): xml += b"".join(tobytes(s, 'utf-8') for s in xmlSnippet) else: raise TypeError("expected string or sequence of strings; found %r" % type(xmlSnippet).__name__) xml += b"</root>" reader.parser.Parse(xml, 0) return reader.root[2]
def eye(sites, ldim):
    """Returns a MPA representing the identity matrix

    :param sites: Number of sites
    :param ldim: Int-like local dimension or iterable of local dimensions
    :returns: Representation of the identity matrix as MPA

    >>> I = eye(4, 2)
    >>> I.ranks, I.shape
    ((1, 1, 1), ((2, 2), (2, 2), (2, 2), (2, 2)))

    >>> I = eye(3, (3, 4, 5))
    >>> I.shape
    ((3, 3), (4, 4), (5, 5))
    """
    # collections.Iterable was removed from the collections namespace in
    # Python 3.10; use collections.abc instead.
    from collections.abc import Iterable

    if isinstance(ldim, Iterable):
        ldim = tuple(ldim)
        assert len(ldim) == sites
    else:
        # Scalar dimension: replicate it once per site.
        ldim = it.repeat(ldim, sites)
    return mp.MPArray.from_kron(map(np.eye, ldim))
def axis_iter(self, axes=0):
    """Returns an iterator yielding Sub-MPArrays of ``self`` by iterating
    over the specified physical axes.

    **Example:** If ``self`` represents a bipartite (i.e. length 2)
    array with 2 physical dimensions on each site ``A[(k,l), (m,n)]``,
    ``self.axis_iter(0)`` is equivalent to::

        (A[(k, :), (m, :)] for m in range(...) for k in range(...))

    :param axes: Iterable or int specifiying the physical axes to iterate
        over (default 0 for each site)
    :returns: Iterator over :class:`.MPArray`
    """
    # collections.Iterable was removed from the collections namespace in
    # Python 3.10; use collections.abc instead.
    from collections.abc import Iterable

    if not isinstance(axes, Iterable):
        # A single int applies to every site.
        axes = it.repeat(axes, len(self))
    # i + 1 skips the virtual (bond) leg at axis 0 of each local tensor.
    ltens_iter = it.product(*(iter(np.rollaxis(lten, i + 1))
                              for i, lten in zip(axes, self.lt)))
    return (MPArray(ltens) for ltens in ltens_iter)
def reshape(self, newshapes):
    """Reshape physical legs in place.

    Use :py:attr:`~shape` to obtain the shape of the physical legs.

    :param newshapes: A single new shape or a list of new shapes.
        Alternatively, you can pass 'prune' to get rid of all legs of
        dimension 1.
    :returns: Reshaped MPA

    .. todo:: Why is this here? What's wrong with the prune function?
    """
    # collections.Iterable was removed from the collections namespace in
    # Python 3.10; use collections.abc instead.
    from collections.abc import Iterable

    if newshapes == 'prune':
        # Drop all singleton physical legs on every site.
        newshapes = (tuple(s for s in pdim if s > 1) for pdim in self.shape)

    newshapes = tuple(newshapes)
    if not isinstance(newshapes[0], Iterable):
        # A single shape applies to every site.
        newshapes = it.repeat(newshapes, times=len(self))

    ltens = [_local_reshape(lten, newshape)
             for lten, newshape in zip(self._lt, newshapes)]
    return MPArray(LocalTensors(ltens, cform=self.canonical_form))
def default_collate(batch):
    """Puts each data field into a tensor with outer dimension batch size.

    :param batch: list of samples — tensors, ints, floats, strings, or
        tuples/lists of those (collated recursively, element-wise).
    :raises TypeError: for unsupported element types.
    """
    # collections.Iterable was removed from the collections namespace in
    # Python 3.10; use collections.abc instead.
    from collections.abc import Iterable

    if torch.is_tensor(batch[0]):
        # Stack tensors along a new leading batch dimension.
        return torch.cat([t.view(1, *t.size()) for t in batch], 0)
    elif isinstance(batch[0], int):
        return torch.LongTensor(batch)
    elif isinstance(batch[0], float):
        return torch.DoubleTensor(batch)
    elif isinstance(batch[0], str):
        # Strings are iterable; keep them as-is rather than collating chars.
        return batch
    elif isinstance(batch[0], Iterable):
        # if each batch element is not a tensor, then it should be a tuple
        # of tensors; in that case we collate each element in the tuple
        transposed = zip(*batch)
        return [default_collate(samples) for samples in transposed]

    raise TypeError(("batch must contain tensors, numbers, or lists; found {}"
                     .format(type(batch[0]))))
def _encode_params(**kw):
    '''
    do url-encode parameters

    >>> _encode_params(a=1, b='R&D')
    'a=1&b=R%26D'
    >>> _encode_params(a=u'\u4e2d\u6587', b=['A', 'B', 123])
    'a=%E4%B8%AD%E6%96%87&b=A&b=B&b=123'
    '''
    # NOTE(review): Python 2-only code — `iteritems`, `basestring`,
    # `unicode` and `urllib.quote` do not exist on Python 3, and
    # `collections.Iterable` was removed in Python 3.10.
    args = []
    for k, v in kw.iteritems():
        if isinstance(v, basestring):
            # Encode unicode to UTF-8 bytes before percent-quoting.
            qv = v.encode('utf-8') if isinstance(v, unicode) else v
            args.append('%s=%s' % (k, urllib.quote(qv)))
        elif isinstance(v, collections.Iterable):
            # Sequences produce one key=value pair per element.
            for i in v:
                qv = i.encode('utf-8') if isinstance(i, unicode) else str(i)
                args.append('%s=%s' % (k, urllib.quote(qv)))
        else:
            # Anything else (numbers etc.) is stringified.
            qv = str(v)
            args.append('%s=%s' % (k, urllib.quote(qv)))
    return '&'.join(args)
def json2xlsx(xlsx_path,json_path):
    # Convert a flat JSON object into an XLSX worksheet: one row per
    # top-level key, with the key in column 1 and its value(s) in the
    # following column(s).
    wb=Workbook()
    ws1=wb.active
    # Use the xlsx file's base name (sans extension) as the sheet title.
    # NOTE(review): the '\\' split assumes a Windows-style path — confirm.
    ws1.title=xlsx_path.split('.')[0].split('\\')[-1]
    with open(json_path) as f:
        fulljson=f.read()
    # object_pairs_hook=OrderedDict preserves the key order of the JSON file.
    j=json.loads(fulljson,object_pairs_hook=OrderedDict)
    for row,row_val in enumerate(j):
        _ = ws1.cell(row=row+1,column=1,value=row_val)
        # Iterable values (excluding strings, which are also iterable) are
        # spread across columns 2..N; everything else goes in column 2.
        if isinstance(j[row_val],Iterable) and not isinstance(j[row_val],str):
            for col,col_val in enumerate(j[row_val]):
                _= ws1.cell(row=row+1,column=col+2,value=col_val)
        else:
            _ = ws1.cell(row=row+1,column=2,value=j[row_val])
    wb.save(xlsx_path)
def sort_sections(self, order):
    """
    Sort sections according to the section names in the order list. All
    remaining sections are added to the end in their original order.

    :param order: Iterable of section names
    :return:
    """
    wanted = [name.lower() for name in order]

    # Sections named in `order` come first, in that order...
    reordered = OrderedDict(
        (name, self.sections[name]) for name in wanted if name in self.sections
    )
    # ...followed by everything else, keeping the original relative order.
    for name in self.sections:
        if name not in wanted:
            reordered[name] = self.sections[name]

    # Sanity check: nothing was lost or duplicated by the reordering.
    assert len(self.sections) == len(reordered)
    self.sections = reordered
def _metaclass_check_getitem_type_decl(cls, type_decl):
    """Validate a ``Callable[[T, ...], T]`` style declaration.

    Returns True when ``type_decl`` is a 2-tuple whose second item is a
    type object and whose first item is either an iterable of type objects
    or the ``Ellipsis`` literal.
    """
    # Must be a 2-tuple: (parameter spec, return type).
    if not check_getitem_tuple(type_decl, 2):
        return False
    params = type_decl[0]
    rtype = type_decl[1]
    # The return type must be a type object.
    if nontype_object(rtype):
        return False
    # Parameter spec: either an iterable of types, or the special Ellipsis.
    if isinstance(params, abc.Iterable):
        return all(not nontype_object(T) for T in params)
    return params is Ellipsis
def find(self, **kwargs):
    """
    A helper method for navigating lists of dicts on the page.

    The kwargs parameter is used to pass requirements for matching the
    nested dictionary keys. All key-values must match.

    Args:
        kwargs - matching requirements

    Returns:
        An APIElement matching the filter or None if nothing matched

    Raises:
        CRESTException: when ``self.data`` is not iterable.
    """
    # collections.Iterable was removed from the collections namespace in
    # Python 3.10; use collections.abc instead.
    from collections.abc import Iterable

    if not isinstance(self.data, Iterable):
        raise CRESTException('Can not iterate on an ' + str(type(self.data)))
    for element in self.data:
        if all(element[key] == value for key, value in kwargs.items()):
            # Wrap containers so callers can keep navigating; return
            # scalar matches as-is.
            if type(element) in (dict, list):
                return APIElement(self.url, element, self._preston)
            return element
    return None
def on_epoch_end(self, epoch, logs=None):
    """Write one CSV row with the metrics collected for ``epoch``.

    :param epoch: Epoch index, written to the 'epoch' column.
    :param logs: Mapping of metric name -> value for this epoch.
    """
    # `logs={}` was a shared mutable default argument; use None instead.
    logs = logs or {}

    def handle_value(k):
        # A 0-d ndarray is a scalar: let csv stringify it directly instead
        # of formatting it as an (empty) bracketed list.
        is_zero_dim_ndarray = isinstance(k, np.ndarray) and k.ndim == 0
        if isinstance(k, Iterable) and not is_zero_dim_ndarray:
            return '"[%s]"' % (', '.join(map(str, k)))
        else:
            return k

    if not self.writer:
        # Lazily create the writer so the header matches the first epoch's
        # metric keys.
        self.keys = sorted(logs.keys())
        self.writer = csv.DictWriter(self.csv_file,
                                     fieldnames=['epoch'] + self.keys)
        if self.append_header:
            self.writer.writeheader()

    row_dict = OrderedDict({'epoch': epoch})
    row_dict.update((key, handle_value(logs[key])) for key in self.keys)
    self.writer.writerow(row_dict)
    self.csv_file.flush()
def subscribe(id_or_symbols):
    """
    Subscribe instrument(s) so their bar data is fed to ``handle_bar``.

    :param id_or_symbols: instrument(s) to subscribe
    :type id_or_symbols: :class:`~Instrument` object | `str` | List[:class:`~Instrument`] | List[`str`]
    """
    env = Environment.get_instance()
    universe = env.universe
    if isinstance(id_or_symbols, six.string_types):
        # A single symbol string: resolve it to an order book id.
        universe.add(instruments(id_or_symbols).order_book_id)
    elif isinstance(id_or_symbols, Instrument):
        universe.add(id_or_symbols.order_book_id)
    elif isinstance(id_or_symbols, Iterable):
        for entry in id_or_symbols:
            universe.add(assure_order_book_id(entry))
    else:
        raise RQInvalidArgument(_("unsupported order_book_id type"))
    verify_that('id_or_symbols')._are_valid_instruments("subscribe", id_or_symbols)
    env.update_universe(universe)
def unsubscribe(id_or_symbols):
    """
    Remove instrument(s) from the subscribed universe.

    :param id_or_symbols: instrument(s) to unsubscribe
    :type id_or_symbols: :class:`~Instrument` object | `str` | List[:class:`~Instrument`] | List[`str`]
    """
    env = Environment.get_instance()
    universe = env.universe
    if isinstance(id_or_symbols, six.string_types):
        # A single symbol string: resolve it to an order book id.
        universe.discard(instruments(id_or_symbols).order_book_id)
    elif isinstance(id_or_symbols, Instrument):
        universe.discard(id_or_symbols.order_book_id)
    elif isinstance(id_or_symbols, Iterable):
        for entry in id_or_symbols:
            universe.discard(assure_order_book_id(entry))
    else:
        raise RQInvalidArgument(_("unsupported order_book_id type"))
    env.update_universe(universe)
def subscribe(id_or_symbols):
    """
    Subscribe instrument(s) so their bar data is fed to ``handle_bar``.

    :param id_or_symbols: instrument(s) to subscribe
    :type id_or_symbols: :class:`~Instrument` object | `str` | List[:class:`~Instrument`] | List[`str`]
    """
    env = Environment.get_instance()
    universe = env.get_universe()
    if isinstance(id_or_symbols, six.string_types):
        # A single symbol string: resolve it to an order book id.
        universe.add(instruments(id_or_symbols).order_book_id)
    elif isinstance(id_or_symbols, Instrument):
        universe.add(id_or_symbols.order_book_id)
    elif isinstance(id_or_symbols, Iterable):
        for entry in id_or_symbols:
            universe.add(assure_order_book_id(entry))
    else:
        raise RQInvalidArgument(_(u"unsupported order_book_id type"))
    verify_that('id_or_symbols')._are_valid_instruments("subscribe", id_or_symbols)
    env.update_universe(universe)
def unsubscribe(id_or_symbols):
    """
    Remove instrument(s) from the subscribed universe.

    :param id_or_symbols: instrument(s) to unsubscribe
    :type id_or_symbols: :class:`~Instrument` object | `str` | List[:class:`~Instrument`] | List[`str`]
    """
    env = Environment.get_instance()
    universe = env.get_universe()
    if isinstance(id_or_symbols, six.string_types):
        # A single symbol string: resolve it to an order book id.
        universe.discard(instruments(id_or_symbols).order_book_id)
    elif isinstance(id_or_symbols, Instrument):
        universe.discard(id_or_symbols.order_book_id)
    elif isinstance(id_or_symbols, Iterable):
        for entry in id_or_symbols:
            universe.discard(assure_order_book_id(entry))
    else:
        raise RQInvalidArgument(_(u"unsupported order_book_id type"))
    env.update_universe(universe)
def _limit_and_df(self, query, limit, as_df=False):
    """adds a limit (limit==None := no limit) to any query and allow a return as pandas.DataFrame

    :param bool as_df: if is set to True results return as pandas.DataFrame
    :param `sqlalchemy.orm.query.Query` query: SQL Alchemy query
    :param int or tuple[int] limit: maximum number of results, or a
        ``(page, page_size)`` pair for pagination
    :return: query result of pyuniprot.manager.models.XY objects
    """
    if limit:
        if isinstance(limit, int):
            query = query.limit(limit)
        # Use elif with an explicit sequence check: the old code tested a
        # bare Iterable and then called len() on it, which crashes on
        # generators/sets without len(), and ran as a second independent if.
        elif (isinstance(limit, (tuple, list)) and len(limit) == 2
              and all(type(x) is int for x in limit)):
            page, page_size = limit
            query = query.limit(page_size)
            query = query.offset(page * page_size)

    if as_df:
        results = read_sql(query.statement, self.engine)
    else:
        results = query.all()

    return results
def _one_to_many_query(cls, query_obj, search4, model_attrib):
    """extends and returns a SQLAlchemy query object to allow one-to-many queries

    :param query_obj: SQL Alchemy query object
    :param str search4: search string
    :param model_attrib: attribute in model
    """
    model = model_attrib.parent.class_

    # Build the filter criterion that matches the search term's type.
    if isinstance(search4, str):
        criterion = model_attrib.like(search4)
    elif isinstance(search4, int):
        criterion = model_attrib == search4
    elif isinstance(search4, Iterable):
        criterion = model_attrib.in_(search4)
    else:
        # Unknown search type: leave the query untouched.
        return query_obj

    return query_obj.join(model).filter(criterion)
def loadSpeImg(self, index):
    """Return a dict mapping image index -> 2-D numpy array of pixel data.

    :param index: a single image index, or an iterable of indices.  On an
        index that cannot be converted to int, all available images are
        fetched instead (with a warning).
    """
    # collections.Iterable was removed from the collections namespace in
    # Python 3.10; use collections.abc instead.
    from collections.abc import Iterable

    if isinstance(index, Iterable):
        index = list(index)
    else:
        try:
            index = [int(index)]
        except (TypeError, ValueError):
            # Narrowed from a bare `except`: only conversion failures mean
            # "invalid index"; anything else should propagate.
            print("Warning: invalid image index", index,
                  ". Fetch all available images")
            index = list(range(self._img_count))

    datas = {}
    fmt = str(self._xdim * self._ydim) + self._datatype
    for i in index:
        # Each frame is stored contiguously after the SPE header.
        self._fileObj.seek(SPE.SPE_DATA_OFFSET + i * self._img_size)
        data = self._fileObj.read(self._img_size)
        datas[i] = np.array(
            struct.unpack(fmt, data), dtype=self._ndtype
        ).reshape(self._ydim, self._xdim)
    return datas
def possibleErrors(self, data):
    """Replace this object's error references from error name(s).

    :param data: a single error name or an iterable of error names; each
        must match an error in ``self.parent.applicationErrors``.
    :raises ValueError: when there is no parent, a name does not match,
        or ``data`` has the wrong type.
    """
    # collections.Iterable was removed from the collections namespace in
    # Python 3.10; use collections.abc instead.
    from collections.abc import Iterable

    if self.parent is None:
        raise ValueError('cannot call this method without valid parent object')
    if isinstance(data, str):
        # Normalize a single name to a one-element list.
        data = [data]
    if isinstance(data, Iterable):
        del self.errorRefs[:]
        for name in data:
            found = False
            for error in self.parent.applicationErrors:
                if error.name == name:
                    self.errorRefs.append(error.ref)
                    found = True
                    break
            if not found:
                raise ValueError('invalid error name: "%s"' % name)
    else:
        # Fixed typo in the original message ("iterrable").
        raise ValueError("input argument must be string or iterable")
def __init__(self, name, portInterfaceRef, comspec=None, parent=None):
    """Create a port bound to the given port-interface reference.

    :param name: port name
    :param portInterfaceRef: reference string to the port interface (or None)
    :param comspec: optional com-spec data: a single dict or an iterable of dicts
    :param parent: owning object, used to resolve the root workspace
    :raises ValueError: on a non-str portInterfaceRef or bad comspec data
    """
    # collections.Mapping / collections.Iterable were removed from the
    # collections namespace in Python 3.10; use collections.abc instead.
    from collections.abc import Iterable, Mapping

    self.name = name
    if portInterfaceRef is not None and not isinstance(portInterfaceRef, str):
        raise ValueError('portInterfaceRef needs to be of type None or str')
    self.portInterfaceRef = portInterfaceRef
    self.comspec = []
    self.parent = parent
    if comspec is not None:
        ws = self.rootWS()
        assert(ws is not None)
        if isinstance(comspec, Mapping):
            # Single com-spec given as a dict.
            comspecObj = self.createComSpecFromDict(ws, portInterfaceRef, comspec)
            if comspecObj is None:
                raise ValueError('failed to create comspec from comspec data: ' + repr(comspec))
            self.comspec.append(comspecObj)
        elif isinstance(comspec, Iterable):
            # Multiple com-specs given as an iterable of dicts.
            for data in comspec:
                comspecObj = self.createComSpecFromDict(ws, portInterfaceRef, data)
                if comspecObj is None:
                    raise ValueError('failed to create comspec from comspec data: ' + repr(data))
                self.comspec.append(comspecObj)
        else:
            raise NotImplementedError("not supported")
def createClientServerInterface(self, name, operations, errors=None, isService=False, adminData=None):
    """
    creates a new client server interface in current package

    name: name of the interface (string)
    operations: names of the operations in the interface (list of strings)
    errors: possible errors dict containing key-value pair where key is the name and value is the error code (must be integer)
    isService: True if this interface is a service interface (bool)
    adminData: optional admindata (dict or autosar.base.AdminData object)
    """
    # collections.Iterable was removed from the collections namespace in
    # Python 3.10; use collections.abc instead.
    from collections.abc import Iterable

    portInterface = autosar.portinterface.ClientServerInterface(name, isService, self, adminData)
    # Distinct loop variable: the original code shadowed the `name` parameter.
    for operationName in operations:
        portInterface.append(autosar.portinterface.Operation(operationName))
    if errors is not None:
        if isinstance(errors, Iterable):
            for error in errors:
                portInterface.append(error)
        else:
            assert(isinstance(errors, autosar.portinterface.ApplicationError))
            portInterface.append(errors)
    self.append(portInterface)
    return portInterface
def _serialize_graph(ops):
    """
    Serializes a graph and returns the actual protobuf python object (rather than
    serialized byte string as done by `serialize_graph`).
    """
    assert isinstance(ops, Iterable), "Ops passed into `serialize_graph` must be an iterable"
    all_ops = Op.all_op_references(ops)

    # Convert every op to protobuf and collect the edges discovered along
    # the way.
    pb_ops = []
    pb_edges = []
    for op in all_ops:
        pb_ops.append(op_to_protobuf(op))
        add_edges(pb_edges, pb_ops, op)

    # Copy everything into a single GraphDef message.
    graph_def = ops_pb.GraphDef()
    for pb_edge in pb_edges:
        graph_def.edges.add().CopyFrom(pb_edge)
    for pb_op in pb_ops:
        graph_def.ops.add().CopyFrom(pb_op)
    return graph_def
def _reduce_nested(elem, agg, func):
    """
    Reduces a nested sequence by applying a function to each of its elements
    and returns an aggregation.

    Arguments:
        elem: The object to be reduced, either a sequence or a singleton.
        agg: A variable holding information collected as the sequence is
            collapsed.
        func: A function to augment the aggregate by processing a singleton.
            Should have the form func(agg, elem) -> agg

    Returns:
        agg: The final aggregate returned by the function.
    """
    # collections.Iterable was removed from the collections namespace in
    # Python 3.10; use collections.abc instead.
    from collections.abc import Iterable

    # Strings are iterable and a 1-char string iterates to itself, which
    # made the old code recurse forever; treat strings as singletons.
    if isinstance(elem, Iterable) and not isinstance(elem, str):
        for sub in elem:
            agg = _reduce_nested(sub, agg, func)
        return agg
    else:
        return func(agg, elem)