The following 26 code examples, extracted from open-source Python projects, illustrate how to use data.load().
def restart_required(self):
    """Indicates whether splunkd is in a state that requires a restart.

    :return: A ``boolean`` that indicates whether a restart is required.
    """
    response = self.get("messages").body.read()
    messages = data.load(response)['feed']
    if 'entry' not in messages:
        result = False
    else:
        if isinstance(messages['entry'], dict):
            titles = [messages['entry']['title']]
        else:
            titles = [x['title'] for x in messages['entry']]
        result = 'restart_required' in titles
    return result
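A minimal usage sketch for the method above, assuming splunklib is installed and a Splunk server is reachable; the connection parameters are placeholders, and splunklib exposes restart_required as a property on the connected Service:

import splunklib.client as client

# Placeholder connection details; substitute your deployment's values.
service = client.connect(host="localhost", port=8089,
                         username="admin", password="changeme")

# In splunklib, restart_required is a property on Service.
if service.restart_required:
    print("splunkd must be restarted before changes take effect.")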
def _load_atom(response, match=None):
    return data.load(response.body.read(), match)

# Load an array of atom entries from the body of the given response
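To see the shape of what _load_atom returns, here is a small sketch that feeds a hand-written Atom fragment straight into data.load(); the payload is made up for illustration:

from splunklib import data

# A minimal Atom feed standing in for a response body; data.load()
# strips namespaces and returns a nested dict-like record.
atom = """<feed xmlns="http://www.w3.org/2005/Atom">
  <title>messages</title>
  <entry><title>restart_required</title></entry>
</feed>"""

result = data.load(atom)
print(result['feed']['title'])           # messages
print(result['feed']['entry']['title'])  # restart_required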
def refresh(self, state=None):
    """Refreshes the state of this entity.

    If *state* is provided, load it as the new state for this entity.
    Otherwise, make a roundtrip to the server (by calling the
    :meth:`read` method of ``self``) to fetch an updated state, plus at
    most two additional round trips if the ``autologin`` field of
    :func:`connect` is set to ``True``.

    :param state: Entity-specific arguments (optional).
    :type state: ``dict``
    :raises EntityDeletedException: Raised if the entity no longer exists
        on the server.

    **Example**::

        import splunklib.client as client
        s = client.connect(...)
        search = s.apps['search']
        search.refresh()
    """
    if state is not None:
        self._state = state
    else:
        self._state = self.read(self.get())
    return self
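A short sketch of refresh() in practice, in the same placeholder style as the docstring's example; the saved-search name is hypothetical:

import splunklib.client as client

service = client.connect(host="localhost", port=8089,
                         username="admin", password="changeme")  # placeholders
search = service.saved_searches['Errors in the last 24 hours']   # hypothetical name

# ... the search is modified elsewhere (another session, the web UI) ...

search.refresh()                 # one round trip; pulls the updated state
print(search.content['search'])  # attributes now reflect the fresh state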
def __init__(self, mode=DatasetMode.small):
    self.optimizer = Adam(lr=0.001, beta_1=0.9, beta_2=0.999, epsilon=1e-08, decay=0.0)
    self.model = Sequential()
    self.activation = 'softmax'
    self.loss = 'categorical_crossentropy'
    self.metrics = top_k_accuracy_func_list([50, 100, 200, 300, 400, 500])

    early_stopping = EarlyStopping(monitor='val_loss', patience=3)
    self.callbacks = [early_stopping]

    '''
    Indices of songs in x_train (or x_test) start from 1 because of zero padding.
    Indices of songs in y_train (or y_test) start from zero, like the song hash.

    For instance: if the index of songA in the dataset is 21, then
    songA's index is 22 in x_train (or x_test) and
    songA's index is 21 in y_train (or y_test).

    The goal is for the neural network to be able to ignore the zero padding.
    '''
    (x_train, y_train), (x_test, y_test), songs = load(mode)

    self.max_length = max([len(playlist) for playlist in x_train])
    self.song_hash = songs

    self.x_train = np.asarray(sequence.pad_sequences(x_train, maxlen=self.max_length), dtype="int64")
    self.y_train = to_categorical(y_train, len(self.song_hash) + 1)  # Zero is included

    self.x_test = np.asarray(sequence.pad_sequences(x_test, maxlen=self.max_length), dtype="int64")
    self.y_test = to_categorical(y_test, len(self.song_hash) + 1)  # Zero is included
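The index-shifting scheme described in the comment above can be seen on toy data. A sketch assuming a Keras version matching the snippet's imports (keras.preprocessing.sequence and keras.utils.to_categorical); the playlists are stand-ins for what load(mode) returns:

import numpy as np
from keras.preprocessing import sequence
from keras.utils import to_categorical

# Toy playlists: song indices are 1-based in the inputs so that 0 is
# free to serve as the padding value.
x_train = [[1, 2, 3], [2, 3]]
y_train = [2, 3]
song_hash = {0: 'songA', 1: 'songB', 2: 'songC'}

max_length = max(len(playlist) for playlist in x_train)
x = np.asarray(sequence.pad_sequences(x_train, maxlen=max_length), dtype="int64")
y = to_categorical(y_train, len(song_hash) + 1)  # class 0 reserved for padding

print(x)        # [[1 2 3]
                #  [0 2 3]]  <- shorter playlist is left-padded with zeros
print(y.shape)  # (2, 4): 3 songs plus the padding class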
def iter(self, offset=0, count=None, pagesize=None, **kwargs):
    """Iterates over the collection.

    This method is equivalent to the :meth:`list` method, but it returns
    an iterator and can load a certain number of entities at a time from
    the server.

    :param offset: The index of the first entity to return (optional).
    :type offset: ``integer``
    :param count: The maximum number of entities to return (optional).
    :type count: ``integer``
    :param pagesize: The number of entities to load (optional).
    :type pagesize: ``integer``
    :param kwargs: Additional arguments (optional):

        - "search" (``string``): The search query to filter responses.

        - "sort_dir" (``string``): The direction to sort returned items:
          "asc" or "desc".

        - "sort_key" (``string``): The field to use for sorting (optional).

        - "sort_mode" (``string``): The collating sequence for sorting
          returned items: "auto", "alpha", "alpha_case", or "num".

    :type kwargs: ``dict``

    **Example**::

        import splunklib.client as client
        s = client.connect(...)
        for saved_search in s.saved_searches.iter(pagesize=10):
            # Loads 10 saved searches at a time from the server.
            ...
    """
    assert pagesize is None or pagesize > 0
    if count is None:
        count = self.null_count
    fetched = 0
    while count == self.null_count or fetched < count:
        response = self.get(count=pagesize or count, offset=offset, **kwargs)
        items = self._load_list(response)
        N = len(items)
        fetched += N
        for item in items:
            yield item
        if pagesize is None or N < pagesize:
            break
        offset += N
        logging.debug("pagesize=%d, fetched=%d, offset=%d, N=%d, kwargs=%s",
                      pagesize, fetched, offset, N, kwargs)

# kwargs: count, offset, search, sort_dir, sort_key, sort_mode
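Building on the docstring's example, a sketch that combines paging with the pass-through sorting arguments; connection details are placeholders:

import splunklib.client as client

service = client.connect(host="localhost", port=8089,
                         username="admin", password="changeme")  # placeholders

# At most 30 entities, fetched 10 per round trip, skipping the first 5,
# sorted by name in ascending order.
for saved_search in service.saved_searches.iter(offset=5, count=30, pagesize=10,
                                                sort_key="name", sort_dir="asc"):
    print(saved_search.name)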
def get_minibatch(file_name, batch_size, shuffle, with_pauses=False):
    dataset = data.load(file_name)

    if shuffle:
        np.random.shuffle(dataset)

    X_batch = []
    Y_batch = []
    if with_pauses:
        P_batch = []

    if len(dataset) < batch_size:
        print("WARNING: Not enough samples in '%s'. Reduce mini-batch size to %d "
              "or use a dataset with at least %d words." % (
                  file_name, len(dataset), MINIBATCH_SIZE * data.MAX_SEQUENCE_LEN))

    for subsequence in dataset:
        X_batch.append(subsequence[0])
        Y_batch.append(subsequence[1])
        if with_pauses:
            P_batch.append(subsequence[2])

        if len(X_batch) == batch_size:
            # Transpose, because the model assumes the first axis is time
            X = np.array(X_batch, dtype=np.int32).T
            Y = np.array(Y_batch, dtype=np.int32).T
            if with_pauses:
                P = np.array(P_batch, dtype=theano.config.floatX).T

            if with_pauses:
                yield X, Y, P
            else:
                yield X, Y

            X_batch = []
            Y_batch = []
            if with_pauses:
                P_batch = []
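A sketch of driving the generator above; the file name is hypothetical, and data.load() is assumed to be this project's loader returning a list of (word_ids, label_ids[, pauses]) subsequences:

# Hypothetical pickled dataset produced by the project's preprocessing step.
for X, Y in get_minibatch("data/train.pkl", batch_size=128, shuffle=True):
    # X and Y arrive transposed: shape (sequence_length, batch_size).
    print(X.shape, Y.shape)
    break  # inspect only the first minibatch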