Python utils module: sample() example source code

We extracted the following 8 code examples from open-source Python projects to illustrate how to use utils.sample().
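None of the snippets below define sample() themselves; each imports it from a project-local utils module, so implementations vary. Given how it is called (a prediction vector in, one index out, with an optional temperature), a minimal sketch in the spirit of the classic Keras helper might look like the following. This is an assumption for illustration, not any one project's code, and it assumes preds is already a probability distribution (the CNN-LSTM example below passes raw logits, which would need a softmax first):

import numpy as np

def sample(preds, temperature=1.0):
    """Draw one index from a probability vector, sharpened (temperature < 1)
    or flattened (temperature > 1) before the draw."""
    preds = np.asarray(preds, dtype=np.float64)
    preds = np.log(preds + 1e-10) / temperature   # re-weight in log space
    exp_preds = np.exp(preds - np.max(preds))     # subtract max for numerical stability
    probs = exp_preds / np.sum(exp_preds)
    return int(np.random.choice(len(probs), p=probs))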

Project: Tensorflow-Softmax-NER-RNNLM    Author: queue-han    | project source | file source
def generate_text(session, model, config, starting_text='<eos>',
                  stop_length=100, stop_tokens=None, temp=1.0):
  """Generate text from the model.

  Hint: Create a feed-dictionary and use sess.run() to execute the model. Note
        that you will need to use model.initial_state as a key to feed_dict
  Hint: Fetch model.final_state and model.predictions[-1]. (You set
        model.final_state in add_model() and model.predictions is set in
        __init__)
  Hint: Store the outputs of running the model in local variables state and
        y_pred (used in the pre-implemented parts of this function.)

  Args:
    session: tf.Session() object
    model: Object of type RNNLM_Model
    config: A Config() object
    starting_text: Initial text passed to model.
  Returns:
    output: List of word idxs
  """
  state = model.initial_state.eval()
  # Imagine tokens as a batch size of one, length of len(tokens[0])
  tokens = [model.vocab.encode(word) for word in starting_text.split()]
  for i in xrange(stop_length):
    ### YOUR CODE HERE
    # Completed per the identical solutions later on this page:
    feed = {model.input_placeholder: [[tokens[-1]]],
            model.initial_state: state,
            model.dropout_placeholder: 1.0}
    state, y_pred = session.run([model.final_state, model.predictions[-1]],
                                feed_dict=feed)
    ### END YOUR CODE
    next_word_idx = sample(y_pred[0], temperature=temp)
    tokens.append(next_word_idx)
    if stop_tokens and model.vocab.decode(tokens[-1]) in stop_tokens:
      break
  output = [model.vocab.decode(word_idx) for word_idx in tokens]
  return output
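A hypothetical invocation once the stub above is filled in (the starting text and temperature here are made up for illustration):

words = generate_text(session, model, config,
                      starting_text='the quick brown',
                      stop_tokens=['<eos>'], temp=0.8)
print(' '.join(words))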
Project: CNN-LSTM-Caption-Generator    Author: mosessoh    | project source | file source
def generate_caption(self, session, img_feature, toSample=False):
        dp = 1
        img_template = np.zeros([self.config.batch_size, self.config.img_dim])
        img_template[0,:] = img_feature

        sent_pred = np.ones([self.config.batch_size, 1])*3591 # <SOS>
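        # 3339 is presumably this project's <EOS> id; generation also stops after 50 words.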
        while sent_pred[0,-1] != 3339 and (sent_pred.shape[1] - 1) < 50:
            feed = {self._sent_placeholder: sent_pred,
                    self._img_placeholder: img_template,
                    self._targets_placeholder: np.ones([self.config.batch_size,1]), # dummy variable
                    self._dropout_placeholder: dp}

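            # The logits come back flattened to [batch * (T + 1), vocab] (the image
            # embedding presumably prepends one timestep); these indices pick each
            # example's final timestep.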
            idx_next_pred = np.arange(1, self.config.batch_size + 1)*(sent_pred.shape[1] + 1) - 1

            if toSample:
                logits = session.run(self.logits, feed_dict=feed)
                next_logits = logits[idx_next_pred,:]
                raw_predicted = []
                for row_idx in range(next_logits.shape[0]):
                    idx = sample(next_logits[row_idx,:])
                    raw_predicted.append(idx)
                raw_predicted = np.array(raw_predicted)
            else:
                raw_predicted = session.run(self._predictions, feed_dict=feed)
                raw_predicted = raw_predicted[idx_next_pred]

            next_pred = np.reshape(raw_predicted, (self.config.batch_size,1))
            sent_pred = np.concatenate([sent_pred, next_pred], 1)

        predicted_sentence = ' '.join(self.index2token[idx] for idx in sent_pred[0,1:-1])
        return predicted_sentence
Project: PhilosophyLSTM    Author: guilherme-pombo    | project source | file source
def generate_sentences(self, len_sentences=60, load=True):
        """
        Generate sentences given a trained LSTM model
        :param len_sentences: the length of the sentences to be generated
        :param load: whether to load weights from a file or reuse the in-memory model; the latter only works if you ran train_model before
        :return:
        """

        model = lstm_model.create_model(word_coding)

        if load:
            model.load_weights('lstm-weights')

        else:
            if not ModelTrainer.model_trained:
                raise Exception("The model hasn't been trained. Either train it or load in the weights from a file")
            model = self.model

        seedSrc = i_D
        outSentences = []
        while len(outSentences) < len_sentences:
            start_index = random.randint(0, len(seedSrc) - 1)
            sentence = seedSrc[start_index: start_index + 1]

            sentOutput = ''

            for iteration in range(500):
                vecsentence = []
                for vcode in sentence[0]:
                    vecsentence.append(self.vmodel[coded_word[vcode]])
                vecsentence = np.reshape(vecsentence, (1, len(vecsentence), 300))
                preds = model.predict({'input': vecsentence}, verbose=0)['output1'][0]
                next_index = sample(preds, 0.2)
                if next_index in coded_word:
                    next_char = coded_word[next_index]
                    sentence = np.append(sentence[0][1:], [next_index]).reshape(np.asarray(sentence).shape)
                    sentOutput += next_char + ' '
            print(sentOutput)
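The hardcoded temperature of 0.2 in sample(preds, 0.2) makes the draw nearly greedy. Using the assumed sample() sketch from the top of this page, the effect is easy to verify:

import numpy as np
np.random.seed(0)
probs = [0.5, 0.3, 0.15, 0.05]
for t in (1.0, 0.5, 0.2):
    draws = [sample(probs, temperature=t) for _ in range(1000)]
    # The share of draws hitting the most likely index rises as t falls.
    print(t, sum(d == 0 for d in draws) / 1000.0)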
Project: neural_composer    Author: zhebrak    | project source | file source
def compose_async(song_key):
    model = get_model()

    while True:
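        # Retry with a fresh random diversity (temperature) until a song is
        # generated and written successfully.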
        diversity = random.uniform(0.7, 1.0)
        sentence = '#' * MEMORY_LENGTH + 'X:'
        sentence = sentence[-MEMORY_LENGTH:]
        generated = 'X:'

        while True:
            x = np.zeros((1, MEMORY_LENGTH, len(model.chars)))
            for t, char in enumerate(sentence):
                x[0, t, model.char_indices[char]] = 1.

            preds = model.predict(x, verbose=0)[0]
            next_index = utils.sample(preds, diversity)
            next_char = model.indices_char[next_index]

            sentence = sentence[-MEMORY_LENGTH + 1:] + next_char
            generated += next_char

            if generated.endswith('$$$'):
                try:
                    song = Song.objects.get(key=song_key)
                    song.song = generated.rstrip('$')
                    song.save()

                    writer.write(song_key)
                except WriterException:
                    break
                else:
                    return

            if len(generated) > MAX_SONG_LENGTH:
                break
Project: MyTwitterBot    Author: felipessalvatore    | project source | file source
def generate_text(session, model, config, starting_text='<eos>',
                  stop_length=100, stop_tokens=None, temp=1.0):
    """
    This function uses the model to generate a sentence
    starting with the token(s) "starting_text".
    The generated sentence has at most "stop_length" tokens.
    If you use the list "stop_tokens", the sentence will end at any
    word of that list.

    :type session: tf Session
    :type model: RNNLanguageModel
    :type config: Config
    :type starting_text: str
    :type stop_length: int
    :type stop_tokens: None or list of str
    :type temp: float
    :rtype : list of str
    """
    state = session.run(model.initial_state)
    tokens = [model.vocab.encode(word) for word in starting_text.split()]
    for i in range(stop_length):
        feed = {model.input_placeholder: [[tokens[-1]]],
                model.initial_state: state,
                model.dropout_placeholder: 1.0}
        state, y_pred = session.run([model.final_state,
                                     model.predictions[-1]],
                                    feed_dict=feed)
        next_word_idx = sample(y_pred[0], temperature=temp)
        tokens.append(next_word_idx)
        if stop_tokens and model.vocab.decode(tokens[-1]) in stop_tokens:
            break
    output = [model.vocab.decode(word_idx) for word_idx in tokens]
    return output
Project: Named-Entity-Recognition    Author: AliceDudu    | project source | file source
def generate_text(session, model, config, starting_text='<eos>',
                  stop_length=100, stop_tokens=None, temp=1.0):
  """Generate text from the model.

  Hint: Create a feed-dictionary and use sess.run() to execute the model. Note
        that you will need to use model.initial_state as a key to feed_dict
  Hint: Fetch model.final_state and model.predictions[-1]. (You set
        model.final_state in add_model() and model.predictions is set in
        __init__)
  Hint: Store the outputs of running the model in local variables state and
        y_pred (used in the pre-implemented parts of this function.)

  Args:
    session: tf.Session() object
    model: Object of type RNNLM_Model
    config: A Config() object
    starting_text: Initial text passed to model.        # generate_text: text in, List of word idxs out
  Returns:
    output: List of word idxs
  """
  state = model.initial_state.eval()
  # Imagine tokens as a batch size of one, length of len(tokens[0])
  tokens = [model.vocab.encode(word) for word in starting_text.split()]  # encode each word of the text as a vocab idx
  for i in xrange(stop_length):
    ### YOUR CODE HERE
    feed = {model.input_placeholder: [tokens[-1:]],
            model.initial_state: state,
            model.dropout_placeholder: 1}
    state, y_pred = session.run(
        [model.final_state, model.predictions[-1]], feed_dict=feed)  # run the model one step to get state and y_pred
    ### END YOUR CODE
    next_word_idx = sample(y_pred[0], temperature=temp)  # sample the next word's idx from the predicted distribution
    tokens.append(next_word_idx)
    if stop_tokens and model.vocab.decode(tokens[-1]) in stop_tokens:
      break
  output = [model.vocab.decode(word_idx) for word_idx in tokens]  # decode the idxs in tokens back into words
  return output
Project: cs224d    Author: kkihara    | project source | file source
def generate_text(session, model, config, starting_text='<eos>',
                  stop_length=100, stop_tokens=None, temp=1.0):
  """Generate text from the model.

  Hint: Create a feed-dictionary and use sess.run() to execute the model. Note
        that you will need to use model.initial_state as a key to feed_dict
  Hint: Fetch model.final_state and model.predictions[-1]. (You set
        model.final_state in add_model() and model.predictions is set in
        __init__)
  Hint: Store the outputs of running the model in local variables state and
        y_pred (used in the pre-implemented parts of this function.)

  Args:
    session: tf.Session() object
    model: Object of type RNNLM_Model
    config: A Config() object
    starting_text: Initial text passed to model.
  Returns:
    output: List of word idxs
  """
  state = model.initial_state.eval()
  # Imagine tokens as a batch size of one, length of len(tokens[0])
  tokens = [model.vocab.encode(word) for word in starting_text.split()]
  for i in xrange(stop_length):
    ### YOUR CODE HERE
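    # Note: unlike the other generate_text solutions on this page, this one
    # feeds the full token history each step rather than just [[tokens[-1]]].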
    feed_dict = {model.input_placeholder: tokens,
                 model.initial_state: state,
                 model.dropout_placeholder: 1.}
    state, y_pred = session.run([model.final_state, model.predictions[-1]],
                                feed_dict=feed_dict)
    ### END YOUR CODE
    next_word_idx = sample(y_pred[0], temperature=temp)
    tokens.append(next_word_idx)
    if stop_tokens and model.vocab.decode(tokens[-1]) in stop_tokens:
      break
  output = [model.vocab.decode(word_idx) for word_idx in tokens]
  return output
Project: MyTwitterBot    Author: felipessalvatore    | project source | file source
def __generate_tweet_no_unk__(self,
                                  session,
                                  model,
                                  config,
                                  starting_text='<eos>',
                                  stop_tokens=None,
                                  temp=1.0,
                                  CharSize=140):
        """
        Private method to generate a sentence.
        The sentence will have at most 140 characters (a tweet).
        We use the list of all nouns from
        the vocab to eliminate any unk tokens that may occur.

        :type session: tf Session
        :type model: RNNLanguageModel
        :type config: Config
        :type starting_text: str
        :type stop_tokens: None or list of str
        :type temp: float
        :rtype : list of str
        """
        vocab = self.dataholder.vocab
        state = session.run(model.initial_state)
        tweet = starting_text.split()
        tweet_as_str = starting_text
        tokens = [vocab.encode(word) for word in starting_text.split()]
        while True:
            feed = {model.input_placeholder: [[tokens[-1]]],
                    model.initial_state: state,
                    model.dropout_placeholder: 1.0}
            state, y_pred = session.run([model.final_state,
                                         model.predictions[-1]],
                                        feed_dict=feed)
            next_word_idx = sample(y_pred[0], temperature=temp)
            condit1 = vocab.decode(next_word_idx) == self.dataholder.unk_token
            condit2 = vocab.decode(next_word_idx) in self.black_list
            if condit1 or condit2:
                choice = np.random.choice(len(self.dataholder.all_noums), 1)[0]
                next_word = self.dataholder.all_noums[choice]
            else:
                next_word = vocab.decode(next_word_idx)
            before_next_word = copy(tweet)
            tokens.append(next_word_idx)
            tweet.append(next_word)
            tweet_as_str = " ".join(tweet)
            if len(tweet_as_str) == CharSize:
                break
            if not TweetValid(tweet_as_str, CharNumber=CharSize):
                tweet = copy(before_next_word)
                break
            if stop_tokens and vocab.decode(tokens[-1]) in stop_tokens:
                break
        return tweet