The following code examples, extracted from open-source Python projects, illustrate how to use theano.tensor.nnet.softmax().
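For orientation before the project excerpts, here is a minimal, self-contained sketch of calling softmax() directly. Theano's softmax operates row-wise on a 2D matrix; the variable names here are illustrative, not taken from any of the projects below.

import numpy as np
import theano
import theano.tensor as T
from theano.tensor.nnet import softmax

x = T.matrix("x")                     # rows = samples, columns = class scores
f = theano.function([x], softmax(x))  # compile a row-wise softmax

scores = np.asarray([[1.0, 2.0, 3.0]], dtype=theano.config.floatX)
print(f(scores))                      # each row of the output sums to 1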
def set_inpt(self, inpt, inpt_dropout, mini_batch_size):
    self.inpt = inpt.reshape((mini_batch_size, self.n_in))
    # At inference time, scale the weighted input by (1 - p_dropout) to
    # compensate for the units dropped during training.
    self.output = softmax((1 - self.p_dropout) * T.dot(self.inpt, self.w) + self.b)
    self.y_out = T.argmax(self.output, axis=1)  # Predicted class
    self.inpt_dropout = dropout_layer(
        inpt_dropout.reshape((mini_batch_size, self.n_in)), self.p_dropout)
    self.output_dropout = softmax(T.dot(self.inpt_dropout, self.w) + self.b)
def set_inpt(self, inpt, inpt_dropout, mini_batch_size):
    self.inpt = inpt.reshape((mini_batch_size, self.n_in))
    self.output = softmax((1 - self.p_dropout) * T.dot(self.inpt, self.w) + self.b)
    self.y_out = T.argmax(self.output, axis=1)
    self.inpt_dropout = dropout_layer(
        inpt_dropout.reshape((mini_batch_size, self.n_in)), self.p_dropout)
    self.output_dropout = softmax(T.dot(self.inpt_dropout, self.w) + self.b)
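The dropout_layer helper called in the two examples above is not defined in these excerpts. A common implementation, sketched here under the assumption of the usual numpy/theano imports, masks each unit with a binomial sample; the training-time dropout pairs with the (1 - p_dropout) scaling applied in the non-dropout forward pass.

import numpy as np
import theano
import theano.tensor as T
from theano.tensor.shared_randomstreams import RandomStreams

def dropout_layer(layer, p_dropout):
    # Hypothetical helper: zero out each unit with probability p_dropout.
    srng = RandomStreams(np.random.RandomState(0).randint(999999))
    mask = srng.binomial(n=1, p=1 - p_dropout, size=layer.shape)
    return layer * T.cast(mask, theano.config.floatX)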
def set_inpt(self, inpt, mini_batch_size, timestep_n):
    # Reshape 3D to 2D so we can apply softmax correctly.
    self.inpt = inpt.reshape((mini_batch_size * timestep_n, self.n_in))
    # The wx + b maps the 2D input to the correct output shape.
    self.inpt = softmax(T.dot(self.inpt, self.w) + self.b)
    # Now that the output has the correct shape, convert back to 3D,
    # using self.n_out since the result is already correctly shaped, just in 2D.
    self.output = self.inpt.reshape((mini_batch_size, timestep_n, self.n_out))
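The reshape trick above can be shown standalone: flatten the batch and time dimensions into rows, apply the row-wise softmax, then restore the 3D layout. This is a sketch with illustrative names, not code from the project.

import theano
import theano.tensor as T
from theano.tensor.nnet import softmax

x3 = T.tensor3("x3")  # shape: (mini_batch_size, timestep_n, n_classes)
shp = x3.shape
flat = x3.reshape((shp[0] * shp[1], shp[2]))            # softmax expects a 2D matrix
out3 = softmax(flat).reshape((shp[0], shp[1], shp[2]))  # restore the 3D layout
f = theano.function([x3], out3)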
def set_inpt(self, inpt, mini_batch_size):
    self.inpt = inpt.reshape((mini_batch_size, self.n_in))
    self.output = softmax(T.dot(self.inpt, self.w) + self.b)
    self.y_out = T.argmax(self.output, axis=1)
def build_prediction(self):
    # return NN.softmax(self.activation)  # use this line to expose a slow
    # subtensor implementation
    return NN.sigmoid(self.activation)