Python theano.tensor.shared_randomstreams 模块,RandomStreams() 实例源码

我们从Python开源项目中,提取了以下50个代码示例,用于说明如何使用theano.tensor.shared_randomstreams.RandomStreams()

项目:structured-output-ae    作者:sbelharbi    | 项目源码 | 文件源码
def __init__(self,
             input,
             nvis,
             nhid,
             rnd=None,
             theano_rng=None,
             bhid=None,
             cost_type=CostType.CrossEntropy,
             bvis=None):
    """Sparse autoencoder constructor.

    All common setup is delegated to the parent autoencoder; this class
    only attaches a symbolic Theano random stream, creating one seeded
    from the numpy RNG `rnd` when the caller does not supply one.
    """
    super(SparseAutoencoder, self).__init__(
        input=input, nvis=nvis, nhid=nhid,
        rnd=rnd, bhid=bhid, cost_type=cost_type, bvis=bvis)
    # Fall back to a stream seeded from `rnd` when no stream was given.
    self.theano_rng = theano_rng or RandomStreams(rnd.randint(2 ** 30))
项目:Neural-Photo-Editor    作者:ajbrock    | 项目源码 | 文件源码
def reset(self):
        """Restore the connectivity sampler to its initial state.

        Re-seeds the symbolic random stream, restores the natural input
        ordering, zeroes every hidden layer's connectivity vector, restores
        the MRG stream's saved state, and finally resamples a valid
        connectivity pattern.
        """
        # Set Original ordering
        self.ordering.set_value(np.arange(self._input_size, dtype=theano.config.floatX))

        # Reset RandomStreams
        self._rng.seed(self._random_seed)

        # Initial layer connectivity: input layer is ordering+1, hidden layers all zeros,
        # output layer mirrors the ordering.
        self.layers_connectivity[0].set_value((self.ordering + 1).eval())
        for i in range(1, len(self.layers_connectivity)-1):
            self.layers_connectivity[i].set_value(np.zeros((self._hidden_sizes[i-1]), dtype=theano.config.floatX))
        self.layers_connectivity[-1].set_value(self.ordering.get_value())

        # Reset MRG_RandomStreams (GPU): restore both the raw rstate and every
        # per-op state update captured at construction time, so sampling is
        # reproducible after a reset.
        self._mrng.rstate = self._initial_mrng_rstate
        for state, value in zip(self._mrng.state_updates, self._initial_mrng_state_updates):
            state[0].set_value(value)

        self.sample_connectivity()
项目:MIDS    作者:freegraphics    | 项目源码 | 文件源码
def main(load_id):
        """Build rating lines for a handful of randomly chosen users.

        :param load_id: checkpoint id stored into ``consts.load_from_ids``
            before the model is constructed.
        """
        consts = Consts()
        consts.load_from_ids = load_id
        rng = numpy.random.RandomState()
        theano_rng = RandomStreams(rng.randint(2 ** 30))
        user_lines = UserLines(rng = rng,theano_rng = theano_rng,consts = consts)
        rating_info = numpy.zeros(1,dtype=theano.config.floatX)
        wday = 4 # friday
        # Map the weekday onto the model's normalized input range.
        rating_info[0] = get_aranged(value = wday, min_value = 0, max_value = 6)
        #user_id = user_lines.rng.randint(low=0,high=user_lines.matrix_ids.users_count)
        #user_ids = user_lines.__find_nearest(user_id,5)
        # NOTE(review): numpy's randint `high` is exclusive, so the extra `-1`
        # here makes the last user unreachable — confirm whether that is intended.
        user_indices = [user_lines.rng.randint(low=0,high=len(user_lines.users_cvs)-1) for it in numpy.arange(5)]
        user_ids = [user_lines.users_cvs.at[indice,"id"] for indice in user_indices]
        #user_lines.build_line_for_rand_user(rating_info = rating_info, user_ids = user_ids, consts = consts)
        user_lines.build_rate_for_rand_user(rating_info = rating_info, user_ids = user_ids, consts = consts)
        sys.stdout.write("all done\n")
        return
项目:eqnet    作者:mast-group    | 项目源码 | 文件源码
def __init__(self, memory_size: int, num_node_types: int, max_num_children: int, hyperparameters: dict,
             rng: RandomStreams, name: str = "single_layer_combination"):
    """Single dense layer combining child representations per node type.

    Creates a per-node-type weight tensor and bias, both drawn from a
    normal distribution scaled by 10**log_init_scale_embedding, plus a
    dropout-masked view of the weights.
    """
    self.__memory_size = memory_size
    self.__rng = rng
    self.__name = name
    self.__hyperparameters = hyperparameters

    init_scale = 10 ** self.__hyperparameters["log_init_scale_embedding"]

    # One (memory_size x max_num_children*memory_size) weight matrix per node type.
    weights = np.random.randn(num_node_types, memory_size, max_num_children * memory_size) * init_scale
    self.__w = theano.shared(weights.astype(theano.config.floatX), name=name + ":w")

    biases = np.random.randn(num_node_types, memory_size) * init_scale
    self.__bias = theano.shared(biases.astype(theano.config.floatX), name=name + ":b")

    self.__w_with_dropout = dropout(
        self.__hyperparameters['dropout_rate'], self.__rng, self.__w, True)
项目:eqnet    作者:mast-group    | 项目源码 | 文件源码
def __init__(self, embeddings, memory_size: int, embeddings_size: int, hyperparameters: dict, rng: RandomStreams,
             name="SequenceGRU", use_centroid=False):
    """GRU over a token sequence.

    :param embeddings: the embedding matrix
    """
    self.__name = name
    self.__embeddings = embeddings
    self.__memory_size = memory_size
    self.__embeddings_size = embeddings_size
    self.__hyperparameters = hyperparameters
    self.__rng = rng

    cell_name = self.__name + ":GRUCell"
    if use_centroid:
        cell = GruCentroidsCell(memory_size, embeddings_size,
                                hyperparameters['num_centroids'],
                                hyperparameters['centroid_use_rate'],
                                self.__rng, cell_name,
                                hyperparameters['log_init_noise'])
    else:
        cell = GruCell(memory_size, embeddings_size, cell_name,
                       hyperparameters['log_init_noise'])
    self.__gru = cell

    # Re-namespace the cell's parameters under this layer's name.
    self.__params = dict((self.__name + ":" + param_name, param)
                         for param_name, param in self.__gru.get_params().items())
项目:eqnet    作者:mast-group    | 项目源码 | 文件源码
def __init__(self, embeddings, memory_size: int, embeddings_size: int, hyperparameters: dict, rng: RandomStreams,
             name="SequenceAveragingGRU", use_centroid=False):
    """Averaging GRU over a token sequence.

    :param embeddings: the embedding matrix
    """
    self.__name = name
    self.__embeddings = embeddings
    self.__memory_size = memory_size
    self.__embeddings_size = embeddings_size
    self.__hyperparameters = hyperparameters
    self.__rng = rng

    # Choose between a centroid-regularized cell and a plain GRU cell.
    gru_name = self.__name + ":GRUCell"
    if use_centroid:
        self.__gru = GruCentroidsCell(memory_size, embeddings_size,
                                      hyperparameters['num_centroids'],
                                      hyperparameters['centroid_use_rate'],
                                      self.__rng, gru_name,
                                      hyperparameters['log_init_noise'])
    else:
        self.__gru = GruCell(memory_size, embeddings_size, gru_name,
                             hyperparameters['log_init_noise'])

    # Prefix each cell parameter with this layer's name.
    self.__params = {self.__name + ":" + key: val
                     for key, val in self.__gru.get_params().items()}
项目:lmkit    作者:jiangnanhugo    | 项目源码 | 文件源码
def __init__(self, n_input, n_hidden, n_output, cell='gru', optimizer='sgd', p=0.5, bptt=-1):
    """Batched recurrent language model.

    Declares the symbolic batch inputs and masks, initializes the word
    embedding matrix uniformly in +/- sqrt(1/n_output), stores the
    configuration, and builds the computation graph.
    """
    # Symbolic (n_batch, maxlen) index matrices plus float32 masks.
    self.x = T.imatrix('batched_sequence_x')
    self.x_mask = T.fmatrix('x_mask')
    self.y = T.imatrix('batched_sequence_y')
    self.y_mask = T.fmatrix('y_mask')

    self.n_input = n_input
    self.n_hidden = n_hidden
    self.n_output = n_output

    bound = np.sqrt(1. / n_output)
    init_Embd = np.asarray(
        np.random.uniform(low=-bound, high=bound, size=(n_output, n_input)),
        dtype=theano.config.floatX)
    self.E = theano.shared(value=init_Embd, name='word_embedding', borrow=True)

    self.cell = cell
    self.optimizer = optimizer
    self.p = p
    self.bptt = bptt

    self.is_train = T.iscalar('is_train')

    # Symbolic RNG with a fixed seed.
    self.rng = RandomStreams(1234)
    self.build()
项目:lmkit    作者:jiangnanhugo    | 项目源码 | 文件源码
def __init__(self,n_input,n_hidden,n_output,cell='gru',optimizer='sgd',p=0):
    """Batched recurrent language model (variant with n_batch scalar).

    Sets up symbolic inputs/masks, a uniformly initialized embedding
    matrix, configuration attributes, and builds the graph.
    """
    # Symbolic (n_batch, maxlen) index matrices and their float masks.
    self.x = T.imatrix('batched_sequence_x')
    self.x_mask = T.matrix('x_mask')
    self.y = T.imatrix('batched_sequence_y')
    self.y_mask = T.matrix('y_mask')

    self.n_input = n_input
    self.n_hidden = n_hidden
    self.n_output = n_output

    scale = np.sqrt(1. / n_output)
    embedding_init = np.asarray(
        np.random.uniform(low=-scale, high=scale, size=(n_output, n_input)),
        dtype=theano.config.floatX)
    self.E = theano.shared(value=embedding_init, name='word_embedding')

    self.cell = cell
    self.optimizer = optimizer
    self.p = p
    self.is_train = T.iscalar('is_train')
    self.n_batch = T.iscalar('n_batch')

    # Numerical floor used elsewhere to avoid log(0).
    self.epsilon = 1.0e-15
    self.rng = RandomStreams(1234)
    self.build()
项目:lmkit    作者:jiangnanhugo    | 项目源码 | 文件源码
def __init__(self, n_input, n_hidden, n_output, cell='gru', optimizer='sgd', p=0.5, bptt=-1):
    """Batched recurrent language model (duplicate listing of the lmkit LM).

    Builds symbolic inputs, the embedding table, and the training graph.
    """
    # (n_batch, maxlen) integer matrices and float32 masks.
    self.x, self.x_mask = T.imatrix('batched_sequence_x'), T.fmatrix('x_mask')
    self.y, self.y_mask = T.imatrix('batched_sequence_y'), T.fmatrix('y_mask')

    self.n_input, self.n_hidden, self.n_output = n_input, n_hidden, n_output

    # Uniform +/- sqrt(1/n_output) embedding initialization.
    limit = np.sqrt(1. / n_output)
    init_Embd = np.asarray(
        np.random.uniform(low=-limit, high=limit, size=(n_output, n_input)),
        dtype=theano.config.floatX)
    self.E = theano.shared(value=init_Embd, name='word_embedding', borrow=True)

    self.cell = cell
    self.optimizer = optimizer
    self.p = p
    self.bptt = bptt

    self.is_train = T.iscalar('is_train')

    self.rng = RandomStreams(1234)
    self.build()
项目:deep-motion-analysis    作者:Brimborough    | 项目源码 | 文件源码
def __init__(self, rng, batchsize, epochs=100, alpha=0.001, beta1=0.9, beta2=0.999, eps=1e-08, gamma=0.1, cost='mse'):
    """Adam-style trainer configuration.

    `cost` may be the string 'mse' or 'cross_entropy' (selecting a built-in
    loss) or a callable (network, x, y) -> scalar used verbatim.
    """
    self.alpha = alpha
    self.beta1 = beta1
    self.beta2 = beta2
    self.eps = eps
    self.gamma = gamma
    self.rng = rng
    # Symbolic stream seeded from the numpy RNG, for dropout/noise ops.
    self.theano_rng = RandomStreams(rng.randint(2 ** 30))
    self.epochs = epochs
    self.batchsize = batchsize
    if cost == 'mse':
        self.cost = lambda network, x, y: T.mean((network(x) - y) ** 2)
    elif cost == 'cross_entropy':
        self.cost = lambda network, x, y: T.nnet.binary_crossentropy(network(x), y).mean()
    else:
        # Caller supplied its own loss function.
        self.cost = cost
项目:deep-motion-analysis    作者:Brimborough    | 项目源码 | 文件源码
def __init__(self, rng, batchsize, epochs=100, alpha=0.001, beta1=0.9, beta2=0.999, eps=1e-08, gamma=0.1, cost='mse'):
    """Trainer configuration (duplicate listing of the Adam trainer).

    Accepts either a named loss ('mse' / 'cross_entropy') or a custom
    callable (network, x, y) -> scalar.
    """
    self.alpha, self.beta1, self.beta2 = alpha, beta1, beta2
    self.eps, self.gamma = eps, gamma
    self.rng = rng
    self.theano_rng = RandomStreams(rng.randint(2 ** 30))
    self.epochs = epochs
    self.batchsize = batchsize

    if cost == 'mse':
        # Mean squared error.
        self.cost = lambda network, x, y: T.mean((network(x) - y) ** 2)
    elif cost == 'cross_entropy':
        # Mean binary cross-entropy.
        self.cost = lambda network, x, y: T.nnet.binary_crossentropy(network(x), y).mean()
    else:
        self.cost = cost
项目:deep-motion-analysis    作者:Brimborough    | 项目源码 | 文件源码
def __init__(self, rng, filter_shape, input_shape, scale=1.0):
    """Convolution layer parameters with Glorot-style uniform initialization.

    :param rng: numpy RandomState used both to seed the Theano stream and
        to draw the initial weights.
    :param filter_shape: (num_filters, in_channels, h, w).
    :param input_shape: (batch, channels, height, width).
    :param scale: multiplier applied to the Glorot bound.
    """
    self.filter_shape = filter_shape
    self.input_shape = input_shape
    # Output keeps batch and spatial dims; channel dim becomes num_filters.
    self.output_shape = (input_shape[0], filter_shape[0], input_shape[2], input_shape[3])
    self.input_units = np.prod(self.input_shape)
    self.output_units = np.prod(self.output_shape)

    self.theano_rng = RandomStreams(rng.randint(2 ** 30))

    fan_in = np.prod(filter_shape[1:])
    fan_out = filter_shape[0] * np.prod(filter_shape[2:])
    bound = scale * np.sqrt(6. / (fan_in + fan_out))

    weights = np.asarray(
        rng.uniform(low=-bound, high=bound, size=filter_shape),
        dtype=theano.config.floatX)
    biases = np.zeros((filter_shape[0],), dtype=theano.config.floatX)

    self.W = theano.shared(value=weights, borrow=True)
    self.b = theano.shared(value=biases, borrow=True)
    self.params = [self.W, self.b]
项目:deep-motion-analysis    作者:Brimborough    | 项目源码 | 文件源码
def __init__(self, options, shape, rng, drop=0, zone_hidden=0, zone_cell=0, prefix="lstm",
                 bn=False, clip_gradients=False, mask=None):
        """LSTM layer with optional batch normalization, dropout and zoneout.

        :param options: project-wide option dict forwarded to param init.
        :param shape: number of steps / layer shape (also used for BN layers).
        :param rng: numpy RandomState used to seed the Theano stream.
        :param drop: dropout rate.
        :param zone_hidden / zone_cell: zoneout rates for hidden and cell state.
        :param bn: when True, wraps input/hidden/cell in BatchNormLayers.
        """
        self.nsteps = shape
        # NOTE(review): this keeps `mask` only when it is None and otherwise
        # replaces it with '' — the condition looks inverted relative to the
        # TODO below; confirm intent before relying on `self.mask`.
        self.mask = mask if mask is None else '' #TODO: Make mask
        self.prefix = prefix
        #TODO: Replace options and update the step function
        self.options = options
        self.clip_gradients = clip_gradients
        self.params = init_params(param_init_lstm(options=options, params=[], prefix=prefix))
        #TODO: Sort shapes, can have input,hidden for W, U = hidden,hidden, b = hidden
        # Saves upon changing code lots below.
        # When BN is disabled these become identity functions.
        self.bninput = BatchNormLayer(None, shape) if bn else lambda x: x
        self.bnhidden = BatchNormLayer(None, shape) if bn else lambda x: x
        self.bncell = BatchNormLayer(None, shape) if bn else lambda x: x
        # Add BN params to layer (for SGD)
        if bn:
            self.params += self.bnhidden.params + self.bninput.params + self.bncell.params
        self.dropout = drop
        self.zoneout = {'h': zone_hidden, 'c': zone_cell}
        self.theano_rng = RandomStreams(rng.randint(2 ** 30))
项目:deep-motion-analysis    作者:Brimborough    | 项目源码 | 文件源码
def __init__(self, rng, batchsize, epochs=100, alpha=0.001, beta1=0.9, beta2=0.999, eps=1e-08, gamma=0.1, cost='mse'):
    """Trainer configuration (third duplicate listing).

    Stores Adam hyperparameters, seeds a Theano random stream from `rng`,
    and resolves `cost` into a loss callable.
    """
    # Adam hyperparameters.
    self.alpha = alpha
    self.beta1 = beta1
    self.beta2 = beta2
    self.eps = eps
    self.gamma = gamma

    self.rng = rng
    self.theano_rng = RandomStreams(rng.randint(2 ** 30))
    self.epochs = epochs
    self.batchsize = batchsize

    # Resolve the loss: named built-ins or a user-supplied callable.
    if cost == 'mse':
        self.cost = lambda network, x, y: T.mean((network(x) - y) ** 2)
    elif cost == 'cross_entropy':
        self.cost = lambda network, x, y: T.nnet.binary_crossentropy(network(x), y).mean()
    else:
        self.cost = cost
项目:doublecnn    作者:Shuangfei    | 项目源码 | 文件源码
def __init__(self, incoming, num_filters, filter_size, stride=(1,1),
             pad=0, untie_biases=False, kernel_size=3, kernel_pool_size=1,
             W=lasagne.init.GlorotUniform(), b=lasagne.init.Constant(0.),
             nonlinearity=lasagne.nonlinearities.rectify, flip_filters=True,
             convolution=theano.tensor.nnet.conv2d, **kwargs):
    """Set up the double-convolution layer.

    Delegates the standard conv-layer setup to the base class (with pad
    forced to 0, n=2), then records kernel geometry and a fixed-seed
    random stream.
    """
    super(DoubleConvLayer, self).__init__(incoming, num_filters, filter_size,
                                          stride, 0, untie_biases, W, b,
                                          nonlinearity, flip_filters, n=2,
                                          **kwargs)
    self.convolution = convolution
    self.kernel_size = kernel_size
    self.pool_size = kernel_pool_size

    # Number of kernel positions inside one filter along each spatial axis.
    self.filter_offset = self.filter_size[0] - self.kernel_size + 1
    # Total kernel positions (offset squared for the 2D case).
    self.n_times = self.filter_offset ** 2

    self.rng = RandomStreams(123)
项目:adaptiveSM    作者:OlivierShi    | 项目源码 | 文件源码
def __init__(self, n_input, n_hidden, n_batch, n_output, optimizer=sgd, p=0.5, use_adaptive_softmax=True):
    """Language model with an optional adaptive softmax output layer."""
    # Symbolic (n_batch, maxlen) inputs and their float masks.
    self.x = T.imatrix('batched_sequence_x')
    self.x_mask = T.matrix('x_mask')
    self.y = T.imatrix('batched_sequence_y')
    self.y_mask = T.matrix('y_mask')

    self.n_input = n_input
    self.n_hidden = n_hidden
    self.n_output = n_output

    # Uniform +/- sqrt(1/n_output) embedding initialization.
    limit = np.sqrt(1. / n_output)
    embedding_init = np.asarray(
        np.random.uniform(low=-limit, high=limit, size=(n_output, n_input)),
        dtype=theano.config.floatX)
    self.E = theano.shared(value=embedding_init, name='word_embedding', borrow=True)

    self.optimizer = optimizer
    self.p = p
    self.is_train = T.iscalar('is_train')
    self.n_batch = n_batch
    # Numerical floor, used to keep logs finite.
    self.epsilon = 1.0e-15
    self.rng = RandomStreams(1234)
    self.use_adaptive_softmax = use_adaptive_softmax
    self.build()
项目:Buffe    作者:bentzinir    | 项目源码 | 文件源码
def _connect(self, game_params, solver_params):
        """Cache game and solver configuration on the instance.

        Copies the relevant entries of the two parameter dicts into plain
        attributes and creates an unseeded shared random stream.
        """
        self.dt = game_params['dt']
        self.w = game_params['width']
        # Pre-invert the mass so later updates can multiply instead of divide.
        self.inv_m = np.float32(1./game_params['m'])
        self.v_max = game_params['v_max']
        self.c_0_w_accel = solver_params['controler_0']['w_accel']
        self.c_0_w_progress = solver_params['controler_0']['w_progress']
        self.c_1_w_progress = solver_params['controler_1']['w_progress']
        self.c_1_w_mines = solver_params['controler_1']['w_mines']
        self.c_1_w_step_size = solver_params['controler_1']['w_step_size']
        self.d_mines = game_params['d_mines']
        self.n_mines = game_params['n_mines']
        # NOTE(review): v_max was already assigned above — this repeat is redundant.
        self.v_max = game_params['v_max']
        self.switch_interval = solver_params['switch_interval']
        self.srng = RandomStreams()
项目:GRAN    作者:jiwoongim    | 项目源码 | 文件源码
def get_corrupted_input(rng, input, corruption_level, ntype='zeromask'):
    '''Return `input` corrupted by zero-mask, Gaussian, or salt-and-pepper noise.

    :param rng: numpy RandomState used to seed the MRG random stream.
    :param input: symbolic tensor to corrupt.
    :param corruption_level: noise strength; 0.0 returns `input` unchanged.
    :param ntype: one of 'zeromask', 'gaussian', 'salt_pepper'.
    :raises ValueError: for an unrecognized `ntype` (previously the function
        fell off the end and returned None silently).
    '''
    MRG = RNG_MRG.MRG_RandomStreams(rng.randint(2 ** 30))
    #theano_rng = RandomStreams()
    if corruption_level == 0.0:
        return input

    if ntype == 'zeromask':
        # Drop each entry independently with probability `corruption_level`.
        return MRG.binomial(size=input.shape, n=1, p=1 - corruption_level,
                            dtype=theano.config.floatX) * input
    elif ntype == 'gaussian':
        # Additive zero-mean Gaussian noise with std = corruption_level.
        return input + MRG.normal(size=input.shape, avg=0.0,
                                  std=corruption_level, dtype=theano.config.floatX)
    elif ntype == 'salt_pepper':
        # Salt and pepper noise: `a` zeroes corrupted positions, `b` decides
        # which of those become salt (1) rather than pepper (0).
        # (print statement converted to the parenthesized form, valid on
        # both Python 2 and 3.)
        print('DAE uses salt and pepper noise')
        a = MRG.binomial(size=input.shape, n=1,
                         p=1 - corruption_level, dtype=theano.config.floatX)
        b = MRG.binomial(size=input.shape, n=1,
                         p=corruption_level, dtype=theano.config.floatX)

        c = T.eq(a, 0) * b
        return input * a + c

    raise ValueError("unknown noise type: %r" % (ntype,))
项目:structured-output-ae    作者:sbelharbi    | 项目源码 | 文件源码
def __init__(self,
                 input,
                 nvis,
                 nhid,
                 rnd=None,
                 theano_rng=None,
                 bhid=None,
                 cost_type=CostType.MeanSquared,
                 momentum=1,
                 L1_reg=-1,
                 L2_reg=-1,
                 sparse_initialize=False,
                 nonlinearity=NonLinearity.TANH,
                 bvis=None,
                 tied_weights=True,
                 reverse=False,
                 corruption_level=0.):
        """Denoising autoencoder constructor.

        Forwards all shared configuration to the parent autoencoder, then
        stores the input corruption level and attaches a symbolic random
        stream (created from `rnd` when none is supplied).
        """
        super(DenoisingAutoencoder, self).__init__(
            input=input,
            nvis=nvis,
            nhid=nhid,
            rnd=rnd,
            bhid=bhid,
            cost_type=cost_type,
            momentum=momentum,
            L1_reg=L1_reg,
            L2_reg=L2_reg,
            sparse_initialize=sparse_initialize,
            nonlinearity=nonlinearity,
            bvis=bvis,
            tied_weights=tied_weights,
            reverse=reverse)
        # Fraction of input entries to corrupt during training.
        self.corruption_level = corruption_level

        # Default to a stream seeded from the numpy RNG when none was given.
        if not theano_rng:
            theano_rng = RandomStreams(rnd.randint(2 ** 30))
        self.theano_rng = theano_rng

    # Override this function:
项目:structured-output-ae    作者:sbelharbi    | 项目源码 | 文件源码
def __init__(self,
             input,
             nvis,
             nhid,
             rnd=None,
             theano_rng=None,
             bhid=None,
             sigma=0.06,
             nonlinearity=NonLinearity.SIGMOID,
             cost_type=CostType.MeanSquared,
             bvis=None):
    """Contractive autoencoder constructor.

    Stores the contraction strength `sigma`, delegates common setup to the
    parent autoencoder, and attaches a symbolic random stream.

    Bug fix: the parent constructor was invoked as ``__init`` (a typo that
    raises AttributeError at runtime); it now correctly calls ``__init__``.
    """
    self.sigma = sigma
    super(ContractiveAutoencoder, self).__init__(
        input=input,
        nvis=nvis,
        nhid=nhid,
        rnd=rnd,
        bhid=bhid,
        cost_type=cost_type,
        nonlinearity=nonlinearity,
        sparse_initialize=True,
        bvis=bvis)
    # Create a Theano random generator that gives symbolic random values
    if not theano_rng:
        theano_rng = RandomStreams(rnd.randint(2**30))
    self.theano_rng = theano_rng
项目:Neural-Photo-Editor    作者:ajbrock    | 项目源码 | 文件源码
def __init__(self, input_size, hidden_sizes, l, random_seed=1234):
        """Connectivity/ordering sampler for a masked autoencoder.

        Builds shared variables for the input ordering and per-layer
        connectivity, compiles functions to shuffle the ordering and to
        resample hidden connectivity, and snapshots the MRG stream state so
        `reset()` can restore it later.
        """
        self._random_seed = random_seed
        # Two streams: MRG (GPU-friendly sampling) and the classic shared stream.
        self._mrng = MRG_RandomStreams(seed=random_seed)
        self._rng = RandomStreams(seed=random_seed)

        self._hidden_sizes = hidden_sizes
        self._input_size = input_size
        self._l = l

        self.ordering = theano.shared(value=np.arange(input_size, dtype=theano.config.floatX), name='ordering', borrow=False)

        # Initial layer connectivity: input layer is ordering+1, hidden layers zeros,
        # final entry aliases the ordering itself.
        self.layers_connectivity = [theano.shared(value=(self.ordering + 1).eval(), name='layer_connectivity_input', borrow=False)]
        for i in range(len(self._hidden_sizes)):
            self.layers_connectivity += [theano.shared(value=np.zeros((self._hidden_sizes[i]), dtype=theano.config.floatX), name='layer_connectivity_hidden{0}'.format(i), borrow=False)]
        self.layers_connectivity += [self.ordering]

        ## Theano functions
        # Shuffling the ordering also updates the input layer's connectivity.
        new_ordering = self._rng.shuffle_row_elements(self.ordering)
        self.shuffle_ordering = theano.function(name='shuffle_ordering',
                                                inputs=[],
                                                updates=[(self.ordering, new_ordering), (self.layers_connectivity[0], new_ordering + 1)])

        self.layers_connectivity_updates = []
        for i in range(len(self._hidden_sizes)):
            self.layers_connectivity_updates += [self._get_hidden_layer_connectivity(i)]
        # self.layers_connectivity_updates = [self._get_hidden_layer_connectivity(i) for i in range(len(self._hidden_sizes))]  # WTF THIS DO NOT WORK
        self.sample_connectivity = theano.function(name='sample_connectivity',
                                                   inputs=[],
                                                   updates=[(self.layers_connectivity[i+1], self.layers_connectivity_updates[i]) for i in range(len(self._hidden_sizes))])

        # Save random initial state so reset() can restore exact reproducibility.
        self._initial_mrng_rstate = copy.deepcopy(self._mrng.rstate)
        self._initial_mrng_state_updates = [state_update[0].get_value() for state_update in self._mrng.state_updates]

        # Ensuring valid initial connectivity
        self.sample_connectivity()
项目:nn-patterns    作者:pikinder    | 项目源码 | 文件源码
def get_conv_xy(layer, deterministic=True):
    """Sample one spatial patch from a conv layer and return (x, y).

    Picks a random filter-sized window of the layer's input, flattens it to
    (N, D), and computes the corresponding pre-activation outputs y = x.W + b
    as (N, O). Randomness comes from an unseeded RandomStreams, so the patch
    location varies per call of the compiled graph.
    """
    w_np = layer.W.get_value()
    input_layer = layer.input_layer
    # Reproduce 'same' padding explicitly so patch coordinates are valid.
    if layer.pad == 'same':
        input_layer = L.PadLayer(layer.input_layer,
                                 width=np.array(w_np.shape[2:])/2,
                                 batch_ndim=2)
    input_shape = L.get_output_shape(input_layer)
    # Largest top-left corner such that the filter window stays inside the input.
    max_x = input_shape[2] - w_np.shape[2]
    max_y = input_shape[3] - w_np.shape[3]
    srng = RandomStreams()
    patch_x = srng.random_integers(low=0, high=max_x)
    patch_y = srng.random_integers(low=0, high=max_y)

    #print("input_shape shape: ", input_shape)
    #print("pad: \"%s\""% (layer.pad,))
    #print(" stride: " ,layer.stride)
    #print("max_x %d max_y %d"%(max_x,max_y))

    x = L.get_output(input_layer, deterministic=deterministic)
    # Crop the sampled window and flatten spatial/channel dims into D.
    x = x[:, :,
          patch_x:patch_x + w_np.shape[2], patch_y:patch_y + w_np.shape[3]]
    x = T.flatten(x, 2)  # N,D

    w = layer.W
    # Match the convolution's filter orientation.
    if layer.flip_filters:
        w = w[:, :, ::-1, ::-1]
    w = T.flatten(w, outdim=2).T  # D,O
    y = T.dot(x, w) # N,O
    if layer.b is not None:
        y += T.shape_padaxis(layer.b, axis=0)
    return x, y
项目:lightML    作者:jfzhang95    | 项目源码 | 文件源码
def dropout(X, dropout_prob=0.0):
    """Apply inverted dropout to symbolic tensor `X`.

    Entries are kept with probability ``1 - dropout_prob`` and survivors are
    rescaled by ``1 / (1 - dropout_prob)`` so the expected activation is
    unchanged.

    NOTE: the random stream is re-created with a fixed seed (1234) on every
    call, so separately built graphs use identically seeded masks.
    """
    if dropout_prob == 0.0:
        # Fast path: nothing to drop, avoid building a no-op mask graph.
        return X
    retain_prob = 1 - dropout_prob
    srng = RandomStreams(seed=1234)
    X *= srng.binomial(X.shape, p=retain_prob, dtype=theano.config.floatX)
    X /= retain_prob
    return X

# def dropout(x, dropout_prob):
#     if dropout_prob < 0. or dropout_prob > 1.:
#         raise Exception('Dropout level must be in interval [0, 1]')
#     retain_prob = 1. - dropout_prob
#     sample=np.random.binomial(n=1, p=retain_prob, size=x.shape)
#     x *= sample
#     x /= retain_prob
#     return x
项目:cortex    作者:rdevon    | 项目源码 | 文件源码
def get_srng():
    '''Create and return a shared random stream.

    The stream is seeded from Python's global `random` module, so each call
    yields an independently seeded stream.
    '''
    seed = random.randint(0, 1000000)
    return SRandomStreams(seed)
项目:e2e-ie-release    作者:rasmusbergpalm    | 项目源码 | 文件源码
def __init__(self, incoming, input_size, output_size, W=init.Normal(), dropout=0., **kwargs):
    """Embedding layer with support for embedding dropout.

    :param input_size: vocabulary size (number of embedding rows).
    :param output_size: embedding dimensionality.
    :param W: initializer for the embedding matrix.
    :param dropout: dropout rate applied to the embeddings.
    """
    super(DropoutEmbeddingLayer, self).__init__(incoming, **kwargs)

    self.input_size = input_size
    self.output_size = output_size
    self.dropout = dropout
    # Lasagne-style stream seeded from the library's global RNG.
    self._srng = RandomStreams(get_rng().randint(1, 2147462579))

    self.W = self.add_param(W, (input_size, output_size), name="W")
项目:SERT    作者:cvangysel    | 项目源码 | 文件源码
def _negative_sampling(self, num_negative_samples, target_indices):
        """Draw symbolic negative-sample class indices for each batch element.

        Samples ``num_negative_samples`` class indices per batch row from the
        empirical class distribution using a RandomStreams `choice` op.
        Returns a symbolic (batch_size, num_negative_samples) integer tensor.
        """
        assert num_negative_samples > 0

        logging.debug('Stochastically sampling %d negative instances '
                      'out of %d classes (%.2f%%).',
                      num_negative_samples, self.num_entities,
                      100.0 *
                      float(num_negative_samples) / self.num_entities)

        # Imported locally so the module does not hard-depend on Theano.
        from theano.tensor.shared_randomstreams import RandomStreams

        srng = RandomStreams(
            seed=np.random.randint(low=0, high=(1 << 30)))

        rng_sample_size = (self.batch_size, num_negative_samples,)

        logging.debug(
            'Using %s for random sample generation of %s tensors.',
            RandomStreams, rng_sample_size)

        logging.debug('For every batch %d random integers are sampled.',
                      np.prod(rng_sample_size))

        # Weighted sampling over all entity classes.
        random_negative_indices = srng.choice(
            rng_sample_size,
            a=self.num_entities,
            p=self.clazz_distribution)

        # Optionally wrap in a Print op so sampled indices show up at runtime.
        if self.__DEBUG__:
            random_negative_indices = theano.printing.Print(
                'random_negative_indices')(random_negative_indices)

        return random_negative_indices
项目:merlin    作者:CSTR-Edinburgh    | 项目源码 | 文件源码
def __init__(self, rng, x, n_in, n_out, W = None, b = None, activation = T.tanh, p=0.0, training=0):
        """Fully connected layer with optional input dropout.

        :param rng: numpy RandomState used for weight initialization.
        :param x: symbolic input tensor.
        :param n_in / n_out: layer input / output sizes.
        :param W, b: optional pre-built shared parameters.
        :param activation: elementwise nonlinearity applied to the output.
        :param p: dropout-related probability (see NOTE below).
        :param training: 1 during training (stochastic mask), else scaling.
        """
        n_in = int(n_in)  # ensure sizes have integer type
        n_out = int(n_out)# ensure sizes have integer type

        self.x = x

        # NOTE(review): during training, entries are KEPT with probability `p`
        # (binomial(p=p)), but at inference the input is scaled by (1-p).
        # These two conventions are inconsistent with each other — confirm
        # which meaning of `p` is intended before changing either branch.
        if p > 0.0:
            if training==1:
                srng = RandomStreams(seed=123456)
                self.x = T.switch(srng.binomial(size=x.shape, p=p), x, 0)
            else:
                self.x =  (1-p) * x


        # initialize with 0 the weights W as a matrix of shape (n_in, n_out)
        if W is None:
            # Gaussian init scaled by 1/sqrt(n_in).
            W_value = numpy.asarray(rng.normal(0.0, 1.0/numpy.sqrt(n_in),
                      size=(n_in, n_out)), dtype=theano.config.floatX)
            W = theano.shared(value=W_value,
                              name='W', borrow=True)
        if b is None:
            b = theano.shared(value=numpy.zeros((n_out,),
                              dtype=theano.config.floatX),
                              name='b', borrow=True)

        self.W = W
        self.b = b

        # Momentum/delta accumulators matching the parameter shapes.
        self.delta_W = theano.shared(value = numpy.zeros((n_in,n_out),
                                     dtype=theano.config.floatX), name='delta_W')

        self.delta_b = theano.shared(value = numpy.zeros_like(self.b.get_value(borrow=True),
                                     dtype=theano.config.floatX), name='delta_b')

        self.output = T.dot(self.x, self.W) + self.b
        self.output = activation(self.output)

        self.params = [self.W, self.b]
        self.delta_params = [self.delta_W, self.delta_b]
项目:MIDS    作者:freegraphics    | 项目源码 | 文件源码
def train_rates():
    """Train the recommender's rating model indefinitely with checkpointing.

    Runs up to 100000 outer iterations; each performs `ids_move_count`
    training steps, periodically saves checkpoints, and tracks the best
    validation loss (saved to slot 0).
    """
    consts = Consts()
    rng = numpy.random.RandomState()
    theano_rng = RandomStreams(rng.randint(2 ** 30))
    rs = RecommenderSystem(rng= rng,theano_rng = theano_rng,consts=consts)
    validate_loss_min = 0
    validate_loss = 0
    for i in numpy.arange(100000):
        lt = time.time()
        for j in numpy.arange(consts.ids_move_count):
            loss_rates = rs.train_rates(learning_rate = consts.result_learning_rate)
            t1 = time.time()
            # Throttle progress output to at most once per second.
            if t1>lt+1:
                sys.stdout.write("\t\t\t\t\t\t\t\t\t\r")
                sys.stdout.write("[%d] loss = %f , val = %f valmin = %f\r" % (i,loss_rates,validate_loss,validate_loss_min))
                lt = lt+1
        trace_rates(i + (consts.load_from_ids*consts.save_cycles),loss_rates,validate_loss_min,validate_loss,consts.trace_rates_file_name)
        if i % consts.save_cycles == 0:
            rs.save_rates((i/consts.save_cycles) + consts.load_from_ids,consts)
        if i % consts.validate_cycles == 0:
            validate_loss = rs.validate_rates(consts=consts)
            # Best-so-far model is kept in checkpoint slot 0.
            if validate_loss_min==0 or validate_loss<validate_loss_min:
                validate_loss_min = validate_loss
                rs.save_rates(0,consts)
        consts.update_index(i + (consts.load_from_ids*consts.save_cycles))

    return
项目:neural_network    作者:mnpappo    | 项目源码 | 文件源码
def randdrop(x, level, noise_shape=None, seed=None):
    '''Sets entries in `x` to zero at random, while scaling the entire tensor.

    # Arguments
        x: tensor
        level: fraction of the entries in the tensor
            that will be set to 0.
        noise_shape: shape for randomly generated keep/drop flags,
            must be broadcastable to the shape of `x`
        seed: random seed to ensure determinism.
    '''
    # if level < 0. or level >= 1:
    #     raise Exception('Dropout level must be in interval [0, 1[.')
    if seed is None:
        seed = np.random.randint(1337)

    rng = RandomStreams(seed=seed)
    retain_prob = 1 - level

    # Draw the keep mask either at x's own shape or the caller-provided one.
    mask_shape = x.shape if noise_shape is None else noise_shape
    random_tensor = rng.binomial(mask_shape, p=retain_prob, dtype=x.dtype)
    if noise_shape is not None:
        # Re-declare size-1 axes as broadcastable so the mask matches x.
        random_tensor = T.patternbroadcast(random_tensor,
                                           [dim == 1 for dim in noise_shape])

    # Inverted dropout: zero, then rescale survivors by 1/retain_prob.
    x *= random_tensor
    x /= retain_prob
    return x
项目:sequence-based-recommendations    作者:rdevooght    | 项目源码 | 文件源码
def __init__(self, incoming, num_units, num_outputs=0.01, **kwargs):
    """Blackout output layer.

    A `num_outputs` value below 1 is interpreted as a fraction of
    `num_units`; otherwise it is used as an absolute count.
    """
    super(BlackoutLayer, self).__init__(incoming, num_units, **kwargs)
    self._srng = RandomStreams(get_rng().randint(1, 2147462579))
    outputs = num_outputs if num_outputs >= 1 else num_outputs * num_units
    self.num_outputs = int(outputs)
项目:python-machine-learning    作者:sho-87    | 项目源码 | 文件源码
def dropout_layer(layer, p_dropout):
    """Zero each unit of `layer` with probability `p_dropout`.

    Note: survivors are not rescaled here; the fixed RandomState(0) seed
    makes the stream seed deterministic across calls.
    """
    seed = np.random.RandomState(0).randint(999999)
    srng = shared_randomstreams.RandomStreams(seed)
    keep = srng.binomial(n=1, p=1-p_dropout, size=layer.shape)
    return layer * T.cast(keep, theano.config.floatX)
项目:keraflow    作者:ipod825    | 项目源码 | 文件源码
def __init__(self, **kwargs):
        """Initialize the Theano backend.

        Forwards kwargs to the base backend, seeds the shared random
        stream from the stored seed, and pins Theano's float dtype.
        """
        super(TheanoBackend, self).__init__(**kwargs)
        self.rng = RandomStreams(self._seed)
        theano.config.floatX = _FLOATX
项目:keraflow    作者:ipod825    | 项目源码 | 文件源码
def reset_random_state(self):
        """Re-create the RandomStreams from the stored seed, restoring the
        backend's random state to its initial value."""
        self.rng = RandomStreams(self._seed)

    # TENSOR CREATION
项目:machine-deep_learning    作者:Charleswyt    | 项目源码 | 文件源码
def dropout_layer(layer, p_dropout):
    """Randomly zero units of `layer` with probability `p_dropout`.

    Activations are not rescaled here; any 1/(1-p) compensation must be
    applied by the caller.
    """
    srng = shared_randomstreams.RandomStreams(
        np.random.RandomState(0).randint(999999))
    retain = 1 - p_dropout
    mask = T.cast(srng.binomial(n=1, p=retain, size=layer.shape),
                  theano.config.floatX)
    return layer * mask
项目:DeepEnhancer    作者:minxueric    | 项目源码 | 文件源码
def __init__(self, rng, name, is_train, x, n_in, n_out, W=None, b=None, activation=ReLU, p=0.5):
        """Dropout hidden layer; p is the probability of NOT dropping out a unit.

        Uses inverted dropout at train time (mask then scale by 1/p) and the
        plain activations at test time, switched symbolically on `is_train`.
        """
        self.name = name
        self.x = x
        # Glorot-style uniform bound for weight init.
        bound = np.sqrt(6./(n_in+n_out))
        if W is None:
            W_values = np.asarray(
                    rng.uniform(
                        low=-bound,
                        high=bound,
                        size=(n_in, n_out)
                        ),
                    dtype=theano.config.floatX)
            # Standard 4x widening of the Glorot range for sigmoid units.
            if activation == theano.tensor.nnet.sigmoid:
                W_values *= 4
            W = theano.shared(value=W_values, name='W', borrow=True)

        if b is None:
            # b_values = np.zeros((n_out,), dtype=theano.config.floatX)
            # Biases initialized to the (positive) init bound rather than zero.
            b_values = np.ones((n_out,), dtype=theano.config.floatX) * np.cast[theano.config.floatX](bound)
            b = theano.shared(value=b_values, name='b', borrow=True)

        self.W = W
        self.b = b

        lin_output= T.dot(x, self.W) + self.b
        output = (
                lin_output if activation is None
                else activation(lin_output))

        def drop(x, rng=rng, p=p):
            """p is the probability of NOT dropping out a unit"""
            srng = RandomStreams(rng.randint(999999))
            mask = srng.binomial(n=1, p=p, size=x.shape, dtype=theano.config.floatX)
            return x * mask

        # Inverted dropout: pre-scale by 1/p so test-time output needs no scaling.
        train_output = drop(np.cast[theano.config.floatX](1./p) * output)

        # Pick the stochastic or deterministic path depending on is_train.
        self.output = T.switch(T.neq(is_train, 0), train_output, output)

        self.params = [self.W, self.b]
项目:NADE    作者:MarcCote    | 项目源码 | 文件源码
def __init__(self, n_visible, n_hidden, nonlinearity="RLU"):
        """Register model sizes and nonlinearity choice; seed a Theano RNG."""
        self.theano_rng = RandomStreams(np.random.randint(2 ** 30))
        for size_name in ("n_visible", "n_hidden"):
            self.add_parameter(SizeParameter(size_name))
        self.add_parameter(NonLinearityParameter("nonlinearity"))
        self.n_visible = n_visible
        self.n_hidden = n_hidden
        self.parameters["nonlinearity"].set_value(nonlinearity)
项目:lazyprogrammer    作者:inhwane    | 项目源码 | 文件源码
def __init__(self, M, an_id):
        """Record the layer width and identifier, and create its random stream."""
        self.id = an_id
        self.M = M
        self.rng = RandomStreams()
项目:eqnet    作者:mast-group    | 项目源码 | 文件源码
def __init__(self, embedding_size: int, vocabulary_size: int, empirical_distribution, representation_size: int,
                 hyperparameters: dict, encoder_type: str, name: str = "GRUSequenceSiameseEncoder", use_centroid=False):
        """Siamese GRU sequence encoder.

        :param embedding_size: dimensionality of the token embeddings.
        :param vocabulary_size: number of tokens in the vocabulary.
        :param empirical_distribution: empirical token distribution; its
            element-wise log initialises the name bias.
        :param representation_size: size of the sequence representation (GRU memory).
        :param hyperparameters: dict; must contain "log_init_noise".
        :param encoder_type: either 'gru' or 'averaging_gru'.
        :param name: prefix for all shared-variable names.
        :param use_centroid: forwarded to the underlying encoder.
        :raises Exception: if `encoder_type` is not one of the supported options.
        """
        self.__hyperparameters = hyperparameters
        self.__name = name
        log_init_noise = self.__hyperparameters["log_init_noise"]

        self.__memory_size = representation_size
        self.__embedding_size = embedding_size
        self.__vocabulary_size = vocabulary_size
        self.__empirical_distribution = empirical_distribution
        self.__encoder_type = encoder_type

        # Gaussian init scaled by 10**log_init_noise.
        embeddings = np.random.randn(vocabulary_size, embedding_size) * 10 ** log_init_noise
        self.__embeddings = theano.shared(embeddings.astype(theano.config.floatX), name=name + ":embeddings")
        self.__name_bias = theano.shared(np.log(empirical_distribution).astype(theano.config.floatX),
                                         name=name + ":name_bias")

        encoder_init_state = np.random.randn(representation_size) * 10 ** log_init_noise
        self.__encoder_init_state = theano.shared(encoder_init_state.astype(theano.config.floatX),
                                                  name=name + ":encoder_init_state")

        self.__rng = RandomStreams()

        self.__input_sequence = T.ivector(name + ":input_sequence")

        if encoder_type == 'gru':
            self.__encoder = GRU(self.__embeddings, representation_size, embedding_size,
                                 self.__hyperparameters, self.__rng, name=name + ":GRUSequenceEncoder",
                                 use_centroid=use_centroid)
        elif encoder_type == 'averaging_gru':
            self.__encoder = AveragingGRU(self.__embeddings, representation_size, embedding_size,
                                          self.__hyperparameters, self.__rng,
                                          name=name + ":AveragingGRUSequenceEncoder", use_centroid=use_centroid)
        else:
            # Bug fix: the offending encoder_type was never interpolated into
            # the message — the raw "%s" placeholder was raised verbatim.
            raise Exception("Unrecognized encoder type `%s`, possible options `gru` and `averaging_gru`"
                            % encoder_type)

        self.__params = {"embeddings": self.__embeddings,
                         "encoder_init_state": self.__encoder_init_state}
        self.__params.update(self.__encoder.get_params())
项目:eqnet    作者:mast-group    | 项目源码 | 文件源码
def __init__(self, embedding_size: int, vocabulary_size: int, empirical_distribution, representation_size: int,
                 hyperparameters: dict, encoder_type: str, name: str = "GRUSequenceSupervisedEncoder",
                 use_centroid=False):
        """Supervised GRU sequence encoder with an explicit target sequence.

        :param embedding_size: dimensionality of the token embeddings.
        :param vocabulary_size: number of tokens in the vocabulary.
        :param empirical_distribution: empirical token distribution; its
            element-wise log initialises the name bias.
        :param representation_size: size of the sequence representation (GRU memory).
        :param hyperparameters: dict; must contain "log_init_noise".
        :param encoder_type: either 'gru' or 'averaging_gru'.
        :param name: prefix for all shared-variable names.
        :param use_centroid: forwarded to the underlying encoder.
        :raises Exception: if `encoder_type` is not one of the supported options.
        """
        self.__hyperparameters = hyperparameters
        self.__name = name
        log_init_noise = self.__hyperparameters["log_init_noise"]

        self.__memory_size = representation_size
        self.__embedding_size = embedding_size

        # Gaussian init scaled by 10**log_init_noise.
        embeddings = np.random.randn(vocabulary_size, embedding_size) * 10 ** log_init_noise
        self.__embeddings = theano.shared(embeddings.astype(theano.config.floatX), name=name + ":embeddings")
        self.__name_bias = theano.shared(np.log(empirical_distribution).astype(theano.config.floatX),
                                         name=name + ":name_bias")

        encoder_init_state = np.random.randn(representation_size) * 10 ** log_init_noise
        self.__encoder_init_state = theano.shared(encoder_init_state.astype(theano.config.floatX),
                                                  name=name + ":encoder_init_state")

        self.__rng = RandomStreams()

        self.__input_sequence = T.ivector(name + ":input_sequence")
        self.__output_sequence = T.ivector(name + ":output_sequence")
        # Reversed copy of the target sequence for decoders that run backwards.
        self.__inverted_output_sequence = self.__output_sequence[::-1]
        if encoder_type == 'gru':
            self.__encoder = GRU(self.__embeddings, representation_size, embedding_size,
                                 self.__hyperparameters, self.__rng, name=name + ":GRUSequenceEncoder",
                                 use_centroid=use_centroid)
        elif encoder_type == 'averaging_gru':
            self.__encoder = AveragingGRU(self.__embeddings, representation_size, embedding_size,
                                          self.__hyperparameters, self.__rng,
                                          name=name + ":AveragingGRUSequenceEncoder", use_centroid=use_centroid)
        else:
            # Bug fix: the offending encoder_type was never interpolated into
            # the message — the raw "%s" placeholder was raised verbatim.
            raise Exception("Unrecognized encoder type `%s`, possible options `gru` and `averaging_gru`"
                            % encoder_type)

        self.__params = {"embeddings": self.__embeddings,
                         "encoder_init_state": self.__encoder_init_state}
        self.__params.update(self.__encoder.get_params())
        self.__standalone_representation = T.dvector(self.__name + ":representation_input")
项目:eqnet    作者:mast-group    | 项目源码 | 文件源码
def __init__(self, training_filename: str, hyperparameters: dict, combination_type='residual_with_ae'):
        """Build the tree-RNN model: dataset extractor, RNN and its trainable parameters."""
        self.__hyperparameters = hyperparameters
        self.__rng = RandomStreams()
        self.__dataset_extractor = TreeDatasetExtractor(training_filename)

        memory_size = self.__hyperparameters['memory_size']
        self.__rnn = RNN(memory_size, self.__hyperparameters, self.__rng, self.__dataset_extractor,
                         combination_type=combination_type)
        self.__trainable_params = list(self.__rnn.get_params().values())

        required = self.REQUIRED_HYPERPARAMETERS | self.__rnn.required_hyperparameters
        check_hyperparameters(required, self.__hyperparameters)

        # Populated later by compilation / training.
        self.__compiled_methods = None
        self.__trained_parameters = None
项目:eqnet    作者:mast-group    | 项目源码 | 文件源码
def __init__(self, training_filename: str, hyperparameters: dict, combination_type='eqnet'):
        """Tree-RNN model over equivalence classes (eqnet).

        :param training_filename: dataset file handed to TreeDatasetExtractor.
        :param hyperparameters: must include 'memory_size', 'dropout_rate',
            'log_init_scale_embedding' and everything demanded by
            REQUIRED_HYPERPARAMETERS plus the RNN's own required set.
        :param combination_type: node-combination strategy passed to the RNN.
        """
        self.__hyperparameters = hyperparameters

        self.__dataset_extractor = TreeDatasetExtractor(training_filename)
        self.__rng = RandomStreams()

        self.__rnn = RNN(self.__hyperparameters['memory_size'], self.__hyperparameters, self.__rng,
                         self.__dataset_extractor, combination_type=combination_type)
        check_hyperparameters(self.REQUIRED_HYPERPARAMETERS | self.__rnn.required_hyperparameters,
                              self.__hyperparameters)

        # One target embedding column per equivalence class, Gaussian-initialised
        # and scaled by 10**log_init_scale_embedding.
        target_embeddings = np.random.randn(self.__hyperparameters['memory_size'],
                                            self.__dataset_extractor.num_equivalent_classes) * 10 ** \
                                                                                               self.__hyperparameters[
                                                                                                   "log_init_scale_embedding"]
        self.__target_embeddings = theano.shared(target_embeddings.astype(theano.config.floatX),
                                                 name="target_embeddings")
        # Dropout-masked view of the target embeddings (use_dropout=True).
        self.__target_embeddings_dropout = dropout(self.__hyperparameters['dropout_rate'], self.__rng,
                                                   self.__target_embeddings, True)

        # Class prior taken from the training-set empirical distribution.
        self.__target_bias = np.log(self.__dataset_extractor.training_empirical_distribution)

        self.__trainable_params = list(self.__rnn.get_params().values()) + [self.__target_embeddings]

        # Populated later by compilation / training.
        self.__compiled_methods = None
        self.__trained_parameters = None
项目:eqnet    作者:mast-group    | 项目源码 | 文件源码
def dropout(dropout_rate: float, rng: RandomStreams, parameter, use_dropout: bool):
    """Inverted dropout: mask `parameter` and rescale so its expectation is unchanged.

    When `use_dropout` is False the parameter is returned untouched.
    """
    if not use_dropout:
        return parameter
    keep_prob = 1. - dropout_rate
    mask = rng.binomial(parameter.shape, p=keep_prob, dtype=parameter.dtype)
    return parameter * mask / keep_prob
项目:eqnet    作者:mast-group    | 项目源码 | 文件源码
def dropout_multiple(dropout_rate: float, rng: RandomStreams, use_dropout: bool, *parameters):
    """Apply `dropout` with shared settings to each parameter; return them as a tuple."""
    return tuple(dropout(dropout_rate, rng, param, use_dropout) for param in parameters)
项目:eqnet    作者:mast-group    | 项目源码 | 文件源码
def get_cell_with_dropout(self, rng: RandomStreams, dropout_rate: float):
        """Return a copy of this cell whose weights are wrapped with dropout.

        Abstract hook: concrete recurrent-cell classes must override this.
        """
        raise NotImplementedError()
项目:eqnet    作者:mast-group    | 项目源码 | 文件源码
def get_cell_with_dropout(self, rng: RandomStreams, dropout_rate: float):
        """Build a shallow clone of this cell whose weight matrices are dropout-masked.

        The bias is shared (not masked) and the clone cannot itself spawn
        another dropout clone.
        """
        clone = SimpleRecurrentCell.__new__(self.__class__)

        masked = dropout_multiple(
            dropout_rate, rng, True, self.__prev_hidden_to_next, self.__prediction_to_hidden)
        clone.__prev_hidden_to_next, clone.__prediction_to_hidden = masked
        clone.__bias = self.__bias
        clone.__name = self.__name + ":with_dropout"
        # Disallow double application of dropout.
        clone.get_cell_with_dropout = None
        return clone
项目:eqnet    作者:mast-group    | 项目源码 | 文件源码
def get_cell_with_dropout(self, rng: RandomStreams, dropout_rate: float):
        """Return a GruCell clone whose weight matrices are dropout-masked.

        Biases, memory dimension and gradient-clip setting are shared with the
        original (not masked); the clone cannot spawn another dropout clone.
        """
        with_dropout = GruCell.__new__(GruCell)

        # Mask both the hidden-to-hidden and input-to-hidden weights.
        with_dropout.__w_hid, with_dropout.__w_in = dropout_multiple(
            dropout_rate, rng, True, self.__w_hid, self.__w_in)
        with_dropout.__biases = self.__biases
        # Disallow double application of dropout on the clone.
        with_dropout.get_cell_with_dropout = None
        with_dropout.__name = self.__name + ":with_dropout"
        with_dropout.__memory_D = self.__memory_D
        with_dropout.__grad_clip = self.__grad_clip
        return with_dropout
项目:world_merlin    作者:pbaljeka    | 项目源码 | 文件源码
def __init__(self, rng, x, n_in, n_out, W = None, b = None, activation = T.tanh, p=0.0, training=0):
        """Fully-connected layer with optional input dropout and delta buffers.

        :param rng: numpy RandomState used for weight initialisation.
        :param x: symbolic input tensor.
        :param n_in: input size.
        :param n_out: output size.
        :param W: optional pre-built weight shared variable (created if None).
        :param b: optional pre-built bias shared variable (created if None).
        :param activation: elementwise non-linearity applied to the affine output.
        :param p: dropout parameter; 0.0 disables dropout entirely.
        :param training: 1 builds the stochastic training graph, otherwise the
            deterministic inference graph.
        """
        n_in = int(n_in)  # ensure sizes have integer type
        n_out = int(n_out)# ensure sizes have integer type

        self.x = x

        if p > 0.0:
            if training==1:
                # Fixed seed: every layer instance draws the same mask stream.
                srng = RandomStreams(seed=123456)
                # Keep each input element with probability p, zero it otherwise.
                self.x = T.switch(srng.binomial(size=x.shape, p=p), x, 0)
            else:
                # NOTE(review): training keeps units w.p. p (expected output p*x)
                # but inference scales by (1-p); these only agree at p=0.5.
                # Looks inconsistent — confirm the intended meaning of p.
                self.x =  (1-p) * x


        # initialise W with Gaussian noise, std 1/sqrt(n_in)
        if W is None:
            W_value = numpy.asarray(rng.normal(0.0, 1.0/numpy.sqrt(n_in),
                      size=(n_in, n_out)), dtype=theano.config.floatX)
            W = theano.shared(value=W_value,
                              name='W', borrow=True)
        if b is None:
            b = theano.shared(value=numpy.zeros((n_out,),
                              dtype=theano.config.floatX),
                              name='b', borrow=True)

        self.W = W
        self.b = b

        # Zero-initialised delta buffers for momentum-style updates.
        self.delta_W = theano.shared(value = numpy.zeros((n_in,n_out),
                                     dtype=theano.config.floatX), name='delta_W')

        self.delta_b = theano.shared(value = numpy.zeros_like(self.b.get_value(borrow=True),
                                     dtype=theano.config.floatX), name='delta_b')

        self.output = T.dot(self.x, self.W) + self.b
        self.output = activation(self.output)

        self.params = [self.W, self.b]
        self.delta_params = [self.delta_W, self.delta_b]
项目:mimicry.ai    作者:fizerkhan    | 项目源码 | 文件源码
def __init__(self, rng, x, n_in, n_out, W = None, b = None, activation = T.tanh, p=0.0, training=0):
        """Fully-connected layer with optional input dropout and delta buffers.

        :param rng: numpy RandomState used for weight initialisation.
        :param x: symbolic input tensor.
        :param n_in: input size.
        :param n_out: output size.
        :param W: optional pre-built weight shared variable (created if None).
        :param b: optional pre-built bias shared variable (created if None).
        :param activation: elementwise non-linearity applied to the affine output.
        :param p: dropout parameter; 0.0 disables dropout entirely.
        :param training: 1 builds the stochastic training graph, otherwise the
            deterministic inference graph.
        """
        n_in = int(n_in)  # ensure sizes have integer type
        n_out = int(n_out)# ensure sizes have integer type

        self.x = x

        if p > 0.0:
            if training==1:
                # Fixed seed: every layer instance draws the same mask stream.
                srng = RandomStreams(seed=123456)
                # Keep each input element with probability p, zero it otherwise.
                self.x = T.switch(srng.binomial(size=x.shape, p=p), x, 0)
            else:
                # NOTE(review): training keeps units w.p. p (expected output p*x)
                # but inference scales by (1-p); these only agree at p=0.5.
                # Looks inconsistent — confirm the intended meaning of p.
                self.x =  (1-p) * x


        # initialise W with Gaussian noise, std 1/sqrt(n_in)
        if W is None:
            W_value = numpy.asarray(rng.normal(0.0, 1.0/numpy.sqrt(n_in),
                      size=(n_in, n_out)), dtype=theano.config.floatX)
            W = theano.shared(value=W_value,
                              name='W', borrow=True)
        if b is None:
            b = theano.shared(value=numpy.zeros((n_out,),
                              dtype=theano.config.floatX),
                              name='b', borrow=True)

        self.W = W
        self.b = b

        # Zero-initialised delta buffers for momentum-style updates.
        self.delta_W = theano.shared(value = numpy.zeros((n_in,n_out),
                                     dtype=theano.config.floatX), name='delta_W')

        self.delta_b = theano.shared(value = numpy.zeros_like(self.b.get_value(borrow=True),
                                     dtype=theano.config.floatX), name='delta_b')

        self.output = T.dot(self.x, self.W) + self.b
        self.output = activation(self.output)

        self.params = [self.W, self.b]
        self.delta_params = [self.delta_W, self.delta_b]
项目:corelm    作者:nusnlp    | 项目源码 | 文件源码
def __init__(self, classifier, args, noise_dist):
        """Noise-Contrastive Estimation (NCE) training cost for `classifier`.

        Builds `self.cost`, the mean NCE objective over the minibatch, and
        `self.test`, the summed data scores (diagnostic only).

        :param classifier: model exposing `output`, a matrix of unnormalised
            log-scores u(w|c), one row per minibatch instance.
        :param args: needs `num_noise_samples` (k) and `num_classes`.
        :param noise_dist: shared vector — the noise distribution p_n over classes.
        """
        self.y = T.ivector('y')

        ## Cost function
        #  Sum over minibatch instances (log ( u(w|c) / (u(w|c) + k * p_n(w)) ) + sum over noise samples ( log ( u(x|c) / ( u(x|c) + k * p_n(x) ) )))

        # Draw k noise-class indices per instance from p_n (fixed seed).
        srng = RandomStreams(seed=1234)
        noise_samples = srng.choice(size=(self.y.shape[0],args.num_noise_samples),  a=args.num_classes, p=noise_dist, dtype='int32')

        # Precompute log p_n once as a shared variable.
        log_noise_dist = theano.shared(np.log(noise_dist.get_value()),borrow=True)
        #log_num_noise_samples = theano.shared(math.log(args.num_noise_samples)).astype(theano.config.floatX)
        log_num_noise_samples = theano.shared(np.log(args.num_noise_samples,dtype=theano.config.floatX))
        # Data Part of Cost Function: log ( u(w|c) / (u(w|c) + k * p_n(w))
        data_scores = classifier.output[T.arange(self.y.shape[0]),self.y]
        data_denom = self.logadd(data_scores, log_num_noise_samples + log_noise_dist[self.y] )
        data_prob = data_scores - data_denom
        # Summation of Noise Part of Cost Function: sum over noise samples ( log ( u(x|c) / ( u(x|c) + k * p_n(x) ) ))
        noise_mass = log_num_noise_samples + log_noise_dist[noise_samples] # log(k) + log(p_n(x)) for all noise samples (Shape: #instances x k)
        noise_scores = classifier.output[T.arange(noise_samples.shape[0]).reshape((-1,1)),noise_samples]
        noise_denom = self.logadd(noise_scores, noise_mass)
        noise_prob_sum = T.sum(noise_mass - noise_denom, axis=1)

        # Negative mean NCE log-likelihood (to be minimised).
        self.cost = (
            -T.mean(data_prob + noise_prob_sum)
        )
        self.test = (
            T.sum(data_scores)
        )
项目:Theano-NN_Starter    作者:nightinwhite    | 项目源码 | 文件源码
def CTC_train(self):
        """Compile `self.sgd_train`, an RMSProp-style training function for the CTC loss.

        For each trainable tensor a shared accumulator keeps a decayed mean of
        squared gradients; the update is
            param -= lr * grad / sqrt(mean_sq + 1e-4).
        Each tensor's gradient is additionally gated by a Bernoulli(grad_rate)
        scalar drawn per call, i.e. with probability 0.2 a whole tensor's
        gradient is zeroed for that step.
        """
        CTC_LOSSs = T.cast(T.mean(self.CTC_LOSS(), axis=0), "float32")
        train_data_d = []    # per-tensor (possibly gated) gradients
        train_data_m = []    # symbolic decayed mean of squared gradients
        train_data_m_s = []  # shared accumulators backing train_data_m
        learning_rate = T.scalar()
        decay = T.scalar()
        seed = np.random.randint(10e6)
        rng = RandomStreams(seed=seed)
        # Probability of KEEPING a tensor's gradient at each step.
        grad_rate = 0.8
        for data in self.train_data:
            # Bernoulli gate (scalar) times the gradient of the mean CTC loss.
            data_d = rng.binomial((1,), p=grad_rate, dtype="float32")[0]*T.grad(CTC_LOSSs, data)
            train_data_d.append(data_d)
            data_m_s = theano.shared(np.zeros(data.get_value().shape).astype(np.float32))
            train_data_m_s.append(data_m_s)
            data_m = data_m_s*decay + (1-decay)*data_d**2
            train_data_m.append(data_m)
        #self.grad_test = theano.function([self.X, self.Y], train_data_d[-4])
        #self.data_d_print = theano.function([self.X,self.Y],train_data_d[0][0])
        #upd = [(d,d-learning_rate*d_d)for d,d_d in zip(self.train_data,train_data_d)]
        # Parameter update; 1e-4 guards against division by zero.
        upd = [(d, d-learning_rate*d_d/T.sqrt(d_m+1e-4))for d,d_d,d_m in zip(self.train_data,train_data_d,train_data_m)]
        # Accumulator update: exponential moving average of squared gradients.
        upd1 = [(d_m_s, decay*d_m_s+(1-decay)*d_d**2) for d_m_s,d_d in zip(train_data_m_s,train_data_d)]
        upd +=upd1    
        #self.test = theano.function([self.X,self.Y],train_data_d[0])
        self.sgd_train = theano.function([self.X, self.Y, learning_rate, decay],
                                         [],
                                         updates = upd
                                         )