Python scipy.special module: expit() example source code

We have extracted the following 50 code examples from open-source Python projects to show how to use scipy.special.expit().
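Before the examples, a quick refresher: expit(x) computes the logistic sigmoid 1 / (1 + exp(-x)) elementwise, and it stays numerically well-behaved where the naive formula overflows. A minimal sketch (not taken from any of the projects below):

import numpy as np
from scipy.special import expit

x = np.array([-1000.0, -1.0, 0.0, 1.0, 1000.0])
print(expit(x))  # [0.  0.26894142  0.5  0.73105858  1.]

# the naive formula gives the same values here, but its intermediate
# exp(1000) overflows to inf and emits a RuntimeWarning
naive = 1.0 / (1.0 + np.exp(-x))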

Project: diamond    Author: stitchfix
def predict(self, new_df):
        """ Use the estimated model to make predictions. \
        New levels of grouping factors are given fixed effects,
            with zero random effects

        Args:
            new_df (DataFrame):  data to make predictions on
        Returns:
            n x J matrix, where n is the number of rows \
            of new_df and J is the number \
            of possible response values. The (i, j) entry of \
           this matrix is the probability that observation i \
            realizes response level j.
        """
        eta = super(CumulativeLogisticRegression, self).predict(new_df)
        intercepts = self.effects['intercepts']
        J = self.J
        preds = np.zeros((len(eta), J))
        preds[:, 0] = expit(intercepts[0] + eta)
        preds[:, J - 1] = 1.0 - expit(intercepts[J - 2] + eta)
        for j in range(1, J - 1):
            preds[:, j] = expit(intercepts[j] + eta) - \
                expit(intercepts[j - 1] + eta)
        return preds
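The three branches above are adjacent differences of the cumulative probabilities expit(intercepts[j] + eta), so every row of preds telescopes to 1. A quick sanity check with hypothetical increasing intercepts (not part of diamond):

import numpy as np
from scipy.special import expit

alpha = np.array([-1.0, 0.5, 2.0])  # J - 1 = 3 intercepts -> J = 4 response levels
eta = np.random.randn(5)            # stand-in linear predictor
cum = expit(alpha[None, :] + eta[:, None])
preds = np.diff(np.hstack([np.zeros((5, 1)), cum, np.ones((5, 1))]), axis=1)
assert np.allclose(preds.sum(axis=1), 1.0)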
Project: pyglmnet    Author: glm-tools
def _mu(distr, z, eta):
    """The non-linearity (inverse link)."""
    if distr in ['softplus', 'gamma']:
        mu = np.log1p(np.exp(z))
    elif distr == 'poisson':
        mu = z.copy()
        intercept = (1 - eta) * np.exp(eta)
        mu[z > eta] = z[z > eta] * np.exp(eta) + intercept
        mu[z <= eta] = np.exp(z[z <= eta])
    elif distr == 'gaussian':
        mu = z
    elif distr == 'binomial':
        mu = expit(z)
    elif distr == 'probit':
        mu = norm.cdf(z)
    return mu
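(A note on the 'poisson' branch: the exponential inverse link is linearized above the threshold eta to keep gradients bounded, giving mu(z) = exp(eta) * z + (1 - eta) * exp(eta) for z > eta. The two pieces agree in both value and slope at z = eta, since exp(eta) * eta + (1 - eta) * exp(eta) = exp(eta).)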
Project: dl4nlp    Author: yohokuno
def logistic_regression_cost_gradient(parameters, input, output):
    """
    Cost and gradient for logistic regression
    :param parameters: weight vector
    :param input: feature vector
    :param output: binary label (0 or 1)
    :return: cost and gradient for the input and output
    """
    prediction = expit(np.dot(input, parameters))
    if output:
        inside_log = prediction
    else:
        inside_log = 1.0 - prediction

    if inside_log != 0.0:
        cost = -np.log(inside_log)
    else:
        # -log(0) diverges to +inf; cap the cost at the largest representable float
        cost = np.finfo(float).max

    gradient = (prediction - output) * input
    return cost, gradient
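Behind this snippet: with p = sigmoid(w . x), the cross-entropy cost is -[y log p + (1 - y) log(1 - p)], and its gradient with respect to w collapses to (p - y) x, which is exactly what the last line returns. A finite-difference spot-check on hypothetical inputs:

import numpy as np
from scipy.special import expit

rng = np.random.default_rng(0)
w, x, y = rng.normal(size=4), rng.normal(size=4), 1

def cost(w):
    p = expit(np.dot(x, w))
    return -(y * np.log(p) + (1 - y) * np.log(1 - p))

analytic = (expit(np.dot(x, w)) - y) * x
eps = 1e-6
numeric = np.array([(cost(w + eps * e) - cost(w - eps * e)) / (2 * eps)
                    for e in np.eye(4)])
assert np.allclose(analytic, numeric, atol=1e-5)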
Project: dl4nlp    Author: yohokuno
def assertLogisticRegression(self, sampler):
        data_size = 3
        input_size = 5
        inputs = np.random.uniform(-10.0, 10.0, size=(data_size, input_size))
        outputs = np.random.randint(0, 2, size=data_size)
        initial_parameters = np.random.normal(scale=1e-5, size=input_size)

        # Create cost and gradient function for gradient descent and check its gradient
        cost_gradient = bind_cost_gradient(logistic_regression_cost_gradient,
                                           inputs, outputs, sampler=sampler)
        result = gradient_check(cost_gradient, initial_parameters)
        self.assertEqual([], result)

        # Train logistic regression and see if it predicts correct labels
        final_parameters, cost_history = gradient_descent(cost_gradient, initial_parameters, 100)
        predictions = expit(np.dot(inputs, final_parameters)) > 0.5

        # Binary classification of 3 data points with 5 dimensions is always linearly separable
        for output, prediction in zip(outputs, predictions):
            self.assertEqual(output, prediction)
Project: instacart-basket-prediction    Author: colinmorris
def get_probmap(model, sess):
  """{uid -> {pid -> prob}}"""
  # Start a fresh pass through the validation data
  sess.run(model.dataset.new_epoch_op())
  pmap = defaultdict(dict)
  i = 0
  nseqs = 0
  to_fetch = [model.lastorder_logits, model.dataset['uid'], model.dataset['pid']]
  while 1:
    try:
      final_logits, uids, pids = sess.run(to_fetch)
    except tf.errors.OutOfRangeError:
      break
    batch_size = len(uids)
    nseqs += batch_size
    final_probs = expit(final_logits)
    for uid, pid, prob in zip(uids, pids, final_probs):
      pmap[uid][pid] = prob
    i += 1
  tf.logging.info("Computed probabilities for {} users over {} sequences in {} batches".format(
    len(pmap), nseqs, i
    ))
  return pmap
Project: instacart-basket-prediction    Author: colinmorris
def main():
  parser = argparse.ArgumentParser()
  parser.add_argument('--tag', default='pairs')
  parser.add_argument('--fold', default='test')
  args = parser.parse_args()

  #metavec = load_metavectors(args.fold)

  clf = train.load_model(args.tag)
  X, y = vectorize.load_fold(args.fold, args.tag)

  if hasattr(clf, 'predict_proba'):
    probs = clf.predict_proba(X)
    # returns an array of shape (n, 2), where each len-2 subarray
    # has the probability of the negative and positive classes. which is silly.
    probs = probs[:,1]
  else:
    scores = clf.decision_function(X)
    probs = expit(scores)

  pdict = pdictify(probs, args.fold)
  common.save_pdict_for_tag(args.tag, pdict, args.fold)
Project: cebl    Author: idfah
def logistic(x, prime=0):
    if prime == 0:
        ##v = np.empty_like(x)
        ##mask = x < 0.0

        ##zl = np.exp(x[mask])
        ##zl = 1.0 / (1.0 + zl)
        ##v[mask] = zl

        ##zh = np.exp(-x[~mask])
        ##zh = zh / (1.0 + zh)
        ##v[~mask] = zh

        v = sps.expit(x)

        return v

    elif prime == 1:
        return logistic(x) * (1.0 - logistic(x))

    else:
        raise NotImplementedError('%d order derivative not implemented.' % int(prime))
Project: ShallowLearn    Author: giacbrd
def train_cbow_pair_softmax(model, target, input_word_indices, l1, alpha, learn_vectors=True, learn_hidden=True):
        neu1e = zeros(l1.shape)

        target_vect = zeros(model.syn1neg.shape[0])
        target_vect[target.index] = 1.
        l2 = copy(model.syn1neg)
        fa = expit(dot(l1, l2.T))  # propagate hidden -> output
        ga = (target_vect - fa) * alpha  # vector of error gradients multiplied by the learning rate
        if learn_hidden:
            model.syn1neg += outer(ga, l1)  # learn hidden -> output
        neu1e += dot(ga, l2)  # save error

        if learn_vectors:
            # learn input -> hidden, here for all words in the window separately
            if not model.cbow_mean and input_word_indices:
                neu1e /= len(input_word_indices)
            for i in input_word_indices:
                model.wv.syn0[i] += neu1e * model.syn0_lockf[i]

        return neu1e
Project: ShallowLearn    Author: giacbrd
def score_cbow_labeled_pair(model, targets, l1):
        if model.hs:
            prob = []
            # FIXME this cycle should be executed internally in numpy
            for target in targets:
                l2a = model.syn1[target.point]
                sgn = (-1.0) ** target.code  # ch function, 0-> 1, 1 -> -1
                prob.append(prod(expit(sgn * dot(l1, l2a.T))))
        # Softmax
        else:
            def exp_dot(x):
                return exp(dot(l1, x.T))

            prob_num = exp_dot(model.syn1neg[[t.index for t in targets]])
            prob_den = np_sum(apply_along_axis(exp_dot, 1, model.syn1neg))
            prob = prob_num / prob_den
        return prob
Project: Steal-ML    Author: ftramer
def temp_log_loss(w, X, Y, alpha):
    n_classes = Y.shape[1]
    w = w.reshape(n_classes, -1)
    intercept = w[:, -1]
    w = w[:, :-1]
    z = safe_sparse_dot(X, w.T) + intercept

    denom = expit(z)
    #print denom
    #print denom.sum()
    denom = denom.sum(axis=1).reshape((denom.shape[0], -1))
    #print denom
    p = log_logistic(z)

    loss = - (Y * p).sum()
    loss += np.log(denom).sum()
    loss += 0.5 * alpha * squared_norm(w)

    return loss
Project: prml    Author: Yevgnen
def logistic_regression(x, t, w, eps=1e-2, max_iter=int(1e3)):
    N = x.shape[1]
    Phi = np.vstack([np.ones(N), phi(x)]).T

    for k in range(max_iter):
        y = expit(Phi.dot(w))
        R = np.diag(np.ones(N) * (y * (1 - y)))
        H = Phi.T.dot(R).dot(Phi)
        g = Phi.T.dot(y - t)

        w_new = w - linalg.solve(H, g)

        diff = linalg.norm(w_new - w) / linalg.norm(w)
        if (diff < eps):
            break

        w = w_new
        print('{0:5d} {1:10.6f}'.format(k, diff))

    return w
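This is Newton-Raphson (equivalently, iteratively reweighted least squares) for the Bernoulli log-likelihood: R = diag(y * (1 - y)) holds the prediction variances, H = Phi.T R Phi is the Hessian, g = Phi.T (y - t) the gradient, and each step solves H * delta = g. A self-contained toy run with an identity basis and hypothetical data (Newton steps can diverge if the sample happens to be perfectly separable):

import numpy as np
from scipy.special import expit
from scipy import linalg

rng = np.random.default_rng(1)
Phi = np.column_stack([np.ones(100), rng.normal(size=100)])  # bias + one feature
t = (Phi @ np.array([0.5, 2.0]) + rng.normal(size=100) > 0).astype(float)

w = np.zeros(2)
for _ in range(25):
    y = expit(Phi @ w)
    H = Phi.T @ np.diag(y * (1 - y)) @ Phi  # Hessian
    g = Phi.T @ (y - t)                     # gradient
    w = w - linalg.solve(H, g)
print(w)  # roughly recovers the generating coefficients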
Project: uncover-ml    Author: GeoscienceAustralia
def transform(self, y):

        yexpit = expit(self.scale * y)

        return yexpit
Project: aboleth    Author: data61
def make_image_data():
    """Make some simple data."""
    N = 100
    M = 3
    # N 28x28 RGB float images
    x = expit(RAND.randn(N, 28, 28, 3)).astype(np.float32)
    w = np.linspace(-2.5, 2.5, 28*28*3)
    Y = np.dot(x.reshape(-1, 28*28*3), w) + RAND.randn(N, 1)
    X = tf.tile(tf.expand_dims(x, 0), [M, 1, 1, 1, 1])
    return x, Y, X
Project: diamond    Author: stitchfix
def predict(self, new_df):
        """ Use estimated coefficients to make predictions on new data

        Args:
            new_df (DataFrame). DataFrame to make predictions on.
        Returns:
            array-like. Predictions on the response scale, i.e. probabilities
        """
        return expit(super(LogisticRegression, self).predict(new_df))
Project: diamond    Author: stitchfix
def l2_clogistic_llh(X, Y, alpha, beta, penalty_matrix, offset):
    """ Penalized log likelihood function for proportional odds cumulative logit model

    Args:
        X : array_like. design matrix
        Y : array_like. response matrix
        alpha : array_like. intercepts.
            must have shape == one less than the number of columns of `Y`
        beta : array_like. parameters.
            must have shape == number of columns of X
        penalty_matrix : array_like. Regularization matrix
        offset : array_like, optional. Defaults to 0
    Returns:
        scalar : penalized loglikelihood
    """
    offset = 0.0 if offset is None else offset
    obj = 0.0
    J = Y.shape[1]
    Xb = dot(X, beta) + offset
    for j in range(J):
        if j == 0:
            obj += dot(np.log(expit(alpha[j] + Xb)), Y[:, j])
        elif j == J - 1:
            obj += dot(np.log(1 - expit(alpha[j - 1] + Xb)), Y[:, j])
        else:
            obj += dot(np.log(expit(alpha[j] + Xb) - expit(alpha[j - 1] + Xb)), Y[:, j])
    obj -= 0.5 * dot(beta, dot(penalty_matrix, beta))
    return -np.inf if np.isnan(obj) else obj
Project: diamond    Author: stitchfix
def _l2_clogistic_gradient_IL(X, alpha, beta, offset=None, **kwargs):
    """ Helper function for calculating the cumulative logistic gradient. \
        The inverse logit of alpha[j + X*beta] is \
        ubiquitous in gradient and Hessian calculations \
        so it's more efficient to calculate it once and \
        pass it around as a parameter than to recompute it every time

    Args:
        X : array_like. design matrix
        alpha : array_like. intercepts. must have shape == one less than the number of columns of `Y`
        beta : array_like. parameters. must have shape == number of columns of X
        offset : array_like, optional. Defaults to 0
        n : int, optional.\
        You must specify the number of rows if there are no main effects
    Returns:
        array_like. n x J-1 matrix where entry i,j is the inverse logit of (alpha[j] + X[i, :] * beta)
    """
    J = len(alpha) + 1
    if X is None:
        n = kwargs.get("n")
    else:
        n = X.shape[0]
    if X is None or beta is None:
        Xb = 0.
    else:
        Xb = dot(X, beta) + (0 if offset is None else offset)
    IL = np.zeros((n, J - 1))
    for j in range(J - 1):
        IL[:, j] = expit(alpha[j] + Xb)
    return IL
Project: pyglmnet    Author: glm-tools
def _grad_mu(distr, z, eta):
    """Derivative of the non-linearity."""
    if distr in ['softplus', 'gamma']:
        grad_mu = expit(z)
    elif distr == 'poisson':
        grad_mu = z.copy()
        grad_mu[z > eta] = np.ones_like(z)[z > eta] * np.exp(eta)
        grad_mu[z <= eta] = np.exp(z[z <= eta])
    elif distr == 'gaussian':
        grad_mu = np.ones_like(z)
    elif distr == 'binomial':
        grad_mu = expit(z) * (1 - expit(z))
    elif distr == 'probit':
        grad_mu = norm.pdf(z)
    return grad_mu
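(The 'softplus' branch pairs with _mu above by plain calculus: d/dz log(1 + exp(z)) = exp(z) / (1 + exp(z)) = expit(z), so the derivative of the softplus inverse link is exactly the logistic sigmoid.)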
Project: dl4nlp    Author: yohokuno
def neural_network_cost_gradient(parameters, input, output):
    """
    3-layer network cost and gradient function
    :param parameters: pair of (W1, W2)
    :param input: input vector
    :param output: index to correct label
    :return: cross entropy cost and gradient
    """
    W1, W2 = parameters
    input = input.reshape(-1, 1)

    hidden_layer = expit(W1.dot(input))
    inside_softmax = W2.dot(hidden_layer)

    # TODO: allow softmax to normalize column vector
    prediction = softmax(inside_softmax.reshape(-1)).reshape(-1, 1)
    cost = -np.sum(np.log(prediction[output]))

    one_hot = np.zeros_like(prediction)
    one_hot[output] = 1
    delta = prediction - one_hot
    gradient_W2 = delta.dot(hidden_layer.T)
    gradient_W1 = sigmoid_gradient(hidden_layer) * W2.T.dot(delta).dot(input.T)

    gradient = [gradient_W1, gradient_W2]
    return cost, gradient
Project: dl4nlp    Author: yohokuno
def test_sigmoid(self):
        x = np.array([[1, 2], [-1, -2]])
        f = expit(x)
        g = sigmoid_gradient(f)
        expected = np.array([[0.73105858,  0.88079708],
                    [0.26894142,  0.11920292]])
        self.assertNumpyEqual(expected, f)

        expected = np.array([[0.19661193,  0.10499359],
                    [0.19661193,  0.10499359]])
        self.assertNumpyEqual(expected, g)
Project: dl4nlp    Author: yohokuno
def test_logistic_regression(self):
        input = np.random.uniform(-10.0, 10.0, size=10)
        output = np.random.randint(0, 2)

        def logistic_regression_wrapper(parameters):
            return logistic_regression_cost_gradient(parameters, input, output)

        initial_parameters = np.random.normal(scale=1e-5, size=10)
        result = gradient_check(logistic_regression_wrapper, initial_parameters)
        self.assertEqual([], result)

        # Train logistic regression and see if it predicts correct label
        final_parameters, cost_history = gradient_descent(logistic_regression_wrapper, initial_parameters, 100)
        prediction = expit(np.dot(input, final_parameters)) > 0.5
        self.assertEqual(output, prediction)
Project: dl4nlp    Author: yohokuno
def test_gradient_check_sigmoid(self):
        def sigmoid_check(x):
            return expit(x), sigmoid_gradient(expit(x))

        x = np.array(0.0)
        result = gradient_check(sigmoid_check, x)
        self.assertEqual([], result)
Project: instacart-basket-prediction    Author: colinmorris
def b_and_a(feat, val):
    before, after = alt(feat, val)
    print('Setting {} to {}'.format(feat, val))
    delta = after - before
    print('Logits: {:.2f} -> {:.2f} ({}{:.2f})'.format(
        before, after, ('+' if delta >= 0 else ''), delta))
    print('Prob: {:.3f} -> {:.3f}'.format(expit(before), expit(after)))
Project: instacart-basket-prediction    Author: colinmorris
def main():
  parser = argparse.ArgumentParser()
  parser.add_argument('tags', nargs='+')
  parser.add_argument('--dest-tag', default='stacked', 
      help='Tag for generated pdict (default: "stacked")')
  parser.add_argument('--fold', default='test')
  args = parser.parse_args()

  metavec = load_metavectors(args.fold)

  #clf = train.load_model()
  clf = joblib.load('model.pkl')

  with time_me('Vectorized fold {}'.format(args.fold)):
    # TODO: this fn is not a thing?
    X, y = train.vectorize_fold(args.fold, args.tags, metavec)

  if hasattr(clf, 'predict_proba'):
    probs = clf.predict_proba(X)
    # returns an array of shape (n, 2), where each len-2 subarray
    # has the probability of the negative and positive classes. which is silly.
    probs = probs[:,1]
  else:
    scores = clf.decision_function(X)
    probs = expit(scores)

  pdict = pdictify(probs, metavec)
  common.save_pdict_for_tag(args.dest_tag, pdict, args.fold)
Project: vec4ir    Author: lgalke
def delta(u, v):
    """ cosine ° sigmoid
    >>> delta([0.2], [0.3])
    0.5
    >>> delta([0.3], [0.2])
    0.5
    >>> delta([0.1,0.9], [-0.9,0.1]) == delta([-0.9,0.1], [0.1,0.9])
    True
    """
    # TODO scale with a and c
    return expit(cosine(u, v))
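The doctest values follow once you know that cosine here is a distance (1 minus cosine similarity), as in scipy.spatial.distance.cosine: any two positive 1-d vectors point the same way, so the distance is 0 and expit(0) = 0.5. A quick check, assuming that import:

from scipy.spatial.distance import cosine
from scipy.special import expit

print(expit(cosine([0.2], [0.3])))  # 0.5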
Project: vec4ir    Author: lgalke
def delta(X, Y, n_jobs=-1, a=1, c=0):
    """Pairwise delta function: cosine and sigmoid

    :X: TODO
    :returns: TODO

    """
    D = pairwise_distances(X, Y, metric="cosine", n_jobs=n_jobs)
    if c != 0:
        D -= c
    if a != 1:
        D *= a
    D = expit(D)
    return D
Project: HackCU-Machine-Learning    Author: spencer-hanson
def hypothesisFunc(theta, x):
    hx = expit(np.dot(x, theta))
    return hx
Project: ML_algorithm    Author: luoshao23
def logistic(X):
    return logistic_sigmoid(X, out=X)
Project: Tethys    Author: JosePedroMatos
def _activate(self, X, layer):
        if self.activationFuns[layer] == 'log':
            return expit(X)
        elif self.activationFuns[layer] == 'tan':
            return 2 / (1 + np.exp(-2 * X)) - 1
        else:
            return X
Project: ML-DS_practice    Author: PiyKat
def H(initTheta,X):
    # X1 = FeatureScaling(X) 
    hypothesis = expit(np.dot(X,initTheta))
    return hypothesis
Project: Steal-ML    Author: ftramer
def logistic_loss(w, X, Y, alpha):
    """
    Implementation of the logistic loss function when Y is a probability
    distribution.

    loss = -SUM_i SUM_k y_ik * log(P[yi == k]) + alpha * ||w||^2
    """
    n_classes = Y.shape[1]
    n_features = X.shape[1]
    intercept = 0

    if n_classes > 2:
        fit_intercept = w.size == (n_classes * (n_features + 1))
        w = w.reshape(n_classes, -1)
        if fit_intercept:
            intercept = w[:, -1]
            w = w[:, :-1]
    else:
        fit_intercept = w.size == (n_features + 1)
        if fit_intercept:
            intercept = w[-1]
            w = w[:-1]

    z = safe_sparse_dot(X, w.T) + intercept

    if n_classes == 2:
        # in the binary case, simply compute the logistic function
        p = np.vstack([log_logistic(-z), log_logistic(z)]).T
    else:
        # compute the logistic function for each class and normalize
        denom = expit(z)
        denom = denom.sum(axis=1).reshape((denom.shape[0], -1))
        p = log_logistic(z)
        loss = - (Y * p).sum()
        loss += np.log(denom).sum()  # Y.sum() = 1
        loss += 0.5 * alpha * squared_norm(w)
        return loss

    loss = - (Y * p).sum() + 0.5 * alpha * squared_norm(w)
    return loss
Project: Steal-ML    Author: ftramer
def logistic_grad_bin(w, X, Y, alpha):
    """
    Implementation of the logistic loss gradient when Y is a binary probability
    distribution.
    """
    grad = np.empty_like(w)
    n_classes = Y.shape[1]
    n_features = X.shape[1]
    fit_intercept = w.size == (n_features + 1)

    if fit_intercept:
        intercept = w[-1]
        w = w[:-1]
    else:
        intercept = 0

    z = safe_sparse_dot(X, w.T) + intercept

    _, n_features = X.shape
    z0 = - (Y[:, 1] + (expit(-z) - 1))

    grad[:n_features] = safe_sparse_dot(X.T, z0) + alpha * w

    if fit_intercept:
        grad[-1] = z0.sum()

    return grad.flatten()
Project: Steal-ML    Author: ftramer
def predict(self, x):
        _x = np.ones((x.shape[0], x.shape[1] + 1))
        _x[:, :-1] = x
        score = expit(np.inner(self.w, _x))
        signs = np.sign(score - .5)
        return [0 if s == -1 else 1 for s in signs]
Project: othello-rl    Author: jpypi
def activation(x):
    #return expit(x)
    ##return 1.7159 * math.tanh(2/3*x)
    #print(x)

    return np.tanh(x)  # list(map(math.tanh, x))
    #return np.multiply(x > 0, x)
Project: othello-rl    Author: jpypi
def dactivation(x):
    #v = expit(x)
    #return v*(1-v)
    #return 1 - math.tanh(x)**2

    return 1 - np.tanh(x)**2  # list(map(lambda y: 1 - math.tanh(y)**2, x))
    #return np.float64(x > 0)
Project: nonce2vec    Author: minimalparts
def train_cbow_pair(model, word, input_word_indices, l1, alpha, learn_vectors=True, learn_hidden=True):
    neu1e = zeros(l1.shape)

    if model.hs:
        l2a = model.syn1[word.point]  # 2d matrix, codelen x layer1_size
        fa = expit(dot(l1, l2a.T))  # propagate hidden -> output
        ga = (1. - word.code - fa) * alpha  # vector of error gradients multiplied by the learning rate
        if learn_hidden:
            model.syn1[word.point] += outer(ga, l1)  # learn hidden -> output
        neu1e += dot(ga, l2a)  # save error

    if model.negative:
        # use this word (label = 1) + `negative` other random words not from this sentence (label = 0)
        word_indices = [word.index]
        while len(word_indices) < model.negative + 1:
            w = model.cum_table.searchsorted(model.random.randint(model.cum_table[-1]))
            if w != word.index:
                word_indices.append(w)
        l2b = model.syn1neg[word_indices]  # 2d matrix, k+1 x layer1_size
        fb = expit(dot(l1, l2b.T))  # propagate hidden -> output
        gb = (model.neg_labels - fb) * alpha  # vector of error gradients multiplied by the learning rate
        if learn_hidden:
            model.syn1neg[word_indices] += outer(gb, l1)  # learn hidden -> output
        neu1e += dot(gb, l2b)  # save error

    if learn_vectors:
        # learn input -> hidden, here for all words in the window separately
        if not model.cbow_mean and input_word_indices:
            neu1e /= len(input_word_indices)
        for i in input_word_indices:
            model.wv.syn0[i] += neu1e * model.syn0_lockf[i]

    return neu1e
Project: RBM_AE    Author: lingxuez
def predict_x_mean(self, noisy_data_x, noise_prob=0):
        """
        Calculate the predicted mean given input noisy_data_x.
        :param noisy_data_x: binary input with dimension (dim_input, 1)
        """
        ## hidden layer
        h = expit(self.bias_hidden + self.W.dot(noisy_data_x))
        ## predicted x
        x_mean = expit(self.bias_input + self.W.transpose().dot(h))

        return (h, x_mean)
Project: RBM_AE    Author: lingxuez
def activate(aValue):
        """
        activate function: sigmoid
        g(a) = 1/(1+exp(-a)); same dimension as aValue
        """
        return special.expit(aValue)
Project: RBM_AE    Author: lingxuez
def energy_gradient(self, x):
        """
        Calculate the (estimated) gradient of energy E(h, x) at given x,
        with respect to W, bias_input, bias_hidden at current values.
        :param x: input vector with shape (dim_input, 1)
        """
        h_mean = expit(self.bias_hidden + self.W.dot(x))
        grad_W = -h_mean.dot(x.transpose())
        grad_bias_input = -x
        grad_bias_hidden = -h_mean

        return (grad_W, grad_bias_input, grad_bias_hidden)
Project: RBM_AE    Author: lingxuez
def gibbs_sample_h(self, x):
        """
        Sample a new h from p(h|x) using current parameters.
        :param x: shape (dim_input, 1)
        :return: shape (dim_hidden, 1)
        """
        h_mean = expit(self.bias_hidden + self.W.dot(x))
        return self.bernoulli(h_mean)
Project: RBM_AE    Author: lingxuez
def gibbs_sample_x(self, h):
        """
        Sample a new x from p(x|h) using current parameters.
        :param h: shape (dim_hidden, 1)
        :return: shape (dim_input, 1)
        """
        x_mean = expit(self.bias_input + self.W.transpose().dot(h))
        return self.bernoulli(x_mean)
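Together, gibbs_sample_h and gibbs_sample_x implement block Gibbs sampling for a binary RBM: p(h = 1 | x) = sigmoid(bias_hidden + W x) and p(x = 1 | h) = sigmoid(bias_input + W.T h), each followed by a Bernoulli draw. One standalone alternation with hypothetical shapes, outside the class (NumPy only):

import numpy as np
from scipy.special import expit

rng = np.random.default_rng(0)
dim_x, dim_h = 6, 4
W = rng.normal(scale=0.1, size=(dim_h, dim_x))
b_h, b_x = np.zeros((dim_h, 1)), np.zeros((dim_x, 1))

x = rng.integers(0, 2, size=(dim_x, 1)).astype(float)
h = (rng.random((dim_h, 1)) < expit(b_h + W @ x)).astype(float)        # h ~ p(h | x)
x_new = (rng.random((dim_x, 1)) < expit(b_x + W.T @ h)).astype(float)  # x ~ p(x | h)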
Project: loglizer    Author: logpai
def computeWeight(rawData, numEvents):
    # to avoid the case that a term never occurs in a document, we add 1 to the cnt
    numLines, numEvents = rawData.shape
    weightedData = np.zeros((numLines, numEvents), float)
    for j in range(numEvents):
        cnt = np.count_nonzero(rawData[:, j]) + 1
        weight = 0.5 * expit(math.log(numLines / float(cnt)))  # IDF-style weight, squashed by expit
        weightedData[:, j] = rawData[:, j] * weight
    print('weighted data size is', weightedData.shape)
    return weightedData
Project: loglizer    Author: logpai
def computeWeight(rawData):
    # to avoid the case that a term never occurs in a document, we add 1 to the cnt
    numLines, numEvents = rawData.shape
    weightedData = np.zeros((numLines, numEvents), float)
    for j in range(numEvents):
        cnt = np.count_nonzero(rawData[:, j]) + 1
        weight = 0.5 * expit(math.log(numLines / float(cnt)))  # IDF-style weight, squashed by expit
        weightedData[:, j] = rawData[:, j] * weight
    print('weighted data size is', weightedData.shape)
    return weightedData
Project: pulse2percept    Author: uwescience
def stationary_nonlinearity(self, stim):
        """Stationary nonlinearity

        Nonlinearly rescale a temporal signal `stim` across space and time,
        based on a sigmoidal function dependent on the maximum value of `stim`.
        This is Box 4 in Nanduri et al. (2012).
        The parameter values of the asymptote, slope, and shift of the logistic
        function are given by self.asymptote, self.slope, and self.shift,
        respectively.

        Parameters
        ----------
        stim: array
           Temporal signal to process, stim(r, t) in Nanduri et al. (2012).

        Returns
        -------
        Rescaled signal, b4(r, t) in Nanduri et al. (2012).

        Notes
        -----
        Conversion to TimeSeries is avoided for the sake of speedup.
        """
        # use expit (logistic) function for speedup
        sigmoid = ss.expit((stim.max() - self.shift) / self.slope)
        return stim * sigmoid
Project: prml    Author: Yevgnen
def sigmoid(a):
    h = expit(a)

    return h, h * (1 - h)
Project: nodeembedding-to-communityembedding    Author: andompesta
def gradient_update(positive_node_embedding, negative_nodes_embedding, neg_labels, _alpha):
        '''
        Perform stochastic gradient descent of the first and second order embedding.
        NOTE: using the cython implementation (fast_community_sdg_X) is much faster
        '''
        fb = sigmoid(np.dot(positive_node_embedding, negative_nodes_embedding.T))  # propagate hidden -> output
        gb = (neg_labels - fb) * _alpha  # vector of error gradients multiplied by the learning rate
        return gb
Project: nodeembedding-to-communityembedding    Author: andompesta
def loss(self, model, edges):
        ret_loss = 0
        for edge in prepare_sentences(model, edges):
            assert len(edge) == 2, "edges have to be done by 2 nodes :{}".format(edge)
            ret_loss -= np.log(sigmoid(np.dot(model.node_embedding[edge[1].index], model.node_embedding[edge[0].index].T)))
        return ret_loss
Project: MLAlgorithms    Author: rushter
def grad(self, actual, predicted):
        return actual * expit(-actual * predicted)
Project: MLAlgorithms    Author: rushter
def hess(self, actual, predicted):
        expits = expit(predicted)
        return expits * (1 - expits)
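For labels in {-1, +1}, grad and hess are two views of the same logistic loss log(1 + exp(-y * f)): its derivative in f is -y * expit(-y * f), so grad returns the negative gradient (the pseudo-residual used in boosting), while the second derivative simplifies to expit(f) * (1 - expit(f)) for either label. A numeric spot-check:

import numpy as np
from scipy.special import expit

y, f, eps = -1.0, 0.7, 1e-4
loss = lambda f: np.log1p(np.exp(-y * f))
assert np.isclose(y * expit(-y * f), -(loss(f + eps) - loss(f - eps)) / (2 * eps))
assert np.isclose(expit(f) * (1 - expit(f)),
                  (loss(f + eps) - 2 * loss(f) + loss(f - eps)) / eps ** 2)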
Project: MLAlgorithms    Author: rushter
def transform(self, output):
        # Apply logistic (sigmoid) function to the output
        return expit(output)
Project: singing_horse    Author: f0k
def sigmoid(x, out):
        # compute 1 / (1 + exp(-x)) into the preallocated `out` buffer, in place
        if out is not x:
            out[:] = x
        np.negative(out, out)
        np.exp(out, out)
        out += 1
        np.reciprocal(out, out)
        return out