This page collects typical usage examples of the utils.sigmoid function in Python, for anyone wondering what sigmoid does, how to call it, and what real code that uses it looks like.
Below are 15 code examples of the sigmoid function, ordered by popularity by default.
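Every example on this page assumes a sigmoid helper, imported either as utils.sigmoid or directly as sigmoid (in the Theano examples it is the Theano sigmoid op). The helper itself is not reproduced here; a minimal NumPy sketch that would be compatible with the non-Theano calls below, offered only as an assumption about what utils.sigmoid looks like, is:

import numpy as np

def sigmoid(z):
    # Element-wise logistic function 1 / (1 + exp(-z)).
    # Accepts scalars or NumPy arrays and returns values in (0, 1).
    return 1.0 / (1.0 + np.exp(-z))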
Example 1: forward
def forward(self, x_t, h_tm1, c_tm1):
    # Peephole LSTM step: the input, forget and output gates all use the sigmoid.
    i_t = sigmoid(T.dot(x_t, self.W_xi) + T.dot(h_tm1, self.W_hi) + c_tm1 * self.W_ci)
    f_t = sigmoid(T.dot(x_t, self.W_xf) + T.dot(h_tm1, self.W_hf) + c_tm1 * self.W_cf)
    c_t = f_t * c_tm1 + i_t * self.activation(T.dot(x_t, self.W_xc) + T.dot(h_tm1, self.W_hc))
    o_t = sigmoid(T.dot(x_t, self.W_xo) + T.dot(h_tm1, self.W_ho) + c_t * self.W_co)
    h_t = o_t * self.activation(c_t)
    return h_t, c_t
Example 2: logistic_predict
def logistic_predict(weights, data):
    """
    Compute the probabilities predicted by the logistic classifier.

    Note: N is the number of examples and
          M is the number of features per example.

    Inputs:
        weights: (M+1) x 1 vector of weights, where the last element
                 corresponds to the bias (intercept).
        data: N x M data matrix where each row corresponds
              to one data point.
    Outputs:
        y: N x 1 vector of probabilities. This is the output of the classifier.
    """
    # In the case of MNIST classification of 4s and 9s, the targets are integer values.
    N, M = data.shape
    # Element-wise computation: z_i = sum_j w_j * x_ij + bias.
    y = [0] * N
    for i in range(0, N):
        z = weights[-1]
        for j in range(0, M):
            z = z + weights[j] * data[i, j]
        y[i] = sigmoid(z)
    # Vectorized computation of the same quantity; this overwrites y above.
    augdata = np.ones((N, M + 1))
    augdata[:, :-1] = data
    z = np.dot(augdata, weights)  # z is N x 1
    y = sigmoid(z)
    return y
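To make the shape conventions of Example 2 concrete, here is a small hypothetical call (assuming NumPy and the sigmoid helper are imported alongside the function, and using made-up data):

import numpy as np

N, M = 5, 3
data = np.random.randn(N, M)           # N x M data matrix
weights = np.random.randn(M + 1, 1)    # M weights plus the bias as the last entry
probs = logistic_predict(weights, data)
print(probs.shape)                     # (5, 1); every entry lies in (0, 1)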
Example 3: grad_loss
def grad_loss(self, *args):
    """
    Compute the gradient of the logistic loss function.

    Inputs:
    - theta: D-dimensional parameter vector.
    - X: N x D array of data; each row is a data point.
    - y: 1-dimensional array of length N with real values.
    - reg: (float) regularization strength.

    Returns:
    - gradient with respect to self.theta; an array of the same shape as theta.
    """
    theta, X, y, reg = args
    m, dim = X.shape
    grad = np.zeros((dim,))
    # Gradient of the regularized logistic loss. The bias column X[:, :1]
    # (i.e. theta[0]) is not regularized, so it is recomputed without the
    # reg term below.
    grad = X.T.dot(utils.sigmoid(X.dot(theta)) - y) / m + reg * theta / m
    grad[0] = X[:, :1].T.dot(utils.sigmoid(X.dot(theta)) - y) / m
    return grad
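The gradient in Example 3 pairs with the cross-entropy loss of Example 15 plus an L2 term that leaves the bias theta[0] unpenalized. A finite-difference sanity check on made-up data, sketched here purely as an illustration and not part of the original example:

import numpy as np

def sigmoid(z):
    return 1.0 / (1.0 + np.exp(-z))

np.random.seed(0)
m, dim = 20, 4
X = np.hstack([np.ones((m, 1)), np.random.randn(m, dim - 1)])  # bias column first
y = (np.random.rand(m) > 0.5).astype(float)
theta = np.random.randn(dim)
reg = 0.1

def loss(t):
    h = sigmoid(X.dot(t))
    data_term = -(y.dot(np.log(h)) + (1 - y).dot(np.log(1 - h))) / m
    return data_term + reg * t[1:].dot(t[1:]) / (2 * m)  # theta[0] is not regularized

grad = X.T.dot(sigmoid(X.dot(theta)) - y) / m + reg * theta / m
grad[0] = X[:, 0].dot(sigmoid(X.dot(theta)) - y) / m

eps = 1e-6
numeric = np.array([(loss(theta + eps * e) - loss(theta - eps * e)) / (2 * eps)
                    for e in np.eye(dim)])
print(np.max(np.abs(grad - numeric)))  # should be close to zero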
Example 4: cost
def cost(self, theta1, theta2):
    z1 = np.dot(self.train, theta1)
    a2 = utils.sigmoid(z1)
    a2 = np.append(np.ones((a2.shape[0], 1)), a2, 1)
    z2 = np.dot(a2, theta2)
    h = utils.sigmoid(z2)
    return -sum(sum(self.goal * np.log(h) + (1 - self.goal) * np.log(1 - h))) / self.m
Example 5: _step
def _step(x_t, ct_1, ht_1, Wi, Wf, Wo, Wc, Whi, Whf, Who, Whc, bi, bf, bo, bc):
    i = sigmoid(T.dot(x_t, Wi) + T.dot(ht_1, Whi) + bi)
    f = sigmoid(T.dot(x_t, Wf) + T.dot(ht_1, Whf) + bf)
    o = sigmoid(T.dot(x_t, Wo) + T.dot(ht_1, Who) + bo)
    c = tanh(T.dot(x_t, Wc) + T.dot(ht_1, Whc) + bc)
    c_new = i * c + f * ct_1
    h_new = o * tanh(c_new)
    return c_new, h_new
Example 6: _step
def _step(x_t, ct_1, ht_1, W, Wh, b, dim):
    tmp = T.dot(x_t, W) + T.dot(ht_1, Wh) + b
    i = sigmoid(_slice(tmp, 0, dim))
    f = sigmoid(_slice(tmp, 1, dim))
    o = sigmoid(_slice(tmp, 2, dim))
    c = tanh(_slice(tmp, 3, dim))
    c_new = i * c + f * ct_1
    h_new = o * tanh(c_new)
    return c_new, h_new
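Examples 6 and 9 rely on a _slice helper that is not shown on this page. In LSTM code written in this style it typically extracts the n-th block of width dim from the concatenated gate pre-activations; a plausible sketch (an assumption, not the original definition):

def _slice(x, n, dim):
    # Return columns [n * dim, (n + 1) * dim) of x, i.e. the pre-activation
    # block belonging to one of the four gates (input, forget, output, candidate).
    return x[:, n * dim:(n + 1) * dim]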
Example 7: predict
def predict(self, newData=None):
    if newData is None:
        newData = self.train
    else:
        newData = np.append(np.ones((newData.shape[0], 1)), newData, 1)
    z = utils.sigmoid(np.dot(newData, self.inputWeight))
    z = np.append(np.ones((z.shape[0], 1)), z, 1)
    digitProb = utils.sigmoid(np.dot(z, self.hiddenWeight))
    return np.argmax(digitProb, 1)
Example 8: _step_index
def _step_index(x_t, ct_1, ht_1, Wi, Wf, Wo, Wc, Whi, Whf, Who, Whc, bi, bf, bo, bc):
    # x_t: array of type int32.
    # Index into the Wi, Wf, Wo and Wc matrices instead of computing the product
    # with the one-hot representation of the input, for computational and memory efficiency.
    i = sigmoid(Wi[x_t] + T.dot(ht_1, Whi) + bi)
    f = sigmoid(Wf[x_t] + T.dot(ht_1, Whf) + bf)
    o = sigmoid(Wo[x_t] + T.dot(ht_1, Whc * 0 + Who) + bo) if False else sigmoid(Wo[x_t] + T.dot(ht_1, Who) + bo)
    c = tanh(Wc[x_t] + T.dot(ht_1, Whc) + bc)
    c_new = i * c + f * ct_1
    h_new = o * tanh(c_new)
    return c_new, h_new
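The indexing trick in Example 8 works because multiplying a one-hot row vector by a weight matrix simply selects one row of that matrix. A small NumPy illustration with made-up sizes:

import numpy as np

vocab, dim = 5, 3
Wi = np.random.randn(vocab, dim)
x_t = 2                              # integer token id
one_hot = np.eye(vocab)[x_t]         # the equivalent one-hot vector
assert np.allclose(Wi[x_t], one_hot.dot(Wi))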
Example 9: _step_index
def _step_index(x_t, ct_1, ht_1, W, Wh, b, dim):
    # x_t: array of type int32.
    # Index into the W matrix instead of computing the dot product with the
    # one-hot representation of the input, for computational and memory efficiency.
    tmp = W[x_t] + T.dot(ht_1, Wh) + b
    i = sigmoid(_slice(tmp, 0, dim))
    f = sigmoid(_slice(tmp, 1, dim))
    o = sigmoid(_slice(tmp, 2, dim))
    c = tanh(_slice(tmp, 3, dim))
    c_new = i * c + f * ct_1
    h_new = o * tanh(c_new)
    return c_new, h_new
Example 10: forward
def forward(network, x):
    W1, W2, W3 = network['W1'], network['W2'], network['W3']
    b1, b2, b3 = network['b1'], network['b2'], network['b3']
    a1 = np.dot(x, W1) + b1
    z1 = sigmoid(a1)
    a2 = np.dot(z1, W2) + b2
    z2 = sigmoid(a2)
    a3 = np.dot(z2, W3) + b3
    y = identity_function(a3)
    return y
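One way to exercise the forward pass in Example 10 is to build the network dictionary by hand. The layer sizes below (2 -> 3 -> 2 -> 1) are an illustrative assumption, as are the sigmoid and identity_function helpers the example imports from elsewhere:

import numpy as np

def sigmoid(a):
    return 1.0 / (1.0 + np.exp(-a))

def identity_function(a):
    return a

network = {
    'W1': np.random.randn(2, 3), 'b1': np.zeros(3),
    'W2': np.random.randn(3, 2), 'b2': np.zeros(2),
    'W3': np.random.randn(2, 1), 'b3': np.zeros(1),
}
x = np.array([1.0, 0.5])
print(forward(network, x))  # a length-1 output vector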
Example 11: get_reconstruction_cross_entropy
def get_reconstruction_cross_entropy(self):
    pre_sigmoid_activation_h = numpy.dot(self.input, self.W) + self.hbias
    sigmoid_activation_h = sigmoid(pre_sigmoid_activation_h)
    pre_sigmoid_activation_v = numpy.dot(sigmoid_activation_h, self.W.T) + self.vbias
    sigmoid_activation_v = sigmoid(pre_sigmoid_activation_v)
    cross_entropy = -numpy.mean(
        numpy.sum(self.input * numpy.log(sigmoid_activation_v) +
                  (1 - self.input) * numpy.log(1 - sigmoid_activation_v), axis=1))
    return cross_entropy
Example 12: _CD1
def _CD1(self, visible_data, weights, visible_bias, hidden_bias):
    N = np.shape(visible_data)[0]
    # Positive phase
    visible_state = visible_data
    if self.visible_type == "SIGMOID":
        visible_state = self._samplebinary(visible_state)
    elif self.visible_type == "LINEAR":
        visible_state = self._add_gaussian_noise(visible_state)
    nw = np.dot(visible_state, weights) + np.tile(hidden_bias, (N, 1))
    if self.hidden_type == "SIGMOID":
        hidden_probability = u.sigmoid(nw)
        hidden_state = self._samplebinary(hidden_probability)
    elif self.hidden_type == "LINEAR":
        hidden_state = self._add_gaussian_noise(nw)
    gradient1 = self._gradient_weights(visible_state, hidden_state, weights)
    visible_biases1 = self._gradient_biases(visible_state, visible_bias)
    hidden_biases1 = self._gradient_biases(hidden_state, hidden_bias)
    # Negative phase (sampling is skipped here as well)
    visible_state = np.dot(hidden_state, weights.T) + np.tile(visible_bias, (N, 1))
    if self.visible_type == "SIGMOID":
        visible_state = u.sigmoid(visible_state)
        # visible_probability = u.sigmoid(visible_state)
        # visible_state = self._samplebinary(visible_probability)
    nw = np.dot(visible_state, weights) + np.tile(hidden_bias, (N, 1))
    if self.hidden_type == "SIGMOID":
        hidden_probability = u.sigmoid(nw)
        hidden_state = hidden_probability
    elif self.hidden_type == "LINEAR":
        hidden_state = nw
    gradient2 = self._gradient_weights(visible_state, hidden_state, weights)
    visible_biases2 = self._gradient_biases(visible_state, visible_bias)
    hidden_biases2 = self._gradient_biases(hidden_state, hidden_bias)
    # Gradients: positive phase minus negative phase
    weights = gradient1 - gradient2
    visible_biases = visible_biases1 - visible_biases2
    hidden_biases = hidden_biases1 - hidden_biases2
    return weights, visible_biases, hidden_biases
Example 13: train
def train(set_, dimension, lambda_):
    temp_w = np.zeros(dimension)
    w0 = 0.
    w = temp_w
    # print ("Lambda: " + str(lambda_))
    prev_error = 0.
    h = [0.5] * len(set_)
    current_error = calc_error(set_, w, lambda_, h, dimension)
    # num_iter = 0
    while abs(current_error - prev_error) > 0.001:
        delta_w0 = 0.
        delta_w = np.zeros(dimension)
        # print ("Current error: " + str(current_error))
        for i in range(len(set_)):
            h = utils.sigmoid(np.dot(set_[i][1], w) + w0)
            y = set_[i][0]
            delta_w0 += float(h) - y
            temp_x = (float(h) - y) * set_[i][1]
            delta_w = delta_w + temp_x
        n, error, w, w0 = line_search(set_, dimension, lambda_, w, w0,
                                      delta_w, delta_w0, current_error)
        # print ("Line search result: ")
        # print (str(w0) + " " + str(w))
        if n == 0:
            break
        # num_iter += 1
        prev_error = current_error
        current_error = error
        # print (current_error)
    # print ("Num iter: " + str(num_iter))
    # print ("params found: " + str(w0) + str(w))
    h = [float(utils.sigmoid(np.dot(tup[1], w) + w0))
         for tup in set_]
    return w, w0, calc_error(set_, w, 0, h, dimension)
Example 14: minibatch_update
def minibatch_update(self, x, y, lr, regularization):
    n_sample = x.shape[0]
    info = x
    hidden_cache = []
    # Forward pass: sigmoid hidden layers followed by a softmax output layer.
    for i in range(self.n_hidden + 1):
        if i == self.n_hidden:
            probs = softmax(info.dot(self.W[i]) + self.b[i])
        else:
            info = sigmoid(info.dot(self.W[i]) + self.b[i])
            hidden_cache.append(info)
    loss = neg_log_likelihood(probs, y)
    # Backward pass: start from the softmax / cross-entropy error signal.
    probs[np.arange(n_sample), y] -= 1.0
    errors = probs
    for i in range(self.n_hidden, -1, -1):
        if i >= 1:
            hidden_out = hidden_cache[i - 1]
            grad_hidden_out = errors.dot(self.W[i].T)
            self.W[i] -= (lr * (hidden_out.T).dot(errors) + regularization * self.W[i])
            self.b[i] -= lr * np.sum(errors, axis=0)
            # Propagate the error through the sigmoid derivative h * (1 - h).
            errors = hidden_out * (1 - hidden_out) * grad_hidden_out
        else:
            hidden_out = x
            self.W[i] -= (lr * (hidden_out.T).dot(errors) + regularization * self.W[i])
            self.b[i] -= lr * np.sum(errors, axis=0)
    return loss
Example 15: loss
def loss(self, *args):
    """
    Compute the logistic loss function.

    Inputs:
    - theta: D-dimensional parameter vector.
    - X: N x D array of data; each row is a data point.
    - y: 1-dimensional array of length N with real values.

    Returns: loss as a single float.
    """
    theta, X, y = args
    m, dim = X.shape
    # Unregularized logistic (cross-entropy) loss.
    hx = utils.sigmoid(X.dot(theta))
    J = -1 * (np.log(hx).T.dot(y) + (np.log(1 - hx)).T.dot(1 - y)) / m
    return J
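For intuition, the loss in Example 15 can be checked against a tiny hand-built case (made-up numbers, not from the original code):

import numpy as np

def sigmoid(z):
    return 1.0 / (1.0 + np.exp(-z))

X = np.array([[1.0, 2.0],
              [1.0, -1.0]])       # the first column acts as the bias feature
y = np.array([1.0, 0.0])
theta = np.array([0.0, 1.0])

h = sigmoid(X.dot(theta))         # approximately [0.881, 0.269]
J = -(np.log(h).dot(y) + np.log(1 - h).dot(1 - y)) / X.shape[0]
print(J)                          # approximately 0.220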