This article collects typical usage examples of the minpy.numpy.sum function in Python. If you have been wondering how exactly to use the sum function, what it is for, or what calling it looks like in practice, the curated code examples below may help.
The following presents 15 code examples of the sum function, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
Example 1: softmax_loss
def softmax_loss(x, y):
    """
    Computes the loss for softmax classification.
    Inputs:
    - x: Input data, of shape (N, C) where x[i, j] is the score for the jth class
      for the ith input.
    - y: Either of the following:
      - One-hot encoding of labels, of shape (N, C)
      - Label index of shape (N,), where each y[i] is the label of the ith example
        (0 <= y[i] < C)
    Returns:
    - loss: Scalar giving the loss
    """
    N = x.shape[0]
    C = x.shape[1]
    if len(y.shape) == 1:
        # Convert index labels to a one-hot encoding.
        onehot_y = np.zeros([N, C])
        np.onehot_encode(y, onehot_y)
    else:
        onehot_y = y
    probs = x - np.max(x, axis=1, keepdims=True)
    loss = -np.sum(probs * onehot_y) / N
    loss += np.sum(np.log(np.sum(np.exp(probs), axis=1, keepdims=True))) / N
    return loss
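As a quick sanity check, the same log-softmax cross entropy can be reproduced with plain NumPy; the scores and labels below are made up for illustration, and direct indexing replaces minpy's np.onehot_encode:

import numpy as np

def softmax_loss_ref(x, y):
    # Numerically stable log-softmax followed by the mean negative log-likelihood.
    shifted = x - np.max(x, axis=1, keepdims=True)
    log_probs = shifted - np.log(np.sum(np.exp(shifted), axis=1, keepdims=True))
    return -np.mean(log_probs[np.arange(x.shape[0]), y])

x = np.array([[2.0, 1.0, 0.1],
              [0.5, 2.5, 0.3]])
y = np.array([0, 1])
print(softmax_loss_ref(x, y))  # average cross entropy over the two rows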
Example 2: svm_loss
def svm_loss(x, y, mode):
    """
    Computes the loss and gradient for multiclass SVM classification.
    Inputs:
    - x: Input data, of shape (N, C) where x[i, j] is the score for the jth class
      for the ith input.
    - y: Vector of labels, of shape (N,) where y[i] is the label for x[i] and
      0 <= y[i] < C
    - mode: 'cpu' forces the NumPy-only policy; anything else prefers MXNet
    Returns a tuple of:
    - loss: Scalar giving the loss
    - dx: Gradient of the loss with respect to x
    """
    if mode == 'cpu':
        np.set_policy(policy.OnlyNumpyPolicy())
    else:
        np.set_policy(policy.PreferMXNetPolicy())
    N = x.shape[0]
    correct_class_scores = x[np.arange(N), y]
    # margins = np.maximum(0, x - correct_class_scores[:, np.newaxis] + 1.0)
    margins = np.maximum(0, x - np.expand_dims(correct_class_scores, axis=1) + 1.0)
    margins[np.arange(N), y] = 0
    loss = np.sum(margins) / N
    num_pos = np.sum(margins > 0, axis=1)
    dx = np.zeros_like(x)
    dx[margins > 0] = 1
    dx[np.arange(N), y] -= num_pos
    dx /= N
    return loss, dx
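A small finite-difference check, sketched with plain NumPy and made-up data, shows how the dx returned above can be verified entry by entry:

import numpy as np

def svm_loss_ref(x, y):
    # Plain-NumPy copy of the hinge loss above (loss only, no policy switching).
    N = x.shape[0]
    margins = np.maximum(0, x - x[np.arange(N), y][:, None] + 1.0)
    margins[np.arange(N), y] = 0
    return np.sum(margins) / N

rng = np.random.RandomState(0)
x = rng.randn(4, 5)
y = rng.randint(0, 5, size=4)
eps, i, j = 1e-5, 1, 2
x_p, x_m = x.copy(), x.copy()
x_p[i, j] += eps
x_m[i, j] -= eps
numeric = (svm_loss_ref(x_p, y) - svm_loss_ref(x_m, y)) / (2 * eps)
print(numeric)  # should match dx[i, j] computed by svm_loss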
Example 3: softmax_loss
def softmax_loss(x, y):
    """
    Computes the loss and gradient for softmax classification.
    Inputs:
    - x: Input data, of shape (N, C) where x[i, j] is the score for the jth class
      for the ith input.
    - y: Vector of labels, of shape (N,) where y[i] is the label for x[i] and
      0 <= y[i] < C
    Returns a tuple of:
    - loss: Scalar giving the loss
    - dx: Gradient of the loss with respect to x
    """
    # Workaround: np.max with keepdims was buggy here, so the axis-reduced max
    # is used and the normalizer is broadcast back with np.expand_dims.
    probs = np.exp(x - np.max(x, axis=1))
    probs /= np.expand_dims(np.sum(probs, axis=1), axis=1)
    N = x.shape[0]
    loss = -np.sum(np.log(probs[np.arange(N), y])) / N
    dx = probs.copy()
    dx[np.arange(N), y] -= 1
    dx /= N
    return loss, dx
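One property worth checking: because each row of probs sums to 1 and exactly one entry per row is reduced by 1, every row of dx sums to zero. A plain-NumPy sketch with made-up data:

import numpy as np

x = np.array([[1.0, 2.0, 3.0],
              [0.2, 0.1, 0.7]])
y = np.array([2, 0])
probs = np.exp(x - np.max(x, axis=1, keepdims=True))
probs /= np.sum(probs, axis=1, keepdims=True)
dx = probs.copy()
dx[np.arange(x.shape[0]), y] -= 1
dx /= x.shape[0]
print(np.allclose(dx.sum(axis=1), 0))  # True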
Example 4: svm_loss
def svm_loss(x, y):
    """
    Computes the loss for multiclass SVM classification.
    Inputs:
    - x: Input data, of shape (N, C) where x[i, j] is the score for the jth class
      for the ith input.
    - y: Vector of labels, of shape (N,) where y[i] is the label for x[i] and
      0 <= y[i] < C
    Returns:
    - loss: Scalar giving the loss
    """
    N = x.shape[0]
    correct_class_scores = x[np.arange(N), y]
    # TODO: Support broadcast case: (X,) (X, Y)
    # shape(x) is (d0, d1), shape(correct_class_scores) is (d0,)
    # margins = np.maximum(0, x - correct_class_scores + 1.0)
    margins = np.transpose(np.maximum(0, np.transpose(x) - np.transpose(correct_class_scores) + 1.0))
    loss = (np.sum(margins) - np.sum(margins[np.arange(N), y])) / N
    return loss
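The double transpose only works around the missing (N,) versus (N, C) broadcast in minpy; in plain NumPy the two forms agree, as this sketch with made-up scores shows:

import numpy as np

x = np.arange(6, dtype=float).reshape(2, 3)
correct = np.array([1.0, 4.0])
via_transpose = np.transpose(np.maximum(0, np.transpose(x) - np.transpose(correct) + 1.0))
via_broadcast = np.maximum(0, x - correct[:, None] + 1.0)
print(np.allclose(via_transpose, via_broadcast))  # True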
Example 5: quick_grad_check
def quick_grad_check(fun, arg0, extra_args=(), kwargs={}, verbose=True,
                     eps=EPS, rtol=RTOL, atol=ATOL, rs=None):
    """Checks the gradient of a function (w.r.t. its first arg) in a random direction."""
    if verbose:
        print("Checking gradient of {0} at {1}".format(fun, arg0))
    if rs is None:
        rs = nnp.random.RandomState()
    random_dir = rs.standard_normal(nnp.shape(arg0))
    random_dir = random_dir / nnp.sqrt(nnp.sum(random_dir * random_dir))
    if not extra_args == ():
        unary_fun = lambda x: fun(arg0 + x * random_dir, extra_args)
        numeric_grad = (unary_fun(eps / 2) - unary_fun(-eps / 2)) / eps
        analytic_grad = np.sum(grad(fun)(arg0, extra_args) * random_dir)
    else:
        unary_fun = lambda x: fun(arg0 + x * random_dir)
        numeric_grad = (unary_fun(eps / 2) - unary_fun(-eps / 2)) / eps
        analytic_grad = np.sum(grad(fun)(arg0) * random_dir)
    if isinstance(numeric_grad, minpy.array.Number):
        diff = abs((analytic_grad - numeric_grad).get_data(None))
        assert diff < atol and diff < abs((analytic_grad * rtol).get_data(None)), \
            "Check failed! nd={0}, ad={1}".format(numeric_grad, analytic_grad)
    elif isinstance(numeric_grad, minpy.array.Array):
        assert nnp.prod(nnp.shape(analytic_grad.asnumpy())[:]) == 1, \
            "Currently only support check loss"
        diff = abs((analytic_grad - numeric_grad).asnumpy())
        assert diff < atol and diff < abs((analytic_grad * rtol).asnumpy()), \
            "Check failed! nd={0}, ad={1}".format(numeric_grad, analytic_grad)
    else:
        assert False
    if verbose:
        print("Gradient projection OK (numeric grad: {0}, analytic grad: {1})".format(
            numeric_grad, analytic_grad))
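The core idea, independent of minpy, is to compare a central finite difference along a random unit direction against the projection of the analytic gradient onto that direction. A plain-NumPy sketch with a hypothetical quadratic function:

import numpy as np

def fun(w):
    return np.sum(w ** 2)

def fun_grad(w):
    return 2 * w

rs = np.random.RandomState(0)
w0 = rs.randn(5)
direction = rs.standard_normal(w0.shape)
direction /= np.sqrt(np.sum(direction * direction))
eps = 1e-4
numeric = (fun(w0 + (eps / 2) * direction) - fun(w0 - (eps / 2) * direction)) / eps
analytic = np.sum(fun_grad(w0) * direction)
print(numeric, analytic)  # the two projections should agree closely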
Example 6: softmax
def softmax(x, y):
    import numpy as np
    y = y.astype(int)
    probs = np.exp(x - np.max(x, axis=1, keepdims=True))
    probs /= np.sum(probs, axis=1, keepdims=True)
    N = x.shape[0]
    loss = -np.sum(np.log(probs[np.arange(N), y])) / N
    return loss
Example 7: train_loss
def train_loss(X, y, W1, W2, b1, b2):
    l1 = affine_relu_forward(X, W1, b1)
    l2 = affine_forward(l1, W2, b2)
    scores = l2
    if y:
        # [TODO]: softmax is not supported yet
        # loss, d_scores = softmax_loss(scores, y)
        loss = svm_loss(scores, y)
        loss_with_reg = loss + np.sum(W1 ** 2) * 0.5 * self.reg \
            + np.sum(W2 ** 2) * 0.5 * self.reg
        return loss_with_reg
Example 8: train_loss
def train_loss(X, y, W1, W2, b1, b2):
    l1, l1_cache = affine_relu_forward(X, W1, b1)
    l2, l2_cache = affine_forward(l1, W2, b2)
    scores = l2
    if y is None:
        return scores
    loss, d_scores = softmax_loss(scores, y)
    loss += np.sum(W1 ** 2) * 0.5 * self.reg
    loss += np.sum(W2 ** 2) * 0.5 * self.reg
    return loss
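The helpers affine_relu_forward and affine_forward come from the surrounding layers module; hypothetical plain-NumPy versions, shown only to make the data flow concrete, could look like this:

import numpy as np

def affine_forward(x, w, b):
    # Flatten each example and apply a fully connected layer.
    out = np.dot(np.reshape(x, (x.shape[0], -1)), w) + b
    return out, (x, w, b)

def affine_relu_forward(x, w, b):
    # Affine layer followed by a ReLU nonlinearity.
    a, fc_cache = affine_forward(x, w, b)
    return np.maximum(0, a), (fc_cache, a)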
Example 9: train_loss
def train_loss(*args):
    inputs = args[0]
    softmax_label = args[1]
    probs = self.symbol_func(**self.make_mxnet_weight_dict(
        inputs, softmax_label, args[self.data_target_cnt:len(args)]))
    if softmax_label is None:
        return probs
    samples_num = X.shape[0]
    targets = np.zeros((samples_num, self.num_classes))
    targets[np.arange(samples_num), softmax_label] = 1
    loss = -np.sum(targets * np.log(probs)) / samples_num
    for i in self.get_index_reg_weight():
        loss = loss + np.sum(0.5 * args[i] ** 2 * self.reg)
    return loss
Example 10: affine_backward
def affine_backward(dout, cache):
    """
    Computes the backward pass for an affine layer.
    Inputs:
    - dout: Upstream derivative, of shape (N, M)
    - cache: Tuple of:
      - x: Input data, of shape (N, d_1, ..., d_k)
      - w: Weights, of shape (D, M)
      - b: Biases, of shape (M,)
    Returns a tuple of:
    - dx: Gradient with respect to x, of shape (N, d_1, ..., d_k)
    - dw: Gradient with respect to w, of shape (D, M)
    - db: Gradient with respect to b, of shape (M,)
    """
    x, w, b = cache
    x_plain = np.reshape(x, (x.shape[0], -1))
    db = np.sum(dout, axis=0)
    dx_plain = np.dot(dout, np.transpose(w))
    dx = np.reshape(dx_plain, x.shape)
    dw = np.dot(np.transpose(x_plain), dout)
    return dx, dw, db
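A shape-only sketch with plain NumPy and made-up sizes makes the reshaping explicit: dout is (N, M), w is (D, M), and dx recovers x's original shape:

import numpy as np

N, M = 3, 2
x = np.random.randn(N, 2, 2)              # flattens to (N, D) with D = 4
w = np.random.randn(4, M)
dout = np.random.randn(N, M)
x_plain = np.reshape(x, (x.shape[0], -1))
db = np.sum(dout, axis=0)
dx = np.reshape(np.dot(dout, w.T), x.shape)
dw = np.dot(x_plain.T, dout)
print(dx.shape, dw.shape, db.shape)       # (3, 2, 2) (4, 2) (2,)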
Example 11: softmax_cross_entropy
def softmax_cross_entropy(prob, label):
    """
    Computes the cross entropy for a softmax activation.
    Inputs:
    - prob: Probabilities, of shape (N, C) where prob[i, j] is the probability for
      the jth class for the ith input.
    - label: Either of the following:
      - One-hot encoding of labels, of shape (N, C)
      - Label index of shape (N,), where each label[i] is the label of the ith example
        (0 <= label[i] < C)
    Returns:
    - cross_entropy: Scalar giving the cross entropy
    """
    N = prob.shape[0]
    C = prob.shape[1]
    if len(label.shape) == 1:
        # Convert index labels to a one-hot encoding.
        onehot_label = np.zeros([N, C])
        np.onehot_encode(label, onehot_label)
    else:
        onehot_label = label
    return -np.sum(np.log(prob) * onehot_label) / N
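np.onehot_encode is minpy-specific; the same one-hot path can be sketched in plain NumPy with made-up probabilities:

import numpy as np

prob = np.array([[0.7, 0.2, 0.1],
                 [0.1, 0.8, 0.1]])
label = np.array([0, 1])
onehot = np.zeros_like(prob)
onehot[np.arange(prob.shape[0]), label] = 1
print(-np.sum(np.log(prob) * onehot) / prob.shape[0])  # mean cross entropy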
Example 12: check_accuracy
def check_accuracy(self, dataiter, num_samples=None):
    """
    Check accuracy of the model on the provided data.
    Inputs:
    - dataiter: data iterator that can produce batches.
    - num_samples: If not None and dataiter has more than num_samples datapoints,
      subsample the data and only test the model on num_samples datapoints.
    Returns:
    - acc: Scalar giving the fraction of instances that were correctly
      classified by the model.
    """
    # Maybe subsample the data.
    N = dataiter.num_data
    check_dataiter = dataiter
    if num_samples is not None and N > num_samples:
        # Sample a sub-iterator.
        check_dataiter = dataiter.getsubiter(num_samples)
    else:
        # Use the entire dataiter otherwise.
        check_dataiter.reset()
    acc_count = 0
    num_samples = 0
    for each_batch in check_dataiter:
        predict = self.model.forward_batch(each_batch, mode='test').asnumpy()
        # TODO(minjie): multiple labels.
        acc_count += np.sum(np.argmax(predict, axis=1) == each_batch.label[0])
        num_samples += check_dataiter.batch_size
    return float(acc_count.asnumpy()) / num_samples
Example 13: log_likelihood
def log_likelihood(weights, inputs, targets):
    logprobs = outputs(weights, inputs)
    loglik = 0.0
    num_time_steps, num_examples, _ = inputs.shape
    for t in range(num_time_steps):
        loglik += np.sum(logprobs[t] * targets[t])
    return loglik / (num_time_steps * num_examples)
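With one-hot targets, each term of the sum picks out the log-probability of the true class at that time step. A toy check with plain NumPy and made-up values:

import numpy as np

logprobs = np.log(np.array([[[0.6, 0.4], [0.3, 0.7]]]))   # (T=1, N=2, C=2)
targets = np.array([[[1.0, 0.0], [0.0, 1.0]]])
loglik = sum(np.sum(logprobs[t] * targets[t]) for t in range(1))
print(loglik / (1 * 2))  # (log 0.6 + log 0.7) / 2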
Example 14: loss
def loss(caffe_layer_specs, X, T):
    # original code:
    # log_prior = -L2_reg * np.dot(W_vect, W_vect)
    log_prior = 0
    for caffe_layer in caffe_layer_specs:
        w = caffe_layer.get_learnable_params()[0]
        log_prior += -L2_reg * np.dot(w, w)
    log_lik = np.sum(predictions(caffe_layer_specs, X) * T)
    return -log_prior - log_lik
Example 15: grad
def grad(g):
    import numpy as np
    y = label.astype(int)
    probs = np.exp(x - np.max(x, axis=1, keepdims=True))
    probs /= np.sum(probs, axis=1, keepdims=True)
    N = x.shape[0]
    dx = probs.copy()
    dx[np.arange(N), y] -= 1
    dx /= N
    return dx