This article collects typical usage examples of the numpy.log method in Python. If you are wondering how exactly to use numpy.log, or are looking for working examples of it, the curated code samples here may help. You can also explore further usage examples of the numpy module that the method belongs to.
Below, 15 code examples of the numpy.log method are shown, sorted by popularity by default.
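Before the examples, a minimal refresher on the function itself (plain numpy, nothing project-specific): numpy.log is the element-wise natural logarithm; it returns -inf for 0 and nan for negative inputs, which is why most of the examples below clip or offset their data first.

import numpy as np

x = np.array([1.0, np.e, 10.0])
print(np.log(x))            # [0., 1., ~2.3026] -- natural (base-e) log, element-wise
print(np.log(np.exp(3.0)))  # ~3.0 -- log is the inverse of exp
safe = np.log(np.clip(np.array([0.0, 0.5, 2.0]), 1e-12, None))  # clipping avoids log(0) = -inf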
Example 1: load_RSM
# Required module: import numpy [as alias]
# Or: from numpy import log [as alias]
def load_RSM(filename):
    # Assumes the source project's module-level imports:
    # import numpy as np; import xrayutilities as xu
    # from scipy.interpolate import griddata; from pandas import Panel
    om, tt, psd = xu.io.getxrdml_map(filename)
    om = np.deg2rad(om)
    tt = np.deg2rad(tt)
    wavelength = 1.54056
    q_y = (1 / wavelength) * (np.cos(tt) - np.cos(2 * om - tt))
    q_x = (1 / wavelength) * (np.sin(tt) - np.sin(2 * om - tt))
    xi = np.linspace(np.min(q_x), np.max(q_x), 100)
    yi = np.linspace(np.min(q_y), np.max(q_y), 100)
    psd[psd < 1] = 1  # floor at 1 so np.log stays finite and non-negative
    data_grid = griddata(
        (q_x, q_y), psd, (xi[None, :], yi[:, None]), fill_value=1, method="cubic"
    )
    nx, ny = data_grid.shape
    range_values = [np.min(q_x), np.max(q_x), np.min(q_y), np.max(q_y)]
    output_data = (
        Panel(np.log(data_grid).reshape(nx, ny, 1), minor_axis=["RSM"])
        .transpose(2, 0, 1)
        .to_frame()
    )
    return range_values, output_data
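The psd[psd < 1] = 1 floor followed by np.log is a common way to display diffraction intensities that span many orders of magnitude. A self-contained sketch of just that log-scaling step, on synthetic detector counts rather than the xrayutilities pipeline above:

import numpy as np

counts = np.array([0.0, 0.3, 5.0, 1e4, 1e6])  # synthetic counts spanning six decades
counts[counts < 1] = 1                        # floor at 1 so np.log() stays >= 0
log_counts = np.log(counts)                   # compresses the range to roughly 0..13.8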
Example 2: wave2input_image
# Required module: import numpy [as alias]
# Or: from numpy import log [as alias]
def wave2input_image(wave, window, pos=0, pad=0):
    # sride, height, dif, bias and scale are module-level constants in the source project
    wave_image = np.hstack([wave[pos+i*sride:pos+(i+pad*2)*sride+dif].reshape(height+pad*2, sride) for i in range(256//sride)])[:, :254]
    wave_image *= window
    spectrum_image = np.fft.fft(wave_image, axis=1)
    input_image = np.abs(spectrum_image[:, :128].reshape(1, height+pad*2, 128), dtype=np.float32)
    np.clip(input_image, 1000, None, out=input_image)  # floor the magnitudes before the log
    np.log(input_image, out=input_image)               # in-place natural log
    input_image += bias
    input_image /= scale
    if np.max(input_image) > 0.95:
        print('input image max bigger than 0.95', np.max(input_image))
    if np.min(input_image) < 0.05:
        print('input image min smaller than 0.05', np.min(input_image))
    return input_image
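wave2input_image depends on several globals from its source project (sride, height, dif, bias, scale), but the numpy.log usage is the classic log-magnitude spectrogram. A self-contained sketch of only that step, with made-up framing constants:

import numpy as np

wave = np.random.randn(1024).astype(np.float32)    # fake audio buffer
frames = wave.reshape(8, 128) * np.hanning(128)    # naive framing + window
mag = np.abs(np.fft.rfft(frames, axis=1))[:, :64]  # magnitude spectrum
mag = np.clip(mag, 1e-3, None)                     # avoid log(0)
log_spec = np.log(mag)                             # log-magnitude spectrogram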
Example 3: cost0
# Required module: import numpy [as alias]
# Or: from numpy import log [as alias]
def cost0(params, input_size, hidden_size, num_labels, X, y, learning_rate):
    m = X.shape[0]
    X = np.matrix(X)
    y = np.matrix(y)
    # reshape the parameter array into parameter matrices for each layer
    theta1 = np.matrix(np.reshape(params[:hidden_size * (input_size + 1)], (hidden_size, (input_size + 1))))
    theta2 = np.matrix(np.reshape(params[hidden_size * (input_size + 1):], (num_labels, (hidden_size + 1))))
    # run the feed-forward pass
    a1, z2, a2, z3, h = forward_propagate(X, theta1, theta2)
    # compute the cost
    J = 0
    for i in range(m):
        first_term = np.multiply(-y[i, :], np.log(h[i, :]))
        second_term = np.multiply((1 - y[i, :]), np.log(1 - h[i, :]))
        J += np.sum(first_term - second_term)
    J = J / m
    return J
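The per-row loop above can be collapsed into one vectorized expression with numpy.log. A toy, self-contained check of the same unregularized cross-entropy (the y and h values here are made up):

import numpy as np

y = np.array([[1, 0], [0, 1], [1, 0]], dtype=float)   # one-hot labels
h = np.array([[0.9, 0.1], [0.2, 0.8], [0.6, 0.4]])    # predicted probabilities
m = y.shape[0]
J = np.sum(-y * np.log(h) - (1 - y) * np.log(1 - h)) / m  # same J as the loop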
Example 4: cost
# Required module: import numpy [as alias]
# Or: from numpy import log [as alias]
def cost(params, input_size, hidden_size, num_labels, X, y, learning_rate):
    m = X.shape[0]
    X = np.matrix(X)
    y = np.matrix(y)
    # reshape the parameter array into parameter matrices for each layer
    theta1 = np.matrix(np.reshape(params[:hidden_size * (input_size + 1)], (hidden_size, (input_size + 1))))
    theta2 = np.matrix(np.reshape(params[hidden_size * (input_size + 1):], (num_labels, (hidden_size + 1))))
    # run the feed-forward pass
    a1, z2, a2, z3, h = forward_propagate(X, theta1, theta2)
    # compute the cost
    J = 0
    for i in range(m):
        first_term = np.multiply(-y[i, :], np.log(h[i, :]))
        second_term = np.multiply((1 - y[i, :]), np.log(1 - h[i, :]))
        J += np.sum(first_term - second_term)
    J = J / m
    # add the cost regularization term
    J += (float(learning_rate) / (2 * m)) * (np.sum(np.power(theta1[:, 1:], 2)) + np.sum(np.power(theta2[:, 1:], 2)))
    return J
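In practice np.log(h) and np.log(1 - h) become -inf when the sigmoid saturates at exactly 0 or 1. A common guard, not present in the original code, is to clip the predictions before taking the log:

import numpy as np

h = np.array([0.0, 0.5, 1.0])       # saturated predictions
eps = 1e-12
h_safe = np.clip(h, eps, 1 - eps)   # keeps np.log finite
print(np.log(h_safe), np.log(1 - h_safe))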
Example 5: apply_cmap
# Required module: import numpy [as alias]
# Or: from numpy import log [as alias]
def apply_cmap(zs, cmap, vmin=None, vmax=None, unit=None, logrescale=False):
    '''
    apply_cmap(z, cmap) applies the given cmap to the values in z; if vmin and/or vmax are passed,
    they are used to scale z.

    Note that this function can automatically rescale data into log-space if the colormap is a
    neuropythy log-space colormap such as log_eccentricity. To enable this behaviour use the
    optional argument logrescale=True.
    '''
    zs = pimms.mag(zs) if unit is None else pimms.mag(zs, unit)
    zs = np.asarray(zs, dtype='float')
    if pimms.is_str(cmap): cmap = matplotlib.cm.get_cmap(cmap)
    if logrescale:
        if vmin is None: vmin = np.log(np.nanmin(zs))
        if vmax is None: vmax = np.log(np.nanmax(zs))
        mn = np.exp(vmin)
        u = zdivide(nanlog(zs + mn) - vmin, vmax - vmin, null=np.nan)
    else:
        if vmin is None: vmin = np.nanmin(zs)
        if vmax is None: vmax = np.nanmax(zs)
        u = zdivide(zs - vmin, vmax - vmin, null=np.nan)
    u[np.isnan(u)] = -np.inf
    return cmap(u)
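apply_cmap leans on neuropythy helpers (pimms, zdivide, nanlog), but the core of logrescale=True can be sketched with plain numpy and matplotlib; here viridis stands in for the neuropythy log_eccentricity colormap and the values are made up:

import numpy as np
import matplotlib.cm as cm

z = np.array([0.5, 1.0, 10.0, 90.0])      # e.g. eccentricity values in degrees
vmin, vmax = np.log(z.min()), np.log(z.max())
u = (np.log(z) - vmin) / (vmax - vmin)    # log-rescale into [0, 1]
colors = cm.viridis(u)                    # one RGBA row per input value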
Example 6: to_logeccen
# Required module: import numpy [as alias]
# Or: from numpy import log [as alias]
def to_logeccen(ecc, vmin=0, vmax=90, offset=0.75):
    '''
    to_logeccen(ecc) yields a rescaled log-space version of the eccentricity value (or values) ecc,
    which are extracted in degrees.
    to_logeccen(xy_matrix) rescales all the (x,y) points in the given matrix to have log-spaced
    eccentricity values.
    to_logeccen is the inverse of from_logeccen.
    '''
    if pimms.is_matrix(ecc):
        xy = np.asarray(pimms.mag(ecc, 'deg'))
        trq = xy.shape[0] != 2
        xy = np.transpose(xy) if trq else np.asarray(xy)
        ecc = np.sqrt(np.sum(xy**2, axis=0))
        esc = to_logeccen(ecc, vmin=vmin, vmax=vmax, offset=offset)
        ecc = zinv(ecc)
        xy = xy * [ecc, ecc] * [esc, esc]
        return xy.T if trq else xy
    else:
        (ecc, vmin, vmax, offset) = [np.asarray(pimms.mag(u, 'deg')) for u in (ecc, vmin, vmax, offset)]
        log_ecc = np.log(ecc + offset)
        (vmin, vmax) = [np.log(u + offset) for u in (vmin, vmax)]
        return (log_ecc - vmin) / (vmax - vmin)
Example 7: update
# Required module: import numpy [as alias]
# Or: from numpy import log [as alias]
def update(self, labels, preds):
    """Updates the internal evaluation result.

    Parameters
    ----------
    labels : list of `NDArray`
        The labels of the data.
    preds : list of `NDArray`
        Predicted values.
    """
    mx.metric.check_label_shapes(labels, preds)
    for label, pred in zip(labels, preds):
        label = label.asnumpy()
        pred = pred.asnumpy()
        pred = np.column_stack((1 - pred, pred))
        label = label.ravel()
        num_examples = pred.shape[0]
        assert label.shape[0] == num_examples, (label.shape[0], num_examples)
        prob = pred[np.arange(num_examples, dtype=np.int64), np.int64(label)]
        self.sum_metric += (-np.log(prob + self.eps)).sum()
        self.num_inst += num_examples
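The np.column_stack((1 - pred, pred)) line turns sigmoid outputs into a two-column probability table, and the fancy-indexing line then picks each row's probability of its true class. A toy, self-contained version of that accumulation with made-up numbers:

import numpy as np

p1 = np.array([0.9, 0.2, 0.6])         # sigmoid outputs for class 1
pred = np.column_stack((1 - p1, p1))   # [[0.1, 0.9], [0.8, 0.2], [0.4, 0.6]]
label = np.array([1, 0, 1])
prob = pred[np.arange(3), label]       # [0.9, 0.8, 0.6]
nll = (-np.log(prob + 1e-12)).sum()    # summed negative log-likelihood, as in the metric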
Example 8: Perplexity
# Required module: import numpy [as alias]
# Or: from numpy import log [as alias]
def Perplexity(label, pred):
    """Calculates prediction perplexity.

    Args:
        label (mx.nd.array): labels array
        pred (mx.nd.array): prediction array

    Returns:
        float: calculated perplexity
    """
    # collapse the time, batch dimension
    label = label.reshape((-1,))
    pred = pred.reshape((-1, pred.shape[-1]))
    loss = 0.
    for i in range(pred.shape[0]):
        loss += -np.log(max(1e-10, pred[i][int(label[i])]))
    return np.exp(loss / label.size)
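The Python loop can be replaced by one vectorized expression. A sketch with made-up values, assuming each row of pred is a probability distribution and label holds integer class ids:

import numpy as np

pred = np.array([[0.8, 0.2], [0.3, 0.7], [0.5, 0.5]])
label = np.array([0, 1, 1])
p = np.maximum(1e-10, pred[np.arange(label.size), label])
perplexity = np.exp(-np.log(p).mean())   # same result as the loop above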
Example 9: update
# Required module: import numpy [as alias]
# Or: from numpy import log [as alias]
def update(self, labels, preds):
    """
    Implementation of updating metrics
    """
    # get generated multi label from network
    cls_prob = preds[0].asnumpy()
    loc_loss = preds[1].asnumpy()
    cls_label = preds[2].asnumpy()
    valid_count = np.sum(cls_label >= 0)
    # overall accuracy & object accuracy
    label = cls_label.flatten()
    mask = np.where(label >= 0)[0]
    indices = np.int64(label[mask])
    prob = cls_prob.transpose((0, 2, 1)).reshape((-1, cls_prob.shape[1]))
    prob = prob[mask, indices]
    self.sum_metric[0] += (-np.log(prob + self.eps)).sum()
    self.num_inst[0] += valid_count
    # smoothl1loss
    self.sum_metric[1] += np.sum(loc_loss)
    self.num_inst[1] += valid_count
Example 10: logging_config
# Required module: import numpy [as alias]
# Or: from numpy import log [as alias]
def logging_config(name=None, level=logging.DEBUG, console_level=logging.DEBUG):
    if name is None:
        name = inspect.stack()[1][1].split('.')[0]
    folder = os.path.join(os.getcwd(), name)
    if not os.path.exists(folder):
        os.makedirs(folder)
    logpath = os.path.join(folder, name + ".log")
    print("All Logs will be saved to %s" % logpath)
    logging.root.setLevel(level)
    formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
    logfile = logging.FileHandler(logpath)
    logfile.setLevel(level)
    logfile.setFormatter(formatter)
    logging.root.addHandler(logfile)
    # TODO: update logging patterns in other files
    logconsole = logging.StreamHandler()
    logconsole.setLevel(console_level)
    logconsole.setFormatter(formatter)
    logging.root.addHandler(logconsole)
    return folder
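Note that this example involves the ".log" file extension and Python's logging module rather than numpy.log. A hedged usage sketch, assuming the function above is available together with its logging/os/inspect imports ("experiment" is a made-up run name): the call creates ./experiment/experiment.log and wires both a file handler and a console handler onto the root logger.

import logging

folder = logging_config("experiment", level=logging.INFO)
logging.info("training started")  # appears on the console and in ./experiment/experiment.log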
Example 11: update
# Required module: import numpy [as alias]
# Or: from numpy import log [as alias]
def update(self, labels, preds):
    pred = preds[self.pred.index('rpn_cls_prob')]
    label = labels[self.label.index('rpn_label')]
    # label (b, p)
    label = label.asnumpy().astype('int32').reshape((-1))
    # pred (b, c, p) or (b, c, h, w) --> (b, p, c) --> (b*p, c)
    pred = pred.asnumpy().reshape((pred.shape[0], pred.shape[1], -1)).transpose((0, 2, 1))
    pred = pred.reshape((label.shape[0], -1))
    # filter with keep_inds
    keep_inds = np.where(label != -1)[0]
    label = label[keep_inds]
    cls = pred[keep_inds, label]
    cls += 1e-14  # avoid log(0)
    cls_loss = -1 * np.log(cls)
    cls_loss = np.sum(cls_loss)
    self.sum_metric += cls_loss
    self.num_inst += label.shape[0]
Example 12: update
# Required module: import numpy [as alias]
# Or: from numpy import log [as alias]
def update(self, labels, preds):
    """Updates the internal evaluation result.

    Parameters
    ----------
    labels : list of `NDArray`
        The labels of the data.
    preds : list of `NDArray`
        Predicted values.
    """
    labels, preds = check_label_shapes(labels, preds, True)
    for label, pred in zip(labels, preds):
        label = label.asnumpy()
        pred = pred.asnumpy()
        label = label.ravel()
        assert label.shape[0] == pred.shape[0]
        prob = pred[numpy.arange(label.shape[0]), numpy.int64(label)]
        self.sum_metric += (-numpy.log(prob + self.eps)).sum()
        self.num_inst += label.shape[0]
Example 13: test_bce_loss
# Required module: import numpy [as alias]
# Or: from numpy import log [as alias]
def test_bce_loss():
    N = 20
    data = mx.random.uniform(-1, 1, shape=(N, 20))
    label = mx.nd.array(np.random.randint(2, size=(N,)), dtype='float32')
    data_iter = mx.io.NDArrayIter(data, label, batch_size=10, label_name='label')
    output = get_net(1)
    l = mx.symbol.Variable('label')
    Loss = gluon.loss.SigmoidBinaryCrossEntropyLoss()
    loss = Loss(output, l)
    loss = mx.sym.make_loss(loss)
    mod = mx.mod.Module(loss, data_names=('data',), label_names=('label',))
    mod.fit(data_iter, num_epoch=200, optimizer_params={'learning_rate': 0.01},
            eval_metric=mx.metric.Loss(), optimizer='adam',
            initializer=mx.init.Xavier(magnitude=2))
    assert mod.score(data_iter, eval_metric=mx.metric.Loss())[0][1] < 0.01
    # Test against npy
    data = mx.random.uniform(-5, 5, shape=(10,))
    label = mx.random.uniform(0, 1, shape=(10,))
    mx_bce_loss = Loss(data, label).asnumpy()
    prob_npy = 1.0 / (1.0 + np.exp(-data.asnumpy()))
    label_npy = label.asnumpy()
    npy_bce_loss = - label_npy * np.log(prob_npy) - (1 - label_npy) * np.log(1 - prob_npy)
    assert_almost_equal(mx_bce_loss, npy_bce_loss, rtol=1e-4, atol=1e-5)
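The numpy reference above takes np.log of an explicit sigmoid, which can lose precision when the sigmoid saturates. A numerically stabler equivalent (a standard identity, not part of the MXNet test itself) works directly on the raw scores:

import numpy as np

x = np.array([-5.0, 0.0, 5.0])   # raw scores (logits), made up
y = np.array([0.0, 1.0, 1.0])
# -y*log(sigmoid(x)) - (1-y)*log(1-sigmoid(x))  ==  logaddexp(0, x) - y*x
bce = np.logaddexp(0.0, x) - y * x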
Example 14: __init__
# Required module: import numpy [as alias]
# Or: from numpy import log [as alias]
def __init__(self, choice="sigmoid"):
    """
    :param choice: Which activation function you want, must be in self.available
    """
    if choice not in self.available:
        msg = "Choice of activation (" + choice + ") not available!"
        log.out.error(msg)
        raise ValueError(msg)
    elif choice == "tanh":
        self.function = self._tanh
    elif choice == "tanhpos":
        self.function = self._tanhpos
    elif choice == "sigmoid":
        self.function = self._sigmoid
    elif choice == "softplus":
        self.function = self._softplus
    elif choice == "relu":
        self.function = self._relu
    elif choice == "leakyrelu":
        self.function = self._leakyrelu
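Softplus, one of the activations selected above, is typically written with numpy.log. A sketch of a plain and a numerically safer version, which may differ from the class's actual _softplus implementation:

import numpy as np

def softplus(x):
    return np.log(1.0 + np.exp(x))   # simple form; exp overflows for large x

def softplus_stable(x):
    return np.logaddexp(0.0, x)      # log(1 + exp(x)) without overflow

x = np.array([-30.0, 0.0, 30.0])
print(softplus_stable(x))            # ~[0, 0.693, 30]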
Example 15: _compute_eps
# Required module: import numpy [as alias]
# Or: from numpy import log [as alias]
def _compute_eps(log_moments, delta):
    """Compute epsilon for given log_moments and delta.

    Args:
        log_moments: the log moments of privacy loss, in the form of pairs
            of (moment_order, log_moment)
        delta: the target delta.
    Returns:
        epsilon
    """
    min_eps = float("inf")
    for moment_order, log_moment in log_moments:
        if moment_order == 0:
            continue
        if math.isinf(log_moment) or math.isnan(log_moment):
            sys.stderr.write("The %d-th order is inf or Nan\n" % moment_order)
            continue
        min_eps = min(min_eps, (log_moment - math.log(delta)) / moment_order)
    return min_eps
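As a worked illustration of the formula eps = min over orders of (log_moment - log(delta)) / order, with made-up moment values rather than output from a real privacy accountant:

import math

log_moments = [(2, 0.5), (4, 1.5), (8, 4.0)]   # hypothetical (moment_order, log_moment) pairs
delta = 1e-5
eps = min((lm - math.log(delta)) / order for order, lm in log_moments)
# log(1e-5) ~= -11.51, so the candidates are ~6.0, ~3.25 and ~1.94; eps ~= 1.94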