本文整理汇总了Python中numpy.e方法的典型用法代码示例。如果您正苦于以下问题:Python numpy.e方法的具体用法?Python numpy.e怎么用?Python numpy.e使用的例子?那么恭喜您, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类numpy的用法示例。
在下文中一共展示了numpy.e方法的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: to_potential
# 需要导入模块: import numpy [as 别名]
# 或者: from numpy import e [as 别名]
def to_potential(f):
    '''
    Coerce the argument into a potential function.

    to_potential(f) yields f unchanged when f is already a potential function;
    otherwise f is converted to one, if possible, and the result is yielded.
    to_potential(Ellipsis) yields the identity potential function (output is
    simply the input).
    to_potential(None) is equivalent to to_potential(0).
    Convertible values are:
      * anything for which pimms.is_array(x, 'number') yields True (i.e.,
        arrays of constants);
      * any tuple (g, h) where g(x) yields a potential value and h(x) yields
        a jacobian matrix for the parameter vector x.

    Raises
    ------
    ValueError
        If f cannot be interpreted as a potential function.
    '''
    if is_potential(f):
        return f
    if f is Ellipsis:
        return identity
    if pimms.is_array(f, 'number'):
        return const_potential(f)
    if isinstance(f, tuple) and len(f) == 2:
        (g, h) = f
        return PotentialLambda(g, h)
    raise ValueError('Could not convert object of type %s to potential function' % type(f))
示例2: sigmoid
# 需要导入模块: import numpy [as 别名]
# 或者: from numpy import e [as 别名]
def sigmoid(f=Ellipsis, mu=0, sigma=1, scale=1, invert=False, normalize=False):
    '''
    sigmoid() yields a potential function equivalent to the integral of
    gaussian() -- i.e. the error function -- scaled to match gaussian().
    sigmoid(f) is equivalent to compose(sigmoid(), f).
    All options accepted by gaussian() are accepted here with the same
    defaults and equivalent handling, except invert: an inverted sigmoid
    approaches its maximum value at -inf and approaches 0 at inf.
    Because sigmoid() explicitly matches gaussian(), the base formula is:
      f(x) = scale * sigma * sqrt(pi/2) * erf((x - mu) / (sqrt(2) * sigma))
      k*sig*Sqrt[Pi/2] Erf[(x - mu)/sig/Sqrt[2]]
    '''
    # NOTE(review): the documented formula carries a sigma factor that the
    # code below does not multiply in -- confirm which is intended.
    g = to_potential(f)
    base = erf((g - mu) / (sigma * np.sqrt(2.0)))
    if invert:
        base = 1 - base
    result = np.sqrt(np.pi / 2) * scale * base
    if normalize:
        # Divide by the gaussian normalization constant sqrt(2*pi)*sigma.
        result = result / (np.sqrt(2.0 * np.pi) * sigma)
    return result
示例3: test_closing_fid
# 需要导入模块: import numpy [as 别名]
# 或者: from numpy import e [as 别名]
def test_closing_fid(self):
# Test that issue #1517 (too many opened files) remains closed
# It might be a "weak" test since failed to get triggered on
# e.g. Debian sid of 2012 Jul 05 but was reported to
# trigger the failure on Ubuntu 10.04:
# http://projects.scipy.org/numpy/ticket/1517#comment:2
with temppath(suffix='.npz') as tmp:
np.savez(tmp, data='LOVELY LOAD')
# We need to check if the garbage collector can properly close
# numpy npz file returned by np.load when their reference count
# goes to zero. Python 3 running in debug mode raises a
# ResourceWarning when file closing is left to the garbage
# collector, so we catch the warnings. Because ResourceWarning
# is unknown in Python < 3.x, we take the easy way out and
# catch all warnings.
with suppress_warnings() as sup:
sup.filter(Warning) # TODO: specify exact message
for i in range(1, 1025):
try:
np.load(tmp)["data"]
except Exception as e:
msg = "Failed to load data from a file: %s" % e
raise AssertionError(msg)
示例4: test_complex_negative_exponent
# 需要导入模块: import numpy [as 别名]
# 或者: from numpy import e [as 别名]
def test_complex_negative_exponent(self):
# Previous to 1.15, some formats generated x+-yj, gh 7895
ncols = 2
nrows = 2
a = np.zeros((ncols, nrows), dtype=np.complex128)
re = np.pi
im = np.e
a[:] = re - 1.0j * im
c = BytesIO()
np.savetxt(c, a, fmt='%.3e')
c.seek(0)
lines = c.readlines()
assert_equal(
lines,
[b' (3.142e+00-2.718e+00j) (3.142e+00-2.718e+00j)\n',
b' (3.142e+00-2.718e+00j) (3.142e+00-2.718e+00j)\n'])
示例5: test_complex_misformatted
# 需要导入模块: import numpy [as 别名]
# 或者: from numpy import e [as 别名]
def test_complex_misformatted(self):
# test for backward compatibility
# some complex formats used to generate x+-yj
a = np.zeros((2, 2), dtype=np.complex128)
re = np.pi
im = np.e
a[:] = re - 1.0j * im
c = BytesIO()
np.savetxt(c, a, fmt='%.16e')
c.seek(0)
txt = c.read()
c.seek(0)
# misformat the sign on the imaginary part, gh 7895
txt_bad = txt.replace(b'e+00-', b'e00+-')
assert_(txt_bad != txt)
c.write(txt_bad)
c.seek(0)
res = np.loadtxt(c, dtype=complex)
assert_equal(res, a)
示例6: entropy
# 需要导入模块: import numpy [as 别名]
# 或者: from numpy import e [as 别名]
def entropy(self, mean=None, cov=1):
    """
    Compute the differential entropy of the multivariate normal.

    Parameters
    ----------
    %(_mvn_doc_default_callparams)s

    Returns
    -------
    h : scalar
        Entropy of the multivariate normal distribution

    Notes
    -----
    %(_mvn_doc_callparams_note)s
    """
    dim, mean, cov = self._process_parameters(None, mean, cov)
    # h = 0.5 * log(det(2*pi*e*cov)); slogdet is used rather than det+log
    # for numerical stability with large or ill-conditioned covariances.
    scaled_cov = 2 * np.pi * np.e * cov
    logdet = np.linalg.slogdet(scaled_cov)[1]
    return 0.5 * logdet
示例7: test_invalid_raise
# 需要导入模块: import numpy [as 别名]
# 或者: from numpy import e [as 别名]
def test_invalid_raise(self):
    # Test invalid_raise: 50 good rows of 5 values, with every 10th row
    # corrupted to only 4 comma-separated values, plus a header row.
    rows = ["1, 1, 1, 1, 1"] * 50
    for k in range(5):
        rows[10 * k] = "2, 2, 2, 2 2"
    rows.insert(0, "a, b, c, d, e")
    mdata = TextIO("\n".join(rows))
    kwargs = dict(delimiter=",", dtype=None, names=True)
    # XXX: is there a better way to get the return value of the
    # callable in assert_warns ?
    ret = {}
    def f(_ret={}):
        _ret['mtest'] = np.ndfromtxt(mdata, invalid_raise=False, **kwargs)
    assert_warns(ConversionWarning, f, _ret=ret)
    mtest = ret['mtest']
    # The 5 bad rows are skipped, leaving 45 rows of ones.
    assert_equal(len(mtest), 45)
    expected = np.ones(45, dtype=[(name, int) for name in 'abcde'])
    assert_equal(mtest, expected)
    # With the default invalid_raise=True the same data must raise.
    mdata.seek(0)
    assert_raises(ValueError, np.ndfromtxt, mdata,
                  delimiter=",", names=True)
示例8: test_closing_fid
# 需要导入模块: import numpy [as 别名]
# 或者: from numpy import e [as 别名]
def test_closing_fid(self):
# Test that issue #1517 (too many opened files) remains closed
# It might be a "weak" test since failed to get triggered on
# e.g. Debian sid of 2012 Jul 05 but was reported to
# trigger the failure on Ubuntu 10.04:
# http://projects.scipy.org/numpy/ticket/1517#comment:2
with temppath(suffix='.npz') as tmp:
np.savez(tmp, data='LOVELY LOAD')
# We need to check if the garbage collector can properly close
# numpy npz file returned by np.load when their reference count
# goes to zero. Python 3 running in debug mode raises a
# ResourceWarning when file closing is left to the garbage
# collector, so we catch the warnings. Because ResourceWarning
# is unknown in Python < 3.x, we take the easy way out and
# catch all warnings.
with warnings.catch_warnings():
warnings.simplefilter("ignore")
for i in range(1, 1025):
try:
np.load(tmp)["data"]
except Exception as e:
msg = "Failed to load data from a file: %s" % e
raise AssertionError(msg)
示例9: gencrossentropy
# 需要导入模块: import numpy [as 别名]
# 或者: from numpy import e [as 别名]
def gencrossentropy(px, py, pxpy, alpha=1, logbase=2, measure='T'):
    """
    Generalized cross-entropy measures.

    Parameters
    ----------
    px : array-like
        Discrete probability distribution of random variable X.
    py : array-like
        Discrete probability distribution of random variable Y.
    pxpy : 2d array-like, optional
        Joint probability distribution of X and Y.  If pxpy is None, X and Y
        are assumed to be independent.
    logbase : int or np.e, optional
        Default is 2 (bits).
    measure : str, optional
        The type of generalized cross-entropy desired.  'T' is the
        cross-entropy version of the Tsallis measure.  'CR' is the
        Cressie-Read measure.
    """
    # NOTE(review): no implementation is visible in this excerpt -- the
    # function body appears truncated; as shown it returns None.
示例10: entropy_sym
# 需要导入模块: import numpy [as 别名]
# 或者: from numpy import e [as 别名]
def entropy_sym(self, dist_info_vars, name='entropy_sym'):
    """Symbolic entropy of a distribution.

    Args:
        dist_info_vars (dict): Symbolic parameters of a distribution.
        name (str): TensorFlow scope name.

    Returns:
        tf.Tensor: Symbolic entropy of the distribution.
    """
    # Per-dimension entropy of a diagonal Gaussian is
    # log_std + log(sqrt(2*pi*e)); the constant is a plain Python float,
    # so it is folded into the graph by broadcasting.
    offset = np.log(np.sqrt(2 * np.pi * np.e))
    with tf.name_scope(name):
        return tf.reduce_sum(dist_info_vars['log_std'] + offset, axis=-1)
示例11: CalcDistProb
# 需要导入模块: import numpy [as 别名]
# 或者: from numpy import e [as 别名]
def CalcDistProb(data=None, bins=None, invalidDistanceSeparated=False):
    # Discretize every distance matrix in data into a label matrix, then
    # estimate per-label probabilities over all of them.
    labelMatrices = [
        DiscretizeDistMatrix(distm, bins=bins,
                             invalidDistanceSeparated=invalidDistanceSeparated)[0]
        for distm in data]
    ## need fix here
    # One extra label slot is reserved when invalid distances are kept
    # as a separate category.
    if invalidDistanceSeparated:
        numLabels = len(bins) + 1
    else:
        numLabels = len(bins)
    return CalcLabelProb(labelMatrices, numLabels)
## d needs to be positive, cannot be -1
## cutoffs is the distance boundary array
## return the largest index position such that cutoffs[position] <= d, i.e., d< cutoffs[position+1]
示例12: balanced_l1_loss
# 需要导入模块: import numpy [as 别名]
# 或者: from numpy import e [as 别名]
def balanced_l1_loss(pred,
                     target,
                     beta=1.0,
                     alpha=0.5,
                     gamma=1.5,
                     reduction='mean'):
    """Element-wise balanced L1 loss.

    Args:
        pred (torch.Tensor): Predictions, same shape as ``target``.
        target (torch.Tensor): Targets; must be non-empty.
        beta (float): Threshold between the small- and large-error branches.
        alpha (float): Balancing factor of the small-error branch.
        gamma (float): Slope of the large-error branch.
        reduction (str): Accepted for API compatibility; not applied here --
            the element-wise loss tensor is returned.

    Returns:
        torch.Tensor: Per-element loss, same shape as ``pred``.
    """
    assert beta > 0
    assert pred.size() == target.size() and target.numel() > 0
    diff = torch.abs(pred - target)
    # b makes the two branches meet smoothly at diff == beta.
    b = np.e**(gamma / alpha) - 1
    small_branch = (alpha / b * (b * diff + 1) * torch.log(b * diff / beta + 1)
                    - alpha * diff)
    large_branch = gamma * diff + gamma / b - alpha * beta
    return torch.where(diff < beta, small_branch, large_branch)
示例13: balanced_l1_loss
# 需要导入模块: import numpy [as 别名]
# 或者: from numpy import e [as 别名]
def balanced_l1_loss(pred,
                     target,
                     beta=1.0,
                     alpha=0.5,
                     gamma=1.5,
                     reduction='mean'):
    """Calculate balanced L1 loss.

    Please see the `Libra R-CNN <https://arxiv.org/pdf/1904.02701.pdf>`_

    Args:
        pred (torch.Tensor): The prediction with shape (N, 4).
        target (torch.Tensor): The learning target of the prediction with
            shape (N, 4).
        beta (float): The loss is a piecewise function of prediction and
            target and ``beta`` serves as a threshold for the difference
            between the prediction and target. Defaults to 1.0.
        alpha (float): The denominator ``alpha`` in the balanced L1 loss.
            Defaults to 0.5.
        gamma (float): The ``gamma`` in the balanced L1 loss.
            Defaults to 1.5.
        reduction (str, optional): The method that reduces the loss to a
            scalar. Options are "none", "mean" and "sum".

    Returns:
        torch.Tensor: The calculated loss
    """
    assert beta > 0
    assert pred.size() == target.size() and target.numel() > 0
    # NOTE(review): ``reduction`` is documented but never applied in this
    # body; the element-wise loss is returned as-is -- confirm the caller
    # performs the reduction.
    abs_diff = torch.abs(pred - target)
    b = np.e**(gamma / alpha) - 1
    below = (alpha / b * (b * abs_diff + 1) * torch.log(b * abs_diff / beta + 1)
             - alpha * abs_diff)
    above = gamma * abs_diff + gamma / b - alpha * beta
    loss = torch.where(abs_diff < beta, below, above)
    return loss
示例14: part
# 需要导入模块: import numpy [as 别名]
# 或者: from numpy import e [as 别名]
def part(f, ii=None, input_len=None):
    '''
    part(u, ii) for constant or constant potential u yields a
    constant-potential form of u[ii].
    part(f, ii) for potential function f yields a potential function g(x)
    equivalent to f(x)[ii].
    part(ii) is equivalent to part(identity, ii); i.e., it extracts part of
    the input parameters to the function.
    '''
    if ii is None:
        return PotentialPart(f, input_len=input_len)
    pf = to_potential(f)
    if is_const_potential(pf):
        # Constant potentials can be indexed directly.
        return PotentialConstant(pf.c[ii])
    return compose(PotentialPart(ii, input_len=input_len), pf)
示例15: exp
# 需要导入模块: import numpy [as 别名]
# 或者: from numpy import e [as 别名]
def exp(x):
    '''
    exp(x) yields the exponential of the potential x: a constant potential
    when x is constant, otherwise e raised to the potential x.
    '''
    p = to_potential(x)
    if not is_const_potential(p):
        return ConstantPowerPotential(np.e, p)
    # Constant-fold: exp of a constant is itself a constant potential.
    return PotentialConstant(np.exp(p.c))