This article collects typical usage examples of the Python method autograd.numpy.inf. If you are wondering what numpy.inf is, how it is used, or what it looks like in real code, the curated examples below may help. You can also explore further usage examples from the containing module, autograd.numpy.
The following presents 7 code examples of numpy.inf, sorted by popularity by default.
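As a quick orientation before the excerpts (this snippet is not taken from any of the projects quoted below, and the function name is made up): autograd.numpy.inf is NumPy's inf passed through autograd's wrapper, so it works naturally as a "no limit" default inside differentiable code.

import autograd.numpy as anp
from autograd import grad

def clipped_square(x, upper=anp.inf):
    # With the default upper=anp.inf nothing is clipped, so the gradient is 2*x.
    return anp.minimum(x ** 2, upper)

print(clipped_square(3.0))        # 9.0
print(grad(clipped_square)(3.0))  # 6.0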
Example 1: constraint_c2
# Required imports: from autograd import numpy [as alias]
# or: from autograd.numpy import inf [as alias]
def constraint_c2(f, r):
    n_obj = f.shape[1]

    # v1: squared distance from each row of f to the closest axis point e_i, minus r ** 2
    v1 = anp.inf * anp.ones(f.shape[0])
    for i in range(n_obj):
        temp = (f[:, i] - 1) ** 2 + (anp.sum(f ** 2, axis=1) - f[:, i] ** 2) - r ** 2
        v1 = anp.minimum(temp.flatten(), v1)

    # v2: squared distance to the point (a, ..., a) with a = 1/sqrt(n_obj), minus r ** 2
    a = 1 / anp.sqrt(n_obj)
    v2 = anp.sum((f - a) ** 2, axis=1) - r ** 2

    g = anp.minimum(v1, v2.flatten())
    return g
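A short usage sketch for the function above (the inputs are made up): each row of F is one candidate solution's objective vector, and the result is one constraint value per row.

import autograd.numpy as anp

F = anp.array([[0.2, 0.9],
               [0.5, 0.5],
               [1.0, 0.1]])
g = constraint_c2(F, r=0.4)
print(g.shape)  # (3,) -- one value per candidate solution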
Example 2: __init__
# Required imports: from autograd import numpy [as alias]
# or: from autograd.numpy import inf [as alias]
def __init__(self, K, B, dt=1.0, sigma=np.inf, lmbda=np.inf):
    self.K, self.B, self.dt, self.sigma, self.lmbda = K, B, dt, sigma, lmbda

    # Initialize weights
    self.w = np.zeros(1 + self.K * self.B)

    # List of event counts and filtered inputs
    self.data_list = []
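The np.inf defaults for sigma and lmbda suggest hyperparameters whose effect disappears when left at infinity; a hypothetical sketch (not from the original class) of how such a default might be consumed:

import autograd.numpy as np

def prior_penalty(w, sigma):
    # With sigma = np.inf the Gaussian prior term vanishes, i.e. no regularization.
    if sigma == np.inf:
        return 0.0
    return np.sum(w ** 2) / (2.0 * sigma ** 2)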
Example 3: defaultmax
# Required imports: from autograd import numpy [as alias]
# or: from autograd.numpy import inf [as alias]
def defaultmax(x, default=-np.inf):
    # np.max raises on empty input, so fall back to the caller-supplied default
    if x.size == 0:
        return default
    return np.max(x)
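A quick usage check (the values are illustrative):

import autograd.numpy as np

print(defaultmax(np.array([])))               # -inf: empty input falls back to the default
print(defaultmax(np.array([2.0, 5.0])))       # 5.0
print(defaultmax(np.array([]), default=0.0))  # 0.0: caller-chosen fallback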
Example 4: test_nan_to_num
# Required imports: from autograd import numpy [as alias]
# or: from autograd.numpy import inf [as alias]
def test_nan_to_num():
    y = np.array([0., np.nan, np.inf, -np.inf])
    fun = lambda x: np.sum(np.sin(np.nan_to_num(x + y)))
    x = np.random.randn(4)
    check_grads(fun)(x)

# TODO(mattjj): np.frexp returns a pair of ndarrays and the second is an int
# type, for which there is currently no vspace registered
# def test_frexp():
#     fun = lambda x: np.frexp(x)[0]
#     A = 1.2  # np.random.rand(4,3) * 0.8 + 2.1
#     check_grads(fun)(A)
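check_grads here is presumably the gradient checker from autograd.test_util. The test works because np.nan_to_num replaces nan with 0 and ±inf with large finite floats, so the composed function stays finite and differentiable at the sampled point. A small illustration of that replacement:

import autograd.numpy as np

print(np.nan_to_num(np.array([0., np.nan, np.inf, -np.inf])))
# [ 0.0e+000  0.0e+000  1.8e+308 -1.8e+308]  (approximately)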
Example 5: uniform_reference_directions
# Required imports: from autograd import numpy [as alias]
# or: from autograd.numpy import inf [as alias]
def uniform_reference_directions(self, n_partitions, n_dim):
    ref_dirs = []
    # placeholder vector of inf values, filled coordinate-by-coordinate by the recursive helper
    ref_dir = anp.full(n_dim, anp.inf)
    self.__uniform_reference_directions(ref_dirs, ref_dir, n_partitions, n_partitions, 0)
    return anp.concatenate(ref_dirs, axis=0)
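The private helper __uniform_reference_directions is not shown in the excerpt. What follows is a minimal sketch (names and details assumed, not the project's actual code) of the Das-Dennis style recursion such a method typically performs, replacing the anp.inf placeholder one coordinate at a time so that every finished direction sums to 1:

import autograd.numpy as anp

def das_dennis_recursion(ref_dirs, ref_dir, n_partitions, beta, depth):
    if depth == len(ref_dir) - 1:
        # the last coordinate takes whatever partition budget is left
        ref_dir[depth] = beta / n_partitions
        ref_dirs.append(ref_dir[None, :].copy())
    else:
        for i in range(beta + 1):
            ref_dir[depth] = i / n_partitions
            das_dennis_recursion(ref_dirs, ref_dir.copy(), n_partitions, beta - i, depth + 1)

dirs = []
das_dennis_recursion(dirs, anp.full(3, anp.inf), 4, 4, 0)
print(anp.concatenate(dirs, axis=0).shape)  # (15, 3): all 3-D simplex points with step 1/4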
Example 6: multivariate_t_rvs
# Required imports: from autograd import numpy [as alias]
# or: from autograd.numpy import inf [as alias]
def multivariate_t_rvs(self, m, S, random_state=None):
    '''generate random variables of multivariate t distribution
    Parameters
    ----------
    m : array_like
        mean of random variable, length determines dimension of random variable
    S : array_like
        square array of covariance matrix
    df : int or float
        degrees of freedom
    n : int
        number of observations, return random array will be (n, len(m))
    random_state : int
        seed
    Returns
    -------
    rvs : ndarray, (n, len(m))
        each row is an independent draw of a multivariate t distributed
        random variable
    '''
    np.random.rand(9)  # advances the global RNG; the result is unused
    m = np.asarray(m)
    d = self.n_features
    df = self.degree_freedom
    n = 1
    if df == np.inf:
        # infinite degrees of freedom degenerates to a Gaussian; use an array
        # so the broadcast in the return statement works in this branch too
        x = np.ones(n)
    else:
        # random_state is used as a numpy RandomState here, despite the docstring calling it an int seed
        x = random_state.chisquare(df, n) / df
    np.random.rand(90)  # advances the global RNG; the result is unused
    z = random_state.multivariate_normal(np.zeros(d), S, (n,))
    return m + z / np.sqrt(x)[:, None]  # same output format as random.multivariate_normal
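The method above relies on instance attributes (self.n_features, self.degree_freedom) and an externally supplied RandomState. Here is a self-contained sketch of the same construction (names and defaults are mine), where df = np.inf degenerates to an ordinary multivariate normal draw:

import numpy as np

def multivariate_t_sample(m, S, df, n=1, seed=None):
    # A multivariate-t draw is a zero-mean Gaussian draw divided by
    # sqrt(chi2(df)/df), then shifted by the mean m.
    rng = np.random.RandomState(seed)
    m = np.asarray(m, dtype=float)
    d = m.shape[0]
    x = np.ones(n) if df == np.inf else rng.chisquare(df, n) / df
    z = rng.multivariate_normal(np.zeros(d), S, (n,))
    return m + z / np.sqrt(x)[:, None]

samples = multivariate_t_sample([0., 0.], np.eye(2), df=3, n=5, seed=0)
print(samples.shape)  # (5, 2)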
Example 7: coreset_single
# Required imports: from autograd import numpy [as alias]
# or: from autograd.numpy import inf [as alias]
def coreset_single(N, D, dist, algn):
    #sys.stderr.write('n: ' + str(N) + ' d: ' +str(D) + ' dist: ' + str(dist) + ' salgn: ' + str(algn) + '\n')
    x, mu0, Sig0, Sig = gendata(N, D, dist)
    Sig0inv = np.linalg.inv(Sig0)
    Siginv = np.linalg.inv(Sig)
    mup, Sigp = weighted_post(mu0, np.linalg.inv(Sig0), np.linalg.inv(Sig), x, np.ones(x.shape[0]))
    anm, alg = algn
    coreset = alg(x, mu0, Sig0, Sig)

    #incremental M tests
    prev_err = np.inf
    for m in range(1, N + 1):
        coreset.build(m)
        muw, Sigw = weighted_post(mu0, Sig0inv, Siginv, x, coreset.weights())
        w = coreset.weights()

        #check if coreset for 1 datapoint is immediately optimal
        if x.shape[0] == 1:
            assert np.fabs(w - np.array([1])) < tol, anm + " failed: coreset not immediately optimal with N = 1. weights: " + str(coreset.weights()) + " mup = " + str(mup) + " Sigp = " + str(Sigp) + " muw = " + str(muw) + " Sigw = " + str(Sigw)

        #check if coreset is valid
        assert (w > 0.).sum() <= m, anm + " failed: coreset size > m"
        assert (w > 0.).sum() == coreset.size(), anm + " failed: sum of coreset.weights()>0 not equal to size(): sum = " + str((coreset.weights() > 0).sum()) + " size(): " + str(coreset.size())
        assert np.all(w >= 0.), anm + " failed: coreset has negative weights"

        #check if actual output error is monotone
        err = weighted_post_KL(mu0, Sig0inv, Siginv, x, w, reverse=True if 'Reverse' in anm else False)
        assert err - prev_err < tol, anm + " failed: error is not monotone decreasing, err = " + str(err) + " prev_err = " + str(prev_err)

        #check if coreset is computing error properly
        assert np.fabs(coreset.error() - err) < tol, anm + " failed: error est is not close to true err: est err = " + str(coreset.error()) + ' true err = ' + str(err)
        prev_err = err

    #save incremental M result
    w_inc = coreset.weights()

    #check reset
    coreset.reset()
    err = weighted_post_KL(mu0, Sig0inv, Siginv, x, np.zeros(x.shape[0]), reverse=True if 'Reverse' in anm else False)
    assert coreset.M == 0 and np.all(np.fabs(coreset.weights()) == 0.) and np.fabs(coreset.error() - err) < tol and not coreset.reached_numeric_limit, anm + " failed: reset() did not properly reset"

    #check build up to N all at once vs incremental
    #do this test for all except bin, where symmetries can cause instabilities in the choice of vector / weights
    if dist != 'bin':
        coreset.build(N)
        w = coreset.weights()
        err = weighted_post_KL(mu0, Sig0inv, Siginv, x, w, reverse=True if 'Reverse' in anm else False)
        err_inc = weighted_post_KL(mu0, Sig0inv, Siginv, x, w_inc, reverse=True if 'Reverse' in anm else False)
        assert np.sqrt(((w - w_inc) ** 2).sum()) < tol, anm + " failed: incremental build up to N doesn't produce same result as one run at N : \n error = " + str(err) + " error_inc = " + str(err_inc)

    #check if coreset with all_data_wts is optimal
    coreset._update_weights(coreset.all_data_wts)
    assert coreset.error() < tol, anm + " failed: coreset with all_data_wts does not have error 0"
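The test depends on project helpers such as gendata, weighted_post, and weighted_post_KL that are defined elsewhere. As a rough guide to what the posterior update involves, here is a minimal sketch (my own, not necessarily the project's implementation) of a weighted conjugate Gaussian posterior of the kind the arguments suggest:

import numpy as np

def weighted_post_sketch(mu0, Sig0inv, Siginv, x, w):
    # Posterior precision: prior precision plus sum(w) copies of the likelihood precision.
    Sigp = np.linalg.inv(Sig0inv + w.sum() * Siginv)
    # Posterior mean: precision-weighted combination of the prior mean and the weighted data.
    mup = Sigp.dot(Sig0inv.dot(mu0) + Siginv.dot((w[:, None] * x).sum(axis=0)))
    return mup, Sigp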