This article collects typical usage examples of the numpy.real_if_close function in Python. If you are wondering how real_if_close is used in practice, how to call it, or what real code that relies on it looks like, the curated examples below may help.
The following shows 15 code examples of the real_if_close function, ordered by popularity by default.
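Before the project examples, here is a minimal, self-contained sketch of what numpy.real_if_close does: when every imaginary part of a complex array is smaller than a tolerance (given in multiples of machine epsilon, default 100), it returns a real-typed copy; otherwise it returns the input unchanged. The array values below are purely illustrative.

import numpy as np

# Imaginary parts below the tolerance (here 1000 machine epsilons) are treated
# as numerical noise, so the result comes back with a real dtype.
a = np.array([2.1 + 4e-14j, 5.2 + 3e-15j])
print(np.real_if_close(a, tol=1000))   # [2.1  5.2]

# An imaginary part above the tolerance keeps the whole array complex.
b = np.array([2.1 + 4e-13j, 5.2 + 3e-15j])
print(np.real_if_close(b, tol=1000))   # [2.1+4.e-13j  5.2+3.e-15j]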
Example 1: _brownian_eigs
def _brownian_eigs(n_grid, lag_time, grad_potential, xmin, xmax, reflect_bc):
"""Analytic eigenvalues/eigenvectors for 1D Brownian dynamics
"""
import scipy.linalg
ONE_OVER_SQRT_2PI = 1.0 / (np.sqrt(2 * np.pi))
normalpdf = lambda x: ONE_OVER_SQRT_2PI * np.exp(-0.5 * (x * x))
grid = np.linspace(xmin, xmax, n_grid)
width = grid[1] - grid[0]
transmat = np.zeros((n_grid, n_grid))
for i, x_i in enumerate(grid):
if reflect_bc:
for offset in range(-(n_grid - 1), n_grid):
x_j = x_i + (offset * width)
j = _reflect_boundary_conditions(i + offset, 0, n_grid - 1)
# What is the probability of going from x_i to x_j in one step?
diff = (x_j - x_i + DT * grad_potential(x_i)) / DT_SQRT_2D
transmat[i, j] += normalpdf(diff)
else:
for j, x_j in enumerate(grid):
# What is the probability of going from x_i to x_j in one step?
diff = (x_j - x_i + DT * grad_potential(x_i)) / DT_SQRT_2D
transmat[i, j] += normalpdf(diff)
transmat[i, :] = transmat[i, :] / np.sum(transmat[i, :])
transmat = np.linalg.matrix_power(transmat, lag_time)
u, v = scipy.linalg.eig(transmat)
order = np.argsort(np.real(u))[::-1]
return np.real_if_close(u[order]), np.real_if_close(v[:, order])
Example 2: compute_score
def compute_score(self):
    self.scores = []
    # We now have a dictionary of users.
    # Start with a 'row' of all zeros.
    adjacency = [0] * len(self.user_dict)
    # Adjacency matrix
    A = np.zeros(shape=(len(self.user_dict), len(self.user_dict)))
    # Keep track of A's rows
    outer_count = 0
    for mentioning_user in self.user_dict:
        inner_count = 0
        for mentioned_user in self.user_dict:
            if mentioned_user in self.user_dict[mentioning_user]['mentioned']:
                adjacency[inner_count] = 1
            else:
                adjacency[inner_count] = 0
            inner_count += 1
        A[outer_count] = adjacency
        outer_count += 1
    # Hub matrix A A^T and authority matrix A^T A
    self.scores = [np.dot(A, np.transpose(A)), np.dot(np.transpose(A), A)]
    users = list(self.user_dict.keys())
    print("Hub:")
    # Hub scores come from the principal eigenvector of A A^T
    w, v = LA.eig(np.dot(A, np.transpose(A)))
    i = np.real_if_close(w).argmax()
    principal = v[:, i]
    print(users[principal.argmax()])
    print("Authority:")
    # Authority scores come from the principal eigenvector of A^T A
    w, v = LA.eig(np.dot(np.transpose(A), A))
    i = np.real_if_close(w).argmax()
    principal = v[:, i]
    print(users[principal.argmax()])
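Example 2 computes HITS-style hub and authority scores: hubs come from the principal eigenvector of A·Aᵀ, authorities from Aᵀ·A, and np.real_if_close strips the negligible imaginary parts that a general eigensolver can return. Below is a self-contained sketch of the same pattern; the tiny "mentions" graph is made up for illustration.

import numpy as np

# Toy directed graph: A[i, j] = 1 means user i mentions user j.
A = np.array([[0, 1, 1],
              [0, 0, 1],
              [1, 0, 0]], dtype=float)

def principal_eigvec(M):
    # Eigenvector of the largest (real) eigenvalue; real_if_close drops
    # spurious imaginary parts before comparing eigenvalues.
    w, v = np.linalg.eig(M)
    i = np.real_if_close(w).argmax()
    return np.abs(np.real_if_close(v[:, i]))   # the sign of an eigenvector is arbitrary

hub_scores = principal_eigvec(np.dot(A, A.T))        # points at good authorities
authority_scores = principal_eigvec(np.dot(A.T, A))  # pointed at by good hubs
print("top hub:", hub_scores.argmax())               # user 0
print("top authority:", authority_scores.argmax())   # user 2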
Example 3: epsilon
def epsilon(a_one,a_two,gamma):
# laplacian
l_one = laplacian(a_one)
l_two = laplacian(a_two)
# eigenvalues
e_one = np.real_if_close(np.linalg.eigvals(l_one))
e_two = np.real_if_close(np.linalg.eigvals(l_two))
# sorting
e_one.sort()
e_two.sort()
# excluding smallest value (zero)
ee_one = np.real_if_close(e_one[1:],tol=1e-10)
ee_two = np.real_if_close(e_two[1:],tol=1e-10)
print(ee_one)
print(ee_two)
# omega
omega_one = np.sqrt([round(l, 10) for l in ee_one])
omega_two = np.sqrt([round(l, 10) for l in ee_two])
# K
K_one = 1 / rho_unnormalized_int(omega_k=omega_one, gamma=gamma)
K_two = 1 / rho_unnormalized_int(omega_k=omega_two, gamma=gamma)
# epsilon
return np.sqrt(density_diff_int(omega_one, omega_two, K_one, K_two, gamma))
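Example 3 compares the spectra of two graph Laplacians; np.real_if_close (with an absolute tolerance, since tol < 1) cleans up the eigenvalues before the zero mode of each connected graph is discarded. A standalone sketch of that preprocessing step on a made-up 4-node path graph:

import numpy as np

# Adjacency matrix of a 4-node path graph (illustrative only).
A = np.array([[0, 1, 0, 0],
              [1, 0, 1, 0],
              [0, 1, 0, 1],
              [0, 0, 1, 0]], dtype=float)
L = np.diag(A.sum(axis=1)) - A                       # combinatorial Laplacian

eigs = np.real_if_close(np.linalg.eigvals(L), tol=1e-10)
eigs.sort()
print(eigs[1:])   # nonzero eigenvalues; the smallest (zero) mode is dropped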
Example 4: doublewell_eigs
def doublewell_eigs(n_grid, lag_time=1, grad_potential=DOUBLEWELL_GRAD_POTENTIAL):
"""Analytic eigenvalues/eigenvectors for the doublwell system.
"""
import scipy.linalg
ONE_OVER_SQRT_2PI = 1.0 / (np.sqrt(2*np.pi))
normalpdf = lambda x: ONE_OVER_SQRT_2PI * np.exp(-0.5 * (x * x))
grid = np.linspace(-np.pi, np.pi, n_grid)
width = grid[1]-grid[0]
transmat = np.zeros((n_grid, n_grid))
for i, x_i in enumerate(grid):
for offset in range(-(n_grid-1), n_grid):
x_j = x_i + (offset * width)
j = _reflect_boundary_conditions(i+offset, 0, n_grid-1)
# What is the probability of going from x_i to x_j in one step?
diff = (x_j - x_i + DT * grad_potential(x_i)) / DT_SQRT_2D
transmat[i, j] += normalpdf(diff)
transmat[i, :] = transmat[i, :] / np.sum(transmat[i, :])
transmat = np.linalg.matrix_power(transmat, lag_time)
u, v = scipy.linalg.eig(transmat)
order = np.argsort(np.real(u))[::-1]
return np.real_if_close(u[order]), np.real_if_close(v[:, order])
Example 5: _check
def _check(self, res, ref):
if hasattr(res, "get_x"):
x = res.get_x()
for k in list(res.keys()):
if np.all(res[k] == x):
continue
elif np.any(np.iscomplex(res[k])) or np.any(np.iscomplex(ref[k])):
# Interpolate Re and Im of the results to compare.
x = x.reshape((-1,))
refx = ref[ref.x].reshape((-1,))
d1 = InterpolatedUnivariateSpline(x, np.real(res[k]).reshape((-1,)))
d2 = InterpolatedUnivariateSpline(refx, np.real(ref[k]).reshape((-1,)))
ok(d1(x), d2(x), rtol=self.er, atol=self.ea, msg=("Test %s FAILED (Re)" % self.test_id))
d1 = InterpolatedUnivariateSpline(x, np.imag(res[k]).reshape((-1,)))
d2 = InterpolatedUnivariateSpline(refx, np.imag(ref[k]).reshape((-1,)))
ok(d1(x), d2(x), rtol=self.er, atol=self.ea, msg=("Test %s FAILED (Im)" % self.test_id))
else:
# Interpolate the results to compare.
x = x.reshape((-1,))
refx = ref[ref.x].reshape((-1,))
d1 = InterpolatedUnivariateSpline(x, np.real_if_close(res[k]).reshape((-1,)))
d2 = InterpolatedUnivariateSpline(refx, np.real_if_close(ref[k]).reshape((-1,)))
ok(d1(x), d2(x), rtol=self.er, atol=self.ea, msg=("Test %s FAILED" % self.test_id))
elif isinstance(res, results.op_solution):
for k in list(res.keys()):
assert k in ref
ok(res[k], ref[k], rtol=self.er, atol=self.ea, msg=("Test %s FAILED" % self.test_id))
elif isinstance(res, results.pz_solution):
# recover the reference singularities from Re/Im data
ref_sing_keys = list(ref.keys())[:]
ref_sing_keys.sort()
assert len(ref_sing_keys) % 2 == 0
ref_sing = [
ref[ref_sing_keys[int(len(ref_sing_keys) / 2) + k]] + ref[ref_sing_keys[k]] * 1j
for k in range(int(len(ref_sing_keys) / 2))
]
ref_poles_num = len([k for k in ref.keys() if k[:4] == "Re(p"])
poles_ref, zeros_ref = ref_sing[:ref_poles_num], ref_sing[ref_poles_num:]
assert len(poles_ref) == len(res.poles)
pz._check_singularities(res.poles, poles_ref)
assert len(zeros_ref) == len(res.zeros)
pz._check_singularities(res.zeros, zeros_ref)
else:
if isinstance(res, list) or isinstance(res, tuple):
for i, j in zip(res, ref):
self._check(i, j)
elif res is not None:
for k in list(res.keys()):
assert k in ref
if isinstance(res[k], dict): # hence ref[k] will be a dict too
self._check(res[k], ref[k])
elif isinstance(ref[k], sympy.Basic) and isinstance(res[k], sympy.Basic):
# get rid of assumptions. Evaluate only expression
rf = parse_expr(str(ref[k]))
rs = parse_expr(str(res[k]))
assert (rs == rf) or (sympy.simplify(rf / rs) == 1)
else:
assert res[k] == ref[k]
Example 6: _fit_asymetric
def _fit_asymetric(self, counts):
rc = counts + self.prior_counts
transmat = rc.astype(float) / rc.sum(axis=1)[:, None]
u, lv = scipy.linalg.eig(transmat, left=True, right=False)
order = np.argsort(-np.real(u))
u = np.real_if_close(u[order])
lv = np.real_if_close(lv[:, order])
populations = lv[:, 0]
populations /= populations.sum(dtype=float)
return transmat, populations
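Example 6 reads the stationary populations off the leading left eigenvector of the (row-stochastic) transition matrix; real_if_close removes the zero-valued imaginary parts that scipy.linalg.eig reports even for a well-behaved matrix. A small self-contained sketch of that step with a made-up 2-state chain:

import numpy as np
import scipy.linalg

# Row-stochastic transition matrix of a toy 2-state chain (illustrative values).
transmat = np.array([[0.9, 0.1],
                     [0.2, 0.8]])

u, lv = scipy.linalg.eig(transmat, left=True, right=False)
order = np.argsort(-np.real(u))
u = np.real_if_close(u[order])
lv = np.real_if_close(lv[:, order])

populations = lv[:, 0]
populations /= populations.sum(dtype=float)
print(populations)              # approximately [0.667, 0.333]
print(populations @ transmat)   # unchanged: a stationary distribution satisfies pi T = pi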
Example 7: _solve_msm_eigensystem
def _solve_msm_eigensystem(transmat, k):
"""Find the dominant eigenpairs of an MSM transition matrix
Parameters
----------
transmat : np.ndarray, shape=(n_states, n_states)
The transition matrix
k : int
The number of eigenpairs to find.
Notes
-----
Normalize the left (:math:`\phi`) and right (:math:`\psi`) eigenfunctions
according to the following criteria.
* The first left eigenvector, \phi_1, _is_ the stationary
distribution, and thus should be normalized to sum to 1.
* The left-right eigenpairs should be biorthonormal:
<\phi_i, \psi_j> = \delta_{ij}
* The left eigenvectors should satisfy
<\phi_i, \phi_i>_{\mu^{-1}} = 1
* The right eigenvectors should satisfy <\psi_i, \psi_i>_{\mu} = 1
Returns
-------
eigvals : np.ndarray, shape=(k,)
The largest `k` eigenvalues
lv : np.ndarray, shape=(n_states, k)
The normalized left eigenvectors (:math:`\phi`) of ``transmat``
rv : np.ndarray, shape=(n_states, k)
The normalized right eigenvectors (:math:`\psi`) of ``transmat``
"""
u, lv, rv = scipy.linalg.eig(transmat, left=True, right=True)
order = np.argsort(-np.real(u))
u = np.real_if_close(u[order[:k]])
lv = np.real_if_close(lv[:, order[:k]])
rv = np.real_if_close(rv[:, order[:k]])
# first normalize the stationary distribution separately
lv[:, 0] = lv[:, 0] / np.sum(lv[:, 0])
for i in range(1, lv.shape[1]):
# the remaining left eigenvectors to satisfy
# <\phi_i, \phi_i>_{\mu^{-1}} = 1
lv[:, i] = lv[:, i] / np.sqrt(np.dot(lv[:, i], lv[:, i] / lv[:, 0]))
for i in range(rv.shape[1]):
# the right eigenvectors to satisfy <\phi_i, \psi_j> = \delta_{ij}
rv[:, i] = rv[:, i] / np.dot(lv[:, i], rv[:, i])
return u, lv, rv
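A hedged usage sketch for Example 7. It assumes _solve_msm_eigensystem as defined above is in scope (together with import numpy as np and import scipy.linalg); the 3-state transition matrix is made up. The checks mirror the normalization conventions spelled out in the docstring.

import numpy as np

# Toy reversible, row-stochastic transition matrix (illustrative values only).
T = np.array([[0.80, 0.15, 0.05],
              [0.15, 0.75, 0.10],
              [0.05, 0.10, 0.85]])

u, lv, rv = _solve_msm_eigensystem(T, k=3)

pi = lv[:, 0]                              # first left eigenvector = stationary distribution
print(np.isclose(pi.sum(), 1.0))           # phi_1 is normalized to sum to 1
print(np.allclose(pi @ T, pi))             # and it is indeed stationary
print(np.allclose(lv.T @ rv, np.eye(3)))   # biorthonormality: <phi_i, psi_j> = delta_ij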
Example 8: eigimages
def eigimages(self, n_eigs=None):
'''
Create eigenimages up to the n_eigs.
Parameters
----------
n_eigs : None or int
The number of eigenimages to create. When n_eigs is negative, the
last -n_eigs eigenimages are created. If None is given, the number
set in `~PCA.n_eigs` will be used.
Returns
-------
eigimgs : `~numpy.ndarray`
3D array, where the first dimension is the number of eigenvalues.
'''
if n_eigs is None:
n_eigs = self.n_eigs
if n_eigs > 0:
iterat = range(n_eigs)
elif n_eigs < 0:
# We're looking for the noisy components whenever n_eigs < 0
# Find where we have valid eigenvalues, and use the last
# n_eigs of those.
iterat = self._valid_eigenvectors()[n_eigs:]
for ct, idx in enumerate(iterat):
eigimg = np.zeros(self.data.shape[1:], dtype=float)
for channel in range(self.data.shape[0]):
if self._mean_sub:
mean_value = np.nanmean(self.data[channel])
eigimg += np.nan_to_num((self.data[channel] - mean_value) *
np.real_if_close(
self.eigvecs[channel, idx]))
else:
eigimg += np.nan_to_num(self.data[channel] *
np.real_if_close(
self.eigvecs[channel, idx]))
if ct == 0:
eigimgs = eigimg
else:
eigimgs = np.dstack((eigimgs, eigimg))
if eigimgs.ndim == 3:
return eigimgs.swapaxes(0, 2)
else:
return eigimgs
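Example 8 forms each eigenimage by weighting every spectral channel of a (channels, y, x) cube with one component of an eigenvector and summing, with np.real_if_close guarding the eigenvector entries. A self-contained sketch of that projection on a tiny random cube; the shapes and data are made up.

import numpy as np

rng = np.random.default_rng(0)
data = rng.normal(size=(5, 8, 8))     # toy (channels, y, x) cube

# Channel-by-channel covariance and its eigenvectors.
cov = np.cov(data.reshape(data.shape[0], -1))
eigvals, eigvecs = np.linalg.eig(cov)
order = np.argsort(np.real_if_close(eigvals))[::-1]
eigvecs = np.real_if_close(eigvecs[:, order])

# First eigenimage: weighted sum of the mean-subtracted channels.
eigimg = np.zeros(data.shape[1:])
for channel in range(data.shape[0]):
    mean_value = np.nanmean(data[channel])
    eigimg += np.nan_to_num((data[channel] - mean_value) * eigvecs[channel, 0])
print(eigimg.shape)                   # (8, 8)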
Example 9: _normalize_gain
def _normalize_gain(self):
"""
Normalize the gain factor so that the maximum of |H(f)| stays 1 or a
previously stored maximum value of |H(f)|. Do this every time a P or Z
has been changed.
Called by setModelData() and when cmbNorm is activated
"""
norm = qget_cmb_box(self.ui.cmbNorm, data=False)
self.ui.ledGain.setEnabled(norm == 'None')
if norm != self.norm_last:
qstyle_widget(self.ui.butSave, 'changed')
if not np.isfinite(self.zpk[2]):
self.zpk[2] = 1.
self.zpk[2] = np.real_if_close(self.zpk[2]).item()
if np.iscomplex(self.zpk[2]):
logger.warning("Casting complex to real for gain k!")
self.zpk[2] = np.abs(self.zpk[2])
if norm != "None":
b, a = zpk2tf(self.zpk[0], self.zpk[1], self.zpk[2])
[w, H] = freqz(b, a, whole=True)
Hmax = max(abs(H))
if not np.isfinite(Hmax) or Hmax > 1e4 or Hmax < 1e-4:
Hmax = 1.
if norm == "1":
self.zpk[2] = self.zpk[2] / Hmax # normalize to 1
elif norm == "Max":
if norm != self.norm_last: # setting has been changed -> 'Max'
self.Hmax_last = Hmax # use current design to set Hmax_last
self.zpk[2] = self.zpk[2] / Hmax * self.Hmax_last
self.norm_last = norm # store current setting of combobox
self._restore_gain()
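Example 9 converts the pole/zero/gain triple to polynomial form, evaluates |H(f)| with freqz, and rescales the gain so the magnitude response peaks at 1 (after real_if_close has cast a spuriously complex gain back to a real scalar). A standalone sketch of the same normalization using scipy.signal directly; the filter below is made up.

import numpy as np
from scipy.signal import zpk2tf, freqz

# Made-up digital filter: a conjugate pair of zeros and of poles, arbitrary gain.
z = np.array([0.5 + 0.1j, 0.5 - 0.1j])
p = np.array([0.9 * np.exp(1j * 0.3), 0.9 * np.exp(-1j * 0.3)])
k = np.real_if_close(2.0 + 0j).item()        # drop a spurious imaginary part of the gain

b, a = zpk2tf(z, p, k)
w, H = freqz(b, a, whole=True)
Hmax = max(abs(H))
if not np.isfinite(Hmax) or Hmax > 1e4 or Hmax < 1e-4:
    Hmax = 1.0

k_normalized = k / Hmax                      # with this gain, |H(f)| peaks at 1
print(k, Hmax, k_normalized)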
Example 10: BorueErukhimovich_Powerlaw
def BorueErukhimovich_Powerlaw(q, C, r0, s, t, nu):
"""Borue-Erukhimovich model ending in a power-law.
Inputs:
-------
``q``: independent variable
``C``: scaling factor
``r0``: typical el.stat. screening length
``s``: dimensionless charge concentration
``t``: dimensionless temperature
``nu``: excluded volume parameter
Formula:
--------
``C*(x^2+s)/((x^2+s)(x^2+t)+1)`` where ``x=q*r0`` if ``q<qsep``
``A*q^(-1/nu)`` if ``q>qsep``
``A`` and ``qsep`` are determined from conditions of smoothness at the
cross-over.
"""
def get_xsep(alpha, s, t):
A = alpha + 2
B = 2 * s * alpha + t * alpha + 4 * s
C = s * t * alpha + alpha + alpha * s ** 2 + alpha * s * t - 2 + 2 * s ** 2
D = alpha * s ** 2 * t + alpha * s
r = np.roots([A, B, C, D])
#print "get_xsep: ", alpha, s, t, r
return r[r > 0][0] ** 0.5
get_B = lambda C, xsep, s, t, nu: C * (xsep ** 2 + s) / ((xsep ** 2 + s) * (xsep ** 2 + t) + 1) * xsep ** (1.0 / nu)
x = q * r0
xsep = np.real_if_close(get_xsep(-1.0 / nu, s, t))
A = get_B(C, xsep, s, t, nu)
return np.piecewise(q, (x < xsep, x >= xsep),
(lambda a:BorueErukhimovich(a, C, r0, s, t),
lambda a:A * (a * r0) ** (-1.0 / nu)))
示例11: values
def values(self):
"""Get all of the results set's variables values."""
data = self.asmatrix()
values = [np.real_if_close(data[0, :])]
for i in range(1, data.shape[0]):
values.append(data[i, :])
return values
Example 12: b_conv
def b_conv(v1, v2):
w1 = dual.fft(v1[rand_perm[0]])
w2 = dual.fft(v2[rand_perm[1]])
return np.real_if_close(dual.ifft(w1 * w2))
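Example 12 binds two vectors by circular convolution in the Fourier domain (the rand_perm indices come from the surrounding project and are omitted here); the inverse FFT of the product of two real vectors' FFTs is real up to rounding noise, which np.real_if_close strips. A self-contained sketch, checked against the direct definition of circular convolution:

import numpy as np

rng = np.random.default_rng(1)
v1 = rng.normal(size=8)
v2 = rng.normal(size=8)

# Circular convolution via the convolution theorem.
bound = np.real_if_close(np.fft.ifft(np.fft.fft(v1) * np.fft.fft(v2)))

# The same thing computed directly from the definition.
n = len(v1)
direct = np.array([sum(v1[k] * v2[(i - k) % n] for k in range(n)) for i in range(n)])
print(np.allclose(bound, direct))   # True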
Example 13: __init__
def __init__(
    self,
    vectors=Matrix.eye,
    mean=numpy.zeros,
):
    mean = mean if callable(mean) else numpy.array(mean).flatten()[None, :]
    vectors = vectors if callable(vectors) else Matrix(vectors)
    stack = helpers.autoshape([
        [vectors],
        [mean],
    ], default=1)
    # unpack the stack into the object's parameters
    self.vectors = Matrix(numpy.real_if_close(stack[0, 0]))
    self.mean = Matrix(numpy.real_if_close(stack[1, 0]))
Example 14: test_spicefft
def test_spicefft():
"""Test fourier.spicefft() and printing.print_spicefft()"""
for w in (ahkab.options.RECT_WINDOW, ahkab.options.BART_WINDOW,
ahkab.options.HANN_WINDOW, ahkab.options.HAMM_WINDOW,
ahkab.options.BLACK_WINDOW, ahkab.options.HARRIS_WINDOW,
ahkab.options.KAISER_WINDOW):
fsp, Fsp, THDsp = ahkab.fourier.spicefft('vn1', r, window=w)
fsp, Fsp, THDsp = ahkab.fourier.spicefft('vn1', r, window='GAUSS', alpha=3000)
ahkab.printing.print_spicefft('vn1', fsp, Fsp, uformat='UNORM', window='GAUSS')
assert np.allclose(fsp, np.linspace(0, 512*1000, 512, endpoint=False))
assert len(fsp) == 512
assert len(Fsp) == 512
fsp, Fsp, THDsp = ahkab.fourier.spicefft('vn1', r, window='GAUSS', alpha=3000, np=4096)
assert len(fsp) == 2048
assert len(Fsp) == 2048
assert fsp[0] == 0
assert np.real(Fsp[0]) == np.real_if_close(Fsp[0])
f, F, THD = ahkab.fourier.spicefft('vn1', r, 10e3)
ahkab.printing.print_spicefft('vn1', f, F, THD, outfile='stdout')
f, F, THD = ahkab.fourier.spicefft('vn1', r, 10e3, fmin=20e3, fmax=50e3)
assert np.allclose(f, np.array([0., 20000., 30000., 40000., 50000.]))
ahkab.printing.print_spicefft('vn1', f, F, THD, outfile='stdout')
f1, F1, THD1 = ahkab.fourier.spicefft('vn1', r, 10e3, window='HANN', **{'from':.1e-3, 'to':.4e-3})
f2, F2, THD1 = ahkab.fourier.spicefft('vn1', r, 10e3, window='HANN', **{'start':.1e-3, 'stop':.4e-3})
assert np.allclose(f1, f2)
assert np.allclose(F1, F2)
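One assertion in Example 14 checks that the DC bin of the computed spectrum is purely real by comparing np.real(Fsp[0]) with np.real_if_close(Fsp[0]). A tiny standalone sketch of the same check with numpy's own FFT; the test signal is made up.

import numpy as np

t = np.linspace(0.0, 1e-3, 1024, endpoint=False)
signal = 0.5 + np.sin(2 * np.pi * 10e3 * t)    # DC offset plus a 10 kHz tone

spectrum = np.fft.fft(signal)
# The DC bin of a real signal's FFT carries no imaginary part, so
# real_if_close reduces it to a plain real value.
assert np.real(spectrum[0]) == np.real_if_close(spectrum[0])
print(np.real_if_close(spectrum[0]) / len(signal))   # ~0.5, the mean of the signal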
Example 15: make_lcp
def make_lcp(A,F=0.25):
"""
Copyright 2012, Michael Andersen, DIKU, michael (at) diku (dot) dk
"""
# [x, b] = MAKE_LCP(A): Make LCP
#
# INPUT:
#
# A -- The coefficient matrix in the LCP
# F -- The fraction of zero-values in the x-solution.
#
# OUTPUT:
#
# x -- A solution for the LCP problem.
# b -- The right hand side vector
#
# Port of Kenny Erleben, DIKU 2011 Matlab code to Python
##### Get the number of variables ##########
N = np.size(A,0) # Pick a dimension, it should be NxN
##### Generate a random LCP solution ##########
x = np.random.uniform(0,1,(N,1))
x[x < F] = 0
# x = np.real_if_close(x)
##### Generate a right-hand-side vector that is ##########
##### consistent with a random solution ##########
b = np.zeros((N,1))
s = np.real_if_close(np.dot(A,x))
b[x>0] = -s[x>0]
return (x, b)
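A hedged usage sketch for Example 15, assuming make_lcp above is defined in scope and numpy has been imported as np. It builds a random symmetric positive-definite coefficient matrix (one standard way to obtain a solvable LCP), generates the pair, and checks the complementarity that the construction guarantees: wherever x_i > 0 the slack w_i = (A x + b)_i is zero.

import numpy as np

np.random.seed(0)
N = 10
M = np.random.uniform(-1.0, 1.0, (N, N))
A = np.dot(M, M.T) + N * np.eye(N)        # symmetric positive definite

x, b = make_lcp(A, F=0.25)

w = np.dot(A, x) + b                                   # slack vector of the LCP
print(np.all(x >= 0))                                  # solution is nonnegative
print(np.isclose(np.dot(x.ravel(), w.ravel()), 0.0))   # complementarity: x . w = 0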