This article collects typical usage examples of the scipy.ones method in Python. If you are unsure what scipy.ones does, how to call it, or want to see it used in real code, the hand-picked examples below should help. You can also browse further usage examples from the scipy namespace in which the method lives.
The 15 code examples of scipy.ones shown below are ordered by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code samples.
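Before the examples, a minimal, self-contained sketch of the call itself may help. Note that scipy.ones is simply a re-exported alias of numpy.ones; the alias has been deprecated and is absent from recent SciPy releases (the exact cutoff depends on your installed version), so the fallback below is a portability guard, not official API guidance.
import numpy as np

try:
    import scipy
    ones = scipy.ones              # old SciPy releases re-exported NumPy's ones
except (ImportError, AttributeError):
    ones = np.ones                 # alias no longer present; use NumPy directly

a = ones((3, 2))                   # 3x2 float array filled with 1.0
b = ones(5, dtype=int)             # length-5 integer vector of ones
print(a.shape, b.dtype)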
Example 1: test_download
# Required import: import scipy [as alias]
# Or: from scipy import ones [as alias]
def test_download(tmpdata):
    """Test that fetch_mldata is able to download and cache a data set."""
    _urlopen_ref = datasets.mldata.urlopen
    datasets.mldata.urlopen = mock_mldata_urlopen({
        'mock': {
            'label': sp.ones((150,)),
            'data': sp.ones((150, 4)),
        },
    })
    try:
        mock = assert_warns(DeprecationWarning, fetch_mldata,
                            'mock', data_home=tmpdata)
        for n in ["COL_NAMES", "DESCR", "target", "data"]:
            assert_in(n, mock)
        assert_equal(mock.target.shape, (150,))
        assert_equal(mock.data.shape, (150, 4))
        assert_raises(datasets.mldata.HTTPError,
                      assert_warns, DeprecationWarning,
                      fetch_mldata, 'not_existing_name')
    finally:
        datasets.mldata.urlopen = _urlopen_ref
Example 2: _flat_field
# Required import: import scipy [as alias]
# Or: from scipy import ones [as alias]
def _flat_field(X, uniformity_thresh):
    """."""
    Xhoriz = _low_frequency_horiz(X, sigma=4.0)
    Xhorizp = _low_frequency_horiz(X, sigma=3.0)
    nl, nb, nc = X.shape
    FF = s.zeros((nb, nc))
    use_ff = s.ones((X.shape[0], X.shape[2])) > 0
    for b in range(nb):
        xsub = Xhoriz[:, b, :]
        xsubp = Xhorizp[:, b, :]
        mu = xsub.mean(axis=0)
        dists = abs(xsub - mu)
        distsp = abs(xsubp - mu)
        thresh = _percentile(dists.flatten(), 90.0)
        uthresh = dists * uniformity_thresh
        #use = s.logical_and(dists<thresh, abs(dists-distsp) < uthresh)
        use = dists < thresh
        FF[b, :] = ((xsub*use).sum(axis=0)/use.sum(axis=0)) / \
                   ((X[:, b, :]*use).sum(axis=0)/use.sum(axis=0))
        use_ff = s.logical_and(use_ff, use)
    return FF, Xhoriz, Xhorizp, s.array(use_ff)
Example 3: __MR_boundary_indictor
# Required import: import scipy [as alias]
# Or: from scipy import ones [as alias]
def __MR_boundary_indictor(self, labels):
    s = sp.amax(labels) + 1
    up_indictor = (sp.ones((s, 1))).astype(float)
    right_indictor = (sp.ones((s, 1))).astype(float)
    low_indictor = (sp.ones((s, 1))).astype(float)
    left_indictor = (sp.ones((s, 1))).astype(float)
    upper_ids = sp.unique(labels[0, :]).astype(int)
    right_ids = sp.unique(labels[:, labels.shape[1]-1]).astype(int)
    low_ids = sp.unique(labels[labels.shape[0]-1, :]).astype(int)
    left_ids = sp.unique(labels[:, 0]).astype(int)
    up_indictor[upper_ids] = 0.0
    right_indictor[right_ids] = 0.0
    low_indictor[low_ids] = 0.0
    left_indictor[left_ids] = 0.0
    return up_indictor, right_indictor, low_indictor, left_indictor
Example 4: test_download
# Required import: import scipy [as alias]
# Or: from scipy import ones [as alias]
def test_download():
    """Test that fetch_mldata is able to download and cache a data set."""
    _urlopen_ref = datasets.mldata.urlopen
    datasets.mldata.urlopen = mock_mldata_urlopen({
        'mock': {
            'label': sp.ones((150,)),
            'data': sp.ones((150, 4)),
        },
    })
    try:
        mock = fetch_mldata('mock', data_home=tmpdir)
        for n in ["COL_NAMES", "DESCR", "target", "data"]:
            assert_in(n, mock)
        assert_equal(mock.target.shape, (150,))
        assert_equal(mock.data.shape, (150, 4))
        assert_raises(datasets.mldata.HTTPError,
                      fetch_mldata, 'not_existing_name')
    finally:
        datasets.mldata.urlopen = _urlopen_ref
Example 5: ElasticRod
# Required import: import scipy [as alias]
# Or: from scipy import ones [as alias]
def ElasticRod(n):
    # Fixed-free elastic rod: build the stiffness (A) and mass (B) matrices
    L = 1.0
    le = L/n
    rho = 7.85e3
    S = 1.e-4
    E = 2.1e11
    mass = rho*S*le/6.
    k = E*S/le
    A = k*(diag(r_[2.*ones(n-1), 1]) - diag(ones(n-1), 1) - diag(ones(n-1), -1))
    B = mass*(diag(r_[4.*ones(n-1), 2]) + diag(ones(n-1), 1) + diag(ones(n-1), -1))
    return A, B
Example 6: test_trivial
# Required import: import scipy [as alias]
# Or: from scipy import ones [as alias]
def test_trivial():
    n = 5
    X = ones((n, 1))
    A = eye(n)
    compare_solutions(A, None, n)
Example 7: test_regression
# Required import: import scipy [as alias]
# Or: from scipy import ones [as alias]
def test_regression():
    # https://mail.python.org/pipermail/scipy-user/2010-October/026944.html
    n = 10
    X = np.ones((n, 1))
    A = np.identity(n)
    w, V = lobpcg(A, X)
    assert_allclose(w, [1])
Example 8: __init__
# Required import: import scipy [as alias]
# Or: from scipy import ones [as alias]
def __init__(self, distance_pairs):
    self.data = distance_pairs
    self._n = len(self.data)
    self._processed = scipy.zeros((self._n, 1), dtype=bool)
    self._reachability = scipy.ones(self._n) * scipy.inf
    self._core_dist = scipy.ones(self._n) * scipy.nan
    self._index = scipy.array(range(self._n))
    self._nneighbors = scipy.ones(self._n, dtype=int) * self._n
    self._cluster_id = -scipy.ones(self._n, dtype=int)
    self._is_core = scipy.ones(self._n, dtype=bool)
    self._ordered_list = []
Example 9: __MR_get_adj_loop
# Required import: import scipy [as alias]
# Or: from scipy import ones [as alias]
def __MR_get_adj_loop(self, labels):
    s = sp.amax(labels) + 1
    # np.bool was removed from recent NumPy releases; use the builtin bool instead
    adj = np.ones((s, s), dtype=bool)
    for i in range(labels.shape[0] - 1):
        for j in range(labels.shape[1] - 1):
            if labels[i, j] != labels[i+1, j]:
                adj[labels[i, j], labels[i+1, j]] = False
                adj[labels[i+1, j], labels[i, j]] = False
            if labels[i, j] != labels[i, j+1]:
                adj[labels[i, j], labels[i, j+1]] = False
                adj[labels[i, j+1], labels[i, j]] = False
            if labels[i, j] != labels[i+1, j+1]:
                adj[labels[i, j], labels[i+1, j+1]] = False
                adj[labels[i+1, j+1], labels[i, j]] = False
            if labels[i+1, j] != labels[i, j+1]:
                adj[labels[i+1, j], labels[i, j+1]] = False
                adj[labels[i, j+1], labels[i+1, j]] = False
    upper_ids = sp.unique(labels[0, :]).astype(int)
    right_ids = sp.unique(labels[:, labels.shape[1]-1]).astype(int)
    low_ids = sp.unique(labels[labels.shape[0]-1, :]).astype(int)
    left_ids = sp.unique(labels[:, 0]).astype(int)
    bd = np.append(upper_ids, right_ids)
    bd = np.append(bd, low_ids)
    bd = sp.unique(np.append(bd, left_ids))
    for i in range(len(bd)):
        for j in range(i + 1, len(bd)):
            adj[bd[i], bd[j]] = False
            adj[bd[j], bd[i]] = False
    return adj
Example 10: pixMorphSequence_mask_seed_fill_holes
# Required import: import scipy [as alias]
# Or: from scipy import ones [as alias]
def pixMorphSequence_mask_seed_fill_holes(self, I):
    Imask = self.reduction_T_1(I)
    Imask = self.reduction_T_1(Imask)
    Imask = ndimage.binary_fill_holes(Imask)
    Iseed = self.reduction_T_4(Imask)
    Iseed = self.reduction_T_3(Iseed)
    mask = array(ones((5, 5)), dtype=int)
    Iseed = ndimage.binary_opening(Iseed, mask)
    Iseed = self.expansion(Iseed, Imask.shape)
    return Imask, Iseed
Example 11: pixSeedfillBinary
# Required import: import scipy [as alias]
# Or: from scipy import ones [as alias]
def pixSeedfillBinary(self, Imask, Iseed):
    Iseedfill = copy.deepcopy(Iseed)
    s = ones((3, 3))
    Ijmask, k = ndimage.label(Imask, s)
    Ijmask2 = Ijmask * Iseedfill
    A = list(unique(Ijmask2))
    A.remove(0)
    for i in range(0, len(A)):
        x, y = where(Ijmask == A[i])
        Iseedfill[x, y] = 1
    return Iseedfill
Example 12: tvardry
# Required import: import scipy [as alias]
# Or: from scipy import ones [as alias]
def tvardry(rho=scipy.array([]),
            cp=scipy.array([]),
            T=scipy.array([]),
            sigma_t=scipy.array([]),
            z=float(),
            d=0.0):
    '''Function to calculate the sensible heat flux (H, in W/m2) from high
    frequency temperature measurements and its standard deviation.

    Source: H.F. Vugts, M.J. Waterloo, F.J. Beekman, K.F.A. Frumau and L.A.
    Bruijnzeel. The temperature variance method: a powerful tool in the
    estimation of actual evaporation rates. In J. S. Gladwell, editor,
    Hydrology of Warm Humid Regions, Proc. of the Yokohama Symp., IAHS
    Publication No. 216, pages 251-260, July 1993.

    NOTE: This function holds only for free convective conditions when
    C2*z/L >> 1, where L is the Obukhov length.

    Input:
    - rho: (array of) air density values [kg m-3]
    - cp: (array of) specific heat at constant pressure values [J kg-1 K-1]
    - T: (array of) temperature data [Celsius]
    - sigma_t: (array of) standard deviation of temperature data [Celsius]
    - z: temperature measurement height above the surface [m]
    - d: displacement height due to vegetation, default is zero [m]

    Output:
    - H: (array of) sensible heat flux [W/m2]

    Example:
    >>> H = tvardry(rho, cp, T, sigma_t, z, d)
    >>> H
    35.139511191461651
    '''
    k = 0.40   # von Karman constant
    g = 9.81   # acceleration due to gravity [m/s^2]
    C1 = 2.9   # De Bruin et al., 1992
    C2 = 28.4  # De Bruin et al., 1992
    # L = Obukhov length [m]
    # Free convection limit
    H = rho * cp * scipy.sqrt((sigma_t/C1)**3 * k * g * (z-d) / (T+273.15) * C2)
    # else:
    # including stability correction
    # zoverL = z/L
    # tvardry = rho * cp * scipy.sqrt((sigma_t/C1)**3 * k*g*(z-d) / (T+273.15) *
    #           (1-C2*z/L)/(-1*z/L))
    # Check if we get complex numbers (square root of negative value) and remove
    # I = find(zoL >= 0 | H.imag != 0);
    # H(I) = scipy.ones(size(I))*NaN;
    return H  # sensible heat flux
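For reference, here is a minimal usage sketch of the function above. The density, heat capacity, temperature, and temperature standard deviation values are made up for demonstration only, and the call assumes the deprecated scipy.* NumPy aliases used inside tvardry are still available in your SciPy installation.
import numpy as np

# Illustrative inputs only: rho [kg m-3], cp [J kg-1 K-1], T [degC], sigma_t [K]
rho = np.array([1.15])
cp = np.array([1005.0])
T = np.array([25.0])
sigma_t = np.array([0.5])

H = tvardry(rho=rho, cp=cp, T=T, sigma_t=sigma_t, z=3.0, d=0.0)
print(H)   # one-element array holding the sensible heat flux in W m-2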
Example 13: _check_fiedler
# Required import: import scipy [as alias]
# Or: from scipy import ones [as alias]
def _check_fiedler(n, p):
    # This is not necessarily the recommended way to find the Fiedler vector.
    np.random.seed(1234)
    col = np.zeros(n)
    col[1] = 1
    A = toeplitz(col)
    D = np.diag(A.sum(axis=1))
    L = D - A
    # Compute the full eigendecomposition using tricks, e.g.
    # http://www.cs.yale.edu/homes/spielman/561/2009/lect02-09.pdf
    tmp = np.pi * np.arange(n) / n
    analytic_w = 2 * (1 - np.cos(tmp))
    analytic_V = np.cos(np.outer(np.arange(n) + 1/2, tmp))
    _check_eigen(L, analytic_w, analytic_V)
    # Compute the full eigendecomposition using eigh.
    eigh_w, eigh_V = eigh(L)
    _check_eigen(L, eigh_w, eigh_V)
    # Check that the first eigenvalue is near zero and that the rest agree.
    assert_array_less(np.abs([eigh_w[0], analytic_w[0]]), 1e-14)
    assert_allclose(eigh_w[1:], analytic_w[1:])
    # Check small lobpcg eigenvalues.
    X = analytic_V[:, :p]
    lobpcg_w, lobpcg_V = lobpcg(L, X, largest=False)
    assert_equal(lobpcg_w.shape, (p,))
    assert_equal(lobpcg_V.shape, (n, p))
    _check_eigen(L, lobpcg_w, lobpcg_V)
    assert_array_less(np.abs(np.min(lobpcg_w)), 1e-14)
    assert_allclose(np.sort(lobpcg_w)[1:], analytic_w[1:p])
    # Check large lobpcg eigenvalues.
    X = analytic_V[:, -p:]
    lobpcg_w, lobpcg_V = lobpcg(L, X, largest=True)
    assert_equal(lobpcg_w.shape, (p,))
    assert_equal(lobpcg_V.shape, (n, p))
    _check_eigen(L, lobpcg_w, lobpcg_V)
    assert_allclose(np.sort(lobpcg_w), analytic_w[-p:])
    # Look for the Fiedler vector using good but not exactly correct guesses.
    fiedler_guess = np.concatenate((np.ones(n//2), -np.ones(n-n//2)))
    X = np.vstack((np.ones(n), fiedler_guess)).T
    lobpcg_w, lobpcg_V = lobpcg(L, X, largest=False)
    # Mathematically, the smaller eigenvalue should be zero
    # and the larger should be the algebraic connectivity.
    lobpcg_w = np.sort(lobpcg_w)
    assert_allclose(lobpcg_w, analytic_w[:2], atol=1e-14)
Example 14: test_Gillespie_complex_contagion
# Required import: import scipy [as alias]
# Or: from scipy import ones [as alias]
def test_Gillespie_complex_contagion(self):

    def transition_rate(G, node, status, parameters):
        # This function needs to return the rate at which ``node`` changes status.
        r = parameters[0]
        if status[node] == 'S' and len([nbr for nbr in G.neighbors(node) if status[nbr] == 'I']) > 1:
            return 1
        else:  # status[node] might be 'I', or the number of infected neighbors might be 0 or 1.
            return 0

    def transition_choice(G, node, status, parameters):
        # This function needs to return the new status of the node. We assume going
        # in that we have already calculated that it is changing status.
        #
        # This function could be more elaborate if there were different possible
        # transitions. However, for this model the 'I' nodes aren't changing status,
        # and the 'S' ones are changing to 'I'. So if we're in this function, the
        # node must be 'S' and becoming 'I'.
        return 'I'

    def get_influence_set(G, node, status, parameters):
        # This function needs to return any node whose rate might change because
        # ``node`` has just changed status; that is, which nodes might ``node``
        # influence?
        #
        # For our models the only nodes a node might affect are its susceptible neighbors.
        return {nbr for nbr in G.neighbors(node) if status[nbr] == 'S'}

    parameters = (2,)  # This is the threshold. Note the comma: it is needed for
                       # Python to treat this as a 1-tuple rather than a plain number,
                       # because ``parameters`` is passed as a tuple.
    N = 60000
    deg_dist = [2, 4, 6] * int(N / 3)
    G = nx.configuration_model(deg_dist)
    for rho in np.linspace(3. / 80, 7. / 80, 8):  # 8 values from 3/80 to 7/80.
        print(rho)
        IC = defaultdict(lambda: 'S')
        for node in G.nodes():
            if np.random.random() < rho:  # there are faster ways to do this random selection
                IC[node] = 'I'
        t, S, I = EoN.Gillespie_complex_contagion(G, transition_rate, transition_choice,
                                                  get_influence_set, IC,
                                                  return_statuses=('S', 'I'),
                                                  parameters=parameters)
        plt.plot(t, I)
    plt.savefig('test_Gillespie_complex_contagion')
Example 15: remap
# Required import: import scipy [as alias]
# Or: from scipy import ones [as alias]
def remap(inputfile, labels, outputfile, flag, chunksize):
    """."""
    ref_file = inputfile
    lbl_file = labels
    out_file = outputfile
    nchunk = chunksize
    ref_img = envi.open(ref_file+'.hdr', ref_file)
    ref_meta = ref_img.metadata
    ref_mm = ref_img.open_memmap(interleave='source', writable=False)
    ref = s.array(ref_mm[:, :])
    lbl_img = envi.open(lbl_file+'.hdr', lbl_file)
    lbl_meta = lbl_img.metadata
    labels = lbl_img.read_band(0)
    nl = int(lbl_meta['lines'])
    ns = int(lbl_meta['samples'])
    nb = int(ref_meta['bands'])
    out_meta = dict([(k, v) for k, v in ref_meta.items()])
    out_meta["samples"] = ns
    out_meta["bands"] = nb
    out_meta["lines"] = nl
    out_meta['data type'] = ref_meta['data type']
    out_meta["interleave"] = "bil"
    out_img = envi.create_image(out_file+'.hdr', metadata=out_meta,
                                ext='', force=True)
    out_mm = out_img.open_memmap(interleave='source', writable=True)
    # Iterate through image "chunks," restoring as we go
    for lstart in s.arange(0, nl, nchunk):
        print(lstart)
        del out_mm
        out_mm = out_img.open_memmap(interleave='source', writable=True)
        # Which labels will we extract? ignore zero index
        lend = min(lstart+nchunk, nl)
        lbl = labels[lstart:lend, :]
        out = flag * s.ones((lbl.shape[0], nb, lbl.shape[1]))
        for row in range(lbl.shape[0]):
            for col in range(lbl.shape[1]):
                out[row, :, col] = s.squeeze(ref[int(lbl[row, col]), :])
        out_mm[lstart:lend, :, :] = out