This article collects typical usage examples of the scipy.empty function in Python. If you have been wondering what scipy.empty does, how to use it, or where to find working examples, the hand-picked code samples below may help.
Below are 15 code examples of the empty function, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better Python code samples.
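Note that in older SciPy releases, scipy simply re-exported NumPy's array constructors, so scipy.empty is the same function as numpy.empty: it allocates an array without initializing its contents. Newer SciPy versions removed these aliases, so numpy.empty is the portable spelling. A minimal sketch:

import numpy as np

# numpy.empty allocates without zeroing: the contents are whatever bytes
# happened to be in the buffer, so always overwrite before reading.
a = np.empty((2, 3), dtype=float)
a[...] = 0.0
print(a.shape, a.dtype)   # (2, 3) float64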
Example 1: v1like_filter
def v1like_filter(hin, conv_mode, filterbank, use_cache=False):
    """ V1LIKE linear filtering
    Perform separable convolutions on an image with a set of filters
    Inputs:
      hin -- input image (a 2-dimensional array)
      filterbank -- TODO list of tuples with 1d filters (row, col)
                    used to perform separable convolution
      use_cache -- Boolean, use internal fft_cache (works _well_ if the input
                   shapes don't vary much, otherwise you'll blow away the memory)
    Outputs:
      hout -- a 3-dimensional array with outputs of the filters
              (width X height X n_filters)
    """
    nfilters = len(filterbank)
    filt0 = filterbank[0]
    fft_shape = N.array(hin.shape) + N.array(filt0.shape) - 1
    hin_fft = scipy.signal.fftn(hin, fft_shape)
    if conv_mode == "valid":
        hout_shape = list(N.array(hin.shape[:2]) - N.array(filt0.shape[:2]) + 1) + [nfilters]
        hout_new = N.empty(hout_shape, 'f')
        begy = filt0.shape[0]
        endy = begy + hout_shape[0]
        begx = filt0.shape[1]
        endx = begx + hout_shape[1]
    elif conv_mode == "same":
        hout_shape = hin.shape[:2] + (nfilters,)
        hout_new = N.empty(hout_shape, 'f')
        begy = filt0.shape[0] / 2
        endy = begy + hout_shape[0]
        begx = filt0.shape[1] / 2
        endx = begx + hout_shape[1]
    else:
        raise NotImplementedError
    for i in xrange(nfilters):
        filt = filterbank[i]
        if use_cache:
            key = (filt.tostring(), tuple(fft_shape))
            if key in fft_cache:
                filt_fft = fft_cache[key]
            else:
                filt_fft = scipy.signal.fftn(filt, fft_shape)
                fft_cache[key] = filt_fft
        else:
            filt_fft = scipy.signal.fftn(filt, fft_shape)
        res_fft = scipy.signal.ifftn(hin_fft * filt_fft)
        res_fft = res_fft[begy:endy, begx:endx]
        hout_new[:, :, i] = N.real(res_fft)
    hout = hout_new
    return hout
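The function's core trick is multiplying zero-padded spectra in the frequency domain, then cropping the result. A self-contained sketch of the same idea using numpy.fft (the names image and kern are illustrative, and scipy.signal.fftconvolve serves only as a cross-check):

import numpy as np
from scipy import signal

rng = np.random.RandomState(0)
image = rng.rand(32, 32)
kern = rng.rand(5, 5)

# Linear convolution via zero-padded FFTs: pad both operands to
# shape(image) + shape(kern) - 1, multiply spectra, invert.
fft_shape = tuple(np.array(image.shape) + np.array(kern.shape) - 1)
full = np.real(np.fft.ifftn(np.fft.fftn(image, fft_shape) *
                            np.fft.fftn(kern, fft_shape)))

# Crop the centre to get "same"-mode output, as v1like_filter does.
begy, begx = kern.shape[0] // 2, kern.shape[1] // 2
same = full[begy:begy + image.shape[0], begx:begx + image.shape[1]]

# Cross-check against scipy's reference implementation.
print(np.allclose(same, signal.fftconvolve(image, kern, mode="same")))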
Example 2: find_direction
def find_direction(self, grad_diffs, steps, grad, hessian_diag, idxs):
    grad = grad.copy()  # We will change this.
    n_current_factors = len(idxs)
    # TODO: find a good name for this variable.
    rho = scipy.empty(n_current_factors)
    # TODO: vectorize this function
    for i in idxs:
        rho[i] = 1 / scipy.inner(grad_diffs[i], steps[i])
    # TODO: find a good name for this variable as well.
    alpha = scipy.empty(n_current_factors)
    for i in idxs[::-1]:
        alpha[i] = rho[i] * scipy.inner(steps[i], grad)
        grad -= alpha[i] * grad_diffs[i]
    z = hessian_diag * grad
    # TODO: find a good name for this variable (surprise!)
    beta = scipy.empty(n_current_factors)
    for i in idxs:
        beta[i] = rho[i] * scipy.inner(grad_diffs[i], z)
        z += steps[i] * (alpha[i] - beta[i])
    return z, {}
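This is the standard L-BFGS two-loop recursion, which applies the inverse-Hessian approximation implied by the stored (step, gradient-difference) pairs to a gradient. A self-contained sketch (the helper two_loop below is a rewrite for illustration, not the project's API); the update enforces the most recent secant condition H y = s exactly, which gives a cheap correctness check:

import numpy as np

def two_loop(grad, steps, grad_diffs, h0):
    """L-BFGS two-loop recursion: approximates H^{-1} grad from (s, y) pairs."""
    q = grad.copy()
    rho = [1.0 / np.inner(y, s) for s, y in zip(steps, grad_diffs)]
    alpha = [0.0] * len(steps)
    for i in reversed(range(len(steps))):
        alpha[i] = rho[i] * np.inner(steps[i], q)
        q -= alpha[i] * grad_diffs[i]
    z = h0 * q
    for i in range(len(steps)):
        beta = rho[i] * np.inner(grad_diffs[i], z)
        z += steps[i] * (alpha[i] - beta)
    return z

rng = np.random.RandomState(1)
A = rng.rand(4, 4)
A = A @ A.T + 4 * np.eye(4)              # SPD Hessian of a toy quadratic
steps = [rng.rand(4) for _ in range(5)]  # s_i
grad_diffs = [A @ s for s in steps]      # y_i = A s_i for a quadratic
# The recursion satisfies the latest secant condition H y = s exactly:
print(np.allclose(two_loop(grad_diffs[-1], steps, grad_diffs, 1.0), steps[-1]))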
Example 3: setUp
def setUp(self):
    # Make a positive definite noise matrix, clean map, and dirty_map.
    self.nra = 10
    self.ndec = 5
    self.nf = 20
    self.shape = (self.nf, self.nra, self.ndec)
    self.size = self.nra * self.ndec * self.nf
    # Clean map.
    clean_map = sp.empty(self.shape, dtype=float)
    clean_map = al.make_vect(clean_map, axis_names=('freq', 'ra', 'dec'))
    clean_map[...] = sp.sin(sp.arange(self.nf))[:, None, None]
    clean_map *= sp.cos(sp.arange(self.nra))[:, None]
    clean_map *= sp.cos(sp.arange(self.ndec))
    # Noise inverse matrix.
    noise_inv = sp.empty(self.shape * 2, dtype=float)
    noise_inv = al.make_mat(noise_inv, axis_names=('freq', 'ra', 'dec') * 2,
                            row_axes=(0, 1, 2), col_axes=(3, 4, 5))
    rand_mat = rand.randn(*((self.size,) * 2))
    information_factor = 1.e6  # K**-2
    rand_mat = sp.dot(rand_mat, rand_mat.transpose()) * information_factor
    noise_inv.flat[...] = rand_mat.flat
    # Dirty map.
    dirty_map = al.partial_dot(noise_inv, clean_map)
    # Store in self.
    self.clean_map = clean_map
    self.noise_inv = noise_inv
    self.dirty_map = dirty_map
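The rand_mat construction relies on the fact that A·Aᵀ is always symmetric positive semidefinite, and positive definite whenever A has full rank (which a square Gaussian random matrix has with probability 1). A small sketch verifying this:

import numpy as np

rng = np.random.RandomState(0)
A = rng.randn(50, 50)
M = A @ A.T                       # Gram matrix: symmetric by construction
print(np.allclose(M, M.T))        # True
print(np.linalg.eigvalsh(M).min() > 0)   # all eigenvalues strictly positive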
Example 4: deref_array
def deref_array(data, file):
    """Take an array of references and dereference them"""
    ret = sp.empty(shape=data.shape, dtype='object')
    if len(data.shape) > 1:
        for i in xrange(data.shape[0]):
            for j in xrange(data.shape[1]):
                ref = data[i, j]
                dref = h5py.h5r.dereference(ref, file._id)
                if isinstance(dref, h5py.h5g.GroupID):
                    ret[i, j] = get_data(dref)
                else:
                    ret[i, j] = sp.empty(dref.shape, dtype=dref.dtype)
                    dref.read(h5py.h5s.ALL, h5py.h5s.ALL, ret[i, j])
                    ret[i, j] = ret[i, j].T
                if isinstance(ret[i, j], sp.ndarray):
                    shp = ret[i, j].shape
                    if len(shp) == 2 and isinstance(ret[i, j][0, 0], h5py.h5r.Reference):
                        ret[i, j] = deref_array(ret[i, j], file)
                    elif len(shp) == 1 and isinstance(ret[i, j][0], h5py.h5r.Reference):
                        ret[i, j] = deref_array(ret[i, j], file)
    else:
        for i in xrange(data.shape[0]):
            ref = data[i]
            dref = h5py.h5r.dereference(ref, file._id)
            ret[i] = sp.empty(dref.shape, dtype=dref.dtype)
            dref.read(h5py.h5s.ALL, h5py.h5s.ALL, ret[i])
            ret[i] = ret[i].T
            if isinstance(ret[i], sp.ndarray):
                shp = ret[i].shape
                if len(shp) == 2 and isinstance(ret[i][0, 0], h5py.h5r.Reference):
                    ret[i] = deref_array(ret[i], file)
                elif len(shp) == 1 and isinstance(ret[i][0], h5py.h5r.Reference):
                    ret[i] = deref_array(ret[i], file)
    return ret
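The example above works with h5py's low-level h5r/h5s machinery. For orientation, here is a sketch of the same dereferencing idea using the high-level API (the filename and layout are made up; h5py.ref_dtype requires a reasonably recent h5py):

import h5py
import numpy as np

# Build a small file containing a dataset and an array of object
# references pointing at it.
with h5py.File("refs_demo.h5", "w") as f:
    dset = f.create_dataset("payload", data=np.arange(6).reshape(2, 3))
    refs = f.create_dataset("refs", (2,), dtype=h5py.ref_dtype)
    refs[0] = dset.ref
    refs[1] = dset.ref

with h5py.File("refs_demo.h5", "r") as f:
    for ref in f["refs"][...]:
        target = f[ref]              # high-level dereference
        print(target.name, target[...].sum())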
Example 5: block_structure5
def block_structure5(T):
    """
    computes the block structure of the upper quasi-triangular matrix T
    m is the number of diagonal blocks
    bb is the array containing the begin of each block
    eb is the array containing the end of each block + 1
    s is an array containing the sizes of the diagonal blocks
    """
    n = len(T)
    tol = 1e-15
    i, j = 0, 0
    bb = sp.empty(n, dtype="int")
    eb = sp.empty(n, dtype="int")
    s = sp.empty(n, dtype="int")
    while i < n - 1:
        bb[j] = i
        if abs(T[i + 1, i]) < tol:
            i += 1
            s[j] = 1
            eb[j] = i
        else:
            i += 2
            s[j] = 2
            eb[j] = i
        j += 1
    if i == n - 1:
        bb[j], eb[j] = i, i + 1
        s[j] = 1
        j += 1
    bb = bb[0:j]
    eb = eb[0:j]
    s = s[0:j]
    return j, bb, eb, s
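An upper quasi-triangular matrix of this kind typically comes from a real Schur decomposition, where each 2x2 diagonal block encodes a complex-conjugate eigenvalue pair. A usage sketch, assuming block_structure5 is in scope and an older SciPy where the sp.empty alias it uses still exists:

import numpy as np
import scipy as sp   # block_structure5 above uses the sp alias
from scipy.linalg import schur

rng = np.random.RandomState(0)
A = rng.randn(6, 6)
T, Z = schur(A, output='real')     # T is upper quasi-triangular
m, bb, eb, s = block_structure5(T)
print(m, list(s))                  # 2x2 blocks mark complex eigenvalue pairs
print(s.sum() == len(T))           # the blocks tile the whole diagonal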
Example 6: _init_arrays
def _init_arrays(self):
    super(EvoMPS_TDVP_Generic, self)._init_arrays()
    # Make indices correspond to the thesis
    self.K = sp.empty((self.N + 1), dtype=sp.ndarray)  # Elements 1..N
    self.C = sp.empty((self.N), dtype=sp.ndarray)      # Elements 1..N-1
    for n in xrange(1, self.N + 1):
        self.K[n] = sp.zeros((self.D[n - 1], self.D[n - 1]), dtype=self.typ, order=self.odr)
        if n <= self.N - self.ham_sites + 1:
            ham_shape = []
            for i in xrange(self.ham_sites):
                ham_shape.append(self.q[n + i])
            C_shape = tuple(ham_shape + [self.D[n - 1], self.D[n - 1 + self.ham_sites]])
            self.C[n] = sp.empty(C_shape, dtype=self.typ, order=self.odr)
    self.eta = sp.zeros((self.N + 1), dtype=self.typ)
    """The per-site contributions to the norm of the TDVP tangent vector
    (projection of the exact time evolution onto the MPS tangent plane).
    Only available after calling take_step()."""
    self.eta.fill(sp.NaN)
    self.h_expect = sp.zeros((self.N + 1), dtype=self.typ)
    """The local energy expectation values (of each Hamiltonian term),
    available after calling update() or calc_K()."""
    self.h_expect.fill(sp.NaN)
    self.H_expect = sp.NaN
    """The energy expectation value, available after calling update()
    or calc_K()."""
Example 7: _create_block
def _create_block(self, block_size, order, dtype):
    matches_order = self.is_col_major == (order == "F")
    opposite_order = "C" if order == "F" else "F"
    if matches_order:
        return np.empty([len(self._row), block_size], dtype=dtype, order=order), order
    else:
        return np.empty([len(self._row), block_size], dtype=dtype, order=opposite_order), opposite_order
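The point of matching memory order is that a block laid out the same way as its backing store can be filled without an intermediate transposed copy. A quick illustration of what order='C' versus order='F' means:

import numpy as np

a = np.empty((3, 4), order="C")   # row-major: the last axis is contiguous
b = np.empty((3, 4), order="F")   # column-major: the first axis is contiguous
print(a.flags['C_CONTIGUOUS'], b.flags['F_CONTIGUOUS'])   # True True
print(a.strides, b.strides)       # (32, 8) vs (8, 24) for 8-byte floats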
Example 8: plot_optimal_tau_for_mean_uncertainty_reduction
def plot_optimal_tau_for_mean_uncertainty_reduction(
        results_for_exp, results_for_exp_inftau):
    """ Plot the optimal tau for the mean of uncertainty reduction.
    :param results_for_exp: The results of one experiment as 4-D array of the
        shape (metrics, z-values, tau-values, experimental repetitions).
    :type results_for_exp: 4-D array
    :param results_for_exp_inftau: The results of one experiment for `tau = inf`
        as 3-D array of the shape (metrics, z-values, experimental repetitions).
    :type results_for_exp_inftau: 3-D array.
    """
    values = sp.empty((results_for_exp.shape[0], results_for_exp.shape[1]))
    err = sp.empty((results_for_exp.shape[0], results_for_exp.shape[1], 2, 1))
    mark = sp.empty((results_for_exp.shape[0], results_for_exp.shape[1]))
    for m, metric in enumerate(cfg['metrics']):
        for z in xrange(len(cfg['zs'])):
            r = sp.mean(results_for_exp[m, z], axis=1)
            mark[m, z] = r.max()
            values[m, z] = sp.mean(cfg['time_scales'][r == r.max()]).magnitude
            r = cfg['time_scales'][r > 0.8 * r.max()]
            err[m, z, 0] = values[m, z] - min(r).magnitude
            err[m, z, 1] = max(r).magnitude - values[m, z]
    plot_param_per_metric_and_z(values, err)
    plot_bool_indicator_per_metric_and_z(
        sp.mean(results_for_exp_inftau, axis=2) >= mark)
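The err array stores distances below and above the optimum, which is the layout matplotlib's errorbar expects for asymmetric bars. A generic sketch with made-up numbers (the plotting helpers called above are project-specific and not reproduced):

import numpy as np
import matplotlib.pyplot as plt

x = np.arange(4)
best = np.array([3.0, 5.0, 4.0, 6.0])         # optimal tau per condition
low = best - np.array([2.0, 3.5, 3.0, 5.0])   # distance down to the range minimum
high = np.array([4.5, 7.0, 5.5, 8.0]) - best  # distance up to the range maximum
# yerr takes a (2, N) array of distances below and above each point.
plt.errorbar(x, best, yerr=np.vstack([low, high]), fmt='o')
plt.show()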
Example 9: estimateBeta
def estimateBeta(X, Y, K, C=None, addBiasTerm=False, numintervals0=100,
                 ldeltamin0=-5.0, ldeltamax0=5.0):
    """ compute all pvalues
    If numintervalsAlt==0 use EMMA-X trick (keep delta fixed over alternative models)
    """
    n, s = X.shape
    n_pheno = Y.shape[1]
    S, U = LA.eigh(K)
    UY = SP.dot(U.T, Y)
    UX = SP.dot(U.T, X)
    if C is None:
        Ucovariate = SP.dot(U.T, SP.ones([n, 1]))
    else:
        if addBiasTerm:
            C_ = SP.concatenate((C, SP.ones([n, 1])), axis=1)
            Ucovariate = SP.dot(U.T, C_)
        else:
            Ucovariate = SP.dot(U.T, C)
    n_covar = Ucovariate.shape[1]
    beta = SP.empty((n_pheno, s, n_covar + 1))
    LL = SP.ones((n_pheno, s)) * (-SP.inf)
    ldelta = SP.empty((n_pheno, s))
    sigg2 = SP.empty((n_pheno, s))
    pval = SP.ones((n_pheno, s)) * (-SP.inf)
    for phen in SP.arange(n_pheno):
        UY_ = UY[:, phen]
        ldelta[phen] = optdelta(UY_, Ucovariate, S, ldeltanull=None,
                                numintervals=numintervals0,
                                ldeltamin=ldeltamin0, ldeltamax=ldeltamax0)
        for snp in SP.arange(s):
            UX_ = SP.hstack((UX[:, snp:snp + 1], Ucovariate))
            nLL_, beta_, sigg2_ = nLLeval(ldelta[phen, snp], UY_, UX_, S, MLparams=True)
            beta[phen, snp, :] = beta_
            sigg2[phen, snp] = sigg2_
            LL[phen, snp] = -nLL_
    return beta, ldelta
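The rotation by U.T is the usual linear mixed model trick: in the eigenbasis of the kinship matrix K, the model covariance becomes diagonal, so each likelihood evaluation is cheap. A small sketch of the identity being exploited:

import numpy as np

rng = np.random.RandomState(0)
G = rng.randn(20, 5)
K = G @ G.T + np.eye(20)          # kinship-like SPD matrix
S, U = np.linalg.eigh(K)
# In the eigenbasis of K the matrix is diagonal, so the LMM covariance
# sigg2 * (K + delta * I) becomes sigg2 * (S + delta) on the diagonal:
print(np.allclose(U.T @ K @ U, np.diag(S)))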
Example 10: learn_gmm
def learn_gmm(self, x, y, tau=None):
    '''
    Function that learns the GMM from training samples
    It is possible to add a regularizer term Sigma = Sigma + tau*I
    Input:
        x : the training samples
        y : the labels
        tau : the value of the regularizer, if tau = None (default) no regularization
    Output:
        the mean, covariance and proportion of each class
    '''
    ## Get information from the data
    C = int(y.max(0))  # Number of classes
    n = x.shape[0]     # Number of samples
    d = x.shape[1]     # Number of variables
    ## Initialization
    self.ni = sp.empty((C, 1))      # Vector of number of samples for each class
    self.prop = sp.empty((C, 1))    # Vector of proportion
    self.mean = sp.empty((C, d))    # Vector of means
    self.cov = sp.empty((C, d, d))  # Matrix of covariance
    ## Learn the parameter of the model for each class
    for i in range(C):
        j = sp.where(y == (i + 1))[0]
        self.ni[i] = float(j.size)
        self.prop[i] = self.ni[i] / n
        self.mean[i, :] = sp.mean(x[j, :], axis=0)
        self.cov[i, :, :] = sp.cov(x[j, :], bias=1, rowvar=0)  # Normalize by ni to be consistent with the update formulae
    if tau is not None:
        self.tau = tau * sp.eye(d)
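A usage sketch on synthetic data; GMM here stands for a hypothetical container class exposing the learn_gmm method above, and labels are 1-based as the sp.where(y == (i + 1)) lookup expects:

import numpy as np

rng = np.random.RandomState(0)
# Two Gaussian classes with well-separated means.
x = np.vstack([rng.randn(100, 2), rng.randn(100, 2) + [5.0, 5.0]])
y = np.repeat([1, 2], 100)

gmm = GMM()                # hypothetical class with the learn_gmm method
gmm.learn_gmm(x, y, tau=0.1)
print(gmm.prop.ravel())    # ~[0.5, 0.5]
print(gmm.mean)            # rows near (0, 0) and (5, 5)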
Example 11: globs
def globs(globs):
    # setup mock urllib2 module to avoid downloading from mldata.org
    mock_datasets = {
        'mnist-original': {
            'data': sp.empty((70000, 784)),
            'label': sp.repeat(sp.arange(10, dtype='d'), 7000),
        },
        'iris': {
            'data': sp.empty((150, 4)),
        },
        'datasets-uci-iris': {
            'double0': sp.empty((150, 4)),
            'class': sp.empty((150,)),
        },
    }
    global custom_data_home
    custom_data_home = tempfile.mkdtemp()
    makedirs(join(custom_data_home, 'mldata'))
    globs['custom_data_home'] = custom_data_home
    global _urllib2_ref
    _urllib2_ref = datasets.mldata.urllib2
    globs['_urllib2_ref'] = _urllib2_ref
    globs['mock_urllib2'] = mock_urllib2(mock_datasets)
    return globs
Example 12: crossValidate
def crossValidate(y, X, K=None, folds=3, model=None, returnModel=False):
    errors = SP.empty(folds)
    n = y.shape[0]
    indexes = crossValidationScheme(folds, n)
    predictions = SP.empty(y.shape)
    alpha = []
    alphas = []
    msePath = []
    for cvRun in SP.arange(len(indexes)):
        testIndexes = indexes[cvRun]
        yTrain = y[~testIndexes]
        XTrain = X[~testIndexes]
        if K is None:
            model.fit(XTrain, yTrain)
            prediction = SP.reshape(model.predict(X[testIndexes]), (-1, 1))
        else:  # models having population structure
            KTrain = K[~testIndexes]
            KTrain = KTrain[:, ~testIndexes]
            KTest = K[testIndexes]
            KTest = KTest[:, ~testIndexes]
            model.reset()
            model.kernel = KTrain  # TODO: make nice integration
            model.fit(XTrain, yTrain)
            prediction = SP.reshape(model.predict(X[testIndexes], k=KTest), (-1, 1))
        predictions[testIndexes] = prediction
        errors[cvRun] = predictionError(y[testIndexes], prediction)
        print('prediction error right now is', errors[cvRun])
        if returnModel:
            alpha.append(model.alpha)
            alphas.append(model.alphas)
            msePath.append(model.mse_path)
    if returnModel:
        return indexes, predictions, errors, alpha, alphas, msePath
    else:
        return indexes, predictions, errors
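crossValidationScheme is not shown here, but the indexing style (y[~testIndexes]) implies it returns one boolean test mask per fold. A sketch of such a scheme under that assumption:

import numpy as np

def boolean_folds(n, folds, seed=0):
    """Return one boolean test mask per fold -- a sketch of what
    crossValidationScheme presumably produces."""
    rng = np.random.RandomState(seed)
    assignment = rng.permutation(n) % folds
    return [assignment == k for k in range(folds)]

for mask in boolean_folds(10, 3):
    print(np.flatnonzero(mask))   # disjoint test sets covering all indices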
Example 13: max_filter_bord
def max_filter_bord(im, size=3):
    """The function performs a local max filter on an image. Border
    pixels are processed.
    Args:
        im: the image to process
        size: the size in pixels of the local square window. Default value is 3.
    Returns:
        out: the filtered image
    """
    ## Get the size of the image
    [nl, nc, d] = im.shape
    ## Get the size of the moving window
    s = (size - 1) / 2
    ## Initialization of the output
    out = sp.empty((nl, nc, d), dtype=im.dtype.name)
    temp = sp.empty((nl + 2 * s, nc + 2 * s, d), dtype=im.dtype.name)  # A temporary array is created
    temp[0:s, :, :] = sp.NaN
    temp[:, 0:s, :] = sp.NaN
    temp[-s:, :, :] = sp.NaN
    temp[:, -s:, :] = sp.NaN
    temp[s:s + nl, s:s + nc, :] = im
    ## Apply the max filter
    for i in range(s, nl + s):  # Shift the origin to remove border effect
        for j in range(s, nc + s):
            for k in range(d):
                out[i - s, j - s, k] = sp.nanmax(temp[i - s:i + 1 + s, j - s:j + s + 1, k])
    return out.astype(im.dtype.name)
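For real use, scipy.ndimage provides a vectorized equivalent. With mode='nearest', border handling by edge replication gives the same result as taking the maximum over the window clipped at the image boundary, as a quick check on a 2-D array shows:

import numpy as np
from scipy import ndimage

rng = np.random.RandomState(0)
im = rng.rand(16, 16)
fast = ndimage.maximum_filter(im, size=3, mode='nearest')
# Naive clipped-window maximum for comparison.
naive = np.array([[im[max(0, i - 1):i + 2, max(0, j - 1):j + 2].max()
                   for j in range(16)] for i in range(16)])
print(np.allclose(fast, naive))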
Example 14: rebin
def rebin(Data, n_bins_combined):
    """The function that actually does the rebinning on a Data Block."""
    nt = Data.data.shape[0]
    new_nt = nt // n_bins_combined
    new_shape = (new_nt,) + Data.data.shape[1:]
    unmask = sp.logical_not(ma.getmaskarray(Data.data))
    data = Data.data.filled(0)
    # Allocate memory for the rebinned data.
    new_data = ma.zeros(new_shape, dtype=data.dtype)
    counts = sp.zeros(new_shape, dtype=int)
    # Add up the bins to be combined.
    for ii in range(n_bins_combined):
        new_data += data[ii:new_nt * n_bins_combined:n_bins_combined, ...]
        counts += unmask[ii:new_nt * n_bins_combined:n_bins_combined, ...]
    new_data[counts == 0] = ma.masked
    counts[counts == 0] = 1
    new_data /= counts
    Data.set_data(new_data)
    # Now deal with all the other records that aren't the main data.
    for field_name in Data.field.iterkeys():
        # DATE-OBS is a string field so we have to write special code for it.
        if field_name == "DATE-OBS":
            time_field = Data.field[field_name]
            new_field = sp.empty(new_nt, dtype=Data.field[field_name].dtype)
            # Convert to float, average, then convert back to a string.
            time_float = utils.time2float(time_field)
            for ii in range(new_nt):
                tmp_time = sp.mean(time_float[n_bins_combined * ii
                                              : n_bins_combined * (ii + 1)])
                new_field[ii] = utils.float2time(tmp_time)
            Data.set_field(field_name, new_field,
                           axis_names=Data.field_axes[field_name],
                           format=Data.field_formats[field_name])
            continue
        # Only change fields that have a 'time' axis.
        try:
            time_axis = list(Data.field_axes[field_name]).index('time')
        except ValueError:
            continue
        # For now, the time axis has to be the first axis.
        if time_axis != 0:
            msg = "Expected time to be the first axis for all fields."
            raise NotImplementedError(msg)
        field_data = Data.field[field_name]
        if not field_data.dtype.name == "float64":
            msg = "Field data type is not float. Handle explicitly."
            raise NotImplementedError(msg)
        new_field = sp.empty(field_data.shape[:time_axis] + (new_nt,)
                             + field_data.shape[time_axis + 1:],
                             dtype=field_data.dtype)
        for ii in range(new_nt):
            tmp_data = sp.sum(field_data[n_bins_combined * ii
                                         :n_bins_combined * (ii + 1), ...], 0)
            tmp_data /= n_bins_combined
            new_field[ii, ...] = tmp_data
        Data.set_field(field_name, new_field,
                       axis_names=Data.field_axes[field_name],
                       format=Data.field_formats[field_name])
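When no mask bookkeeping is needed, the same strided-sum rebinning reduces to a reshape and a mean. A minimal sketch:

import numpy as np

def rebin_mean(x, n):
    """Average consecutive groups of n samples along axis 0, dropping any
    leftover samples -- the unmasked core of the loop above."""
    new_nt = x.shape[0] // n
    return x[:new_nt * n].reshape((new_nt, n) + x.shape[1:]).mean(axis=1)

x = np.arange(10.0)
print(rebin_mean(x, 3))   # [1. 4. 7.] -- means of (0,1,2), (3,4,5), (6,7,8)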
Example 15: create_block
def create_block(self, blocksize, dtype, order):
    N_original = len(self.original_iids)  # similar code elsewhere -- make a method
    matches_order = self.is_snp_major == (order == "F")  # similar code elsewhere -- make a method
    opposite_order = "C" if order == "F" else "F"  # similar code elsewhere -- make a method
    if matches_order:
        return sp.empty([N_original, blocksize], dtype=dtype, order=order)
    else:
        return sp.empty([N_original, blocksize], dtype=dtype, order=opposite_order)