This article collects typical usage examples of Python's numpy.ascontiguousarray function. If you are wondering what ascontiguousarray does, how it is used, or what real code that calls it looks like, the curated examples below should help.
The following sections show 15 code examples of the ascontiguousarray function, drawn from open-source projects and ordered by popularity.
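Before diving into the examples, here is a minimal usage sketch (not taken from any of the projects below) showing what numpy.ascontiguousarray does: it returns an array in C (row-major) memory order with the requested dtype, copying only when the input is not already C-contiguous.

import numpy as np

a = np.arange(6).reshape(2, 3).T        # transposed view, not C-contiguous
print(a.flags['C_CONTIGUOUS'])          # False
b = np.ascontiguousarray(a, dtype=np.float64)
print(b.flags['C_CONTIGUOUS'])          # True
print(np.shares_memory(a, b))           # False: a copy was made

This matters whenever an array is handed to C/C++ or Fortran code via ctypes or Cython, which is the common thread running through the examples below.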
Example 1: getPeakProperty
def getPeakProperty(self, p_name):
    """
    Return a numpy array containing the requested property.
    """
    if p_name not in self.peak_properties:
        raise MultiFitterException("No such property '" + p_name + "'")

    # Properties that are calculated from other properties.
    if(self.peak_properties[p_name] == "compound"):
        # Return 0 length array if there are no localizations.
        if(self.getNFit() == 0):
            return numpy.zeros(0, dtype = numpy.float64)

        # Peak significance calculation.
        if(p_name == "significance"):
            bg_sum = self.getPeakProperty("bg_sum")
            fg_sum = self.getPeakProperty("fg_sum")
            return fg_sum/numpy.sqrt(bg_sum)

    # Floating point properties.
    elif(self.peak_properties[p_name] == "float"):
        values = numpy.ascontiguousarray(numpy.zeros(self.getNFit(), dtype = numpy.float64))
        self.clib.mFitGetPeakPropertyDouble(self.mfit,
                                            values,
                                            ctypes.c_char_p(p_name.encode()))
        return values

    # Integer properties.
    elif(self.peak_properties[p_name] == "int"):
        values = numpy.ascontiguousarray(numpy.zeros(self.getNFit(), dtype = numpy.int32))
        self.clib.mFitGetPeakPropertyInt(self.mfit,
                                         values,
                                         ctypes.c_char_p(p_name.encode()))
        return values
Example 2: test_mem_layout
def test_mem_layout():
    # Test with different memory layouts of X and y
    X_ = np.asfortranarray(X)
    clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
    clf.fit(X_, y)
    assert_array_equal(clf.predict(T), true_result)
    assert_equal(100, len(clf.estimators_))

    X_ = np.ascontiguousarray(X)
    clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
    clf.fit(X_, y)
    assert_array_equal(clf.predict(T), true_result)
    assert_equal(100, len(clf.estimators_))

    y_ = np.asarray(y, dtype=np.int32)
    y_ = np.ascontiguousarray(y_)
    clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
    clf.fit(X, y_)
    assert_array_equal(clf.predict(T), true_result)
    assert_equal(100, len(clf.estimators_))

    y_ = np.asarray(y, dtype=np.int32)
    y_ = np.asfortranarray(y_)
    clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
    clf.fit(X, y_)
    assert_array_equal(clf.predict(T), true_result)
    assert_equal(100, len(clf.estimators_))
Example 3: __init__
def __init__(self, tracks, colors=None, line_width=2., affine=None):
    if affine is None:
        self.affine = np.eye(4)
    else:
        self.affine = affine
    self.tracks_no = len(tracks)
    self.tracks_len = [len(t) for t in tracks]
    self.tracks = tracks
    self.vertices = np.ascontiguousarray(np.concatenate(self.tracks).astype('f4'))
    if colors is None:
        self.colors = np.ascontiguousarray(np.ones((len(self.vertices), 4)).astype('f4'))
    else:
        if isinstance(colors, (list, tuple)):
            colors = np.tile(colors, (np.sum(self.tracks_len), 1))
        self.colors = np.ascontiguousarray(colors.astype('f4'))
    self.vptr = self.vertices.ctypes.data
    self.cptr = self.colors.ctypes.data
    self.count = np.array(self.tracks_len, dtype=np.int32)
    self.first = np.r_[0, np.cumsum(self.count)[:-1]].astype(np.int32)
    self.firstptr = self.first.ctypes.data
    self.countptr = self.count.ctypes.data
    self.line_width = line_width
    self.items = self.tracks_no
    self.show_aabb = False
    mn = self.vertices.min()
    mx = self.vertices.max()
    self.make_aabb((np.array([mn, mn, mn]), np.array([mx, mx, mx])), margin=0)
Example 4: sorted_points_and_ids
def sorted_points_and_ids(xin, yin, zin, xperiod, yperiod, zperiod,
                          approx_xcell_size, approx_ycell_size, approx_zcell_size):
    """ Determine the cell_id of every point, sort the points
    according to cell_id, and return the sorted points as well as
    the cell id indexing array.

    Notes
    -----
    The x-coordinates of points with cell_id = icell are given by
    xout[cell_id_indices[icell]:cell_id_indices[icell+1]].
    """
    npts = len(xin)

    num_xdivs, xcell_size = determine_cell_size(xperiod, approx_xcell_size)
    num_ydivs, ycell_size = determine_cell_size(yperiod, approx_ycell_size)
    num_zdivs, zcell_size = determine_cell_size(zperiod, approx_zcell_size)
    ncells = num_xdivs*num_ydivs*num_zdivs

    ix = digitized_position(xin, xcell_size, num_xdivs)
    iy = digitized_position(yin, ycell_size, num_ydivs)
    iz = digitized_position(zin, zcell_size, num_zdivs)

    cell_ids = cell_id_from_cell_tuple(ix, iy, iz, num_ydivs, num_zdivs)
    cell_id_sorting_indices = np.argsort(cell_ids)

    cell_id_indices = np.searchsorted(cell_ids, np.arange(ncells),
                                      sorter=cell_id_sorting_indices)
    cell_id_indices = np.append(cell_id_indices, npts)

    xout = np.ascontiguousarray(xin[cell_id_sorting_indices], dtype=np.float64)
    yout = np.ascontiguousarray(yin[cell_id_sorting_indices], dtype=np.float64)
    zout = np.ascontiguousarray(zin[cell_id_sorting_indices], dtype=np.float64)
    cell_id_indices = np.ascontiguousarray(cell_id_indices, dtype=np.int64)

    return xout, yout, zout, cell_id_indices
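The indexing convention described in the Notes can be checked with a small self-contained sketch (the numbers below are made up for illustration; only the np.searchsorted/np.append logic from the function above is exercised):

import numpy as np

cell_ids = np.array([0, 0, 2, 2, 2, 3])          # already-sorted cell ids (hypothetical)
xout = np.array([0.1, 0.4, 1.7, 1.9, 1.2, 2.5])  # x-coordinates sorted the same way
ncells = 4
cell_id_indices = np.searchsorted(cell_ids, np.arange(ncells))
cell_id_indices = np.append(cell_id_indices, len(cell_ids))
# points with cell_id == 2 occupy the half-open slice [2, 5)
print(xout[cell_id_indices[2]:cell_id_indices[3]])   # [1.7 1.9 1.2]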
Example 5: _compute_targets
def _compute_targets(rois, overlaps, labels):
    """Compute bounding-box regression targets for an image."""
    # Indices of ground-truth ROIs
    gt_inds = np.where(overlaps == 1)[0]
    if len(gt_inds) == 0:
        # Bail if the image has no ground-truth ROIs
        return np.zeros((rois.shape[0], 5), dtype=np.float32)
    # Indices of examples for which we try to make predictions
    ex_inds = np.where(overlaps >= cfg.TRAIN.BBOX_THRESH)[0]

    # Get IoU overlap between each ex ROI and gt ROI
    ex_gt_overlaps = bbox_overlaps(
        np.ascontiguousarray(rois[ex_inds, :], dtype=np.float64),
        np.ascontiguousarray(rois[gt_inds, :], dtype=np.float64))

    # Find which gt ROI each ex ROI has max overlap with:
    # this will be the ex ROI's gt target
    gt_assignment = ex_gt_overlaps.argmax(axis=1)
    gt_rois = rois[gt_inds[gt_assignment], :]
    ex_rois = rois[ex_inds, :]

    targets = np.zeros((rois.shape[0], 5), dtype=np.float32)
    targets[ex_inds, 0] = labels[ex_inds]
    targets[ex_inds, 1:] = bbox_transform(ex_rois, gt_rois)
    return targets
Example 6: __iter__
def __iter__(self):
    ''' This is where all the fun starts '''
    x, y, z, g = self.a.shape
    # for all seeds
    for i in range(self.seed_no):
        if self.seed_list is None:
            rx = (x - 1) * np.random.rand()
            ry = (y - 1) * np.random.rand()
            rz = (z - 1) * np.random.rand()
            seed = np.ascontiguousarray(
                np.array([rx, ry, rz]), dtype=np.float64)
        else:
            seed = np.ascontiguousarray(
                self.seed_list[i], dtype=np.float64)
        # for all peaks
        for ref in range(g):
            track = eudx_both_directions(seed.copy(),
                                         ref,
                                         self.a,
                                         self.ind,
                                         self.odf_vertices,
                                         self.a_low,
                                         self.ang_thr,
                                         self.step_sz,
                                         self.total_weight,
                                         self.max_points)
            if track is None:
                pass
            else:
                if track.shape[0] > 1:
                    yield track + self.voxel_shift
Example 7: _handle_input
def _handle_input(image, selem, out, mask, out_dtype=None):
    if image.dtype not in (np.uint8, np.uint16):
        image = img_as_ubyte(image)

    selem = np.ascontiguousarray(img_as_ubyte(selem > 0))
    image = np.ascontiguousarray(image)

    if mask is None:
        mask = np.ones(image.shape, dtype=np.uint8)
    else:
        mask = img_as_ubyte(mask)
        mask = np.ascontiguousarray(mask)

    if out is None:
        if out_dtype is None:
            out_dtype = image.dtype
        out = np.empty_like(image, dtype=out_dtype)

    if image is out:
        raise NotImplementedError("Cannot perform rank operation in place.")

    is_8bit = image.dtype in (np.uint8, np.int8)
    if is_8bit:
        max_bin = 255
    else:
        max_bin = max(4, image.max())

    bitdepth = int(np.log2(max_bin))
    if bitdepth > 10:
        warnings.warn("Bitdepth of %d may result in bad rank filter "
                      "performance due to large number of bins." % bitdepth)

    return image, selem, out, mask, max_bin
Example 8: __init__
def __init__(self, x, y, z, Lbox, cell_size):
    """
    Initialize the grid.

    Parameters
    ----------
    x, y, z : arrays
        Length-Npts arrays containing the spatial position of the Npts points.

    Lbox : float
        Length scale defining the periodic boundary conditions

    cell_size : float
        The approximate cell size into which the box will be divided.
    """
    self.cell_size = cell_size.astype(np.float64)
    self.Lbox = Lbox.astype(np.float64)
    self.num_divs = np.floor(Lbox/cell_size).astype(int)
    self.dL = Lbox/self.num_divs

    # build grid tree
    idx_sorted, slice_array = self.compute_cell_structure(x, y, z)
    self.x = np.ascontiguousarray(x[idx_sorted], dtype=np.float64)
    self.y = np.ascontiguousarray(y[idx_sorted], dtype=np.float64)
    self.z = np.ascontiguousarray(z[idx_sorted], dtype=np.float64)
    self.slice_array = slice_array
    self.idx_sorted = idx_sorted
Example 9: optimum_reparam_pair
def optimum_reparam_pair(q, time, q1, q2, lam=0.0):
    """
    calculates the warping to align srsf pair q1 and q2 to q

    :param q: vector of size N or array of NxM samples of first SRSF
    :param time: vector of size N describing the sample points
    :param q1: vector of size N or array of NxM samples of second SRSF
    :param q2: vector of size N or array of NxM samples of second SRSF
    :param lam: controls the amount of elasticity (default = 0.0)

    :rtype: vector
    :return gam: describing the warping function used to align q2 with q1
    """
    if q1.ndim == 1 and q2.ndim == 1:
        q_c = column_stack((q1, q2))
        gam = orN.coptimum_reparam_pair(ascontiguousarray(q), time,
                                        ascontiguousarray(q_c), lam)

    if q1.ndim == 2 and q2.ndim == 2:
        gam = orN.coptimum_reparamN2_pair(ascontiguousarray(q), time,
                                          ascontiguousarray(q1),
                                          ascontiguousarray(q2), lam)

    return gam
Example 10: do_parameter_selection
def do_parameter_selection(argv):
    path, test_path = argv
    params = {'n_estimators': 500, 'max_depth': 4, 'min_samples_split': 1,
              'min_samples_leaf': 1, 'random_state': None, 'do_consider_correct': 1,
              'learn_rate': 0.2, 'n1': 2000, 'n2': 1, 'tau': 0.01}
    print('loading data...')
    X, dr, sr, groups = load_dataset(path)
    test_X, test_rd, test_rs, test_groups = load_dataset(test_path)
    # test_X = np.asfortranarray(test_X, dtype=DTYPE)
    test_rd = np.ascontiguousarray(test_rd)
    test_rs = np.ascontiguousarray(test_rs)
    test_docpair_samples = DocPairSampler(np.random.RandomState()).sample(test_rd, test_groups, 20000)

    from sklearn.grid_search import IterGrid
    param_grid = IterGrid({'n_estimators': [200, 400, 600, 800, 1000],
                           'n1': [1000, 2000, 5000],
                           'learn_rate': [.1, .2, .3]})
    for param in param_grid:
        print(param)
        params.update(param)
        ranker = GradientBoostingRanker(**params)
        ranker.fit(X, dr, sr, groups)
        test_y_pred = ranker.predict(test_X)
        test_pred_sort_groups = PredictSortGroups(test_y_pred, test_groups)
        test_loss = ranker.loss_(test_rd, test_rs, test_y_pred, test_groups,
                                 test_pred_sort_groups, ranker.random_state,
                                 test_docpair_samples)
        print(ranker.train_score_[-1], test_loss)
Example 11: read
def read(self):
    """Read the visibilities and return as a (data, weight) tuple."""
    print("Reading " + str(self.data_size()) + " samples...")
    data = numpy.ascontiguousarray(numpy.zeros(self._imagingdata.dataSize, dtype=numpy.complex128))
    weights = numpy.ascontiguousarray(numpy.zeros(self._imagingdata.dataSize, dtype=numpy.float64))
    _wsclean.read(self._userdata, data, weights)
    return data, weights
Example 12: tucker_als
def tucker_als(idx, val, shape, core_shape, iters=25, growth_tol=0.01, batch_run=False):
    '''
    Computes the Tucker ALS decomposition of a sparse tensor
    provided in COO format. Usage:
    u0, u1, u2, g = tucker_als(idx, val, shape, core_shape)
    '''
    def log_status(msg):
        if not batch_run:
            print(msg)

    if not (idx.flags.c_contiguous and val.flags.c_contiguous):
        raise ValueError('Input arrays must be C-contiguous.')
    # TODO: it's better to implement this check in the future
    # if np.any(idx[1:, 0] - idx[:-1, 0] < 0):
    #     print('Warning! Index array must be sorted by first column in ascending order.')

    r0, r1, r2 = core_shape

    u1 = np.random.rand(shape[1], r1)
    u1 = np.linalg.qr(u1, mode='reduced')[0]
    u2 = np.random.rand(shape[2], r2)
    u2 = np.linalg.qr(u2, mode='reduced')[0]
    u1 = np.ascontiguousarray(u1)
    u2 = np.ascontiguousarray(u2)

    g_norm_old = 0
    for i in range(iters):
        log_status('Step %i of %i' % (i+1, iters))
        u0 = tensordot2(idx, val, shape, u2, u1, ((2, 0), (1, 0)))\
            .reshape(shape[0], r1*r2)
        uu = np.linalg.svd(u0, full_matrices=0)[0]
        u0 = np.ascontiguousarray(uu[:, :r0])

        u1 = tensordot2(idx, val, shape, u2, u0, ((2, 0), (0, 0)))\
            .reshape(shape[1], r0*r2)
        uu = np.linalg.svd(u1, full_matrices=0)[0]
        u1 = np.ascontiguousarray(uu[:, :r1])

        u2 = tensordot2(idx, val, shape, u1, u0, ((1, 0), (0, 0)))\
            .reshape(shape[2], r0*r1)
        uu, ss, vv = np.linalg.svd(u2, full_matrices=0)
        u2 = np.ascontiguousarray(uu[:, :r2])

        g_norm_new = np.linalg.norm(np.diag(ss[:r2]))
        g_growth = (g_norm_new - g_norm_old) / g_norm_new
        g_norm_old = g_norm_new
        log_status('growth of the core: %f' % g_growth)
        if g_growth < growth_tol:
            log_status('Core is no longer growing. Norm of the core: %f' % g_norm_old)
            break

    g = np.diag(ss[:r2]).dot(vv[:r2, :])
    g = g.reshape(r2, r1, r0).transpose(2, 1, 0)
    log_status('Done')
    return u0, u1, u2, g
Example 13: initializeC
def initializeC(self, image):
    super(MultiFitterZ, self).initializeC(image)
    self.clib.daoInitializeZ(self.mfit,
                             numpy.ascontiguousarray(self.wx_params),
                             numpy.ascontiguousarray(self.wy_params),
                             self.min_z,
                             self.max_z)
Example 14: read_sparse_array
def read_sparse_array(self, hdr):
    ''' Read sparse matrix type

    Matlab (TM) 4 real sparse arrays are saved in a N+1 by 3 array
    format, where N is the number of non-zero values. Column 1 values
    [0:N] are the (1-based) row indices of each non-zero value,
    column 2 [0:N] are the column indices, column 3 [0:N] are the
    (real) values. The last values [-1,0:2] of the row and column
    index columns are shape[0] and shape[1] respectively of the output
    matrix. The last value for the values column is a padding 0. mrows
    and ncols values from the header give the shape of the stored
    matrix, here [N+1, 3]. Complex data is saved as a 4 column
    matrix, where the fourth column contains the imaginary component;
    the last value is again 0. Complex sparse data do _not_ have the
    header imagf field set to True; the fact that the data are complex
    is only detectable because there are 4 storage columns.
    '''
    res = self.read_sub_array(hdr)
    tmp = res[:-1, :]
    dims = res[-1, 0:2]
    I = np.ascontiguousarray(tmp[:, 0], dtype='intc')  # fixes byte order also
    J = np.ascontiguousarray(tmp[:, 1], dtype='intc')
    I -= 1  # for 1-based indexing
    J -= 1
    if res.shape[1] == 3:
        V = np.ascontiguousarray(tmp[:, 2], dtype='float')
    else:
        V = np.ascontiguousarray(tmp[:, 2], dtype='complex')
        V.imag = tmp[:, 3]
    return scipy.sparse.coo_matrix((V, (I, J)), dims)
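A small hedged illustration of the storage layout described in the docstring (the numbers are invented): a 3x3 real matrix with A[0, 1] = 5 and A[2, 0] = 7 is stored as an (N+1) x 3 array whose last row carries the shape and a padding zero, and it decodes like this:

import numpy as np
import scipy.sparse

stored = np.array([[1., 2., 5.],    # 1-based row index, column index, value
                   [3., 1., 7.],
                   [3., 3., 0.]])   # last row: nrows, ncols, padding 0
I = np.ascontiguousarray(stored[:-1, 0], dtype='intc') - 1   # back to 0-based indices
J = np.ascontiguousarray(stored[:-1, 1], dtype='intc') - 1
V = np.ascontiguousarray(stored[:-1, 2], dtype='float')
shape = tuple(stored[-1, 0:2].astype(int))
A = scipy.sparse.coo_matrix((V, (I, J)), shape=shape)
print(A.toarray())
# [[0. 5. 0.]
#  [0. 0. 0.]
#  [7. 0. 0.]]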
Example 15: hist_3d_index
def hist_3d_index(x, y, z, shape):
    """
    Fast 3d histogram of 3D indices with C++ inner loop optimization.
    It is more than two orders of magnitude faster than np.histogramdd() and uses less RAM.
    The indices are given in x, y, z coordinates and have to fit into a histogram of the dimensions shape.

    Parameters
    ----------
    x : array like
    y : array like
    z : array like
    shape : tuple
        tuple with x,y,z dimensions: (x, y, z)

    Returns
    -------
    np.ndarray with given shape
    """
    if len(shape) != 3:
        raise ValueError('The shape has to describe a 3-d histogram')
    # change memory alignment for c++ library
    x = np.ascontiguousarray(x.astype(np.int32))
    y = np.ascontiguousarray(y.astype(np.int32))
    z = np.ascontiguousarray(z.astype(np.int32))
    result = np.zeros(shape=shape, dtype=np.uint32).ravel()  # ravel hist in c-style, 3D --> 1D
    compiled_analysis_functions.hist_3d(x, y, z, shape[0], shape[1], shape[2], result)
    return np.reshape(result, shape)  # rebuild 3D hist from 1D hist
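For reference, here is a hedged pure-NumPy sketch of the same binning (assuming the x, y, z index arrays already lie inside shape); it is much slower than the compiled hist_3d loop above, but convenient for cross-checking results on small inputs:

import numpy as np

def hist_3d_numpy(x, y, z, shape):
    # np.add.at performs an unbuffered in-place add, so repeated
    # (x, y, z) triples are each counted, unlike plain fancy-index assignment
    hist = np.zeros(shape, dtype=np.uint32)
    np.add.at(hist, (np.asarray(x, dtype=np.intp),
                     np.asarray(y, dtype=np.intp),
                     np.asarray(z, dtype=np.intp)), 1)
    return hist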