This article collects typical usage examples of the scipy.sparse.vstack method in Python. If you are wondering how exactly to use sparse.vstack, how to call it, or what real-world examples of it look like, the curated code samples below should help. You can also browse further usage examples from the scipy.sparse module that this method belongs to.
The following shows 15 code examples of the sparse.vstack method, sorted by popularity by default.
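Before the project examples, here is a minimal self-contained sketch of the basic scipy.sparse.vstack call (toy matrices, not taken from any of the projects below):

import numpy as np
from scipy import sparse

# Two sparse matrices with the same number of columns.
a = sparse.csr_matrix(np.arange(6).reshape(2, 3))
b = sparse.csr_matrix(np.ones((1, 3)))

# vstack concatenates them row-wise; format="csr" fixes the output format.
stacked = sparse.vstack([a, b], format="csr")
print(stacked.shape)      # (3, 3)
print(stacked.toarray())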
Example 1: make_data_matrix
# Required import: from scipy import sparse [as alias]
# Or: from scipy.sparse import vstack [as alias]
def make_data_matrix(positive_data_matrix=None,
                     negative_data_matrix=None,
                     target=None):
    """make_data_matrix."""
    assert positive_data_matrix is not None, \
        'ERROR: expecting non null positive_data_matrix'
    if negative_data_matrix is None:
        negative_data_matrix = positive_data_matrix.multiply(-1)
    if target is None and negative_data_matrix is not None:
        yp = [1] * positive_data_matrix.shape[0]
        yn = [-1] * negative_data_matrix.shape[0]
        y = np.array(yp + yn)
        data_matrix = vstack(
            [positive_data_matrix, negative_data_matrix], format="csr")
    if target is not None:
        data_matrix = positive_data_matrix
        y = target
    return data_matrix, y
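A hypothetical call of make_data_matrix, assuming the module-level imports the function relies on (import numpy as np and from scipy.sparse import vstack), plus csr_matrix for building a toy input:

import numpy as np
from scipy.sparse import csr_matrix

# Two positive rows; negatives are derived inside the function by negation.
positives = csr_matrix(np.array([[1.0, 0.0], [0.0, 2.0]]))
X, y = make_data_matrix(positive_data_matrix=positives)
print(X.shape)   # (4, 2): positives stacked on top of their negated copies
print(y)         # [ 1  1 -1 -1]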
Example 2: auto_label
# Required import: from scipy import sparse [as alias]
# Or: from scipy.sparse import vstack [as alias]
def auto_label(graphs, n_clusters=16, **opts):
    """Label nodes with cluster id.

    Cluster nodes using as features the output of vertex_vectorize.
    """
    data_list = Vectorizer(**opts).vertex_transform(graphs)
    data_matrix = vstack(data_list)
    clu = MiniBatchKMeans(n_clusters=n_clusters, n_init=10)
    clu.fit(data_matrix)
    preds = clu.predict(data_matrix)
    vecs = clu.transform(data_matrix)
    sizes = [m.shape[0] for m in data_list]
    label_list = []
    vecs_list = []
    pointer = 0
    for size in sizes:
        label_list.append(preds[pointer: pointer + size])
        vecs_list.append(vecs[pointer: pointer + size])
        pointer += size
    return label_list, vecs_list
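The stack-cluster-split pattern used by auto_label can be exercised without the EDeN Vectorizer; the following sketch substitutes random CSR blocks for the per-graph vertex features:

import numpy as np
from scipy import sparse
from sklearn.cluster import MiniBatchKMeans

# Stand-ins for per-graph vertex feature matrices (same number of columns).
data_list = [sparse.random(n, 20, density=0.3, format='csr') for n in (4, 7, 5)]
data_matrix = sparse.vstack(data_list)

clu = MiniBatchKMeans(n_clusters=3, n_init=10)
preds = clu.fit_predict(data_matrix)

# Split the flat prediction array back into one label array per graph.
sizes = [m.shape[0] for m in data_list]
label_list = np.split(preds, np.cumsum(sizes)[:-1])
print([len(l) for l in label_list])   # [4, 7, 5]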
Example 3: apply_affine
# Required import: from scipy import sparse [as alias]
# Or: from scipy.sparse import vstack [as alias]
def apply_affine(aff, coords):
    '''
    apply_affine(affine, coords) yields the result of applying the given affine transformation to
    the given coordinate or coordinates.

    This function expects coords to be a (dims X n) matrix but if the first dimension is neither 2
    nor 3, coords.T is used; i.e.:
      apply_affine(affine3x3, coords2xN) ==> newcoords2xN
      apply_affine(affine4x4, coords3xN) ==> newcoords3xN
      apply_affine(affine3x3, coordsNx2) ==> newcoordsNx2 (for N != 2)
      apply_affine(affine4x4, coordsNx3) ==> newcoordsNx3 (for N != 3)
    '''
    if aff is None: return coords
    (coords, tr) = (np.asanyarray(coords), False)
    if len(coords.shape) == 1: return np.squeeze(apply_affine(aff, np.reshape(coords, (-1, 1))))
    elif len(coords.shape) > 2: raise ValueError('cannot apply affine to ND-array for N > 2')
    if   len(coords) == 2: aff = to_affine(aff, 2)
    elif len(coords) == 3: aff = to_affine(aff, 3)
    else: (coords, aff, tr) = (coords.T, to_affine(aff, coords.shape[1]), True)
    r = np.dot(aff, np.vstack([coords, np.ones([1, coords.shape[1]])]))[:-1]
    return r.T if tr else r
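The homogeneous-coordinate trick at the heart of apply_affine can be shown on its own, with a 4x4 affine applied directly to a (3 x N) coordinate matrix (a self-contained sketch that skips the to_affine normalization):

import numpy as np

# Rotate 90 degrees about z and translate by (10, 0, 0).
aff = np.array([[0., -1., 0., 10.],
                [1.,  0., 0.,  0.],
                [0.,  0., 1.,  0.],
                [0.,  0., 0.,  1.]])
coords = np.array([[1., 2.],    # x values of two points
                   [0., 0.],    # y values
                   [5., 5.]])   # z values; shape (3, N)

# Append a row of ones, multiply, and drop the homogeneous row again.
r = np.dot(aff, np.vstack([coords, np.ones([1, coords.shape[1]])]))[:-1]
print(r)   # [[10. 10.]
           #  [ 1.  2.]
           #  [ 5.  5.]]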
Example 4: curve_length
# Required import: from scipy import sparse [as alias]
# Or: from scipy.sparse import vstack [as alias]
def curve_length(self, start=None, end=None, precision=0.01):
    '''
    Calculates the length of the curve by dividing the curve up
    into pieces of parameterized-length <precision>.
    '''
    if start is None: start = self.t[0]
    if end is None: end = self.t[-1]
    from scipy import interpolate
    if self.order == 1:
        # we just want to add up along the steps...
        ii = [ii for (ii, t) in enumerate(self.t) if start < t and t < end]
        ts = np.concatenate([[start], self.t[ii], [end]])
        xy = np.vstack([[self(start)], self.coordinates[:, ii].T, [self(end)]])
        return np.sum(np.sqrt(np.sum((xy[1:] - xy[:-1])**2, axis=1)))
    else:
        t = np.linspace(start, end, int(np.ceil((end - start) / precision)))
        dt = t[1] - t[0]
        dx = interpolate.splev(t, self.splrep[0], der=1)
        dy = interpolate.splev(t, self.splrep[1], der=1)
        return np.sum(np.sqrt(dx**2 + dy**2)) * dt
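The spline branch above integrates the speed of the curve on a fine parameter grid. An independent sketch of the same idea, using scipy.interpolate.splprep on a quarter circle instead of the per-coordinate splrep stored by the class (so the numbers are only approximate):

import numpy as np
from scipy import interpolate

theta = np.linspace(0, np.pi / 2, 50)
(tck, u) = interpolate.splprep([np.cos(theta), np.sin(theta)], s=0)

precision = 0.01
t = np.linspace(0, 1, int(np.ceil(1.0 / precision)))
dt = t[1] - t[0]
dx, dy = interpolate.splev(t, tck, der=1)
print(np.sum(np.sqrt(dx**2 + dy**2)) * dt)   # roughly pi/2 ~ 1.57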
Example 5: subcurve
# Required import: from scipy import sparse [as alias]
# Or: from scipy.sparse import vstack [as alias]
def subcurve(self, t0, t1):
    '''
    curve.subcurve(t0, t1) yields a curve-spline object that is equivalent to the given
    curve but that extends from curve(t0) to curve(t1) only.
    '''
    # if t1 is less than t0, then we want to actually do this in reverse...
    if t1 == t0: raise ValueError('Cannot take subcurve of a point')
    if t1 < t0:
        tt = self.curve_length()
        return self.reverse().subcurve(tt - t0, tt - t1)
    idx = [ii for (ii, t) in enumerate(self.t) if t0 < t and t < t1]
    pt0 = self(t0)
    pt1 = self(t1)
    coords = np.vstack([[pt0], self.coordinates.T[idx], [pt1]])
    ts = np.concatenate([[t0], self.t[idx], [t1]])
    dists = None if self.distances is None else np.diff(ts)
    return CurveSpline(
        coords.T,
        order=self.order,
        smoothing=self.smoothing,
        periodic=False,
        distances=dists,
        meta_data=self.meta_data)
Example 6: get_batch
# Required import: from scipy import sparse [as alias]
# Or: from scipy.sparse import vstack [as alias]
def get_batch(self, X, y):
    if self.curr == 0:
        self.add_obs(X, y)
        return X, y

    if (self.curr < self.n) and (isinstance(self.X_reserve, list)):
        if not self.has_sparse:
            old_X = np.concatenate(self.X_reserve, axis=0)
        else:
            old_X = sp_vstack(self.X_reserve)
        old_y = np.concatenate(self.y_reserve, axis=0)
    else:
        old_X = self.X_reserve[:self.curr].copy()
        old_y = self.y_reserve[:self.curr].copy()

    if X.shape[0] == 0:
        return old_X, old_y
    else:
        self.add_obs(X, y)

    if not issparse(old_X) and not issparse(X):
        return np.r_[old_X, X], np.r_[old_y, y]
    else:
        return sp_vstack([old_X, X]), np.r_[old_y, y]
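The dense/sparse dispatch at the end of get_batch can be written as a small free function (stack_rows is a hypothetical name, not part of the original class):

import numpy as np
from scipy.sparse import csr_matrix, issparse, vstack as sp_vstack

def stack_rows(old_X, X):
    # Dense-with-dense stacks via np.r_; anything sparse goes through sp_vstack.
    if not issparse(old_X) and not issparse(X):
        return np.r_[old_X, X]
    return sp_vstack([old_X, X])

print(stack_rows(np.ones((2, 3)), np.zeros((1, 3))).shape)               # (3, 3)
print(stack_rows(csr_matrix(np.ones((2, 3))), np.zeros((1, 3))).shape)   # (3, 3)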
Example 7: sparse_to_tuple
# Required import: from scipy import sparse [as alias]
# Or: from scipy.sparse import vstack [as alias]
def sparse_to_tuple(sparse_mx):
    """Convert sparse matrix to tuple representation."""
    def to_tuple(mx):
        if not sp.isspmatrix_coo(mx):
            mx = mx.tocoo()
        coords = np.vstack((mx.row, mx.col)).transpose()
        values = mx.data
        shape = mx.shape
        return coords, values, shape

    if isinstance(sparse_mx, list):
        for i in range(len(sparse_mx)):
            sparse_mx[i] = to_tuple(sparse_mx[i])
    else:
        sparse_mx = to_tuple(sparse_mx)

    return sparse_mx
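A small usage sketch of this sparse_to_tuple, assuming sp is scipy.sparse and np is numpy, matching the names used in the function body:

import numpy as np
import scipy.sparse as sp

mx = sp.csr_matrix(np.array([[0, 2], [3, 0]]))
coords, values, shape = sparse_to_tuple(mx)
print(coords)   # [[0 1]
                #  [1 0]]  -- (row, col) pairs of the nonzeros
print(values)   # [2 3]
print(shape)    # (2, 2)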
Example 8: _concatenate_dense_jac
# Required import: from scipy import sparse [as alias]
# Or: from scipy.sparse import vstack [as alias]
def _concatenate_dense_jac(jac_list):
    # Read all Jacobians sequentially, converting every block to a dense
    # numpy array, then stack the inequality and equality parts separately.
    jac_ineq_list = []
    jac_eq_list = []
    for jac_tuple in jac_list:
        J_ineq, J_eq = jac_tuple
        if spc.issparse(J_ineq):
            jac_ineq_list += [J_ineq.toarray()]
        else:
            jac_ineq_list += [np.atleast_2d(J_ineq)]
        if spc.issparse(J_eq):
            jac_eq_list += [J_eq.toarray()]
        else:
            jac_eq_list += [np.atleast_2d(J_eq)]
    # Concatenate all blocks
    J_ineq = np.vstack(jac_ineq_list)
    J_eq = np.vstack(jac_eq_list)
    return J_ineq, J_eq
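A hypothetical call, assuming spc is scipy.sparse and np is numpy as in the function body; each tuple carries one block of inequality and equality Jacobians:

import numpy as np
import scipy.sparse as spc

jac_list = [
    (spc.csr_matrix(np.eye(3)), spc.csr_matrix(np.zeros((1, 3)))),  # sparse blocks
    (np.ones(3), np.arange(3.0)),                                   # dense 1-D blocks
]
J_ineq, J_eq = _concatenate_dense_jac(jac_list)
print(J_ineq.shape, J_eq.shape)   # (4, 3) (2, 3)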
Example 9: expand_offsets
# Required import: from scipy import sparse [as alias]
# Or: from scipy.sparse import vstack [as alias]
def expand_offsets(cur_rect_l, cur_rect_u, offsets):
    '''
    Expand the per-level offsets along each dimension into the final offsets
    for all candidates, by summing each tuple in the cross product of the
    offset arrays.

    e.g. for one dimension, the two-level offsets [[0, 1, 0], [2, 4, 2]]
    expand to [2 4 2 3 5 3 2 4 2].

    cur_rect_l and cur_rect_u: coordinates of the lower and upper corner of the range.
    offsets: nested array of range offsets, indexed by dimension and level of hierarchy.
    '''
    # remove empty lists (no query at this level)
    offsets = [list(filter(lambda x: len(x) > 0, d)) for d in offsets]
    assert all([len(d) == len(offsets[0]) for d in offsets]), \
        "Shape of offsets along each dimension should match."
    if len(offsets[0]) < 1:
        return [], []
    # expand offsets across different levels
    expanded_offsets = [HierarchicalRanges.quick_product(*d).sum(axis=0) for d in offsets]
    lower = np.vstack([l + offset for l, offset in zip(cur_rect_l, expanded_offsets)]).T
    upper = np.vstack([u + offset for u, offset in zip(cur_rect_u, expanded_offsets)]).T
    return lower, upper
Example 10: select
# Required import: from scipy import sparse [as alias]
# Or: from scipy.sparse import vstack [as alias]
def select(self):
    QtQ = self.W.gram().dense_matrix()
    n = self.domain_shape[0]
    err, inv, weights, queries = self._GreedyHierByLv(
        QtQ, n, 0, withRoot=False)

    # form matrix from queries and weights
    row_list = []
    for q, w in zip(queries, weights):
        if w > 0:
            row = np.zeros(self.domain_shape[0])
            row[q[0]:q[1] + 1] = w
            row_list.append(row)
    mat = np.vstack(row_list)
    mat = sparse.csr_matrix(mat) if not sparse.issparse(mat) else mat
    return matrix.EkteloMatrix(mat)
Example 11: compute
# Required import: from scipy import sparse [as alias]
# Or: from scipy.sparse import vstack [as alias]
def compute(self, train_features, train_labels,
            test_features=None, test_labels=None):
    """Compute features for given data. Test data is optional.

    Args:
        train_features: csr_matrix: train features
        train_labels: csr_matrix: train labels
        test_features: csr_matrix: test features
        test_labels: csr_matrix: test labels
    """
    self.n_train_samples, self.n_features = train_features.shape
    self.n_labels = train_labels.shape[1]
    if test_features is not None:
        self.n_test_samples = test_features.shape[0]
        features = vstack([train_features, test_features]).tocsr()
        labels = vstack([train_labels, test_labels]).tocsr()
    else:
        features = train_features
        labels = train_labels
    self.n_avg_samples_per_label = self.compute_avg_samples_per_label(labels)
    self.n_avg_labels_per_sample = self.compute_avg_labels_per_sample(labels)
    self.avg_doc_length = self.compute_avg_doc_length(features)
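The train/test stacking step in isolation (a minimal sketch; the .tocsr() call pins the format, since sparse.vstack may otherwise return a COO matrix):

import numpy as np
from scipy.sparse import csr_matrix, vstack

train_features = csr_matrix(np.random.rand(5, 10))
test_features = csr_matrix(np.random.rand(2, 10))

features = vstack([train_features, test_features]).tocsr()
print(features.shape)    # (7, 10)
print(features.format)   # 'csr'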
Example 12: _extract
# Required import: from scipy import sparse [as alias]
# Or: from scipy.sparse import vstack [as alias]
def _extract(self, sparse_matrix, index):
    if sputils.issequence(index) and len(index) > CSR_MATRIX_INDEX_SIZE_LIMIT:
        # scipy implements indexing of a csr_matrix with a list via matrix
        # multiplication, which becomes a memory problem when the index list
        # is large.
        # Reference: https://stackoverflow.com/questions/46034212/sparse-matrix-slicing-memory-error/46040827#46040827
        # To work around this, chunk the index into smaller pieces of size
        # CSR_MATRIX_INDEX_SIZE_LIMIT and stack the extracted chunks.
        sparse_matrix_slices = []
        for offset in range(0, len(index), CSR_MATRIX_INDEX_SIZE_LIMIT):
            sparse_matrix_slices.append(sparse_matrix[index[offset: offset + CSR_MATRIX_INDEX_SIZE_LIMIT]])
        extracted_sparse_matrix = sparse.vstack(sparse_matrix_slices)
    else:
        extracted_sparse_matrix = sparse_matrix[index]
    return extracted_sparse_matrix
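The chunked-slicing workaround in isolation (a sketch with CHUNK standing in for CSR_MATRIX_INDEX_SIZE_LIMIT):

import numpy as np
from scipy import sparse

CHUNK = 1000   # stand-in for CSR_MATRIX_INDEX_SIZE_LIMIT

matrix = sparse.random(5000, 50, density=0.01, format='csr')
index = np.random.permutation(5000)[:3000]

# Slice in chunks and stack the pieces instead of indexing with one huge list.
slices = [matrix[index[i:i + CHUNK]] for i in range(0, len(index), CHUNK)]
extracted = sparse.vstack(slices)
print(extracted.shape)   # (3000, 50)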
Example 13: merge_batches
# Required import: from scipy import sparse [as alias]
# Or: from scipy.sparse import vstack [as alias]
def merge_batches(self, data):
    """Merge a list of data minibatches into a single instance representing the data.

    Parameters
    ----------
    data: list
        List of minibatches to merge

    Returns
    -------
    (anonymous): sparse matrix | pd.DataFrame | list
        Single complete list-like data merged from the given batches
    """
    if isinstance(data[0], ssp.csr_matrix):
        return ssp.vstack(data)
    if isinstance(data[0], (pd.DataFrame, pd.Series)):
        return pd.concat(data)
    return [item for sublist in data for item in sublist]
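The same dispatch written as a free function (merge is a hypothetical name), to show what each input type turns into:

import numpy as np
import pandas as pd
import scipy.sparse as ssp

def merge(data):
    if isinstance(data[0], ssp.csr_matrix):
        return ssp.vstack(data)
    if isinstance(data[0], (pd.DataFrame, pd.Series)):
        return pd.concat(data)
    return [item for sublist in data for item in sublist]

print(merge([ssp.csr_matrix(np.eye(2)), ssp.csr_matrix(np.ones((3, 2)))]).shape)   # (5, 2)
print(merge([pd.Series([1, 2]), pd.Series([3])]).tolist())                         # [1, 2, 3]
print(merge([[1, 2], [3]]))                                                        # [1, 2, 3]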
Example 14: sparse_to_tuple
# Required import: from scipy import sparse [as alias]
# Or: from scipy.sparse import vstack [as alias]
def sparse_to_tuple(sparse_mx):
    """Convert sparse matrix to tuple representation."""
    def to_tuple(mx):
        if not sp.isspmatrix_coo(mx):
            mx = mx.tocoo()
        coords = np.vstack((mx.row, mx.col)).transpose()
        values = mx.data
        shape = mx.shape
        # All of these need to be sorted into a canonical (row, col) order:
        sort_indices = np.lexsort(np.rot90(coords))
        return coords[sort_indices], values[sort_indices], shape

    if isinstance(sparse_mx, list):
        for i in range(len(sparse_mx)):
            sparse_mx[i] = to_tuple(sparse_mx[i])
    else:
        sparse_mx = to_tuple(sparse_mx)

    return sparse_mx
Example 15: sparse_to_tuple
# Required import: from scipy import sparse [as alias]
# Or: from scipy.sparse import vstack [as alias]
def sparse_to_tuple(sparse_mx, insert_batch=False):
    """Convert sparse matrix to tuple representation.

    Set insert_batch=True if you want to insert a batch dimension.
    """
    def to_tuple(mx):
        if not sp.isspmatrix_coo(mx):
            mx = mx.tocoo()
        if insert_batch:
            coords = np.vstack((np.zeros(mx.row.shape[0]), mx.row, mx.col)).transpose()
            values = mx.data
            shape = (1,) + mx.shape
        else:
            coords = np.vstack((mx.row, mx.col)).transpose()
            values = mx.data
            shape = mx.shape
        return coords, values, shape

    if isinstance(sparse_mx, list):
        for i in range(len(sparse_mx)):
            sparse_mx[i] = to_tuple(sparse_mx[i])
    else:
        sparse_mx = to_tuple(sparse_mx)

    return sparse_mx
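A usage sketch of the insert_batch variant, assuming sp is scipy.sparse and np is numpy as in the function body; the extra leading coordinate is an all-zero batch index:

import numpy as np
import scipy.sparse as sp

mx = sp.csr_matrix(np.array([[0, 5], [7, 0]]))
coords, values, shape = sparse_to_tuple(mx, insert_batch=True)
print(coords)   # [[0. 0. 1.]
                #  [0. 1. 0.]]  -- (batch, row, col) triples
print(values)   # [5 7]
print(shape)    # (1, 2, 2)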