This article collects typical usage examples of the Python method scipy.sparse.hstack. If you are wondering what exactly sparse.hstack does, how to call it, or what it looks like in real code, the curated examples below may help. You can also explore further usage examples from its parent module, scipy.sparse.
The following shows 15 code examples of sparse.hstack, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
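Before the individual examples, here is a minimal, self-contained sketch (not taken from any of the projects below) of what scipy.sparse.hstack does: it concatenates sparse blocks that share the same number of rows side by side, returning a COO matrix that is usually converted to CSR afterwards.

import numpy as np
from scipy import sparse

a = sparse.csr_matrix(np.array([[1, 0], [0, 2]]))          # shape (2, 2)
b = sparse.csr_matrix(np.array([[0, 3, 0], [4, 0, 5]]))    # shape (2, 3)

# hstack returns a COO matrix; convert to CSR for efficient row slicing.
stacked = sparse.hstack([a, b]).tocsr()
print(stacked.shape)   # (2, 5)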
Example 1: to_curve_spline
# Required imports: from scipy import sparse [as alias]
# Or: from scipy.sparse import hstack [as alias]
def to_curve_spline(obj):
    '''
    to_curve_spline(obj) yields obj if obj is a curve spline and otherwise attempts to coerce obj
      into a curve spline, raising an error if it cannot.
    '''
    if is_curve_spline(obj): return obj
    elif is_tuple(obj) and len(obj) == 2: (crds, opts) = obj
    else: (crds, opts) = (obj, {})
    if pimms.is_matrix(crds) or is_curve_spline(crds): crds = [crds]
    spls = [c for c in crds if is_curve_spline(c)]
    opts = dict(opts)
    if 'weights' not in opts and len(spls) == len(crds):
        if all(c.weights is not None for c in crds):
            opts['weights'] = np.concatenate([c.weights for c in crds])
    if 'order' not in opts and len(spls) > 0:
        opts['order'] = np.min([c.order for c in spls])
    if 'smoothing' not in opts and len(spls) > 0:
        sm = set([c.smoothing for c in spls])
        if len(sm) == 1: opts['smoothing'] = list(sm)[0]
        else: opts['smoothing'] = None
    crds = [x.crds if is_curve_spline(x) else np.asarray(x) for x in crds]
    crds = [x if x.shape[0] == 2 else x.T for x in crds]
    crds = np.hstack(crds)
    return curve_spline(crds, **opts)
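Note that this first example concatenates dense 2 x N coordinate arrays with numpy's hstack rather than scipy.sparse.hstack. A hedged, self-contained illustration of that column-wise concatenation (the coordinate values are made up):

import numpy as np

c1 = np.array([[0.0, 1.0], [0.0, 1.0]])             # 2 x 2 block of (x, y) coordinates
c2 = np.array([[2.0, 3.0, 4.0], [1.0, 0.5, 0.0]])   # 2 x 3 block
merged = np.hstack([c1, c2])                         # 2 x 5: curves joined along the column axis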
Example 2: transform
# Required imports: from scipy import sparse [as alias]
# Or: from scipy.sparse import hstack [as alias]
def transform(self, X):
    """Encode categorical columns into sparse matrix with one-hot-encoding.

    Args:
        X (pandas.DataFrame): categorical columns to encode

    Returns:
        (scipy.sparse.coo_matrix): sparse matrix encoding categorical
            variables into dummy variables
    """
    for i, col in enumerate(X.columns):
        X_col = self._transform_col(X[col], i)
        if X_col is not None:
            if i == 0:
                X_new = X_col
            else:
                X_new = sparse.hstack((X_new, X_col))
        logger.debug('{} --> {} features'.format(
            col, self.label_encoder.label_maxes[i])
        )
    return X_new
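A hedged alternative sketch of the same stacking pattern (using randomly generated placeholder blocks rather than the encoder's real per-column output): collecting all column blocks in a list and calling sparse.hstack once avoids re-stacking the growing matrix on every iteration.

from scipy import sparse

# Placeholder per-column sparse blocks; in the transform above these come from _transform_col.
blocks = [sparse.random(100, 5, density=0.1, format='csr') for _ in range(8)]
X_new = sparse.hstack(blocks).tocsr()   # one stack instead of seven incremental ones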
Example 3: predict
# Required imports: from scipy import sparse [as alias]
# Or: from scipy.sparse import hstack [as alias]
def predict(self, df):
    X_desc = self.wb_desc.transform(df["item_description"])
    X_desc = X_desc[:, self.desc_indices]
    X_name = 2 * self.cv_name.transform(df["name"])
    X_name2 = 0.5 * self.cv_name2.transform(df["name"])
    X_category0 = self.cv_cat0.transform(df['subcat_0'])
    X_category1 = self.cv_cat1.transform(df['subcat_1'])
    X_category2 = self.cv_cat2.transform(df['subcat_2'])
    X_brand = self.cv_brand.transform(df['brand_name'])
    X_condition = self.cv_condition.transform((df['item_condition_id'] + 10 * df["shipping"]).apply(str))
    df["cat_brand"] = [a + " " + b for a, b in zip(df["category_name"], df["brand_name"])]
    X_cat_brand = self.cv_cat_brand.transform(df["cat_brand"])
    X_desc3 = self.desc3.transform(df["item_description"])
    X = hstack((X_condition,
                X_desc, X_brand,
                X_category0, X_category1, X_category2,
                X_name, X_name2,
                X_cat_brand, X_desc3)).tocsr()
    return self.model.predict(X)
Example 4: get_compound_features
# Required imports: from scipy import sparse [as alias]
# Or: from scipy.sparse import hstack [as alias]
def get_compound_features(train_data, test_data, feature_gen_methods):
    train_features_list = []
    test_features_list = []
    for m in feature_gen_methods:
        train_features, test_features = m(train_data, test_data)
        train_features_list.append(train_features)
        test_features_list.append(test_features)
    train_features = train_features_list[0]
    test_features = test_features_list[0]
    for i in range(1, len(feature_gen_methods)):
        train_features = hstack((train_features, train_features_list[i]))
        test_features = hstack((test_features, test_features_list[i]))
    return train_features, test_features
Example 5: cat_onehot_encoder_m
# Required imports: from scipy import sparse [as alias]
# Or: from scipy.sparse import hstack [as alias]
def cat_onehot_encoder_m(df, y, col, selection=True):
    ## ZJN: test raise memory error
    # raise MemoryError
    mlbs = MultiLabelBinarizer(sparse_output=True).fit(df.values)
    from scipy.sparse import csr_matrix
    features_tmp = mlbs.transform(df.values)
    features_tmp = csr_matrix(features_tmp, dtype=float).tocsr()
    models = None
    auc_score = None
    if selection is True:
        auc_score, models = train_lightgbm_for_feature_selection(features_tmp, y)
        print(col, "auc", auc_score)
    #new_feature = pd.DataFrame(features_tmp,columns=["mul_feature_"+col])
    new_feature = features_tmp
    from scipy.sparse import hstack
    return new_feature, mlbs, models, auc_score
Example 6: multi_features_for_test
# Required imports: from scipy import sparse [as alias]
# Or: from scipy.sparse import hstack [as alias]
def multi_features_for_test(df, columns, mlbs, models):
    new_features = {}
    #from multiprocessing import Pool
    #pool = Pool(processes=len(columns))
    for col in columns:
        if col in mlbs:
            mlb = mlbs[col]
            #model = models[col]
            model = None
            new_features[col] = multi_feature_for_one_col(df[col], mlb, model, col)  #pool.apply_async(multi_feature_for_one_col, args=(df[col], mlb, model, col))
    new_features_list = []
    for col in columns:
        if col in new_features:
            new_features_list.append(new_features[col])
    from scipy.sparse import hstack
    new_features = hstack(new_features_list, dtype=float)
    #new_features = pd.concat(new_features_list,axis=1)
    return new_features
Example 7: fit_transform
# Required imports: from scipy import sparse [as alias]
# Or: from scipy.sparse import hstack [as alias]
def fit_transform(self, X, y=None, **fit_params):
    self._validate_transformers()
    result = Parallel(n_jobs=self.n_jobs)(
        delayed(_fit_transform_one)(
            transformer=trans,
            X=X,
            y=y,
            weight=weight,
            **fit_params)
        for name, trans, weight in self._iter())
    if not result:
        # All transformers are None
        return np.zeros((X.shape[0], 0))
    Xs, transformers = zip(*result)
    self._update_transformer_list(transformers)
    if any(sparse.issparse(f) for f in Xs):
        Xs = sparse.hstack(Xs).tocsr()
    else:
        Xs = self.merge_dataframes_by_column(Xs)
    return Xs
Example 8: transform
# Required imports: from scipy import sparse [as alias]
# Or: from scipy.sparse import hstack [as alias]
def transform(self, X):
    Xs = Parallel(n_jobs=self.n_jobs)(
        delayed(_transform_one)(
            transformer=trans,
            X=X,
            y=None,
            weight=weight)
        for name, trans, weight in self._iter())
    if not Xs:
        # All transformers are None
        return np.zeros((X.shape[0], 0))
    if any(sparse.issparse(f) for f in Xs):
        Xs = sparse.hstack(Xs).tocsr()
    else:
        Xs = self.merge_dataframes_by_column(Xs)
    return Xs
Example 9: _propagate_features
# Required imports: from scipy import sparse [as alias]
# Or: from scipy.sparse import hstack [as alias]
def _propagate_features(self, task):
    """Propagate features from input array to output array."""
    p_out, p_in = self.job.predict_out, self.job.predict_in

    # Check for loss of obs between layers (i.e. with blendindex)
    n_in, n_out = p_in.shape[0], p_out.shape[0]
    r = int(n_in - n_out)

    if not issparse(p_in):
        # Simple item setting
        p_out[:, :task.n_feature_prop] = p_in[r:, task.propagate_features]
    else:
        # Need to populate propagated features using scipy sparse hstack
        self.job.predict_out = hstack(
            [p_in[r:, task.propagate_features],
             p_out[:, task.n_feature_prop:]]
        ).tolil()
Example 10: transform
# Required imports: from scipy import sparse [as alias]
# Or: from scipy.sparse import hstack [as alias]
def transform(self, X):
    """Transform X separately by each transformer, concatenate results.

    Parameters
    ----------
    X : iterable or array-like, depending on transformers
        Input data to be transformed.

    Returns
    -------
    X_t : array-like or sparse matrix, shape (n_samples, sum_n_components)
        hstack of results of transformers. sum_n_components is the
        sum of n_components (output dimension) over transformers.
    """
    Xs = Parallel(n_jobs=self.n_jobs)(
        delayed(_transform_one)(trans, X, None, weight)
        for name, trans, weight in self._iter())
    if not Xs:
        # All transformers are None
        return np.zeros((X.shape[0], 0))
    if any(sparse.issparse(f) for f in Xs):
        Xs = sparse.hstack(Xs).tocsr()
    else:
        Xs = np.hstack(Xs)
    return Xs
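A hedged sketch of the branch above (with made-up blocks): scipy.sparse.hstack also accepts dense arrays mixed in with sparse ones, so the sparse path is taken as soon as any transformer returns a sparse matrix, while np.hstack is only used when every block is dense.

import numpy as np
from scipy import sparse

blocks = [sparse.eye(3, format='csr'), np.ones((3, 2))]   # one sparse block, one dense block
if any(sparse.issparse(b) for b in blocks):
    out = sparse.hstack(blocks).tocsr()   # sparse result, shape (3, 5)
else:
    out = np.hstack(blocks)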
Example 11: _setup_metric
# Required imports: from scipy import sparse [as alias]
# Or: from scipy.sparse import hstack [as alias]
def _setup_metric(X, true_labels, inv_psp=None, k=5):
    assert compatible_shapes(X, true_labels), \
        "ground truth and prediction matrices must have same shape."
    num_instances, num_labels = true_labels.shape
    indices = _get_topk(X, num_labels, k)
    ps_indices = None
    if inv_psp is not None:
        ps_indices = _get_topk(
            true_labels.dot(
                sp.spdiags(inv_psp, diags=0,
                           m=num_labels, n=num_labels)),
            num_labels, k)
        inv_psp = np.hstack([inv_psp, np.zeros((1))])

    true_labels = sp.hstack([true_labels,
                             sp.lil_matrix((num_instances, 1),
                                           dtype=np.int32)]).tocsr()
    return indices, true_labels, ps_indices, inv_psp
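A hedged, self-contained illustration of the padding step above (with a tiny made-up label matrix): appending an all-zero column so that an extra "padding" label index can be referenced safely.

import numpy as np
from scipy import sparse as sp

labels = sp.csr_matrix(np.array([[1, 0], [0, 1]], dtype=np.int32))   # 2 instances, 2 labels
padded = sp.hstack([labels,
                    sp.lil_matrix((labels.shape[0], 1), dtype=np.int32)]).tocsr()
print(padded.shape)   # (2, 3): one extra all-zero column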
Example 12: __init__
# Required imports: from scipy import sparse [as alias]
# Or: from scipy.sparse import hstack [as alias]
def __init__(self, labels_ops):
    """
    Encapsulates a set of linearly independent operators.

    :param (list|tuple) labels_ops: Sequence of tuples (label, operator) where label is a string
        and operator a qutip.Qobj operator representation.
    """
    self.ops_by_label = OrderedDict(labels_ops)
    self.labels = list(self.ops_by_label.keys())
    self.ops = list(self.ops_by_label.values())
    self.dim = len(self.ops)

    # the basis change transformation matrix from a representation in the operator basis
    # to the original basis. We enforce CSR sparse matrix representation to have efficient
    # matrix vector products.
    self.basis_transform = sphstack([qt.operator_to_vector(opj).data
                                     for opj in self.ops]).tocsr()
    self._metric = None
    self._is_orthonormal = None
    self._all_hermitian = None
Example 13: transform
# Required imports: from scipy import sparse [as alias]
# Or: from scipy.sparse import hstack [as alias]
def transform(self, X):
    """Transform X separately by each transformer, concatenate results.

    Parameters
    ----------
    X : iterable or array-like, depending on transformers
        Input data to be transformed.

    Returns
    -------
    X_t : array-like or sparse matrix, shape (n_samples, sum_n_components)
        hstack of results of transformers. sum_n_components is the
        sum of n_components (output dimension) over transformers.
    """
    paral_params = [[X[t['col_pick']] if hasattr(t, 'col_pick') else X, t] for _, t, _ in self._iter()]
    Xs = Apply(transform_one, self.batcher).transform(paral_params)
    if not Xs:
        # All transformers are None
        return np.zeros((X.shape[0], 0))
    if self.concatenate:
        if any(sparse.issparse(f) for f in Xs):
            Xs = sparse.hstack(Xs).tocsr()
        else:
            Xs = np.hstack(Xs)
    return Xs
Example 14: generate_train_batch
# Required imports: from scipy import sparse [as alias]
# Or: from scipy.sparse import hstack [as alias]
def generate_train_batch(self):
    users, pos_items, neg_items = self._generate_train_cf_batch()
    u_sp = self.user_one_hot[users]
    pos_i_sp = self.kg_feat_mat[pos_items]
    neg_i_sp = self.kg_feat_mat[neg_items]

    # Horizontally stack sparse matrices to get single positive & negative feature matrices
    pos_feats = sp.hstack([u_sp, pos_i_sp])
    neg_feats = sp.hstack([u_sp, neg_i_sp])

    batch_data = {}
    batch_data['pos_feats'] = pos_feats
    batch_data['neg_feats'] = neg_feats
    return batch_data
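A hedged, shape-only sketch of the stacking above (the matrices are random placeholders, not real one-hot or knowledge-graph features): each batch row ends up as the user block concatenated with the item block.

import scipy.sparse as sp

u_sp = sp.random(4, 10, density=0.1, format='csr')       # batch of 4 users, 10-dim user block
pos_i_sp = sp.random(4, 32, density=0.2, format='csr')   # matching 4 item feature rows
pos_feats = sp.hstack([u_sp, pos_i_sp])                   # shape (4, 42)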
Example 15: generate_test_feed_dict
# Required imports: from scipy import sparse [as alias]
# Or: from scipy.sparse import hstack [as alias]
def generate_test_feed_dict(self, model, user_batch, item_batch, drop_flag=True):
    user_list = np.repeat(user_batch, len(item_batch)).tolist()
    item_list = list(item_batch) * len(user_batch)

    u_sp = self.user_one_hot[user_list]
    pos_i_sp = self.kg_feat_mat[item_list]

    # Horizontally stack sparse matrices to get a single positive feature matrix
    pos_feats = sp.hstack([u_sp, pos_i_sp])
    pos_indices, pos_values, pos_shape = self._extract_sp_info(pos_feats)

    feed_dict = {
        model.pos_indices: pos_indices,
        model.pos_values: pos_values,
        model.pos_shape: pos_shape,
        model.mess_dropout: [0.] * len(eval(self.args.layer_size))
    }
    return feed_dict