本文整理汇总了Python中scipy.vstack方法的典型用法代码示例。如果您正苦于以下问题:Python scipy.vstack方法的具体用法?Python scipy.vstack怎么用?Python scipy.vstack使用的例子?那么恭喜您, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类scipy
的用法示例。
在下文中一共展示了scipy.vstack方法的7个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: Intersection_DML
# 需要导入模块: import scipy [as 别名]
# 或者: from scipy import vstack [as 别名]
def Intersection_DML(feat, M, query=None,
                     is_sparse=False, is_trans=False):
    """Histogram-intersection distance with a DML projection.

    Each query row is first projected through M; entry (i, j) is the
    histogram-intersection score sum(min(query[i].dot(M), feat[j])).

    Args:
        feat: (n, d) array of database features.
        M: (d, d) metric / projection matrix applied to query rows.
        query: optional (m, d) array of query features; defaults to feat.
        is_sparse, is_trans: unused, kept for interface compatibility.

    Returns:
        (m, n) ndarray of intersection scores.
    """
    if query is None:
        query = feat
    qnum = query.shape[0]
    fnum = feat.shape[0]
    dist = np.zeros((qnum, fnum))
    for i in range(qnum):
        # Bind the projected row to a local; the original rebound `query`
        # itself here, corrupting every iteration after the first.
        q = query[i, :].dot(M)
        for j in range(fnum):
            # Element-wise min + sum == histogram intersection. np.minimum
            # replaces sp.vstack(...).min(0): scipy no longer re-exports
            # numpy's vstack (removed in scipy >= 1.12).
            dist[i, j] = np.minimum(q, feat[j, :]).sum()
    return dist
示例2: __getitem__
# 需要导入模块: import scipy [as 别名]
# 或者: from scipy import vstack [as 别名]
def __getitem__(self, item):  # TODO should be right for all the common use... But better write down a TestCase
    """Index the dataset.

    int -> one context row; slice -> stacked rows; (rows, columns) tuple ->
    a single element (int, int) or a column-sliced stack of context rows.
    """
    if hasattr(self, 'process_all') and self.process_all:  # keep attr check!
        # Data already fully materialised: defer to the underlying array.
        return self.data[item]
    if isinstance(item, int):
        return self.get_context(item=item)
    if isinstance(item, tuple):
        if len(item) == 2:
            rows, columns = item
            if isinstance(rows, int) and isinstance(columns, int):  # TODO check here
                # do you want the particular element?
                return self.get_context(item=rows)[columns]
        else:
            # Only 2-tuples (rows, columns) are supported.
            raise TypeError('NOT IMPLEMENTED <|>')
        if isinstance(rows, slice):
            rows = range(*rows.indices(self.shape[0]))
        # NOTE(review): if `rows` is an int paired with non-int `columns`,
        # the comprehension below iterates an int and fails — presumably
        # callers never pass that combination; confirm and guard if needed.
        return np.vstack([self.get_context(r) for r in rows])[:, columns]
    else:
        if isinstance(item, slice):
            item = range(*item.indices(self.shape[0]))
        return np.vstack([self.get_context(r) for r in item])
示例3: Intersection
# 需要导入模块: import scipy [as 别名]
# 或者: from scipy import vstack [as 别名]
def Intersection(feat, query=None,
                 is_sparse=False, is_trans=False):
    """Pairwise histogram-intersection distance.

    Entry (i, j) is sum(min(query[i], feat[j])), the classic histogram
    intersection score between the two feature vectors.

    Args:
        feat: (n, d) array of database features.
        query: optional (m, d) array of query features; defaults to feat.
        is_sparse, is_trans: unused, kept for interface compatibility.

    Returns:
        (m, n) ndarray of intersection scores.
    """
    if query is None:
        query = feat
    qnum = query.shape[0]
    fnum = feat.shape[0]
    dist = np.zeros((qnum, fnum))
    for i in range(qnum):
        for j in range(fnum):
            # np.minimum replaces sp.vstack(...).min(0): scipy no longer
            # re-exports numpy's vstack (removed in scipy >= 1.12).
            dist[i, j] = np.minimum(query[i, :], feat[j, :]).sum()
    return dist
示例4: stack
# 需要导入模块: import scipy [as 别名]
# 或者: from scipy import vstack [as 别名]
def stack(*datasets):
    """Stack several structurally-identical datasets into one.

    Data, targets and sample_info are stacked row-wise; each info key that
    appears in any dataset maps to the list of its per-dataset values.

    :param datasets: datasets sharing the same structure
    :return: the stacked dataset
    """
    merged_info = {key: [ds.info.get(key, None) for ds in datasets]
                   for key in merge_dicts(*[ds.info for ds in datasets])}
    return Dataset(
        data=vstack([ds.data for ds in datasets]),
        target=stack_or_concat([ds.target for ds in datasets]),
        sample_info=stack_or_concat([ds.sample_info for ds in datasets]),
        info=merged_info,
    )
示例5: stack_or_concat
# 需要导入模块: import scipy [as 别名]
# 或者: from scipy import vstack [as 别名]
def stack_or_concat(list_of_arays):
    """Combine arrays along the first axis.

    1-D inputs are concatenated into one flat array; anything of higher
    rank is stacked row-wise with np.vstack.
    """
    is_flat = list_of_arays[0].ndim == 1
    combine = np.concatenate if is_flat else np.vstack
    return combine(list_of_arays)
示例6: vstack
# 需要导入模块: import scipy [as 别名]
# 或者: from scipy import vstack [as 别名]
def vstack(lst):
    """Vstack that considers sparse matrices.

    :param lst: list of numpy arrays or scipy sparse matrices
    :return: the row-wise stack, sparse if the input is sparse
    """
    # sp.sparse.issparse covers every sparse format, not just the private
    # csr class path the original tested; sp.sparse.vstack keeps the result
    # sparse, whereas the old sp.vstack call went through scipy's (removed)
    # re-export of numpy.vstack, which does not stack sparse inputs.
    if sp is not None and sp.sparse.issparse(lst[0]):
        return sp.sparse.vstack(lst)
    return np.vstack(lst)
示例7: train
# 需要导入模块: import scipy [as 别名]
# 或者: from scipy import vstack [as 别名]
def train(self, datadir, pickle_model=""):
    """Train the model on review JSON files found in `datadir`.

    Streams every file in `datadir` line by line, keeps one review in
    eight that carries an "Overall" rating, vectorises the texts in
    100k-row batches on a background thread (pipelined with reading),
    then fits self.model with the last 1000 rows held out as validation.
    If `pickle_model` is non-empty, saves the model and a gzip-pickled
    copy of the wordbatch transformer alongside it.
    """
    texts= []
    labels= []
    training_data = os.listdir(datadir)
    rcount= 0
    texts2= []
    batchsize= 100000
    batch_data = BatchData()
    p_input= None  # background vectorisation thread, if one is in flight
    for jsonfile in training_data:
        with open(datadir + "/" + jsonfile, 'r') as inputfile:
            for line in inputfile:
                #if rcount > 1000000: break
                # Best-effort parse: silently skip malformed JSON lines.
                try: line= json.loads(line.strip())
                except: continue
                for review in line["Reviews"]:
                    rcount+= 1
                    if rcount % 100000 == 0: print(rcount)
                    # Subsample: keep only every 8th review.
                    if rcount % 8 != 0: continue
                    if "Overall" not in review["Ratings"]: continue
                    texts.append(review["Content"])
                    # Map the 1-5 star rating onto [-1, 1].
                    labels.append((float(review["Ratings"]["Overall"]) - 3) *0.5)
                    if len(texts) % batchsize == 0:
                        # Pipeline: wait for the previous batch to finish,
                        # collect its result, then vectorise this batch in
                        # the background while reading continues.
                        if p_input != None:
                            p_input.join()
                            texts2.append(batch_data.texts)
                        p_input = threading.Thread(target=self.transform_batch, args=(texts, batch_data))
                        p_input.start()
                        texts= []
    # Drain the last in-flight batch, then vectorise the remainder inline.
    if p_input != None:
        p_input.join()
        texts2.append(batch_data.texts)
    texts2.append(self.wb.partial_fit_transform(texts))
    del(texts)
    # NOTE(review): sp.vstack here assumes the batches stack with scipy's
    # top-level vstack; if transform_batch yields sparse matrices this
    # should be scipy.sparse.vstack — confirm against transform_batch.
    texts= sp.vstack(texts2)
    self.wb.dictionary_freeze = True
    # Hold out the last 1000 rows for validation.
    # NOTE(review): np.array over a sparse slice would wrap it in a 0-d
    # object array rather than densify — confirm the batches are dense.
    test= (np.array(texts[-1000:]), np.array(labels[-1000:]))
    train = (np.array(texts[:-1000]), np.array(labels[:-1000]))
    self.model.fit(train[0], train[1], batch_size=2048, epochs=2, validation_data=(test[0], test[1]))
    if pickle_model != "":
        self.model.save(pickle_model)
        # Detach the parallel backend before pickling the transformer
        # (backend handles are not picklable), then restore it afterwards.
        backend = self.wb.batcher.backend
        backend_handle = self.wb.batcher.backend_handle
        self.wb.batcher.backend = "serial"
        self.wb.batcher.backend_handle = None
        with gzip.open(pickle_model + ".wb", 'wb') as model_file: pkl.dump(self.wb, model_file, protocol=2)
        self.wb.batcher.backend = backend
        self.wb.batcher.backend_handle = backend_handle