This article collects typical usage examples of the numpy.hstack method in Python. If you are wondering what numpy.hstack does, how to call it, or what real code that uses it looks like, the curated examples below may help. You can also explore further usage examples from the numpy module.
The section below presents 15 code examples of numpy.hstack, ordered by popularity by default.
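Before the examples, a minimal sketch of what numpy.hstack does: it concatenates arrays column-wise (along axis 1) for 2-D inputs, and end-to-end for 1-D inputs. The arrays below are illustrative values only.
import numpy as np
a = np.array([[1, 2], [3, 4]])
b = np.array([[5], [6]])
print(np.hstack((a, b)))                      # [[1 2 5]
                                              #  [3 4 6]]
print(np.hstack((np.zeros(2), np.ones(2))))   # [0. 0. 1. 1.]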
Example 1: add_intercept
# Required module: import numpy [as alias]
# Or: from numpy import hstack [as alias]
def add_intercept(self, X):
"""Add 1's to data as last features."""
# Data shape
N, D = X.shape
# Check whether an intercept column (a column of all 1's, summing to N) already exists
if np.any(np.sum(X, axis=0) == N):
# Report
print('Intercept is not the last feature. Swapping..')
# Find which column contains the intercept
intercept_index = np.argwhere(np.sum(X, axis=0) == N)
# Swap intercept to last
X = X[:, np.setdiff1d(np.arange(D), intercept_index)]
# Add intercept as last column
X = np.hstack((X, np.ones((N, 1))))
# Append column of 1's to data, and increment dimensionality
return X, D+1
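A minimal standalone check of the hstack call used above, with a hypothetical 5 x 3 data matrix (the surrounding class is omitted):
import numpy as np
X = np.random.rand(5, 3)
Xb = np.hstack((X, np.ones((5, 1))))   # append a column of 1's as the intercept
assert Xb.shape == (5, 4)
assert np.all(Xb[:, -1] == 1)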
Example 2: test_one_hot
# Required module: import numpy [as alias]
# Or: from numpy import hstack [as alias]
def test_one_hot():
"""Check if one_hot returns correct label matrices."""
# Generate label vector
y = np.hstack((np.ones((10,))*0,
np.ones((10,))*1,
np.ones((10,))*2))
# Map to matrix
Y, labels = one_hot(y)
# Check for only 0's and 1's
assert len(np.setdiff1d(np.unique(Y), [0, 1])) == 0
# Check for correct labels
assert np.all(labels == np.unique(y))
# Check correct shape of matrix
assert Y.shape[0] == y.shape[0]
assert Y.shape[1] == len(labels)
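For 1-D inputs such as the label vector built above, np.hstack is simply concatenation; a minimal sketch with three labels of three samples each:
import numpy as np
y = np.hstack((np.ones(3) * 0, np.ones(3) * 1, np.ones(3) * 2))
print(y)              # [0. 0. 0. 1. 1. 1. 2. 2. 2.]
print(np.unique(y))   # [0. 1. 2.]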
Example 3: build
# Required module: import numpy [as alias]
# Or: from numpy import hstack [as alias]
def build(self):
#{{{
import numpy as np;
self.W = shared((self.input_dim, 4 * self.output_dim),
name='{}_W'.format(self.name))
self.U = shared((self.output_dim, 4 * self.output_dim),
name='{}_U'.format(self.name))
self.b = K.variable(np.hstack((np.zeros(self.output_dim),
K.get_value(self.forget_bias_init(
(self.output_dim,))),
np.zeros(self.output_dim),
np.zeros(self.output_dim))),
name='{}_b'.format(self.name))
#self.c_0 = shared((self.output_dim,), name='{}_c_0'.format(self.name) )
#self.h_0 = shared((self.output_dim,), name='{}_h_0'.format(self.name) )
self.c_0=np.zeros(self.output_dim).astype(theano.config.floatX);
self.h_0=np.zeros(self.output_dim).astype(theano.config.floatX);
self.params=[self.W,self.U,
self.b,
# self.c_0,self.h_0
];
#}}}
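The hstack in build concatenates the bias vectors of the four LSTM gates into one flat parameter, with only the forget-gate block initialized away from zero. A plain-NumPy sketch of that layout, assuming a hypothetical output_dim of 4 and a forget bias of 1.0:
import numpy as np
output_dim, forget_bias = 4, 1.0     # hypothetical sizes
b = np.hstack((np.zeros(output_dim),
               forget_bias * np.ones(output_dim),   # forget-gate block
               np.zeros(output_dim),
               np.zeros(output_dim)))
print(b)        # [0. 0. 0. 0. 1. 1. 1. 1. 0. 0. 0. 0.]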
Example 4: _get_rois_blob
# Required module: import numpy [as alias]
# Or: from numpy import hstack [as alias]
def _get_rois_blob(im_rois, im_scale_factors):
"""Converts RoIs into network inputs.
Arguments:
im_rois (ndarray): R x 4 matrix of RoIs in original image coordinates
im_scale_factors (list): scale factors as returned by _get_image_blob
Returns:
blob (ndarray): R x 5 matrix of RoIs in the image pyramid
"""
rois_blob_real = []
for i in range(len(im_scale_factors)):
rois, levels = _project_im_rois(im_rois, np.array([im_scale_factors[i]]))
rois_blob = np.hstack((levels, rois))
rois_blob_real.append(rois_blob.astype(np.float32, copy=False))
return rois_blob_real
Developer: Sunarker, Project: Collaborative-Learning-for-Weakly-Supervised-Object-Detection, Lines of code: 18, Source file: test.py
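The hstack in _get_rois_blob prepends the pyramid-level index to each box, turning an R x 4 RoI matrix into the R x 5 blob described in the docstring. A minimal sketch with two hypothetical boxes at level 0:
import numpy as np
rois = np.array([[10, 20, 30, 40],
                 [15, 25, 35, 45]], dtype=np.float32)   # R x 4 boxes
levels = np.zeros((2, 1), dtype=np.float32)             # level index per box
blob = np.hstack((levels, rois))
print(blob.shape)   # (2, 5)
print(blob[0])      # [ 0. 10. 20. 30. 40.]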
Example 5: output_shrink
# Required module: import numpy [as alias]
# Or: from numpy import hstack [as alias]
def output_shrink(K, L):
"""
Shrink the convolution output to half its size.
Used when both the annihilating filter and the uniform samples of sinusoids
satisfy Hermitian symmetry.
:param K: the annihilating filter has size K + 1
:param L: length of the (complex-valued) b vector
:return: a block-diagonal selection matrix that keeps (roughly) the first half
of the real part and the first half of the imaginary part
"""
out_len = L - K
if out_len % 2 == 0:
half_out_len = int(out_len / 2.)  # np.int is removed in recent NumPy; use the builtin
mtx_r = np.hstack((np.eye(half_out_len),
np.zeros((half_out_len, half_out_len))))
mtx_i = mtx_r
else:
half_out_len = int((out_len + 1) / 2.)
mtx_r = np.hstack((np.eye(half_out_len),
np.zeros((half_out_len, half_out_len - 1))))
mtx_i = np.hstack((np.eye(half_out_len - 1),
np.zeros((half_out_len - 1, half_out_len))))
return linalg.block_diag(mtx_r, mtx_i)
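Each hstack in output_shrink pads an identity matrix with zero columns so that only the first half of a vector is kept; scipy.linalg.block_diag then applies this separately to the real and imaginary parts. A minimal sketch of the even-length branch with a hypothetical half_out_len of 3:
import numpy as np
from scipy import linalg
half_out_len = 3
mtx_r = np.hstack((np.eye(half_out_len),
                   np.zeros((half_out_len, half_out_len))))
print(mtx_r.shape)                             # (3, 6)
print(linalg.block_diag(mtx_r, mtx_r).shape)   # (6, 12)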
Example 6: _N
# Required module: import numpy [as alias]
# Or: from numpy import hstack [as alias]
def _N(self,s,r):
"""
Lagrange interpolation shape functions.
params:
s, r: natural coordinates of the evaluation point (2-array).
returns:
2x(2x4) shape function matrix.
"""
la1=(1-s)/2
la2=(1+s)/2
lb1=(1-r)/2
lb2=(1+r)/2
N1=la1*lb1
N2=la1*lb2
N3=la2*lb1
N4=la2*lb2
N = np.hstack((N1*np.eye(2), N2*np.eye(2), N3*np.eye(2), N4*np.eye(2)))  # hstack takes a single tuple of arrays
return N
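A quick check of the corrected hstack call in _N, evaluated at the element centre s = r = 0, where all four shape functions equal 1/4:
import numpy as np
s = r = 0.0
la1, la2 = (1 - s) / 2, (1 + s) / 2
lb1, lb2 = (1 - r) / 2, (1 + r) / 2
N1, N2, N3, N4 = la1 * lb1, la1 * lb2, la2 * lb1, la2 * lb2
N = np.hstack((N1 * np.eye(2), N2 * np.eye(2), N3 * np.eye(2), N4 * np.eye(2)))
print(N.shape)      # (2, 8)
print(N[0, ::2])    # [0.25 0.25 0.25 0.25]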
Example 7: wave2input_image
# Required module: import numpy [as alias]
# Or: from numpy import hstack [as alias]
def wave2input_image(wave, window, pos=0, pad=0):
# sride, height, dif, bias and scale are module-level constants defined elsewhere in the original source
wave_image = np.hstack([wave[pos+i*sride:pos+(i+pad*2)*sride+dif].reshape(height+pad*2, sride) for i in range(256//sride)])[:,:254]
wave_image *= window
spectrum_image = np.fft.fft(wave_image, axis=1)
input_image = np.abs(spectrum_image[:,:128].reshape(1, height+pad*2, 128), dtype=np.float32)
np.clip(input_image, 1000, None, out=input_image)
np.log(input_image, out=input_image)
input_image += bias
input_image /= scale
if np.max(input_image) > 0.95:
print('input image max bigger than 0.95', np.max(input_image))
if np.min(input_image) < 0.05:
print('input image min smaller than 0.05', np.min(input_image))
return input_image
Example 8: backward_process
# Required module: import numpy [as alias]
# Or: from numpy import hstack [as alias]
def backward_process(self, x, y, W, neuron_output):
backward_output = []
layer_num = len(neuron_output)
score = np.dot(np.hstack((1, neuron_output[layer_num - 2])), W[layer_num - 1])
error_gradient = np.array([-2 * (y - neuron_output[layer_num - 1][0]) * self.tanh_prime(score)])
# error_gradient = np.array([np.sum(-2 * (y - score) * np.hstack((1, neuron_output[layer_num-2])))])
backward_output.insert(0, error_gradient)
# Hidden layer
for i in range(layer_num - 2, -1, -1):
if i == 0:
score = np.dot(x, W[i])
else:
score = np.dot(np.hstack((1, neuron_output[i - 1])), W[i])
error_gradient = np.dot(error_gradient, W[i + 1][1:].transpose()) * self.tanh_prime(score)
backward_output.insert(0, error_gradient)
return backward_output
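The recurring pattern np.hstack((1, vector)) in backward_process prepends the constant bias input to a layer's activations before the dot product with the weight matrix; a minimal sketch:
import numpy as np
neuron_out = np.array([0.2, -0.5, 0.7])   # hypothetical activations
with_bias = np.hstack((1, neuron_out))
print(with_bias)    # [ 1.   0.2 -0.5  0.7]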
Example 9: make_data_iter_plan
# Required module: import numpy [as alias]
# Or: from numpy import hstack [as alias]
def make_data_iter_plan(self):
"make a random data iteration plan"
# truncate each bucket into multiple of batch-size
bucket_n_batches = []
for i in range(len(self.data)):
bucket_n_batches.append(len(self.data[i]) // self.batch_size)  # integer division so np.zeros below gets an int
self.data[i] = self.data[i][:int(bucket_n_batches[i]*self.batch_size)]
bucket_plan = np.hstack([np.zeros(n, int)+i for i, n in enumerate(bucket_n_batches)])
np.random.shuffle(bucket_plan)
bucket_idx_all = [np.random.permutation(len(x)) for x in self.data]
self.bucket_plan = bucket_plan
self.bucket_idx_all = bucket_idx_all
self.bucket_curr_idx = [0 for x in self.data]
self.data_buffer = []
self.label_buffer = []
for i_bucket in range(len(self.data)):
data = np.zeros((self.batch_size, self.buckets[i_bucket]))
label = np.zeros((self.batch_size, self.buckets[i_bucket]))
self.data_buffer.append(data)
self.label_buffer.append(label)
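The hstack in make_data_iter_plan flattens one run of bucket indices per bucket into a single shuffled plan; a sketch with three hypothetical buckets containing 2, 1 and 3 batches:
import numpy as np
bucket_n_batches = [2, 1, 3]
bucket_plan = np.hstack([np.zeros(n, int) + i
                         for i, n in enumerate(bucket_n_batches)])
print(bucket_plan)               # [0 0 1 2 2 2]
np.random.shuffle(bucket_plan)   # in-place shuffle, as in the example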
Example 10: test_lstm_forget_bias
# Required module: import numpy [as alias]
# Or: from numpy import hstack [as alias]
def test_lstm_forget_bias():
forget_bias = 2.0
stack = gluon.rnn.SequentialRNNCell()
stack.add(gluon.rnn.LSTMCell(100, i2h_bias_initializer=mx.init.LSTMBias(forget_bias), prefix='l0_'))
stack.add(gluon.rnn.LSTMCell(100, i2h_bias_initializer=mx.init.LSTMBias(forget_bias), prefix='l1_'))
dshape = (32, 1, 200)
data = mx.sym.Variable('data')
sym, _ = stack.unroll(1, data, merge_outputs=True)
mod = mx.mod.Module(sym, label_names=None, context=mx.cpu(0))
mod.bind(data_shapes=[('data', dshape)], label_shapes=None)
mod.init_params()
bias_argument = next(x for x in sym.list_arguments() if x.endswith('i2h_bias'))
expected_bias = np.hstack([np.zeros((100,)),
forget_bias * np.ones(100, ), np.zeros((2 * 100,))])
assert_allclose(mod.get_params()[0][bias_argument].asnumpy(), expected_bias)
Example 11: test_lstm_forget_bias
# Required module: import numpy [as alias]
# Or: from numpy import hstack [as alias]
def test_lstm_forget_bias():
forget_bias = 2.0
stack = mx.rnn.SequentialRNNCell()
stack.add(mx.rnn.LSTMCell(100, forget_bias=forget_bias, prefix='l0_'))
stack.add(mx.rnn.LSTMCell(100, forget_bias=forget_bias, prefix='l1_'))
dshape = (32, 1, 200)
data = mx.sym.Variable('data')
sym, _ = stack.unroll(1, data, merge_outputs=True)
mod = mx.mod.Module(sym, label_names=None, context=mx.cpu(0))
mod.bind(data_shapes=[('data', dshape)], label_shapes=None)
mod.init_params()
bias_argument = next(x for x in sym.list_arguments() if x.endswith('i2h_bias'))
expected_bias = np.hstack([np.zeros((100,)),
forget_bias * np.ones(100, ), np.zeros((2 * 100,))])
assert_allclose(mod.get_params()[0][bias_argument].asnumpy(), expected_bias)
Example 12: breastcancer_cont
# Required module: import numpy [as alias]
# Or: from numpy import hstack [as alias]
def breastcancer_cont(replication=2):
f = open(path + "breast_cancer_wisconsin_cont.txt", "r")
data = np.loadtxt(f, delimiter=",", dtype=np.bytes_)  # np.string0 is removed in recent NumPy
x_train = np.array(data[:, range(0, 9)])
y_train = np.array(data[:, 9])
for j in range(replication - 1):
x_train = np.vstack([x_train, data[:, range(0, 9)]])
y_train = np.hstack([y_train, data[:, 9]])
x_train = np.array(x_train, dtype=float)
f = open(path + "breast_cancer_wisconsin_cont_test.txt")
data = np.loadtxt(f, delimiter=",", dtype=np.bytes_)
x_test = np.array(data[:, range(0, 9)])
y_test = np.array(data[:, 9])
for j in range(replication - 1):
x_test = np.vstack([x_test, data[:, range(0, 9)]])
y_test = np.hstack([y_test, data[:, 9]])
x_test = np.array(x_test, dtype=float)
return x_train, y_train, x_test, y_test
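The loader above replicates the data set by stacking feature rows with vstack and concatenating label vectors with hstack; examples 13 and 14 below use the same pattern. A minimal sketch with a tiny hypothetical data block:
import numpy as np
data = np.array([[1.0, 2.0, 0.0],
                 [3.0, 4.0, 1.0]])    # last column plays the role of the label
x, y = data[:, :2], data[:, 2]
replication = 2
for _ in range(replication - 1):
    x = np.vstack([x, data[:, :2]])
    y = np.hstack([y, data[:, 2]])
print(x.shape, y.shape)               # (4, 2) (4,)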
Example 13: breastcancer_disc
# Required module: import numpy [as alias]
# Or: from numpy import hstack [as alias]
def breastcancer_disc(replication=2):
f = open(path + "breast_cancer_wisconsin_disc.txt")
data = np.loadtxt(f, delimiter=",")
x_train = data[:, range(1, 10)]
y_train = data[:, 10]
for j in range(replication - 1):
x_train = np.vstack([x_train, data[:, range(1, 10)]])
y_train = np.hstack([y_train, data[:, 10]])
f = open(path + "breast_cancer_wisconsin_disc_test.txt")
data = np.loadtxt(f, delimiter=",")
x_test = data[:, range(1, 10)]
y_test = data[:, 10]
for j in range(replication - 1):
x_test = np.vstack([x_test, data[:, range(1, 10)]])
y_test = np.hstack([y_test, data[:, 10]])
return x_train, y_train, x_test, y_test
Example 14: iris
# Required module: import numpy [as alias]
# Or: from numpy import hstack [as alias]
def iris(replication=2):
f = open(path + "iris.txt")
data = np.loadtxt(f, delimiter=",", dtype=np.bytes_)
x_train = np.array(data[:, range(0, 4)], dtype=float)
y_train = data[:, 4]
for j in range(replication - 1):
x_train = np.vstack([x_train, data[:, range(0, 4)]])
y_train = np.hstack([y_train, data[:, 4]])
x_train = np.array(x_train, dtype=float)
f = open(path + "iris_test.txt")
data = np.loadtxt(f, delimiter=",", dtype=np.bytes_)
x_test = np.array(data[:, range(0, 4)], dtype=float)
y_test = data[:, 4]
for j in range(replication - 1):
x_test = np.vstack([x_test, data[:, range(0, 4)]])
y_test = np.hstack([y_test, data[:, 4]])
x_test = np.array(x_test, dtype=float)
return x_train, y_train, x_test, y_test
Example 15: regression_data
# Required module: import numpy [as alias]
# Or: from numpy import hstack [as alias]
def regression_data():
f = open(path + "regression_data1.txt")
data = np.loadtxt(f, delimiter=",")
x1 = np.insert(data[:, 0].reshape(len(data), 1), 0, np.ones(len(data)), axis=1)
y1 = data[:, 1]
f = open(path + "regression_data2.txt")
data = np.loadtxt(f, delimiter=",")
x2 = np.insert(data[:, 0].reshape(len(data), 1), 0, np.ones(len(data)), axis=1)
y2 = data[:, 1]
x1 = np.vstack((x1, x2))
y1 = np.hstack((y1, y2))
f = open(path + "regression_data_test1.txt")
data = np.loadtxt(f, delimiter=",")
x1_test = np.insert(data[:, 0].reshape(len(data), 1), 0, np.ones(len(data)), axis=1)
y1_test = data[:, 1]
f = open(path + "regression_data_test2.txt")
data = np.loadtxt(f, delimiter=",")
x2_test = np.insert(data[:, 0].reshape(len(data), 1), 0, np.ones(len(data)), axis=1)
y2_test = data[:, 1]
x1_test = np.vstack((x1_test, x2_test))
y1_test = np.hstack((y1_test, y2_test))
return x1, y1, x1_test, y1_test
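The final example uses the same pairing: vstack joins the two design matrices row-wise while hstack joins the two target vectors end-to-end. A shape-only sketch with hypothetical sizes:
import numpy as np
x1, x2 = np.ones((5, 2)), np.ones((3, 2))
y1, y2 = np.zeros(5), np.zeros(3)
x = np.vstack((x1, x2))
y = np.hstack((y1, y2))
print(x.shape, y.shape)    # (8, 2) (8,)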