This page collects typical usage examples of Python's numpy.hstack function. If you are wondering what hstack does, how to call it, or what real-world uses look like, the curated examples below should help.
Shown below are 15 hstack code examples, ordered by popularity by default. You can vote up the examples you find useful; your feedback helps the system recommend better Python code samples.
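Before the examples, a minimal self-contained sketch of the basics (assuming only NumPy is installed): np.hstack joins 1-D arrays end to end and 2-D arrays column-wise.

import numpy as np

a = np.array([1, 2, 3])
b = np.array([4, 5, 6])
print(np.hstack([a, b]))          # [1 2 3 4 5 6] -- 1-D arrays joined end to end

m = np.ones((2, 2))
c = np.zeros((2, 1))
print(np.hstack((m, c)).shape)    # (2, 3) -- 2-D arrays joined column-wise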
Example 1: get_roc_score

def get_roc_score(edges_pos, edges_neg, score_matrix, apply_sigmoid=False):
    # Edge case: nothing to score
    if len(edges_pos) == 0 or len(edges_neg) == 0:
        return (None, None)

    # Store positive edge predictions
    preds_pos = []
    for edge in edges_pos:
        if apply_sigmoid:
            preds_pos.append(sigmoid(score_matrix[edge[0], edge[1]]))
        else:
            preds_pos.append(score_matrix[edge[0], edge[1]])

    # Store negative edge predictions
    preds_neg = []
    for edge in edges_neg:
        if apply_sigmoid:
            preds_neg.append(sigmoid(score_matrix[edge[0], edge[1]]))
        else:
            preds_neg.append(score_matrix[edge[0], edge[1]])

    # Calculate scores: labels are 1 for positive edges, 0 for negative
    preds_all = np.hstack([preds_pos, preds_neg])
    labels_all = np.hstack([np.ones(len(preds_pos)), np.zeros(len(preds_neg))])
    roc_score = roc_auc_score(labels_all, preds_all)
    ap_score = average_precision_score(labels_all, preds_all)
    return roc_score, ap_score
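A hedged usage sketch for the function above, assuming it is in scope and that roc_auc_score and average_precision_score come from scikit-learn; the score matrix and edge lists are made up for illustration.

import numpy as np
from sklearn.metrics import roc_auc_score, average_precision_score

# Hypothetical symmetric score matrix; higher means "more likely an edge".
scores = np.array([[0.0, 0.9, 0.2],
                   [0.9, 0.0, 0.8],
                   [0.2, 0.8, 0.0]])
roc, ap = get_roc_score([(0, 1), (1, 2)], [(0, 2)], scores)
print(roc, ap)   # 1.0 1.0 for this toy matrix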
Example 2: gen_coastline

def gen_coastline(lon, lat, bathy, depth=0):
    """
    Given lon, lat, and bathymetry, generate vectors of line segments
    of the coastline. This can be exported to MATLAB (via savemat) to be
    used with the 'editmask' routine for creating grid masks.

    Input
    -----
    lon : array,
        longitudes of bathymetry locations
    lat : array,
        latitudes of bathymetry locations
    bathy : array,
        bathymetry (negative for ocean, positive for land) values
    depth : float,
        depth to use as the definition of the coast

    Returns
    -------
    lon : ndarray,
        vector of coastlines, separated by NaN (MATLAB-style)
    lat : ndarray,
        vector of coastlines, separated by NaN (MATLAB-style)
    """
    CS = plt.contour(lon, lat, bathy, [depth - 0.25, depth + 0.25])
    lon = list()
    lat = list()
    for col in CS.collections:
        for path in col.get_paths():
            lon.append(path.vertices[:, 0])
            lon.append(np.nan)
            lat.append(path.vertices[:, 1])
            lat.append(np.nan)
    return (np.hstack(lon), np.hstack(lat))
Example 3: torgerson

def torgerson(distances, n_components=2):
    """
    Perform classical MDS (Torgerson scaling).

    .. note::
        If the distances are Euclidean, this is equivalent to projecting
        the original data points onto the first `n_components` principal
        components.
    """
    distances = np.asarray(distances)
    assert distances.shape[0] == distances.shape[1]
    N = distances.shape[0]
    # squared distances
    D_sq = distances ** 2
    # double-center D_sq
    rsum = np.sum(D_sq, axis=1, keepdims=True)
    csum = np.sum(D_sq, axis=0, keepdims=True)
    total = np.sum(csum)
    D_sq -= rsum / N
    D_sq -= csum / N
    D_sq += total / (N ** 2)
    B = np.multiply(D_sq, -0.5, out=D_sq)
    U, L, _ = np.linalg.svd(B)
    if n_components > N:
        U = np.hstack((U, np.zeros((N, n_components - N))))
        L = np.hstack((L, np.zeros(n_components - N)))
    U = U[:, :n_components]
    L = L[:n_components]
    D = np.diag(np.sqrt(L))
    return np.dot(U, D)
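A small usage sketch, assuming the torgerson function as defined above: three collinear points at mutual distances 1 and 2 should embed onto a line, so the second coordinate of the result is near zero.

import numpy as np

D = np.array([[0., 1., 2.],
              [1., 0., 1.],
              [2., 1., 0.]])
print(torgerson(D, n_components=2).round(3))   # second column ~ 0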
Example 4: display_layer

def display_layer(X, filename="../images/layer.png"):
    """
    Produces an image composed of the given N images, patches, or neural
    network weights stored in the array X, and saves it under the given
    filename.
    :param X: numpy array of size (N x D): N images, patches, or neural network weights
    :param filename: a string, the name of the produced file
    :return: the assembled image as a numpy array
    """
    if not isinstance(X, np.ndarray):
        raise TypeError("'X' must be a numpy array")
    N, D = X.shape
    d = get_reshaped_image_size(D)
    if N == 1:
        return X.reshape(d, d, 3)
    divisors = [n for n in range(1, N) if N % n == 0]
    im_sizes = divisors[len(divisors) // 2], N // divisors[len(divisors) // 2]
    for i in range(im_sizes[0]):
        # start row i with a zero-column border, then its first tile
        img_row = np.hstack((np.zeros((d, 1, 3)),
                             X[i * im_sizes[1], :].reshape(d, d, 3)))
        img_row = np.hstack((img_row, np.zeros((d, 1, 3))))
        for j in range(1, im_sizes[1]):
            img_row = np.hstack((img_row, X[i * im_sizes[1] + j, :].reshape(d, d, 3)))
            img_row = np.hstack((img_row, np.zeros((d, 1, 3))))
        if i == 0:
            img = img_row
        else:
            img = np.vstack((img, img_row))
    # add zero-row borders at the bottom and top
    img = np.vstack((img, np.zeros((1, img.shape[1], 3))))
    img = np.vstack((np.zeros((1, img.shape[1], 3)), img))
    imsave(filename, img)
    return img
Example 5: __mul__

def __mul__(self, df):
    """
    Extract and stack poles and zeros.
    TODO: handle simplification
    """
    b1 = self.b
    a1 = self.a
    b2 = df.b
    a2 = df.a
    pb1 = np.poly1d(b1)
    pa1 = np.poly1d(a1)
    pb2 = np.poly1d(b2)
    pa2 = np.poly1d(a2)
    rpb1 = pb1.r
    rpb2 = pb2.r
    rpa1 = pa1.r
    rpa2 = pa2.r
    F = DF()
    F.p = np.hstack((rpa1, rpa2))
    F.z = np.hstack((rpb1, rpb2))
    F.simplify()
    return F
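A short illustration of the stacking step above, with hypothetical filter denominators (np.poly1d(...).r returns the polynomial's roots):

import numpy as np

a1 = [1, 3, 2]   # x^2 + 3x + 2, roots -2 and -1
a2 = [1, 5, 6]   # x^2 + 5x + 6, roots -3 and -2
poles = np.hstack((np.poly1d(a1).r, np.poly1d(a2).r))
print(poles)     # [-2. -1. -3. -2.]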
Example 6: save

def save(self, filename):
    num_objs = len(self.objects)
    data_size = 0
    desc = []
    data = []
    for obj in self.objects:
        if isinstance(obj, self.scalars):
            desc.append(0)
            data.append(obj)
            data_size += 1
        else:
            assert isinstance(obj, np.ndarray)
            desc.append(len(obj.shape))
            desc.append(obj.shape)
            data.append(obj.flatten(order='F'))
            data_size += np.prod(obj.shape)
    desc = np.hstack(desc)
    header_size = 3 + desc.size
    output = np.hstack([num_objs, header_size, data_size]
                       + [desc]
                       + data).astype(np.double)
    assert (header_size + data_size,) == output.shape
    output.tofile(filename)
Example 7: getScalars

def getScalars(self, inputData):
    """
    Returns a numpy array containing the sub-field scalar value(s) for
    each sub-field of the inputData. To get the associated field names for
    each of the scalar values, call getScalarNames().

    For a simple scalar encoder, the scalar value is simply the input
    unmodified. For category encoders, it is the scalar representing the
    category string that is passed in. For the datetime encoder, the scalar
    value is the number of seconds since the epoch.

    The intent of the scalar representation of a sub-field is to provide a
    baseline for measuring error differences. You can compare the scalar
    value of the inputData with the scalar value returned from
    topDownCompute() on a top-down representation to evaluate prediction
    accuracy, for example.

    @param inputData The data from the source. This is typically an object
        with members.
    @returns array of scalar values
    """
    retVals = numpy.array([])
    if self.encoders is not None:
        for (name, encoder, offset) in self.encoders:
            values = encoder.getScalars(self._getInputValue(inputData, name))
            retVals = numpy.hstack((retVals, values))
    else:
        retVals = numpy.hstack((retVals, inputData))
    return retVals
Example 8: load_sdss_data_both_catalogs

def load_sdss_data_both_catalogs(hemi):
    lowz = load_sdss_data('lowz', hemi)
    cmass = load_sdss_data('cmass', hemi)
    ra = np.hstack([lowz['ra'], cmass['ra']])
    dec = np.hstack([lowz['dec'], cmass['dec']])
    z = np.hstack([lowz['z'], cmass['z']])
    return {'ra': ra, 'dec': dec, 'z': z}
Example 9: offsetPlane

def offsetPlane(plane, x, y):
    """
    Takes a numpy 2D array and returns the same plane offset by x and y,
    adding rows and columns of 0 values.
    """
    height, width = plane.shape
    dataType = plane.dtype
    # shift x by cropping, creating a new array of columns and stacking
    # horizontally
    if abs(x) > 0:
        newCols = zeros((height, abs(x)), dataType)
        x1 = max(0, 0 - x)
        x2 = min(width, width - x)
        crop = plane[0:height, x1:x2]
        if x > 0:
            plane = hstack((newCols, crop))
        else:
            plane = hstack((crop, newCols))
    # shift y by cropping, creating a new array of rows and stacking
    # vertically
    if abs(y) > 0:
        newRows = zeros((abs(y), width), dataType)
        y1 = max(0, 0 - y)
        y2 = min(height, height - y)
        crop = plane[y1:y2, 0:width]
        if y > 0:
            plane = vstack((newRows, crop))
        else:
            plane = vstack((crop, newRows))
    return plane
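A quick usage sketch, assuming the function above and that zeros, hstack, and vstack were imported from numpy, as the function body implies:

import numpy as np
from numpy import zeros, hstack, vstack  # names the function body relies on

p = np.arange(9).reshape(3, 3)
print(offsetPlane(p, 1, 1))   # contents shift right and down by one; zeros fill in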
Example 10: sample_trajectory

def sample_trajectory(M, n_states):
    # Samples trajectories from random nodes in our domain (M)
    G, W = M.get_graph_inv()
    N = G.shape[0]
    if N >= n_states:
        rand_ind = np.random.permutation(N)
    else:
        rand_ind = np.tile(np.random.permutation(N), (1, 10))
    init_states = rand_ind[0:n_states].flatten()
    goal_s = M.map_ind_to_state(M.targetx, M.targety)
    states = []
    states_xy = []
    states_one_hot = []
    # Get optimal path from graph: distances from every node to the goal
    g_sparse = csr_matrix(W)
    d, pred = dijkstra(g_sparse, indices=goal_s, return_predecessors=True)
    for i in range(n_states):
        path = trace_path(pred, goal_s, init_states[i])
        path = np.flip(path, 0)
        states.append(path)
    for state in states:
        L = len(state)
        r, c = M.get_coords(state)
        row_m = np.zeros((L, M.n_row))
        col_m = np.zeros((L, M.n_col))
        for i in range(L):
            row_m[i, r[i]] = 1
            col_m[i, c[i]] = 1
        states_one_hot.append(np.hstack((row_m, col_m)))
        states_xy.append(np.hstack((r, c)))
    return states_xy, states_one_hot
Example 11: phase_step_spike_fq

def phase_step_spike_fq(self, spikes_time, full_step, nb_block, fs):
    stance_spike_fq = []
    swing_spike_fq = []
    for step in full_step:
        stance_block_duration = (step[1] - step[0]) / nb_block
        swing_block_duration = (step[2] - step[1]) / nb_block
        step_stance_count = [0] * nb_block
        step_swing_count = [0] * nb_block
        for spike_time in spikes_time:
            # stance phase
            if step[0] < spike_time / fs < step[1]:
                list_block = np.arange(step[0], step[1], stance_block_duration)
                list_block = np.hstack((list_block, step[1]))
                for i in range(nb_block):
                    if list_block[i] < spike_time / fs < list_block[i + 1]:
                        step_stance_count[i] += 1
            # swing phase
            elif step[1] < spike_time / fs < step[2]:
                list_block = np.arange(step[1], step[2], swing_block_duration)
                list_block = np.hstack((list_block, step[2]))
                for i in range(nb_block):
                    if list_block[i] < spike_time / fs < list_block[i + 1]:
                        step_swing_count[i] += 1
        stance_spike_fq.append(np.array(step_stance_count) / stance_block_duration)
        swing_spike_fq.append(np.array(step_swing_count) / swing_block_duration)
    return stance_spike_fq, swing_spike_fq
Example 12: test_fuzz

def test_fuzz(self):
    # try a bunch of crazy inputs
    rfuncs = (
        np.random.uniform,
        np.random.normal,
        np.random.standard_cauchy,
        np.random.exponential)
    ntests = 100
    for i in range(ntests):
        rfunc = random.choice(rfuncs)
        target_norm_1 = random.expovariate(1.0)
        n = random.randrange(2, 16)
        A_original = rfunc(size=(n, n))
        E_original = rfunc(size=(n, n))
        A_original_norm_1 = scipy.linalg.norm(A_original, 1)
        scale = target_norm_1 / A_original_norm_1
        A = scale * A_original
        E = scale * E_original
        M = np.vstack([
            np.hstack([A, E]),
            np.hstack([np.zeros_like(A), A])])
        expected_expm = scipy.linalg.expm(A)
        expected_frechet = scipy.linalg.expm(M)[:n, n:]
        observed_expm, observed_frechet = expm_frechet(A, E)
        assert_allclose(expected_expm, observed_expm)
        assert_allclose(expected_frechet, observed_frechet)
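The test relies on a standard identity: for the block matrix [[A, E], [0, A]], expm places expm(A) on the diagonal blocks and the Fréchet derivative of expm at A in direction E in the top-right block. A minimal self-contained check of that identity (SciPy assumed):

import numpy as np
import scipy.linalg

n = 3
A = 0.1 * np.random.normal(size=(n, n))
E = 0.1 * np.random.normal(size=(n, n))
M = np.vstack([np.hstack([A, E]),
               np.hstack([np.zeros_like(A), A])])
expM = scipy.linalg.expm(M)
assert np.allclose(expM[:n, :n], scipy.linalg.expm(A))  # diagonal block
print(expM[:n, n:])   # Frechet derivative block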
Example 13: run_classify

def run_classify(X_groups_train, y_train, X_groups_validate, y_validate):
    """
    Although this function is given feature groups, it does not actually
    use the groups in the selection criterion.
    """
    method_label = "gridsearch_lasso"
    X_validate = np.hstack(X_groups_validate)
    max_power = np.log(50)
    min_power = np.log(1e-4)
    lambda_guesses = np.power(np.e, np.arange(min_power, max_power,
                                              (max_power - min_power - 1e-5) / (NUM_LAMBDAS - 1)))
    print(method_label, "lambda_guesses", lambda_guesses)
    X_train = np.hstack(X_groups_train)
    problem_wrapper = LassoClassifyProblemWrapper(X_train, y_train, [])
    best_cost = 1e5
    best_betas = []
    best_regularization = lambda_guesses[0]
    for l1 in reversed(lambda_guesses):
        betas = problem_wrapper.solve([l1])
        current_cost, _ = testerror_logistic_grouped(X_validate, y_validate, betas)
        if best_cost > current_cost:
            best_cost = current_cost
            best_betas = betas
            best_regularization = l1
            print(method_label, "best_cost so far", best_cost,
                  "best_regularization", best_regularization)
            sys.stdout.flush()
    print(method_label, "best_validation_error", best_cost)
    print(method_label, "best lambdas:", best_regularization)
    return best_betas, best_cost
Example 14: _plot_traj

def _plot_traj(self, z, axes, units):
    """Plots spacecraft trajectory.

    Args:
        - z (``tuple``, ``list``, ``numpy.ndarray``): Decision chromosome.
        - axes (``matplotlib.axes._subplots.Axes3DSubplot``): 3D axes to use for the plot.
        - units (``float``, ``int``): Length unit by which to normalise data.

    Examples:
        >>> prob.extract(pykep.trajopt.indirect_or2or).plot_traj(pop.champion_x)
    """
    # times
    t0 = pk.epoch(0)
    tf = pk.epoch(z[0])
    # mean anomalies
    M0 = z[1] - self.elem0[1] * np.sin(z[1])
    Mf = z[2] - self.elemf[1] * np.sin(z[2])
    elem0 = np.hstack([self.elem0[:5], [M0]])
    elemf = np.hstack([self.elemf[:5], [Mf]])
    # Keplerian points
    kep0 = pk.planet.keplerian(t0, elem0)
    kepf = pk.planet.keplerian(tf, elemf)
    # planets
    pk.orbit_plots.plot_planet(
        kep0, t0=t0, units=units, ax=axes, color=(0.8, 0.8, 0.8))
    pk.orbit_plots.plot_planet(
        kepf, t0=tf, units=units, ax=axes, color=(0.8, 0.8, 0.8))
Example 15: _stimcorr_core

def _stimcorr_core(self, motionfile, intensityfile, designmatrix, cwd=None):
    """
    Core routine for determining stimulus correlation.
    """
    if not cwd:
        cwd = os.getcwd()
    # read in motion parameters and intensity time course
    mc_in = np.loadtxt(motionfile)
    g_in = np.loadtxt(intensityfile)
    g_in.shape = g_in.shape[0], 1
    dcol = designmatrix.shape[1]
    mccol = mc_in.shape[1]
    concat_matrix = np.hstack((designmatrix, mc_in, g_in))
    cm = np.corrcoef(concat_matrix, rowvar=0)
    corrfile = self._get_output_filenames(motionfile, cwd)
    # write output to the correlation file
    with open(corrfile, 'w') as fp:
        fp.write("Stats for:\n")
        fp.write("Stimulus correlated motion:\n%s\n" % motionfile)
        for i in range(dcol):
            fp.write("SCM.%d:" % i)
            for v in cm[i, dcol + np.arange(mccol)]:
                fp.write(" %.2f" % v)
            fp.write('\n')
        fp.write("Stimulus correlated intensity:\n%s\n" % intensityfile)
        for i in range(dcol):
            fp.write("SCI.%d: %.2f\n" % (i, cm[i, -1]))