This article collects typical usage examples of the numpy.stack function in Python. If you are wondering how the stack function works, how to call it, or what real uses of stack look like, the hand-picked code examples below should help.
The following section presents 15 code examples of the stack function, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
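Before the examples, here is a minimal sketch of what np.stack itself does (toy arrays invented for illustration): it joins arrays of identical shape along a new axis, unlike np.concatenate, which joins along an existing axis.

import numpy as np

a = np.array([1, 2, 3])
b = np.array([4, 5, 6])

# New leading axis: result has shape (2, 3), one row per input array
print(np.stack((a, b), axis=0))

# New trailing axis: result has shape (3, 2), the inputs become columns
print(np.stack((a, b), axis=1))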
Example 1: arrow3d
def arrow3d(base, r1, r2, ort, l, h, m = 13, pivot = 'tail'):
    x = np.array([1., 0., 0.])
    y = np.array([0., 1., 0.])
    th = np.linspace(0, np.pi*2, m).reshape(-1, 1)
    ort = norm_vec(ort)
    if np.sum(ort * x) == 0:
        d1 = norm_vec(np.cross(ort, y))
    else:
        d1 = norm_vec(np.cross(ort, x))
    if pivot == 'tip':
        base = base - (l+h)*ort
    elif pivot == 'mid':
        base = base - (l+h)*ort/2.
    else:
        pass
    d2 = np.cross(ort, d1)
    p = base + l*r1*(d1*np.cos(th) + d2*np.sin(th))
    q = p + l*ort
    p2 = base + l*r2*(d1*np.cos(th) + d2*np.sin(th)) + l*ort
    p3 = base + (l+h)*ort
    p3 = np.array([p3]*m).reshape(-1, 3)
    t1 = np.stack((p[:-1], q[:-1], p[1:]), axis=1)
    t2 = np.stack((p[1:], q[:-1], q[1:]), axis=1)
    t3 = np.stack((p2[:-1], p3[:-1], p2[1:]), axis=1)
    #t2 = np.dstack((p[1:], q[:-1], q[1:]))
    t1 = np.vstack((t1, t2, t3))
    return t1
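The three np.stack calls above collect corresponding ring vertices so that each row of the result is one triangle. A toy sketch of that pattern, with dummy rings standing in for p and q:

import numpy as np

# Two rings of 4 points in 3-D, standing in for p and q above
p = np.zeros((4, 3))
q = np.ones((4, 3))

tris = np.stack((p[:-1], q[:-1], p[1:]), axis=1)
print(tris.shape)  # (3, 3, 3): 3 triangles, each with 3 vertices of xyz coordinates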
Example 2: check_rnn_forward
def check_rnn_forward(layer, inputs, deterministic=True):
    if isinstance(inputs, mx.nd.NDArray):
        inputs.attach_grad()
    else:
        for x in inputs:
            x.attach_grad()
    layer.collect_params().initialize()
    with mx.autograd.record():
        out = layer.unroll(3, inputs, merge_outputs=False)[0]
        mx.autograd.backward(out)
        out = layer.unroll(3, inputs, merge_outputs=True)[0]
        out.backward()
    np_out = out.asnumpy()
    if isinstance(inputs, mx.nd.NDArray):
        np_dx = inputs.grad.asnumpy()
    else:
        np_dx = np.stack([x.grad.asnumpy() for x in inputs], axis=1)

    layer.hybridize()
    with mx.autograd.record():
        out = layer.unroll(3, inputs, merge_outputs=False)[0]
        mx.autograd.backward(out)
        out = layer.unroll(3, inputs, merge_outputs=True)[0]
        out.backward()
    if isinstance(inputs, mx.nd.NDArray):
        input_grads = inputs.grad.asnumpy()
    else:
        input_grads = np.stack([x.grad.asnumpy() for x in inputs], axis=1)

    if deterministic:
        mx.test_utils.assert_almost_equal(np_out, out.asnumpy(), rtol=1e-3, atol=1e-5)
        mx.test_utils.assert_almost_equal(np_dx, input_grads, rtol=1e-3, atol=1e-5)
Example 3: step
def step(self, action):
    """Forward a batch of actions to the wrapped environments.

    Args:
      action: Batched action to apply to the environment.

    Raises:
      ValueError: Invalid actions.

    Returns:
      Batch of observations, rewards, and done flags.
    """
    actions = action
    for index, (env, action) in enumerate(zip(self._envs, actions)):
        if not env.action_space.contains(action):
            message = 'Invalid action at index {}: {}'
            raise ValueError(message.format(index, action))
    if self._blocking:
        transitions = [
            env.step(action)
            for env, action in zip(self._envs, actions)]
    else:
        transitions = [
            env.step(action, blocking=False)
            for env, action in zip(self._envs, actions)]
        transitions = [transition() for transition in transitions]
    observs, rewards, dones, infos = zip(*transitions)
    observ = np.stack(observs)
    reward = np.stack(rewards)
    done = np.stack(dones)
    info = tuple(infos)
    return observ, reward, done, info
Example 4: split_data
def split_data(chars, batch_size, num_steps, split_frac=0.9):
    """
    Split character data into training and validation sets, inputs and targets for each set.

    Arguments
    ---------
    chars: character array
    batch_size: Number of sequences per batch
    num_steps: Number of sequence steps to keep in the input and pass to the network
    split_frac: Fraction of batches to keep in the training set

    Returns train_x, train_y, val_x, val_y
    """
    slice_size = batch_size * num_steps
    n_batches = int(len(chars) / slice_size)

    # Drop the last few characters to make only full batches
    x = chars[: n_batches * slice_size]
    y = chars[1: n_batches * slice_size + 1]

    # Split the data into batch_size slices, then stack them into a 2D matrix
    x = np.stack(np.split(x, batch_size))
    y = np.stack(np.split(y, batch_size))

    # Now x and y are arrays with dimensions batch_size x n_batches*num_steps

    # Split into training and validation sets, keep the first split_frac batches for training
    split_idx = int(n_batches * split_frac)
    train_x, train_y = x[:, :split_idx * num_steps], y[:, :split_idx * num_steps]
    val_x, val_y = x[:, split_idx * num_steps:], y[:, split_idx * num_steps:]

    return train_x, train_y, val_x, val_y
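As a quick check of the shapes described in the docstring, the following sketch calls split_data on a toy integer sequence standing in for encoded characters (the sizes are invented for illustration):

import numpy as np

chars = np.arange(1001)  # stand-in for an encoded character array
train_x, train_y, val_x, val_y = split_data(chars, batch_size=10, num_steps=5)

print(train_x.shape, val_x.shape)  # (10, 90) (10, 10)
print(np.array_equal(train_y[:, :-1], train_x[:, 1:]))  # True: targets are inputs shifted by one step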
Example 5: load_mask_labels
def load_mask_labels():
    '''Load both target and style masks.
    A mask image (nr x nc) with m labels/colors will be loaded
    as a 4D boolean tensor: (1, m, nr, nc) for 'th' or (1, nr, nc, m) for 'tf'
    '''
    target_mask_img = load_img(target_mask_path,
                               target_size=(img_nrows, img_ncols))
    target_mask_img = img_to_array(target_mask_img)
    style_mask_img = load_img(style_mask_path,
                              target_size=(img_nrows, img_ncols))
    style_mask_img = img_to_array(style_mask_img)
    if K.image_dim_ordering() == 'th':
        mask_vecs = np.vstack([style_mask_img.reshape((3, -1)).T,
                               target_mask_img.reshape((3, -1)).T])
    else:
        mask_vecs = np.vstack([style_mask_img.reshape((-1, 3)),
                               target_mask_img.reshape((-1, 3))])

    labels = kmeans(mask_vecs, nb_labels)
    style_mask_label = labels[:img_nrows *
                              img_ncols].reshape((img_nrows, img_ncols))
    target_mask_label = labels[img_nrows *
                               img_ncols:].reshape((img_nrows, img_ncols))

    stack_axis = 0 if K.image_dim_ordering() == 'th' else -1
    style_mask = np.stack([style_mask_label == r for r in xrange(nb_labels)],
                          axis=stack_axis)
    target_mask = np.stack([target_mask_label == r for r in xrange(nb_labels)],
                           axis=stack_axis)

    return (np.expand_dims(style_mask, axis=0),
            np.expand_dims(target_mask, axis=0))
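The per-label boolean masks built with np.stack above can be illustrated on a tiny label map; axis=-1 gives a channels-last mask and axis=0 a channels-first one (toy data, with nb_labels assumed to be 3):

import numpy as np

nb_labels = 3
label_map = np.array([[0, 1],
                      [2, 1]])  # toy (nr, nc) label image

mask = np.stack([label_map == r for r in range(nb_labels)], axis=-1)
print(mask.shape)    # (2, 2, 3): one boolean channel per label
print(mask[..., 1])  # True exactly where label_map == 1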
Example 6: formatPeaksArbitraryPSF
def formatPeaksArbitraryPSF(peaks, peaks_type):
    """
    Input peaks array formatter for arbitrary PSFs.

    Based on peaks_type, create a properly formatted ndarray to pass
    to the C library. This is primarily for internal use by newPeaks().
    """
    # These come from the finder, or the unit test code, create peaks
    # as (N,3) with columns x, y, z.
    #
    if (peaks_type == "testing") or (peaks_type == "finder"):
        c_peaks = numpy.stack((peaks["x"],
                               peaks["y"],
                               peaks["z"]), axis = 1)

    # These come from pre-specified peak fitting locations, create peaks
    # as (N,5) with columns x, y, z, background, height.
    #
    elif (peaks_type == "text") or (peaks_type == "hdf5"):
        c_peaks = numpy.stack((peaks["x"],
                               peaks["y"],
                               peaks["z"],
                               peaks["background"],
                               peaks["height"]), axis = 1)

    else:
        raise MultiFitterException("Unknown peaks type '" + peaks_type + "'")

    return numpy.ascontiguousarray(c_peaks, dtype = numpy.float64)
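The axis=1 pattern above turns per-field 1-D arrays into an (N, k) matrix whose columns follow the order in which the arrays are passed. A toy sketch with a plain dict standing in for the peaks structure:

import numpy as np

peaks = {"x": np.array([1.0, 2.0]),
         "y": np.array([3.0, 4.0]),
         "z": np.array([5.0, 6.0])}

c_peaks = np.stack((peaks["x"], peaks["y"], peaks["z"]), axis=1)
print(c_peaks.shape)  # (2, 3), columns x, y, z
print(c_peaks)        # [[1. 3. 5.]
                      #  [2. 4. 6.]]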
Example 7: converter
def converter(batch, device, max_caption_length=None):
    """Optional preprocessing of the batch before forward pass."""
    pad = max_caption_length is not None
    imgs = []
    captions = []
    for img, caption in batch:
        # Preprocess the caption by either fixing the length by padding (LSTM)
        # or by simply wrapping each caption in an ndarray (NStepLSTM)
        if pad:
            arr = np.full(max_caption_length, _ignore, dtype=np.int32)
            # Clip to max length if necessary
            arr[:len(caption)] = caption[:max_caption_length]
            caption = arr
        else:
            caption = to_device(device, np.asarray(caption, dtype=np.int32))
        imgs.append(img)
        captions.append(caption)

    if pad:
        captions = to_device(device, np.stack(captions))
    imgs = to_device(device, np.stack(imgs))
    return imgs, captions
Example 8: get_filters
def get_filters(R, filter_size, P=None, n_rings=None):
    """Perform single-frequency DFT on each ring of a polar-resampled patch"""
    k = filter_size
    filters = {}
    N = n_samples(k)
    from scipy.linalg import dft
    for m, r in R.iteritems():
        rsh = r.shape
        # Get the basis matrices
        weights = get_interpolation_weights(k, m, n_rings=n_rings)
        DFT = dft(N)[m,:]
        LPF = np.dot(DFT, weights).T

        cosine = np.real(LPF).astype(np.float32)
        sine = np.imag(LPF).astype(np.float32)
        # Project taps on to rotational basis
        r = np.reshape(r, np.stack([rsh[0], rsh[1]*rsh[2]]))
        ucos = np.reshape(np.dot(cosine, r), np.stack([k, k, rsh[1], rsh[2]]))
        usin = np.reshape(np.dot(sine, r), np.stack([k, k, rsh[1], rsh[2]]))
        if P is not None:
            # Rotate basis matrices
            ucos_ = np.cos(P[m])*ucos + np.sin(P[m])*usin
            usin = -np.sin(P[m])*ucos + np.cos(P[m])*usin
            ucos = ucos_
        filters[m] = (ucos, usin)
    return filters
Example 9: _read
def _read(self, key):
    ifnone = lambda a, b: b if a is None else a
    y = key[1]
    x = key[2]
    if isinstance(x, slice):
        xstart = ifnone(x.start, 0)
        xstop = ifnone(x.stop, self.raster_size[0])
        xstep = xstop - xstart
    else:
        raise TypeError("Loc style access elements must be slices, e.g., [:] or [10:100]")
    if isinstance(y, slice):
        ystart = ifnone(y.start, 0)
        ystop = ifnone(y.stop, self.raster_size[1])
        ystep = ystop - ystart
    else:
        raise TypeError("Loc style access elements must be slices, e.g., [:] or [10:100]")
    pixels = (xstart, ystart, xstep, ystep)
    if isinstance(key[0], (int, np.integer)):
        return self.read_array(band=int(key[0]+1), pixels=pixels)
    elif isinstance(key[0], slice):
        # Given some slice iterate over the bands and get the bands and pixel space requested
        arrs = []
        for band in list(list(range(1, self.nbands + 1))[key[0]]):
            arrs.append(self.read_array(band, pixels=pixels))
        return np.stack(arrs)
    else:
        arrs = []
        for b in key[0]:
            arrs.append(self.read_array(band=int(b+1), pixels=pixels))
        return np.stack(arrs)
Example 10: translist_to_traj
def translist_to_traj(tlist):
    obs_T_Do = np.stack([trans[0] for trans in tlist]); assert obs_T_Do.shape == (len(tlist), self.obs_space.storage_size)
    obsfeat_T_Df = np.stack([trans[1] for trans in tlist]); assert obsfeat_T_Df.shape[0] == len(tlist)
    adist_T_Pa = np.stack([trans[2] for trans in tlist]); assert adist_T_Pa.ndim == 2 and adist_T_Pa.shape[0] == len(tlist)
    a_T_Da = np.stack([trans[3] for trans in tlist]); assert a_T_Da.shape == (len(tlist), self.action_space.storage_size)
    r_T = np.stack([trans[4] for trans in tlist]); assert r_T.shape == (len(tlist),)
    return Trajectory(obs_T_Do, obsfeat_T_Df, adist_T_Pa, a_T_Da, r_T)
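The same column-extraction idiom can be checked on a toy transition list; the tuple layout (obs, obsfeat, adist, action, reward) mirrors the indices used above, and the sizes are invented for illustration:

import numpy as np

# Three fake transitions of the form (obs, obsfeat, adist, action, reward)
tlist = [(np.zeros(4), np.zeros(2), np.zeros(3), np.zeros(1), 0.5) for _ in range(3)]

obs_T_Do = np.stack([trans[0] for trans in tlist])
r_T = np.stack([trans[4] for trans in tlist])
print(obs_T_Do.shape, r_T.shape)  # (3, 4) (3,)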
Example 11: get_non_missing
def get_non_missing(ids, x, y, real_codes):
    """
    Takes lists of the data and removes missing data!
    :param ids:
    :param x:
    :param y:
    :param real_codes:
    :return:
    """
    dataset = zip(ids, x, y, real_codes)
    dataset = np.array(dataset, dtype=object)
    non_miss = dataset[~(dataset[:,3] == '""')]

    id_clean = non_miss[:,0].tolist()  ## Takes the first column of the non-missing matrix and writes it to a list
    text_clean = non_miss[:,1]
    code_clean = non_miss[:,2]
    real_codes_clean = non_miss[:,3].tolist()
    real_codes_clean = [float(i) for i in real_codes_clean]  ## Turns real_codes into floats for memory efficiency
    real_codes_clean = np.array(real_codes_clean)

    text_clean = np.stack(text_clean, axis=0)  ## Makes everything a 2D array instead of an array of arrays
    code_clean = np.stack(code_clean, axis=0)

    return [id_clean, text_clean, code_clean, real_codes_clean]
Example 12: read
def read(self, input_path):
    '''
    Reads in the data from input files
    '''
    self.lr_inputs = None
    self.sr_outputs = None

    print(input_path)
    filenames = glob.glob(input_path + '*')
    # TODO: remove assertion
    assert len(filenames) > 0
    random.shuffle(filenames)
    filenames = filenames[0:150]
    print('Length: ' + str(len(filenames)))
    filenames.sort()

    outputs = []
    inputs = []
    for filename in filenames:
        output_img = cv2.imread(filename)
        # Asserts the image is read correctly and not empty
        assert output_img.shape[0] > 0
        assert output_img.shape[1] > 0
        # TODO: read in actual depth
        output_depth = np.random.random((output_img.shape[0], output_img.shape[1], 1))
        #print(type(output_img))
        output_img = np.concatenate((output_img, output_depth), 2)
        #print(type(output_img))
        outputs.append(output_img)
        input_img = compute_lr_input(
            output_img, downsampling_factor_x=2,
            downsampling_factor_y=2, blur_sigma=1.6, noise_sigma=0.03)
        inputs.append(input_img)

    self.sr_outputs = np.stack(outputs, axis=0)
    self.lr_inputs = np.stack(inputs, axis=0)
Example 13: step
def step(self, action):
    # x = np.argmax(action[:image_width])
    # r = (np.argmax(action[image_width:]) - 1)
    # pic = self.canvas[:, :, 0]
    # if (r != -1):
    #     r = 2 ** r
    #     for i in range(image_width):
    #         if(np.sum(pic[i, x : x + r + 1])):
    #             self.draw(x, i, r)
    #             break
    x = (action[:image_width] + 1) / 2.
    y = (action[image_width:] + 1) / 2.
    grey = x * y.reshape(image_width, 1)
    grey = grey.reshape((image_width, image_width, 1))
    grey = (grey * (255, 255, 255) / 4).astype('uint8')
    grey = np.minimum(grey, self.canvas)
    self.canvas -= grey

    diff = self.diff()
    reward = (self.lastdiff - diff) / self.rewardscale  # reward is positive if the diff decreased
    self.lastdiff = diff
    self.stepnum += 1
    ob = self.observation()
    self.canvas = np.stack(np.rot90(self.canvas))
    self.target = np.stack(np.rot90(self.target))
    self.time += 1. / max_step
    return ob, reward, (self.stepnum >= max_step), None  # o, r, d, i
Example 14: main
def main(args):
    # load the model
    model = load_model(args.model_filename, custom_objects={
        'SubPixelUpscaling': SubPixelUpscaling
    })
    print model.layers

    # load the images and bucket them by shape
    images_by_size = defaultdict(list)
    for filename in glob.glob(args.image_glob):
        img = Image.open(filename)
        img = img.resize(map(lambda x: int(x * args.output_scale), img.size))  # scale up
        images_by_size[img.size].append(img)

    # apply the model to the images
    for size, imgs in images_by_size.items():
        images = map(img_to_array, imgs)
        images = (np.stack(images) / 127.5) - 1.

        # NOTE: :(
        x = input_layer = Input(shape=images.shape[1:])
        for layer in model.layers[1:]:
            x = layer(x)
        this_model = Model([input_layer], [x])
        this_model.compile(optimizer='sgd', loss='mse')
        # END :(

        new_images = images
        for _ in range(args.apply_n):
            new_images = this_model.predict(new_images, verbose=False)

        # save before/after images
        for i in range(new_images.shape[0]):
            new_image = new_images[i]
            image = images[i]
            samples = np.stack([image, new_image])
            filename = '{}_{}.png'.format(size, i)
            filename = os.path.join(args.output_path, filename)
            print('saving sample', samples.shape, filename)
            save_sample_grid(samples, filename)
Example 15: calc_score
def calc_score(self):
    cardtype_names = np.array(
        ['highcard', 'pair', 'twopair', 'threeofakind', 'straight', 'flush', 'fullhouse', 'fourofakind',
         'straightflush'])
    self.cardtype_multiplier = np.array(
        [self.highcard_multiplier, self.pair_multiplier, self.twopair_multiplier, self.threeofakind_multiplier,
         self.straight_multiplier, self.flush_multiplier, self.fullhouse_multiplier, self.fourofakind_multiplier,
         self.straighflush_multiplier])
    self.detected_types = np.stack((self.highcard, self.pair, self.twopair, self.threeofakind,
                                    self.straight, self.flush, self.fullhouse, self.fourofakind,
                                    self.straightflush), axis=0)
    self.hand_vals = np.stack((self.highCardsVal, self.pairScore, self.twoPairScore, self.threeScore, self.straightScore,
                               self.flushScore, self.fullhouseScore, self.fourofakindScore, self.straightflush_score), axis=0)

    detected_types = self.detected_types * 1
    self.active_multiplier = self.cardtype_multiplier[:, None, None] * detected_types * self.hand_vals
    self.ordered_multiplier = np.sort(self.active_multiplier, axis=0)[::-1, :, :]

    highestVals = np.argmax(self.ordered_multiplier[0, :, :], axis=1)
    Winners = (self.ordered_multiplier[0, ::] == np.amax(self.ordered_multiplier[0, :, :], axis=1)[:, None])
    MyWinnerMask = np.zeros(self.player_amount, dtype=int)
    MyWinnerMask[0] = 1
    MyWinnArray = (Winners == MyWinnerMask).all(1)
    MyWins = np.sum(MyWinnArray, axis=0)

    # print('cardtype_multiplier \n {}'.format(self.cardtype_multiplier))
    # print('detected_types \n {}'.format(detected_types))
    # print('hand_vals \n {}'.format(self.hand_vals))
    # print('active_multiplier \n {}'.format(self.active_multiplier))
    # print('ordered_multiplier \n {}'.format(self.ordered_multiplier))
    # print('highest vals \n {}'.format(highestVals))
    # print('My Wins \n {}'.format(MyWins))

    return MyWins / self.iterations