This article collects typical usage examples of the keras.backend.repeat method in Python. If you are unsure what exactly backend.repeat does, how to call it, or what working code that uses it looks like, the curated examples below should help. You can also explore the keras.backend module for related methods.
15 code examples of backend.repeat are shown below, drawn from open-source projects and ordered by popularity by default.
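Before the project code, a minimal sketch of K.repeat itself may help (my own snippet, not from the projects below; it assumes Keras 2 on the TensorFlow backend): K.repeat(x, n) takes a 2D tensor of shape (samples, dim) and returns (samples, n, dim), repeating each row n times along a new axis 1.

import numpy as np
from keras import backend as K

x = K.constant(np.array([[1., 2.],
                         [3., 4.]]))  # shape (2, 2)
y = K.repeat(x, 3)                    # shape (2, 3, 2): each row repeated 3 times
print(K.eval(y).shape)                # (2, 3, 2)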
Example 1: call
# Required import: from keras import backend [as alias]
# Or: from keras.backend import repeat [as alias]
def call(self, x):
    mean = K.mean(x, axis=-1)
    std = K.std(x, axis=-1)
    if len(x.shape) == 3:
        mean = K.permute_dimensions(
            K.repeat(mean, x.shape.as_list()[-1]),
            [0, 2, 1]
        )
        std = K.permute_dimensions(
            K.repeat(std, x.shape.as_list()[-1]),
            [0, 2, 1]
        )
    elif len(x.shape) == 2:
        mean = K.reshape(
            K.repeat_elements(mean, x.shape.as_list()[-1], 0),
            (-1, x.shape.as_list()[-1])
        )
        std = K.reshape(
            K.repeat_elements(std, x.shape.as_list()[-1], 0),
            (-1, x.shape.as_list()[-1])
        )
    return self._g * (x - mean) / (std + self._epsilon) + self._b
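A hedged sketch (my own scaffolding, not the original repo's code) of the kind of layer this call could live in; the attribute names _g, _b and _epsilon are taken from how call uses them, everything else is an assumption:

from keras import backend as K
from keras.layers import Layer

class LayerNorm(Layer):
    """Layer normalization over the last axis, matching the call above."""
    def __init__(self, epsilon=1e-6, **kwargs):
        super(LayerNorm, self).__init__(**kwargs)
        self._epsilon = epsilon

    def build(self, input_shape):
        # learned per-feature gain and bias
        self._g = self.add_weight(name='gain', shape=(input_shape[-1],),
                                  initializer='ones')
        self._b = self.add_weight(name='bias', shape=(input_shape[-1],),
                                  initializer='zeros')
        super(LayerNorm, self).build(input_shape)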
Example 2: call
# Required import: from keras import backend [as alias]
# Or: from keras.backend import repeat [as alias]
def call(self, x, mask=None):
    input_shape = self.input_spec[0].shape
    en_seq = x
    x_input = x[:, input_shape[1] - 1, :]
    x_input = K.repeat(x_input, input_shape[1])
    initial_states = self.get_initial_states(x_input)
    constants = super(PointerLSTM, self).get_constants(x_input)
    constants.append(en_seq)
    preprocessed_input = self.preprocess_input(x_input)
    last_output, outputs, states = K.rnn(self.step, preprocessed_input,
                                         initial_states,
                                         go_backwards=self.go_backwards,
                                         constants=constants,
                                         input_length=input_shape[1])
    return outputs
Example 3: step
# Required import: from keras import backend [as alias]
# Or: from keras.backend import repeat [as alias]
def step(self, x_input, states):
    input_shape = self.input_spec[0].shape
    en_seq = states[-1]
    _, [h, c] = super(PointerLSTM, self).step(x_input, states[:-1])
    # vt * tanh(W1*e + W2*d)
    dec_seq = K.repeat(h, input_shape[1])
    Eij = time_distributed_dense(en_seq, self.W1, output_dim=1)
    Dij = time_distributed_dense(dec_seq, self.W2, output_dim=1)
    U = self.vt * tanh(Eij + Dij)
    U = K.squeeze(U, 2)
    # turn the scores into a probability distribution over input positions
    pointer = softmax(U)
    return pointer, [h, c]
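The scoring above follows the Pointer Networks formulation (Vinyals et al.): u_j = vt * tanh(W1 e_j + W2 d), followed by a softmax over encoder positions j. A numpy-only sketch of the same computation (hypothetical shapes, not the repo's code), where np.repeat plays the role of K.repeat(h, timesteps):

import numpy as np

timesteps, hidden = 5, 8
en_seq = np.random.randn(timesteps, hidden)   # encoder states e_j
h = np.random.randn(hidden)                   # current decoder state d
W1 = np.random.randn(hidden, 1)
W2 = np.random.randn(hidden, 1)
vt = 1.0

dec_seq = np.repeat(h[None, :], timesteps, axis=0)  # analogous to K.repeat
U = (vt * np.tanh(en_seq @ W1 + dec_seq @ W2)).squeeze(-1)  # (timesteps,)
pointer = np.exp(U) / np.exp(U).sum()               # softmax over positions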
Example 4: test
# Required import: from keras import backend [as alias]
# Or: from keras.backend import repeat [as alias]
def test(self):
    G_weights_dir = os.path.join(self.model_save_dir, 'G_weights.hdf5')
    if not os.path.isfile(G_weights_dir):
        print("Generator weights file not found")
    else:
        self.G.load_weights(G_weights_dir)
        data_iter = get_loader(self.Image_data_class.test_dataset,
                               self.Image_data_class.test_dataset_label,
                               self.Image_data_class.test_dataset_fix_label,
                               image_size=self.image_size,
                               batch_size=self.batch_size, mode=self.mode)
        n_batches = int(len(self.sample_step) / self.batch_size)
        total_samples = n_batches * self.batch_size
        for i in range(n_batches):
            imgs, orig_labels, target_labels, fix_labels, names = next(data_iter)
            for j in range(self.batch_size):
                # repeat the image once per target attribute and translate it
                preds = self.G.predict([np.repeat(np.expand_dims(imgs[j], axis=0),
                                                  len(self.selected_attrs), axis=0),
                                        fix_labels[j]])
                for k in range(len(self.selected_attrs)):
                    Image.fromarray((preds[k] * 127.5 + 127.5).astype(np.uint8)).save(
                        os.path.join(self.result_dir,
                                     names[j].split(os.path.sep)[-1].split('.')[0] + f'_{k + 1}.png'))
Example 5: custom
# Required import: from keras import backend [as alias]
# Or: from keras.backend import repeat [as alias]
def custom(self):
    G_weights_dir = os.path.join(self.model_save_dir, 'G_weights.hdf5')
    if not os.path.isfile(G_weights_dir):
        print("Generator weights file not found")
    else:
        self.G.load_weights(G_weights_dir)
        path = os.path.join(self.sample_dir, self.custom_image_name)
        target_list = create_labels([self.custom_image_label],
                                    selected_attrs=self.selected_attrs)[0]
        image = cv2.imread(path)
        image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
        image = resize_keep_aspect_ratio(image, width=self.image_size,
                                         height=self.image_size)
        image = np.array([image]) / 127.5 - 1
        # repeat the input image once per selected attribute
        preds = self.G.predict([np.repeat(image, len(self.selected_attrs), axis=0),
                                target_list])
        for k in range(len(self.selected_attrs)):
            Image.fromarray((preds[k] * 127.5 + 127.5).astype(np.uint8)).save(
                os.path.join(self.sample_dir,
                             self.custom_image_name.split('.')[0] + f'_{k + 1}.png'))
Example 6: get_batch
# Required import: from keras import backend [as alias]
# Or: from keras.backend import repeat [as alias]
def get_batch(self, model, batch_size, gamma=0.9):
    if self.fast:
        return self.get_batch_fast(model, batch_size, gamma)
    if len(self.memory) < batch_size:
        batch_size = len(self.memory)
    nb_actions = model.get_output_shape_at(0)[-1]
    samples = np.array(sample(self.memory, batch_size))
    input_dim = np.prod(self.input_shape)
    S = samples[:, 0:input_dim]
    a = samples[:, input_dim]
    r = samples[:, input_dim + 1]
    S_prime = samples[:, input_dim + 2:2 * input_dim + 2]
    game_over = samples[:, 2 * input_dim + 2]
    r = r.repeat(nb_actions).reshape((batch_size, nb_actions))
    game_over = game_over.repeat(nb_actions).reshape((batch_size, nb_actions))
    S = S.reshape((batch_size,) + self.input_shape)
    S_prime = S_prime.reshape((batch_size,) + self.input_shape)
    X = np.concatenate([S, S_prime], axis=0)
    Y = model.predict(X)
    Qsa = np.max(Y[batch_size:], axis=1).repeat(nb_actions).reshape((batch_size, nb_actions))
    delta = np.zeros((batch_size, nb_actions))
    a = np.cast['int'](a)
    delta[np.arange(batch_size), a] = 1
    targets = (1 - delta) * Y[:batch_size] + delta * (r + gamma * (1 - game_over) * Qsa)
    return S, targets
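A toy numpy illustration (made-up numbers, not from the repo) of the masked-target trick at the end of get_batch: every row keeps the model's own predictions except at the action actually taken, where the Bellman target r + gamma * max Q(s', .) is written in, so only that action contributes to the loss:

import numpy as np

batch_size, nb_actions, gamma = 2, 3, 0.9
Y = np.array([[0.1, 0.5, 0.2],
              [0.4, 0.0, 0.3]])          # Q(s, .) predictions
Qsa = np.array([0.7, 0.9])               # max_a' Q(s', a')
r = np.array([1.0, 0.0])
game_over = np.array([0.0, 1.0])
a = np.array([1, 2])                     # actions taken

delta = np.zeros((batch_size, nb_actions))
delta[np.arange(batch_size), a] = 1      # one-hot mask of taken actions
target_vals = r + gamma * (1 - game_over) * Qsa
targets = (1 - delta) * Y + delta * target_vals[:, None]
print(targets)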
Example 7: set_batch_function
# Required import: from keras import backend [as alias]
# Or: from keras.backend import repeat [as alias]
def set_batch_function(self, model, input_shape, batch_size, nb_actions, gamma):
    input_dim = np.prod(input_shape)
    samples = K.placeholder(shape=(batch_size, input_dim * 2 + 3))
    S = samples[:, 0:input_dim]
    a = samples[:, input_dim]
    r = samples[:, input_dim + 1]
    S_prime = samples[:, input_dim + 2:2 * input_dim + 2]
    game_over = samples[:, 2 * input_dim + 2:2 * input_dim + 3]
    r = K.reshape(r, (batch_size, 1))
    r = K.repeat(r, nb_actions)
    r = K.reshape(r, (batch_size, nb_actions))
    game_over = K.repeat(game_over, nb_actions)
    game_over = K.reshape(game_over, (batch_size, nb_actions))
    S = K.reshape(S, (batch_size,) + input_shape)
    S_prime = K.reshape(S_prime, (batch_size,) + input_shape)
    X = K.concatenate([S, S_prime], axis=0)
    Y = model(X)
    Qsa = K.max(Y[batch_size:], axis=1)
    Qsa = K.reshape(Qsa, (batch_size, 1))
    Qsa = K.repeat(Qsa, nb_actions)
    Qsa = K.reshape(Qsa, (batch_size, nb_actions))
    delta = K.reshape(self.one_hot(a, nb_actions), (batch_size, nb_actions))
    targets = (1 - delta) * Y[:batch_size] + delta * (r + gamma * (1 - game_over) * Qsa)
    self.batch_function = K.function(inputs=[samples], outputs=[S, targets])
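This is the symbolic counterpart of Example 6: the same reshaping and masked-target logic, built once as a backend graph. Presumably, once set_batch_function has run, a stored experience array of shape (batch_size, input_dim * 2 + 3) can then be turned into training data in a single call, e.g. `S_batch, targets = self.batch_function([samples_array])`, moving the per-batch numpy work onto the graph.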
Example 8: _time_distributed_dense
# Required import: from keras import backend [as alias]
# Or: from keras.backend import repeat [as alias]
def _time_distributed_dense(x, w, b=None, dropout=None,
                            input_dim=None, output_dim=None,
                            timesteps=None, training=None):
    """Apply `y . w + b` for every temporal slice y of x.

    # Arguments
        x: input tensor.
        w: weight matrix.
        b: optional bias vector.
        dropout: whether to apply dropout (the same dropout mask
            is used for every temporal slice of the input).
        input_dim: integer; optional dimensionality of the input.
        output_dim: integer; optional dimensionality of the output.
        timesteps: integer; optional number of timesteps.
        training: training phase tensor or boolean.

    # Returns
        Output tensor.
    """
    if not input_dim:
        input_dim = K.shape(x)[2]
    if not timesteps:
        timesteps = K.shape(x)[1]
    if not output_dim:
        output_dim = K.shape(w)[1]
    if dropout is not None and 0. < dropout < 1.:
        # apply the same dropout pattern at every timestep
        ones = K.ones_like(K.reshape(x[:, 0, :], (-1, input_dim)))
        dropout_matrix = K.dropout(ones, dropout)
        expanded_dropout_matrix = K.repeat(dropout_matrix, timesteps)
        x = K.in_train_phase(x * expanded_dropout_matrix, x, training=training)
    # This relies on K.dot broadcasting over the time axis. It matches the
    # behavior of the older Keras helper on the TensorFlow backend, but has
    # not been tested on other backends.
    x = K.dot(x, w)
    if b is not None:
        x = K.bias_add(x, b)
    return x
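The dropout branch is the part that uses K.repeat: one (scaled) dropout mask is drawn per sample and tiled across every timestep, so each temporal slice is dropped identically. A minimal standalone shape check (my own snippet, assuming Keras 2 on the TensorFlow backend):

import numpy as np
from keras import backend as K

batch, timesteps, input_dim = 2, 4, 5
ones = K.ones((batch, input_dim))
mask = K.dropout(ones, 0.5)         # (batch, input_dim): one mask per sample
mask_t = K.repeat(mask, timesteps)  # (batch, timesteps, input_dim): tiled over time
print(K.eval(mask_t).shape)         # (2, 4, 5)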
Example 9: call
# Required import: from keras import backend [as alias]
# Or: from keras.backend import repeat [as alias]
def call(self, inputs):
    features = inputs[0]
    rois = inputs[1]
    n_roi_boxes = K.shape(rois)[1]
    # rois may contain [0, 0, 0, 0] placeholder RoIs, but they are processed
    # as-is so that every batch entry keeps the same number of elements.

    # Prepare for crop_and_resize:
    # flatten rois along every axis except axis 0 (the batch dimension).
    roi_unstack = K.concatenate(tf.unstack(rois), axis=0)
    # Index identifying the batch entry each roi in roi_unstack belongs to.
    batch_pos = K.flatten(
        K.repeat(K.reshape(K.arange(self.batch_size), [-1, 1]),
                 n_roi_boxes))
    # Use crop_and_resize in place of RoIAlign. crop_and_resize appears to do
    # bilinear interpolation internally, so algorithmically it should be
    # roughly equivalent.
    crop_boxes = tf.image.crop_and_resize(features,
                                          roi_unstack, batch_pos,
                                          self.out_shape)
    # Reshape from (N * n_rois, out_size, out_size, channels)
    # to (N, n_rois, out_size, out_size, channels).
    crop_boxes = K.reshape(crop_boxes,
                           [self.batch_size, n_roi_boxes]
                           + self.out_shape + [-1])
    log.tfprint(crop_boxes, "crop_boxes: ")
    return crop_boxes
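On toy numbers (batch_size=2, n_roi_boxes=3), the batch_pos construction above reduces to the following numpy one-liner, producing the per-box image indices that tf.image.crop_and_resize expects:

import numpy as np

batch_size, n_roi_boxes = 2, 3
batch_pos = np.repeat(np.arange(batch_size).reshape(-1, 1),
                      n_roi_boxes, axis=1).flatten()
print(batch_pos)  # [0 0 0 1 1 1]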
Example 10: repeat_vector
# Required import: from keras import backend [as alias]
# Or: from keras.backend import repeat [as alias]
def repeat_vector(x, rep, axis):
    return K.repeat(x, rep, axis)
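Note that stock keras.backend.repeat accepts only (x, n) and always inserts a new axis at position 1, so the extra axis argument here suggests the project wraps a custom backend shim rather than plain Keras. With plain Keras, the two kinds of repetition are spelled as follows (a quick check, assuming the TensorFlow backend):

import numpy as np
from keras import backend as K

x = K.constant(np.arange(6.).reshape(2, 3))
a = K.repeat(x, 2)                   # (2, 2, 3): new axis inserted at 1
b = K.repeat_elements(x, 2, axis=1)  # (2, 6): elements tiled along axis 1
print(K.eval(a).shape, K.eval(b).shape)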
Example 11: time_distributed_dense
# Required import: from keras import backend [as alias]
# Or: from keras.backend import repeat [as alias]
def time_distributed_dense(x, w, b=None, dropout=None,
                           input_dim=None, units=None, timesteps=None):
    """Apply `y . w + b` for every temporal slice y of x.

    # Arguments
        x: input tensor.
        w: weight matrix.
        b: optional bias vector.
        dropout: whether to apply dropout (the same dropout mask
            is used for every temporal slice of the input).
        input_dim: integer; optional dimensionality of the input.
        units: integer; optional dimensionality of the output.
        timesteps: integer; optional number of timesteps.

    # Returns
        Output tensor.
    """
    if not input_dim:
        input_dim = K.shape(x)[2]
    if not timesteps:
        timesteps = K.shape(x)[1]
    if not units:
        units = K.shape(w)[1]
    if dropout is not None and 0. < dropout < 1.:
        # apply the same dropout pattern at every timestep
        ones = K.ones_like(K.reshape(x[:, 0, :], (-1, input_dim)))
        dropout_matrix = K.dropout(ones, dropout)
        expanded_dropout_matrix = K.repeat(dropout_matrix, timesteps)
        x = K.in_train_phase(x * expanded_dropout_matrix, x)
    # collapse the time dimension and the batch dimension together
    x = K.reshape(x, (-1, input_dim))
    x = K.dot(x, w)
    if b is not None:
        x += b
    # reshape back to a 3D tensor
    if K.backend() == 'tensorflow':
        x = K.reshape(x, K.stack([-1, timesteps, units]))
        x.set_shape([None, None, units])
    else:
        x = K.reshape(x, (-1, timesteps, units))
    return x
Example 12: call
# Required import: from keras import backend [as alias]
# Or: from keras.backend import repeat [as alias]
def call(self, inputs, mask=None):
    imgs, embs = inputs
    reshaped_shape = imgs.shape[:3].concatenate(embs.shape[1])
    embs = K.repeat(embs, imgs.shape[1] * imgs.shape[2])
    embs = K.reshape(embs, reshaped_shape)
    return K.concatenate([imgs, embs], axis=3)
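A numpy stand-in (hypothetical shapes, not the repo's code) for this broadcast-and-concatenate: the per-sample embedding is repeated H*W times, reshaped to the spatial grid, and concatenated onto the image channels:

import numpy as np

N, H, W, C, E = 2, 4, 4, 3, 8
imgs = np.zeros((N, H, W, C))
embs = np.zeros((N, E))
tiled = np.repeat(embs[:, None, :], H * W, axis=1)  # like K.repeat(embs, H*W)
tiled = tiled.reshape(N, H, W, E)
out = np.concatenate([imgs, tiled], axis=3)
print(out.shape)  # (2, 4, 4, 11)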
Example 13: step
# Required import: from keras import backend [as alias]
# Or: from keras.backend import repeat [as alias]
def step(self, x_input, states):
    input_shape = self.input_spec[0].shape
    en_seq = states[-1]
    _, [h, c] = super(PointerLSTM, self).step(x_input, states[:-1])
    # vt * tanh(W1*e + W2*d)
    dec_seq = K.repeat(h, input_shape[1])
    Eij = time_distributed_dense(en_seq, self.W1, output_dim=1)
    Dij = time_distributed_dense(dec_seq, self.W2, output_dim=1)
    U = self.vt * tanh(Eij + Dij)
    U = K.squeeze(U, 2)
    # turn the scores into a probability distribution over input positions
    pointer = softmax(U)
    return pointer, [h, c]
Example 14: build_generator
# Required import: from keras import backend [as alias]
# Or: from keras.backend import repeat [as alias]
def build_generator(self):
    """Generator network."""
    # Input tensors
    inp_c = Input(shape=(self.c_dim,))
    inp_img = Input(shape=(self.image_size, self.image_size, 3))

    # Replicate the domain information spatially and concatenate it
    c = Lambda(lambda x: K.repeat(x, self.image_size**2))(inp_c)
    c = Reshape((self.image_size, self.image_size, self.c_dim))(c)
    x = Concatenate()([inp_img, c])

    # First Conv2D
    x = Conv2D(filters=self.g_conv_dim, kernel_size=7, strides=1,
               padding='same', use_bias=False)(x)
    x = InstanceNormalization(axis=-1)(x)
    x = ReLU()(x)

    # Down-sampling layers
    curr_dim = self.g_conv_dim
    for i in range(2):
        x = ZeroPadding2D(padding=1)(x)
        x = Conv2D(filters=curr_dim * 2, kernel_size=4, strides=2,
                   padding='valid', use_bias=False)(x)
        x = InstanceNormalization(axis=-1)(x)
        x = ReLU()(x)
        curr_dim = curr_dim * 2

    # Bottleneck layers
    for i in range(self.g_repeat_num):
        x = self.ResidualBlock(x, curr_dim)

    # Up-sampling layers
    for i in range(2):
        x = UpSampling2D(size=2)(x)
        x = Conv2D(filters=curr_dim // 2, kernel_size=4, strides=1,
                   padding='same', use_bias=False)(x)
        x = InstanceNormalization(axis=-1)(x)
        x = ReLU()(x)
        curr_dim = curr_dim // 2

    # Last Conv2D
    x = ZeroPadding2D(padding=3)(x)
    out = Conv2D(filters=3, kernel_size=7, strides=1, padding='valid',
                 activation='tanh', use_bias=False)(x)
    return Model(inputs=[inp_img, inp_c], outputs=out)
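The Lambda with K.repeat followed by Reshape here is the same replicate-and-concatenate pattern sketched after Example 12: each domain-label vector is tiled to all image_size * image_size spatial positions so it can be concatenated onto the image channels before the first convolution.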
Example 15: build_model
# Required import: from keras import backend [as alias]
# Or: from keras.backend import repeat [as alias]
def build_model(self, input_shape):
    input_dim = input_shape[-1]
    output_dim = self.output_dim
    input_length = input_shape[1]
    hidden_dim = self.hidden_dim

    x = Input(batch_shape=input_shape)
    h_tm1 = Input(batch_shape=(input_shape[0], hidden_dim))
    c_tm1 = Input(batch_shape=(input_shape[0], hidden_dim))

    W1 = Dense(hidden_dim * 4,
               kernel_initializer=self.kernel_initializer,
               kernel_regularizer=self.kernel_regularizer)
    W2 = Dense(output_dim,
               kernel_initializer=self.kernel_initializer,
               kernel_regularizer=self.kernel_regularizer)
    W3 = Dense(1,
               kernel_initializer=self.kernel_initializer,
               kernel_regularizer=self.kernel_regularizer)
    U = Dense(hidden_dim * 4,
              kernel_initializer=self.kernel_initializer,
              kernel_regularizer=self.kernel_regularizer)

    # Broadcast the previous cell state across every timestep and attend over x
    C = Lambda(lambda x: K.repeat(x, input_length),
               output_shape=(input_length, hidden_dim))(c_tm1)
    _xC = concatenate([x, C])
    _xC = Lambda(lambda x: K.reshape(x, (-1, input_dim + hidden_dim)),
                 output_shape=(input_dim + hidden_dim,))(_xC)
    alpha = W3(_xC)
    alpha = Lambda(lambda x: K.reshape(x, (-1, input_length)),
                   output_shape=(input_length,))(alpha)
    alpha = Activation('softmax')(alpha)
    _x = Lambda(lambda x: K.batch_dot(x[0], x[1], axes=(1, 1)),
                output_shape=(input_dim,))([alpha, x])

    # Standard LSTM gating on the attended input
    z = add([W1(_x), U(h_tm1)])
    z0, z1, z2, z3 = get_slices(z, 4)
    i = Activation(self.recurrent_activation)(z0)
    f = Activation(self.recurrent_activation)(z1)
    c = add([multiply([f, c_tm1]), multiply([i, Activation(self.activation)(z2)])])
    o = Activation(self.recurrent_activation)(z3)
    h = multiply([o, Activation(self.activation)(c)])
    y = Activation(self.activation)(W2(h))
    return Model([x, h_tm1, c_tm1], [y, h, c])