This article collects typical usage examples of the tensorflow.keras.Input method in Python. If you are wondering how keras.Input is used in practice, or are looking for concrete examples of it, the curated code samples here may help. You can also explore further usage examples from the enclosing module, tensorflow.keras.
The following shows 15 code examples of the keras.Input method, sorted by popularity by default.
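Before the examples, here is a minimal, self-contained sketch of the pattern they all share: keras.Input creates a symbolic tensor describing the shape of a single sample, which then anchors a functional Model. The layer sizes and names below are illustrative only, not taken from any of the examples.

import numpy as np
from tensorflow import keras

# keras.Input describes the shape of one sample; the batch axis is implicit.
inputs = keras.Input(shape=(16,))
hidden = keras.layers.Dense(8, activation="relu")(inputs)
outputs = keras.layers.Dense(1)(hidden)

# The functional Model is defined by wiring inputs to outputs.
model = keras.Model(inputs, outputs)
print(model(np.random.uniform(size=(4, 16)).astype("float32")).shape)  # (4, 1)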
Example 1: _test_single_mode
# Required import: from tensorflow import keras [as alias]
# Or: from tensorflow.keras import Input [as alias]
def _test_single_mode(layer, **kwargs):
    sparse = kwargs.pop('sparse', False)
    A_in = Input(shape=(None,), sparse=sparse)
    X_in = Input(shape=(F,))
    inputs = [X_in, A_in]

    if sparse:
        input_data = [X, sp_matrix_to_sp_tensor(A)]
    else:
        input_data = [X, A]

    if kwargs.pop('edges', None):
        E_in = Input(shape=(S,))
        inputs.append(E_in)
        input_data.append(E_single)

    layer_instance = layer(**kwargs)
    output = layer_instance(inputs)
    model = Model(inputs, output)

    output = model(input_data)

    assert output.shape == (N, kwargs['channels'])
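The helper above comes from a graph-learning test suite: N, F, S, X, A, E_single and sp_matrix_to_sp_tensor are module-level fixtures defined elsewhere. The Input detail that matters here is sparse=sparse, which makes the placeholder accept a tf.SparseTensor adjacency matrix. Below is a hedged, self-contained sketch of that pattern; the ToyPropagate layer and all sizes are illustrative, not part of the test suite.

import numpy as np
import tensorflow as tf
from tensorflow.keras import Input, Model
from tensorflow.keras.layers import Dense

N, F = 5, 3                                   # illustrative node / feature counts
X_in = Input(shape=(F,))                      # per-node features
A_in = Input(shape=(None,), sparse=True)      # per-node adjacency row, fed as a SparseTensor

# Stand-in for a graph layer: densify the adjacency and propagate the features.
class ToyPropagate(tf.keras.layers.Layer):
    def call(self, inputs):
        x, a = inputs
        return tf.matmul(tf.sparse.to_dense(a), x)

output = Dense(4)(ToyPropagate()([X_in, A_in]))
model = Model([X_in, A_in], output)

X = np.random.normal(size=(N, F)).astype("float32")
A = tf.sparse.from_dense(np.eye(N, dtype="float32"))
print(model([X, A]).shape)                    # (N, 4)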
Example 2: _test_batch_mode
# Required import: from tensorflow import keras [as alias]
# Or: from tensorflow.keras import Input [as alias]
def _test_batch_mode(layer, **kwargs):
    A_batch = np.stack([A] * batch_size)
    X_batch = np.stack([X] * batch_size)
    A_in = Input(shape=(N, N))
    X_in = Input(shape=(N, F))
    inputs = [X_in, A_in]
    input_data = [X_batch, A_batch]

    if kwargs.pop('edges', None):
        E_batch = np.stack([E] * batch_size)
        E_in = Input(shape=(N, N, S))
        inputs.append(E_in)
        input_data.append(E_batch)

    layer_instance = layer(**kwargs)
    output = layer_instance(inputs)
    model = Model(inputs, output)

    output = model(input_data)

    assert output.shape == (batch_size, N, kwargs['channels'])
Example 3: ConvDiscriminator
# Required import: from tensorflow import keras [as alias]
# Or: from tensorflow.keras import Input [as alias]
def ConvDiscriminator(input_shape=(64, 64, 3),
                      dim=64,
                      n_downsamplings=4,
                      norm='batch_norm',
                      name='ConvDiscriminator'):
    Norm = _get_norm_layer(norm)

    # 0
    h = inputs = keras.Input(shape=input_shape)

    # 1: downsamplings, ... -> 16x16 -> 8x8 -> 4x4
    h = keras.layers.Conv2D(dim, 4, strides=2, padding='same')(h)
    h = tf.nn.leaky_relu(h, alpha=0.2)  # or keras.layers.LeakyReLU(alpha=0.2)(h)

    for i in range(n_downsamplings - 1):
        d = min(dim * 2 ** (i + 1), dim * 8)
        h = keras.layers.Conv2D(d, 4, strides=2, padding='same', use_bias=False)(h)
        h = Norm()(h)
        h = tf.nn.leaky_relu(h, alpha=0.2)  # or h = keras.layers.LeakyReLU(alpha=0.2)(h)

    # 2: logit
    h = keras.layers.Conv2D(1, 4, strides=1, padding='valid')(h)

    return keras.Model(inputs=inputs, outputs=h, name=name)
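A hedged usage sketch for the builder above: _get_norm_layer is defined elsewhere in its repository and is assumed here to map 'batch_norm' to a batch-normalization layer. With the default arguments, each stride-2 convolution halves the spatial size, so a 64x64 input ends up as a 1x1 patch of logits.

import numpy as np

# Build the discriminator and run a dummy image batch through it.
disc = ConvDiscriminator(input_shape=(64, 64, 3), dim=64, n_downsamplings=4)
fake_images = np.random.uniform(-1, 1, size=(8, 64, 64, 3)).astype("float32")
logits = disc(fake_images)
print(logits.shape)  # (8, 1, 1, 1): 64 -> 32 -> 16 -> 8 -> 4, then a 4x4 valid conv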
Example 4: _test_mixed_mode
# Required import: from tensorflow import keras [as alias]
# Or: from tensorflow.keras import Input [as alias]
def _test_mixed_mode(layer, **kwargs):
    sparse = kwargs.pop('sparse', False)
    X_batch = np.stack([X] * batch_size)
    A_in = Input(shape=(N,), sparse=sparse)
    X_in = Input(shape=(N, F))
    inputs = [X_in, A_in]

    if sparse:
        input_data = [X_batch, sp_matrix_to_sp_tensor(A)]
    else:
        input_data = [X_batch, A]

    layer_instance = layer(**kwargs)
    output = layer_instance(inputs)
    model = Model(inputs, output)

    output = model(input_data)

    assert output.shape == (batch_size, N, kwargs['channels'])
Example 5: test_shape_1
# Required import: from tensorflow import keras [as alias]
# Or: from tensorflow.keras import Input [as alias]
def test_shape_1(self):
    # model definition
    i1 = Input(shape=(10,), name='i1')
    i2 = Input(shape=(10,), name='i2')
    a = Dense(1, name='fc1')(i1)
    b = Dense(1, name='fc2')(i2)
    c = concatenate([a, b], name='concat')
    d = Dense(1, name='out')(c)
    model = Model(inputs=[i1, i2], outputs=[d])

    # inputs to the model
    x = [np.random.uniform(size=(32, 10)),
         np.random.uniform(size=(32, 10))]

    # call to fetch the activations of the model
    activations = get_activations(model, x, auto_compile=True)

    # the result is an OrderedDict, so iterating over .values() preserves layer order
    self.assertListEqual([a.shape for a in activations.values()],
                         [(32, 10), (32, 10), (32, 1), (32, 1), (32, 2), (32, 1)])
Example 6: test_inputs_order
# Required import: from tensorflow import keras [as alias]
# Or: from tensorflow.keras import Input [as alias]
def test_inputs_order(self):
    i10 = Input(shape=(10,), name='i1')
    i40 = Input(shape=(40,), name='i4')
    i30 = Input(shape=(30,), name='i3')
    i20 = Input(shape=(20,), name='i2')

    a = Dense(1, name='fc1')(concatenate([i10, i40, i30, i20], name='concat'))
    model = Model(inputs=[i40, i30, i20, i10], outputs=[a])

    x = [
        np.random.uniform(size=(1, 40)),
        np.random.uniform(size=(1, 30)),
        np.random.uniform(size=(1, 20)),
        np.random.uniform(size=(1, 10))
    ]

    acts = get_activations(model, x)

    self.assertListEqual(list(acts['i1'].shape), [1, 10])
    self.assertListEqual(list(acts['i2'].shape), [1, 20])
    self.assertListEqual(list(acts['i3'].shape), [1, 30])
    self.assertListEqual(list(acts['i4'].shape), [1, 40])
Example 7: build_model
# Required import: from tensorflow import keras [as alias]
# Or: from tensorflow.keras import Input [as alias]
def build_model(hp):
    inputs = keras.Input(shape=(28, 28))
    x = keras.layers.Reshape((28 * 28,))(inputs)
    for i in range(hp.Int('num_layers', 1, 4)):
        x = keras.layers.Dense(
            units=hp.Int('units_' + str(i), 128, 512, 32, default=256),
            activation='relu')(x)
    x = keras.layers.Dropout(hp.Float('dp', 0., 0.6, 0.1, default=0.5))(x)
    outputs = keras.layers.Dense(10, activation='softmax')(x)
    model = keras.Model(inputs, outputs)
    model.compile(
        optimizer=keras.optimizers.Adam(
            hp.Choice('learning_rate', [1e-2, 2e-3, 5e-4])),
        loss='sparse_categorical_crossentropy',
        metrics=['accuracy'])
    return model
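build_model follows the Keras Tuner hypermodel convention: it receives an hp object and registers its search space through hp.Int, hp.Float, and hp.Choice. Below is a hedged sketch of driving it with a random search, assuming the keras-tuner package is installed (its import name is keras_tuner in recent releases, kerastuner in older ones); the directory and project names are illustrative.

import keras_tuner as kt
from tensorflow import keras

# Load MNIST to match the (28, 28) input shape declared in build_model.
(x_train, y_train), (x_val, y_val) = keras.datasets.mnist.load_data()
x_train, x_val = x_train / 255.0, x_val / 255.0

tuner = kt.RandomSearch(
    build_model,                 # the hypermodel function defined above
    objective='val_accuracy',
    max_trials=5,
    overwrite=True,
    directory='tuner_dir',       # illustrative output location
    project_name='mnist_dense')
tuner.search(x_train, y_train, epochs=2, validation_data=(x_val, y_val))
best_model = tuner.get_best_models(num_models=1)[0]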
Example 8: build_discriminator
# Required import: from tensorflow import keras [as alias]
# Or: from tensorflow.keras import Input [as alias]
def build_discriminator(self):
    """Builds a discriminator network based on the SRGAN design."""

    def d_block(layer_input, filters, strides=1, bn=True):
        """Discriminator layer block.

        Args:
            layer_input: Input feature map for the convolutional block.
            filters: Number of filters in the convolution.
            strides: The stride of the convolution.
            bn: Whether to use batch norm or not.
        """
        d = keras.layers.Conv2D(filters, kernel_size=3, strides=strides, padding='same')(layer_input)
        if bn:
            d = keras.layers.BatchNormalization(momentum=0.8)(d)
        d = keras.layers.LeakyReLU(alpha=0.2)(d)
        return d

    # Input img
    d0 = keras.layers.Input(shape=self.hr_shape)

    d1 = d_block(d0, self.df, bn=False)
    d2 = d_block(d1, self.df, strides=2)
    d3 = d_block(d2, self.df)
    d4 = d_block(d3, self.df, strides=2)
    d5 = d_block(d4, self.df * 2)
    d6 = d_block(d5, self.df * 2, strides=2)
    d7 = d_block(d6, self.df * 2)
    d8 = d_block(d7, self.df * 2, strides=2)

    validity = keras.layers.Conv2D(1, kernel_size=1, strides=1, activation='sigmoid', padding='same')(d8)

    return keras.models.Model(d0, validity)
Example 9: main
# Required import: from tensorflow import keras [as alias]
# Or: from tensorflow.keras import Input [as alias]
def main():
    args = parser.parse_args()

    # Get all image paths
    image_paths = [os.path.join(args.image_dir, x) for x in os.listdir(args.image_dir)]

    # Change the model input shape to accept inputs of any size
    model = keras.models.load_model('models/generator.h5')
    inputs = keras.Input((None, None, 3))
    output = model(inputs)
    model = keras.models.Model(inputs, output)

    # Loop over all images
    for image_path in image_paths:
        # Read image
        low_res = cv2.imread(image_path, 1)

        # Convert to RGB (OpenCV uses BGR by default)
        low_res = cv2.cvtColor(low_res, cv2.COLOR_BGR2RGB)

        # Rescale to 0-1
        low_res = low_res / 255.0

        # Get the super-resolution image
        sr = model.predict(np.expand_dims(low_res, axis=0))[0]

        # Rescale values to the range 0-255
        sr = ((sr + 1) / 2.) * 255

        # Convert back to BGR for OpenCV
        sr = cv2.cvtColor(sr, cv2.COLOR_RGB2BGR)

        # Save the result
        cv2.imwrite(os.path.join(args.output_dir, os.path.basename(image_path)), sr)
Example 10: _test_single_mode
# Required import: from tensorflow import keras [as alias]
# Or: from tensorflow.keras import Input [as alias]
def _test_single_mode(layer, **kwargs):
    X = np.random.normal(size=(N, F))
    if 'target_shape' in kwargs:
        target_output_shape = kwargs.pop('target_shape')
    else:
        target_output_shape = (1, kwargs.get('channels', F))

    X_in = Input(shape=(F,))
    layer_instance = layer(**kwargs)
    output = layer_instance(X_in)
    model = Model(X_in, output)

    output = model(X)

    assert output.shape == target_output_shape
    assert output.shape == layer_instance.compute_output_shape(X.shape)
    _check_output_and_model_output_shapes(output.shape, model.output_shape)
Example 11: _test_batch_mode
# Required import: from tensorflow import keras [as alias]
# Or: from tensorflow.keras import Input [as alias]
def _test_batch_mode(layer, **kwargs):
    X = np.random.normal(size=(batch_size, N, F))
    if 'target_shape' in kwargs:
        target_output_shape = kwargs.pop('target_shape')
    else:
        target_output_shape = (batch_size, kwargs.get('channels', F))

    X_in = Input(shape=(N, F))
    layer_instance = layer(**kwargs)
    output = layer_instance(X_in)
    model = Model(X_in, output)

    output = model(X)

    assert output.shape == target_output_shape
    assert output.shape == layer_instance.compute_output_shape(X.shape)
    _check_output_and_model_output_shapes(output.shape, model.output_shape)
Example 12: _test_single_mode
# Required import: from tensorflow import keras [as alias]
# Or: from tensorflow.keras import Input [as alias]
def _test_single_mode(layer, **kwargs):
    A = np.ones((N, N))
    X = np.random.normal(size=(N, F))

    sparse = kwargs.pop('sparse', None) is not None
    A_in = Input(shape=(None,), sparse=sparse)
    X_in = Input(shape=(F,))

    layer_instance = layer(**kwargs)
    output = layer_instance([X_in, A_in])
    model = Model([X_in, A_in], output)

    output = model([X, A])
    X_pool, A_pool, mask = output

    if 'ratio' in kwargs.keys():
        N_exp = kwargs['ratio'] * N
    elif 'k' in kwargs.keys():
        N_exp = kwargs['k']
    else:
        raise ValueError('Need k or ratio.')
    N_pool_expected = int(np.ceil(N_exp))
    N_pool_true = A_pool.shape[-1]

    _check_number_of_nodes(N_pool_expected, N_pool_true)

    assert X_pool.shape == (N_pool_expected, F)
    assert A_pool.shape == (N_pool_expected, N_pool_expected)

    output_shape = [o.shape for o in output]
    _check_output_and_model_output_shapes(output_shape, model.output_shape)
Example 13: _test_batch_mode
# Required import: from tensorflow import keras [as alias]
# Or: from tensorflow.keras import Input [as alias]
def _test_batch_mode(layer, **kwargs):
    A = np.ones((batch_size, N, N))
    X = np.random.normal(size=(batch_size, N, F))

    A_in = Input(shape=(N, N))
    X_in = Input(shape=(N, F))

    layer_instance = layer(**kwargs)
    output = layer_instance([X_in, A_in])
    model = Model([X_in, A_in], output)

    output = model([X, A])
    X_pool, A_pool, mask = output

    if 'ratio' in kwargs.keys():
        N_exp = kwargs['ratio'] * N
    elif 'k' in kwargs.keys():
        N_exp = kwargs['k']
    else:
        raise ValueError('Need k or ratio.')
    N_pool_expected = int(np.ceil(N_exp))
    N_pool_true = A_pool.shape[-1]

    _check_number_of_nodes(N_pool_expected, N_pool_true)

    assert X_pool.shape == (batch_size, N_pool_expected, F)
    assert A_pool.shape == (batch_size, N_pool_expected, N_pool_expected)

    output_shape = [o.shape for o in output]
    _check_output_and_model_output_shapes(output_shape, model.output_shape)
Example 14: _test_disjoint_mode
# Required import: from tensorflow import keras [as alias]
# Or: from tensorflow.keras import Input [as alias]
def _test_disjoint_mode(layer, **kwargs):
    A = sp.block_diag([np.ones((N1, N1)), np.ones((N2, N2)), np.ones((N3, N3))]).todense()
    X = np.random.normal(size=(N, F))
    I = np.array([0] * N1 + [1] * N2 + [2] * N3).astype(int)

    sparse = kwargs.pop('sparse', None) is not None
    A_in = Input(shape=(None,), sparse=sparse)
    X_in = Input(shape=(F,))
    I_in = Input(shape=(), dtype=tf.int32)

    layer_instance = layer(**kwargs)
    output = layer_instance([X_in, A_in, I_in])
    model = Model([X_in, A_in, I_in], output)

    output = model([X, A, I])
    X_pool, A_pool, I_pool, mask = output

    N_pool_expected = np.ceil(kwargs['ratio'] * N1) + \
                      np.ceil(kwargs['ratio'] * N2) + \
                      np.ceil(kwargs['ratio'] * N3)
    N_pool_expected = int(N_pool_expected)
    N_pool_true = A_pool.shape[0]

    _check_number_of_nodes(N_pool_expected, N_pool_true)

    assert X_pool.shape == (N_pool_expected, F)
    assert A_pool.shape == (N_pool_expected, N_pool_expected)
    assert I_pool.shape == (N_pool_expected,)

    output_shape = [o.shape for o in output]
    _check_output_and_model_output_shapes(output_shape, model.output_shape)
Example 15: make_model
# Required import: from tensorflow import keras [as alias]
# Or: from tensorflow.keras import Input [as alias]
def make_model(**kwargs) -> tf.keras.Model:
    # Model is based on MicronNet: https://arxiv.org/abs/1804.00497v3
    img_size = 48
    NUM_CLASSES = 43
    eps = 1e-6

    inputs = Input(shape=(img_size, img_size, 3))
    x = Conv2D(1, (1, 1), padding="same")(inputs)
    x = BatchNormalization(epsilon=eps)(x)
    x = Activation("relu")(x)
    x = Conv2D(29, (5, 5), padding="same")(x)
    x = BatchNormalization(epsilon=eps)(x)
    x = Activation("relu")(x)
    x = MaxPooling2D(pool_size=(3, 3), strides=2)(x)
    x = Conv2D(59, (3, 3), padding="same")(x)
    x = BatchNormalization(epsilon=eps)(x)
    x = Activation("relu")(x)
    x = MaxPooling2D(pool_size=(3, 3), strides=2)(x)
    x = Conv2D(74, (3, 3), padding="same")(x)
    x = BatchNormalization(epsilon=eps)(x)
    x = Activation("relu")(x)
    x = MaxPooling2D(pool_size=(3, 3), strides=2)(x)
    x = Flatten()(x)
    x = Dense(300)(x)
    x = Activation("relu")(x)
    x = BatchNormalization(epsilon=eps)(x)
    x = Dense(300, activation="relu")(x)
    predictions = Dense(NUM_CLASSES, activation="softmax")(x)

    model = Model(inputs=inputs, outputs=predictions)
    model.compile(
        optimizer=tf.keras.optimizers.SGD(
            lr=0.01, decay=1e-6, momentum=0.9, nesterov=True
        ),
        loss=tf.keras.losses.sparse_categorical_crossentropy,
        metrics=["accuracy"],
    )
    return model
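A hedged usage sketch for make_model: random tensors shaped like GTSRB crops (48x48 RGB, 43 classes) stand in for real data just to show the fit/predict plumbing, assuming a TensorFlow version whose SGD optimizer still accepts the lr and decay arguments used above.

import numpy as np

# Exercise the compiled model with stand-in data shaped like GTSRB crops.
model = make_model()
x = np.random.uniform(size=(16, 48, 48, 3)).astype("float32")
y = np.random.randint(0, 43, size=(16,))
model.fit(x, y, epochs=1, batch_size=8, verbose=0)
probs = model.predict(x)
print(probs.shape)  # (16, 43)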