This article collects typical usage examples of the Python method tensorflow.keras.layers.Input. If you are wondering how layers.Input is used in practice, the curated code samples below may help. You can also explore the containing module, tensorflow.keras.layers, for further usage examples.
15 code examples of layers.Input are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code samples.
Example 1: _create_encoder
# Required import: from tensorflow.keras import layers [as alias]
# Or: from tensorflow.keras.layers import Input [as alias]
def _create_encoder(self, n_layers, dropout):
    """Create the encoder as a tf.keras.Model."""
    input = self._create_features()
    gather_indices = Input(shape=(2,), dtype=tf.int32)
    prev_layer = input
    for i in range(len(self._filter_sizes)):
        filter_size = self._filter_sizes[i]
        kernel_size = self._kernel_sizes[i]
        if dropout > 0.0:
            prev_layer = Dropout(rate=dropout)(prev_layer)
        prev_layer = Conv1D(
            filters=filter_size, kernel_size=kernel_size,
            activation=tf.nn.relu)(prev_layer)
    prev_layer = Flatten()(prev_layer)
    prev_layer = Dense(
        self._decoder_dimension, activation=tf.nn.relu)(prev_layer)
    prev_layer = BatchNormalization()(prev_layer)
    return tf.keras.Model(inputs=[input, gather_indices], outputs=prev_layer)
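The encoder above depends on attributes of its surrounding class (_create_features, _filter_sizes, _kernel_sizes, _decoder_dimension). The following is a rough, self-contained sketch of the same functional-API pattern with made-up shapes and sizes, not taken from the source class:
import tensorflow as tf
from tensorflow.keras.layers import Input, Dropout, Conv1D, Flatten, Dense, BatchNormalization

# Illustrative shapes only: sequences of length 100 with 16 features per step.
features = Input(shape=(100, 16))
gather_indices = Input(shape=(2,), dtype=tf.int32)
x = Dropout(rate=0.1)(features)
x = Conv1D(filters=32, kernel_size=3, activation=tf.nn.relu)(x)
x = Flatten()(x)
x = Dense(64, activation=tf.nn.relu)(x)
x = BatchNormalization()(x)
encoder = tf.keras.Model(inputs=[features, gather_indices], outputs=x)
encoder.summary()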
Example 2: compute_output_shape
# Required import: from tensorflow.keras import layers [as alias]
# Or: from tensorflow.keras.layers import Input [as alias]
def compute_output_shape(self, input_shape):
    if len(input_shape) != 3:
        raise RuntimeError("Expects 3 inputs: L, mu, a")
    for i, shape in enumerate(input_shape):
        if len(shape) != 2:
            raise RuntimeError(
                "Input {} has {} dimensions but should have 2".format(i, len(shape)))
    assert self.mode in ('full', 'diag')
    if self.mode == 'full':
        expected_elements = (self.nb_actions * self.nb_actions + self.nb_actions) // 2
    elif self.mode == 'diag':
        expected_elements = self.nb_actions
    else:
        expected_elements = None
    assert expected_elements is not None
    if input_shape[0][1] != expected_elements:
        raise RuntimeError(
            "Input 0 (L) should have {} elements but has {}".format(
                expected_elements, input_shape[0][1]))
    if input_shape[1][1] != self.nb_actions:
        raise RuntimeError(
            "Input 1 (mu) should have {} elements but has {}".format(
                self.nb_actions, input_shape[1][1]))
    if input_shape[2][1] != self.nb_actions:
        raise RuntimeError(
            "Input 2 (action) should have {} elements but has {}".format(
                self.nb_actions, input_shape[2][1]))
    return input_shape[0][0], 1
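As a quick illustration of the element count checked above (not from the source): in 'full' mode the layer expects the entries of an nb_actions x nb_actions lower-triangular matrix L.
# Illustrative check only: for nb_actions = 3 the lower-triangular matrix L
# has 3 * (3 + 1) / 2 = 6 entries, matching the 'full' mode expectation.
nb_actions = 3
assert (nb_actions * nb_actions + nb_actions) // 2 == 6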
Example 3: test_single_ddpg_input
# Required import: from tensorflow.keras import layers [as alias]
# Or: from tensorflow.keras.layers import Input [as alias]
def test_single_ddpg_input():
    nb_actions = 2

    actor = Sequential()
    actor.add(Flatten(input_shape=(2, 3)))
    actor.add(Dense(nb_actions))

    action_input = Input(shape=(nb_actions,), name='action_input')
    observation_input = Input(shape=(2, 3), name='observation_input')
    x = Concatenate()([action_input, Flatten()(observation_input)])
    x = Dense(1)(x)
    critic = Model(inputs=[action_input, observation_input], outputs=x)

    memory = SequentialMemory(limit=10, window_length=2)
    agent = DDPGAgent(actor=actor, critic=critic, critic_action_input=action_input, memory=memory,
                      nb_actions=2, nb_steps_warmup_critic=5, nb_steps_warmup_actor=5, batch_size=4)
    agent.compile('sgd')
    agent.fit(MultiInputTestEnv((3,)), nb_steps=10)
Example 4: test_single_continuous_dqn_input
# Required import: from tensorflow.keras import layers [as alias]
# Or: from tensorflow.keras.layers import Input [as alias]
def test_single_continuous_dqn_input():
    nb_actions = 2

    V_model = Sequential()
    V_model.add(Flatten(input_shape=(2, 3)))
    V_model.add(Dense(1))

    mu_model = Sequential()
    mu_model.add(Flatten(input_shape=(2, 3)))
    mu_model.add(Dense(nb_actions))

    L_input = Input(shape=(2, 3))
    L_input_action = Input(shape=(nb_actions,))
    x = Concatenate()([Flatten()(L_input), L_input_action])
    x = Dense(((nb_actions * nb_actions + nb_actions) // 2))(x)
    L_model = Model(inputs=[L_input_action, L_input], outputs=x)

    memory = SequentialMemory(limit=10, window_length=2)
    agent = NAFAgent(nb_actions=nb_actions, V_model=V_model, L_model=L_model, mu_model=mu_model,
                     memory=memory, nb_steps_warmup=5, batch_size=4)
    agent.compile('sgd')
    agent.fit(MultiInputTestEnv((3,)), nb_steps=10)
Example 5: construct_q_network
# Required import: from tensorflow.keras import layers [as alias]
# Or: from tensorflow.keras.layers import Input [as alias]
def construct_q_network(self):
    # Replaces the convolution layers with Dense layers, and changes the size of the
    # input space and output space.
    # Uses the network architecture found in the DeepMind paper.
    input_layer = Input(shape=(self.observation_size * self.training_param.NUM_FRAMES,))
    layer1 = Dense(self.observation_size * self.training_param.NUM_FRAMES)(input_layer)
    layer1 = Activation('relu')(layer1)
    layer2 = Dense(self.observation_size)(layer1)
    layer2 = Activation('relu')(layer2)
    layer3 = Dense(self.observation_size)(layer2)
    layer3 = Activation('relu')(layer3)
    layer4 = Dense(2 * self.action_size)(layer3)
    layer4 = Activation('relu')(layer4)
    output = Dense(self.action_size)(layer4)

    self.model = Model(inputs=[input_layer], outputs=[output])
    self.model.compile(loss='mse', optimizer=Adam(lr=self.lr_))

    # Note: the target model is built from the same layer instances as self.model,
    # so the two networks share their weights.
    self.target_model = Model(inputs=[input_layer], outputs=[output])
    self.target_model.compile(loss='mse', optimizer=Adam(lr=self.lr_))
    self.target_model.set_weights(self.model.get_weights())
Example 6: _build_q_NN
# Required import: from tensorflow.keras import layers [as alias]
# Or: from tensorflow.keras.layers import Input [as alias]
def _build_q_NN(self):
    input_states = Input(shape=(self.observation_size,))
    input_action = Input(shape=(self.action_size,))
    input_layer = Concatenate()([input_states, input_action])

    lay1 = Dense(self.observation_size)(input_layer)
    lay1 = Activation('relu')(lay1)
    lay2 = Dense(self.observation_size)(lay1)
    lay2 = Activation('relu')(lay2)
    lay3 = Dense(2 * self.action_size)(lay2)
    lay3 = Activation('relu')(lay3)

    advantage = Dense(1, activation='linear')(lay3)

    model = Model(inputs=[input_states, input_action], outputs=[advantage])
    model.compile(loss='mse', optimizer=Adam(lr=self.lr_))
    return model
Example 7: qdense_util
# Required import: from tensorflow.keras import layers [as alias]
# Or: from tensorflow.keras.layers import Input [as alias]
def qdense_util(layer_cls,
                kwargs=None,
                input_data=None,
                weight_data=None,
                expected_output=None):
    """qlayer test utility."""
    input_shape = input_data.shape
    input_dtype = input_data.dtype
    layer = layer_cls(**kwargs)
    x = Input(shape=input_shape[1:], dtype=input_dtype)
    y = layer(x)
    layer.set_weights(weight_data)
    model = Model(x, y)
    actual_output = model.predict(input_data)
    if expected_output is not None:
        assert_allclose(actual_output, expected_output, rtol=1e-4)
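A hypothetical call of this utility with a plain Dense layer might look as follows (the layer, shapes, and weights here are illustrative assumptions, not taken from the source test suite):
import numpy as np
from tensorflow.keras.layers import Dense

# Random input batch of 10 samples with 4 features each.
input_data = np.random.rand(10, 4).astype('float32')
# Weights for a Dense(3) layer: kernel of shape (4, 3) and bias of shape (3,).
kernel = np.random.rand(4, 3).astype('float32')
bias = np.zeros(3, dtype='float32')
# A Dense layer with linear activation computes input_data @ kernel + bias.
expected = input_data @ kernel + bias
qdense_util(Dense, kwargs={'units': 3}, input_data=input_data,
            weight_data=[kernel, bias], expected_output=expected)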
Example 8: build_model
# Required import: from tensorflow.keras import layers [as alias]
# Or: from tensorflow.keras.layers import Input [as alias]
def build_model(dist: Union[Distribution, PixelCNN], input_shape: tuple = None, filepath: str = None) \
        -> Tuple[tf.keras.Model, Union[Distribution, PixelCNN]]:
    """
    Create a tf.keras.Model from a TF distribution.

    Parameters
    ----------
    dist
        TensorFlow distribution.
    input_shape
        Input shape of the model.
    filepath
        Optional path to weights which are loaded into the model.

    Returns
    -------
    TensorFlow model and the distribution.
    """
    x_in = Input(shape=input_shape)
    log_prob = dist.log_prob(x_in)
    model = Model(inputs=x_in, outputs=log_prob)
    model.add_loss(-tf.reduce_mean(log_prob))
    if isinstance(filepath, str):
        model.load_weights(filepath)
    return model, dist
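A minimal sketch of using this helper with a TensorFlow Probability distribution (assumes tensorflow_probability is installed; the distribution chosen here is illustrative, not from the source):
import tensorflow_probability as tfp

# Standard 2-D Gaussian; log_prob of a batch of 2-D points has shape (batch,).
dist = tfp.distributions.MultivariateNormalDiag(loc=[0., 0.], scale_diag=[1., 1.])
model, dist = build_model(dist, input_shape=(2,))
model.summary()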
Example 9: build_model
# Required import: from tensorflow.keras import layers [as alias]
# Or: from tensorflow.keras.layers import Input [as alias]
def build_model(self, n_inputs, n_outputs):
    """Q Network is a 256-256-256 MLP.

    Arguments:
        n_inputs (int): input dim
        n_outputs (int): output dim

    Return:
        q_model (Model): DQN
    """
    inputs = Input(shape=(n_inputs, ), name='state')
    x = Dense(256, activation='relu')(inputs)
    x = Dense(256, activation='relu')(x)
    x = Dense(256, activation='relu')(x)
    x = Dense(n_outputs,
              activation='linear',
              name='action')(x)
    q_model = Model(inputs, x)
    q_model.summary()
    return q_model
Example 10: _build_graph
# Required import: from tensorflow.keras import layers [as alias]
# Or: from tensorflow.keras.layers import Input [as alias]
def _build_graph(self):
    inputs = Input(dtype=tf.float32, shape=(self.feature_dim,), name="Input")
    out1 = Dense(units=self.hidden_layer_size, activation='relu')(inputs)
    final = Dense(units=self.n_tasks, activation='sigmoid')(out1)
    outputs = [final]
    output_types = ['prediction']
    loss = dc.models.losses.BinaryCrossEntropy()
    model = tf.keras.Model(inputs=[inputs], outputs=outputs)
    return model, loss, output_types
Example 11: test_compute_model_performance_multitask_classifier
# Required import: from tensorflow.keras import layers [as alias]
# Or: from tensorflow.keras.layers import Input [as alias]
def test_compute_model_performance_multitask_classifier(self):
    n_data_points = 20
    n_features = 1
    n_tasks = 2
    n_classes = 2

    X = np.ones(shape=(n_data_points // 2, n_features)) * -1
    X1 = np.ones(shape=(n_data_points // 2, n_features))
    X = np.concatenate((X, X1))
    class_1 = np.array([[0.0, 1.0] for x in range(int(n_data_points / 2))])
    class_0 = np.array([[1.0, 0.0] for x in range(int(n_data_points / 2))])
    y1 = np.concatenate((class_0, class_1))
    y2 = np.concatenate((class_1, class_0))
    y = np.stack([y1, y2], axis=1)
    dataset = NumpyDataset(X, y)

    features = layers.Input(shape=(n_data_points // 2, n_features))
    dense = layers.Dense(n_tasks * n_classes)(features)
    logits = layers.Reshape((n_tasks, n_classes))(dense)
    output = layers.Softmax()(logits)
    keras_model = tf.keras.Model(inputs=features, outputs=[output, logits])
    model = dc.models.KerasModel(
        keras_model,
        dc.models.losses.SoftmaxCrossEntropy(),
        output_types=['prediction', 'loss'],
        learning_rate=0.01,
        batch_size=n_data_points)

    model.fit(dataset, nb_epoch=1000)
    metric = dc.metrics.Metric(
        dc.metrics.roc_auc_score, np.mean, mode="classification")
    scores = model.evaluate_generator(
        model.default_generator(dataset), [metric], per_task_metrics=True)
    scores = list(scores[1].values())
    # Loosening atol to see if tests stop failing sporadically
    assert np.all(np.isclose(scores, [1.0, 1.0], atol=0.50))
Example 12: test_compute_model_performance_singletask_classifier
# Required import: from tensorflow.keras import layers [as alias]
# Or: from tensorflow.keras.layers import Input [as alias]
def test_compute_model_performance_singletask_classifier(self):
    n_data_points = 20
    n_features = 10

    X = np.ones(shape=(int(n_data_points / 2), n_features)) * -1
    X1 = np.ones(shape=(int(n_data_points / 2), n_features))
    X = np.concatenate((X, X1))
    class_1 = np.array([[0.0, 1.0] for x in range(int(n_data_points / 2))])
    class_0 = np.array([[1.0, 0.0] for x in range(int(n_data_points / 2))])
    y = np.concatenate((class_0, class_1))
    dataset = NumpyDataset(X, y)

    features = layers.Input(shape=(n_features,))
    dense = layers.Dense(2)(features)
    output = layers.Softmax()(dense)
    keras_model = tf.keras.Model(inputs=features, outputs=[output])
    model = dc.models.KerasModel(
        keras_model, dc.models.losses.SoftmaxCrossEntropy(), learning_rate=0.1)

    model.fit(dataset, nb_epoch=1000)
    metric = dc.metrics.Metric(
        dc.metrics.roc_auc_score, np.mean, mode="classification")
    scores = model.evaluate_generator(
        model.default_generator(dataset), [metric], per_task_metrics=True)
    scores = list(scores[1].values())
    assert np.isclose(scores, [1.0], atol=0.05)
Example 13: test_compute_model_performance_multitask_regressor
# Required import: from tensorflow.keras import layers [as alias]
# Or: from tensorflow.keras.layers import Input [as alias]
def test_compute_model_performance_multitask_regressor(self):
    random_seed = 42
    n_data_points = 20
    n_features = 2
    n_tasks = 2

    np.random.seed(seed=random_seed)
    X = np.random.rand(n_data_points, n_features)
    y1 = np.array([0.5 for x in range(n_data_points)])
    y2 = np.array([-0.5 for x in range(n_data_points)])
    y = np.stack([y1, y2], axis=1)
    dataset = NumpyDataset(X, y)

    features = layers.Input(shape=(n_features,))
    dense = layers.Dense(n_tasks)(features)
    keras_model = tf.keras.Model(inputs=features, outputs=[dense])
    model = dc.models.KerasModel(
        keras_model, dc.models.losses.L2Loss(), learning_rate=0.1)

    model.fit(dataset, nb_epoch=1000)
    metric = [
        dc.metrics.Metric(
            dc.metrics.mean_absolute_error, np.mean, mode="regression"),
    ]
    scores = model.evaluate_generator(
        model.default_generator(dataset), metric, per_task_metrics=True)
    scores = list(scores[1].values())
    assert np.all(np.isclose(scores, [0.0, 0.0], atol=1.0))
Example 14: get_noise_input_shape
# Required import: from tensorflow.keras import layers [as alias]
# Or: from tensorflow.keras.layers import Input [as alias]
def get_noise_input_shape(self):
    """Get the shape of the generator's noise input layer.

    Subclasses must override this to return a tuple giving the shape of the
    noise input. The actual Input layer will be created automatically. The
    dimension corresponding to the batch size should be omitted.
    """
    raise NotImplementedError("Subclasses must implement this.")
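For illustration, a hypothetical subclass (the class name and noise size are assumptions, not from the source) might override the method like this:
class ExampleGAN(GAN):  # assumes the abstract GAN class that defines this method

    def get_noise_input_shape(self):
        # 10-dimensional noise vector; the batch dimension is omitted.
        return (10,)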
Example 15: get_conditional_input_shapes
# Required import: from tensorflow.keras import layers [as alias]
# Or: from tensorflow.keras.layers import Input [as alias]
def get_conditional_input_shapes(self):
    """Get the shapes of any conditional inputs.

    Subclasses may override this to return a list of tuples, each giving the
    shape of one of the conditional inputs. The actual Input layers will be
    created automatically. The dimension corresponding to the batch size should
    be omitted.

    The default implementation returns an empty list, meaning there are no
    conditional inputs.
    """
    return []
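Likewise, a hypothetical subclass that conditions the generator on a 5-dimensional label vector (an illustrative assumption, not from the source) could override it as:
class ConditionalExampleGAN(GAN):  # assumes the abstract GAN class that defines this method

    def get_conditional_input_shapes(self):
        # One conditional input: a 5-dimensional vector per sample.
        return [(5,)]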