This article collects typical usage examples of the Python class tensorflow.python.layers.core.Dense. If you are wondering what core.Dense does, how to call it, or what it looks like in real code, the curated examples below may help. You can also explore other usage examples from its containing module, tensorflow.python.layers.core.
The following shows 15 code examples of core.Dense, ordered by popularity.
Example 1: get_config
# Required import: from tensorflow.python.layers import core [as alias]
# Or: from tensorflow.python.layers.core import Dense [as alias]
def get_config(self):
    config = {
        'units': self.units,
        'activation': activations.serialize(self.activation),
        'use_bias': self.use_bias,
        'kernel_initializer': initializers.serialize(self.kernel_initializer),
        'bias_initializer': initializers.serialize(self.bias_initializer),
        'kernel_regularizer': regularizers.serialize(self.kernel_regularizer),
        'bias_regularizer': regularizers.serialize(self.bias_regularizer),
        'activity_regularizer':
            regularizers.serialize(self.activity_regularizer),
        'kernel_constraint': constraints.serialize(self.kernel_constraint),
        'bias_constraint': constraints.serialize(self.bias_constraint)
    }
    base_config = super(Dense, self).get_config()
    return dict(list(base_config.items()) + list(config.items()))
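To see what this dict is for, here is a minimal round-trip sketch. It uses the public tf.keras.layers.Dense, which follows the same get_config/from_config contract; the subclass above is assumed to behave the same way:

from tensorflow.keras import layers

layer = layers.Dense(32, activation='relu')
config = layer.get_config()                # a plain, serializable dict
clone = layers.Dense.from_config(config)   # reconstructs an equivalent layer
assert clone.units == 32 and clone.use_bias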
Example 2: __init__
# Required import: from tensorflow.python.layers import core [as alias]
# Or: from tensorflow.python.layers.core import Dense [as alias]
def __init__(self,
             num_mixtures,
             memory,
             memory_sequence_length=None,
             check_inner_dims_defined=True,
             score_mask_value=None,
             name='GmmAttention'):
    self.dtype = memory.dtype
    self.num_mixtures = num_mixtures
    self.query_layer = tf.layers.Dense(
        3 * num_mixtures, name='gmm_query_layer', use_bias=True,
        dtype=self.dtype)

    with tf.name_scope(name, 'GmmAttentionMechanismInit'):
        if score_mask_value is None:
            score_mask_value = 0.
        self._maybe_mask_score = functools.partial(
            _maybe_mask_score,
            memory_sequence_length=memory_sequence_length,
            score_mask_value=score_mask_value)
        self._value = _prepare_memory(
            memory, memory_sequence_length, check_inner_dims_defined)
        self._batch_size = (
            self._value.shape[0].value or tf.shape(self._value)[0])
        self._alignments_size = (
            self._value.shape[1].value or tf.shape(self._value)[1])
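A hedged wiring sketch for this mechanism; encoder_outputs, encoder_lengths, and decoder_cell are assumed names, not part of the example above:

# Attend over encoder outputs of shape (batch, time, depth).
attention_mechanism = GmmAttention(
    num_mixtures=5,
    memory=encoder_outputs,
    memory_sequence_length=encoder_lengths)
attn_cell = tf.contrib.seq2seq.AttentionWrapper(decoder_cell, attention_mechanism)

The query layer has 3 * num_mixtures units because a GMM attention head predicts three parameters per mixture: weight, mean offset, and scale.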
Example 3: monkeypatch_tf_layers
# Required import: from tensorflow.python.layers import core [as alias]
# Or: from tensorflow.python.layers.core import Dense [as alias]
def monkeypatch_tf_layers():
    if get_tf_version_tuple() < (1, 4):
        if not hasattr(tf.layers, 'Dense'):
            from tensorflow.python.layers.core import Dense
            tf.layers.Dense = Dense

            from tensorflow.python.layers.normalization import BatchNormalization
            tf.layers.BatchNormalization = BatchNormalization

            from tensorflow.python.layers.convolutional import Conv2DTranspose, Conv2D
            tf.layers.Conv2DTranspose = Conv2DTranspose
            tf.layers.Conv2D = Conv2D

            from tensorflow.python.layers.pooling import MaxPooling2D, AveragePooling2D
            tf.layers.MaxPooling2D = MaxPooling2D
            tf.layers.AveragePooling2D = AveragePooling2D
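Usage is a single call at startup, before any model code touches tf.layers; a minimal sketch:

monkeypatch_tf_layers()      # no-op on TF >= 1.4, where tf.layers already has these
fc = tf.layers.Dense(128)    # resolvable even on older TF 1.x after the patch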
Example 4: _project_lstm_state_tuple
# Required import: from tensorflow.python.layers import core [as alias]
# Or: from tensorflow.python.layers.core import Dense [as alias]
def _project_lstm_state_tuple(state_tuple, num_units):
    r"""
    Concatenates all the `c` and `h` members from a list of `LSTMStateTuple`
    objects and projects them to a space of dimension `num_units`.

    Args:
        state_tuple: a list of `LSTMStateTuple` objects
        num_units: output dimension

    Returns:
        projected_state: a single `LSTMStateTuple` with `c` and `h` of dimension `num_units`
    """
    state_proj_layer = Dense(num_units, name='state_projection', use_bias=False)
    cat_c = tf.concat([state.c for state in state_tuple], axis=-1)
    cat_h = tf.concat([state.h for state in state_tuple], axis=-1)
    proj_c = state_proj_layer(cat_c)
    proj_h = state_proj_layer(cat_h)
    projected_state = tf.contrib.rnn.LSTMStateTuple(c=proj_c, h=proj_h)
    return projected_state
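Note the design choice: a single shared Dense projects both c and h, so the two halves of the state pass through the same linear map. A usage sketch with assumed shapes:

# Two 256-unit encoder layers projected to a single 512-unit decoder state.
states = [tf.contrib.rnn.LSTMStateTuple(c=tf.zeros([8, 256]), h=tf.zeros([8, 256]))
          for _ in range(2)]
decoder_state = _project_lstm_state_tuple(states, num_units=512)
# decoder_state.c and decoder_state.h both have shape (8, 512)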
Example 5: _maybe_add_dense_layers
# Required import: from tensorflow.python.layers import core [as alias]
# Or: from tensorflow.python.layers.core import Dense [as alias]
def _maybe_add_dense_layers(self):
    r"""
    Optionally passes self._inputs through several fully connected (Dense) layers
    with the configuration defined by the self._hparams.input_dense_layers tuple.

    Returns
    -------
    The output of the stack of Dense layers
    """
    layer_inputs = self._inputs
    if self._hparams.input_dense_layers[0] > 0:
        fc = [Dense(units,
                    activation=tf.nn.selu,
                    use_bias=False,
                    kernel_initializer=tf.variance_scaling_initializer(),
                    kernel_regularizer=tf.contrib.layers.l2_regularizer(scale=0.0001))
              for units in self._hparams.input_dense_layers]
        for layer in fc:
            layer_inputs = layer(layer_inputs)
    return layer_inputs
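The SELU activation is presumably paired with tf.variance_scaling_initializer() on purpose: self-normalizing networks rely on variance-scaling (LeCun-style) initialization to keep activations near zero mean and unit variance, and the omitted bias plus the mild L2 penalty fit the same regime.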
Example 6: _init_decoder
# Required import: from tensorflow.python.layers import core [as alias]
# Or: from tensorflow.python.layers.core import Dense [as alias]
def _init_decoder(self):
    with tf.variable_scope("Decoder"):
        self._decoder_cells = build_rnn_layers(
            cell_type=self._hparams.cell_type,
            num_units_per_layer=self._hparams.decoder_units_per_layer,
            use_dropout=self._hparams.use_dropout,
            dropout_probability=self._hparams.decoder_dropout_probability,
            mode=self._mode,
            dtype=self._hparams.dtype,
        )

        self._decoder_initial_state = self._decoder_cells.zero_state(
            batch_size=self._batch_size, dtype=self._hparams.dtype)

        self._dense_layer = Dense(
            self._vocab_size,
            name='my_dense',
            dtype=self._hparams.dtype)

        self._build_decoder_train()  # used for both training and evaluation
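The Dense layer built here is not applied directly; in this pattern it becomes the decoder's output_layer, projecting each RNN output to vocabulary logits. A hedged sketch of how _build_decoder_train presumably wires it (helper is an assumed name):

decoder = tf.contrib.seq2seq.BasicDecoder(
    cell=self._decoder_cells,
    helper=helper,                               # e.g. a TrainingHelper
    initial_state=self._decoder_initial_state,
    output_layer=self._dense_layer)              # (B, T, units) -> (B, T, vocab_size)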
Example 7: build
# Required import: from tensorflow.python.layers import core [as alias]
# Or: from tensorflow.python.layers.core import Dense [as alias]
def build(self, input_shape):
    input_shape = tensor_shape.TensorShape(input_shape)
    if input_shape[-1].value is None:
        raise ValueError('The last dimension of the inputs to `Dense` '
                         'should be defined. Found `None`.')
    self.input_spec = base.InputSpec(min_ndim=2,
                                     axes={-1: input_shape[-1].value})
    self.kernel = self._build_kernel(input_shape)
    if self.use_bias:
        self.bias = self.add_variable('bias',
                                      shape=[self.units],
                                      initializer=self.bias_initializer,
                                      regularizer=self.bias_regularizer,
                                      constraint=self.bias_constraint,
                                      dtype=self.dtype,
                                      trainable=True)
    else:
        self.bias = None
    self.built = True
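This build() delegates kernel creation to a _build_kernel hook, which is the point subclasses override. A hypothetical implementation matching stock tf.layers.Dense behaviour (an assumption, since the hook is not shown in this example):

def _build_kernel(self, input_shape):
    # Stock Dense: a [input_dim, units] weight matrix.
    return self.add_variable('kernel',
                             shape=[input_shape[-1].value, self.units],
                             initializer=self.kernel_initializer,
                             regularizer=self.kernel_regularizer,
                             dtype=self.dtype,
                             trainable=True)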
Example 8: build_rnn
# Required import: from tensorflow.python.layers import core [as alias]
# Or: from tensorflow.python.layers.core import Dense [as alias]
def build_rnn(self):
    self.initial_state = tf.cond(
        self.beam_search_decoding,
        lambda: seq2seq.tile_batch(self.features["state"], self.hparams.beam_width),
        lambda: self.features["state"],
        name="initial_state")
    self.build_embeddings()
    cell_list = self.build_deep_cell(return_raw_list=True)
    if self.hparams.use_attention:
        cell_list[-1] = self.build_attention(cell_list[-1])
    final_cell = cell_list[-1]  # the (possibly attention-wrapped) top cell
    if self.hparams.depth > 1:
        # Replace only the top layer's initial state; a tuple state must be
        # converted to a list before item assignment.
        self.initial_state = list(self.initial_state)
        self.initial_state[-1] = final_cell.zero_state(
            batch_size=self.batch_size, dtype=tf.float32)  # dtype assumed
    else:
        self.initial_state = final_cell.zero_state(
            batch_size=self.batch_size, dtype=tf.float32)  # dtype assumed
    with tf.name_scope('rnn_cell'):
        self.cell = self.build_deep_cell(cell_list)
    self.output_layer = Dense(self.vocab.size(), name='output_layer')
Example 9: decoder
# Required import: from tensorflow.python.layers import core [as alias]
# Or: from tensorflow.python.layers.core import Dense [as alias]
def decoder(x, decoder_inputs, keep_prob, sequence_length, memory, memory_length, first_attention):
    with tf.variable_scope("Decoder") as scope:
        label_embeddings = tf.get_variable(name="embeddings",
                                           shape=[n_classes, embedding_size],
                                           dtype=tf.float32)
        train_inputs_embedded = tf.nn.embedding_lookup(label_embeddings, decoder_inputs)
        lstm = rnn.LayerNormBasicLSTMCell(n_hidden, dropout_keep_prob=keep_prob)
        output_l = layers_core.Dense(n_classes, use_bias=True)

        # Training decoder: Bahdanau attention over the raw memory.
        encoder_state = rnn.LSTMStateTuple(x, x)
        attention_mechanism = BahdanauAttention(
            embedding_size, memory=memory, memory_sequence_length=memory_length)
        cell = AttentionWrapper(lstm, attention_mechanism, output_attention=False)
        cell_state = cell.zero_state(dtype=tf.float32, batch_size=train_batch_size)
        cell_state = cell_state.clone(cell_state=encoder_state, attention=first_attention)
        train_helper = TrainingHelper(train_inputs_embedded, sequence_length)
        train_decoder = BasicDecoder(cell, train_helper, cell_state, output_layer=output_l)
        decoder_outputs_train, decoder_state_train, decoder_seq_train = dynamic_decode(
            train_decoder, impute_finished=True)

        # Inference decoder: beam search over the memory tiled beam_width times.
        tiled_inputs = tile_batch(memory, multiplier=beam_width)
        tiled_sequence_length = tile_batch(memory_length, multiplier=beam_width)
        tiled_first_attention = tile_batch(first_attention, multiplier=beam_width)
        attention_mechanism = BahdanauAttention(
            embedding_size, memory=tiled_inputs, memory_sequence_length=tiled_sequence_length)
        x2 = tile_batch(x, beam_width)
        encoder_state2 = rnn.LSTMStateTuple(x2, x2)
        cell = AttentionWrapper(lstm, attention_mechanism, output_attention=False)
        cell_state = cell.zero_state(dtype=tf.float32,
                                     batch_size=test_batch_size * beam_width)
        cell_state = cell_state.clone(cell_state=encoder_state2, attention=tiled_first_attention)
        infer_decoder = BeamSearchDecoder(cell, embedding=label_embeddings,
                                          start_tokens=[GO] * test_len, end_token=EOS,
                                          initial_state=cell_state, beam_width=beam_width,
                                          output_layer=output_l)
        decoder_outputs_infer, decoder_state_infer, decoder_seq_infer = dynamic_decode(
            infer_decoder, maximum_iterations=4)
        return decoder_outputs_train, decoder_outputs_infer, decoder_state_infer
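The two decoders expose their results differently; a hedged sketch of reading them back (shapes inferred from the code above):

train_logits = decoder_outputs_train.rnn_output               # (B, T, n_classes)
best_beam_ids = decoder_outputs_infer.predicted_ids[:, :, 0]  # ids from the top beam

Note that both decoders share the same lstm cell and output_l projection, so beam-search inference reuses the trained weights.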
Example 10: __create_decoder_cell
# Required import: from tensorflow.python.layers import core [as alias]
# Or: from tensorflow.python.layers.core import Dense [as alias]
def __create_decoder_cell(self):
    self.decoder_cell = tf.nn.rnn_cell.GRUCell(self.config.DECODER_RNN_HIDDEN_SIZE)
    # Fully connected layer to project the encoder's last state to the decoder hidden size.
    decoder_hidden_state_reshape = Dense(self.config.DECODER_RNN_HIDDEN_SIZE)
    self.decoder_initial_state = (decoder_hidden_state_reshape(self.encoder_last_state),)
Example 11: __create_decoder_cell
# Required import: from tensorflow.python.layers import core [as alias]
# Or: from tensorflow.python.layers.core import Dense [as alias]
def __create_decoder_cell(self):
    # Build a fresh GRUCell per layer; `[gru] * NUM_LAYERS` would reuse one
    # cell object across all layers and make them share weights.
    self.decoder_cell_list = [
        tf.nn.rnn_cell.GRUCell(self.config.DECODER_RNN_HIDDEN_SIZE)
        for _ in range(self.config.NUM_LAYERS)]
    self.decoder_cell = tf.nn.rnn_cell.MultiRNNCell(self.decoder_cell_list)
    # Project the encoder's last state to the decoder hidden size.
    decoder_hidden_state_reshape = Dense(self.config.DECODER_RNN_HIDDEN_SIZE)
    self.decoder_initial_state = (decoder_hidden_state_reshape(self.encoder_triples_last_state),)
Example 12: __create_decoder_attention_cell
# Required import: from tensorflow.python.layers import core [as alias]
# Or: from tensorflow.python.layers.core import Dense [as alias]
def __create_decoder_attention_cell(self):
    """
    Create the decoder RNN with Bahdanau attention.
    """
    self.attention_mechanism = tf.contrib.seq2seq.BahdanauAttention(
        num_units=self.config.TRIPLES_EMBEDDING_SIZE,  # depth of the attention layer
        memory=self.encoder_triples_inputs_embedded,
        name="Attention")

    # Create the decoder cells; build a fresh cell per layer so weights are not shared.
    self.decoder_cell_list = [
        self.__build_single_rnn_cell(self.config.DECODER_RNN_HIDDEN_SIZE)
        for _ in range(self.config.NUM_LAYERS)]
    # Project the encoder's last state to the decoder hidden size.
    decoder_hidden_state_reshape = Dense(self.config.DECODER_RNN_HIDDEN_SIZE)
    self.decoder_cell_list[-1] = tf.contrib.seq2seq.AttentionWrapper(
        cell=self.decoder_cell_list[-1],
        attention_layer_size=self.config.DECODER_RNN_HIDDEN_SIZE,  # output size of the top decoder layer
        attention_mechanism=self.attention_mechanism,
        initial_cell_state=decoder_hidden_state_reshape(self.encoder_triples_last_state),
        alignment_history=False,
        name="Attention_Wrapper")
    self.decoder_cell = tf.nn.rnn_cell.MultiRNNCell(self.decoder_cell_list)

    # To be compatible with AttentionWrapper, the initial state of the top
    # layer must be an AttentionWrapperState; zero_state produces one (and it
    # already embeds the initial_cell_state passed to the constructor above).
    init_state = self.decoder_cell_list[-1].zero_state(
        batch_size=self.batch_size,
        dtype=tf.float32)
    # A tuple, because the decoder initial state has to be a tuple.
    self.decoder_initial_state = (init_state,)
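An equivalent alternative (a hedged sketch, not from the example) is to skip initial_cell_state at construction and override the cell state on the zero state via AttentionWrapperState.clone:

init_state = self.decoder_cell_list[-1].zero_state(
    batch_size=self.batch_size, dtype=tf.float32)
init_state = init_state.clone(
    cell_state=decoder_hidden_state_reshape(self.encoder_triples_last_state))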
Example 13: __init__
# Required import: from tensorflow.python.layers import core [as alias]
# Or: from tensorflow.python.layers.core import Dense [as alias]
def __init__(self,
             units,
             activation=None,
             use_bias=True,
             kernel_initializer='glorot_uniform',
             bias_initializer='zeros',
             kernel_regularizer=None,
             bias_regularizer=None,
             activity_regularizer=None,
             kernel_constraint=None,
             bias_constraint=None,
             **kwargs):
    if 'input_shape' not in kwargs and 'input_dim' in kwargs:
        kwargs['input_shape'] = (kwargs.pop('input_dim'),)
    # Inheritance call order:
    # 1) tf.layers.Dense, 2) keras.layers.Layer, 3) tf.layers.Layer
    super(Dense, self).__init__(
        units,
        activation=activations.get(activation),
        use_bias=use_bias,
        kernel_initializer=initializers.get(kernel_initializer),
        bias_initializer=initializers.get(bias_initializer),
        kernel_regularizer=regularizers.get(kernel_regularizer),
        bias_regularizer=regularizers.get(bias_regularizer),
        activity_regularizer=regularizers.get(activity_regularizer),
        **kwargs)
    # TODO(fchollet): move weight constraint support to core layers.
    self.kernel_constraint = constraints.get(kernel_constraint)
    self.bias_constraint = constraints.get(bias_constraint)
    self.supports_masking = True
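Because every argument is routed through the get()/serialize() helpers, the usual Keras string shortcuts work; a hedged construction sketch:

layer = Dense(64,
              activation='relu',                  # resolved via activations.get
              kernel_initializer='he_normal',     # resolved via initializers.get
              kernel_constraint=constraints.max_norm(2.0))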
Example 14: build
# Required import: from tensorflow.python.layers import core [as alias]
# Or: from tensorflow.python.layers.core import Dense [as alias]
def build(self, input_shape):
    super(Dense, self).build(input_shape)
    # TODO(fchollet): move weight constraint support to core layers.
    if self.kernel_constraint:
        self.constraints[self.kernel] = self.kernel_constraint
    if self.use_bias and self.bias_constraint:
        self.constraints[self.bias] = self.bias_constraint
Example 15: self_attention_2
# Required import: from tensorflow.python.layers import core [as alias]
# Or: from tensorflow.python.layers.core import Dense [as alias]
def self_attention_2(self, inputs, name):
    """
    :param inputs: input tensor (B, T, dim)
    :param name: scope name
    :return: concatenation of per-timestep attention outputs, shape (B, T * dim)
    """
    t = inputs.get_shape()[1].value
    share_param = True
    hidden_size = inputs.shape[-1].value  # D value - hidden size of the RNN layer
    if share_param:
        scope_name = 'self_attn_2'
    else:
        scope_name = 'self_attn_2' + name
    attention_size = hidden_size
    w_omega = tf.Variable(tf.random_normal([hidden_size, attention_size],
                                           stddev=0.01, seed=self.seed))
    b_omega = tf.Variable(tf.random_normal([attention_size],
                                           stddev=0.01, seed=self.seed))
    params = {'w_omega': w_omega,
              'b_omega': b_omega}
    with tf.variable_scope(scope_name, reuse=tf.AUTO_REUSE):
        outputs = []
        for x in range(t):
            t_x = inputs[:, x, :]
            # Attend over the full sequence with the slice at step x as query.
            output = self.attention(inputs, t_x, hidden_size, params, self.mask)  # (b, d)
            outputs.append(output)
        final_output = tf.concat(outputs, axis=1)
    return final_output
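The self.attention helper is not shown in this example; a hypothetical additive scorer consistent with the w_omega/b_omega parameters above (shapes in comments are assumptions, and it relies on attention_size equaling hidden_size, as it does here):

v = tf.tanh(tf.tensordot(inputs, params['w_omega'], axes=1) + params['b_omega'])  # (B, T, A)
scores = tf.reduce_sum(v * tf.expand_dims(t_x, 1), axis=2)      # (B, T)
alphas = tf.nn.softmax(scores)                                  # weights over the T steps
output = tf.reduce_sum(inputs * tf.expand_dims(alphas, -1), 1)  # (B, D) context vector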