This page collects typical usage examples of the Python method tensorflow.python.ops.nn_ops.dropout. If you are asking how nn_ops.dropout works, how to call it, or what real code that uses it looks like, the curated examples below should help. You can also explore the enclosing module, tensorflow.python.ops.nn_ops, for related methods.
The following lists 15 code examples of nn_ops.dropout, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python examples.
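Before diving into the examples, here is a minimal, self-contained sketch (assuming a TensorFlow 1.x environment; the tensor values are illustrative) of what nn_ops.dropout does: it zeroes each element with probability 1 - keep_prob and scales the survivors by 1 / keep_prob, so the expected sum is preserved.

import tensorflow as tf
from tensorflow.python.ops import nn_ops

x = tf.ones([2, 4])
# Roughly half of the entries become 0; the rest are scaled to 1 / 0.5 = 2.0.
y = nn_ops.dropout(x, keep_prob=0.5, seed=42)

with tf.Session() as sess:
  print(sess.run(y))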
Example 1: call
# Required import: from tensorflow.python.ops import nn_ops [as alias]
# Alternatively: from tensorflow.python.ops.nn_ops import dropout [as alias]
def call(self, inputs, state):
  """LSTM cell with layer normalization and recurrent dropout."""
  c, h = state
  args = array_ops.concat([inputs, h], 1)
  concat = self._linear(args)

  i, j, f, o = array_ops.split(value=concat, num_or_size_splits=4, axis=1)
  if self._layer_norm:
    i = self._norm(i, "input")
    j = self._norm(j, "transform")
    f = self._norm(f, "forget")
    o = self._norm(o, "output")

  g = self._activation(j)
  if (not isinstance(self._keep_prob, float)) or self._keep_prob < 1:
    g = nn_ops.dropout(g, self._keep_prob, seed=self._seed)

  new_c = (c * math_ops.sigmoid(f + self._forget_bias)
           + math_ops.sigmoid(i) * g)
  if self._layer_norm:
    new_c = self._norm(new_c, "state")
  new_h = self._activation(new_c) * math_ops.sigmoid(o)

  new_state = rnn_cell_impl.LSTMStateTuple(new_c, new_h)
  return new_h, new_state
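For context, a hypothetical way to drive a cell like this one (the class is LayerNormBasicLSTMCell, as Example 8 shows) through tf.nn.dynamic_rnn; the shapes and hyperparameters below are illustrative, not from the original project:

import tensorflow as tf

cell = tf.contrib.rnn.LayerNormBasicLSTMCell(
    num_units=64, layer_norm=True, dropout_keep_prob=0.9)
# [batch, time, features]; recurrent dropout is applied inside the cell,
# to the candidate activation g only, never to the carried-over state.
inputs = tf.placeholder(tf.float32, [None, 20, 32])
outputs, final_state = tf.nn.dynamic_rnn(cell, inputs, dtype=tf.float32)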
Example 2: pre
# Required import: from tensorflow.python.ops import nn_ops [as alias]
# Alternatively: from tensorflow.python.ops.nn_ops import dropout [as alias]
def pre(self, inputs, scope=None):
  """Preprocess inputs for the cell. Assumes shape [N, J, *],
  with the inputs laid out as [x, u]."""
  is_train = self._is_train
  keep_prob = self._keep_prob
  gate_size = self._gate_size
  with tf.variable_scope(scope or "pre"):
    x, u, _, _ = tf.split(2, 4, tf.slice(inputs, [0, 0, gate_size], [-1, -1, -1]))  # [N, J, d]
    a_raw = linear([x * u], gate_size, True, scope='a_raw', var_on_cpu=self._var_on_cpu,
                   wd=self._wd, initializer=self._initializer)
    a = tf.sigmoid(a_raw - self._forget_bias, name='a')
    if keep_prob < 1.0:
      x = tf.cond(is_train, lambda: tf.nn.dropout(x, keep_prob), lambda: x)
      u = tf.cond(is_train, lambda: tf.nn.dropout(u, keep_prob), lambda: u)
    v_t = tf.nn.tanh(linear([x, u], self._num_units, True,
                            var_on_cpu=self._var_on_cpu, wd=self._wd, scope='v_raw'), name='v')
    new_inputs = tf.concat(2, [a, x, u, v_t])  # [N, J, 3*d + 1]
    return new_inputs
Example 3: __call__
# Required import: from tensorflow.python.ops import nn_ops [as alias]
# Alternatively: from tensorflow.python.ops.nn_ops import dropout [as alias]
def __call__(self, inputs, state, scope=None):
  """Run the cell with the declared dropouts."""
  def _should_dropout(p):
    return (not isinstance(p, float)) or p < 1

  if _should_dropout(self._input_keep_prob):
    inputs = self._dropout(inputs, "input",
                           self._recurrent_input_noise,
                           self._input_keep_prob)
  output, new_state = self._cell(inputs, state, scope=scope)
  if _should_dropout(self._state_keep_prob):
    # Identify which subsets of the state to perform dropout on and
    # which ones to keep.
    # shallow_filtered_substructure = nest.get_traverse_shallow_structure(
    #     self._dropout_state_filter, new_state)
    shallow_filtered_substructure = self._dropout_state_filter(new_state)  # TODO: GK hack
    new_state = self._dropout(new_state, "state",
                              self._recurrent_state_noise,
                              self._state_keep_prob,
                              shallow_filtered_substructure)
  if _should_dropout(self._output_keep_prob):
    output = self._dropout(output, "output",
                           self._recurrent_output_noise,
                           self._output_keep_prob)
  return output, new_state
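As a hedged usage sketch, this is how a wrapper with these three dropouts is typically constructed (assuming the TF 1.x DropoutWrapper API; the keep probabilities are illustrative):

import tensorflow as tf

base = tf.nn.rnn_cell.LSTMCell(128)
wrapped = tf.nn.rnn_cell.DropoutWrapper(
    base,
    input_keep_prob=0.8,
    state_keep_prob=0.9,
    output_keep_prob=0.8,
    variational_recurrent=True,  # reuse one noise mask across all time steps
    input_size=64,               # required when variational_recurrent=True
    dtype=tf.float32)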
Example 4: __call__
# Required import: from tensorflow.python.ops import nn_ops [as alias]
# Alternatively: from tensorflow.python.ops.nn_ops import dropout [as alias]
def __call__(self, inputs, state, scope=None):
  """Run the cell with the declared dropouts."""
  def _should_dropout(p):
    return (not isinstance(p, float)) or p < 1

  if _should_dropout(self._input_keep_prob):
    inputs = self._dropout(inputs, "input",
                           self._recurrent_input_noise,
                           self._input_keep_prob)
  output, new_state = self._cell(inputs, state, scope)
  if _should_dropout(self._state_keep_prob):
    # Identify which subsets of the state to perform dropout on and
    # which ones to keep.
    shallow_filtered_substructure = nest.get_traverse_shallow_structure(
        self._dropout_state_filter, new_state)
    new_state = self._dropout(new_state, "state",
                              self._recurrent_state_noise,
                              self._state_keep_prob,
                              shallow_filtered_substructure)
  if _should_dropout(self._output_keep_prob):
    output = self._dropout(output, "output",
                           self._recurrent_output_noise,
                           self._output_keep_prob)
  return output, new_state
Author: PacktPublishing · Project: Serverless-Deep-Learning-with-TensorFlow-and-AWS-Lambda · Lines: 26 · Source file: rnn_cell_impl.py
Example 5: __call__
# Required import: from tensorflow.python.ops import nn_ops [as alias]
# Alternatively: from tensorflow.python.ops.nn_ops import dropout [as alias]
def __call__(self, inputs, state, scope=None):
  """Run the cell with the declared zoneouts."""
  # Compute output and new state as before.
  output, new_state = self._cell(inputs, state, scope)

  # If either hidden-state or memory-cell zoneout is applied, split the
  # state and process each part.
  if self._has_hidden_state_zoneout or self._has_memory_cell_zoneout:
    # Split the state.
    c_old, m_old = state
    c_new, m_new = new_state

    # Apply zoneout to the memory cell and the hidden state.
    c_and_m = []
    for s_old, s_new, p, has_zoneout in [
        (c_old, c_new, self._memory_cell_keep_prob, self._has_memory_cell_zoneout),
        (m_old, m_new, self._hidden_state_keep_prob, self._has_hidden_state_zoneout)]:
      if has_zoneout:
        if self._is_training:
          # Using random ops directly would be cleaner; see the dropout
          # implementation for how.
          mask = nn_ops.dropout(array_ops.ones_like(s_new), p, seed=self._seed) * p
          s = ((1. - mask) * s_old) + (mask * s_new)
        else:
          s = ((1. - p) * s_old) + (p * s_new)
      else:
        s = s_new
      c_and_m.append(s)

    # Package the final results.
    new_state = LSTMStateTuple(*c_and_m)
    output = new_state.h

  return output, new_state
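The masking trick above deserves a note: dropout applied to a tensor of ones yields entries in {0, 1/p}, so multiplying the result by p recovers a plain Bernoulli(p) 0/1 mask. A standalone sketch of the same logic (function and argument names here are illustrative):

from tensorflow.python.ops import array_ops, nn_ops

def zoneout(s_old, s_new, keep_prob, is_training, seed=None):
  if is_training:
    # Bernoulli(keep_prob) mask: 1 keeps the new state, 0 "zones out"
    # to the old state.
    mask = nn_ops.dropout(
        array_ops.ones_like(s_new), keep_prob, seed=seed) * keep_prob
    return (1. - mask) * s_old + mask * s_new
  # At test time, blend deterministically using the expected mask value.
  return (1. - keep_prob) * s_old + keep_prob * s_new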
Example 6: _variational_recurrent_dropout_value
# Required import: from tensorflow.python.ops import nn_ops [as alias]
# Alternatively: from tensorflow.python.ops.nn_ops import dropout [as alias]
def _variational_recurrent_dropout_value(
    self, index, value, noise, keep_prob):
  """Performs dropout given the pre-calculated noise tensor."""
  # uniform in [keep_prob, 1.0 + keep_prob)
  random_tensor = keep_prob + noise
  # 0. if in [keep_prob, 1.0) and 1. if in [1.0, 1.0 + keep_prob)
  binary_tensor = math_ops.floor(random_tensor)
  ret = math_ops.div(value, keep_prob) * binary_tensor
  ret.set_shape(value.get_shape())
  return ret
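The arithmetic works because the noise tensor is assumed to be uniform in [0, 1): adding keep_prob and flooring yields 1 with probability keep_prob and 0 otherwise. A self-contained sketch of the whole scheme, including how the noise would be pre-computed once per layer so the same mask is reused at every time step (names are illustrative):

import tensorflow as tf
from tensorflow.python.ops import math_ops, random_ops

def make_recurrent_noise(shape, seed=None, dtype=tf.float32):
  # Drawn once, outside the time loop: uniform in [0, 1).
  return random_ops.random_uniform(shape, seed=seed, dtype=dtype)

def variational_dropout(value, noise, keep_prob):
  random_tensor = keep_prob + noise              # uniform [keep_prob, 1 + keep_prob)
  binary_tensor = math_ops.floor(random_tensor)  # 0/1 mask, P(1) = keep_prob
  return math_ops.div(value, keep_prob) * binary_tensor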
Example 7: _dropout
# Required import: from tensorflow.python.ops import nn_ops [as alias]
# Alternatively: from tensorflow.python.ops.nn_ops import dropout [as alias]
def _dropout(self, values, salt_prefix, recurrent_noise, keep_prob):
  """Decides whether to perform standard dropout or recurrent dropout."""
  if not self._variational_recurrent:
    def dropout(i, v):
      return nn_ops.dropout(
          v, keep_prob=keep_prob, seed=self._gen_seed(salt_prefix, i))
    return _enumerated_map_structure(dropout, values)
  else:
    def dropout(i, v, n):
      return self._variational_recurrent_dropout_value(i, v, n, keep_prob)
    return _enumerated_map_structure(dropout, values, recurrent_noise)
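The salt_prefix plus per-element index feed _gen_seed, so each tensor in the nested structure gets a distinct but reproducible dropout mask. A sketch of what such a helper might look like (hypothetical; the actual rnn_cell_impl helper may differ):

import hashlib

def gen_seed(base_seed, salt_prefix, index):
  if base_seed is None:
    return None
  salt = "%s_%d" % (salt_prefix, index)
  string = (str(base_seed) + salt).encode("utf-8")
  # Derive a stable 31-bit seed from the base seed and the salt.
  return int(hashlib.md5(string).hexdigest()[:8], 16) & 0x7FFFFFFF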
Example 8: __init__
# Required import: from tensorflow.python.ops import nn_ops [as alias]
# Alternatively: from tensorflow.python.ops.nn_ops import dropout [as alias]
def __init__(self, num_units, forget_bias=1.0,
             input_size=None, activation=math_ops.tanh,
             layer_norm=True, norm_gain=1.0, norm_shift=0.0,
             dropout_keep_prob=1.0, dropout_prob_seed=None,
             reuse=None):
  """Initializes the basic LSTM cell.

  Args:
    num_units: int, The number of units in the LSTM cell.
    forget_bias: float, The bias added to forget gates (see above).
    input_size: Deprecated and unused.
    activation: Activation function of the inner states.
    layer_norm: If `True`, layer normalization will be applied.
    norm_gain: float, The layer normalization gain initial value. If
      `layer_norm` has been set to `False`, this argument will be ignored.
    norm_shift: float, The layer normalization shift initial value. If
      `layer_norm` has been set to `False`, this argument will be ignored.
    dropout_keep_prob: unit Tensor or float between 0 and 1 representing the
      recurrent dropout probability value. If float and 1.0, no dropout will
      be applied.
    dropout_prob_seed: (optional) integer, the randomness seed.
    reuse: (optional) Python boolean describing whether to reuse variables
      in an existing scope. If not `True`, and the existing scope already has
      the given variables, an error is raised.
  """
  super(LayerNormBasicLSTMCell, self).__init__(_reuse=reuse)

  if input_size is not None:
    logging.warn("%s: The input_size parameter is deprecated.", self)

  self._num_units = num_units
  self._activation = activation
  self._forget_bias = forget_bias
  self._keep_prob = dropout_keep_prob
  self._seed = dropout_prob_seed
  self._layer_norm = layer_norm
  self._g = norm_gain
  self._b = norm_shift
  self._reuse = reuse
Example 9: __init__
# Required import: from tensorflow.python.ops import nn_ops [as alias]
# Alternatively: from tensorflow.python.ops.nn_ops import dropout [as alias]
def __init__(self, num_units, forget_bias=1.0,
             input_size=None, activation=math_ops.tanh,
             layer_norm=True, norm_gain=1.0, norm_shift=0.0,
             dropout_keep_prob=1.0, dropout_prob_seed=None):
  """Initializes the basic LSTM cell.

  Args:
    num_units: int, The number of units in the LSTM cell.
    forget_bias: float, The bias added to forget gates (see above).
    input_size: Deprecated and unused.
    activation: Activation function of the inner states.
    layer_norm: If `True`, layer normalization will be applied.
    norm_gain: float, The layer normalization gain initial value. If
      `layer_norm` has been set to `False`, this argument will be ignored.
    norm_shift: float, The layer normalization shift initial value. If
      `layer_norm` has been set to `False`, this argument will be ignored.
    dropout_keep_prob: unit Tensor or float between 0 and 1 representing the
      recurrent dropout probability value. If float and 1.0, no dropout will
      be applied.
    dropout_prob_seed: (optional) integer, the randomness seed.
  """
  if input_size is not None:
    logging.warn("%s: The input_size parameter is deprecated.", self)

  self._num_units = num_units
  self._activation = activation
  self._forget_bias = forget_bias
  self._keep_prob = dropout_keep_prob
  self._seed = dropout_prob_seed
  self._layer_norm = layer_norm
  self._g = norm_gain
  self._b = norm_shift
Example 10: __call__
# Required import: from tensorflow.python.ops import nn_ops [as alias]
# Alternatively: from tensorflow.python.ops.nn_ops import dropout [as alias]
def __call__(self, inputs, state, scope=None):
  """LSTM cell with layer normalization and recurrent dropout."""
  with vs.variable_scope(scope or "layer_norm_basic_lstm_cell"):
    c, h = state
    args = array_ops.concat([inputs, h], 1)
    concat = self._linear(args)

    i, j, f, o = array_ops.split(value=concat, num_or_size_splits=4, axis=1)
    if self._layer_norm:
      i = self._norm(i, "input")
      j = self._norm(j, "transform")
      f = self._norm(f, "forget")
      o = self._norm(o, "output")

    g = self._activation(j)
    if (not isinstance(self._keep_prob, float)) or self._keep_prob < 1:
      g = nn_ops.dropout(g, self._keep_prob, seed=self._seed)

    new_c = (c * math_ops.sigmoid(f + self._forget_bias)
             + math_ops.sigmoid(i) * g)
    if self._layer_norm:
      new_c = self._norm(new_c, "state")
    new_h = self._activation(new_c) * math_ops.sigmoid(o)

    new_state = core_rnn_cell.LSTMStateTuple(new_c, new_h)
    return new_h, new_state
Example 11: __init__
# Required import: from tensorflow.python.ops import nn_ops [as alias]
# Alternatively: from tensorflow.python.ops.nn_ops import dropout [as alias]
def __init__(self, cell, input_keep_prob=1.0, output_keep_prob=1.0,
             seed=None):
  """Create a cell with added input and/or output dropout.

  Dropout is never used on the state.

  Args:
    cell: an RNNCell, a projection to output_size is added to it.
    input_keep_prob: unit Tensor or float between 0 and 1, input keep
      probability; if it is a float and 1, no input dropout will be added.
    output_keep_prob: unit Tensor or float between 0 and 1, output keep
      probability; if it is a float and 1, no output dropout will be added.
    seed: (optional) integer, the randomness seed.

  Raises:
    TypeError: if cell is not an RNNCell.
    ValueError: if keep_prob is not between 0 and 1.
  """
  if not isinstance(cell, RNNCell):
    raise TypeError("The parameter cell is not a RNNCell.")
  if (isinstance(input_keep_prob, float) and
      not (input_keep_prob >= 0.0 and input_keep_prob <= 1.0)):
    raise ValueError("Parameter input_keep_prob must be between 0 and 1: %f"
                     % input_keep_prob)
  if (isinstance(output_keep_prob, float) and
      not (output_keep_prob >= 0.0 and output_keep_prob <= 1.0)):
    raise ValueError("Parameter output_keep_prob must be between 0 and 1: %f"
                     % output_keep_prob)
  self._cell = cell
  self._input_keep_prob = input_keep_prob
  self._output_keep_prob = output_keep_prob
  self._seed = seed
Example 12: __call__
# Required import: from tensorflow.python.ops import nn_ops [as alias]
# Alternatively: from tensorflow.python.ops.nn_ops import dropout [as alias]
def __call__(self, inputs, state, scope=None):
  """Run the cell with the declared dropouts."""
  if (not isinstance(self._input_keep_prob, float) or
      self._input_keep_prob < 1):
    inputs = nn_ops.dropout(inputs, self._input_keep_prob, seed=self._seed)
  output, new_state = self._cell(inputs, state, scope)
  if (not isinstance(self._output_keep_prob, float) or
      self._output_keep_prob < 1):
    output = nn_ops.dropout(output, self._output_keep_prob, seed=self._seed)
  return output, new_state
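Note that this variant applies dropout unconditionally, with no training switch. A common workaround (a sketch, assuming the wrapper class from this example and run-time-fed keep probabilities) is to pass keep_prob as a placeholder defaulting to 1.0, which makes the dropout op a no-op at evaluation:

import tensorflow as tf

keep_prob = tf.placeholder_with_default(1.0, shape=[])
cell = DropoutWrapper(tf.nn.rnn_cell.GRUCell(64),
                      input_keep_prob=keep_prob,
                      output_keep_prob=keep_prob)
# Feed {keep_prob: 0.8} during training; the default 1.0 applies at eval.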
Example 13: classifier
# Required import: from tensorflow.python.ops import nn_ops [as alias]
# Alternatively: from tensorflow.python.ops.nn_ops import dropout [as alias]
def classifier(x, phase, enc_phase=1, trim=0, scope='class', reuse=None,
               internal_update=False, getter=None):
  with tf.variable_scope(scope, reuse=reuse, custom_getter=getter):
    with arg_scope([leaky_relu], a=0.1), \
         arg_scope([conv2d, dense], activation=leaky_relu, bn=True, phase=phase), \
         arg_scope([batch_norm], internal_update=internal_update):

      preprocess = instance_norm if args.inorm else tf.identity
      layout = [
          (preprocess, (), {}),
          (conv2d, (96, 3, 1), {}),
          (conv2d, (96, 3, 1), {}),
          (conv2d, (96, 3, 1), {}),
          (max_pool, (2, 2), {}),
          (dropout, (), dict(training=phase)),
          (noise, (1,), dict(phase=phase)),
          (conv2d, (192, 3, 1), {}),
          (conv2d, (192, 3, 1), {}),
          (conv2d, (192, 3, 1), {}),
          (max_pool, (2, 2), {}),
          (dropout, (), dict(training=phase)),
          (noise, (1,), dict(phase=phase)),
          (conv2d, (192, 3, 1), {}),
          (conv2d, (192, 3, 1), {}),
          (conv2d, (192, 3, 1), {}),
          (avg_pool, (), dict(global_pool=True)),
          (dense, (args.Y,), dict(activation=None))
      ]

      if enc_phase:
        start = 0
        end = len(layout) - trim
      else:
        start = len(layout) - trim
        end = len(layout)

      for i in xrange(start, end):
        with tf.variable_scope('l{:d}'.format(i)):
          f, f_args, f_kwargs = layout[i]
          x = f(x, *f_args, **f_kwargs)

  return x
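The layout list is a small layer-as-data DSL: each entry is a (function, args, kwargs) triple, and enc_phase/trim choose which slice of the network to build. Reduced to a generic sketch (names illustrative, independent of the original project's helpers):

import tensorflow as tf

def apply_layout(x, layout, start, end):
  """Applies layout[start:end], scoping each layer as 'l<i>'."""
  for i in range(start, end):
    f, f_args, f_kwargs = layout[i]
    with tf.variable_scope('l{:d}'.format(i)):
      x = f(x, *f_args, **f_kwargs)
  return x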
Example 14: __init__
# Required import: from tensorflow.python.ops import nn_ops [as alias]
# Alternatively: from tensorflow.python.ops.nn_ops import dropout [as alias]
def __init__(self, cell, input_keep_prob=1.0, output_keep_prob=1.0,
             seed=None, is_train=None):
  """Create a cell with added input and/or output dropout.

  Dropout is never used on the state.

  Args:
    cell: an RNNCell, a projection to output_size is added to it.
    input_keep_prob: unit Tensor or float between 0 and 1, input keep
      probability; if it is a float and 1, no input dropout will be added.
    output_keep_prob: unit Tensor or float between 0 and 1, output keep
      probability; if it is a float and 1, no output dropout will be added.
    seed: (optional) integer, the randomness seed.
    is_train: boolean tensor (often a placeholder). If given, dropout is
      not applied when is_train is False.

  Raises:
    TypeError: if cell is not an RNNCell.
    ValueError: if keep_prob is not between 0 and 1.
  """
  if not isinstance(cell, RNNCell):
    raise TypeError("The parameter cell is not a RNNCell.")
  if (isinstance(input_keep_prob, float) and
      not (input_keep_prob >= 0.0 and input_keep_prob <= 1.0)):
    raise ValueError("Parameter input_keep_prob must be between 0 and 1: %f"
                     % input_keep_prob)
  if (isinstance(output_keep_prob, float) and
      not (output_keep_prob >= 0.0 and output_keep_prob <= 1.0)):
    raise ValueError("Parameter output_keep_prob must be between 0 and 1: %f"
                     % output_keep_prob)
  self._cell = cell
  self._input_keep_prob = input_keep_prob
  self._output_keep_prob = output_keep_prob
  self._seed = seed
  self._is_train = is_train
Example 15: __call__
# Required import: from tensorflow.python.ops import nn_ops [as alias]
# Alternatively: from tensorflow.python.ops.nn_ops import dropout [as alias]
def __call__(self, inputs, state, scope=None):
  """Run the cell with the declared dropouts."""
  if (not isinstance(self._input_keep_prob, float) or
      self._input_keep_prob < 1):
    do_inputs = dropout(inputs, self._input_keep_prob, seed=self._seed)
    inputs = tf.cond(self._is_train, lambda: do_inputs, lambda: inputs)
  output, new_state = self._cell(inputs, state)
  if (not isinstance(self._output_keep_prob, float) or
      self._output_keep_prob < 1):
    do_output = dropout(output, self._output_keep_prob, seed=self._seed)
    output = tf.cond(self._is_train, lambda: do_output, lambda: output)
  return output, new_state
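The tf.cond pattern above builds both branches into the graph and selects one at run time via the is_train tensor. A condensed, self-contained sketch of the same gate (assuming TF 1.x; names are illustrative):

import tensorflow as tf

def maybe_dropout(x, keep_prob, is_train, seed=None):
  if isinstance(keep_prob, float) and keep_prob >= 1.0:
    return x  # statically a no-op: skip building the cond
  dropped = tf.nn.dropout(x, keep_prob, seed=seed)
  return tf.cond(is_train, lambda: dropped, lambda: x)

is_train = tf.placeholder(tf.bool, shape=[])
h = maybe_dropout(tf.ones([4, 8]), 0.7, is_train)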