This article collects typical usage examples of the Python function tflearn.activations.get. If you are wondering how get is actually used, or what calling get looks like in real code, the curated examples below may help.
Nine code examples of the get function are shown below, sorted by popularity by default.
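All of the examples share the same underlying pattern, shown here as a minimal sketch (assuming a tflearn installation on TensorFlow 1.x; the tensor and its shape are hypothetical): activations.get resolves a string name to the corresponding activation function, which is then applied like any other callable.

# Minimal usage sketch of activations.get (assumes tflearn on TensorFlow 1.x).
import tensorflow as tf
from tflearn import activations

x = tf.placeholder(tf.float32, shape=[None, 10])  # hypothetical input tensor
relu_fn = activations.get('relu')                 # resolve the name to a function
y = relu_fn(x)                                    # apply it to the tensor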
Example 1: activation
def activation(incoming, activation='linear', name='activation'):
""" Activation.
Apply given activation to incoming tensor.
Arguments:
incoming: A `Tensor`. The incoming tensor.
activation: `str` (name) or `function` (returning a `Tensor`).
Activation applied to this layer (see tflearn.activations).
Default: 'linear'.
"""
if isinstance(activation, str):
x = activations.get(activation)(incoming)
    elif hasattr(activation, '__call__'):
x = activation(incoming)
else:
raise ValueError('Unknown activation type.')
# Track output tensor.
tf.add_to_collection(tf.GraphKeys.LAYER_TENSOR + '/' + name, x)
return x
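A short, hypothetical call of the wrapper above (assuming tflearn is imported along with the TF 1.x modules used by the original file; the tensor name and shape are illustrative):

# Hypothetical usage of the activation() wrapper defined above.
x = tf.placeholder(tf.float32, shape=[None, 64], name='features')
out = activation(x, activation='relu', name='relu_out')  # string name resolved via activations.get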
Example 2: block8
def block8(net, scale=1.0, activation="relu"):
tower_conv = relu(batch_normalization(conv_2d(net, 192, 1, bias=False, activation=None, name='Conv2d_1x1')))
tower_conv1_0 = relu(batch_normalization(conv_2d(net, 192, 1, bias=False, activation=None, name='Conv2d_0a_1x1')))
tower_conv1_1 = relu(batch_normalization(conv_2d(tower_conv1_0, 224, [1,3], bias=False, activation=None, name='Conv2d_0b_1x3')))
    tower_conv1_2 = relu(batch_normalization(conv_2d(tower_conv1_1, 256, [3,1], bias=False, activation=None, name='Conv2d_0c_3x1')))
tower_mixed = merge([tower_conv,tower_conv1_2], mode='concat', axis=3)
tower_out = relu(batch_normalization(conv_2d(tower_mixed, net.get_shape()[3], 1, bias=False, activation=None, name='Conv2d_1x1')))
net += scale * tower_out
if activation:
if isinstance(activation, str):
net = activations.get(activation)(net)
elif hasattr(activation, '__call__'):
net = activation(net)
else:
raise ValueError("Invalid Activation.")
return net
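A hedged usage sketch for block8, assuming the ops imported by the original script (conv_2d, relu, batch_normalization, merge) are in scope; the 8x8x2080 shape is only an example of the feature map this Inception-ResNet-v2 block normally receives:

# Hypothetical call of block8 (assumes the original script's imports are available).
feature_map = tf.placeholder(tf.float32, shape=[None, 8, 8, 2080])  # example feature-map shape
net = block8(feature_map, scale=0.2, activation='relu')             # scaled residual branch, then ReLU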
Example 3: activation
def activation(incoming, activation='linear'):
""" Activation.
Apply given activation to incoming tensor.
Arguments:
incoming: A `Tensor`. The incoming tensor.
activation: `str` (name) or `Tensor`. Activation applied to this layer.
(see tflearn.activations). Default: 'linear'.
"""
if isinstance(activation, str):
return activations.get(activation)(incoming)
    elif hasattr(activation, '__call__'):
return activation(incoming)
else:
raise ValueError('Unknown activation type.')
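Since the callable branch is supported as well, a function can be passed instead of a name; a small hypothetical example (TF 1.x):

# Hypothetical usage with a callable instead of a string name.
x = tf.placeholder(tf.float32, shape=[None, 32])
out = activation(x, activation=tf.nn.tanh)  # any function returning a Tensor is accepted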
Example 4: fully_connected
def fully_connected(incoming, n_units, activation='linear', bias=True,
weights_init='truncated_normal', bias_init='zeros',
regularizer=None, weight_decay=0.001, trainable=True,
restore=True, reuse=False, scope=None,
name="FullyConnected"):
""" Fully Connected.
A fully connected layer.
Input:
        (2+)-D Tensor [samples, input dim]. If not 2D, input will be flattened.
Output:
2D Tensor [samples, n_units].
Arguments:
incoming: `Tensor`. Incoming (2+)D Tensor.
n_units: `int`, number of units for this layer.
activation: `str` (name) or `function` (returning a `Tensor`).
Activation applied to this layer (see tflearn.activations).
Default: 'linear'.
bias: `bool`. If True, a bias is used.
weights_init: `str` (name) or `Tensor`. Weights initialization.
(see tflearn.initializations) Default: 'truncated_normal'.
bias_init: `str` (name) or `Tensor`. Bias initialization.
(see tflearn.initializations) Default: 'zeros'.
regularizer: `str` (name) or `Tensor`. Add a regularizer to this
layer weights (see tflearn.regularizers). Default: None.
weight_decay: `float`. Regularizer decay parameter. Default: 0.001.
trainable: `bool`. If True, weights will be trainable.
restore: `bool`. If True, this layer weights will be restored when
loading a model.
reuse: `bool`. If True and 'scope' is provided, this layer variables
will be reused (shared).
scope: `str`. Define this layer scope (optional). A scope can be
used to share variables between layers. Note that scope will
override name.
name: A name for this layer (optional). Default: 'FullyConnected'.
Attributes:
scope: `Scope`. This layer scope.
W: `Tensor`. Variable representing units weights.
b: `Tensor`. Variable representing biases.
"""
input_shape = utils.get_incoming_shape(incoming)
assert len(input_shape) > 1, "Incoming Tensor shape must be at least 2-D"
n_inputs = int(np.prod(input_shape[1:]))
# Build variables and inference.
with tf.variable_op_scope([incoming], scope, name, reuse=reuse) as scope:
name = scope.name
W_init = weights_init
if isinstance(weights_init, str):
W_init = initializations.get(weights_init)()
W_regul = None
if regularizer:
W_regul = lambda x: losses.get(regularizer)(x, weight_decay)
W = va.variable('W', shape=[n_inputs, n_units], regularizer=W_regul,
initializer=W_init, trainable=trainable,
restore=restore)
tf.add_to_collection(tf.GraphKeys.LAYER_VARIABLES + '/' + name, W)
b = None
if bias:
            if isinstance(bias_init, str):
bias_init = initializations.get(bias_init)()
b = va.variable('b', shape=[n_units], initializer=bias_init,
trainable=trainable, restore=restore)
tf.add_to_collection(tf.GraphKeys.LAYER_VARIABLES + '/' + name, b)
inference = incoming
# If input is not 2d, flatten it.
if len(input_shape) > 2:
inference = tf.reshape(inference, [-1, n_inputs])
inference = tf.matmul(inference, W)
        if b is not None: inference = tf.nn.bias_add(inference, b)
if isinstance(activation, str):
inference = activations.get(activation)(inference)
elif hasattr(activation, '__call__'):
inference = activation(inference)
else:
raise ValueError("Invalid Activation.")
# Track activations.
tf.add_to_collection(tf.GraphKeys.ACTIVATIONS, inference)
# Add attributes to Tensor to easy access weights.
inference.scope = scope
inference.W = W
inference.b = b
# Track output tensor.
tf.add_to_collection(tf.GraphKeys.LAYER_TENSOR + '/' + name, inference)
return inference
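A hypothetical call of the layer above (TF 1.x graph mode; the shape and hyperparameters are illustrative only):

# Hypothetical usage of fully_connected() defined above.
x = tf.placeholder(tf.float32, shape=[None, 784])
net = fully_connected(x, 128, activation='relu',
                      regularizer='L2', weight_decay=0.001)
# The returned tensor carries the layer variables as attributes: net.W, net.b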
Example 5: highway
def highway(incoming, n_units, activation='linear', transform_dropout=None,
weights_init='truncated_normal', bias_init='zeros',
regularizer=None, weight_decay=0.001, trainable=True,
restore=True, reuse=False, scope=None,
name="FullyConnectedHighway"):
""" Fully Connected Highway.
A fully connected highway network layer, with some inspiration from
[https://github.com/fomorians/highway-fcn](https://github.com/fomorians/highway-fcn).
Input:
        (2+)-D Tensor [samples, input dim]. If not 2D, input will be flattened.
Output:
2D Tensor [samples, n_units].
Arguments:
incoming: `Tensor`. Incoming (2+)D Tensor.
n_units: `int`, number of units for this layer.
activation: `str` (name) or `function` (returning a `Tensor`).
Activation applied to this layer (see tflearn.activations).
Default: 'linear'.
        transform_dropout: `float`. Keep probability on the highway transform gate.
weights_init: `str` (name) or `Tensor`. Weights initialization.
(see tflearn.initializations) Default: 'truncated_normal'.
bias_init: `str` (name) or `Tensor`. Bias initialization.
(see tflearn.initializations) Default: 'zeros'.
regularizer: `str` (name) or `Tensor`. Add a regularizer to this
layer weights (see tflearn.regularizers). Default: None.
weight_decay: `float`. Regularizer decay parameter. Default: 0.001.
trainable: `bool`. If True, weights will be trainable.
restore: `bool`. If True, this layer weights will be restored when
loading a model
reuse: `bool`. If True and 'scope' is provided, this layer variables
will be reused (shared).
scope: `str`. Define this layer scope (optional). A scope can be
used to share variables between layers. Note that scope will
override name.
name: A name for this layer (optional). Default: 'FullyConnectedHighway'.
Attributes:
scope: `Scope`. This layer scope.
W: `Tensor`. Variable representing units weights.
W_t: `Tensor`. Variable representing units weights for transform gate.
b: `Tensor`. Variable representing biases.
b_t: `Tensor`. Variable representing biases for transform gate.
Links:
[https://arxiv.org/abs/1505.00387](https://arxiv.org/abs/1505.00387)
"""
input_shape = utils.get_incoming_shape(incoming)
assert len(input_shape) > 1, "Incoming Tensor shape must be at least 2-D"
n_inputs = int(np.prod(input_shape[1:]))
# Build variables and inference.
with tf.variable_op_scope([incoming], scope, name, reuse=reuse) as scope:
name = scope.name
W_init = weights_init
if isinstance(weights_init, str):
W_init = initializations.get(weights_init)()
W_regul = None
if regularizer:
W_regul = lambda x: losses.get(regularizer)(x, weight_decay)
W = va.variable('W', shape=[n_inputs, n_units], regularizer=W_regul,
initializer=W_init, trainable=trainable,
restore=restore)
tf.add_to_collection(tf.GraphKeys.LAYER_VARIABLES + '/' + name, W)
if isinstance(bias_init, str):
bias_init = initializations.get(bias_init)()
b = va.variable('b', shape=[n_units], initializer=bias_init,
trainable=trainable, restore=restore)
tf.add_to_collection(tf.GraphKeys.LAYER_VARIABLES + '/' + name, b)
# Weight and bias for the transform gate
W_T = va.variable('W_T', shape=[n_inputs, n_units],
regularizer=None, initializer=W_init,
trainable=trainable, restore=restore)
tf.add_to_collection(tf.GraphKeys.LAYER_VARIABLES + '/' + name, W_T)
b_T = va.variable('b_T', shape=[n_units],
initializer=tf.constant_initializer(-1),
trainable=trainable, restore=restore)
tf.add_to_collection(tf.GraphKeys.LAYER_VARIABLES + '/' + name, b_T)
# If input is not 2d, flatten it.
if len(input_shape) > 2:
incoming = tf.reshape(incoming, [-1, n_inputs])
if isinstance(activation, str):
activation = activations.get(activation)
elif hasattr(activation, '__call__'):
activation = activation
else:
raise ValueError("Invalid Activation.")
H = activation(tf.matmul(incoming, W) + b)
T = tf.sigmoid(tf.matmul(incoming, W_T) + b_T)
#......... the rest of the code is omitted here .........
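The body above is truncated in the source. Assuming the omitted tail combines H and the identity path through the transform gate T and returns the result (as the stock tflearn highway layer does), a hypothetical call would be:

# Hypothetical usage of highway() defined above; output width equals input width
# so several highway layers can be stacked.
x = tf.placeholder(tf.float32, shape=[None, 256])
net = highway(x, 256, activation='elu', transform_dropout=0.8)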
Example 6: single_unit
def single_unit(incoming, activation='linear', bias=True, trainable=True,
restore=True, reuse=False, scope=None, name="Linear"):
""" Single Unit.
A single unit (Linear) Layer.
Input:
        1-D Tensor [samples]. If not 1-D, input will be flattened.
Output:
1-D Tensor [samples].
Arguments:
incoming: `Tensor`. Incoming Tensor.
activation: `str` (name) or `function`. Activation applied to this
layer (see tflearn.activations). Default: 'linear'.
bias: `bool`. If True, a bias is used.
trainable: `bool`. If True, weights will be trainable.
restore: `bool`. If True, this layer weights will be restored when
loading a model.
reuse: `bool`. If True and 'scope' is provided, this layer variables
will be reused (shared).
scope: `str`. Define this layer scope (optional). A scope can be
used to share variables between layers. Note that scope will
override name.
name: A name for this layer (optional). Default: 'Linear'.
Attributes:
W: `Tensor`. Variable representing weight.
b: `Tensor`. Variable representing bias.
"""
input_shape = utils.get_incoming_shape(incoming)
n_inputs = int(np.prod(input_shape[1:]))
# Build variables and inference.
with tf.variable_op_scope([incoming], scope, name, reuse=reuse) as scope:
name = scope.name
W = va.variable('W', shape=[n_inputs],
initializer=tf.constant_initializer(np.random.randn()),
trainable=trainable, restore=restore)
tf.add_to_collection(tf.GraphKeys.LAYER_VARIABLES + '/' + name, W)
b = None
if bias:
b = va.variable('b', shape=[n_inputs],
initializer=tf.constant_initializer(np.random.randn()),
trainable=trainable, restore=restore)
tf.add_to_collection(tf.GraphKeys.LAYER_VARIABLES + '/' + name, b)
inference = incoming
# If input is not 2d, flatten it.
if len(input_shape) > 1:
inference = tf.reshape(inference, [-1])
inference = tf.mul(inference, W)
        if b is not None: inference = tf.add(inference, b)
if isinstance(activation, str):
inference = activations.get(activation)(inference)
elif hasattr(activation, '__call__'):
inference = activation(inference)
else:
raise ValueError("Invalid Activation.")
# Track activations.
tf.add_to_collection(tf.GraphKeys.ACTIVATIONS, inference)
# Add attributes to Tensor to easy access weights.
inference.scope = scope
inference.W = W
inference.b = b
# Track output tensor.
tf.add_to_collection(tf.GraphKeys.LAYER_TENSOR + '/' + name, inference)
return inference
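A hypothetical call of single_unit, which amounts to a per-sample linear model y = W*x + b (one scalar feature per sample in this sketch):

# Hypothetical usage of single_unit() defined above.
x = tf.placeholder(tf.float32, shape=[None])  # one scalar feature per sample
pred = single_unit(x, activation='linear')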
Example 7: fully_connected
def fully_connected(incoming, n_units, activation='linear', bias=True,
weights_init='truncated_normal', bias_init='zeros',
regularizer=None, weight_decay=0.001, trainable=True,
restore=True, name="FullyConnected"):
""" Fully Connected.
A fully connected layer.
Input:
        (2+)-D Tensor [samples, input dim]. If not 2D, input will be flattened.
Output:
2D Tensor [samples, n_units].
Arguments:
incoming: `Tensor`. Incoming (2+)D Tensor.
n_units: `int`, number of units for this layer.
activation: `str` (name) or `Tensor`. Activation applied to this layer.
(see tflearn.activations). Default: 'linear'.
bias: `bool`. If True, a bias is used.
weights_init: `str` (name) or `Tensor`. Weights initialization.
(see tflearn.initializations) Default: 'truncated_normal'.
bias_init: `str` (name) or `Tensor`. Bias initialization.
(see tflearn.initializations) Default: 'zeros'.
regularizer: `str` (name) or `Tensor`. Add a regularizer to this
layer weights (see tflearn.regularizers). Default: None.
weight_decay: `float`. Regularizer decay parameter. Default: 0.001.
trainable: `bool`. If True, weights will be trainable.
restore: `bool`. If True, this layer weights will be restored when
loading a model
name: A name for this layer (optional). Default: 'FullyConnected'.
Attributes:
scope: `Scope`. This layer scope.
W: `Tensor`. Variable representing units weights.
b: `Tensor`. Variable representing biases.
"""
input_shape = utils.get_incoming_shape(incoming)
n_inputs = int(np.prod(input_shape[1:]))
# Build variables and inference.
with tf.name_scope(name) as scope:
W_init = weights_init
if isinstance(weights_init, str):
W_init = initializations.get(weights_init)()
W_regul = None
if regularizer:
W_regul = lambda x: losses.get(regularizer)(x, weight_decay)
W = va.variable(scope + 'W', shape=[n_inputs, n_units],
regularizer=W_regul, initializer=W_init,
trainable=trainable, restore=restore)
tf.add_to_collection(tf.GraphKeys.LAYER_VARIABLES + '/' + scope, W)
b = None
if bias:
b_init = initializations.get(bias_init)()
b = va.variable(scope + 'b', shape=[n_units],
initializer=b_init, trainable=trainable,
restore=restore)
tf.add_to_collection(tf.GraphKeys.LAYER_VARIABLES + '/' + scope, b)
inference = incoming
# If input is not 2d, flatten it.
if len(input_shape) > 2:
inference = tf.reshape(inference, [-1, n_inputs])
inference = tf.matmul(inference, W)
        if b is not None: inference = tf.nn.bias_add(inference, b)
inference = activations.get(activation)(inference)
# Track activations.
tf.add_to_collection(tf.GraphKeys.ACTIVATIONS, inference)
# Add attributes to Tensor to easy access weights.
inference.scope = scope
inference.W = W
inference.b = b
return inference
Example 8: single_unit
def single_unit(incoming, activation='linear', bias=True, trainable=True,
restore=True, name="Linear"):
""" Single Unit.
A single unit (Linear) Layer.
Input:
        1-D Tensor [samples]. If not 1-D, input will be flattened.
Output:
1-D Tensor [samples].
Arguments:
incoming: `Tensor`. Incoming Tensor.
activation: `str` (name) or `Tensor`. Activation applied to this layer.
(see tflearn.activations). Default: 'linear'.
bias: `bool`. If True, a bias is used.
trainable: `bool`. If True, weights will be trainable.
restore: `bool`. If True, this layer weights will be restored when
loading a model.
        name: A name for this layer (optional). Default: 'Linear'.
Attributes:
W: `Tensor`. Variable representing weight.
b: `Tensor`. Variable representing bias.
"""
input_shape = utils.get_incoming_shape(incoming)
n_inputs = int(np.prod(input_shape[1:]))
# Build variables and inference.
with tf.name_scope(name) as scope:
W = va.variable(scope + 'W', shape=[n_inputs],
initializer=tf.constant_initializer(np.random.randn()),
trainable=trainable, restore=restore)
tf.add_to_collection(tf.GraphKeys.LAYER_VARIABLES + '/' + scope, W)
b = None
if bias:
b = va.variable(scope + 'b', shape=[n_inputs],
initializer=tf.constant_initializer(np.random.randn()),
trainable=trainable, restore=restore)
tf.add_to_collection(tf.GraphKeys.LAYER_VARIABLES + '/' + scope, b)
inference = incoming
# If input is not 2d, flatten it.
if len(input_shape) > 1:
inference = tf.reshape(inference, [-1])
inference = tf.mul(inference, W)
        if b is not None: inference = tf.add(inference, b)
inference = activations.get(activation)(inference)
# Track activations.
tf.add_to_collection(tf.GraphKeys.ACTIVATIONS, inference)
# Add attributes to Tensor to easy access weights.
inference.scope = scope
inference.W = W
inference.b = b
return inference
Example 9: conv_2d_BN
def conv_2d_BN(incoming, nb_filter, filter_size, strides=1, padding='same',
activation='linear', bias=True, weights_init='xavier',
bias_init='zeros', regularizer=None, weight_decay=0.001,
trainable=True, restore=True, reuse=False, scope=None,
name="Conv2D", batch_norm=False):
""" Convolution 2D.
Input:
4-D Tensor [batch, height, width, in_channels].
Output:
4-D Tensor [batch, new height, new width, nb_filter].
Arguments:
incoming: `Tensor`. Incoming 4-D Tensor.
nb_filter: `int`. The number of convolutional filters.
filter_size: `int` or `list of int`. Size of filters.
        strides: `int` or list of `int`. Strides of conv operation.
Default: [1 1 1 1].
padding: `str` from `"same", "valid"`. Padding algo to use.
Default: 'same'.
activation: `str` (name) or `function` (returning a `Tensor`).
Activation applied to this layer (see tflearn.activations).
Default: 'linear'.
bias: `bool`. If True, a bias is used.
weights_init: `str` (name) or `Tensor`. Weights initialization.
            (see tflearn.initializations) Default: 'xavier'.
bias_init: `str` (name) or `Tensor`. Bias initialization.
(see tflearn.initializations) Default: 'zeros'.
regularizer: `str` (name) or `Tensor`. Add a regularizer to this
layer weights (see tflearn.regularizers). Default: None.
weight_decay: `float`. Regularizer decay parameter. Default: 0.001.
trainable: `bool`. If True, weights will be trainable.
restore: `bool`. If True, this layer weights will be restored when
loading a model.
reuse: `bool`. If True and 'scope' is provided, this layer variables
will be reused (shared).
scope: `str`. Define this layer scope (optional). A scope can be
used to share variables between layers. Note that scope will
override name.
name: A name for this layer (optional). Default: 'Conv2D'.
batch_norm: If true, add batch normalization with default TFLearn
parameters before the activation layer
Attributes:
scope: `Scope`. This layer scope.
W: `Variable`. Variable representing filter weights.
b: `Variable`. Variable representing biases.
"""
input_shape = utils.get_incoming_shape(incoming)
assert len(input_shape) == 4, "Incoming Tensor shape must be 4-D"
filter_size = utils.autoformat_filter_conv2d(filter_size,
input_shape[-1],
nb_filter)
strides = utils.autoformat_kernel_2d(strides)
padding = utils.autoformat_padding(padding)
# Variable Scope fix for older TF
try:
vscope = tf.variable_scope(scope, default_name=name, values=[incoming],
reuse=reuse)
except Exception:
vscope = tf.variable_op_scope([incoming], scope, name, reuse=reuse)
with vscope as scope:
name = scope.name
W_init = weights_init
if isinstance(weights_init, str):
W_init = initializations.get(weights_init)()
W_regul = None
if regularizer:
W_regul = lambda x: losses.get(regularizer)(x, weight_decay)
W = vs.variable('W', shape=filter_size, regularizer=W_regul,
initializer=W_init, trainable=trainable,
restore=restore)
# Track per layer variables
tf.add_to_collection(tf.GraphKeys.LAYER_VARIABLES + '/' + name, W)
b = None
if bias:
if isinstance(bias_init, str):
bias_init = initializations.get(bias_init)()
b = vs.variable('b', shape=nb_filter, initializer=bias_init,
trainable=trainable, restore=restore)
# Track per layer variables
tf.add_to_collection(tf.GraphKeys.LAYER_VARIABLES + '/' + name, b)
inference = tf.nn.conv2d(incoming, W, strides, padding)
        if b is not None: inference = tf.nn.bias_add(inference, b)
if batch_norm:
inference = batch_normalization(inference)
if isinstance(activation, str):
if activation == 'softmax':
shapes = inference.get_shape()
inference = activations.get(activation)(inference)
elif hasattr(activation, '__call__'):
inference = activation(inference)
else:
raise ValueError("Invalid Activation.")
#......... the rest of the code is omitted here .........
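The function is truncated in the source. Assuming the omitted tail returns `inference` the way a standard tflearn conv layer does, a hypothetical call would be:

# Hypothetical usage of conv_2d_BN() defined above, with batch norm before the ReLU.
images = tf.placeholder(tf.float32, shape=[None, 32, 32, 3])
net = conv_2d_BN(images, 64, 3, activation='relu', batch_norm=True)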