This article collects typical usage examples of the Python method tensorflow.python.layers.base.InputSpec. If you have been wondering what base.InputSpec does, how to call it, or what it looks like in real code, the curated examples below should help. You can also browse further usage examples of the module it lives in, tensorflow.python.layers.base.
The following shows 15 code examples of base.InputSpec, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better Python code samples.
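Before diving into the examples, here is a minimal, self-contained sketch of the pattern most of them follow: a custom layer declares a coarse InputSpec (only a minimum rank) in __init__, then replaces it in build with a stricter spec that also pins the size of the last axis. The layer name MyDense and its arguments are illustrative only, and the sketch assumes a TF 1.x-era installation where tensorflow.python.layers.base and Dimension.value are available.

import tensorflow as tf
from tensorflow.python.layers import base


class MyDense(base.Layer):
  """Illustrative dense layer showing the two-stage InputSpec pattern."""

  def __init__(self, units, **kwargs):
    super(MyDense, self).__init__(**kwargs)
    self.units = units
    # Before build: only require inputs of rank >= 2.
    self.input_spec = base.InputSpec(min_ndim=2)

  def build(self, input_shape):
    input_shape = tf.TensorShape(input_shape)
    last_dim = input_shape[-1].value  # assumed to be statically known
    # After build: additionally pin the size of the last axis.
    self.input_spec = base.InputSpec(min_ndim=2, axes={-1: last_dim})
    self.kernel = self.add_variable(
        'kernel', shape=[last_dim, self.units], dtype=self.dtype)
    self.built = True

  def call(self, inputs):
    return tf.matmul(inputs, self.kernel)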
Example 1: __init__
# Required import: from tensorflow.python.layers import base [as alias]
# Or: from tensorflow.python.layers.base import InputSpec [as alias]
def __init__(self, units,
             activation=None,
             use_bias=True,
             kernel_initializer=None,
             bias_initializer=init_ops.zeros_initializer(),
             kernel_regularizer=None,
             bias_regularizer=None,
             activity_regularizer=None,
             trainable=True,
             name=None,
             **kwargs):
  super(Dense, self).__init__(trainable=trainable, name=name, **kwargs)
  self.units = units
  self.activation = activation
  self.use_bias = use_bias
  self.kernel_initializer = kernel_initializer
  self.bias_initializer = bias_initializer
  self.kernel_regularizer = kernel_regularizer
  self.bias_regularizer = bias_regularizer
  self.activity_regularizer = activity_regularizer
  self.input_spec = base.InputSpec(min_ndim=2)
Example 2: build
# Required import: from tensorflow.python.layers import base [as alias]
# Or: from tensorflow.python.layers.base import InputSpec [as alias]
def build(self, input_shape):
  input_shape = tensor_shape.TensorShape(input_shape)
  if input_shape[-1].value is None:
    raise ValueError('The last dimension of the inputs to `Dense` '
                     'should be defined. Found `None`.')
  self.input_spec = base.InputSpec(min_ndim=2,
                                   axes={-1: input_shape[-1].value})
  self.kernel = self.add_variable('kernel',
                                  shape=[input_shape[-1].value, self.units],
                                  initializer=self.kernel_initializer,
                                  regularizer=self.kernel_regularizer,
                                  dtype=self.dtype,
                                  trainable=True)
  if self.use_bias:
    self.bias = self.add_variable('bias',
                                  shape=[self.units,],
                                  initializer=self.bias_initializer,
                                  regularizer=self.bias_regularizer,
                                  dtype=self.dtype,
                                  trainable=True)
  else:
    self.bias = None
  self.built = True
Example 3: __init__
# Required import: from tensorflow.python.layers import base [as alias]
# Or: from tensorflow.python.layers.base import InputSpec [as alias]
def __init__(self,
             num_units,
             recurrent_min_abs=0,
             recurrent_max_abs=None,
             recurrent_kernel_initializer=None,
             input_kernel_initializer=None,
             activation=None,
             reuse=None,
             name=None):
  super(IndRNNCell, self).__init__(_reuse=reuse, name=name)
  # Inputs must be 2-dimensional.
  self.input_spec = base_layer.InputSpec(ndim=2)
  self._num_units = num_units
  self._recurrent_min_abs = recurrent_min_abs
  self._recurrent_max_abs = recurrent_max_abs
  self._recurrent_initializer = recurrent_kernel_initializer
  self._input_initializer = input_kernel_initializer
  self._activation = activation or nn_ops.relu
Example 4: __init__
# Required import: from tensorflow.python.layers import base [as alias]
# Or: from tensorflow.python.layers.base import InputSpec [as alias]
def __init__(self,
             num_units,
             recurrent_min_abs=0,
             recurrent_max_abs=None,
             recurrent_kernel_initializer=None,
             input_kernel_initializer=None,
             activation=None,
             reuse=None,
             name=None):
  super(IndRNNCell, self).__init__(_reuse=reuse, name=name)
  # Inputs must be 2-dimensional.
  self.input_spec = base_layer.InputSpec(ndim=2)
  # initialization
  self._num_units = num_units
  self._recurrent_min_abs = recurrent_min_abs
  self._recurrent_max_abs = recurrent_max_abs
  self._recurrent_recurrent_kernel_initializer = recurrent_kernel_initializer
  self._input_kernel_initializer = input_kernel_initializer
  self._activation = activation or nn_ops.relu
Example 5: __init__
# Required import: from tensorflow.python.layers import base [as alias]
# Or: from tensorflow.python.layers.base import InputSpec [as alias]
def __init__(self,
             inverse=False,
             rectify=False,
             gamma_init=.1,
             data_format="channels_last",
             beta_parameterizer=_default_beta_param,
             gamma_parameterizer=_default_gamma_param,
             activity_regularizer=None,
             trainable=True,
             name=None,
             **kwargs):
  super(GDN, self).__init__(trainable=trainable, name=name,
                            activity_regularizer=activity_regularizer,
                            **kwargs)
  self.inverse = bool(inverse)
  self.rectify = bool(rectify)
  self._gamma_init = float(gamma_init)
  self.data_format = data_format
  self._beta_parameterizer = beta_parameterizer
  self._gamma_parameterizer = gamma_parameterizer
  self._channel_axis()  # trigger ValueError early
  self.input_spec = base.InputSpec(min_ndim=2)
Example 6: build
# Required import: from tensorflow.python.layers import base [as alias]
# Or: from tensorflow.python.layers.base import InputSpec [as alias]
def build(self, input_shape):
  channel_axis = self._channel_axis()
  input_shape = tensor_shape.TensorShape(input_shape)
  num_channels = input_shape[channel_axis].value
  if num_channels is None:
    raise ValueError("The channel dimension of the inputs to `GDN` "
                     "must be defined.")
  self._input_rank = input_shape.ndims
  self.input_spec = base.InputSpec(ndim=input_shape.ndims,
                                   axes={channel_axis: num_channels})
  self.beta = self._beta_parameterizer(
      name="beta", shape=[num_channels], dtype=self.dtype,
      getter=self.add_variable, initializer=init_ops.Ones())
  self.gamma = self._gamma_parameterizer(
      name="gamma", shape=[num_channels, num_channels], dtype=self.dtype,
      getter=self.add_variable,
      initializer=init_ops.Identity(gain=self._gamma_init))
  self.built = True
Example 7: build
# Required import: from tensorflow.python.layers import base [as alias]
# Or: from tensorflow.python.layers.base import InputSpec [as alias]
def build(self, input_shape):
  input_shape = tensor_shape.TensorShape(input_shape)
  if input_shape[-1].value is None:
    raise ValueError('The last dimension of the inputs to `Dense` '
                     'should be defined. Found `None`.')
  self.input_spec = base.InputSpec(min_ndim=2,
                                   axes={-1: input_shape[-1].value})
  self.kernel = self._build_kernel(input_shape)
  if self.use_bias:
    self.bias = self.add_variable('bias',
                                  shape=[self.units],
                                  initializer=self.bias_initializer,
                                  regularizer=self.bias_regularizer,
                                  constraint=self.bias_constraint,
                                  dtype=self.dtype,
                                  trainable=True)
  else:
    self.bias = None
  self.built = True
Example 8: build
# Required import: from tensorflow.python.layers import base [as alias]
# Or: from tensorflow.python.layers.base import InputSpec [as alias]
def build(self, input_shape):
  input_shape = tensor_shape.TensorShape(input_shape)
  if input_shape[-1].value is None:
    raise ValueError('The last dimension of the inputs to `Dense` '
                     'should be defined. Found `None`.')
  self.input_spec = base.InputSpec(min_ndim=2,
                                   axes={-1: input_shape[-1].value})
  self.kernel = self.add_variable('kernel',
                                  shape=[input_shape[-1].value, self.units],
                                  initializer=self.kernel_initializer,
                                  regularizer=self.kernel_regularizer,
                                  constraint=self.kernel_constraint,
                                  dtype=self.dtype,
                                  trainable=True)
  if self.use_bias:
    self.bias = self.add_variable('bias',
                                  shape=[self.units,],
                                  initializer=self.bias_initializer,
                                  regularizer=self.bias_regularizer,
                                  constraint=self.bias_constraint,
                                  dtype=self.dtype,
                                  trainable=True)
  else:
    self.bias = None
  self.built = True
Developer: PacktPublishing · Project: Serverless-Deep-Learning-with-TensorFlow-and-AWS-Lambda · Lines of code: 27 · Source file: core.py
Example 9: build
# Required import: from tensorflow.python.layers import base [as alias]
# Or: from tensorflow.python.layers.base import InputSpec [as alias]
def build(self, input_shape):
  input_shape = tensor_shape.TensorShape(input_shape)
  if self.data_format == 'channels_first':
    channel_axis = 1
  else:
    channel_axis = -1
  if input_shape[channel_axis].value is None:
    raise ValueError('The channel dimension of the inputs '
                     'should be defined. Found `None`.')
  input_dim = input_shape[channel_axis].value
  kernel_shape = self.kernel_size + (input_dim, self.filters)
  # dense kernel
  self.kernel_pre = self.add_variable(name='kernel_pre',
                                      shape=kernel_shape,
                                      initializer=self.kernel_initializer,
                                      regularizer=self.kernel_regularizer,
                                      trainable=True,
                                      dtype=self.dtype)
  conv_th = tf.ones_like(self.kernel_pre) * self.sparse_th
  conv_zero = tf.zeros_like(self.kernel_pre)
  cond = tf.less(tf.abs(self.kernel_pre), conv_th)
  self.kernel = tf.where(cond, conv_zero, self.kernel_pre, name='kernel')
  if self.use_bias:
    self.bias = self.add_variable(name='bias',
                                  shape=(self.filters,),
                                  initializer=self.bias_initializer,
                                  regularizer=self.bias_regularizer,
                                  trainable=True,
                                  dtype=self.dtype)
  else:
    self.bias = None
  self.input_spec = base.InputSpec(ndim=self.rank + 2,
                                   axes={channel_axis: input_dim})
  self.built = True
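The distinctive part of Example 9 is how the effective kernel is derived from kernel_pre: any entry whose magnitude is below self.sparse_th is clamped to zero with tf.where. A standalone sketch of that masking pattern follows; the tensor shape and the threshold value are hypothetical, and the snippet assumes TF 1.x graph mode.

import tensorflow as tf

# Hypothetical pre-mask kernel and sparsity threshold (illustrative values).
kernel_pre = tf.get_variable("kernel_pre", shape=[3, 3, 16, 32])
sparse_th = 0.05

# Entries with |w| < sparse_th are replaced by zero; the rest pass through.
cond = tf.less(tf.abs(kernel_pre), sparse_th * tf.ones_like(kernel_pre))
kernel = tf.where(cond, tf.zeros_like(kernel_pre), kernel_pre, name="kernel")

Because the comparison is itself part of the graph, the mask is recomputed from the current value of kernel_pre on every run rather than being fixed once at build time.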
Example 10: __init__
# Required import: from tensorflow.python.layers import base [as alias]
# Or: from tensorflow.python.layers.base import InputSpec [as alias]
def __init__(self, pool_function, pool_size, strides,
             padding='valid', data_format='channels_last',
             name=None, **kwargs):
  super(_Pooling1D, self).__init__(name=name, **kwargs)
  self.pool_function = pool_function
  self.pool_size = utils.normalize_tuple(pool_size, 1, 'pool_size')
  self.strides = utils.normalize_tuple(strides, 1, 'strides')
  self.padding = utils.normalize_padding(padding)
  self.data_format = utils.normalize_data_format(data_format)
  self.input_spec = base.InputSpec(ndim=3)
Example 11: build
# Required import: from tensorflow.python.layers import base [as alias]
# Or: from tensorflow.python.layers.base import InputSpec [as alias]
def build(self, input_shape):
  input_shape = tensor_shape.TensorShape(input_shape)
  if self.data_format == 'channels_first':
    channel_axis = 1
  else:
    channel_axis = -1
  if input_shape[channel_axis].value is None:
    raise ValueError('The channel dimension of the inputs '
                     'should be defined. Found `None`.')
  input_dim = input_shape[channel_axis].value
  kernel_shape = self.kernel_size + (input_dim, self.filters)
  self.kernel = self.add_variable(name='kernel',
                                  shape=kernel_shape,
                                  initializer=self.kernel_initializer,
                                  regularizer=self.kernel_regularizer,
                                  trainable=True,
                                  dtype=self.dtype)
  if self.use_bias:
    self.bias = self.add_variable(name='bias',
                                  shape=(self.filters,),
                                  initializer=self.bias_initializer,
                                  regularizer=self.bias_regularizer,
                                  trainable=True,
                                  dtype=self.dtype)
  else:
    self.bias = None
  self.input_spec = base.InputSpec(ndim=self.rank + 2,
                                   axes={channel_axis: input_dim})
  self.built = True
Example 12: __init__
# Required import: from tensorflow.python.layers import base [as alias]
# Or: from tensorflow.python.layers.base import InputSpec [as alias]
def __init__(self, filters,
             kernel_size,
             strides=(1, 1),
             padding='valid',
             data_format='channels_last',
             activation=None,
             use_bias=True,
             kernel_initializer=None,
             bias_initializer=init_ops.zeros_initializer(),
             kernel_regularizer=None,
             bias_regularizer=None,
             activity_regularizer=None,
             trainable=True,
             name=None,
             **kwargs):
  super(Conv2DTranspose, self).__init__(
      filters,
      kernel_size,
      strides=strides,
      padding=padding,
      data_format=data_format,
      activation=activation,
      use_bias=use_bias,
      kernel_initializer=kernel_initializer,
      bias_initializer=bias_initializer,
      kernel_regularizer=kernel_regularizer,
      bias_regularizer=bias_regularizer,
      activity_regularizer=activity_regularizer,
      trainable=trainable,
      name=name,
      **kwargs)
  self.input_spec = base.InputSpec(ndim=4)
Example 13: __init__
# Required import: from tensorflow.python.layers import base [as alias]
# Or: from tensorflow.python.layers.base import InputSpec [as alias]
def __init__(self,
             num_units,
             forget_bias=1.0,
             cell_clip=None,
             use_peephole=False,
             reuse=None,
             dtype=None,
             name="lstm_cell"):
  """Initialize the LSTM cell.

  Args:
    num_units: int, The number of units in the LSTM cell.
    forget_bias: float, The bias added to forget gates (see above).
    cell_clip: clip the cell to this value. Default is no cell clipping.
    use_peephole: Whether to use peephole connections or not.
    reuse: (optional) boolean describing whether to reuse variables in an
      existing scope. If not `True`, and the existing scope already has the
      given variables, an error is raised.
    dtype: the dtype of variables of this layer.
    name: String, the name of the layer. Layers with the same name will
      share weights, but to avoid mistakes we require reuse=True in such
      cases. By default this is "lstm_cell", for variable-name compatibility
      with `tf.nn.rnn_cell.LSTMCell`.
  """
  super(LSTMBlockFusedCell, self).__init__(
      _reuse=reuse, name=name, dtype=dtype)
  self._num_units = num_units
  self._forget_bias = forget_bias
  self._cell_clip = cell_clip if cell_clip is not None else -1
  self._use_peephole = use_peephole
  # Inputs must be 3-dimensional.
  self.input_spec = base_layer.InputSpec(ndim=3)
Example 14: build
# Required import: from tensorflow.python.layers import base [as alias]
# Or: from tensorflow.python.layers.base import InputSpec [as alias]
def build(self, input_shape):
  input_shape = tensor_shape.TensorShape(input_shape)
  channel_axis = self._channel_axis
  input_channels = input_shape[channel_axis].value
  if input_channels is None:
    raise ValueError("The channel dimension of the inputs must be defined.")
  kernel_shape = self.kernel_support + (input_channels, self.filters)
  if self.channel_separable:
    output_channels = self.filters * input_channels
  else:
    output_channels = self.filters
  if self.kernel_parameterizer is None:
    getter = self.add_variable
  else:
    getter = functools.partial(
        self.kernel_parameterizer, getter=self.add_variable)
  self._kernel = getter(
      name="kernel", shape=kernel_shape, dtype=self.dtype,
      initializer=self.kernel_initializer,
      regularizer=self.kernel_regularizer)
  if self.bias_parameterizer is None:
    getter = self.add_variable
  else:
    getter = functools.partial(
        self.bias_parameterizer, getter=self.add_variable)
  self._bias = None if not self.use_bias else getter(
      name="bias", shape=(output_channels,), dtype=self.dtype,
      initializer=self.bias_initializer, regularizer=self.bias_regularizer)
  self.input_spec = base.InputSpec(
      ndim=self._rank + 2, axes={channel_axis: input_channels})
  super(_SignalConv, self).build(input_shape)
Example 15: __init__
# Required import: from tensorflow.python.layers import base [as alias]
# Or: from tensorflow.python.layers.base import InputSpec [as alias]
def __init__(self, size=(2, 2), data_format=None, **kwargs):
  super(BilinearUpSampling2D, self).__init__(**kwargs)
  self.data_format = conv_utils.normalize_data_format(data_format)
  self.size = conv_utils.normalize_tuple(size, 2, 'size')
  self.input_spec = InputSpec(ndim=4)