

Python initializers.get Method Code Examples

This article collects typical usage examples of the Python method tensorflow.python.keras.initializers.get. If you are wondering how initializers.get is used in practice, or are looking for concrete examples, the curated snippets below should help. You can also explore further usage examples from the module that provides the method, tensorflow.python.keras.initializers.


The following presents 12 code examples of the initializers.get method, sorted by popularity by default.
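Before looking at the examples, here is a minimal orientation sketch (not taken from any of the projects below) of what initializers.get accepts: a string identifier, a config dict, an Initializer instance, or None. The exact class returned can vary between TensorFlow versions.

from tensorflow.python.keras import initializers

init_from_name = initializers.get('glorot_uniform')    # resolve by string identifier
init_from_dict = initializers.get(                      # resolve from a config dict
    {'class_name': 'RandomNormal', 'config': {'mean': 0.0, 'stddev': 0.01}})
init_from_obj = initializers.get(initializers.Ones())   # instances are returned unchanged
init_none = initializers.get(None)                      # None passes through as None

print(type(init_from_name).__name__)  # e.g. GlorotUniform, depending on the TF version
print(init_from_dict.stddev)          # 0.01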

Example 1: __init__

# Required import: from tensorflow.python.keras import initializers [as alias]
# Or: from tensorflow.python.keras.initializers import get [as alias]
def __init__(self,
                 input_dim,
                 output_dim,
                 dropout_rate=0.0,
                 activation='tanh',
                 kernel_initializer='glorot_uniform',
                 bias_initializer='zeros'):
        super(HighwayLayer, self).__init__()
        self.activation = activations.get(activation)
        self.kernel_initializer = initializers.get(kernel_initializer)
        self.bias_initializer = initializers.get(bias_initializer)

        self.dropout_rate = dropout_rate

        self.shape = (input_dim, output_dim)
        self.input_dim = input_dim
        self.output_dim = output_dim

        self.kernel = None
        self.bias = None 
Contributor: nju-websoft, Project: AliNet, Lines: 22, Source file: alinet_layer.py
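The constructor above only resolves and stores the initializers; they are typically consumed later when the weights are created. A hypothetical companion build() for this HighwayLayer (not part of the AliNet source, just an illustration under that assumption) could look like:

def build(self, input_shape):
    # create the weights with the initializers resolved by initializers.get in __init__
    self.kernel = self.add_weight(name='kernel',
                                  shape=self.shape,
                                  initializer=self.kernel_initializer,
                                  trainable=True)
    self.bias = self.add_weight(name='bias',
                                shape=(self.output_dim,),
                                initializer=self.bias_initializer,
                                trainable=True)
    super(HighwayLayer, self).build(input_shape)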

Example 2: __init__

# Required import: from tensorflow.python.keras import initializers [as alias]
# Or: from tensorflow.python.keras.initializers import get [as alias]
def __init__(self, units,
                 activation=None,
                 kernel_initializer='glorot_uniform',
                 kernel_regularizer=None,
                 kernel_constraint=None,
                 use_bias=False,
                 bias_initializer="zeros",
                 trainable=True,
                 name=None):
        super(Dense3D, self).__init__(trainable=trainable, name=name)
        self.units = units
        self.activation = activations.get(activation)
        self.kernel_initializer = initializers.get(kernel_initializer)
        self.kernel_regularizer = regularizers.get(kernel_regularizer)
        self.kernel_constraint = constraints.get(kernel_constraint)
        self.use_bias = use_bias
        # Note: bias_initializer is kept as passed here; it is not resolved via initializers.get.
        self.bias_initializer = bias_initializer 
Contributor: LongxingTan, Project: Time-series-prediction, Lines: 19, Source file: wavenet_layer.py

Example 3: __init__

# Required import: from tensorflow.python.keras import initializers [as alias]
# Or: from tensorflow.python.keras.initializers import get [as alias]
def __init__(
        self,
        units,
        activation=None,
        use_bias=True,
        kernel_initializer="glorot_uniform",
        bias_initializer="zeros",
        kernel_regularizer=None,
        bias_regularizer=None,
        activity_regularizer=None,
        kernel_constraint=None,
        bias_constraint=None,
        **kwargs,
    ):

        super(Dense, self).__init__(**kwargs)

        self.units = int(units)
        self.activation_identifier = activation
        self.activation = activations.get(self.activation_identifier)
        self.use_bias = use_bias

        self.kernel_initializer = initializers.get(kernel_initializer)
        self.bias_initializer = initializers.get(bias_initializer)

        # Not implemented arguments
        default_args_check(kernel_regularizer, "kernel_regularizer", "Dense")
        default_args_check(bias_regularizer, "bias_regularizer", "Dense")
        default_args_check(activity_regularizer, "activity_regularizer", "Dense")
        default_args_check(kernel_constraint, "kernel_constraint", "Dense")
        default_args_check(bias_constraint, "bias_constraint", "Dense") 
Contributor: tf-encrypted, Project: tf-encrypted, Lines: 33, Source file: dense.py
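initializers.get is usually paired with initializers.serialize when a layer implements get_config, so that resolved initializer objects can be turned back into plain configs. A hedged sketch of such a get_config method (not part of the tf-encrypted source) might be:

def get_config(self):
    # turn the resolved initializer/activation objects back into serializable configs
    config = {
        'units': self.units,
        'activation': activations.serialize(self.activation),
        'use_bias': self.use_bias,
        'kernel_initializer': initializers.serialize(self.kernel_initializer),
        'bias_initializer': initializers.serialize(self.bias_initializer),
    }
    base_config = super(Dense, self).get_config()
    return dict(list(base_config.items()) + list(config.items()))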

Example 4: __init__

# Required import: from tensorflow.python.keras import initializers [as alias]
# Or: from tensorflow.python.keras.initializers import get [as alias]
def __init__(self, alpha_fwd=0.999, alpha_bkw=0.99,
                 axis=1, epsilon=1e-5,
                 stream_mu_initializer='zeros', stream_var_initializer='ones',
                 u_ctrl_initializer='zeros', v_ctrl_initializer='zeros',
                 trainable=True, name=None, **kwargs):
        super(Norm, self).__init__(trainable=trainable, name=name, **kwargs)
        # set up mixed precision
        self.dtype_policy = self._mixed_precision_policy \
            if self._mixed_precision_policy.name == "infer_float32_vars" \
                else self._dtype

        if isinstance(self.dtype_policy, Policy):
            self.mixed_precision = True
            self.fp_type = tf.float32 # full precision
            self.mp_type = tf.float16 # reduced precision
        else:
            self.mixed_precision = False
            self.fp_type = self._dtype if self._dtype else tf.float32 # full precision
            self.mp_type = self.fp_type # reduced precision

        assert axis == 1, 'kernel requires channels_first data_format'

        self.axis = axis
        self.norm_ax = None
        self.epsilon = epsilon

        self.alpha_fwd = alpha_fwd
        self.alpha_bkw = alpha_bkw

        self.stream_mu_initializer = initializers.get(stream_mu_initializer)
        self.stream_var_initializer = initializers.get(stream_var_initializer)
        self.u_ctrl_initializer = initializers.get(u_ctrl_initializer)
        self.v_ctrl_initializer = initializers.get(v_ctrl_initializer) 
Contributor: Cerebras, Project: online-normalization, Lines: 35, Source file: online_norm.py

Example 5: __init__

# Required import: from tensorflow.python.keras import initializers [as alias]
# Or: from tensorflow.python.keras.initializers import get [as alias]
def __init__(self,
                 input_dim,
                 output_dim,
                 adj,
                 num_features_nonzero,
                 dropout_rate=0.0,
                 is_sparse_inputs=False,
                 activation=None,
                 use_bias=True,
                 kernel_initializer='glorot_uniform',
                 bias_initializer='zeros',
                 kernel_regularizer='l2',
                 bias_regularizer='l2',
                 activity_regularizer=None,
                 kernel_constraint=None,
                 bias_constraint=None,
                 **kwargs):
        super(GraphConvolution, self).__init__()
        self.activation = activations.get(activation)
        self.use_bias = use_bias
        self.kernel_initializer = initializers.get(kernel_initializer)
        self.bias_initializer = initializers.get(bias_initializer)
        self.kernel_regularizer = regularizers.get(kernel_regularizer)
        self.bias_regularizer = regularizers.get(bias_regularizer)
        self.kernel_constraint = constraints.get(kernel_constraint)
        self.bias_constraint = constraints.get(bias_constraint)

        self.kernels = list()
        self.bias = None
        self.input_dim = input_dim
        self.output_dim = output_dim
        self.is_sparse_inputs = is_sparse_inputs
        self.num_features_nonzero = num_features_nonzero
        self.adjs = [tf.SparseTensor(indices=am[0], values=am[1], dense_shape=am[2]) for am in adj]
        self.dropout_rate = dropout_rate 
Contributor: nju-websoft, Project: AliNet, Lines: 37, Source file: layers.py

Example 6: __init__

# Required import: from tensorflow.python.keras import initializers [as alias]
# Or: from tensorflow.python.keras.initializers import get [as alias]
def __init__(self,
                 input_dim,
                 output_dim,
                 adj,
                 num_features_nonzero,
                 dropout_rate=0.0,
                 num_base=-1,
                 is_sparse_inputs=False,
                 featureless=False,
                 activation=None,
                 use_bias=True,
                 kernel_initializer='glorot_uniform',
                 bias_initializer='zeros',
                 kernel_regularizer="l2",
                 bias_regularizer=None,
                 kernel_constraint=None,
                 bias_constraint=None,
                 **kwargs):
        super(RGraphConvolutionLayer, self).__init__()
        self.activation = activations.get(activation)
        self.use_bias = use_bias
        self.kernel_initializer = initializers.get(kernel_initializer)
        self.bias_initializer = initializers.get(bias_initializer)
        self.kernel_regularizer = regularizers.get(kernel_regularizer)
        self.bias_regularizer = regularizers.get(bias_regularizer)
        self.kernel_constraint = constraints.get(kernel_constraint)
        self.bias_constraint = constraints.get(bias_constraint)
        self.bias = None
        self.input_dim = input_dim
        self.output_dim = output_dim
        self.is_sparse_inputs = is_sparse_inputs
        self.featureless = featureless
        self.num_features_nonzero = num_features_nonzero
        self.support = len(adj)
        self.adj_list = [tf.SparseTensor(indices=adj[i][0], values=adj[i][1], dense_shape=adj[i][2])
                         for i in range(len(adj))]
        self.dropout_rate = dropout_rate
        self.num_bases = num_base
        self.W = list() 
Contributor: nju-websoft, Project: AliNet, Lines: 41, Source file: layers.py

Example 7: __init__

# Required import: from tensorflow.python.keras import initializers [as alias]
# Or: from tensorflow.python.keras.initializers import get [as alias]
def __init__(self,
                 kernel_initializer='glorot_uniform',
                 kernel_regularizer=None,
                 kernel_constraint=None,
                 **kwargs):
        if 'input_shape' not in kwargs and 'input_dim' in kwargs:
            kwargs['input_shape'] = (kwargs.pop('input_dim'),)
            
        super(StressIntensityRange, self).__init__(**kwargs)
        self.kernel_initializer = initializers.get(kernel_initializer)
        self.kernel_regularizer = regularizers.get(kernel_regularizer)
        self.kernel_constraint  = constraints.get(kernel_constraint) 
Contributor: PML-UCF, Project: pinn, Lines: 14, Source file: physics.py

Example 8: __init__

# Required import: from tensorflow.python.keras import initializers [as alias]
# Or: from tensorflow.python.keras.initializers import get [as alias]
def __init__(self,
                 kernel_initializer='glorot_uniform',
                 kernel_regularizer=None,
                 kernel_constraint=None,
                 table_shape=(1,4,4,1),
                 **kwargs):
        if 'input_shape' not in kwargs and 'input_dim' in kwargs:
            kwargs['input_shape'] = (kwargs.pop('input_dim'),)
        super(TableInterpolation, self).__init__(**kwargs)
        self.kernel_initializer = initializers.get(kernel_initializer)
        self.kernel_regularizer = regularizers.get(kernel_regularizer)
        self.kernel_constraint  = constraints.get(kernel_constraint)
        
        self.table_shape = table_shape 
Contributor: PML-UCF, Project: pinn, Lines: 16, Source file: core.py

Example 9: build

# Required import: from tensorflow.python.keras import initializers [as alias]
# Or: from tensorflow.python.keras.initializers import get [as alias]
def build(self, input_shape):
        """Build `Layer`"""
        input_shape = tensor_shape.TensorShape(input_shape).as_list()
        self.input_spec = InputSpec(shape=input_shape)

        if not self.layer.built:
            self.layer.build(input_shape)
            self.layer.built = False

            if not hasattr(self.layer, "kernel"):
                raise ValueError(
                    "`WeightNorm` must wrap a layer that" " contains a `kernel` for weights"
                )

            # The kernel's filter or unit dimension is -1
            self.layer_depth = int(self.layer.kernel.shape[-1])
            self.norm_axes = list(range(self.layer.kernel.shape.ndims - 1))

            self.layer.v = self.layer.kernel
            self.layer.g = self.layer.add_variable(
                name="g",
                shape=(self.layer_depth,),
                initializer=initializers.get("ones"),
                dtype=self.layer.kernel.dtype,
                trainable=True,
            )

            with ops.control_dependencies([self.layer.g.assign(self._init_norm(self.layer.v))]):
                self._compute_weights()

            self.layer.built = True

        super(WeightNorm, self).build()
        self.built = True

    # pylint: disable=arguments-differ 
Contributor: NervanaSystems, Project: nlp-architect, Lines: 38, Source file: temporal_convolutional_network.py
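Assuming this WeightNorm follows the usual Keras wrapper pattern and takes the wrapped layer as its first constructor argument (that part is not shown in the snippet above), usage might look roughly like this:

import tensorflow as tf

# hypothetical usage: reparameterize a Dense layer's kernel as g * v / ||v||
dense = tf.keras.layers.Dense(64, activation='relu')
wrapped = WeightNorm(dense)            # assumes the wrapper accepts a layer instance
outputs = wrapped(tf.zeros((8, 32)))   # building the wrapper adds the 'g' variable
print([w.name for w in wrapped.weights])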

Example 10: __init__

# Required import: from tensorflow.python.keras import initializers [as alias]
# Or: from tensorflow.python.keras.initializers import get [as alias]
def __init__(
        self,
        filters,
        kernel_size,
        strides=(1, 1),
        padding="valid",
        data_format=None,
        dilation_rate=(1, 1),
        activation=None,
        use_bias=True,
        kernel_initializer="glorot_uniform",
        bias_initializer="zeros",
        kernel_regularizer=None,
        bias_regularizer=None,
        activity_regularizer=None,
        kernel_constraint=None,
        bias_constraint=None,
        **kwargs,
    ):

        super(Conv2D, self).__init__(**kwargs)

        self.rank = 2
        self.filters = filters
        self.kernel_size = conv_utils.normalize_tuple(
            kernel_size, self.rank, "kernel_size"
        )
        if self.kernel_size[0] != self.kernel_size[1]:
            raise NotImplementedError(
                "TF Encrypted currently only supports square kernels, i.e. "
                "the same size along the height and the width. "
                "You gave: {}".format(self.kernel_size)
            )
        self.strides = conv_utils.normalize_tuple(strides, self.rank, "strides")
        self.padding = conv_utils.normalize_padding(padding).upper()
        self.data_format = conv_utils.normalize_data_format(data_format)
        if activation is not None:
            logger.info(
                "Performing an activation before a pooling layer can result "
                "in unnecessary performance loss. Check model definition in "
                "case of missed optimization."
            )
        self.activation = activations.get(activation)
        self.use_bias = use_bias
        self.kernel_initializer = initializers.get(kernel_initializer)
        self.bias_initializer = initializers.get(bias_initializer)

        # Not implemented arguments
        default_args_check(dilation_rate, "dilation_rate", "Conv2D")
        default_args_check(kernel_regularizer, "kernel_regularizer", "Conv2D")
        default_args_check(bias_regularizer, "bias_regularizer", "Conv2D")
        default_args_check(activity_regularizer, "activity_regularizer", "Conv2D")
        default_args_check(kernel_constraint, "kernel_constraint", "Conv2D")
        default_args_check(bias_constraint, "bias_constraint", "Conv2D") 
Contributor: tf-encrypted, Project: tf-encrypted, Lines: 56, Source file: convolutional.py

Example 11: __init__

# Required import: from tensorflow.python.keras import initializers [as alias]
# Or: from tensorflow.python.keras.initializers import get [as alias]
def __init__(self,
                 units,
                 relations,
                 kernel_basis_size=None,
                 activation=None,
                 use_bias=False,
                 batch_normalisation=False,
                 kernel_initializer='glorot_uniform',
                 bias_initializer='zeros',
                 kernel_regularizer=None,
                 bias_regularizer=None,
                 activity_regularizer=None,
                 kernel_constraint=None,
                 bias_constraint=None,
                 feature_dropout=None,
                 support_dropout=None,
                 name='relational_graph_conv',
                 **kwargs):
        if 'input_shape' not in kwargs and 'input_dim' in kwargs:
            kwargs['input_shape'] = (kwargs.pop('input_dim'),)

        super(RelationalGraphConv, self).__init__(
            activity_regularizer=regularizers.get(activity_regularizer),
            name=name, **kwargs)

        self.units = int(units)
        self.relations = int(relations)
        self.kernel_basis_size = (int(kernel_basis_size)
                                  if kernel_basis_size is not None else None)
        self.activation = activations.get(activation)
        self.use_bias = use_bias
        self.batch_normalisation = batch_normalisation
        self.kernel_initializer = initializers.get(kernel_initializer)
        self.bias_initializer = initializers.get(bias_initializer)
        self.kernel_regularizer = regularizers.get(kernel_regularizer)
        self.bias_regularizer = regularizers.get(bias_regularizer)
        self.kernel_constraint = constraints.get(kernel_constraint)
        self.bias_constraint = constraints.get(bias_constraint)
        self.feature_dropout = feature_dropout
        self.support_dropout = support_dropout

        self.supports_masking = True
        self.input_spec = InputSpec(min_ndim=2)

        self.dense_layer = rgat_layers.BasisDecompositionDense(
            units=self.units * self.relations,
            basis_size=self.kernel_basis_size,
            coefficients_size=self.relations,
            use_bias=False,
            kernel_initializer=self.kernel_initializer,
            kernel_regularizer=self.kernel_regularizer,
            kernel_constraint=self.kernel_constraint,
            name=name + '_basis_decomposition_dense',
            **kwargs)
        if self.batch_normalisation:
            self.batch_normalisation_layer = tf.layers.BatchNormalization() 
Contributor: babylonhealth, Project: rgat, Lines: 58, Source file: relational_graph_convolution.py

Example 12: build_network_graph

# Required import: from tensorflow.python.keras import initializers [as alias]
# Or: from tensorflow.python.keras.initializers import get [as alias]
def build_network_graph(self, x, last_timepoint=False):
        """
        Given the input placeholder x, build the entire TCN graph
        Args:
            x: Input placeholder
            last_timepoint: Whether or not to select only the last timepoint to output

        Returns:
            output of the TCN
        """
        # loop and define multiple residual blocks
        with tf.variable_scope("tcn"):
            for i in range(self.n_hidden_layers):
                dilation_size = 2 ** i
                in_channels = self.n_features_in if i == 0 else self.hidden_sizes[i - 1]
                out_channels = self.hidden_sizes[i]
                with tf.variable_scope("residual_block_" + str(i)):
                    x = self._residual_block(
                        x,
                        in_channels,
                        out_channels,
                        dilation_size,
                        (self.kernel_size - 1) * dilation_size,
                    )
                    x = tf.nn.relu(x)
                self.layer_activations.append(x)
            self.sequence_output = x

            # get outputs
            if not last_timepoint:
                prediction = self.sequence_output
            else:
                # last time point size (batch_size, hidden_sizes_encoder)
                width = self.sequence_output.shape[1].value
                lt = tf.squeeze(
                    tf.slice(self.sequence_output, [0, width - 1, 0], [-1, 1, -1]), axis=1
                )
                prediction = tf.layers.Dense(
                    1,
                    kernel_initializer=tf.initializers.random_normal(0, 0.01),
                    bias_initializer=tf.initializers.random_normal(0, 0.01),
                )(lt)

        return prediction 
Contributor: NervanaSystems, Project: nlp-architect, Lines: 46, Source file: temporal_convolutional_network.py


Note: The tensorflow.python.keras.initializers.get examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets were selected from open-source projects contributed by various developers; copyright of the source code belongs to the original authors. Please consult each project's License before distributing or using the code. Do not reproduce this article without permission.