

Python fluid.ParamAttr Method Code Examples

This article collects typical usage examples of the paddle.fluid.ParamAttr method in Python. If you are wondering how fluid.ParamAttr is used in practice, or looking for concrete examples of it, the curated code examples below may help. You can also explore further usage examples from the containing module, paddle.fluid.


The following presents 15 code examples of the fluid.ParamAttr method, sorted by popularity by default.
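Before working through the examples, here is a minimal sketch of what fluid.ParamAttr is for: it bundles a parameter's name, initializer, regularizer, and per-parameter learning-rate multiplier into a single attribute object that layers accept through their param_attr and bias_attr arguments. The sketch below targets the paddle.fluid 1.x static-graph API; the layer, shapes, and attribute values are illustrative assumptions, not code taken from the examples that follow.

import paddle.fluid as fluid

# ParamAttr bundles the weight's name, initializer, regularizer,
# and per-parameter learning-rate multiplier into one object.
w_attr = fluid.ParamAttr(
    name="demo_fc.w_0",
    initializer=fluid.initializer.TruncatedNormal(scale=0.02),
    regularizer=fluid.regularizer.L2Decay(1e-4),
    learning_rate=1.0)

x = fluid.layers.data(name="x", shape=[128], dtype="float32")
# The fc layer creates its weight variable according to w_attr.
out = fluid.layers.fc(input=x, size=64, param_attr=w_attr)

Giving a ParamAttr a fixed name, as many of the examples below do, is also what lets different parts of a program share or checkpoint a specific weight, since the parameter is stored under that name.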

Example 1: __init__

# Required import: from paddle import fluid [as alias]
# Or alternatively: from paddle.fluid import ParamAttr [as alias]
def __init__(self,
                 num_layers,
                 input_size,
                 hidden_size,
                 dropout_prob=0.,
                 init_scale=0.1):
        super(GRUEncoderCell, self).__init__()
        self.dropout_prob = dropout_prob
        # use add_sublayer to register the stacked cells as sublayers
        self.gru_cells = []
        for i in range(num_layers):
            self.gru_cells.append(
                self.add_sublayer(
                    "gru_%d" % i,
                    GRUCell(
                        input_size=input_size if i == 0 else hidden_size,
                        hidden_size=hidden_size,
                        param_attr=fluid.ParamAttr(
                            initializer=fluid.initializer.UniformInitializer(
                                low=-init_scale, high=init_scale))))) 
Developer: PaddlePaddle, Project: hapi, Lines: 23, Source: text.py

Example 2: __init__

# Required import: from paddle import fluid [as alias]
# Or alternatively: from paddle.fluid import ParamAttr [as alias]
def __init__(self,
                 num_layers,
                 input_size,
                 hidden_size,
                 dropout_prob=0.,
                 init_scale=0.1):
        super(EncoderCell, self).__init__()
        self.dropout_prob = dropout_prob
        # use add_sublayer to register the stacked cells as sublayers
        self.lstm_cells = []
        for i in range(num_layers):
            self.lstm_cells.append(
                self.add_sublayer(
                    "lstm_%d" % i,
                    BasicLSTMCell(
                        input_size=input_size if i == 0 else hidden_size,
                        hidden_size=hidden_size,
                        param_attr=ParamAttr(initializer=UniformInitializer(
                            low=-init_scale, high=init_scale))))) 
Developer: PaddlePaddle, Project: hapi, Lines: 21, Source: seq2seq_base.py

Example 3: __init__

# Required import: from paddle import fluid [as alias]
# Or alternatively: from paddle.fluid import ParamAttr [as alias]
def __init__(self,
                 num_layers,
                 input_size,
                 hidden_size,
                 dropout_prob=0.,
                 init_scale=0.1):
        super(DecoderCell, self).__init__()
        self.dropout_prob = dropout_prob
        # use add_sublayer to register the stacked cells as sublayers
        self.lstm_cells = []
        for i in range(num_layers):
            self.lstm_cells.append(
                self.add_sublayer(
                    "lstm_%d" % i,
                    BasicLSTMCell(
                        input_size=input_size + hidden_size
                        if i == 0 else hidden_size,
                        hidden_size=hidden_size,
                        param_attr=ParamAttr(initializer=UniformInitializer(
                            low=-init_scale, high=init_scale)))))
        self.attention_layer = AttentionLayer(hidden_size) 
Developer: PaddlePaddle, Project: hapi, Lines: 23, Source: seq2seq_attn.py

Example 4: __init__

# Required import: from paddle import fluid [as alias]
# Or alternatively: from paddle.fluid import ParamAttr [as alias]
def __init__(self,
                 args,
                 config,
                 num_labels,
                 return_pooled_out=True,
                 use_fp16=False):
        super(ClsModelLayer, self).__init__()
        self.config = config
        self.use_fp16 = use_fp16
        self.loss_scaling = args.loss_scaling

        self.bert_layer = BertEncoder(
            config=self.config, return_pooled_out=True, use_fp16=self.use_fp16)

        self.cls_fc = Linear(
            input_dim=self.config["hidden_size"],
            output_dim=num_labels,
            param_attr=fluid.ParamAttr(
                name="cls_out_w",
                initializer=fluid.initializer.TruncatedNormal(scale=0.02)),
            bias_attr=fluid.ParamAttr(
                name="cls_out_b", initializer=fluid.initializer.Constant(0.))) 
Developer: PaddlePaddle, Project: hapi, Lines: 24, Source: bert_classifier.py

Example 5: __init__

# Required import: from paddle import fluid [as alias]
# Or alternatively: from paddle.fluid import ParamAttr [as alias]
def __init__(self, src_vocab_size, max_length, n_layer, n_head, d_key,
                 d_value, d_model, d_inner_hid, prepostprocess_dropout,
                 attention_dropout, relu_dropout, preprocess_cmd,
                 postprocess_cmd, word_embedder):
        super(WrapEncoder, self).__init__()

        self.emb_dropout = prepostprocess_dropout
        self.emb_dim = d_model
        self.word_embedder = word_embedder
        self.pos_encoder = Embedding(
            size=[max_length, self.emb_dim],
            param_attr=fluid.ParamAttr(
                initializer=fluid.initializer.NumpyArrayInitializer(
                    position_encoding_init(max_length, self.emb_dim)),
                trainable=False))

        self.encoder = Encoder(n_layer, n_head, d_key, d_value, d_model,
                               d_inner_hid, prepostprocess_dropout,
                               attention_dropout, relu_dropout, preprocess_cmd,
                               postprocess_cmd) 
Developer: PaddlePaddle, Project: hapi, Lines: 22, Source: transformer.py

Example 6: get_pooled_output

# Required import: from paddle import fluid [as alias]
# Or alternatively: from paddle.fluid import ParamAttr [as alias]
def get_pooled_output(self):
        """
        Get the first feature of each sequence for classification
        """

        next_sent_feat = fluid.layers.slice(
            input=self._enc_out, axes=[1], starts=[0], ends=[1])
        next_sent_feat = fluid.layers.fc(
            input=next_sent_feat,
            num_flatten_dims=2,
            size=self._emb_size,
            act="tanh",
            param_attr=fluid.ParamAttr(
                name="pooled_fc.w_0", initializer=self._param_initializer),
            bias_attr="pooled_fc.b_0")
        return next_sent_feat 
Developer: baidu, Project: Senta, Lines: 18, Source: bert.py

Example 7: positionwise_feed_forward

# Required import: from paddle import fluid [as alias]
# Or alternatively: from paddle.fluid import ParamAttr [as alias]
def positionwise_feed_forward(x,
                              d_inner_hid,
                              d_hid,
                              dropout_rate,
                              hidden_act,
                              param_initializer=None,
                              name='ffn'):

    hidden = layers.fc(input=x,
                       size=d_inner_hid,
                       num_flatten_dims=2,
                       act=hidden_act,
                       param_attr=fluid.ParamAttr(
                           name=name + '_fc_0.w_0',
                           initializer=param_initializer),
                       bias_attr=name + '_fc_0.b_0')
    if dropout_rate:
        hidden = layers.dropout(
            hidden,
            dropout_prob=dropout_rate,
            dropout_implementation="upscale_in_train",
            is_test=False)
    out = layers.fc(input=hidden,
                    size=d_hid,
                    num_flatten_dims=2,
                    param_attr=fluid.ParamAttr(
                        name=name + '_fc_1.w_0', initializer=param_initializer),
                    bias_attr=name + '_fc_1.b_0')
    return out 
Developer: jtyoui, Project: Jtyoui, Lines: 31, Source: transformer_encoder.py

Example 8: pre_post_process_layer

# Required import: from paddle import fluid [as alias]
# Or alternatively: from paddle.fluid import ParamAttr [as alias]
def pre_post_process_layer(prev_out, out, process_cmd, dropout_rate=0., name=''):
    for cmd in process_cmd:
        if cmd == "a":  # add residual connection
            out = out + prev_out if prev_out else out
        elif cmd == "n":  # add layer normalization
            out_dtype = out.dtype
            if out_dtype == fluid.core.VarDesc.VarType.FP16:
                out = layers.cast(x=out, dtype="float32")
            out = layers.layer_norm(
                out,
                begin_norm_axis=len(out.shape) - 1,
                param_attr=fluid.ParamAttr(
                    name=name + '_layer_norm_scale',
                    initializer=fluid.initializer.Constant(1.)),
                bias_attr=fluid.ParamAttr(
                    name=name + '_layer_norm_bias',
                    initializer=fluid.initializer.Constant(0.)))
            if out_dtype == fluid.core.VarDesc.VarType.FP16:
                out = layers.cast(x=out, dtype="float16")
        elif cmd == "d":  # add dropout
            if dropout_rate:
                out = layers.dropout(
                    out,
                    dropout_prob=dropout_rate,
                    dropout_implementation="upscale_in_train",
                    is_test=False)
    return out 
Developer: jtyoui, Project: Jtyoui, Lines: 29, Source: transformer_encoder.py

Example 9: bilstm_net

# Required import: from paddle import fluid [as alias]
# Or alternatively: from paddle.fluid import ParamAttr [as alias]
def bilstm_net(data, dict_dim, class_dim, emb_dim=128, hid_dim=128, hid_dim2=96, emb_lr=30.0):
    # embedding layer
    emb = fluid.layers.embedding(input=data,
                                 size=[dict_dim, emb_dim],
                                 param_attr=fluid.ParamAttr(learning_rate=emb_lr))

    # bi-lstm layer
    fc0 = fluid.layers.fc(input=emb, size=hid_dim * 4)

    rfc0 = fluid.layers.fc(input=emb, size=hid_dim * 4)

    lstm_h, c = fluid.layers.dynamic_lstm(input=fc0, size=hid_dim * 4, is_reverse=False)

    rlstm_h, c = fluid.layers.dynamic_lstm(input=rfc0, size=hid_dim * 4, is_reverse=True)

    # extract the last time step of each sequence
    lstm_last = fluid.layers.sequence_last_step(input=lstm_h)
    rlstm_last = fluid.layers.sequence_last_step(input=rlstm_h)

    # concat layer
    lstm_concat = fluid.layers.concat(input=[lstm_last, rlstm_last], axis=1)

    # fully connected layer
    fc1 = fluid.layers.fc(input=lstm_concat, size=hid_dim2, act='tanh')
    # softmax layer
    prediction = fluid.layers.fc(input=fc1, size=class_dim, act='softmax')
    return prediction 
Developer: yeyupiaoling, Project: LearnPaddle2, Lines: 29, Source: bilstm_net.py

Example 10: conv

# Required import: from paddle import fluid [as alias]
# Or alternatively: from paddle.fluid import ParamAttr [as alias]
def conv(self, *args, **kargs):
        if "xception" in self.name_scope:
            init_std = 0.09
        elif "logit" in self.name_scope:
            init_std = 0.01
        elif self.name_scope.endswith('depthwise/'):
            init_std = 0.33
        else:
            init_std = 0.06
        if self.name_scope.endswith('depthwise/'):
            regularizer = self.depthwise_regularizer
        else:
            regularizer = None

        kargs['param_attr'] = fluid.ParamAttr(
            name=self.name_scope + 'weights',
            regularizer=regularizer,
            initializer=fluid.initializer.TruncatedNormal(
                loc=0.0, scale=init_std))
        if 'bias_attr' in kargs and kargs['bias_attr']:
            kargs['bias_attr'] = fluid.ParamAttr(
                name=self.name_scope + 'biases',
                regularizer=regularizer,
                initializer=fluid.initializer.ConstantInitializer(value=0.0))
        else:
            kargs['bias_attr'] = False
        kargs['name'] = self.name_scope + 'conv'
        return self.append_op_result(fluid.layers.conv2d(*args, **kargs), 'conv') 
Developer: qixuxiang, Project: Baidu_Lane_Segmentation, Lines: 30, Source: deeplabv3p.py

Example 11: bn

# Required import: from paddle import fluid [as alias]
# Or alternatively: from paddle.fluid import ParamAttr [as alias]
def bn(self, *args, **kargs):
        if self.default_norm_type == 'bn':
            with self.scope('BatchNorm'):
                return self.append_op_result(
                    fluid.layers.batch_norm(
                        *args,
                        epsilon=self.default_epsilon,
                        momentum=self.bn_momentum,
                        param_attr=fluid.ParamAttr(
                            name=self.name_scope + 'gamma', regularizer=self.bn_regularizer),
                        bias_attr=fluid.ParamAttr(
                            name=self.name_scope + 'beta', regularizer=self.bn_regularizer),
                        moving_mean_name=self.name_scope + 'moving_mean',
                        moving_variance_name=self.name_scope + 'moving_variance',
                        **kargs),
                    'bn')
        elif self.default_norm_type == 'gn':
            with self.scope('GroupNorm'):
                return self.append_op_result(
                    self.group_norm(
                        args[0],
                        self.default_group_number,
                        eps=self.default_epsilon,
                        param_attr=fluid.ParamAttr(
                            name=self.name_scope + 'gamma', regularizer=self.bn_regularizer),
                        bias_attr=fluid.ParamAttr(
                            name=self.name_scope + 'beta', regularizer=self.bn_regularizer)),
                    'gn')
        else:
            raise "Unsupport norm type:" + self.default_norm_type 
Developer: qixuxiang, Project: Baidu_Lane_Segmentation, Lines: 32, Source: deeplabv3p.py

Example 12: bi_lstm_encoder

# Required import: from paddle import fluid [as alias]
# Or alternatively: from paddle.fluid import ParamAttr [as alias]
def bi_lstm_encoder(input_seq, gate_size, para_name, args):
    """
    A bi-directional LSTM encoder implementation.
    The linear transformations for the input, output, and forget gates
    and the cell activation vector must be done outside of dynamic_lstm,
    so the projection size is 4 times gate_size.
    """

    input_forward_proj = layers.fc(
        input=input_seq,
        param_attr=fluid.ParamAttr(name=para_name + '_fw_gate_w'),
        size=gate_size * 4,
        act=None,
        bias_attr=False)
    input_reversed_proj = layers.fc(
        input=input_seq,
        param_attr=fluid.ParamAttr(name=para_name + '_bw_gate_w'),
        size=gate_size * 4,
        act=None,
        bias_attr=False)
    forward, _ = layers.dynamic_lstm(
        input=input_forward_proj,
        size=gate_size * 4,
        use_peepholes=False,
        param_attr=fluid.ParamAttr(name=para_name + '_fw_lstm_w'),
        bias_attr=fluid.ParamAttr(name=para_name + '_fw_lstm_b'))
    reversed, _ = layers.dynamic_lstm(
        input=input_reversed_proj,
        param_attr=fluid.ParamAttr(name=para_name + '_bw_lstm_w'),
        bias_attr=fluid.ParamAttr(name=para_name + '_bw_lstm_b'),
        size=gate_size * 4,
        is_reverse=True,
        use_peepholes=False)

    encoder_out = layers.concat(input=[forward, reversed], axis=1)
    return encoder_out 
Developer: baidu, Project: DuReader, Lines: 38, Source: rc_model.py

Example 13: embedding

# Required import: from paddle import fluid [as alias]
# Or alternatively: from paddle.fluid import ParamAttr [as alias]
def embedding(input_ids, shape, args):
    """Embedding layer"""
    input_embedding = layers.embedding(
        input=input_ids,
        size=shape,
        dtype='float32',
        is_sparse=True,
        param_attr=fluid.ParamAttr(name='embedding_para'))
    return input_embedding 
Developer: baidu, Project: DuReader, Lines: 11, Source: rc_model.py

Example 14: lstm_step

# Required import: from paddle import fluid [as alias]
# Or alternatively: from paddle.fluid import ParamAttr [as alias]
def lstm_step(x_t, hidden_t_prev, cell_t_prev, size, para_name, args):
    """Util function for pointer network"""
    def linear(inputs, para_name, args):
        return layers.fc(input=inputs,
                         size=size,
                         param_attr=fluid.ParamAttr(name=para_name + '_w'),
                         bias_attr=fluid.ParamAttr(name=para_name + '_b'))

    input_cat = layers.concat([hidden_t_prev, x_t], axis=1)
    forget_gate = layers.sigmoid(x=linear(input_cat, para_name + '_lstm_f',
                                          args))
    input_gate = layers.sigmoid(x=linear(input_cat, para_name + '_lstm_i',
                                         args))
    output_gate = layers.sigmoid(x=linear(input_cat, para_name + '_lstm_o',
                                          args))
    cell_tilde = layers.tanh(x=linear(input_cat, para_name + '_lstm_c', args))

    cell_t = layers.sums(input=[
        layers.elementwise_mul(
            x=forget_gate, y=cell_t_prev), layers.elementwise_mul(
                x=input_gate, y=cell_tilde)
    ])

    hidden_t = layers.elementwise_mul(x=output_gate, y=layers.tanh(x=cell_t))

    return hidden_t, cell_t 
Developer: baidu, Project: DuReader, Lines: 28, Source: rc_model.py

Example 15: __init__

# Required import: from paddle import fluid [as alias]
# Or alternatively: from paddle.fluid import ParamAttr [as alias]
def __init__(
            self,
            in_channel=1,
            rnn_hidden_size=200,
            decoder_size=128,
            is_test=False, ):
        super(Encoder, self).__init__()
        self.rnn_hidden_size = rnn_hidden_size

        self.backbone = CNN(in_ch=in_channel, is_test=is_test)

        para_attr = fluid.ParamAttr(
            initializer=fluid.initializer.Normal(0.0, 0.02))
        bias_attr = fluid.ParamAttr(
            initializer=fluid.initializer.Normal(0.0, 0.02), learning_rate=2.0)
        self.gru_fwd = RNN(cell=GRUCell(
            input_size=128 * 6,
            hidden_size=rnn_hidden_size,
            param_attr=para_attr,
            bias_attr=bias_attr,
            candidate_activation='relu'),
                           is_reverse=False,
                           time_major=False)
        self.gru_bwd = RNN(cell=GRUCell(
            input_size=128 * 6,
            hidden_size=rnn_hidden_size,
            param_attr=para_attr,
            bias_attr=bias_attr,
            candidate_activation='relu'),
                           is_reverse=True,
                           time_major=False)
        self.encoded_proj_fc = fluid.dygraph.Linear(
            rnn_hidden_size * 2, decoder_size, bias_attr=False) 
Developer: PaddlePaddle, Project: hapi, Lines: 35, Source: seq2seq_attn.py


Note: The paddle.fluid.ParamAttr examples in this article were compiled by 純淨天空 from GitHub, MSDocs, and other open-source code and documentation platforms. The snippets were selected from open-source projects contributed by various developers, and copyright remains with the original authors; refer to each project's license before distributing or using the code. Do not reproduce without permission.