

Python tensorflow.zeros_initializer Function Code Examples

This article collects typical usage examples of the Python function tensorflow.zeros_initializer. If you are wondering what exactly zeros_initializer does, how to call it, or what real-world usage looks like, the curated examples below should help.


Below are 15 code examples of zeros_initializer, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
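Before the examples, a minimal sketch of what tf.zeros_initializer does (TF1 graph-mode API; the variable name here is illustrative):

import tensorflow as tf  # TensorFlow 1.x

# zeros_initializer returns an initializer that fills a variable with zeros;
# it is most commonly used for bias terms.
b = tf.get_variable("bias", shape=[4], initializer=tf.zeros_initializer())

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    print(sess.run(b))  # -> [0. 0. 0. 0.]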

Example 1: project_bilstm_layer

    def project_bilstm_layer(self, lstm_outputs, name=None):
        """
        hidden layer between lstm layer and logits
        :param lstm_outputs: [batch_size, num_steps, emb_size] 
        :return: [batch_size, num_steps, num_tags]
        """
        with tf.variable_scope("project" if not name else name):
            with tf.variable_scope("hidden"):
                W = tf.get_variable("W", shape=[self.hidden_unit * 2, self.hidden_unit],
                                    dtype=tf.float32, initializer=self.initializers.xavier_initializer())

                b = tf.get_variable("b", shape=[self.hidden_unit], dtype=tf.float32,
                                    initializer=tf.zeros_initializer())
                output = tf.reshape(lstm_outputs, shape=[-1, self.hidden_unit * 2])
                hidden = tf.tanh(tf.nn.xw_plus_b(output, W, b))

            # project to score of tags
            with tf.variable_scope("logits"):
                W = tf.get_variable("W", shape=[self.hidden_unit, self.num_labels],
                                    dtype=tf.float32, initializer=self.initializers.xavier_initializer())

                b = tf.get_variable("b", shape=[self.num_labels], dtype=tf.float32,
                                    initializer=tf.zeros_initializer())

                pred = tf.nn.xw_plus_b(hidden, W, b)
            return tf.reshape(pred, [-1, self.seq_length, self.num_labels])
Author: chongp, Project: Name-Entity-Recognition, Lines: 26, Source: lstm_crf_layer.py

Example 2: initialize_parameters

def initialize_parameters():
    """
    Initializes parameters to build a neural network with tensorflow. The shapes are:
                        W1 : [n_hidden_1, n_input]
                        b1 : [n_hidden_1, 1]
                        W2 : [n_hidden_2, n_hidden_1]
                        b2 : [n_hidden_2, 1]
                        W3 : [n_classes, n_hidden_2]
                        b3 : [n_classes, 1]
    """
    tf.set_random_seed(42)
    # First hidden layer
    W1 = tf.get_variable("W1", [n_hidden_1, n_input], initializer=tf.contrib.layers.xavier_initializer(seed=42))
    b1 = tf.get_variable("b1", [n_hidden_1, 1], initializer=tf.zeros_initializer())

    # Second hidden layer
    W2 = tf.get_variable("W2", [n_hidden_2, n_hidden_1], initializer=tf.contrib.layers.xavier_initializer(seed=42))
    b2 = tf.get_variable("b2", [n_hidden_2, 1], initializer=tf.zeros_initializer())

    # Output layer
    W3 = tf.get_variable("W3", [n_classes, n_hidden_2], initializer=tf.contrib.layers.xavier_initializer(seed=42))
    b3 = tf.get_variable("b3", [n_classes, 1], initializer=tf.zeros_initializer())

    # Store initializations as a dictionary of parameters
    parameters = {
        "W1": W1,
        "b1": b1,
        "W2": W2,
        "b2": b2,
        "W3": W3,
        "b3": b3
    }

    return parameters
Author: vaibhawvipul, Project: Machine-Learning, Lines: 34, Source: fashion_mnist_tf.py

Example 3: auxnet

def auxnet(embedding, size, dropout_rate=.5, std=.2, is_training=True, scope='auxnet'):
    """
    Defines the fully connected layers for the auxnet:
        -- so far, one layer to batch norm to relu to dropout
    Args:
        embedding: the histogram embedding matrix
        size: int size of each hidden layer
        dropout_rate: rate to drop out (usually .5)
        std: standard deviation used for the initializer
        is_training: bool -- used to turn off dropout for inference
        scope: name for the op/tensor
    Returns:
        fc: the fully connected network as a tensor of size (p x size)
    """
    # lower/upper bounds for the uniform initializer
    a, b = 0 - np.sqrt(3) * std, np.sqrt(3) * std

    with tf.variable_scope(scope, 'Aux'):
        # note: if you use dropout and batch norm there is no need for a regularizer
        with slim.arg_scope([slim.fully_connected],
                            weights_initializer=tf.random_uniform_initializer(minval=a, maxval=b),
                            weights_regularizer=slim.l2_regularizer(.005),
                            activation_fn=tf.nn.relu):
            fc = slim.fully_connected(embedding, size,
                                      biases_initializer=tf.zeros_initializer(),
                                      activation_fn=None,
                                      scope='hidden')

            fc = slim.batch_norm(fc, center=True,
                                 scale=True,
                                 zero_debias_moving_mean=True,
                                 is_training=is_training,
                                 scope='bn')

            # mod option: add another layer here:
            fc = tf.nn.relu(fc, 'relu')

            # now apply the dropout:
            fc = slim.dropout(fc, dropout_rate,
                              is_training=is_training,
                              scope='dropout')

            # add another layer:
            fc = slim.fully_connected(fc, size, biases_initializer=tf.zeros_initializer(),
                                      activation_fn=tf.nn.tanh, scope="hidden2")

    return fc
Author: ljstrnadiii, Project: DietNet, Lines: 60, Source: network.py

Example 4: batch_norm

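# Note: create_var is a helper defined elsewhere in ResNet50.py (presumably a thin
# wrapper around tf.get_variable), and moving_averages here is assumed to come from
# tensorflow.python.training.moving_averages.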
def batch_norm(x, decay=0.999, epsilon=1e-03, is_training=True,
               scope="scope"):
    x_shape = x.get_shape()
    num_inputs = x_shape[-1]
    reduce_dims = list(range(len(x_shape) - 1))
    with tf.variable_scope(scope):
        beta = create_var("beta", [num_inputs,],
                               initializer=tf.zeros_initializer())
        gamma = create_var("gamma", [num_inputs,],
                                initializer=tf.ones_initializer())
        # for inference
        moving_mean = create_var("moving_mean", [num_inputs,],
                                 initializer=tf.zeros_initializer(),
                                 trainable=False)
        moving_variance = create_var("moving_variance", [num_inputs],
                                     initializer=tf.ones_initializer(),
                                     trainable=False)
    if is_training:
        mean, variance = tf.nn.moments(x, axes=reduce_dims)
        update_move_mean = moving_averages.assign_moving_average(moving_mean,
                                                mean, decay=decay)
        update_move_variance = moving_averages.assign_moving_average(moving_variance,
                                                variance, decay=decay)
        tf.add_to_collection(tf.GraphKeys.UPDATE_OPS, update_move_mean)
        tf.add_to_collection(tf.GraphKeys.UPDATE_OPS, update_move_variance)
    else:
        mean, variance = moving_mean, moving_variance
    return tf.nn.batch_normalization(x, mean, variance, beta, gamma, epsilon)
Author: kaka7, Project: DeepLearning_tutorials, Lines: 28, Source: ResNet50.py
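The moving averages above only update when the ops collected into tf.GraphKeys.UPDATE_OPS actually run. A minimal training-step sketch (the loss tensor and choice of optimizer are hypothetical, but the control-dependency pattern is the standard TF1 idiom):

update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
with tf.control_dependencies(update_ops):
    # hypothetical loss; any optimizer works the same way
    train_op = tf.train.AdamOptimizer(1e-3).minimize(loss)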

Example 5: mlp_param_init

def mlp_param_init(dim, scheme = 'zero'):
    """
    @note: Initializes parameters to build a multi-layer perceptron with tensorflow.
        The shapes are:
            W1: [n1, n_x]
            B1: [n1, 1]
            W2: [n2, n1]
            B2: [n2, 1]
            ...
            Wl: [n_y, nl-1]
            Bl: [n_y, 1]
        
    @param dim: the number of unit in each level -- dim = [n_x, n1, n2, ..., n(l-1), n_y]    
    @param scheme: the initial scheme of Weight, including {'zero', 'xavier'}
    
    @return: parameters -- a dictionary of tensors containing W1, b1, W2, b2, W3, b3
    """
    parameters = {}
    l = len(dim)  # the layers' count
    
    # parameter initializing (using xavier_initializer for weight)
    # (from 0 - input to l-1 - output)
    for i in range(1, l):
        if scheme == 'xavier':
            parameters['W'+str(i)] = tf.get_variable('W'+str(i), [dim[i], dim[i-1]], \
                                                     initializer = tf.contrib.layers.xavier_initializer())
        else:
            parameters['W'+str(i)] = tf.get_variable('W'+str(i), [dim[i], dim[i-1]], \
                                                     initializer = tf.zeros_initializer())     
        parameters['B'+str(i)] = tf.get_variable('B'+str(i), [dim[i], 1], \
                                                 initializer = tf.zeros_initializer())

    return parameters
Author: LiuYouliang, Project: Practice-of-Machine-Learning, Lines: 33, Source: mlp_demo.py

Example 6: initialize_parameters

def initialize_parameters():
    """
    Initializes parameters to build a neural network with tensorflow. The shapes are:
                        W1 : [25, 12288]
                        b1 : [25, 1]
                        W2 : [12, 25]
                        b2 : [12, 1]
                        W3 : [6, 12]
                        b3 : [6, 1]
    
    Returns:
    parameters -- a dictionary of tensors containing W1, b1, W2, b2, W3, b3
    """
    
    tf.set_random_seed(1)                   # so that your "random" numbers match ours
        
    ### START CODE HERE ### (approx. 6 lines of code)
    W1 = tf.get_variable("W1", [25,12288], initializer = tf.contrib.layers.xavier_initializer(seed = 1))
    b1 = tf.get_variable("b1", [25,1], initializer = tf.zeros_initializer())
    W2 = tf.get_variable("W2", [12,25], initializer = tf.contrib.layers.xavier_initializer(seed = 1))
    b2 = tf.get_variable("b2", [12,1], initializer = tf.zeros_initializer())
    W3 = tf.get_variable("W3", [6,12], initializer = tf.contrib.layers.xavier_initializer(seed = 1))
    b3 = tf.get_variable("b3", [6,1], initializer = tf.zeros_initializer())
    ### END CODE HERE ###

    parameters = {"W1": W1,
                  "b1": b1,
                  "W2": W2,
                  "b2": b2,
                  "W3": W3,
                  "b3": b3}
    
    return parameters
Author: shriavi, Project: datasciencecoursera, Lines: 33, Source: Tensorflow+Tutorial.py
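These are TF1 graph-mode variables, so nothing is materialized until a session initializes them. A minimal sketch of inspecting the returned dictionary (standard TF1 idiom, not part of the original tutorial):

tf.reset_default_graph()
parameters = initialize_parameters()
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    # b1 was created with tf.zeros_initializer, so this prints zeros
    print("b1[:5] =", sess.run(parameters["b1"]).ravel()[:5])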

Example 7: query_encoder

    def query_encoder(self, v_q, is_training=True, scope="query_encoder"):
        """Encode query image feature

        Args:
            v_q: query image feature (batch_size, img_dim)
            is_training: True - training model / False - inference model
        Returns:
            phi_q: query vector
            v_qr: reconstructed v_q
        """
        with tf.variable_scope(scope):
            h1 = tf.contrib.layers.fully_connected(inputs=v_q,
                num_outputs=256,
                activation_fn=tf.nn.tanh,
                weights_regularizer=tf.contrib.layers.l2_regularizer(self.weight_decay),
                biases_initializer=tf.zeros_initializer())
            phi_q = tf.contrib.layers.fully_connected(inputs=h1,
                num_outputs=128,
                activation_fn=tf.nn.tanh,
                weights_regularizer=tf.contrib.layers.l2_regularizer(self.weight_decay),
                biases_initializer=tf.zeros_initializer())
            h2 = tf.contrib.layers.fully_connected(inputs=phi_q,
                num_outputs=256,
                activation_fn=tf.nn.tanh,
                weights_regularizer=tf.contrib.layers.l2_regularizer(self.weight_decay),
                biases_initializer=tf.zeros_initializer())
            v_qr = tf.contrib.layers.fully_connected(inputs=h2,
                num_outputs=self.img_dim,
                activation_fn=tf.nn.tanh,
                weights_regularizer=tf.contrib.layers.l2_regularizer(self.weight_decay),
                biases_initializer=tf.zeros_initializer())
            return phi_q, v_qr
Author: juneyang666, Project: clean-net, Lines: 32, Source: model.py

Example 8: conv2d_zeros

def conv2d_zeros(x,
                 width,
                 filter_size=[3, 3],
                 stride=[1, 1],
                 pad="SAME",
                 logscale_factor=3,
                 skip=1,
                 edge_bias=True,
                 name=None):
    with tf.variable_scope(name, "conv2d"):
        if edge_bias and pad == "SAME":
            x = add_edge_padding(x, filter_size)
            pad = 'VALID'

        n_in = int(x.get_shape()[3])
        stride_shape = [1] + stride + [1]
        filter_shape = filter_size + [n_in, width]
        w = tf.get_variable("W", filter_shape, tf.float32,
                            initializer=tf.zeros_initializer())
        if skip == 1:
            x = tf.nn.conv2d(x, w, stride_shape, pad, data_format='NHWC')
        else:
            assert stride[0] == 1 and stride[1] == 1
            x = tf.nn.atrous_conv2d(x, w, skip, pad)
        # bias initialized to zero, consistent with the function name
        x += tf.get_variable("b", [1, 1, 1, width],
                             initializer=tf.zeros_initializer())
        x *= tf.exp(tf.get_variable("logs",
                                    [1, width], initializer=tf.zeros_initializer()) * logscale_factor)
    return x
Author: gdahia, Project: DLF, Lines: 29, Source: ops.py

Example 9: basic_fc_discriminator

def basic_fc_discriminator(x):
    """Compute discriminator score for a batch of input images.
    
    Inputs:
    - x: TensorFlow Tensor of flattened input images, shape [batch_size, 784]
    
    Returns:
    TensorFlow Tensor with shape [batch_size, 1], containing the score 
    for an image being real for each input image.
    """

    with tf.variable_scope("bfcdiscriminator"):

        W1 = tf.get_variable("W1", (784, 256))
        b1 = tf.get_variable("b1", (256, ), initializer=tf.zeros_initializer())
        W2 = tf.get_variable("W2", (256, 256))
        b2 = tf.get_variable("b2", (256, ), initializer=tf.zeros_initializer())
        W3 = tf.get_variable("W3", (256, 1), )
        b3 = tf.get_variable("b3", (1, ), initializer=tf.zeros_initializer())

        H1 = tf.matmul(x, W1) + b1
        H1L = leaky_relu(H1)
        H2 = tf.matmul(H1L, W2) + b2
        H2L = leaky_relu(H2)
        logits = tf.matmul(H2L, W3) + b3

        return logits
Author: haolang9527, Project: MyDeepLearning, Lines: 27, Source: TFGAN.py
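leaky_relu above is a helper defined elsewhere in TFGAN.py; a typical definition would look like the following (an assumption matching the common formulation, with a hypothetical slope value):

def leaky_relu(x, alpha=0.01):
    # max(x, alpha * x): identity for positive inputs, small slope for negative ones
    return tf.maximum(x, alpha * x)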

Example 10: project_layer

    def project_layer(self, lstm_outputs, name=None):
        """
        """
        with tf.variable_scope("project" if not name else name):
            with tf.variable_scope("hidden"):
                w_tanh = tf.get_variable("w_tanh", shape=[self.lstm_dim * 2, self.lstm_dim],
                                    dtype=tf.float32, initializer=self.initializer, regularizer=tf.contrib.layers.l2_regularizer(0.001))

                b_tanh = tf.get_variable("b_tanh", shape=[self.lstm_dim], dtype=tf.float32,
                                    initializer=tf.zeros_initializer())

                output = tf.reshape(lstm_outputs, shape=[-1, self.lstm_dim * 2])
                hidden = tf.tanh(tf.nn.xw_plus_b(output, w_tanh, b_tanh))

                drop_hidden = tf.nn.dropout(hidden, self.dropout)


            # project to score of tags
            with tf.variable_scope("output"):
                w_out = tf.get_variable("w_out", shape=[self.lstm_dim, self.num_tags],
                                    dtype=tf.float32, initializer=self.initializer, regularizer=tf.contrib.layers.l2_regularizer(0.001))

                b_out = tf.get_variable("b_out", shape=[self.num_tags], dtype=tf.float32,
                                    initializer=tf.zeros_initializer())
                pred = tf.nn.xw_plus_b(drop_hidden, w_out, b_out, name="pred")
            self.logits = tf.reshape(pred, [-1, self.num_steps, self.num_tags], name="logits")
Author: forin-xyz, Project: FoolNLTK, Lines: 26, Source: bi_lstm.py

Example 11: create_slots

  def create_slots(self, var):
    """Create the factorized Adam accumulators for diet variables."""
    params = self.params
    shape = var.get_shape().as_list()

    if not hasattr(params, "slots"):
      params.slots = defaultdict(dict)

    name = var.op.name
    slots = params.slots[name]

    if params.factored_second_moment_accumulator and len(shape) == 2:
      slots["adam_vr"] = tf.get_variable(
          name + "_adam_vr", [shape[0], 1],
          trainable=False,
          initializer=tf.zeros_initializer())
      slots["adam_vc"] = tf.get_variable(
          name + "_adam_vc", [1, shape[1]],
          trainable=False,
          initializer=tf.zeros_initializer())
    else:
      slots["adam_v"] = tf.get_variable(
          name + "_adam_v",
          shape,
          trainable=False,
          initializer=tf.zeros_initializer())
    if params.beta1 != 0.0:
      slots["adam_m"] = tf.get_variable(
          name + "_adam_m",
          shape,
          trainable=False,
          initializer=tf.zeros_initializer())
Author: kltony, Project: tensor2tensor, Lines: 32, Source: diet.py

Example 12: initializeParameters

    def initializeParameters(self, m, n):
        """
        Arguments:
            m -- number of users
            n -- number of items

        Returns:
            parameters -- parameters['b'], global bias, scalar
                          parameters['u'], user biases, shape (m, 1)
                          parameters['d'], item biases, shape (1, n)
                          parameters['P'], user feature matrix, shape (m, K)
                          parameters['Q'], item feature matrix, shape (n, K)
        """
        k = self.K
        
        parameters = {}
        parameters['b'] = tf.get_variable(name='b', dtype=tf.float64, shape=[],
                                          initializer=tf.zeros_initializer())

        parameters['u'] = tf.get_variable(name='u', dtype=tf.float64, shape=[m, 1],
                                          initializer=tf.zeros_initializer())

        parameters['d'] = tf.get_variable(name='d', dtype=tf.float64, shape=[1, n],
                                          initializer=tf.zeros_initializer())

        parameters['P'] = tf.get_variable(name='P', dtype=tf.float64, shape=[m, k],
                                          initializer=tf.random_normal_initializer())

        parameters['Q'] = tf.get_variable(name='Q', dtype=tf.float64, shape=[n, k],
                                          initializer=tf.random_normal_initializer())

        return parameters
Author: cheng-w-liu, Project: ML_algos, Lines: 32, Source: matrix_factorization_in_TensorFlow.py
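A sketch of how these parameters typically combine into a rating prediction in bias-augmented matrix factorization (inferred from the shapes; the original file may compute it differently):

def predict_ratings(parameters):
    # global bias + user bias + item bias + latent interaction;
    # broadcasting (m,1) and (1,n) against (m,n) yields the full rating matrix
    return (parameters['b']
            + parameters['u']
            + parameters['d']
            + tf.matmul(parameters['P'], parameters['Q'], transpose_b=True))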

Example 13: initialize_parameters

def initialize_parameters():
    '''
    Initializes the parameters of the neural network. The shapes are:
        W1: [25, 12288]
        b1: [25, 1]
        W2: [12, 25]
        b2: [12, 1]
        W3: [6, 12]
        b3: [6, 1]
    :return:
        parameters - a dictionary containing the W and b tensors
    '''

    tf.set_random_seed(1)  # fix the random seed

    W1= tf.get_variable('W1',[25,12288],initializer=tf.contrib.layers.xavier_initializer(seed=1))
    b1 = tf.get_variable('b1',[25,1],initializer=tf.zeros_initializer())
    W2 = tf.get_variable('W2',[12,25],initializer=tf.contrib.layers.xavier_initializer(seed=1))
    b2 = tf.get_variable('b2',[12,1],initializer=tf.zeros_initializer())
    W3 = tf.get_variable('W3',[6,12],initializer=tf.contrib.layers.xavier_initializer(seed=1))
    b3 = tf.get_variable('b3',[6,1],initializer=tf.zeros_initializer())

    parameters = {
        'W1':W1,
        'b1':b1,
        'W2':W2,
        'b2':b2,
        'W3':W3,
        'b3':b3
    }
    return parameters
Author: 491811030, Project: hellow-world, Lines: 31, Source: work_2_firsttfnetwork.py

Example 14: Discriminator_with_Vanilla

def Discriminator_with_Vanilla(input_Pattern, hidden_Unit_Size = 128, label_Unit_Size = 10, is_Training = True, reuse = False):
    with tf.variable_scope('discriminator', reuse=reuse):
        hidden_Activation = tf.layers.dense(
            inputs = input_Pattern,
            units = hidden_Unit_Size,
            activation = tf.nn.relu,
            use_bias = True,
            kernel_initializer = tf.truncated_normal_initializer(stddev=0.1),
            bias_initializer = tf.zeros_initializer(),
            name = "hidden"
            )
        discrimination_Logits = tf.layers.dense(
            inputs = hidden_Activation,
            units = 1,
            activation = None,
            use_bias = True,
            kernel_initializer = tf.truncated_normal_initializer(stddev=0.1),
            bias_initializer = tf.zeros_initializer(),
            name = "discrimination"
            )
        discrimination_Activation = tf.nn.sigmoid(discrimination_Logits)

        label_Logits = tf.layers.dense(
            inputs = hidden_Activation,
            units = label_Unit_Size,
            activation = None,
            use_bias = True,
            kernel_initializer = tf.truncated_normal_initializer(stddev=0.1),
            bias_initializer = tf.zeros_initializer(),
            name = "label"
            )
        label_Activation = tf.nn.softmax(label_Logits)

        return discrimination_Logits, label_Logits, discrimination_Activation, label_Activation
Author: CODEJIN, Project: GAN, Lines: 34, Source: Customized_Layers.py

Example 15: bacthnorm

def bacthnorm(inputs, scope, epsilon=1e-05, momentum=0.99, is_training=True):
    inputs_shape = inputs.get_shape().as_list()  # shape of the input tensor
    params_shape = inputs_shape[-1:]  # size of the channel (parameter) dimension
    axis = list(range(len(inputs_shape) - 1))

    with tf.variable_scope(scope):
        beta = create_variable("beta", params_shape,
                               initializer=tf.zeros_initializer())
        gamma = create_variable("gamma", params_shape,
                                initializer=tf.ones_initializer())
        # moving mean: not trainable, used for inference
        moving_mean = create_variable("moving_mean", params_shape,
                            initializer=tf.zeros_initializer(), trainable=False)
        # moving variance: not trainable, used for inference
        moving_variance = create_variable("moving_variance", params_shape,
                            initializer=tf.ones_initializer(), trainable=False)
    if is_training:
        mean, variance = tf.nn.moments(inputs, axes=axis)  # batch mean and variance
        # update the moving averages, keeping part of the previous value:
        # x_t = a * x_(t-1) + (1 - a) * x_now
        update_move_mean = moving_averages.assign_moving_average(moving_mean,
                                                mean, decay=momentum)
        update_move_variance = moving_averages.assign_moving_average(moving_variance,
                                                variance, decay=momentum)
        tf.add_to_collection(UPDATE_OPS_COLLECTION, update_move_mean)
        tf.add_to_collection(UPDATE_OPS_COLLECTION, update_move_variance)
    else:
        mean, variance = moving_mean, moving_variance
    return tf.nn.batch_normalization(inputs, mean, variance, beta, gamma, epsilon)
Author: dyz-zju, Project: MVision, Lines: 28, Source: MobileNet_tf.py


Note: the tensorflow.zeros_initializer examples in this article were collected by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code fragments were selected from open-source projects contributed by various developers; copyright in the source code belongs to the original authors. For distribution and use, refer to the corresponding project's license. Do not reproduce without permission.