

Python tensorflow.constant_initializer Method Code Examples

This article collects typical usage examples of the Python tensorflow.constant_initializer method. If you are wondering what tensorflow.constant_initializer does, how to call it, or what real-world usage looks like, the curated code examples below may help. You can also explore further usage examples from the tensorflow package that this method belongs to.


The following presents 15 code examples of the tensorflow.constant_initializer method, sorted by popularity by default.
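Before the examples, here is a minimal, self-contained sketch of the core pattern they all share: passing tf.constant_initializer to tf.get_variable (or to a layer's bias/weights initializer argument). It targets the TensorFlow 1.x API used throughout this article; the variable names, shapes, and values are illustrative only, not taken from any of the projects below.

import tensorflow as tf

# Fill every element of a variable with the same scalar value.
b = tf.get_variable(
    "b",
    shape=[4],
    initializer=tf.constant_initializer(0.1))

# A list (or numpy array) whose element count matches the variable shape
# can also be passed, as in Example 1 below.
w = tf.get_variable(
    "w",
    shape=[3],
    initializer=tf.constant_initializer([0.1, -0.2, -0.1]))

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    print(sess.run(b))  # approximately [0.1 0.1 0.1 0.1]
    print(sess.run(w))  # approximately [ 0.1 -0.2 -0.1]

The same call also appears below wrapped by higher-level layers, for example bias_initializer=tf.constant_initializer(-1.0) in tf.layers.dense (Example 3) or biases_initializer=tf.constant_initializer(0.1) in slim.fully_connected (Example 4).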

Example 1: test_adam

# Required module import: import tensorflow [as alias]
# Alternatively: from tensorflow import constant_initializer [as alias]
def test_adam(self):
        with self.test_session() as sess:
            w = tf.get_variable(
                "w",
                shape=[3],
                initializer=tf.constant_initializer([0.1, -0.2, -0.1]))
            x = tf.constant([0.4, 0.2, -0.5])
            loss = tf.reduce_mean(tf.square(x - w))
            tvars = tf.trainable_variables()
            grads = tf.gradients(loss, tvars)
            global_step = tf.train.get_or_create_global_step()
            optimizer = optimization.AdamWeightDecayOptimizer(learning_rate=0.2)
            train_op = optimizer.apply_gradients(zip(grads, tvars), global_step)
            init_op = tf.group(tf.global_variables_initializer(),
                               tf.local_variables_initializer())
            sess.run(init_op)
            for _ in range(100):
                sess.run(train_op)
            w_np = sess.run(w)
            self.assertAllClose(w_np.flat, [0.4, 0.2, -0.5], rtol=1e-2, atol=1e-2) 
Developer ID: Socialbird-AILab, Project: BERT-Classification-Tutorial, Lines of code: 22, Source file: optimization_test.py

Example 2: wrap_variable

# Required module import: import tensorflow [as alias]
# Alternatively: from tensorflow import constant_initializer [as alias]
def wrap_variable(self, var):
        """wrap layer.w into variables"""
        val = self.lay.w.get(var, None)
        if val is None:
            shape = self.lay.wshape[var]
            args = [0., 1e-2, shape]
            if 'moving_mean' in var:
                val = np.zeros(shape)
            elif 'moving_variance' in var:
                val = np.ones(shape)
            else:
                val = np.random.normal(*args)
            self.lay.w[var] = val.astype(np.float32)
            self.act = 'Init '
        if not self.var: return

        val = self.lay.w[var]
        self.lay.w[var] = tf.constant_initializer(val)
        if var in self._SLIM: return
        with tf.variable_scope(self.scope):
            self.lay.w[var] = tf.get_variable(var,
                shape = self.lay.wshape[var],
                dtype = tf.float32,
                initializer = self.lay.w[var]) 
Developer ID: AmeyaWagh, Project: Traffic_sign_detection_YOLO, Lines of code: 26, Source file: baseop.py

Example 3: highwaynet

# Required module import: import tensorflow [as alias]
# Alternatively: from tensorflow import constant_initializer [as alias]
def highwaynet(inputs, num_units=None, scope="highwaynet", reuse=None):
    '''Highway networks, see https://arxiv.org/abs/1505.00387

    Args:
      inputs: A 3D tensor of shape [N, T, W].
      num_units: An int or `None`. Specifies the number of units in the highway layer
             or uses the input size if `None`.
      scope: Optional scope for `variable_scope`.
      reuse: Boolean, whether to reuse the weights of a previous layer
        by the same name.

    Returns:
      A 3D tensor of shape [N, T, W].
    '''
    if not num_units:
        num_units = inputs.get_shape()[-1]

    with tf.variable_scope(scope, reuse=reuse):
        H = tf.layers.dense(inputs, units=num_units, activation=tf.nn.relu, name="dense1")
        T = tf.layers.dense(inputs, units=num_units, activation=tf.nn.sigmoid,
                            bias_initializer=tf.constant_initializer(-1.0), name="dense2")
        outputs = H * T + inputs * (1. - T)
    return outputs 
Developer ID: Kyubyong, Project: dc_tts, Lines of code: 25, Source file: modules.py

Example 4: cifarnet_arg_scope

# Required module import: import tensorflow [as alias]
# Alternatively: from tensorflow import constant_initializer [as alias]
def cifarnet_arg_scope(weight_decay=0.004):
  """Defines the default cifarnet argument scope.

  Args:
    weight_decay: The weight decay to use for regularizing the model.

  Returns:
    An `arg_scope` to use for the cifarnet model.
  """
  with slim.arg_scope(
      [slim.conv2d],
      weights_initializer=tf.truncated_normal_initializer(stddev=5e-2),
      activation_fn=tf.nn.relu):
    with slim.arg_scope(
        [slim.fully_connected],
        biases_initializer=tf.constant_initializer(0.1),
        weights_initializer=trunc_normal(0.04),
        weights_regularizer=slim.l2_regularizer(weight_decay),
        activation_fn=tf.nn.relu) as sc:
      return sc 
Developer ID: ringringyi, Project: DOTA_models, Lines of code: 22, Source file: cifarnet.py

Example 5: _initialize_gru_cell

# Required module import: import tensorflow [as alias]
# Alternatively: from tensorflow import constant_initializer [as alias]
def _initialize_gru_cell(self, num_units):
    """Initializes a GRU cell.

    The Variables of the GRU cell are initialized in a way that exactly matches
    the skip-thoughts paper: recurrent weights are initialized from random
    orthonormal matrices and non-recurrent weights are initialized from random
    uniform matrices.

    Args:
      num_units: Number of output units.

    Returns:
      cell: An instance of RNNCell with variable initializers that match the
        skip-thoughts paper.
    """
    return gru_cell.LayerNormGRUCell(
        num_units,
        w_initializer=self.uniform_initializer,
        u_initializer=random_orthonormal_initializer,
        b_initializer=tf.constant_initializer(0.0)) 
Developer ID: ringringyi, Project: DOTA_models, Lines of code: 22, Source file: skip_thoughts_model.py

Example 6: conv_linear

# Required module import: import tensorflow [as alias]
# Alternatively: from tensorflow import constant_initializer [as alias]
def conv_linear(args, kw, kh, nin, nout, rate, do_bias, bias_start, prefix):
  """Convolutional linear map."""
  if not isinstance(args, (list, tuple)):
    args = [args]
  with tf.variable_scope(prefix):
    with tf.device("/cpu:0"):
      k = tf.get_variable("CvK", [kw, kh, nin, nout])
    if len(args) == 1:
      arg = args[0]
    else:
      arg = tf.concat(axis=3, values=args)
    res = tf.nn.convolution(arg, k, dilation_rate=(rate, 1), padding="SAME")
    if not do_bias: return res
    with tf.device("/cpu:0"):
      bias_term = tf.get_variable(
          "CvB", [nout], initializer=tf.constant_initializer(bias_start))
    bias_term = tf.reshape(bias_term, [1, 1, 1, nout])
    return res + bias_term 
Developer ID: ringringyi, Project: DOTA_models, Lines of code: 20, Source file: neural_gpu.py

Example 7: __init__

# Required module import: import tensorflow [as alias]
# Alternatively: from tensorflow import constant_initializer [as alias]
def __init__(self, initializer=Bias(0), name=None):
    """Initializes Bias block.

    The |initializer| parameter has two special cases.

    1. If initializer is None, then this block works as a PassThrough.
    2. If initializer is a Bias class object, then tf.constant_initializer is
       used with the stored value.

    Args:
      initializer: An initializer for the bias variable.
      name: Name of this block.
    """
    super(BiasAdd, self).__init__(name)

    with self._BlockScope():
      if isinstance(initializer, Bias):
        self._initializer = tf.constant_initializer(value=initializer.value)
      else:
        self._initializer = initializer

      self._bias = None 
Developer ID: ringringyi, Project: DOTA_models, Lines of code: 24, Source file: blocks_std.py

Example 8: __init__

# Required module import: import tensorflow [as alias]
# Alternatively: from tensorflow import constant_initializer [as alias]
def __init__(self, learning_rate, clip_norm=5,
               policy_weight=1.0, critic_weight=0.1,
               tau=0.1, gamma=1.0, rollout=10,
               eps_lambda=0.0, clip_adv=None):
    super(ActorCritic, self).__init__(learning_rate, clip_norm=clip_norm)
    self.policy_weight = policy_weight
    self.critic_weight = critic_weight
    self.tau = tau
    self.gamma = gamma
    self.rollout = rollout
    self.clip_adv = clip_adv

    self.eps_lambda = tf.get_variable(  # TODO: need a better way
        'eps_lambda', [], initializer=tf.constant_initializer(eps_lambda))
    self.new_eps_lambda = tf.placeholder(tf.float32, [])
    self.assign_eps_lambda = self.eps_lambda.assign(
        0.95 * self.eps_lambda + 0.05 * self.new_eps_lambda) 
Developer ID: ringringyi, Project: DOTA_models, Lines of code: 19, Source file: objective.py

Example 9: __init__

# Required module import: import tensorflow [as alias]
# Alternatively: from tensorflow import constant_initializer [as alias]
def __init__(self):

        self.session = tf.Session(config=tf.ConfigProto(allow_soft_placement=True,log_device_placement=False))
        self.actor = networks.Actor_MLP(scope="actor1",units=[settings.S_DIM,100,settings.A_DIM],activations=[None,'relu','tanh'],trainable=True)
        self.old_actor = networks.Actor_MLP(scope="actor0",units=[settings.S_DIM,100,settings.A_DIM],activations=[None,'relu','tanh'],trainable=False)
        self.critic =  networks.Critic_MLP(scope="critic1",units=[settings.S_DIM,100,1],activations=[None,'relu',None],trainable=True)

        self.state_tf = tf.placeholder(dtype=tf.float32,shape=[None,settings.S_DIM])
        self.action_tf = tf.placeholder(dtype=tf.float32,shape=[None,settings.A_DIM])
        self.return_tf = tf.placeholder(dtype=tf.float32,shape=[None,1]) 
        self.adv_tf = tf.placeholder(dtype=tf.float32,shape=[None,1]) 
        
        # global steps to keep track of training
        self.actor_step = tf.get_variable('actor_global_step', [], initializer=tf.constant_initializer(0), trainable=False)
        self.critic_step = tf.get_variable('critic_global_step', [], initializer=tf.constant_initializer(0), trainable=False)

        # build computation graphs
        self.actor.build_graph(self.state_tf,self.actor_step) 
        self.old_actor.build_graph(self.state_tf,0)
        self.critic.build_graph(self.state_tf,self.critic_step)
        self.build_graph() 
Developer ID: utra-robosoccer, Project: soccer-matlab, Lines of code: 23, Source file: agents.py

Example 10: make_encoder

# Required module import: import tensorflow [as alias]
# Alternatively: from tensorflow import constant_initializer [as alias]
def make_encoder(self, state, z_size, scope, n_layers, hid_size):
        """
            ### PROBLEM 3
            ### YOUR CODE HERE

            args:
                state: tf variable
                z_size: output dimension of the encoder network
                scope: scope name
                n_layers: number of layers of the encoder network
                hid_size: hidden dimension of encoder network

            TODO:
                1. z_mean: the output of a neural network that takes the state as input,
                    has output dimension z_size, n_layers layers, and hidden 
                    dimension hid_size
                2. z_logstd: a trainable variable, initialized to 0
                    shape (z_size,)

            Hint: use build_mlp
        """
        z_mean = build_mlp(state, z_size, scope, n_layers, hid_size)
        z_logstd = tf.get_variable('z_logstd', shape=z_size, trainable=True,
                                   initializer=tf.constant_initializer(value=0.))
        return tfp.distributions.MultivariateNormalDiag(loc=z_mean, scale_diag=tf.exp(z_logstd)) 
Developer ID: xuwd11, Project: cs294-112_hws, Lines of code: 27, Source file: density_model.py

Example 11: init_vq_bottleneck

# Required module import: import tensorflow [as alias]
# Alternatively: from tensorflow import constant_initializer [as alias]
def init_vq_bottleneck(bottleneck_size, hidden_size):
  """Get lookup table for VQ bottleneck."""
  means = tf.get_variable(
      name="means",
      shape=[bottleneck_size, hidden_size],
      initializer=tf.uniform_unit_scaling_initializer())
  ema_count = tf.get_variable(
      name="ema_count",
      shape=[bottleneck_size],
      initializer=tf.constant_initializer(0),
      trainable=False)
  with tf.colocate_with(means):
    ema_means = tf.get_variable(
        name="ema_means",
        initializer=means.initialized_value(),
        trainable=False)

  return means, ema_means, ema_count 
Developer ID: akzaidi, Project: fine-lm, Lines of code: 20, Source file: transformer_nat.py

Example 12: conv

# Required module import: import tensorflow [as alias]
# Alternatively: from tensorflow import constant_initializer [as alias]
def conv(x, scope, *, nf, rf, stride, pad='VALID', init_scale=1.0, data_format='NHWC'):
    if data_format == 'NHWC':
        channel_ax = 3
        strides = [1, stride, stride, 1]
        bshape = [1, 1, 1, nf]
    elif data_format == 'NCHW':
        channel_ax = 1
        strides = [1, 1, stride, stride]
        bshape = [1, nf, 1, 1]
    else:
        raise NotImplementedError
    nin = x.get_shape()[channel_ax].value
    wshape = [rf, rf, nin, nf]
    with tf.variable_scope(scope):
        w = tf.get_variable("w", wshape, initializer=ortho_init(init_scale))
        b = tf.get_variable("b",bshape, initializer=tf.constant_initializer(0.0))
        return b + tf.nn.conv2d(x, w, strides=strides, padding=pad, data_format=data_format) 
Developer ID: Hwhitetooth, Project: lirpg, Lines of code: 19, Source file: utils.py

Example 13: lstm

# Required module import: import tensorflow [as alias]
# Alternatively: from tensorflow import constant_initializer [as alias]
def lstm(xs, ms, s, scope, nh, init_scale=1.0):
    nbatch, nin = [v.value for v in xs[0].get_shape()]
    nsteps = len(xs)
    with tf.variable_scope(scope):
        wx = tf.get_variable("wx", [nin, nh*4], initializer=ortho_init(init_scale))
        wh = tf.get_variable("wh", [nh, nh*4], initializer=ortho_init(init_scale))
        b = tf.get_variable("b", [nh*4], initializer=tf.constant_initializer(0.0))

    c, h = tf.split(axis=1, num_or_size_splits=2, value=s)
    for idx, (x, m) in enumerate(zip(xs, ms)):
        c = c*(1-m)
        h = h*(1-m)
        z = tf.matmul(x, wx) + tf.matmul(h, wh) + b
        i, f, o, u = tf.split(axis=1, num_or_size_splits=4, value=z)
        i = tf.nn.sigmoid(i)
        f = tf.nn.sigmoid(f)
        o = tf.nn.sigmoid(o)
        u = tf.tanh(u)
        c = f*c + i*u
        h = o*tf.tanh(c)
        xs[idx] = h
    s = tf.concat(axis=1, values=[c, h])
    return xs, s 
Developer ID: Hwhitetooth, Project: lirpg, Lines of code: 25, Source file: utils.py

Example 14: conv2d

# Required module import: import tensorflow [as alias]
# Alternatively: from tensorflow import constant_initializer [as alias]
def conv2d(self, input_, n_filters, k_size, padding='same'):
        if not self.cfg.weight_scale:
            return tf.layers.conv2d(input_, n_filters, k_size, padding=padding)

        n_feats_in = input_.get_shape().as_list()[-1]
        fan_in = k_size * k_size * n_feats_in
        c = tf.constant(np.sqrt(2. / fan_in), dtype=tf.float32)
        kernel_init = tf.random_normal_initializer(stddev=1.)
        w_shape = [k_size, k_size, n_feats_in, n_filters]
        w = tf.get_variable('kernel', shape=w_shape, initializer=kernel_init)
        w = c * w
        strides = [1, 1, 1, 1]
        net = tf.nn.conv2d(input_, w, strides, padding=padding.upper())
        b = tf.get_variable('bias', [n_filters],
                            initializer=tf.constant_initializer(0.))
        net = tf.nn.bias_add(net, b)
        return net 
Developer ID: preritj, Project: progressive_growing_of_GANs, Lines of code: 19, Source file: net.py

Example 15: conv

# Required module import: import tensorflow [as alias]
# Alternatively: from tensorflow import constant_initializer [as alias]
def conv(x, scope, *, nf, rf, stride, pad='VALID', init_scale=1.0, data_format='NHWC', one_dim_bias=False):
    if data_format == 'NHWC':
        channel_ax = 3
        strides = [1, stride, stride, 1]
        bshape = [1, 1, 1, nf]
    elif data_format == 'NCHW':
        channel_ax = 1
        strides = [1, 1, stride, stride]
        bshape = [1, nf, 1, 1]
    else:
        raise NotImplementedError
    bias_var_shape = [nf] if one_dim_bias else [1, nf, 1, 1]
    nin = x.get_shape()[channel_ax].value
    wshape = [rf, rf, nin, nf]
    with tf.variable_scope(scope):
        w = tf.get_variable("w", wshape, initializer=ortho_init(init_scale))
        b = tf.get_variable("b", bias_var_shape, initializer=tf.constant_initializer(0.0))
        if not one_dim_bias and data_format == 'NHWC':
            b = tf.reshape(b, bshape)
        return tf.nn.conv2d(x, w, strides=strides, padding=pad, data_format=data_format) + b 
Developer ID: MaxSobolMark, Project: HardRLWithYoutube, Lines of code: 22, Source file: utils.py


Note: The tensorflow.constant_initializer examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets are selected from open-source projects contributed by their respective developers; copyright of the source code remains with the original authors. For distribution and use, please refer to the license of the corresponding project; do not reproduce without permission.