

Python tensorflow.Op Method Code Examples

This article collects typical usage examples of the tensorflow.Op method in Python. If you are wondering how tensorflow.Op is used in practice, or are looking for concrete usage examples, the hand-picked samples below may help. You can also explore further usage examples from the tensorflow module itself.


Eight code examples of the tensorflow.Op method are shown below, sorted by popularity by default.

Example 1: __init__

# Required import: import tensorflow [as alias]
# Or: from tensorflow import Op [as alias]
def __init__(self, inpt, n_hidden, n_output, transfer_hidden=tf.nn.elu, transfer=None,
                 hidden_weight_init=None, hidden_bias_init=None, weight_init=None, bias_init=None,
                 name=None):
        """
        :param inpt: input tensor
        :param n_hidden: scalar or list, number of hidden units
        :param n_output: scalar, number of output units
        :param transfer_hidden: scalar or list, transfers for hidden units. If list, len must be == len(n_hidden).
        :param transfer: tf.Op or None
        """

        self.n_hidden = nest.flatten(n_hidden)
        self.n_output = n_output
        self.hidden_weight_init = hidden_weight_init
        self.hidden_bias_init = hidden_bias_init

        transfer_hidden = nest.flatten(transfer_hidden)
        if len(transfer_hidden) == 1:
            transfer_hidden *= len(self.n_hidden)
        self.transfer_hidden = transfer_hidden

        self.transfer = transfer
        super(MLP, self).__init__(inpt, name, weight_init, bias_init) 
Developer: akosiorek, Project: hart, Lines: 25, Source file: nn.py
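
For orientation, a minimal usage sketch of the class above (the values are made up; it assumes the MLP class from the hart repository is importable and TF 1.x graph mode):

import tensorflow as tf

# Hypothetical input batch of 128-dimensional features.
inpt = tf.placeholder(tf.float32, [None, 128])

# Two elu hidden layers (64 and 32 units) followed by a linear 10-unit output.
mlp = MLP(inpt, n_hidden=[64, 32], n_output=10,
          transfer_hidden=tf.nn.elu, transfer=None)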

Example 2: __init__

# Required import: import tensorflow [as alias]
# Or: from tensorflow import Op [as alias]
def __init__(self, sigma_e, tau, mode="mean", **kwargs):
    """
    Args:
      obs_shape: list. Shape of the observation tensor
      n_actions: int. Number of possible actions
      opt_conf: rltf.optimizers.OptimizerConf. Configuration for the optimizer
      gamma: float. Discount factor
      sigma_e: float. Standard deviation of the noise observation for BLR
      tau: float. Standard deviation for the weight prior in BLR
      huber_loss: bool. Whether to use huber loss or not

    Note: sigma_e, tau and mode are consumed here; the remaining documented
    arguments are forwarded to the parent class constructor via **kwargs.
    """

    super().__init__(**kwargs)

    self.agent_blr  = [BLR(tau=tau, sigma_e=sigma_e, mode=mode)   for _ in range(self.n_actions)]
    self.target_blr = [BLR(tau=tau, sigma_e=sigma_e, mode="mean") for _ in range(self.n_actions)]

    # Custom TF Tensors and Ops
    self._target    = None    # BLR target
    self._phi       = None    # BLR features
    self.train_blr  = None    # Op for updating the BLR weight posterior
    self.reset_blr  = None    # Op for resetting the BLR to initial weights
    self.a_var      = None    # Tensor with BLR var 
Developer: nikonikolov, Project: rltf, Lines: 25, Source file: bdqn.py
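
For reference, under the standard Bayesian linear regression parameterization the two hyperparameters documented above would enter the model roughly as sketched below (made-up values; the exact definitions of the precision terms live in blr.py, see Example 4):

import numpy as np

tau, sigma_e, dim_phi = 1.0, 0.001, 512       # made-up hyperparameters / feature size
prior_precision = np.eye(dim_phi) / tau**2    # weight prior: w ~ N(0, tau^2 I)
noise_precision = 1.0 / sigma_e**2            # observation noise: y ~ N(phi @ w, sigma_e^2)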

Example 3: _build_train_blr_op

# Required import: import tensorflow [as alias]
# Or: from tensorflow import Op [as alias]
def _build_train_blr_op(self, phi, target, name):
    """Build the Bayesian Linear Regression ops and estimates
    Args:
      phi: tf.Tensor, shape: `[None, dim_phi]`. The feature tensor
      target: tf.Tensor, as returned by `self._compute_target()`; shape: `[None]`
      name: str. Name for the resulting grouped train Op
    Returns:
      tf.Op: The train Op for BLR
    """
    target = tf.expand_dims(target, axis=-1)

    def train_blr(blr, a):
      """Given a BLR instance, select only the examples for the corresponding action"""
      mask = tf.expand_dims(tf.equal(self.act_t_ph, a), axis=-1)
      mask = tf.cast(mask, tf.float32)  # out shape: [None, 1]
      X = phi * mask                    # out shape: [None, dim_phi]
      y = target * mask                 # out shape: [None, 1]
      return blr.train(X, y)

    w_updates = [train_blr(blr, i) for i, blr in enumerate(self.agent_blr)]

    return tf.group(*w_updates, name=name) 
Developer: nikonikolov, Project: rltf, Lines: 23, Source file: bdqn.py
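
To make the masking step above concrete, here is a small self-contained NumPy illustration with made-up shapes and values; it mirrors the `X = phi * mask`, `y = target * mask` logic for a single action:

import numpy as np

phi    = np.array([[1., 2.], [3., 4.], [5., 6.]])   # features, shape [3, dim_phi]
target = np.array([[10.], [20.], [30.]])            # targets,  shape [3, 1]
act_t  = np.array([0, 1, 0])                        # action taken at each step

a = 0                                               # BLR head for action 0
mask = (act_t == a)[:, None].astype(np.float32)     # shape [3, 1]
X = phi * mask   # rows belonging to other actions are zeroed: [[1,2],[0,0],[5,6]]
y = target * mask                                   # [[10.], [0.], [30.]]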

Example 4: train

# Required import: import tensorflow [as alias]
# Or: from tensorflow import Op [as alias]
def train(self, X, y):
    """Compute the weight posteriror of Bayesian Linear Regression
    Args:
      X: tf.Tensor, `shape=[None, D]`. The feature matrix
      y: tf.Tensor, `shape=[None, 1]`. The correct outputs
    Returns:
      tf.Op which performs the update operation
    """
    X = self._cast_input(X)
    y = self._cast_input(y)

    # Compute the posterior precision matrix
    w_Lambda = self.w_Lambda + self.beta * tf.matmul(X, X, transpose_a=True)

    # Compute the posterior covariance matrix
    X_norm  = 1.0 / self.sigma * X
    w_Sigma = tf_inv.woodburry_inverse(self.w_Sigma, tf.transpose(X_norm), X_norm)

    error = tf.losses.mean_squared_error(tf.matmul(w_Lambda, w_Sigma), tf.eye(self.w_dim))
    tf.summary.scalar("debug/BLR/inv_error", error)

    # Compute the posterior mean
    w_mu = tf.matmul(w_Sigma, self.beta * tf.matmul(X, y, True) + tf.matmul(self.w_Lambda, self.w_mu))

    return self._tf_update_params(w_mu, w_Sigma, w_Lambda) 
Developer: nikonikolov, Project: rltf, Lines: 27, Source file: blr.py
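
The TF code above implements the standard BLR posterior update. Below is a hedged NumPy sketch of the same update, assuming `beta` is the observation-noise precision 1/sigma_e**2 and the prior precision is I/tau**2 (the actual attribute definitions live elsewhere in blr.py):

import numpy as np

tau, sigma_e, D = 1.0, 0.5, 3
beta = 1.0 / sigma_e**2
X = np.random.randn(5, D)          # features, shape [None, D]
y = np.random.randn(5, 1)          # targets,  shape [None, 1]

Lambda0 = np.eye(D) / tau**2       # prior precision
mu0     = np.zeros((D, 1))         # prior mean

Lambda1 = Lambda0 + beta * X.T @ X                   # posterior precision
Sigma1  = np.linalg.inv(Lambda1)                     # posterior covariance (Woodbury in the TF code)
mu1     = Sigma1 @ (beta * X.T @ y + Lambda0 @ mu0)  # posterior mean

# Same sanity check as the debug/BLR/inv_error summary above:
assert np.allclose(Lambda1 @ Sigma1, np.eye(D), atol=1e-6)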

Example 5: masked_apply

# Required import: import tensorflow [as alias]
# Or: from tensorflow import Op [as alias]
def masked_apply(tensor, op, mask):
    """Applies `op` to tensor only at locations indicated by `mask` and sets the rest to zero.

    Similar to `tensor = tf.where(mask, op(tensor), tf.zeros_like(tensor))`, but it behaves
    correctly when `op(tensor)` is NaN or inf, whereas `tf.where` does not.

    :param tensor: tf.Tensor
    :param op: tf.Op
    :param mask: tf.Tensor with dtype == bool
    :return: tf.Tensor
    """
    chosen = tf.boolean_mask(tensor, mask)
    applied = op(chosen)
    idx = tf.to_int32(tf.where(mask))
    result = tf.scatter_nd(idx, applied, tf.shape(tensor))
    return result 
Developer: akosiorek, Project: attend_infer_repeat, Lines: 18, Source file: prior.py
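
A short usage sketch (TF 1.x graph mode assumed; the values are made up): apply `tf.log` only to the positive entries of a tensor, leaving everything else at zero.

import tensorflow as tf

x = tf.constant([[-1.0, 1.0], [4.0, 0.0]])
mask = x > 0
y = masked_apply(x, tf.log, mask)
# y evaluates to [[0., 0.], [1.386, 0.]]: log is applied at the two positive
# entries (log(1) = 0, log(4) = 1.386) and the masked-out entries stay zero.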

Example 6: sigmoid

# Required import: import tensorflow [as alias]
# Or: from tensorflow import Op [as alias]
import numpy as np


def sigmoid(x):
    return 1 / (1 + np.exp(-x))


# Predefined loss functions
# Each should take 2 tf.Ops, outputs and targets, and return a tf.Op for the loss.
# Be careful about dimensionality -- maybe tf.transpose(outputs) is needed
Developer: PacktPublishing, Project: Deep-Learning-with-TensorFlow-Second-Edition, Lines: 9, Source file: utils.py
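
A hedged example of a loss function in the format the comment above describes: it takes the outputs and targets tensors and returns a scalar loss tensor.

import tensorflow as tf

def mse_loss(outputs, targets):
    # Mean squared error between network outputs and targets.
    return tf.reduce_mean(tf.square(outputs - targets))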

Example 7: evaluate

# Required import: import tensorflow [as alias]
# Or: from tensorflow import Op [as alias]
def evaluate(session, op_to_evaluate, feed_dict, batch_size):
        """ evaluate.

        Evaluate an operation with provided data dict using a batch size
        to save GPU memory.

        Args:
            session: `tf.Session`. Session for running operations.
            op_to_evaluate: `tf.Op`. Operation to be evaluated.
            feed_dict: `dict`. Data dictionary to feed op_to_evaluate.
            batch_size: `int`. Batch size to be used for evaluation.

        Returns:
            `float`. op_to_evaluate mean over all batches.

        """
        tflearn.is_training(False, session)
        n_test_samples = len(get_dict_first_element(feed_dict))
        batches = make_batches(n_test_samples, batch_size)
        index_array = np.arange(n_test_samples)
        avg = 0.0
        for i, (batch_start, batch_end) in enumerate(batches):
            batch_ids = index_array[batch_start:batch_end]
            feed_batch = {}
            for key in feed_dict:
                # Make batch for multi-dimensional data
                if np.ndim(feed_dict[key]) > 0:
                    feed_batch[key] = slice_array(feed_dict[key], batch_ids)
                else:
                    feed_batch[key] = feed_dict[key]
            avg += session.run(op_to_evaluate, feed_batch) / len(batches)
        return avg 
Developer: limbo018, Project: FRU, Lines: 34, Source file: trainer.py
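
For reference, the batching pattern used above can be sketched in plain Python; the `make_batches` below is a stand-in for illustration, not the tflearn.utils implementation that the trainer actually imports.

def make_batches(n_samples, batch_size):
    # Split [0, n_samples) into (start, end) index pairs of at most batch_size.
    return [(start, min(start + batch_size, n_samples))
            for start in range(0, n_samples, batch_size)]

print(make_batches(10, 4))   # [(0, 4), (4, 8), (8, 10)]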

Example 8: _tf_update_params

# Required import: import tensorflow [as alias]
# Or: from tensorflow import Op [as alias]
def _tf_update_params(self, w_mu, w_Sigma, w_Lambda):
    """
    Returns:
      tf.Op which performs an update on all weight parameters
    """
    mu_op     = tf.assign(self.w_mu,      w_mu)
    Sigma_op  = tf.assign(self.w_Sigma,   w_Sigma)
    Lambda_op = tf.assign(self.w_Lambda,  w_Lambda)
    return tf.group(mu_op, Sigma_op, Lambda_op) 
Developer: nikonikolov, Project: rltf, Lines: 11, Source file: blr.py
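
A minimal self-contained sketch of the assign-and-group pattern used above (TF 1.x graph mode assumed; the variables and values are made up):

import tensorflow as tf

v1 = tf.Variable(tf.zeros([2]))
v2 = tf.Variable(tf.zeros([2, 2]))
update = tf.group(tf.assign(v1, tf.ones([2])),
                  tf.assign(v2, tf.eye(2)), name="update_params")

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    sess.run(update)   # both assigns run as a single grouped Op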


Note: The tensorflow.Op method examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are selected from open-source projects contributed by their respective authors, and copyright remains with the original authors. Refer to each project's License before distributing or using the code; do not reproduce without permission.