

Python functions.square Method Code Examples

This article collects typical usage examples of the chainer.functions.square method in Python. If you are wondering how functions.square is used in practice, how to call it, or simply want to see real examples, the curated code samples below may help. You can also explore further usage examples from the containing module, chainer.functions.


The following shows 15 code examples of the functions.square method, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code samples.

Example 1: _lossfun

# Required import: from chainer import functions [as alias]
# Or: from chainer.functions import square [as alias]
def _lossfun(self,
                 entropy, vs_pred, log_probs,
                 vs_pred_old, log_probs_old,
                 advs, vs_teacher):

        prob_ratio = F.exp(log_probs - log_probs_old)

        loss_policy = - F.mean(F.minimum(
            prob_ratio * advs,
            F.clip(prob_ratio, 1 - self.clip_eps, 1 + self.clip_eps) * advs))

        if self.clip_eps_vf is None:
            loss_value_func = F.mean_squared_error(vs_pred, vs_teacher)
        else:
            loss_value_func = F.mean(F.maximum(
                F.square(vs_pred - vs_teacher),
                F.square(_elementwise_clip(vs_pred,
                                           vs_pred_old - self.clip_eps_vf,
                                           vs_pred_old + self.clip_eps_vf)
                         - vs_teacher)
            ))
        loss_entropy = -F.mean(entropy)

        self.value_loss_record.append(float(loss_value_func.array))
        self.policy_loss_record.append(float(loss_policy.array))

        loss = (
            loss_policy
            + self.value_func_coef * loss_value_func
            + self.entropy_coef * loss_entropy
        )

        return loss 
Developer: chainer, Project: chainerrl, Lines: 35, Source file: ppo.py
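For orientation, here is a minimal, self-contained sketch of the clipped-surrogate policy term used above, run on toy arrays (the values and the clip_eps of 0.2 are illustrative, not taken from chainerrl):

import numpy as np
import chainer.functions as F

# Toy log-probabilities under the new and old policies, plus advantages.
log_probs = np.array([-1.0, -0.5, -2.0], dtype=np.float32)
log_probs_old = np.array([-1.2, -0.4, -1.9], dtype=np.float32)
advs = np.array([1.0, -0.5, 2.0], dtype=np.float32)
clip_eps = 0.2

prob_ratio = F.exp(log_probs - log_probs_old)
loss_policy = -F.mean(F.minimum(
    prob_ratio * advs,
    F.clip(prob_ratio, 1 - clip_eps, 1 + clip_eps) * advs))
print(loss_policy.array)  # scalar policy loss

Clipping the probability ratio prevents a single update from moving the policy too far from the one that collected the data.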

Example 2: compute_weighted_value_loss

# Required import: from chainer import functions [as alias]
# Or: from chainer.functions import square [as alias]
def compute_weighted_value_loss(y, t, weights,
                                clip_delta=True, batch_accumulator='mean'):
    """Compute a loss for value prediction problem.

    Args:
        y (Variable or ndarray): Predicted values.
        t (Variable or ndarray): Target values.
        weights (ndarray): Weights for y, t.
        clip_delta (bool): Use the Huber loss function if set True.
        batch_accumulator (str): 'mean' divides the loss by the batch size; 'sum' returns the raw sum.
    Returns:
        (Variable) scalar loss
    """
    assert batch_accumulator in ('mean', 'sum')
    y = F.reshape(y, (-1, 1))
    t = F.reshape(t, (-1, 1))
    if clip_delta:
        losses = F.huber_loss(y, t, delta=1.0)
    else:
        losses = F.square(y - t) / 2
    losses = F.reshape(losses, (-1,))
    loss_sum = F.sum(losses * weights)
    if batch_accumulator == 'mean':
        loss = loss_sum / y.shape[0]
    elif batch_accumulator == 'sum':
        loss = loss_sum
    return loss 
Developer: chainer, Project: chainerrl, Lines: 29, Source file: dqn.py
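A minimal usage sketch for the helper above, assuming compute_weighted_value_loss is in scope; the arrays are toy values chosen purely for illustration:

import numpy as np

y = np.array([1.0, 2.0, 3.0], dtype=np.float32)        # predicted values
t = np.array([1.5, 2.0, 2.0], dtype=np.float32)        # target values
weights = np.array([1.0, 0.5, 2.0], dtype=np.float32)  # per-sample weights

loss = compute_weighted_value_loss(y, t, weights,
                                   clip_delta=True,
                                   batch_accumulator='mean')
print(loss.array)  # scalar weighted Huber loss averaged over the batch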

Example 3: _get_mean_and_std

# Required import: from chainer import functions [as alias]
# Or: from chainer.functions import square [as alias]
def _get_mean_and_std(self):
        mean = self.sum / self.count
        std = np.sqrt(np.maximum(self.sum2 / self.count - np.square(mean), 0.01))
        return mean, std 
Developer: openai, Project: EPG, Lines: 6, Source file: utils.py

Example 4: update

# Required import: from chainer import functions [as alias]
# Or: from chainer.functions import square [as alias]
def update(self, x):
        self.sum += np.sum(x, axis=0)
        self.sum2 += np.sum(np.square(x), axis=0)
        self.count += x.shape[0] 
Developer: openai, Project: EPG, Lines: 6, Source file: utils.py
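Examples 3 and 4 are two methods of the same running-statistics helper in EPG's utils.py. The sketch below is a self-contained illustration of how the sum, sum2, and count fields fit together; the class name and constructor here are made up for the example, not the original API:

import numpy as np

class RunningMeanStd:
    """Tracks a streaming per-feature mean and standard deviation."""

    def __init__(self, dim):
        self.sum = np.zeros(dim)
        self.sum2 = np.zeros(dim)   # running sum of squares
        self.count = 0

    def update(self, x):
        # x: a batch of observations with shape (batch_size, dim)
        self.sum += np.sum(x, axis=0)
        self.sum2 += np.sum(np.square(x), axis=0)
        self.count += x.shape[0]

    def get_mean_and_std(self):
        mean = self.sum / self.count
        # Var(X) = E[X^2] - E[X]^2, floored at 0.01 for numerical safety.
        std = np.sqrt(np.maximum(self.sum2 / self.count - np.square(mean), 0.01))
        return mean, std

rms = RunningMeanStd(dim=3)
rms.update(np.random.randn(100, 3))
mean, std = rms.get_mean_and_std()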

Example 5: gaussian_kl

# Required import: from chainer import functions [as alias]
# Or: from chainer.functions import square [as alias]
def gaussian_kl(params0, params1):
    (mean0, logstd0), (mean1, logstd1) = params0, params1
    assert mean0.shape == logstd0.shape == mean1.shape == logstd1.shape
    return F.sum(
        logstd1 - logstd0 + (F.square(F.exp(logstd0)) + F.square(mean0 - mean1)) / (
                2.0 * F.square(F.exp(logstd1))) - 0.5,
        axis=1
    ) 
Developer: openai, Project: EPG, Lines: 10, Source file: utils.py
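Per feature dimension this evaluates logstd1 - logstd0 + (std0**2 + (mean0 - mean1)**2) / (2 * std1**2) - 0.5, the closed-form KL divergence KL(N0 || N1) between two diagonal Gaussians, summed over the feature axis. A minimal usage sketch with toy parameters, assuming gaussian_kl above is in scope:

import numpy as np

# Two batches (batch_size=2, dim=3) of diagonal-Gaussian parameters.
mean0 = np.zeros((2, 3), dtype=np.float32)
logstd0 = np.zeros((2, 3), dtype=np.float32)               # std = 1
mean1 = np.ones((2, 3), dtype=np.float32)
logstd1 = np.full((2, 3), np.log(2.0), dtype=np.float32)   # std = 2

kl = gaussian_kl((mean0, logstd0), (mean1, logstd1))
print(kl.array)  # per-sample KL divergence, shape (2,)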

Example 6: _compute_ppo_loss

# Required import: from chainer import functions [as alias]
# Or: from chainer.functions import square [as alias]
def _compute_ppo_loss(self, obs, acts, at, vt, old_params):
        params = self._pi_f(obs)
        cv = F.flatten(self._vf_f(obs))
        ratio = F.exp(self._logp(params, acts) - self._logp(old_params, acts))
        surr1 = ratio * at
        surr2 = F.clip(ratio, 1 - self._ppo_clipparam, 1 + self._ppo_clipparam) * at
        ppo_surr_loss = (
                -sym_mean(F.minimum(surr1, surr2))
                + self._ppo_klcoeff * sym_mean(self.kl(old_params, params))
                + sym_mean(F.square(cv - vt))
        )
        return ppo_surr_loss 
Developer: openai, Project: EPG, Lines: 14, Source file: agents.py

Example 7: _pi_logp

# Required import: from chainer import functions [as alias]
# Or: from chainer.functions import square [as alias]
def _pi_logp(self, obs, acts):
        mean, logstd = self._pi_f(obs)
        return (
                - 0.5 * np.log(2.0 * np.pi) * acts.shape[1]
                - 0.5 * F.sum(F.square((acts - mean) / (F.exp(logstd)) + 1e-8), axis=1)
                - F.sum(logstd, axis=1)
        ) 
Developer: openai, Project: EPG, Lines: 9, Source file: agents.py

Example 8: test_chx_array_view

# Required import: from chainer import functions [as alias]
# Or: from chainer.functions import square [as alias]
def test_chx_array_view(self):
        from_connected = self.from_connected
        calculate_by_variable = self.calculate_by_variable
        backward_by_variable = self.backward_by_variable

        # Create an original array, either connected or disconnected.
        a = chainerx.array([1, 2], np.float32)
        if from_connected:
            a.require_grad()

        # Wrap with a variable
        x = chainer.Variable(a, requires_grad=True)
        x_arr = x.chx_array  # Unwrap a view

        assert x_arr.is_backprop_required()
        assert not x_arr.is_grad_required()
        assert a is not x_arr  # x_arr is a view of a

        if calculate_by_variable:
            # Calculate by variable
            y = F.square(x_arr)
            # Unwrap the output array
            y_arr = y.chx_array
            y_arr.grad = chainerx.ones_like(y.array)
        else:
            # Calculate by array
            y_arr = chainerx.square(x_arr)
            y_arr.grad = chainerx.ones_like(y_arr)
            # Wrap y with variable
            y = chainer.Variable(y_arr, requires_grad=True)

        # Backward
        if backward_by_variable:
            y.backward()
        else:
            y_arr.backward()

        # x.grad is set
        assert x.grad is not None
        chainerx.testing.assert_array_equal_ex(
            chainerx.array([2, 4], np.float32), x.grad) 
Developer: chainer, Project: chainer, Lines: 43, Source file: test_variable.py

Example 9: get_bbox_side_lengths

# Required import: from chainer import functions [as alias]
# Or: from chainer.functions import square [as alias]
def get_bbox_side_lengths(self, grids):
        x0, x1, x2, y0, y1, y2 = self.get_corners(grids)

        width = F.sqrt(
            F.square(x1 - x0) + F.square(y1 - y0)
        )

        height = F.sqrt(
            F.square(x2 - x0) + F.square(y2 - y0)
        )
        return width, height 
Developer: Bartzi, Project: see, Lines: 13, Source file: loss_metrics.py
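Here width and height are simply the Euclidean distances between pairs of corner points produced by get_corners. A standalone sketch with hand-picked toy coordinates (get_corners itself is not reproduced, and the coordinate values are made up):

import numpy as np
import chainer.functions as F

# Toy corner coordinates for a batch of two boxes.
x0 = np.array([0.0, 1.0], dtype=np.float32)
y0 = np.array([0.0, 1.0], dtype=np.float32)
x1 = np.array([3.0, 4.0], dtype=np.float32)   # corner that defines the width
y1 = np.array([0.0, 1.0], dtype=np.float32)
x2 = np.array([0.0, 1.0], dtype=np.float32)   # corner that defines the height
y2 = np.array([4.0, 5.0], dtype=np.float32)

width = F.sqrt(F.square(x1 - x0) + F.square(y1 - y0))
height = F.sqrt(F.square(x2 - x0) + F.square(y2 - y0))
print(width.array, height.array)  # [3. 3.] [4. 4.]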

Example 10: _smooth_l1_loss_base

# Required import: from chainer import functions [as alias]
# Or: from chainer.functions import square [as alias]
def _smooth_l1_loss_base(x, t, in_weight, sigma):
    sigma2 = sigma ** 2
    diff = in_weight * (x - t)
    abs_diff = F.absolute(diff)
    flag = (abs_diff.array < (1. / sigma2)).astype(np.float32)

    y = (flag * (sigma2 / 2.) * F.square(diff) +
         (1 - flag) * (abs_diff - 0.5 / sigma2))
    return F.sum(y, axis=1) 
Developer: chainer, Project: chainercv, Lines: 11, Source file: light_head_rcnn_train_chain.py

Example 11: _smooth_l1_loss

# Required import: from chainer import functions [as alias]
# Or: from chainer.functions import square [as alias]
def _smooth_l1_loss(x, t, in_weight, sigma):
    sigma2 = sigma ** 2
    diff = in_weight * (x - t)
    abs_diff = F.absolute(diff)
    flag = (abs_diff.array < (1. / sigma2)).astype(np.float32)

    y = (flag * (sigma2 / 2.) * F.square(diff) +
         (1 - flag) * (abs_diff - 0.5 / sigma2))

    return F.sum(y) 
Developer: chainer, Project: chainercv, Lines: 12, Source file: faster_rcnn_train_chain.py
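With sigma = 1 the piecewise expression above is exactly the Huber loss with delta = 1, so the snippet can be sanity-checked against chainer's built-in F.huber_loss. A minimal sketch, assuming the _smooth_l1_loss defined above is in scope:

import numpy as np
import chainer.functions as F

x = np.array([[0.2, 1.5], [-2.0, 0.1]], dtype=np.float32)
t = np.zeros_like(x)
in_weight = np.ones_like(x)

manual = _smooth_l1_loss(x, t, in_weight, 1.)
# F.huber_loss sums over the second axis by default; sum again to a scalar.
builtin = F.sum(F.huber_loss(x, t, delta=1.0))
print(manual.array, builtin.array)  # both are about 2.525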

Example 12: get_bbox_side_lengths

# Required import: from chainer import functions [as alias]
# Or: from chainer.functions import square [as alias]
def get_bbox_side_lengths(self, grids, image_size):
        x0, x1, x2, _, y0, y1, y2, _ = self.get_corners(grids, image_size)

        width = F.sqrt(
            F.square(x1 - x0) + F.square(y1 - y0)
        )

        height = F.sqrt(
            F.square(x2 - x0) + F.square(y2 - y0)
        )
        return width, height 
Developer: Bartzi, Project: kiss, Lines: 13, Source file: utils.py

Example 13: _smooth_l1_loss

# Required import: from chainer import functions [as alias]
# Or: from chainer.functions import square [as alias]
def _smooth_l1_loss(x, t, in_weight, sigma):
    sigma2 = sigma ** 2
    diff = in_weight * (x - t)
    abs_diff = F.absolute(diff)
    flag = (abs_diff.data < (1. / sigma2)).astype(np.float32)

    y = (flag * (sigma2 / 2.) * F.square(diff) +
         (1 - flag) * (abs_diff - 0.5 / sigma2))

    return F.sum(y) 
Developer: wkentaro, Project: chainer-mask-rcnn, Lines: 12, Source file: mask_rcnn_train_chain.py

Example 14: compute_weighted_value_loss

# Required import: from chainer import functions [as alias]
# Or: from chainer.functions import square [as alias]
def compute_weighted_value_loss(y, t, weights,
                                mask, clip_delta=True,
                                batch_accumulator='mean'):
    """Compute a loss for value prediction problem.

    Args:
        y (Variable or ndarray): Predicted values.
        t (Variable or ndarray): Target values.
        weights (ndarray): Weights for y, t.
        mask (ndarray): Mask to use for loss calculation
        clip_delta (bool): Use the Huber loss function if set True.
        batch_accumulator (str): 'mean' divides the loss by the number of unmasked samples; 'sum' returns the raw sum.
    Returns:
        (Variable) scalar loss
    """
    assert batch_accumulator in ('mean', 'sum')
    y = F.reshape(y, (-1, 1))
    t = F.reshape(t, (-1, 1))
    if clip_delta:
        losses = F.huber_loss(y, t, delta=1.0)
    else:
        losses = F.square(y - t) / 2
    losses = F.reshape(losses, (-1,))
    loss_sum = F.sum(losses * weights * mask)
    n_mask = float(mask.sum())  # number of samples kept by the mask
    if batch_accumulator == 'mean':
        loss = loss_sum / max(n_mask, 1.0)
    elif batch_accumulator == 'sum':
        loss = loss_sum
    return loss 
Developer: minerllabs, Project: baselines, Lines: 31, Source file: dqfd.py

Example 15: get_var_line_length_loss

# Required import: from chainer import functions [as alias]
# Or: from chainer.functions import square [as alias]
def get_var_line_length_loss(vertices, faces):
    vertices = vertices[faces]
    num_faces = vertices.shape[0]
    v01 = vertices[:, 1] - vertices[:, 0]
    v12 = vertices[:, 2] - vertices[:, 1]
    v20 = vertices[:, 0] - vertices[:, 2]
    n01_square = cf.sum(cf.square(v01), axis=1)
    n12_square = cf.sum(cf.square(v12), axis=1)
    n20_square = cf.sum(cf.square(v20), axis=1)
    n01 = cf.sqrt(n01_square)
    n12 = cf.sqrt(n12_square)
    n20 = cf.sqrt(n20_square)
    mean_of_square = (cf.sum(n01_square) + cf.sum(n12_square) + cf.sum(n20_square)) / (3. * num_faces)
    square_of_mean = cf.square((cf.sum(n01) + cf.sum(n12) + cf.sum(n20)) / (3. * num_faces))
    return (mean_of_square - square_of_mean) * num_faces 
Developer: hiroharu-kato, Project: deep_dream_3d, Lines: 17, Source file: main.py
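A minimal usage sketch on a toy mesh, assuming get_var_line_length_loss above is in scope and cf is chainer.functions as in the original snippet. The loss is the variance of all edge lengths scaled by the number of faces, so it is zero only when every edge has the same length:

import numpy as np

# A toy mesh: four vertices and two triangular faces sharing an edge.
vertices = np.array([[0., 0., 0.],
                     [1., 0., 0.],
                     [0., 1., 0.],
                     [1., 1., 0.]], dtype=np.float32)
faces = np.array([[0, 1, 2], [1, 3, 2]])

loss = get_var_line_length_loss(vertices, faces)
print(loss.array)  # > 0 here because diagonal edges are longer than the sides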


Note: The chainer.functions.square examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets were selected from open-source projects contributed by various developers; copyright of the source code belongs to the original authors. Please consult the corresponding project's license before distributing or using the code. Do not reproduce this article without permission.