

Python functions.mean_squared_error Method Code Examples

This article collects typical usage examples of the chainer.functions.mean_squared_error method in Python. If you are wondering how functions.mean_squared_error is called and what its usage looks like in practice, the curated examples below may help. You can also explore further usage examples from the containing module, chainer.functions.


The following presents 15 code examples of the functions.mean_squared_error method, sorted by popularity by default.
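
Before diving into the project-level examples, here is a minimal, self-contained sketch of the basic call (written for this article as an illustration, assuming Chainer and NumPy are installed; the array values are made up):

import numpy as np
import chainer
import chainer.functions as F

# Two arrays of identical shape; mean_squared_error averages the squared
# elementwise differences into a scalar Variable.
pred = chainer.Variable(np.array([[0.5, 1.0], [1.5, 2.0]], dtype=np.float32))
target = np.array([[0.0, 1.0], [2.0, 2.0]], dtype=np.float32)

loss = F.mean_squared_error(pred, target)
print(float(loss.array))  # 0.125

# Because the loss is a scalar Variable, backward() fills pred.grad with
# d(loss)/d(pred) = 2 * (pred - target) / pred.size.
loss.backward()
print(pred.grad)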

Example 1: linear_train

# Required module: from chainer import functions [as alias]
# Or: from chainer.functions import mean_squared_error [as alias]
def linear_train(train_data, train_target, n_epochs=200):
    for _ in range(n_epochs):
        # Get the result of the forward pass.
        output = linear_forward(train_data)

        # Calculate the loss between the training data and target data.
        loss = F.mean_squared_error(train_target, output)

        # Zero all gradients before updating them.
        linear_function.zerograds()

        # Calculate and update all gradients.
        loss.backward()

        # Use the optimizer to move all parameters of the network
        # to values which will reduce the loss.
        optimizer.update() 
Developer: floydhub | Project: dockerfiles | Lines: 19 | Source: linear_regression.py
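
The snippet above relies on linear_function, linear_forward, and optimizer being defined elsewhere in the file. A minimal sketch of such a setup (hypothetical names chosen to match the snippet, not copied from the original repository):

import numpy as np
import chainer.links as L
from chainer import optimizers

# A one-input, one-output linear link: y = W * x + b.
linear_function = L.Linear(1, 1)

def linear_forward(data):
    # Forward pass of the toy model.
    return linear_function(data)

optimizer = optimizers.SGD(lr=0.01)
optimizer.setup(linear_function)

# Toy data: learn y = 2x + 1 from noise-free samples.
train_data = np.random.rand(100, 1).astype(np.float32)
train_target = 2 * train_data + 1
linear_train(train_data, train_target)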

Example 2: _update_vf

# Required module: from chainer import functions [as alias]
# Or: from chainer.functions import mean_squared_error [as alias]
def _update_vf(self, dataset):
        """Update the value function using a given dataset.

        The value function is updated via SGD to minimize TD(lambda) errors.
        """

        xp = self.vf.xp

        assert 'state' in dataset[0]
        assert 'v_teacher' in dataset[0]

        dataset_iter = chainer.iterators.SerialIterator(
            dataset, self.vf_batch_size)

        while dataset_iter.epoch < self.vf_epochs:
            batch = dataset_iter.__next__()
            states = batch_states([b['state'] for b in batch], xp, self.phi)
            if self.obs_normalizer:
                states = self.obs_normalizer(states, update=False)
            vs_teacher = xp.array(
                [b['v_teacher'] for b in batch], dtype=xp.float32)
            vs_pred = self.vf(states)
            vf_loss = F.mean_squared_error(vs_pred, vs_teacher[..., None])
            self.vf_optimizer.update(lambda: vf_loss) 
Developer: chainer | Project: chainerrl | Lines: 26 | Source: trpo.py

Example 3: __call__

# Required module: from chainer import functions [as alias]
# Or: from chainer.functions import mean_squared_error [as alias]
def __call__(self, inputs):
        pos_x, pos_y, offset_x, ego_x, ego_y, pose_x, pose_y = self._prepare_input(inputs)
        batch_size, past_len, _ = pos_x.shape

        h_pos = self.pos_encoder(pos_x)
        h_ego = self.ego_encoder(ego_x)
        h = F.concat((h_pos, h_ego), axis=1)  # (B, C, 2)
        h = self.inter(h)
        h_pos = self.pos_decoder(h)
        pred_y = self.last(h_pos)  # (B, 10, C+6+28)
        pred_y = F.swapaxes(pred_y, 1, 2)
        pred_y = pred_y[:, :pos_y.shape[1], :]
        loss = F.mean_squared_error(pred_y, pos_y)

        pred_y = pred_y + F.broadcast_to(F.expand_dims(offset_x, 1), pred_y.shape)
        pred_y = cuda.to_cpu(pred_y.data) * self._std + self._mean
        return loss, pred_y, None 
Developer: takumayagi | Project: fpl | Lines: 19 | Source: cnn.py

Example 4: __call__

# Required module: from chainer import functions [as alias]
# Or: from chainer.functions import mean_squared_error [as alias]
def __call__(self, x, t=None, w=None):
        # t and w are on the host.

        # Forward network
        alpha = self.forward(x)

        if t is None:
            assert not chainer.config.train
            return

        # Weighted mean squared error
        # TODO: Do more tests
#         loss = F.mean(F.squared_error(alpha, t) * w)
        loss = F.mean_squared_error(alpha, t)

        if np.isnan(float(loss.data)):
            raise ValueError('Loss is nan.')
        chainer.report({'loss': loss}, self)

        return loss 
Developer: takiyu | Project: portrait_matting | Lines: 22 | Source: fcn8s_matting.py

Example 5: forward

# Required module: from chainer import functions [as alias]
# Or: from chainer.functions import mean_squared_error [as alias]
def forward(self, state, action, Reward, state_dash, episode_end):
        num_of_batch = state.shape[0]
        s = Variable(state)
        s_dash = Variable(state_dash)

        Q = self.Q_func(s)  # Get Q-value

        # Generate Target Signals
        max_Q_dash_ = self.Q_func(s_dash)
        tmp = list(map(np.max, max_Q_dash_.data.get()))
        max_Q_dash = np.asanyarray(tmp, dtype=np.float32)
        target = np.asanyarray(Q.data.get(), dtype=np.float32)

        for i in xrange(num_of_batch):
            if not episode_end[i][0]:
                tmp_ = np.sign(Reward[i]) + self.gamma * max_Q_dash[i]
            else:
                tmp_ = np.sign(Reward[i])
            target[i, self.action_to_index(action[i])] = tmp_

        loss = F.mean_squared_error(Variable(cuda.to_gpu(target)), Q)
        return loss, Q 
Developer: ugo-nama-kun | Project: DQN-chainer | Lines: 24 | Source: dqn_agent_nips.py

Example 6: __call__

# Required module: from chainer import functions [as alias]
# Or: from chainer.functions import mean_squared_error [as alias]
def __call__(self, x, t=None):
        self.clear()
        #x = Variable(x_data)  # x_data.astype(np.float32)

        h = F.leaky_relu(self.conv1(x), slope=0.1)
        h = F.leaky_relu(self.conv2(h), slope=0.1)
        h = F.leaky_relu(self.conv3(h), slope=0.1)
        h = F.leaky_relu(self.conv4(h), slope=0.1)
        h = F.leaky_relu(self.conv5(h), slope=0.1)
        h = F.leaky_relu(self.conv6(h), slope=0.1)
        h = F.clipped_relu(self.conv7(h), z=1.0)
        if self.train:
            self.loss = F.mean_squared_error(h, t)
            return self.loss
        else:
            return h 
Developer: corochann | Project: SeRanet | Lines: 18 | Source: basic_cnn_tail.py

Example 7: __call__

# Required module: from chainer import functions [as alias]
# Or: from chainer.functions import mean_squared_error [as alias]
def __call__(self, x, t):
        y = self.predictor(x)

        if self.loss == "euclidean":
            return F.mean_squared_error(y, t)

        elif self.loss == "sdtw":
            loss = 0
            for i in range(y.shape[0]):
                y_i = F.reshape(y[i], (-1,1))
                t_i = F.reshape(t[i], (-1,1))
                loss += SoftDTWLoss(self.gamma)(y_i, t_i)
            return loss

        else:
            raise ValueError("Unknown loss") 
Developer: mblondel | Project: soft-dtw | Lines: 18 | Source: plot_chainer_MLP.py

Example 8: _lossfun

# Required module: from chainer import functions [as alias]
# Or: from chainer.functions import mean_squared_error [as alias]
def _lossfun(self,
                 entropy, vs_pred, log_probs,
                 vs_pred_old, log_probs_old,
                 advs, vs_teacher):

        prob_ratio = F.exp(log_probs - log_probs_old)

        loss_policy = - F.mean(F.minimum(
            prob_ratio * advs,
            F.clip(prob_ratio, 1 - self.clip_eps, 1 + self.clip_eps) * advs))

        if self.clip_eps_vf is None:
            loss_value_func = F.mean_squared_error(vs_pred, vs_teacher)
        else:
            loss_value_func = F.mean(F.maximum(
                F.square(vs_pred - vs_teacher),
                F.square(_elementwise_clip(vs_pred,
                                           vs_pred_old - self.clip_eps_vf,
                                           vs_pred_old + self.clip_eps_vf)
                         - vs_teacher)
            ))
        loss_entropy = -F.mean(entropy)

        self.value_loss_record.append(float(loss_value_func.array))
        self.policy_loss_record.append(float(loss_policy.array))

        loss = (
            loss_policy
            + self.value_func_coef * loss_value_func
            + self.entropy_coef * loss_entropy
        )

        return loss 
Developer: chainer | Project: chainerrl | Lines: 35 | Source: ppo.py

Example 9: update_q_func

# Required module: from chainer import functions [as alias]
# Or: from chainer.functions import mean_squared_error [as alias]
def update_q_func(self, batch):
        """Compute loss for a given Q-function."""

        batch_next_state = batch['next_state']
        batch_rewards = batch['reward']
        batch_terminal = batch['is_state_terminal']
        batch_state = batch['state']
        batch_actions = batch['action']
        batch_discount = batch['discount']

        with chainer.no_backprop_mode(), chainer.using_config('train', False):
            next_action_distrib = self.policy(batch_next_state)
            next_actions, next_log_prob =\
                next_action_distrib.sample_with_log_prob()
            next_q1 = self.target_q_func1(batch_next_state, next_actions)
            next_q2 = self.target_q_func2(batch_next_state, next_actions)
            next_q = F.minimum(next_q1, next_q2)
            entropy_term = self.temperature * next_log_prob[..., None]
            assert next_q.shape == entropy_term.shape

            target_q = batch_rewards + batch_discount * \
                (1.0 - batch_terminal) * F.flatten(next_q - entropy_term)

        predict_q1 = F.flatten(self.q_func1(batch_state, batch_actions))
        predict_q2 = F.flatten(self.q_func2(batch_state, batch_actions))

        loss1 = 0.5 * F.mean_squared_error(target_q, predict_q1)
        loss2 = 0.5 * F.mean_squared_error(target_q, predict_q2)

        # Update stats
        self.q1_record.extend(cuda.to_cpu(predict_q1.array))
        self.q2_record.extend(cuda.to_cpu(predict_q2.array))
        self.q_func1_loss_record.append(float(loss1.array))
        self.q_func2_loss_record.append(float(loss2.array))

        self.q_func1_optimizer.update(lambda: loss1)
        self.q_func2_optimizer.update(lambda: loss2) 
Developer: chainer | Project: chainerrl | Lines: 39 | Source: soft_actor_critic.py

Example 10: update_q_func

# Required module: from chainer import functions [as alias]
# Or: from chainer.functions import mean_squared_error [as alias]
def update_q_func(self, batch):
        """Compute loss for a given Q-function."""

        batch_next_state = batch['next_state']
        batch_rewards = batch['reward']
        batch_terminal = batch['is_state_terminal']
        batch_state = batch['state']
        batch_actions = batch['action']
        batch_discount = batch['discount']

        with chainer.no_backprop_mode(), chainer.using_config('train', False):
            next_actions = self.target_policy_smoothing_func(
                self.target_policy(batch_next_state).sample().array)
            next_q1 = self.target_q_func1(batch_next_state, next_actions)
            next_q2 = self.target_q_func2(batch_next_state, next_actions)
            next_q = F.minimum(next_q1, next_q2)

            target_q = batch_rewards + batch_discount * \
                (1.0 - batch_terminal) * F.flatten(next_q)

        predict_q1 = F.flatten(self.q_func1(batch_state, batch_actions))
        predict_q2 = F.flatten(self.q_func2(batch_state, batch_actions))

        loss1 = F.mean_squared_error(target_q, predict_q1)
        loss2 = F.mean_squared_error(target_q, predict_q2)

        # Update stats
        self.q1_record.extend(cuda.to_cpu(predict_q1.array))
        self.q2_record.extend(cuda.to_cpu(predict_q2.array))
        self.q_func1_loss_record.append(float(loss1.array))
        self.q_func2_loss_record.append(float(loss2.array))

        self.q_func1_optimizer.update(lambda: loss1)
        self.q_func2_optimizer.update(lambda: loss2) 
Developer: chainer | Project: chainerrl | Lines: 36 | Source: td3.py

Example 11: compute_value_loss

# Required module: from chainer import functions [as alias]
# Or: from chainer.functions import mean_squared_error [as alias]
def compute_value_loss(y, t, clip_delta=True, batch_accumulator='mean'):
    """Compute a loss for value prediction problem.

    Args:
        y (Variable or ndarray): Predicted values.
        t (Variable or ndarray): Target values.
        clip_delta (bool): Use the Huber loss function if set True.
        batch_accumulator (str): 'mean' or 'sum'. 'mean' will use the mean of
            the loss values in a batch. 'sum' will use the sum.
    Returns:
        (Variable) scalar loss
    """
    assert batch_accumulator in ('mean', 'sum')
    y = F.reshape(y, (-1, 1))
    t = F.reshape(t, (-1, 1))
    if clip_delta:
        loss_sum = F.sum(F.huber_loss(y, t, delta=1.0))
        if batch_accumulator == 'mean':
            loss = loss_sum / y.shape[0]
        elif batch_accumulator == 'sum':
            loss = loss_sum
    else:
        loss_mean = F.mean_squared_error(y, t) / 2
        if batch_accumulator == 'mean':
            loss = loss_mean
        elif batch_accumulator == 'sum':
            loss = loss_mean * y.shape[0]
    return loss 
Developer: chainer | Project: chainerrl | Lines: 30 | Source: dqn.py
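
Since compute_value_loss accepts plain arrays, a quick toy call (hypothetical values, not taken from the chainerrl repository) illustrates the effect of the two flags:

import numpy as np
import chainer.functions as F  # used inside compute_value_loss

y = np.array([1.0, 2.0, 3.0], dtype=np.float32)   # predicted state values
t = np.array([1.5, 2.0, 0.0], dtype=np.float32)   # TD targets

# With clip_delta=True the large third error (|3 - 0| = 3) only contributes
# linearly through the Huber loss: (0.5 * 0.5**2 + 0 + (3 - 0.5)) / 3 = 0.875
huber = compute_value_loss(y, t, clip_delta=True, batch_accumulator='mean')
print(float(huber.array))

# With clip_delta=False the halved mean squared error is used instead:
# ((0.25 + 0 + 9) / 3) / 2 ≈ 1.54
mse = compute_value_loss(y, t, clip_delta=False, batch_accumulator='mean')
print(float(mse.array))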

Example 12: __call__

# Required module: from chainer import functions [as alias]
# Or: from chainer.functions import mean_squared_error [as alias]
def __call__(self, x):
        y = self.extract_feature(x)
        e = F.mean_squared_error(self.y0, y)
        tv = self.tv_norm(x)
        self.loss = (self.lambda_euc * float(self.y0.data.size) * e +
                     self.args.lambda_tv * tv +
                     self.args.lambda_lp * F.sum(x ** self.args.p))

        return self.loss 
Developer: mitmul | Project: ssai-cnn | Lines: 11 | Source: invert.py

Example 13: z_generate

# Required module: from chainer import functions [as alias]
# Or: from chainer.functions import mean_squared_error [as alias]
def z_generate(z, G, copy_paste_var, nz, gpu):
    z = np.reshape(z, (nz, 1, 1)).astype(np.float32)
    z_var = Variable(chainer.dataset.concat_examples([z], gpu))

    loss = F.mean_squared_error(copy_paste_var, G(z_var))

    # Backward
    loss.backward()
    # Transfer loss & diff from GPU to CPU
    loss = cuda.to_cpu(loss.data)
    dz = np.squeeze(cuda.to_cpu(z_var.grad))

    return loss, np.asarray(dz.flatten(), dtype=np.float64) 
Developer: wuhuikai | Project: GP-GAN | Lines: 15 | Source: gp_gan.py

Example 14: g_loss

# Required module: from chainer import functions [as alias]
# Or: from chainer.functions import mean_squared_error [as alias]
def g_loss(self, errG, fake, gtv):
        l2_loss = F.mean_squared_error(fake, gtv)
        loss = (1 - self.args.l2_weight) * errG + self.args.l2_weight * l2_loss

        chainer.report({'loss': loss}, self.G)
        chainer.report({'l2_loss': l2_loss}, self.G)
        chainer.report({'gan_loss': errG}, self.G)

        return loss 
Developer: wuhuikai | Project: GP-GAN | Lines: 11 | Source: updater.py

Example 15: update

# Required module: from chainer import functions [as alias]
# Or: from chainer.functions import mean_squared_error [as alias]
def update(Q, target_Q, policy, target_policy, opt_Q, opt_policy,
           samples, gamma=0.99):
    """Update a Q-function and a policy."""
    dtype = chainer.get_dtype()
    xp = Q.xp
    obs = xp.asarray([sample[0] for sample in samples], dtype=dtype)
    action = xp.asarray([sample[1] for sample in samples], dtype=dtype)
    reward = xp.asarray([sample[2] for sample in samples], dtype=dtype)
    done = xp.asarray([sample[3] for sample in samples], dtype=dtype)
    obs_next = xp.asarray([sample[4] for sample in samples], dtype=dtype)

    def update_Q():
        # Predicted values: Q(s,a)
        y = F.squeeze(Q(obs, action), axis=1)
        # Target values: r + gamma * Q(s,policy(s))
        with chainer.no_backprop_mode():
            next_q = F.squeeze(target_Q(obs_next, target_policy(obs_next)),
                               axis=1)
            target = reward + gamma * (1 - done) * next_q
        loss = F.mean_squared_error(y, target)
        Q.cleargrads()
        loss.backward()
        opt_Q.update()

    def update_policy():
        # Maximize Q(s,policy(s))
        q = Q(obs, policy(obs))
        q = q[:]  # Avoid https://github.com/chainer/chainer/issues/2744
        loss = - F.mean(q)
        policy.cleargrads()
        loss.backward()
        opt_policy.update()

    update_Q()
    update_policy() 
Developer: chainer | Project: chainer | Lines: 37 | Source: ddpg_pendulum.py


Note: The chainer.functions.mean_squared_error examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are selected from open-source projects contributed by their respective authors; copyright remains with the original authors, and redistribution or use should follow the license of the corresponding project. Do not republish without permission.