

Python functions.expand_dims Method Code Examples

This article collects typical usage examples of the chainer.functions.expand_dims method in Python. F.expand_dims(x, axis) inserts a new axis of size 1 at the given position, which is commonly used to prepare arrays for broadcasting or batched operations. If you are wondering how functions.expand_dims is used in practice, the selected code examples below may help. You can also explore further usage examples of the chainer.functions module it belongs to.


The following shows 15 code examples of the functions.expand_dims method, sorted by popularity by default.
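As a quick orientation before the project examples, the following minimal sketch (toy data only, not taken from any of the projects below) shows the basic behavior: F.expand_dims inserts a size-1 axis at the given position and is typically followed by broadcasting, a pattern that recurs in most of the examples below.

import numpy as np
import chainer.functions as F

x = np.arange(6, dtype=np.float32).reshape(2, 3)  # shape (2, 3)
y = F.expand_dims(x, axis=1)                      # shape (2, 1, 3)
z = F.broadcast_to(y, (2, 4, 3))                  # repeat along the new axis
print(y.shape, z.shape)                           # (2, 1, 3) (2, 4, 3)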

Example 1: __call__

# Required import: from chainer import functions [as alias]
# Or: from chainer.functions import expand_dims [as alias]
def __call__(self, state):
        h = state
        for layer in self.hidden_layers:
            h = F.relu(layer(h))
        v = self.v(h)
        mu = self.mu(h)

        if self.scale_mu:
            mu = scale_by_tanh(mu, high=self.action_space.high,
                               low=self.action_space.low)

        mat_diag = F.exp(self.mat_diag(h))
        if hasattr(self, 'mat_non_diag'):
            mat_non_diag = self.mat_non_diag(h)
            tril = lower_triangular_matrix(mat_diag, mat_non_diag)
            mat = F.matmul(tril, tril, transb=True)
        else:
            mat = F.expand_dims(mat_diag ** 2, axis=2)
        return QuadraticActionValue(
            mu, mat, v, min_action=self.action_space.low,
            max_action=self.action_space.high) 
Developer: chainer, Project: chainerrl, Lines of code: 23, Source: state_q_functions.py

Example 2: _evaluate_psi_x_with_quantile_thresholds

# Required import: from chainer import functions [as alias]
# Or: from chainer.functions import expand_dims [as alias]
def _evaluate_psi_x_with_quantile_thresholds(psi_x, phi, f, taus):
    assert psi_x.ndim == 2
    batch_size, hidden_size = psi_x.shape
    assert taus.ndim == 2
    assert taus.shape[0] == batch_size
    n_taus = taus.shape[1]
    phi_taus = phi(taus)
    assert phi_taus.ndim == 3
    assert phi_taus.shape == (batch_size, n_taus, hidden_size)
    psi_x_b = F.broadcast_to(
        F.expand_dims(psi_x, axis=1), phi_taus.shape)
    h = psi_x_b * phi_taus
    h = F.reshape(h, (-1, hidden_size))
    assert h.shape == (batch_size * n_taus, hidden_size)
    h = f(h)
    assert h.ndim == 2
    assert h.shape[0] == batch_size * n_taus
    n_actions = h.shape[-1]
    h = F.reshape(h, (batch_size, n_taus, n_actions))
    return QuantileDiscreteActionValue(h) 
Developer: chainer, Project: chainerrl, Lines of code: 22, Source: iqn.py

Example 3: __call__

# Required import: from chainer import functions [as alias]
# Or: from chainer.functions import expand_dims [as alias]
def __call__(self, x):
        y = self.branches(x)

        u = F.sum(y, axis=1)
        s = F.average_pooling_2d(u, ksize=u.shape[2:])
        z = self.fc1(s)
        w = self.fc2(z)

        batch = w.shape[0]
        w = F.reshape(w, shape=(batch, self.num_branches, self.out_channels))
        w = self.softmax(w)
        w = F.expand_dims(F.expand_dims(w, axis=3), axis=4)

        y = y * w
        y = F.sum(y, axis=1)
        return y 
Developer: osmr, Project: imgclsmob, Lines of code: 18, Source: sknet.py

Example 4: __call__

# Required import: from chainer import functions [as alias]
# Or: from chainer.functions import expand_dims [as alias]
def __call__(self, x):
        heatmap = x
        vector_dim = 2
        batch = heatmap.shape[0]
        channels = heatmap.shape[1]
        in_size = x.shape[2:]
        heatmap_vector = F.reshape(heatmap, shape=(batch, channels, -1))
        indices = F.cast(F.expand_dims(F.argmax(heatmap_vector, axis=vector_dim), axis=vector_dim), np.float32)
        scores = F.max(heatmap_vector, axis=vector_dim, keepdims=True)
        scores_mask = (scores.array > 0.0).astype(np.float32)
        pts_x = (indices.array % in_size[1]) * scores_mask
        pts_y = (indices.array // in_size[1]) * scores_mask
        pts = F.concat((pts_x, pts_y, scores), axis=vector_dim).array
        for b in range(batch):
            for k in range(channels):
                hm = heatmap[b, k, :, :].array
                px = int(pts_x[b, k])
                py = int(pts_y[b, k])
                if (0 < px < in_size[1] - 1) and (0 < py < in_size[0] - 1):
                    pts[b, k, 0] += np.sign(hm[py, px + 1] - hm[py, px - 1]) * 0.25
                    pts[b, k, 1] += np.sign(hm[py + 1, px] - hm[py - 1, px]) * 0.25
        return pts 
Developer: osmr, Project: imgclsmob, Lines of code: 24, Source: common.py

Example 5: __call__

# Required import: from chainer import functions [as alias]
# Or: from chainer.functions import expand_dims [as alias]
def __call__(self, inputs):
        pos_x, pos_y, offset_x, ego_x, ego_y, pose_x, pose_y = self._prepare_input(inputs)
        batch_size, past_len, _ = pos_x.shape

        h_pos = self.pos_encoder(pos_x)
        h_ego = self.ego_encoder(ego_x)
        h = F.concat((h_pos, h_ego), axis=1)  # (B, C, 2)
        h = self.inter(h)
        h_pos = self.pos_decoder(h)
        pred_y = self.last(h_pos)  # (B, 10, C+6+28)
        pred_y = F.swapaxes(pred_y, 1, 2)
        pred_y = pred_y[:, :pos_y.shape[1], :]
        loss = F.mean_squared_error(pred_y, pos_y)

        pred_y = pred_y + F.broadcast_to(F.expand_dims(offset_x, 1), pred_y.shape)
        pred_y = cuda.to_cpu(pred_y.data) * self._std + self._mean
        return loss, pred_y, None 
Developer: takumayagi, Project: fpl, Lines of code: 19, Source: cnn.py

Example 6: process_trajectory

# Required import: from chainer import functions [as alias]
# Or: from chainer.functions import expand_dims [as alias]
def process_trajectory(self, l):
        """This is the time-dependent convolution operation, applied to a trajectory (in order).
        """
        shp = l.shape[0]
        # First dim is batchsize=1, then either 1 channel for 2d conv or n_feat channels
        # for 1d conv.
        l = F.expand_dims(l, axis=0)
        l = F.transpose(l, (0, 2, 1))
        l = self.traj_c0(l)
        l = F.leaky_relu(l)
        l = self.traj_c1(l)
        l = F.leaky_relu(l)
        l = F.sum(l, axis=(0, 2)) / l.shape[0] / l.shape[2]
        l = F.expand_dims(l, axis=0)
        l = self.traj_d0(l)
        l = F.tile(l, (shp, 1))
        return l 
Developer: openai, Project: EPG, Lines of code: 19, Source: losses.py

Example 7: calc_loss

# Required import: from chainer import functions [as alias]
# Or: from chainer.functions import expand_dims [as alias]
def calc_loss(self, x, t):
        batch_predictions, _, _ = x

        # concat all individual predictions and slice for each time step
        batch_predictions = F.concat([F.expand_dims(p, axis=0) for p in batch_predictions], axis=0)

        self.xp = cuda.get_array_module(batch_predictions[0], t)
        batch_size = t.shape[0]
        t = F.reshape(t, (batch_size, self.num_timesteps, -1))

        losses = []
        for predictions, labels in zip(F.separate(batch_predictions, axis=0), F.separate(t, axis=1)):
            batch_size, num_chars, num_classes = predictions.shape
            predictions = F.reshape(predictions, (batch_size * num_chars, num_classes))
            labels = F.reshape(labels, (-1,))
            losses.append(F.softmax_cross_entropy(predictions, labels))

        return sum(losses) 
Developer: Bartzi, Project: see, Lines of code: 20, Source: svhn_softmax_metrics.py

Example 8: attend

# Required import: from chainer import functions [as alias]
# Or: from chainer.functions import expand_dims [as alias]
def attend(self, encoded_features):
        self.out_lstm.reset_state()
        transformed_encoded_features = F.concat([F.expand_dims(self.transform_encoded_features(feature), axis=1) for feature in encoded_features], axis=1)
        concat_encoded_features = F.concat([F.expand_dims(e, axis=1) for e in encoded_features], axis=1)

        lstm_output = self.xp.zeros_like(encoded_features[0])
        outputs = []
        for _ in range(self.num_labels):
            transformed_lstm_output = self.transform_out_lstm_feature(lstm_output)
            attended_feats = []
            for transformed_encoded_feature in F.separate(transformed_encoded_features, axis=1):
                attended_feat = transformed_encoded_feature + transformed_lstm_output
                attended_feat = F.tanh(attended_feat)
                attended_feats.append(self.generate_attended_feat(attended_feat))

            attended_feats = F.concat(attended_feats, axis=1)
            alphas = F.softmax(attended_feats, axis=1)

            lstm_input_feature = F.batch_matmul(alphas, concat_encoded_features, transa=True)
            lstm_input_feature = F.squeeze(lstm_input_feature, axis=1)
            lstm_output = self.out_lstm(lstm_input_feature)
            outputs.append(lstm_output)
        return outputs 
Developer: Bartzi, Project: see, Lines of code: 25, Source: fsns.py

Example 9: decode_predictions

# Required import: from chainer import functions [as alias]
# Or: from chainer.functions import expand_dims [as alias]
def decode_predictions(self, predictions):
        # concat all individual predictions and slice for each time step
        predictions = F.concat([F.expand_dims(p, axis=0) for p in predictions], axis=0)

        words = []
        with cuda.get_device_from_array(predictions.data):
            for prediction in F.separate(predictions, axis=0):
                prediction = F.squeeze(prediction, axis=0)
                prediction = F.softmax(prediction, axis=1)
                prediction = self.xp.argmax(prediction.data, axis=1)
                word = self.loss_metrics.strip_prediction(prediction[self.xp.newaxis, ...])[0]
                if len(word) == 1 and word[0] == 0:
                    return ''

                word = "".join(map(self.loss_metrics.label_to_char, word))
                word = word.replace(chr(self.loss_metrics.char_map[str(self.loss_metrics.blank_symbol)]), '')
                words.append(word)

        text = " ".join(words)
        return text 
Developer: Bartzi, Project: see, Lines of code: 22, Source: svhn_bbox_plotter.py

Example 10: predict

# Required import: from chainer import functions [as alias]
# Or: from chainer.functions import expand_dims [as alias]
def predict(self, images, return_raw_classification_result=False):
        feature_map = self.extract_features(images)
        memory = self.transformer.encode(feature_map, None)

        target = self.get_bos_token_array(len(images), self.num_words)
        target = self.xp.reshape(target, (-1, 1))
        char = None

        for _ in range(self.num_chars):
            decoded = self.transformer.decode(memory, None, target, self.mask)
            char = self.classifier(decoded, n_batch_axes=2)
            predicted_chars = self.decode_prediction(char)
            target = F.concat([target, predicted_chars[:, -1:]])

        result = F.expand_dims(target[:, 1:], 1)
        if return_raw_classification_result:
            return result, char
        return result 
Developer: Bartzi, Project: kiss, Lines of code: 20, Source: transformer_recognizer.py

Example 11: __call__

# Required import: from chainer import functions [as alias]
# Or: from chainer.functions import expand_dims [as alias]
def __call__(self, v, h, label):
        v_t = self.vertical_conv_t(v)
        v_s = self.vertical_conv_s(v)
        to_vertical_t = self.v_to_h_conv_t(v_t)
        to_vertical_s = self.v_to_h_conv_s(v_s)

        # v_gate = self.vertical_gate_conv(v)
        # label bias is added to both vertical and horizontal conv
        # here we take only shape as it should be the same
        label = F.broadcast_to(F.expand_dims(F.expand_dims(self.label(label), -1), -1), v_t.shape)
        v_t, v_s = v_t + label, v_s + label
        v = F.tanh(v_t) * F.sigmoid(v_s)

        h_t = self.horizontal_conv_t(h)
        h_s = self.horizontal_conv_s(h)
        h_t, h_s = h_t + to_vertical_t + label, h_s + to_vertical_s + label
        h = self.horizontal_output(F.tanh(h_t) * F.sigmoid(h_s))

        return v, h 
Developer: rampage644, Project: wavenet, Lines of code: 21, Source: models.py

Example 12: __call__

# Required import: from chainer import functions [as alias]
# Or: from chainer.functions import expand_dims [as alias]
def __call__(self, g, n_nodes):
        """main calculation

        Args:
            g: super node feature. shape (bs, hidden_dim_super)
            n_nodes (int): number of nodes

        Returns:
            g_trans: super --> original transmission
        """
        mb = len(g)
        # for local updates
        g_trans = self.F_super(g)
        # intermediate_h_super.shape == (mb, self.hidden_dim)
        g_trans = functions.tanh(g_trans)
        # intermediate_h_super.shape == (mb, 1, self.hidden_dim)
        g_trans = functions.expand_dims(g_trans, 1)
        # intermediate_h_super.shape == (mb, atom, self.hidden_dim)
        g_trans = functions.broadcast_to(g_trans,
                                         (mb, n_nodes, self.hidden_dim))
        return g_trans 
Developer: chainer, Project: chainer-chemistry, Lines of code: 23, Source: gwm.py

Example 13: scale_by_tanh

# Required import: from chainer import functions [as alias]
# Or: from chainer.functions import expand_dims [as alias]
def scale_by_tanh(x, low, high):
    xp = cuda.get_array_module(x.array)
    scale = (high - low) / 2
    scale = xp.expand_dims(xp.asarray(scale, dtype=np.float32), axis=0)
    mean = (high + low) / 2
    mean = xp.expand_dims(xp.asarray(mean, dtype=np.float32), axis=0)
    return F.tanh(x) * scale + mean 
Developer: chainer, Project: chainerrl, Lines of code: 9, Source: state_q_functions.py

Example 14: compute_eltwise_huber_quantile_loss

# Required import: from chainer import functions [as alias]
# Or: from chainer.functions import expand_dims [as alias]
def compute_eltwise_huber_quantile_loss(y, t, taus, huber_loss_threshold=1.0):
    """Compute elementwise Huber losses for quantile regression.

    This is based on Algorithm 1 of https://arxiv.org/abs/1806.06923.

    This function assumes that both kinds of quantile thresholds, taus (used
    to compute y) and taus_prime (used to compute t), are i.i.d. samples from
    U([0,1]).

    Args:
        y (chainer.Variable): Quantile prediction from taus as a
            (batch_size, N)-shaped array.
        t (chainer.Variable or ndarray): Target values for quantile regression
            as a (batch_size, N_prime)-array.
        taus (ndarray): Quantile thresholds used to compute y as a
            (batch_size, N)-shaped array.
        huber_loss_threshold (float): Threshold of Huber loss. In the IQN
            paper, this is denoted by kappa.

    Returns:
        chainer.Variable: Loss (batch_size, N, N_prime)
    """
    assert y.shape == taus.shape
    # (batch_size, N) -> (batch_size, N, 1)
    y = F.expand_dims(y, axis=2)
    # (batch_size, N_prime) -> (batch_size, 1, N_prime)
    t = F.expand_dims(t, axis=1)
    # (batch_size, N) -> (batch_size, N, 1)
    taus = F.expand_dims(taus, axis=2)
    # Broadcast to (batch_size, N, N_prime)
    y, t, taus = F.broadcast(y, t, taus)
    I_delta = ((t.array - y.array) > 0).astype('f')
    eltwise_huber_loss = F.huber_loss(
        y, t, delta=huber_loss_threshold, reduce='no')
    eltwise_loss = abs(taus - I_delta) * eltwise_huber_loss
    return eltwise_loss 
Developer: chainer, Project: chainerrl, Lines of code: 38, Source: iqn.py
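
As a supplement to Example 14, here is a minimal, hypothetical usage sketch with random toy inputs. It assumes compute_eltwise_huber_quantile_loss is importable from chainerrl's iqn module (the exact import path may differ between chainerrl versions), and it uses a plain mean as the final reduction purely for illustration; this is not necessarily the reduction used by the actual IQN agent.

import numpy as np
import chainer
import chainer.functions as F
from chainerrl.agents.iqn import compute_eltwise_huber_quantile_loss  # assumed import path

batch_size, N, N_prime = 4, 8, 8
y = chainer.Variable(np.random.rand(batch_size, N).astype(np.float32))     # quantile predictions
t = np.random.rand(batch_size, N_prime).astype(np.float32)                 # regression targets
taus = np.random.uniform(size=(batch_size, N)).astype(np.float32)          # thresholds from U([0, 1])

eltwise_loss = compute_eltwise_huber_quantile_loss(y, t, taus)  # shape (batch_size, N, N_prime)
loss = F.mean(eltwise_loss)  # simple scalar reduction, for illustration only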

Example 15: update_on_policy

# Required import: from chainer import functions [as alias]
# Or: from chainer.functions import expand_dims [as alias]
def update_on_policy(self, statevar):
        assert self.t_start < self.t

        if not self.disable_online_update:
            next_values = {}
            for t in range(self.t_start + 1, self.t):
                next_values[t - 1] = self.past_values[t]
            if statevar is None:
                next_values[self.t - 1] = chainer.Variable(
                    self.xp.zeros_like(self.past_values[self.t - 1].array))
            else:
                with state_kept(self.model):
                    _, v = self.model(statevar)
                next_values[self.t - 1] = v
            log_probs = {t: self.past_action_distrib[t].log_prob(
                self.xp.asarray(self.xp.expand_dims(a, 0)))
                for t, a in self.past_actions.items()}
            self.online_batch_losses.append(self.compute_loss(
                t_start=self.t_start, t_stop=self.t,
                rewards=self.past_rewards,
                values=self.past_values,
                next_values=next_values,
                log_probs=log_probs))
            if len(self.online_batch_losses) == self.batchsize:
                loss = chainerrl.functions.sum_arrays(
                    self.online_batch_losses) / self.batchsize
                self.update(loss)
                self.online_batch_losses = []

        self.init_history_data_for_online_update() 
Developer: chainer, Project: chainerrl, Lines of code: 32, Source: pcl.py


Note: The chainer.functions.expand_dims examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are excerpted from open-source projects contributed by their original authors, who retain copyright; consult the corresponding project's License before redistributing or using the code, and do not reproduce this article without permission.