

Python functions.squeeze Method Code Examples

This article collects typical usage examples of the Python method chainer.functions.squeeze. If you are wondering what exactly functions.squeeze does, how to call it, or what real-world usage looks like, the curated code examples below may help. You can also explore further usage examples from the module it belongs to, chainer.functions.


The following presents 15 code examples of the functions.squeeze method, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
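Before the examples, here is a minimal sketch of the basic behavior (my own illustration, assuming chainer and numpy are installed): F.squeeze removes size-1 axes from an ndarray or Variable, either all of them or only those named by axis, and returns a Variable.

import numpy as np
import chainer.functions as F

x = np.zeros((2, 1, 1, 3), dtype=np.float32)
print(F.squeeze(x).shape)               # (2, 3): every size-1 axis removed
print(F.squeeze(x, axis=1).shape)       # (2, 1, 3): only axis 1 removed
print(F.squeeze(x, axis=(1, 2)).shape)  # (2, 3)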

Example 1: update

# Required import: from chainer import functions [as alias]
# Or: from chainer.functions import squeeze [as alias]
def update(self, loss):

        self.average_loss += (
            (1 - self.average_loss_decay) *
            (asfloat(loss) - self.average_loss))

        # Compute gradients using thread-specific model
        self.model.cleargrads()
        F.squeeze(loss).backward()
        if self.train_async:
            # Copy the gradients to the globally shared model
            copy_param.copy_grad(
                target_link=self.shared_model, source_link=self.model)
            if self.process_idx == 0:
                xp = self.xp
                norm = sum(xp.sum(xp.square(param.grad))
                           for param in self.optimizer.target.params()
                           if param.grad is not None)
                self.logger.debug('grad norm:%s', norm)
        self.optimizer.update()

        if self.train_async:
            self.sync_parameters()
        if isinstance(self.model, Recurrent):
            self.model.unchain_backward() 
Developer: chainer, Project: chainerrl, Lines of code: 27, Source: pcl.py

Example 2: accumulate_grad

# Required import: from chainer import functions [as alias]
# Or: from chainer.functions import squeeze [as alias]
def accumulate_grad(self):
        if self.n_backward == 0:
            self.model.cleargrads()
        # Compute losses
        losses = []
        for r_seq, log_prob_seq, ent_seq in zip(self.reward_sequences,
                                                self.log_prob_sequences,
                                                self.entropy_sequences):
            assert len(r_seq) - 1 == len(log_prob_seq) == len(ent_seq)
            # Convert rewards into returns (=sum of future rewards)
            R_seq = np.cumsum(list(reversed(r_seq[1:])))[::-1]
            for R, log_prob, entropy in zip(R_seq, log_prob_seq, ent_seq):
                loss = -R * log_prob - self.beta * entropy
                losses.append(loss)
        total_loss = chainerrl.functions.sum_arrays(losses)
        # When self.batchsize is future.types.newint.newint, dividing a
        # Variable with it will raise an error, so it is manually converted to
        # float here.
        total_loss /= float(self.batchsize)
        F.squeeze(total_loss).backward()
        self.reward_sequences = [[]]
        self.log_prob_sequences = [[]]
        self.entropy_sequences = [[]]
        self.n_backward += 1 
Developer: chainer, Project: chainerrl, Lines of code: 26, Source: reinforce.py
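The core step in this example is converting per-step rewards into returns. The following small sketch (my own, not from the repository) runs the reversed cumulative sum on a concrete reward sequence:

import numpy as np

r_seq = [0.0, 1.0, 2.0, 3.0]  # r_seq[0] is skipped, as in accumulate_grad above
R_seq = np.cumsum(list(reversed(r_seq[1:])))[::-1]
print(R_seq)                  # [6. 5. 3.], i.e. R_t = sum of rewards from step t onward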

Example 3: __call__

# Required import: from chainer import functions [as alias]
# Or: from chainer.functions import squeeze [as alias]
def __call__(self, ty_args, ty_kwargs):
        x_type, = ty_args

        self.axis, lacks_axis = get_kwarg(ty_kwargs, 'axis', None)
        if isinstance(self.axis, int):
            self.axis = (self.axis,)

        if is_incomplete_shape(x_type.shape):
            # TODO: use ty_kwargs['axis'].size()
            if lacks_axis or self.axis is None:
                assert False, "chainer.functions.squeeze: cannot guess ndim of return type"

        self.check_type_forward(make_multiple_tc_variable(ty_args, ('x',)))

        if self.axis is not None:
            for i in self.axis:
                assert x_type.shape[i] == 1, "chainer.functions.squeeze: invalid axis"
        return self.infer_return(x_type) 
Developer: pfnet-research, Project: chainer-compiler, Lines of code: 20, Source: chainer_functions.py

Example 4: __call__

# Required import: from chainer import functions [as alias]
# Or: from chainer.functions import squeeze [as alias]
def __call__(self, x, z, ze, mask, conv_mask):
        att_scale = self.xp.sum(
            mask, axis=2, keepdims=True)[:, None, :, :] ** 0.5
        pad = self.xp.zeros(
            (x.shape[0], x.shape[1], self.width - 1, 1), dtype=x.dtype)
        base_x = x
        z = F.squeeze(z, axis=3)
        # Note: the handling of the input, output, and attention results here
        # follows the authors' code, which looks slightly different from what
        # the paper describes.
        for conv_name, preatt_name in zip(self.conv_names, self.preatt_names):
            # Calculate Output of GLU
            out = getattr(self, conv_name)(
                F.concat([pad, x], axis=2), conv_mask)
            # Calculate Output of Attention using Output of GLU
            preatt = seq_linear(getattr(self, preatt_name), out)
            query = base_x + preatt
            query = F.squeeze(query, axis=3)
            c = self.attend(query, z, ze, mask) * att_scale
            # Merge Them in Residual Calculation and Scaling
            x = (x + (c + out) * scale05) * scale05

        return x 
Developer: soskek, Project: convolutional_seq2seq, Lines of code: 25, Source: net.py

Example 5: attend

# Required import: from chainer import functions [as alias]
# Or: from chainer.functions import squeeze [as alias]
def attend(self, encoded_features):
        self.out_lstm.reset_state()
        transformed_encoded_features = F.concat([F.expand_dims(self.transform_encoded_features(feature), axis=1) for feature in encoded_features], axis=1)
        concat_encoded_features = F.concat([F.expand_dims(e, axis=1) for e in encoded_features], axis=1)

        lstm_output = self.xp.zeros_like(encoded_features[0])
        outputs = []
        for _ in range(self.num_labels):
            transformed_lstm_output = self.transform_out_lstm_feature(lstm_output)
            attended_feats = []
            for transformed_encoded_feature in F.separate(transformed_encoded_features, axis=1):
                attended_feat = transformed_encoded_feature + transformed_lstm_output
                attended_feat = F.tanh(attended_feat)
                attended_feats.append(self.generate_attended_feat(attended_feat))

            attended_feats = F.concat(attended_feats, axis=1)
            alphas = F.softmax(attended_feats, axis=1)

            lstm_input_feature = F.batch_matmul(alphas, concat_encoded_features, transa=True)
            lstm_input_feature = F.squeeze(lstm_input_feature, axis=1)
            lstm_output = self.out_lstm(lstm_input_feature)
            outputs.append(lstm_output)
        return outputs 
Developer: Bartzi, Project: see, Lines of code: 25, Source: fsns.py
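To make the shape flow of the attention-weighted sum explicit, here is a hedged sketch with assumed sizes (B batch items, T encoded features of C channels); it is not taken from the repository. F.batch_matmul treats a 2-D first argument of shape (B, T) as B column vectors of shape (T, 1), so transa=True yields (B, 1, T) x (B, T, C) = (B, 1, C), and F.squeeze(..., axis=1) produces the (B, C) input fed to the LSTM.

import numpy as np
import chainer.functions as F

B, T, C = 4, 6, 8
alphas = np.random.rand(B, T).astype(np.float32)    # attention weights
feats = np.random.rand(B, T, C).astype(np.float32)  # concatenated encoded features
ctx = F.batch_matmul(alphas, feats, transa=True)    # shape (B, 1, C)
print(F.squeeze(ctx, axis=1).shape)                 # (4, 8), i.e. (B, C)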

Example 6: decode_predictions

# Required import: from chainer import functions [as alias]
# Or: from chainer.functions import squeeze [as alias]
def decode_predictions(self, predictions):
        # concat all individual predictions and slice for each time step
        predictions = F.concat([F.expand_dims(p, axis=0) for p in predictions], axis=0)

        words = []
        with cuda.get_device_from_array(predictions.data):
            for prediction in F.separate(predictions, axis=0):
                prediction = F.squeeze(prediction, axis=0)
                prediction = F.softmax(prediction, axis=1)
                prediction = self.xp.argmax(prediction.data, axis=1)
                word = self.loss_metrics.strip_prediction(prediction[self.xp.newaxis, ...])[0]
                if len(word) == 1 and word[0] == 0:
                    return ''

                word = "".join(map(self.loss_metrics.label_to_char, word))
                word = word.replace(chr(self.loss_metrics.char_map[str(self.loss_metrics.blank_symbol)]), '')
                words.append(word)

        text = " ".join(words)
        return text 
Developer: Bartzi, Project: see, Lines of code: 22, Source: svhn_bbox_plotter.py

Example 7: __call__

# Required import: from chainer import functions [as alias]
# Or: from chainer.functions import squeeze [as alias]
def __call__(self, x, *args):
        """
           Args:
               x (ndarray): Shape is (Batch * K, 7, t).
                            each set has (xi, yi, zi, ri, xi −vx, yi −vy, zi −vz).
                             vx, vy, vz are the local means within each voxel.
           Return:
               y (ndarray): Shape is (Batch * K, 128)
        """
        n_batch, n_channels, n_points = x.shape
        # mask = F.max(x, axis=(1, 2), keepdims=True).data != 0
        mask = F.max(x, axis=1, keepdims=True).data != 0
        active_length = 0 #mask.sum()

        # Convolution1D -> BN -> relu -> pool -> concat
        h = F.relu(self.bn1(self.conv1(x), active_length, mask))
        global_feat = F.max_pooling_nd(h, n_points)
        # Shape is (Batch, channel, points)
        global_feat_expand = F.tile(global_feat, (1, 1, n_points))
        h = F.concat((h, global_feat_expand))
        h *= mask

        h = self.conv2(h)
        return F.squeeze(F.max_pooling_nd(h, n_points)) 
Developer: yukitsuji, Project: voxelnet_chainer, Lines of code: 26, Source: light_voxelnet.py
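A short shape sketch with assumed sizes (not from the repository): pooling over the full point dimension collapses (B, C, N) to (B, C, 1), and the final F.squeeze drops the singleton axis to yield the per-voxel feature of shape (B, C).

import numpy as np
import chainer.functions as F

B, C, N = 2, 128, 35
h = np.random.rand(B, C, N).astype(np.float32)
pooled = F.max_pooling_nd(h, N)  # (B, C, 1)
print(F.squeeze(pooled).shape)   # (2, 128), i.e. (B, C)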

Example 8: compute_loss_with_kl_constraint

# Required import: from chainer import functions [as alias]
# Or: from chainer.functions import squeeze [as alias]
def compute_loss_with_kl_constraint(distrib, another_distrib, original_loss,
                                    delta):
    """Compute loss considering a KL constraint.

    Args:
        distrib (Distribution): Distribution to optimize
        another_distrib (Distribution): Distribution used to compute KL
        original_loss (chainer.Variable): Loss to minimize
        delta (float): Minimum KL difference
    Returns:
        loss (chainer.Variable)
    """
    for param in distrib.params:
        assert param.shape[0] == 1
        assert param.requires_grad
    # Compute g: a direction to minimize the original loss
    g = [grad.array[0] for grad in
         chainer.grad([F.squeeze(original_loss)], distrib.params)]

    # Compute k: a direction to increase KL div.
    kl = F.squeeze(another_distrib.kl(distrib))
    k = [grad.array[0] for grad in
         chainer.grad([-kl], distrib.params)]

    # Compute z: combination of g and k to keep small KL div.
    kg_dot = sum(np.dot(kp.ravel(), gp.ravel())
                 for kp, gp in zip(k, g))
    kk_dot = sum(np.dot(kp.ravel(), kp.ravel()) for kp in k)
    if kk_dot > 0:
        k_factor = max(0, ((kg_dot - delta) / kk_dot))
    else:
        k_factor = 0
    z = [gp - k_factor * kp for kp, gp in zip(k, g)]
    loss = 0
    for p, zp in zip(distrib.params, z):
        loss += F.sum(p * zp)
    return F.reshape(loss, original_loss.shape), float(kl.array) 
Developer: chainer, Project: chainerrl, Lines of code: 39, Source: acer.py
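As a reading aid, the gradient combination computed above can be written as the trust-region projection used in ACER (my own restatement in LaTeX, not taken from the source docstring), where g is the gradient of the original loss and k the gradient of the KL divergence with respect to the distribution parameters:

z = g - \max\!\left(0,\ \frac{k^{\top} g - \delta}{\lVert k \rVert^{2}}\right) k

The returned loss \sum_{p} F.sum(p \cdot z_p) is constructed so that its gradient with respect to each parameter p is exactly z_p.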

Example 9: update

# Required import: from chainer import functions [as alias]
# Or: from chainer.functions import squeeze [as alias]
def update(self, t_start, t_stop, R, states, actions, rewards, values,
               action_values, action_distribs, action_distribs_mu,
               avg_action_distribs):

        assert np.isscalar(R)

        total_loss = self.compute_loss(
            t_start=t_start,
            t_stop=t_stop,
            R=R,
            states=states,
            actions=actions,
            rewards=rewards,
            values=values,
            action_values=action_values,
            action_distribs=action_distribs,
            action_distribs_mu=action_distribs_mu,
            avg_action_distribs=avg_action_distribs)

        # Compute gradients using thread-specific model
        self.model.cleargrads()
        F.squeeze(total_loss).backward()
        # Copy the gradients to the globally shared model
        copy_param.copy_grad(
            target_link=self.shared_model, source_link=self.model)
        # Update the globally shared model
        if self.process_idx == 0:
            norm = sum(np.sum(np.square(param.grad))
                       for param in self.optimizer.target.params()
                       if param.grad is not None)
            self.logger.debug('grad norm:%s', norm)
        self.optimizer.update()

        self.sync_parameters()
        if isinstance(self.model, Recurrent):
            self.model.unchain_backward() 
Developer: chainer, Project: chainerrl, Lines of code: 38, Source: acer.py

Example 10: forward

# Required import: from chainer import functions [as alias]
# Or: from chainer.functions import squeeze [as alias]
def forward(self, x):
        return F.squeeze(x, 1) 
Developer: pfnet-research, Project: chainer-compiler, Lines of code: 4, Source: Squeeze.py

Example 11: test_squeeze

# Required import: from chainer import functions [as alias]
# Or: from chainer.functions import squeeze [as alias]
def test_squeeze(self):
        class Test():
            def forward(self):
                F.squeeze(np.zeros((2, 1, 1, 3)))
                F.squeeze(np.zeros((2, 1, 1, 3)), axis=2)
                F.squeeze(np.zeros((2, 1, 1, 3)), axis=(1,2))

        id2type = generate_id2type_from_forward(Test(), ())

        self.assertEqual(str(id2type[1]), "class Test -> NoneType")	# FunctionDef forward (line 1)
        self.assertEqual(str(id2type[5]), "NoneType")	# Expr
        self.assertEqual(str(id2type[6]), "Variable(float64, (2, 3))")	# Call F.squeeze(np.zeros((2, 1, 1, 3))) (line 2)
        self.assertEqual(str(id2type[11]), "ndarray(float64, (2, 1, 1, 3))")	# Call np.zeros((2, 1, 1, 3)) (line 2)
        self.assertEqual(str(id2type[16]), "(int, int, int, int)")	# Tuple (2, 1, 1, 3) (line 2)
        self.assertEqual(str(id2type[17]), "int")	# Num 2 (line 2)
        self.assertEqual(str(id2type[18]), "int")	# Num 1 (line 2)
        self.assertEqual(str(id2type[19]), "int")	# Num 1 (line 2)
        self.assertEqual(str(id2type[20]), "int")	# Num 3 (line 2)
        self.assertEqual(str(id2type[22]), "NoneType")	# Expr
        self.assertEqual(str(id2type[23]), "Variable(float64, (2, 1, 3))")	# Call F.squeeze(np.zeros((2, 1, 1, 3)), axis=2) (line 3)
        self.assertEqual(str(id2type[28]), "ndarray(float64, (2, 1, 1, 3))")	# Call np.zeros((2, 1, 1, 3)) (line 3)
        self.assertEqual(str(id2type[33]), "(int, int, int, int)")	# Tuple (2, 1, 1, 3) (line 3)
        self.assertEqual(str(id2type[34]), "int")	# Num 2 (line 3)
        self.assertEqual(str(id2type[35]), "int")	# Num 1 (line 3)
        self.assertEqual(str(id2type[36]), "int")	# Num 1 (line 3)
        self.assertEqual(str(id2type[37]), "int")	# Num 3 (line 3)
        self.assertEqual(str(id2type[40]), "int")	# Num 2 (line 3)
        self.assertEqual(str(id2type[41]), "NoneType")	# Expr
        self.assertEqual(str(id2type[42]), "Variable(float64, (2, 3))")	# Call F.squeeze(np.zeros((2, 1, 1, 3)), axis=(1, 2)) (line 4)
        self.assertEqual(str(id2type[47]), "ndarray(float64, (2, 1, 1, 3))")	# Call np.zeros((2, 1, 1, 3)) (line 4)
        self.assertEqual(str(id2type[52]), "(int, int, int, int)")	# Tuple (2, 1, 1, 3) (line 4)
        self.assertEqual(str(id2type[53]), "int")	# Num 2 (line 4)
        self.assertEqual(str(id2type[54]), "int")	# Num 1 (line 4)
        self.assertEqual(str(id2type[55]), "int")	# Num 1 (line 4)
        self.assertEqual(str(id2type[56]), "int")	# Num 3 (line 4)
        self.assertEqual(str(id2type[59]), "(int, int)")	# Tuple (1, 2) (line 4)
        self.assertEqual(str(id2type[60]), "int")	# Num 1 (line 4)
        self.assertEqual(str(id2type[61]), "int")	# Num 2 (line 4) 
Developer: pfnet-research, Project: chainer-compiler, Lines of code: 40, Source: ExtFunctions_test.py

Example 12: __call__

# Required import: from chainer import functions [as alias]
# Or: from chainer.functions import squeeze [as alias]
def __call__(self, x):
        x = self.encode(x)
        x = F.sum(x, axis=0) / x.shape[0]
        return F.squeeze(x) 
Developer: wuhuikai, Project: GP-GAN, Lines of code: 6, Source: model.py

Example 13: update

# Required import: from chainer import functions [as alias]
# Or: from chainer.functions import squeeze [as alias]
def update(Q, target_Q, policy, target_policy, opt_Q, opt_policy,
           samples, gamma=0.99):
    """Update a Q-function and a policy."""
    dtype = chainer.get_dtype()
    xp = Q.xp
    obs = xp.asarray([sample[0] for sample in samples], dtype=dtype)
    action = xp.asarray([sample[1] for sample in samples], dtype=dtype)
    reward = xp.asarray([sample[2] for sample in samples], dtype=dtype)
    done = xp.asarray([sample[3] for sample in samples], dtype=dtype)
    obs_next = xp.asarray([sample[4] for sample in samples], dtype=dtype)

    def update_Q():
        # Predicted values: Q(s,a)
        y = F.squeeze(Q(obs, action), axis=1)
        # Target values: r + gamma * Q(s,policy(s))
        with chainer.no_backprop_mode():
            next_q = F.squeeze(target_Q(obs_next, target_policy(obs_next)),
                               axis=1)
            target = reward + gamma * (1 - done) * next_q
        loss = F.mean_squared_error(y, target)
        Q.cleargrads()
        loss.backward()
        opt_Q.update()

    def update_policy():
        # Maximize Q(s,policy(s))
        q = Q(obs, policy(obs))
        q = q[:]  # Avoid https://github.com/chainer/chainer/issues/2744
        loss = - F.mean(q)
        policy.cleargrads()
        loss.backward()
        opt_policy.update()

    update_Q()
    update_policy() 
Developer: chainer, Project: chainer, Lines of code: 37, Source: ddpg_pendulum.py
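For reference (my own restatement in LaTeX, not from the example), update_Q above minimizes the squared Bellman error against the target networks, and update_policy performs the deterministic policy-gradient step by maximizing the critic, both averaged over the sampled batch:

y = r + \gamma\,(1 - d)\, Q_{\mathrm{target}}\!\left(s',\ \mu_{\mathrm{target}}(s')\right), \qquad
L_{Q} = \mathbb{E}\!\left[\left(Q(s, a) - y\right)^{2}\right], \qquad
L_{\mu} = -\,\mathbb{E}\!\left[Q\!\left(s, \mu(s)\right)\right]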

Example 14: query

# Required import: from chainer import functions [as alias]
# Or: from chainer.functions import squeeze [as alias]
def query(self, u):
        xp = backend.get_array_module(u)
        size = self.m.shape[1]
        inds = xp.arange(size - 1, -1, -1, dtype=numpy.int32)
        tm = self.TA(inds)
        tc = self.TC(inds)
        tm = F.broadcast_to(tm, self.m.shape)
        tc = F.broadcast_to(tc, self.c.shape)
        p = F.softmax(F.matmul(self.m + tm, F.expand_dims(u, -1)))
        o = F.matmul(F.swapaxes(self.c + tc, 2, 1), p)
        o = F.squeeze(o, -1)
        u = o + u
        return u 
Developer: chainer, Project: chainer, Lines of code: 15, Source: memnn.py

Example 15: forward_expected

# Required import: from chainer import functions [as alias]
# Or: from chainer.functions import squeeze [as alias]
def forward_expected(self, inputs):
        x, = inputs
        y = numpy.squeeze(x, axis=self.axis)
        return y, 
Developer: chainer, Project: chainer, Lines of code: 6, Source: test_squeeze.py
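The expected output above relies on numpy.squeeze and F.squeeze agreeing for a given axis. A minimal check of that equivalence (my own sketch with an assumed shape and axis, not part of the test suite):

import numpy
import chainer.functions as F

x = numpy.zeros((2, 1, 3), dtype=numpy.float32)
numpy.testing.assert_array_equal(
    F.squeeze(x, axis=1).array,  # chainer result, unwrapped to an ndarray
    numpy.squeeze(x, axis=1))    # numpy reference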


Note: The chainer.functions.squeeze method examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets were selected from open-source projects contributed by their respective authors, who retain copyright over the source code; please refer to each project's license before distributing or using it. Do not reproduce this article without permission.