

Python cuda.get_array_module Method Code Examples

This article collects typical usage examples of the Python method chainer.cuda.get_array_module. If you are wondering how cuda.get_array_module is used in practice, the curated code examples below should help. You can also explore further usage examples from its containing module, chainer.cuda.


Fifteen code examples of the cuda.get_array_module method follow, sorted by popularity by default.
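For orientation, cuda.get_array_module inspects its arguments and returns numpy for CPU arrays or cupy for GPU arrays, so the same code can run on either device. A minimal sketch of this dispatch pattern (the softmax helper and sample data are illustrative, not taken from the projects below):

import numpy as np
from chainer import cuda


def stable_softmax(x):
    # xp is numpy or cupy, depending on where `x` lives
    xp = cuda.get_array_module(x)
    x = x - xp.max(x, axis=-1, keepdims=True)
    e = xp.exp(x)
    return e / xp.sum(e, axis=-1, keepdims=True)


x_cpu = np.random.randn(4, 3).astype(np.float32)
print(stable_softmax(x_cpu).sum(axis=-1))  # each row sums to ~1

# With a GPU and cupy available, the same function works unchanged:
# x_gpu = cuda.to_gpu(x_cpu)
# print(stable_softmax(x_gpu).sum(axis=-1))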

Example 1: check_forward

# Required import: from chainer import cuda [as alias]
# Or: from chainer.cuda import get_array_module [as alias]
def check_forward(self, x_data):
        xp = cuda.get_array_module(x_data)
        y = mellowmax(x_data, axis=self.axis, omega=self.omega)
        self.assertEqual(y.array.dtype, self.dtype)

        x_min = xp.min(x_data, axis=self.axis)
        x_max = xp.max(x_data, axis=self.axis)
        x_mean = xp.mean(x_data, axis=self.axis)
        print('x_min', x_min)
        print('y.array', y.array)

        # min <= mellowmax <= max
        eps = 1e-5
        self.assertTrue(xp.all(x_min <= y.array + eps))
        self.assertTrue(xp.all(x_max >= y.array - eps))

        # omega > 0 -> mellowmax is more like max
        if self.omega > 0:
            self.assertTrue(xp.all(x_mean <= y.array + eps))
        # omega < 0 -> mellowmax is more like min
        if self.omega < 0:
            self.assertTrue(xp.all(x_mean >= y.array - eps)) 
Developer: chainer, Project: chainerrl, Lines: 24, Source: test_mellowmax.py
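For context, the mellowmax operator exercised by this test is a smooth maximum. A numpy-only sketch of the quantity being bounded, assuming the standard definition mm_omega(x) = (1/omega) * log(mean(exp(omega * x))) (Asadi & Littman, 2017), which is consistent with the min/max/mean checks above:

import numpy as np


def mellowmax_np(x, omega=1.0, axis=-1):
    # (1/omega) * log(mean(exp(omega * x))) along `axis`, computed stably
    x = np.asarray(x, dtype=np.float64)
    n = x.shape[axis]
    m = np.max(omega * x, axis=axis, keepdims=True)
    lse = np.squeeze(m, axis=axis) + np.log(np.sum(np.exp(omega * x - m), axis=axis))
    return (lse - np.log(n)) / omega


x = np.array([[1.0, 2.0, 3.0]])
print(mellowmax_np(x, omega=5.0))   # approaches max(x) as omega -> +inf
print(mellowmax_np(x, omega=-5.0))  # approaches min(x) as omega -> -inf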

Example 2: bound_by_tanh

# Required import: from chainer import cuda [as alias]
# Or: from chainer.cuda import get_array_module [as alias]
def bound_by_tanh(x, low, high):
    """Bound a given value into [low, high] by tanh.

    Args:
        x (chainer.Variable): value to bound
        low (numpy.ndarray): lower bound
        high (numpy.ndarray): upper bound
    Returns: chainer.Variable
    """
    assert isinstance(x, chainer.Variable)
    assert low is not None
    assert high is not None
    xp = cuda.get_array_module(x.array)
    x_scale = (high - low) / 2
    x_scale = xp.expand_dims(xp.asarray(x_scale), axis=0)
    x_mean = (high + low) / 2
    x_mean = xp.expand_dims(xp.asarray(x_mean), axis=0)
    return F.tanh(x) * x_scale + x_mean 
Developer: chainer, Project: chainerrl, Lines: 20, Source: bound_by_tanh.py
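A small usage sketch (bounds and inputs are made up for illustration; it assumes bound_by_tanh from Example 2 is defined in the current module): the output is squeezed element-wise into [low, high].

import numpy as np
import chainer

low = np.array([-1.0, 0.0], dtype=np.float32)
high = np.array([1.0, 10.0], dtype=np.float32)
x = chainer.Variable(np.random.randn(5, 2).astype(np.float32))

y = bound_by_tanh(x, low, high)  # assumes the function above is in scope
assert np.all(y.array >= low - 1e-6) and np.all(y.array <= high + 1e-6)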

Example 3: channelwise_inhibited

# Required import: from chainer import cuda [as alias]
# Or: from chainer.cuda import get_array_module [as alias]
def channelwise_inhibited(self, h):
        self.c = random.randint(0, 2)
        xp = cuda.get_array_module(h.data)
        num = h.data.shape[0]

        h = F.split_axis(h, 3, 1)
        c = F.reshape(h[self.c], (num, 16, 16))
        z = Variable(xp.zeros_like(c.data), 'AUTO')
        c = F.batch_matmul(c, z)
        c = F.reshape(c, (num, 1, 16, 16))
        hs = []
        for i, s in enumerate(h):
            if i == self.c:
                hs.append(c)
            else:
                hs.append(s)
        return F.concat(hs, 1) 
Developer: mitmul, Project: ssai-cnn, Lines: 19, Source: MnihCNN_rcis.py

Example 4: channelwise_inhibited

# Required import: from chainer import cuda [as alias]
# Or: from chainer.cuda import get_array_module [as alias]
def channelwise_inhibited(self, h):
        xp = cuda.get_array_module(h.data)
        num = h.data.shape[0]

        h = F.split_axis(h, 3, 1)
        c = F.reshape(h[self.c], (num, 16, 16))
        z = Variable(xp.zeros_like(c.data), 'AUTO')
        c = F.batch_matmul(c, z)
        c = F.reshape(c, (num, 1, 16, 16))
        hs = []
        for i, s in enumerate(h):
            if i == self.c:
                hs.append(c)
            else:
                hs.append(s)
        return F.concat(hs, 1) 
Developer: mitmul, Project: ssai-cnn, Lines: 18, Source: MnihCNN_cis.py

Example 5: backward_gpu

# Required import: from chainer import cuda [as alias]
# Or: from chainer.cuda import get_array_module [as alias]
def backward_gpu(self, inputs, gys):
        if not self.gpu_optim:
            return self.backward_cpu(inputs, gys)
        xp = cuda.get_array_module(*inputs)
        x, gamma, beta = inputs
        gy, = gys
        g_beta = xp.sum(gy, axis=0, keepdims=True)
        g_gamma = xp.sum(gy*self.normalized, axis=0, keepdims=True)
        
        gy2 = gy*gamma
        gy_centered = gy2 - xp.mean(gy2, axis=1, keepdims=True)
        sc_prod = xp.sum(gy_centered * self.normalized, axis=1, keepdims=True)
        
        H = x.shape[1]
#         ga = backprop_scale(self.inv_norm, gy_centered, self.normalized, sc_prod/H)
        ga = cp.ElementwiseKernel(
         'T inv_norm, T gy_centered, T normalized, T sc_prod',
         'T z',
          '''
              z = inv_norm *(gy_centered - normalized * (sc_prod/%f));
         '''%H,
         'backprop_scale')(self.inv_norm, gy_centered, self.normalized, sc_prod)
        
        return ga, g_gamma, g_beta 
Developer: fabiencro, Project: knmt, Lines: 26, Source: layer_normalization.py
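The ElementwiseKernel above fuses a single per-element expression on the GPU; the commented-out backprop_scale call hints at the equivalent computation. A plain-numpy sketch of the same quantity, with shapes assumed to broadcast as in the function above:

import numpy as np


def backprop_scale_np(inv_norm, gy_centered, normalized, sc_prod, H):
    # same expression as the fused 'backprop_scale' kernel:
    # z = inv_norm * (gy_centered - normalized * (sc_prod / H))
    return inv_norm * (gy_centered - normalized * (sc_prod / H))


gy_centered = np.random.randn(4, 8)
normalized = np.random.randn(4, 8)
inv_norm = np.random.rand(4, 1)
sc_prod = np.sum(gy_centered * normalized, axis=1, keepdims=True)
print(backprop_scale_np(inv_norm, gy_centered, normalized, sc_prod, H=8).shape)  # (4, 8)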

Example 6: _batch_matmul

# Required import: from chainer import cuda [as alias]
# Or: from chainer.cuda import get_array_module [as alias]
def _batch_matmul(a, b, transa=False, transb=False, transout=False):
    a = a.reshape(a.shape[:2] + (-1,))
    b = b.reshape(b.shape[:2] + (-1,))
    trans_axis = (0, 2, 1)
    if transout:
        transa, transb = not transb, not transa
        a, b = b, a
    if transa:
        a = a.transpose(trans_axis)
    if transb:
        b = b.transpose(trans_axis)
    xp = cuda.get_array_module(a)
    if xp is numpy:
        ret = numpy.empty(a.shape[:2] + b.shape[2:], dtype=a.dtype)
        for i in six.moves.range(len(a)):
            ret[i] = numpy.dot(a[i], b[i])
        return ret
    return xp.matmul(a, b) 
Developer: fabiencro, Project: knmt, Lines: 20, Source: constant_batch_mul.py
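A quick numpy-only check of the CPU branch above (shapes chosen arbitrarily): the per-sample dot loop and matmul produce the same result, which is why the function can dispatch to either path.

import numpy as np

a = np.random.randn(4, 3, 5)
b = np.random.randn(4, 5, 2)

looped = np.empty(a.shape[:2] + b.shape[2:], dtype=a.dtype)
for i in range(len(a)):
    looped[i] = np.dot(a[i], b[i])

print(np.allclose(looped, np.matmul(a, b)))  # True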

Example 7: forward

# Required import: from chainer import cuda [as alias]
# Or: from chainer.cuda import get_array_module [as alias]
def forward(self, x, t):
        xp = cuda.get_array_module(x)
        y = self.predictor(x)
        log_softmax = F.log_softmax(y)
        # SelectItem is not supported by onnx-chainer.
        # TODO(hamaji): Support it?
        # log_prob = F.select_item(log_softmax, t)

        batch_size = chainer.Variable(xp.array(t.size, xp.float32),
                                      name='batch_size')
        self.extra_inputs = [batch_size]
        # TODO(hamaji): Currently, F.sum with axis=1 cannot be
        # backpropped properly.
        # log_prob = F.sum(log_softmax * t, axis=1)
        # return -F.sum(log_prob, axis=0) / self.batch_size
        log_prob = F.sum(log_softmax * t, axis=(0, 1))
        loss = -log_prob / batch_size
        reporter.report({'loss': loss}, self)
        if self.compute_accuracy:
            acc = accuracy.accuracy(y, xp.argmax(t, axis=1))
            reporter.report({'accuracy': acc}, self)
        loss.name = 'loss'
        return loss 
Developer: pfnet-research, Project: chainer-compiler, Lines: 25, Source: gen_mnist_mlp.py
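The loss computed above is ordinary cross-entropy written against one-hot targets t. A numpy sketch of the same quantity with toy logits and labels (not taken from the MNIST model):

import numpy as np


def log_softmax(z):
    m = z.max(axis=1, keepdims=True)
    return z - m - np.log(np.exp(z - m).sum(axis=1, keepdims=True))


logits = np.array([[2.0, 0.5, -1.0],
                   [0.1, 0.2, 0.3]], dtype=np.float32)
t_onehot = np.array([[1, 0, 0],
                     [0, 0, 1]], dtype=np.float32)

# same quantity as -F.sum(log_softmax * t, axis=(0, 1)) / batch_size above
loss = -(log_softmax(logits) * t_onehot).sum() / logits.shape[0]
print(loss)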

Example 8: listmle

# Required import: from chainer import cuda [as alias]
# Or: from chainer.cuda import get_array_module [as alias]
def listmle(x, t):
    """
    The ListMLE loss as in Xia et al (2008), Listwise Approach to Learning to
    Rank - Theory and Algorithm.

    :param x: The activation of the previous layer
    :param t: The target labels
    :return: The loss
    """

    # Get the ground truth by sorting activations by the relevance labels
    xp = cuda.get_array_module(t)
    t_hat = t[:, 0]
    x_hat = x[xp.flip(xp.argsort(t_hat), axis=0)]

    # Compute MLE loss
    final = logcumsumexp(x_hat)
    return F.sum(final - x_hat) 
Developer: rjagerman, Project: shoelace, Lines: 20, Source: listwise.py
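logcumsumexp is defined elsewhere in the shoelace package and is not shown here; assuming it computes a suffix (right-to-left) log-cumulative-sum-exp, the expression above is the usual ListMLE negative log-likelihood. A numpy sketch under that assumption, with toy scores and labels:

import numpy as np


def reverse_logcumsumexp(x):
    # out[i] = log(sum(exp(x[i:]))), computed right-to-left in a stable way
    out = np.empty_like(x)
    running = -np.inf
    for i in range(len(x) - 1, -1, -1):
        m = max(running, x[i])
        running = m + np.log(np.exp(running - m) + np.exp(x[i] - m))
        out[i] = running
    return out


scores = np.array([2.0, 0.5, 1.0])   # model activations x
labels = np.array([1.0, 0.0, 2.0])   # relevance labels t[:, 0]
order = np.flip(np.argsort(labels))  # ground-truth permutation, most relevant first
x_hat = scores[order]
loss = np.sum(reverse_logcumsumexp(x_hat) - x_hat)
print(loss)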

Example 9: _pl_sample

# Required import: from chainer import cuda [as alias]
# Or: from chainer.cuda import get_array_module [as alias]
def _pl_sample(t, α):
    """
    Sample from the Plackett-Luce distribution directly

    :param t: The target labels
    :param α: Scaling factor applied to the labels before exponentiation
    :return: A random permutation from the Plackett-Luce distribution
             parameterized by the target labels
    """
    xp = cuda.get_array_module(t)
    t = t[:, 0]

    probs = xp.exp(t * α)
    probs /= xp.sum(probs)

    # Use CPU-based numpy implementation, because cupy.random.choice with
    # replace=False does not work
    probs = cuda.to_cpu(probs)
    result = np.random.choice(probs.shape[0], probs.shape[0], replace=False,
                              p=probs)
    return xp.array(result, copy=False) 
Developer: rjagerman, Project: shoelace, Lines: 22, Source: listwise.py

Example 10: dc_loss

# Required import: from chainer import cuda [as alias]
# Or: from chainer.cuda import get_array_module [as alias]
def dc_loss(embedding, label):
    """
    Deep clustering loss function.

    Args:
      embedding: (T,D)-shaped activation values
      label: (T,C)-shaped labels
    Returns:
      (1,)-shaped squared Frobenius norm of the difference
      between embedding and label affinity matrices
    """
    xp = cuda.get_array_module(label)
    b = xp.zeros((label.shape[0], 2**label.shape[1]))
    b[np.arange(label.shape[0]),
      [int(''.join(str(x) for x in t), base=2) for t in label.data]] = 1

    label_f = chainer.Variable(b.astype(np.float32))
    loss = F.sum(F.square(F.matmul(embedding, embedding, True, False))) \
        + F.sum(F.square(F.matmul(label_f, label_f, True, False))) \
        - 2 * F.sum(F.square(F.matmul(embedding, label_f, True, False)))
    return loss 
Developer: hitachi-speech, Project: EEND, Lines: 23, Source: models.py
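The indexing line above turns each binary label row into the index of one of the 2**C possible label combinations, building a one-hot encoding over those combinations. A small numpy illustration of that encoding with toy labels (C = 2):

import numpy as np

label = np.array([[0, 0],
                  [1, 0],
                  [1, 1]])  # (T, C) binary labels, here C = 2

b = np.zeros((label.shape[0], 2 ** label.shape[1]))
idx = [int(''.join(str(x) for x in row), base=2) for row in label]
b[np.arange(label.shape[0]), idx] = 1
print(idx)  # [0, 2, 3]
print(b)    # one-hot rows over the 2**C label combinations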

Example 11: vat_loss

# Required import: from chainer import cuda [as alias]
# Or: from chainer.cuda import get_array_module [as alias]
def vat_loss(forward, distance, x, train=True, epsilon=8.0, xi=1e-6, Ip=1, p_logit=None):
    if p_logit is None:
        p_logit = forward(x, train=train, update_batch_stats=False).data  # unchain
    else:
        assert not isinstance(p_logit, Variable)

    xp = cuda.get_array_module(x.data)
    d = xp.random.normal(size=x.shape)
    d = get_normalized_vector(d, xp) 
    for ip in range(Ip):
        x_d = Variable(x.data + xi * d.astype(xp.float32))
        p_d_logit = forward(x_d, train=train, update_batch_stats=False)
        kl_loss = distance(p_logit, p_d_logit)
        kl_loss.backward()
        d = x_d.grad
        d = d / xp.sqrt(xp.sum(d ** 2, axis=range(1, len(d.shape)), keepdims=True))
    x_adv = x + epsilon * d 
    p_adv_logit = forward(x_adv, train=train, update_batch_stats=False)
    return distance(p_logit, p_adv_logit) 
Developer: takerum, Project: vat_chainer, Lines: 21, Source: loss.py
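get_normalized_vector is defined elsewhere in the vat_chainer project and is not shown here. A plausible sketch under the assumption that it rescales each sample of d to unit L2 norm over its non-batch axes, mirroring the explicit normalization inside the power-iteration loop above:

def get_normalized_vector(d, xp):
    # assumed behaviour: unit L2 norm per sample over all non-batch axes
    norm = xp.sqrt(1e-12 + xp.sum(d ** 2, axis=tuple(range(1, d.ndim)), keepdims=True))
    return d / norm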

Example 12: loss_unlabeled

# Required import: from chainer import cuda [as alias]
# Or: from chainer.cuda import get_array_module [as alias]
def loss_unlabeled(forward, x, args):
    if args.method == 'vat':
        # Virtual adversarial training loss
        logit = forward(x, train=True, update_batch_stats=False)
        return loss.vat_loss(forward, loss.distance, x, epsilon=args.epsilon, xi=XI, p_logit=logit.data)
    elif args.method == 'vatent':
        # Virtual adversarial training loss + Conditional Entropy loss
        logit = forward(x, train=True, update_batch_stats=False)
        vat_loss = loss.vat_loss(forward, loss.distance, x, epsilon=args.epsilon, xi=XI, p_logit=logit.data)
        ent_y_x = loss.entropy_y_x(logit)
        return vat_loss + ent_y_x
    elif args.method == 'baseline':
        xp = cuda.get_array_module(x.data)
        return Variable(xp.array(0, dtype=xp.float32))
    else:
        raise NotImplementedError 
Developer: takerum, Project: vat_chainer, Lines: 18, Source: train_semisup.py

Example 13: calc_loss

# Required import: from chainer import cuda [as alias]
# Or: from chainer.cuda import get_array_module [as alias]
def calc_loss(self, x, t):
        batch_predictions, _, grids = x
        self.xp = cuda.get_array_module(batch_predictions, t)

        loss = self.calc_actual_loss(batch_predictions, None, t)

        # reshape grids
        batch_size = t.shape[0]
        grids = grids[-1]
        grid_shape = grids.shape
        grids = F.reshape(grids, (-1, batch_size) + grid_shape[1:])

        grid_losses = []
        for grid in F.separate(grids, axis=0):
            with cuda.get_device_from_array(getattr(grid, 'data', grid[0].data)):
                grid_losses.append(self.calc_direction_loss(grid))

        return loss + (sum(grid_losses) / len(grid_losses)) 
Developer: Bartzi, Project: see, Lines: 20, Source: textrec_metrics.py

Example 14: calc_loss

# Required import: from chainer import cuda [as alias]
# Or: from chainer.cuda import get_array_module [as alias]
def calc_loss(self, x, t):
        batch_predictions, _, _ = x

        # concat all individual predictions and slice for each time step
        batch_predictions = F.concat([F.expand_dims(p, axis=0) for p in batch_predictions], axis=0)

        self.xp = cuda.get_array_module(batch_predictions[0], t)
        batch_size = t.shape[0]
        t = F.reshape(t, (batch_size, self.num_timesteps, -1))

        losses = []
        for predictions, labels in zip(F.separate(batch_predictions, axis=0), F.separate(t, axis=1)):
            batch_size, num_chars, num_classes = predictions.shape
            predictions = F.reshape(predictions, (batch_size * num_chars, num_classes))
            labels = F.reshape(labels, (-1,))
            losses.append(F.softmax_cross_entropy(predictions, labels))

        return sum(losses) 
Developer: Bartzi, Project: see, Lines: 20, Source: svhn_softmax_metrics.py

Example 15: backward_log_softmax

# Required import: from chainer import cuda [as alias]
# Or: from chainer.cuda import get_array_module [as alias]
def backward_log_softmax(self, x, y, gy):
        if cuda.cudnn_enabled:
            cudnn = cuda.cudnn
            libcudnn = cuda.cuda.cudnn
            _algorithm = libcudnn.CUDNN_SOFTMAX_LOG
            _mode = libcudnn.CUDNN_SOFTMAX_MODE_CHANNEL

        xp = cuda.get_array_module(x)
        if xp is not numpy and chainer.should_use_cudnn('>=auto', 3000):
            oz_dtype = 'd' if x.dtype == 'd' else 'f'
            one = numpy.array(1, dtype=oz_dtype).ctypes
            zero = numpy.array(0, dtype=oz_dtype).ctypes
            handle = cudnn.get_handle()
            gx = xp.empty(x.shape, dtype=x.dtype)
            gx_cube = gx.reshape(gx.shape[:2] + (-1, 1))
            desc = cudnn.create_tensor_descriptor(gx_cube)
            libcudnn.softmaxBackward(
                handle, _algorithm, _mode, one.data, desc.value,
                y.data.ptr, desc.value, gy.data.ptr, zero.data,
                desc.value, gx.data.ptr)
        else:
            gx = gy - xp.exp(y) * gy.sum(axis=1, keepdims=True)

        return gx 
Developer: chainer, Project: models, Lines: 26, Source: adaptive_softmax.py
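The else branch above is the closed-form log-softmax backward: gx = gy - exp(y) * sum(gy). A small numpy check of that formula against numerical differentiation (random toy inputs):

import numpy as np


def log_softmax(x):
    m = x.max(axis=1, keepdims=True)
    return x - m - np.log(np.exp(x - m).sum(axis=1, keepdims=True))


rng = np.random.default_rng(0)
x = rng.normal(size=(2, 5))
gy = rng.normal(size=(2, 5))

# analytic gradient, same formula as the non-cuDNN branch above
y = log_softmax(x)
gx = gy - np.exp(y) * gy.sum(axis=1, keepdims=True)

# numerical gradient of sum(gy * log_softmax(x))
eps = 1e-6
gx_num = np.zeros_like(x)
for idx in np.ndindex(*x.shape):
    x_plus, x_minus = x.copy(), x.copy()
    x_plus[idx] += eps
    x_minus[idx] -= eps
    gx_num[idx] = ((gy * log_softmax(x_plus)).sum()
                   - (gy * log_softmax(x_minus)).sum()) / (2 * eps)

print(np.allclose(gx, gx_num, atol=1e-5))  # True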


Note: The chainer.cuda.get_array_module examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets are selected from open-source projects contributed by many developers, and copyright of the source code remains with the original authors. Please refer to each project's license before redistributing or using the code; do not reproduce this article without permission.