

Python cuda.get_device_from_array Method Code Examples

This article collects typical usage examples of the Python method chainer.cuda.get_device_from_array. If you are wondering what cuda.get_device_from_array does and how it is used in practice, the curated code examples below should help. You can also explore further usage examples from the containing module, chainer.cuda.


The following presents 15 code examples of cuda.get_device_from_array, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better Python code examples.
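
Before diving into the examples, here is a minimal usage sketch (not taken from any of the projects below) showing what cuda.get_device_from_array returns and how it is typically used as a context manager; the array name x is purely illustrative:

import numpy as np
from chainer import cuda

x = np.zeros((2, 3), dtype=np.float32)

# For a NumPy array this returns Chainer's dummy device, whose id is -1;
# for a CuPy array it returns the GPU device the array is allocated on.
device = cuda.get_device_from_array(x)
print(device.id)  # -1 on the CPU

# The returned device also works as a context manager, so code inside
# the block runs on (and allocates memory on) the array's device.
with cuda.get_device_from_array(x):
    y = x + 1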

Example 1: update

# Required import: from chainer import cuda [as alias]
# Or: from chainer.cuda import get_device_from_array [as alias]
def update(self, s, i):
        """Update decoder state

        Args:
            s (any): Current (hidden, cell) states. If ``None`` is specified,
                     a zero vector is used.
            i (int): Input label.
        Returns:
            (~chainer.Variable): Updated decoder state.
        """
        # Guard against ``s`` being ``None`` before inspecting its device.
        if s is not None and cuda.get_device_from_array(s[0].data).id >= 0:
            xp = cuda.cupy
        else:
            xp = np

        v = chainer.Variable(xp.array([i], dtype=np.int32))
        x = self.embed(v)
        if s is not None:
            hy, cy, dy = self.lstm(s[0], s[1], [x])
        else:
            hy, cy, dy = self.lstm(None, None, [x])

        return hy, cy, dy 
Developer: dialogtekgeek, Project: DSTC6-End-to-End-Conversation-Modeling, Lines: 25, Source: lstm_decoder.py
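
Incidentally, the explicit device-id check in this example can usually be written more compactly with chainer.cuda.get_array_module, which several of the later examples use; a minimal sketch (the array h is a stand-in for s[0].data, not part of the original code):

import numpy as np
from chainer import cuda

h = np.zeros((1, 4), dtype=np.float32)  # stand-in for a decoder state array
xp = cuda.get_array_module(h)           # numpy here, cupy for GPU arrays
v = xp.array([3], dtype=np.int32)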

Example 2: calc_loss

# Required import: from chainer import cuda [as alias]
# Or: from chainer.cuda import get_device_from_array [as alias]
def calc_loss(self, x, t):
        batch_predictions, _, grids = x
        self.xp = cuda.get_array_module(batch_predictions, t)

        loss = self.calc_actual_loss(batch_predictions, None, t)

        # reshape grids
        batch_size = t.shape[0]
        grids = grids[-1]
        grid_shape = grids.shape
        grids = F.reshape(grids, (-1, batch_size) + grid_shape[1:])

        grid_losses = []
        for grid in F.separate(grids, axis=0):
            with cuda.get_device_from_array(getattr(grid, 'data', grid[0].data)):
                grid_losses.append(self.calc_direction_loss(grid))

        return loss + (sum(grid_losses) / len(grid_losses)) 
Developer: Bartzi, Project: see, Lines: 20, Source: textrec_metrics.py

Example 3: decode_predictions

# Required import: from chainer import cuda [as alias]
# Or: from chainer.cuda import get_device_from_array [as alias]
def decode_predictions(self, predictions):
        # concat all individual predictions and slice for each time step
        predictions = F.concat([F.expand_dims(p, axis=0) for p in predictions], axis=0)

        words = []
        with cuda.get_device_from_array(predictions.data):
            for prediction in F.separate(predictions, axis=0):
                prediction = F.squeeze(prediction, axis=0)
                prediction = F.softmax(prediction, axis=1)
                prediction = self.xp.argmax(prediction.data, axis=1)
                word = self.loss_metrics.strip_prediction(prediction[self.xp.newaxis, ...])[0]
                if len(word) == 1 and word[0] == 0:
                    return ''

                word = "".join(map(self.loss_metrics.label_to_char, word))
                word = word.replace(chr(self.loss_metrics.char_map[str(self.loss_metrics.blank_symbol)]), '')
                words.append(word)

        text = " ".join(words)
        return text 
Developer: Bartzi, Project: see, Lines: 22, Source: svhn_bbox_plotter.py

Example 4: sum_sqnorm

# Required import: from chainer import cuda [as alias]
# Or: from chainer.cuda import get_device_from_array [as alias]
def sum_sqnorm(arr):
    """Calculate the norm of the array.

    Args:
        arr (numpy.ndarray)

    Returns:
        Float: Sum of the norm calculated from the given array.

    """
    sq_sum = collections.defaultdict(float)
    for x in arr:
        with cuda.get_device_from_array(x) as dev:
            if x is not None:
                x = x.ravel()
                s = x.dot(x)
                sq_sum[int(dev)] += s
    return sum([float(i) for i in six.itervalues(sq_sum)]) 
Developer: espnet, Project: espnet, Lines: 20, Source: training.py
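
For context, a hedged usage sketch of sum_sqnorm (assuming the function above is in scope together with its collections and six imports; the model and data below are illustrative, not taken from espnet):

import numpy as np
import chainer
import chainer.functions as F
import chainer.links as L

model = L.Linear(3, 2)
x = np.random.rand(1, 3).astype(np.float32)

model.cleargrads()
loss = F.sum(model(x))
loss.backward()

# Total L2 norm of all gradients, e.g. for logging or gradient clipping.
grad_norm = np.sqrt(sum_sqnorm([p.grad for p in model.params()]))
print('gradient norm:', grad_norm)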

Example 5: init_state

# Required import: from chainer import cuda [as alias]
# Or: from chainer.cuda import get_device_from_array [as alias]
def init_state(self, param):
        xp = cuda.get_array_module(param.array)
        with cuda.get_device_from_array(param.array):
            self.state['ms'] = xp.zeros_like(param.array) 
Developer: chainer, Project: chainerrl, Lines: 6, Source: rmsprop_async.py

Example 6: __call__

# Required import: from chainer import cuda [as alias]
# Or: from chainer.cuda import get_device_from_array [as alias]
def __call__(self, rule, param):
        if param.name == 'b':
            return
        p, g = param.array, param.grad
        if p is None or g is None:
            return
        with cuda.get_device_from_array(p) as dev:
            if int(dev) == -1:
                g += self.rate * p
            else:
                kernel = cuda.elementwise(
                    'T p, T decay', 'T g', 'g += decay * p', 'weight_decay')
                kernel(p, self.rate, g) 
Developer: chainer, Project: chainerrl, Lines: 15, Source: nonbias_weight_decay.py
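
A hedged usage sketch for this optimizer hook: the import path, the class name NonbiasWeightDecay, and its rate argument are assumed from the project name, the source file name, and the self.rate attribute used above, not confirmed against chainerrl's documentation:

import chainer
import chainer.links as L
from chainerrl.optimizers.nonbias_weight_decay import NonbiasWeightDecay

model = L.Linear(10, 2)
optimizer = chainer.optimizers.Adam()
optimizer.setup(model)
# The hook runs once per parameter and skips parameters named 'b',
# so biases are excluded from weight decay.
optimizer.add_hook(NonbiasWeightDecay(rate=1e-5))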

Example 7: compute_ctxt_demux

# Required import: from chainer import cuda [as alias]
# Or: from chainer.cuda import get_device_from_array [as alias]
def compute_ctxt_demux(self, fb_concat, mask):
        mb_size, nb_elems, Hi = fb_concat.data.shape
        assert Hi == self.Hi
        assert mb_size == 1
        assert len(mask) == 0

        precomputed_al_factor = F.reshape(self.al_lin_h(
            F.reshape(fb_concat, (mb_size * nb_elems, self.Hi))), (mb_size, nb_elems, self.Ha))

#         concatenated_mask = F.concat([F.reshape(mask_elem, (mb_size, 1)) for mask_elem in mask], 1)

        def compute_ctxt(previous_state, prev_word_embedding=None):
            current_mb_size = previous_state.data.shape[0]

            al_factor = F.broadcast_to(precomputed_al_factor, (current_mb_size, nb_elems, self.Ha))
#             used_fb_concat = F.broadcast_to(fb_concat, (current_mb_size, nb_elems, Hi))
#             used_concatenated_mask = F.broadcast_to(concatenated_mask, (current_mb_size, nb_elems))

            state_al_factor = self.al_lin_s(previous_state)
            
            # As suggested by Isao Goto
            if prev_word_embedding is not None:
                state_al_factor = state_al_factor + self.al_lin_y(prev_word_embedding)
            
            state_al_factor_bc = F.broadcast_to(F.reshape(state_al_factor, (current_mb_size, 1, self.Ha)), (current_mb_size, nb_elems, self.Ha))
            a_coeffs = F.reshape(self.al_lin_o(F.reshape(F.tanh(state_al_factor_bc + al_factor),
                                                         (current_mb_size * nb_elems, self.Ha))), (current_mb_size, nb_elems))


#             with cuda.get_device_from_array(used_concatenated_mask.data):
#                 a_coeffs = a_coeffs - 10000 * (1-used_concatenated_mask.data)

            attn = F.softmax(a_coeffs)

#             ci = F.reshape(F.batch_matmul(attn, used_fb_concat, transa = True), (current_mb_size, self.Hi))

            ci = F.reshape(F.matmul(attn, F.reshape(fb_concat, (nb_elems, Hi))), (current_mb_size, self.Hi))

            return ci, attn

        return compute_ctxt 
Developer: fabiencro, Project: knmt, Lines: 43, Source: attention.py

Example 8: naive_call

# Required import: from chainer import cuda [as alias]
# Or: from chainer.cuda import get_device_from_array [as alias]
def naive_call(self, fb_concat, targets, mask):
        compute_ctxt = self.attn_module.naive_call(fb_concat, mask)
        loss = None
        current_mb_size = targets[0].data.shape[0]
        assert current_mb_size == 1
        previous_states = self.gru.get_initial_states(current_mb_size)
#         previous_word = Variable(np.array([self.bos_idx] * mb_size, dtype = np.int32))
        # xp = cuda.get_array_module(self.gru.initial_state.data)
        with cuda.get_device_from_array(self.gru.initial_state.data):
            prev_y = F.broadcast_to(self.bos_embeding, (1, self.Eo))
#             previous_word = Variable(xp.array([self.bos_idx] * current_mb_size, dtype = np.int32))
        previous_word = None
        attn_list = []
        total_nb_predictions = 0
        for i in range(len(targets)):
            if previous_word is not None:  # else we are using the initial prev_y
                prev_y = self.emb(previous_word)
            ci, attn = compute_ctxt(previous_states[-1])
            concatenated = F.concat((prev_y, ci))
    #             print(concatenated.data.shape)
            new_states = self.gru(previous_states, concatenated)

            all_concatenated = F.concat((concatenated, new_states[-1]))
            logits = self.lin_o(self.maxo(all_concatenated))

            local_loss = F.softmax_cross_entropy(logits, targets[i])

            loss = local_loss if loss is None else loss + local_loss
            total_nb_predictions += 1
            previous_word = targets[i]
            previous_states = new_states
            attn_list.append(attn)

        loss = loss / total_nb_predictions
        return loss, attn_list 
Developer: fabiencro, Project: knmt, Lines: 37, Source: models_test.py

Example 9: calc_accuracy

# Required import: from chainer import cuda [as alias]
# Or: from chainer.cuda import get_device_from_array [as alias]
def calc_accuracy(self, x, t):
        batch_predictions, _, _ = x
        batch_predictions = F.concat([F.expand_dims(prediction, axis=0) for prediction in batch_predictions], axis=0)

        self.xp = cuda.get_array_module(batch_predictions[0], t)
        accuracies = []

        with cuda.get_device_from_array(batch_predictions.data):
            classification = F.softmax(batch_predictions, axis=2)
            classification = classification.data
            classification = self.xp.argmax(classification, axis=2)
            classification = self.xp.transpose(classification, (1, 0))

            words = self.strip_prediction(classification)
            labels = self.strip_prediction(t)

            num_correct_words = 0
            for word, label in zip(words, labels):
                word = "".join(map(self.label_to_char, word))
                label = "".join(map(self.label_to_char, label))
                if word == label:
                    num_correct_words += 1

            accuracy = num_correct_words / len(labels)
            accuracies.append(accuracy)

        overall_accuracy = sum(accuracies) / max(len(accuracies), 1)
        self.scale_area_loss_factor(overall_accuracy)
        return overall_accuracy 
Developer: Bartzi, Project: see, Lines: 31, Source: textrec_metrics.py

Example 10: calc_accuracy

# Required import: from chainer import cuda [as alias]
# Or: from chainer.cuda import get_device_from_array [as alias]
def calc_accuracy(self, x, t):
        batch_predictions, _, _ = x
        self.xp = cuda.get_array_module(batch_predictions[0], t)
        batch_size = t.shape[0]
        t = F.reshape(t, (batch_size, self.num_timesteps, -1))
        accuracies = []

        for predictions, labels in zip(batch_predictions, F.separate(t, axis=1)):
            if isinstance(predictions, list):
                predictions = F.concat([F.expand_dims(p, axis=0) for p in predictions], axis=0)
            with cuda.get_device_from_array(predictions.data):

                classification = F.softmax(predictions, axis=2)
                classification = classification.data
                classification = self.xp.argmax(classification, axis=2)
                classification = self.xp.transpose(classification, (1, 0))

                words = self.strip_prediction(classification)
                labels = self.strip_prediction(labels.data)

                num_correct_words = 0
                for word, label in zip(words, labels):
                    word = "".join(map(self.label_to_char, word))
                    label = "".join(map(self.label_to_char, label))
                    if word == label:
                        num_correct_words += 1

                accuracy = num_correct_words / len(labels)
                accuracies.append(accuracy)

        overall_accuracy = sum(accuracies) / max(len(accuracies), 1)
        self.scale_area_loss_factor(overall_accuracy)
        return overall_accuracy 
Developer: Bartzi, Project: see, Lines: 35, Source: loss_metrics.py

Example 11: calc_loss

# Required import: from chainer import cuda [as alias]
# Or: from chainer.cuda import get_device_from_array [as alias]
def calc_loss(self, x, t):
        batch_predictions, _, grids = x
        self.xp = cuda.get_array_module(batch_predictions[0], t)

        # reshape labels
        batch_size = t.shape[0]

        # reshape grids
        grid_shape = grids.shape
        if self.uses_original_data:
            grids = F.reshape(grids, (self.num_timesteps, batch_size, 4,) + grid_shape[1:])
        else:
            grids = F.reshape(grids, (self.num_timesteps, batch_size, 1,) + grid_shape[1:])
        recognition_losses = []

        for prediction, label in zip(batch_predictions, F.separate(t, axis=1)):
            recognition_loss = F.softmax_cross_entropy(prediction, label)
            recognition_losses.append(recognition_loss)

        losses = [sum(recognition_losses) / len(recognition_losses)]

        # with cuda.get_device_from_array(grids.data):
        #     grid_list = F.separate(F.reshape(grids, (self.timesteps, -1,) + grids.shape[3:]), axis=0)
        #     overlap_losses = []
        #     for grid_1, grid_2 in itertools.combinations(grid_list, 2):
        #         overlap_losses.append(self.calc_iou_loss(grid_1, grid_2))
        #     losses.append(sum(overlap_losses) / len(overlap_losses))

        for i, grid in enumerate(F.separate(grids, axis=0), start=1):
            with cuda.get_device_from_array(grid.data):
                grid_losses = []
                for sub_grid in F.separate(grid, axis=1):
                    width, height = self.get_bbox_side_lengths(sub_grid)
                    grid_losses.append(self.area_loss_factor * self.calc_area_loss(width, height))
                    grid_losses.append(self.aspect_ratio_loss_factor * self.calc_aspect_ratio_loss(width, height))
                    grid_losses.append(self.calc_direction_loss(sub_grid))
                    grid_losses.append(self.calc_height_loss(height))
                losses.append(sum(grid_losses))

        return sum(losses) / len(losses) 
Developer: Bartzi, Project: see, Lines: 42, Source: lstm_per_step_metrics.py

Example 12: calc_accuracy

# Required import: from chainer import cuda [as alias]
# Or: from chainer.cuda import get_device_from_array [as alias]
def calc_accuracy(self, predictions, labels):
        batch_predictions = predictions
        # concat all individual predictions and slice for each time step
        batch_predictions = F.concat([F.expand_dims(p, axis=2) for p in batch_predictions], axis=2)

        t = F.reshape(labels, (1, self.args.timesteps, -1))

        accuracies = []
        with cuda.get_device_from_array(batch_predictions.data):
            for prediction, label in zip(F.separate(batch_predictions, axis=0), F.separate(t, axis=2)):
                classification = F.softmax(prediction, axis=2)
                classification = classification.data
                classification = self.xp.argmax(classification, axis=2)
                # classification = self.xp.transpose(classification, (1, 0))

                words = self.strip_prediction(classification)
                labels = self.strip_prediction(label.data)

                for word, label in zip(words, labels):
                    word = "".join(map(self.label_to_char, word))
                    label = "".join(map(self.label_to_char, label))
                    if word == label:
                        self.num_correct_words += 1
                    self.num_words += 1

        return word, label 
Developer: Bartzi, Project: see, Lines: 28, Source: evaluator.py

Example 13: __call__

# Required import: from chainer import cuda [as alias]
# Or: from chainer.cuda import get_device_from_array [as alias]
def __call__(self, images):
        self.lstm.reset_state()
        self.transform_2.reset_state()

        h = self.bn0(self.conv0(images))
        h = F.average_pooling_2d(F.relu(h), 2, stride=2)

        h = self.rs1(h)
        h = F.max_pooling_2d(h, 2, stride=2)

        h = self.rs2(h)
        h = F.max_pooling_2d(h, 2, stride=2)

        h = self.rs3(h)
        # h = self.rs4(h)
        self.vis_anchor = h
        h = F.average_pooling_2d(h, 5, stride=2)

        localizations = []

        with cuda.get_device_from_array(h.data):
            # lstm_prediction = chainer.Variable(self.xp.zeros((len(images), self.lstm.state_size), dtype=h.dtype))

            for _ in range(self.num_timesteps):
                # in_feature = self.attend(h, lstm_prediction)
                in_feature = h
                lstm_prediction = F.relu(self.lstm(in_feature))
                transformed = self.transform_2(lstm_prediction)
                transformed = F.reshape(transformed, (-1, 2, 3))
                localizations.append(rotation_dropout(transformed, ratio=self.dropout_ratio))

        return F.concat(localizations, axis=0) 
Developer: Bartzi, Project: see, Lines: 34, Source: fsns.py

Example 14: localization_net

# Required import: from chainer import cuda [as alias]
# Or: from chainer.cuda import get_device_from_array [as alias]
def localization_net(self, images):
        self.lstm.reset_state()
        self.transform_2.reset_state()

        images = self.data_bn(images)
        h = F.relu(self.bn0(self.conv0(images)))
        h = F.max_pooling_2d(h, 3, stride=2, pad=1)

        h = self.rs1_1(h)
        h = self.rs1_2(h)

        h = self.rs2_1(h)
        h = self.rs2_2(h)

        h = self.rs3_1(h)
        h = self.rs3_2(h)

        # h = self.rs4_1(h)
        # h = self.rs4_2(h)

        self.localization_vis_anchor = h

        h = F.average_pooling_2d(h, 5, stride=1)

        localizations = []

        with cuda.get_device_from_array(h.data):
            for _ in range(self.num_timesteps):
                in_feature = h
                lstm_prediction = F.relu(self.lstm(in_feature))
                transformed = self.transform_2(lstm_prediction)
                transformed = F.reshape(transformed, (-1, 2, 3))
                localizations.append(rotation_dropout(transformed, ratio=self.dropout_ratio))

        return F.concat(localizations, axis=0) 
Developer: Bartzi, Project: see, Lines: 37, Source: fsns.py

Example 15: decode_predictions

# Required import: from chainer import cuda [as alias]
# Or: from chainer.cuda import get_device_from_array [as alias]
def decode_predictions(self, predictions):
        # concat all individual predictions and slice for each time step
        predictions = F.concat([F.expand_dims(prediction, axis=0) for prediction in predictions], axis=0)

        with cuda.get_device_from_array(predictions.data):
            prediction = F.squeeze(predictions, axis=1)
            classification = F.softmax(prediction, axis=1)
            classification = classification.data
            classification = self.xp.argmax(classification, axis=1)

            words = self.loss_metrics.strip_prediction(classification[self.xp.newaxis, ...])[0]
            word = "".join(map(self.loss_metrics.label_to_char, words))

        return word 
Developer: Bartzi, Project: see, Lines: 16, Source: text_rec_bbox_plotter.py


Note: The chainer.cuda.get_device_from_array examples in this article were collected by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets are taken from open-source projects contributed by various developers, and the copyright of the source code remains with the original authors. Please consult the corresponding project's license before distributing or using the code; do not reproduce without permission.