

Python Connector.fprop method code examples

This article collects typical usage examples of the Python method quagga.connector.Connector.fprop. If you have been wondering what Connector.fprop does, how to call it, or what it looks like in real code, the hand-picked examples below should help. You can also explore further usage examples of quagga.connector.Connector, the class this method belongs to.


Shown below are 15 code examples of the Connector.fprop method, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
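
Before diving into the examples, here is a minimal sketch of the pattern they all share: a numpy array is wrapped in a Matrix, the Matrix is wrapped in a Connector, and calling fprop pushes the current value to every block that registered the connector as an input. The `from quagga.matrix import Matrix` import path is an assumption; only the Connector import appears verbatim in the examples below.

import numpy as np
import quagga
from quagga.matrix import Matrix        # assumed import path
from quagga.connector import Connector

quagga.processor_type = 'cpu'           # the tests below switch between 'cpu' and 'gpu'
x = np.random.rand(128, 64).astype(np.float32)
qx = Connector(Matrix.from_npa(x))      # wrap the host array for use in a block graph
# ... construct blocks that call qx.register_usage(...) here ...
qx.fprop()                              # propagate the current value to all registered users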

Example 1: RepeatBlock

# Required import: from quagga.connector import Connector [as alias]
# Or: from quagga.connector.Connector import fprop [as alias]
class RepeatBlock(object):
    def __init__(self, x, repeats, axis=None, device_id=None):
        self.context = Context(device_id)
        device_id = self.context.device_id
        self.repeats = repeats
        self.axis = axis
        learning = x.bpropagable
        if learning:
            self.x, self.dL_dx = x.register_usage(device_id, device_id)
        else:
            self.x = x.register_usage(device_id)
        if axis == 0:
            self.output = Matrix.empty(x.nrows * repeats, x.ncols, x.dtype, device_id)
        elif axis == 1:
            self.output = Matrix.empty(x.nrows, x.ncols * repeats, x.dtype, device_id)
        else:
            raise ValueError('TODO')
        self.output = Connector(self.output, device_id if learning else None)

    def fprop(self):
        self.output.assign_repeat(self.context, self.x, self.repeats, self.axis)
        self.output.fprop()

    def bprop(self):
        if hasattr(self, 'dL_dx'):
            self.dL_dx.add_repeat_derivative(self.context, self.output.backward_matrix, self.repeats, self.axis)
Author: Sandy4321, Project: quagga, Lines: 28, Source: RepeatBlock.py

Example 2: test_bprop

# Required import: from quagga.connector import Connector [as alias]
# Or: from quagga.connector.Connector import fprop [as alias]
    def test_bprop(self):
        r = []
        for i in xrange(self.N):
            repeats = self.rng.random_integers(42)
            axis = self.rng.randint(2)
            input_dim, output_dim = self.rng.random_integers(2000, size=2)
            x = self.get_normal_matrix(input_dim, output_dim)
            input_dim = input_dim if axis else input_dim * repeats
            true_labels = self.rng.randint(output_dim, size=(input_dim, 1)).astype(np.int32)
            device_id = 0

            output = {}
            for processor_type in ['gpu', 'cpu']:
                quagga.processor_type = processor_type
                qx = Connector(Matrix.from_npa(x), device_id)
                qtrue_labels = Connector(Matrix.from_npa(true_labels))
                repeat_block = RepeatBlock(qx, repeats, axis)
                sce_block = SoftmaxCeBlock(repeat_block.output, qtrue_labels)
                qx.fprop()
                qtrue_labels.fprop()
                repeat_block.fprop()
                sce_block.fprop()
                sce_block.bprop()
                repeat_block.bprop()
                output[processor_type] = qx.backward_matrix.to_host()

            r.append(np.allclose(output['gpu'], output['cpu']))

        self.assertEqual(sum(r), len(r))
Author: Sandy4321, Project: quagga, Lines: 31, Source: test_RepeatBlock.py

Example 3: ArgmaxBlock

# Required import: from quagga.connector import Connector [as alias]
# Or: from quagga.connector.Connector import fprop [as alias]
class ArgmaxBlock(object):
    """
    Determines argmax values along the specified ``axis`` in the input matrix.
    The block returns a vector (a matrix with one of its dimensions equal to 1)
    of argmax values.


    Parameters
    ----------
    x : Matrix (GpuMatrix or CpuMatrix)
        Block's input
    axis : int
        Axis along which argmax is determined
    device_id : int
        Defines the device's id on which the computation will take place

    Returns
    -------
    vector
        A vector containing argmax values (e.g. argmax for each row if axis == 1).
    """
    def __init__(self, x, axis, device_id=None):
        if axis != 1:
            raise NotImplementedError
        self.axis = axis
        self.context = Context(device_id)
        device_id = self.context.device_id

        self.x = x.register_usage(device_id)
        self.output = Connector(Matrix.empty(x.nrows, 1, x.dtype, device_id))

    def fprop(self):
        self.x.argmax(self.context, self.output, self.axis)
        self.output.fprop()
Author: Sandy4321, Project: quagga, Lines: 36, Source: ArgmaxBlock.py
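
Since no test accompanies ArgmaxBlock in this listing, here is a hedged usage sketch that follows the wiring pattern of the other examples; the `quagga.matrix` and `quagga.blocks` import paths are assumptions, not shown above.

import numpy as np
import quagga
from quagga.matrix import Matrix        # assumed import path
from quagga.connector import Connector
from quagga.blocks import ArgmaxBlock   # assumed import path

quagga.processor_type = 'cpu'
x = np.random.rand(10, 5).astype(np.float32)
qx = Connector(Matrix.from_npa(x))
argmax_block = ArgmaxBlock(qx, axis=1)  # one argmax per row
qx.fprop()
argmax_block.fprop()
print(argmax_block.output.to_host())    # column vector of row-wise argmax positions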

Example 4: test_theano_fprop_vector

# Required import: from quagga.connector import Connector [as alias]
# Or: from quagga.connector.Connector import fprop [as alias]
    def test_theano_fprop_vector(self):
        r = []
        for _ in xrange(self.N):
            embd_dim = self.rng.random_integers(10000)
            batch_size, output_dim = self.rng.random_integers(2000, size=2)
            W = self.get_orthogonal_matrix(embd_dim, output_dim)
            row_idxs = self.rng.randint(embd_dim, size=(batch_size, 1)).astype(np.int32)

            quagga.processor_type = 'gpu'
            qrow_idxs = Connector(Matrix.from_npa(row_idxs))
            qW = Connector(Matrix.from_npa(W))
            row_slicing_block = RowSlicingBlock(qW, qrow_idxs)
            qW.fprop()
            qrow_idxs.fprop()
            row_slicing_block.fprop()
            q_output = row_slicing_block.output.to_host()

            trow_idxs = T.ivector()
            row_slicing_layer = RowSlicingLayer(W)
            t_output = row_slicing_layer.get_output_expr(trow_idxs)
            t_output = theano.function([trow_idxs], t_output)(row_idxs[:, 0])

            r.append(np.allclose(q_output, t_output))

        self.assertEqual(sum(r), len(r))
Author: Sandy4321, Project: quagga, Lines: 27, Source: test_RowSlicingBlock.py

Example 5: test_bprop_vector

# Required import: from quagga.connector import Connector [as alias]
# Or: from quagga.connector.Connector import fprop [as alias]
    def test_bprop_vector(self):
        r = []
        for _ in xrange(self.N):
            embd_dim = self.rng.random_integers(10000)
            batch_size, output_dim = self.rng.random_integers(2000, size=2)
            W = self.get_orthogonal_matrix(embd_dim, output_dim)
            row_idxs = self.rng.randint(embd_dim, size=(batch_size, 1)).astype(np.int32)
            true_labels = self.rng.randint(output_dim, size=(batch_size, 1)).astype(np.int32)
            device_id = 0

            output = {}
            for processor_type in ['gpu', 'cpu']:
                quagga.processor_type = processor_type
                qrow_idxs = Connector(Matrix.from_npa(row_idxs))
                qtrue_labels = Connector(Matrix.from_npa(true_labels))
                qW = Connector(Matrix.from_npa(W), device_id)
                row_slicing_block = RowSlicingBlock(qW, qrow_idxs)
                sce_block = SoftmaxCeBlock(row_slicing_block.output, qtrue_labels)
                qW.fprop()
                qrow_idxs.fprop()
                row_slicing_block.fprop()
                sce_block.fprop()
                sce_block.bprop()
                row_slicing_block.bprop()
                qW.add(Context(), qW.backward_matrix)
                output[processor_type] = qW.to_host()

            r.append(np.allclose(output['gpu'], output['cpu']))

        self.assertEqual(sum(r), len(r))
Author: Sandy4321, Project: quagga, Lines: 32, Source: test_RowSlicingBlock.py

Example 6: test_theano_fprop_matrix

# Required import: from quagga.connector import Connector [as alias]
# Or: from quagga.connector.Connector import fprop [as alias]
    def test_theano_fprop_matrix(self):
        r = []
        for i in xrange(self.N):
            max_input_sequence_len = self.rng.random_integers(300)
            sequence_len = max_input_sequence_len if i == 0 else self.rng.random_integers(max_input_sequence_len)
            embd_dim = self.rng.random_integers(10000)
            batch_size = self.rng.random_integers(500)
            output_dim = self.rng.random_integers(2000)
            W = self.get_orthogonal_matrix(embd_dim, output_dim)
            row_idxs = self.rng.randint(embd_dim, size=(batch_size, max_input_sequence_len)).astype(np.int32)

            quagga.processor_type = 'gpu'
            qrow_idxs = Connector(Matrix.from_npa(row_idxs))
            qW = Connector(Matrix.from_npa(W))
            row_slicing_block = RowSlicingBlock(qW, qrow_idxs)
            qW.fprop()
            qrow_idxs.ncols = sequence_len
            qrow_idxs.fprop()
            row_slicing_block.fprop()
            q_output = row_slicing_block.output.to_host()

            th_row_idxs = T.imatrix()
            row_slicing_layer = RowSlicingLayer(W)
            toutput = row_slicing_layer.get_output_expr(th_row_idxs)
            th_output = theano.function([th_row_idxs], toutput)(row_idxs)

            for i in xrange(sequence_len):
                r.append(np.allclose(q_output[i], th_output[i]))

        self.assertEqual(sum(r), len(r))
Author: Sandy4321, Project: quagga, Lines: 32, Source: test_RowSlicingBlock.py

Example 7: test_fprop_matrix

# Required import: from quagga.connector import Connector [as alias]
# Or: from quagga.connector.Connector import fprop [as alias]
    def test_fprop_matrix(self):
        """
        compare `fprop` results for cpu and gpu backends
        """
        r = []
        for i in xrange(self.N):
            max_input_sequence_len = self.rng.random_integers(300)
            sequence_len = max_input_sequence_len if i == 0 else self.rng.random_integers(max_input_sequence_len)
            embd_dim = self.rng.random_integers(10000)
            batch_size, output_dim = self.rng.random_integers(2000, size=2)
            W = self.get_orthogonal_matrix(embd_dim, output_dim)
            row_idxs = self.rng.randint(embd_dim, size=(batch_size, max_input_sequence_len)).astype(np.int32)

            output = {}
            for processor_type in ['gpu', 'cpu']:
                quagga.processor_type = processor_type
                qrow_idxs = Connector(Matrix.from_npa(row_idxs))
                qW = Connector(Matrix.from_npa(W))
                row_slicing_block = RowSlicingBlock(qW, qrow_idxs)
                qW.fprop()
                qrow_idxs.ncols = sequence_len
                qrow_idxs.fprop()
                row_slicing_block.fprop()
                output[processor_type] = row_slicing_block.output.to_host()

            for output_gpu, output_cpu in izip(output['gpu'], output['cpu']):
                r.append(np.allclose(output_gpu, output_cpu))

        self.assertEqual(sum(r), len(r))
Author: Sandy4321, Project: quagga, Lines: 31, Source: test_RowSlicingBlock.py

Example 8: LastSelectorBlock

# Required import: from quagga.connector import Connector [as alias]
# Or: from quagga.connector.Connector import fprop [as alias]
class LastSelectorBlock(object):
    """
    TODO(igor).

    Parameters
    ----------
    x : Matrix (GpuMatrix or CpuMatrix)

    """
    def __init__(self, x):
        device_id = x[0].device_id
        learning = x[0].bpropagable
        self.context = Context(device_id)
        self.output = Matrix.empty_like(x[0])
        self.output = Connector(self.output, device_id if learning else None)
        if learning:
            self.x, self.dL_dx = izip(*x.register_usage(device_id, device_id))
        else:
            self.x = x.register_usage(device_id)
        self.last_idx = x.length - 1

    def fprop(self):
        self.output.assign(self.context, self.x[self.last_idx])
        self.output.fprop()

    def bprop(self):
        self.dL_dx[self.last_idx].add(self.context, self.output.backward_matrix)
Author: Sandy4321, Project: quagga, Lines: 29, Source: LastSelectorBlock.py

Example 9: test_fprop

# Required import: from quagga.connector import Connector [as alias]
# Or: from quagga.connector.Connector import fprop [as alias]
    def test_fprop(self):
        """
        compare `fprop` results for cpu and gpu backends
        """
        r = []
        for i in xrange(self.N):
            batch_size, x_dim = self.rng.random_integers(3000, size=2)
            x = self.rng.rand(batch_size, x_dim).astype(np.float32)

            for nonlinearity in ['sigmoid', 'tanh', 'relu']:
                state = self.rng.get_state()
                quagga.processor_type = 'gpu'
                x_gpu = Connector(Matrix.from_npa(x))
                nonlinearity_block = NonlinearityBlock(x_gpu, nonlinearity)
                x_gpu.fprop()
                nonlinearity_block.fprop()
                output_gpu = nonlinearity_block.output.to_host()

                self.rng.set_state(state)
                quagga.processor_type = 'cpu'
                x_cpu = Connector(Matrix.from_npa(x))
                nonlinearity_block = NonlinearityBlock(x_cpu, nonlinearity)
                x_cpu.fprop()
                nonlinearity_block.fprop()
                output_cpu = nonlinearity_block.output.to_host()

                r.append(np.allclose(output_gpu, output_cpu))

        self.assertEqual(sum(r), len(r))
Author: Sandy4321, Project: quagga, Lines: 31, Source: test_NonlinearityBlock.py

Example 10: NonlinearityBlock

# Required import: from quagga.connector import Connector [as alias]
# Or: from quagga.connector.Connector import fprop [as alias]
class NonlinearityBlock(object):
    """
    Applies a nonlinear function (``sigmoid``, ``tanh``, ``relu``) to the input.

    Parameters
    ----------
    x : Matrix (GpuMatrix or CpuMatrix)
    nonlinearity : string
    device_id : int
    """

    def __init__(self, x, nonlinearity, device_id=None):
        """


        """
        self.f_context = Context(device_id)
        device_id = self.f_context.device_id
        self.learning = x.bpropagable
        if self.learning:
            self.b_context = Context(device_id)
            self.x, self.dL_dx = x.register_usage(device_id, device_id)
            self._df_dpref = Matrix.empty_like(self.x, device_id)
        else:
            self.x = x.register_usage(device_id)
        output = Matrix.empty_like(x, device_id)
        self.output = Connector(output, device_id if self.learning else None)
        if nonlinearity == "sigmoid":
            self.f = self.x.sigmoid
        elif nonlinearity == "tanh":
            self.f = self.x.tanh
        elif nonlinearity == "relu":
            self.f = self.x.relu
        elif nonlinearity == "softmax":
            raise ValueError("For softmax nonlinearity use SoftmaxBlock!")
        else:
            raise ValueError("TODO!")
        self.training_mode = True

    @property
    def df_dpref(self):
        if self.training_mode and self.learning:
            return self._df_dpref

    def fprop(self):
        self.f(self.f_context, self.output, self.df_dpref)
        self.output.fprop()

    def bprop(self):
        if hasattr(self, "dL_dx"):
            # dL/dpref = dL/df .* df/dpref
            dL_df = self.output.backward_matrix
            self.dL_dx.add_hprod(self.b_context, dL_df, self.df_dpref)

    def set_training_mode(self):
        self.training_mode = True

    def set_testing_mode(self):
        self.training_mode = False
Author: yiiwood, Project: quagga, Lines: 61, Source: NonlinearityBlock.py

Example 11: PtbMiniBatchesGenerator

# Required import: from quagga.connector import Connector [as alias]
# Or: from quagga.connector.Connector import fprop [as alias]
class PtbMiniBatchesGenerator(object):
    def __init__(self, ptb_train, ptb_valid, batch_size, sentence_max_len, device_id):
        self.blocking_contexts = None
        self.context = Context(device_id)
        device_id = self.context.device_id
        self.train_offsets = HomogeneousDataGenerator(ptb_train, batch_size, sentence_max_len, randomize=True, infinite=True)
        self.valid_offsets = HomogeneousDataGenerator(ptb_valid, batch_size, sentence_max_len)

        train_sentences = np.array([self.train_offsets.flatten_sentences])
        valid_sentences = np.array([self.valid_offsets.flatten_sentences])
        self.train_sents = Matrix.from_npa(train_sentences, 'int', device_id)
        self.valid_sents = Matrix.from_npa(valid_sentences, 'int', device_id)
        self._sent_lengths = np.empty((batch_size, 1), dtype=np.int32, order='F')[...]
        self.sent_lengths = Matrix.from_npa(self._sent_lengths, device_id=device_id)

        sentence_batch = Matrix.empty(batch_size, sentence_max_len, 'int', device_id)
        self.sentence_batch = Connector(sentence_batch, self.context)
        self.sentence_batch.sync_fill(0)

        self._mask = Matrix.empty(sentence_batch.nrows, self.sentence_batch.ncols, 'float', device_id)
        self.mask = List([Connector(self._mask[:, i]) for i in xrange(sentence_max_len)], self.sentence_batch.ncols)
        self.train_offsets_iterator = iter(self.train_offsets)
        self.valid_offsets_iterator = iter(self.valid_offsets)
        self.training_mode = True

    def set_training_mode(self):
        self.training_mode = True

    def set_testing_mode(self):
        self.training_mode = False

    def fprop(self):
        if self.training_mode:
            offsets = next(self.train_offsets_iterator)
            sents = self.train_sents
        else:
            try:
                offsets = next(self.valid_offsets_iterator)
                sents = self.valid_sents
            except StopIteration as e:
                self.valid_offsets_iterator = iter(self.valid_offsets)
                raise e
        self.context.wait(*self.blocking_contexts)
        self._sent_lengths = self._sent_lengths.base[:len(offsets)]
        self.sentence_batch.nrows = len(offsets)
        for k, offset in enumerate(offsets):
            self.sentence_batch[k].assign(self.context, sents[:, offset[0]:offset[1]])
            self._sent_lengths[k] = offset[1] - offset[0]
        max_sent_len = int(np.max(self._sent_lengths))
        self.sentence_batch.last_modification_context = self.context
        self.sentence_batch.ncols = max_sent_len
        self.sent_lengths.assign_npa(self.context, self._sent_lengths)
        self._mask.mask_column_numbers_row_wise(self.context, self.sent_lengths)
        for e in self.mask:
            e.last_modification_context = self.context
        self.sentence_batch.fprop()
        self.mask.fprop()
Author: Sandy4321, Project: quagga, Lines: 59, Source: bidirectional_lstm.py

Example 12: MeanPoolingBlock

# Required import: from quagga.connector import Connector [as alias]
# Or: from quagga.connector.Connector import fprop [as alias]
class MeanPoolingBlock(object):
    """
    MeanPoolingBlock pools the input matrix along the specified axis. It can handle
    matrices with a varying number of columns; the number of rows is fixed.

    Parameters
    ----------
    matrix : Matrix (GpuMatrix or CpuMatrix)
    axis : int
    device_id : int
        Defines the device's id on which the computation will take place

    Returns
    -------
    """

    def __init__(self, matrix, axis=1, device_id=None):
        self.context = Context(device_id)
        self._ctype = matrix.c_dtype
        self._zero = self._ctype(0.0)
        if axis == 0:
            self._ones = Matrix.empty(1, matrix.nrows, matrix.dtype, device_id)
            self.output = Matrix.empty(1, matrix.ncols, matrix.dtype, device_id)
            self.alpha = self._ctype(1.0 / matrix.nrows)
        elif axis == 1:
            self._ones = Matrix.empty(matrix.ncols, 1, matrix.dtype, device_id)
            self.output = Matrix.empty(matrix.nrows, 1, matrix.dtype, device_id)
            self.alpha = None
        else:
            raise ValueError('Invalid axis!')
        self._ones.sync_fill(1.0)
        self.axis = axis

        if matrix.bpropagable:
            self.matrix, self.dL_dmatrix = matrix.register_usage(self.context, self.context)
            self.output = Connector(self.output, self.context, self.context)
        else:
            self.matrix = matrix.register_usage(self.context)
            self.output = Connector(self.output, self.context)

    def fprop(self):
        if self.axis == 0:
            self.output.ncols = self.matrix.ncols
            self.output.add_dot(self.context, self._ones, self.matrix, alpha=self.alpha, beta=self._zero)
        else:
            self._ones.nrows = self.matrix.ncols
            self.alpha = self._ctype(1.0 / self.matrix.ncols)
            self.output.add_dot(self.context, self.matrix, self._ones, alpha=self.alpha, beta=self._zero)
        self.output.fprop()

    def bprop(self):
        dL_doutput = self.output.backward_matrix
        dL_doutput.scale(self.context, self.alpha)
        if hasattr(self, 'dL_dmatrix'):
            self.dL_dmatrix.tile(self.context, self.axis, dL_doutput)
Author: Sandy4321, Project: quagga, Lines: 57, Source: MeanPoolingBlock.py
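
The fprop above expresses mean pooling as a matrix-vector product with a ones vector scaled by alpha = 1/n (via add_dot). A plain numpy illustration of that identity for axis == 1, independent of quagga:

import numpy as np

m = np.random.rand(4, 6).astype(np.float32)      # nrows x ncols input
ones = np.ones((m.shape[1], 1), dtype=np.float32)
alpha = 1.0 / m.shape[1]

pooled = alpha * m.dot(ones)                     # what add_dot computes for axis == 1
assert np.allclose(pooled, m.mean(axis=1, keepdims=True))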

Example 13: test_bprop

# Required import: from quagga.connector import Connector [as alias]
# Or: from quagga.connector.Connector import fprop [as alias]
    def test_bprop(self):
        """
        compare `bprop` gradients for cpu and gpu backends
        """

        r = []
        for i in xrange(self.N):
            max_input_sequence_len = self.rng.random_integers(500)
            sequence_len = max_input_sequence_len if i == 0 else self.rng.random_integers(max_input_sequence_len)
            batch_size = self.rng.random_integers(256)
            input_dim, hidden_dim = self.rng.random_integers(1500, size=2)
            x = [self.rng.randn(batch_size, input_dim).astype(np.float32) for _ in xrange(max_input_sequence_len)]
            true_labels = [self.rng.randint(hidden_dim, size=(batch_size, 1)).astype(np.int32) for _ in xrange(max_input_sequence_len)]
            W = self.get_orthogonal_matrix(input_dim, hidden_dim)
            b = self.rng.rand(1, hidden_dim).astype(np.float32)
            device_id = 0

            quagga_grads = {}
            for reverse in [False, True]:
                for with_bias in [False, True]:
                    for processor_type in ['gpu', 'cpu']:
                        quagga.processor_type = processor_type
                        qx = List([Connector(Matrix.from_npa(e), device_id) for e in x])
                        qtrue_labels = List([Connector(Matrix.from_npa(e)) for e in true_labels], len(qx))
                        qW = Connector(Matrix.from_npa(W), device_id)
                        qb = Connector(Matrix.from_npa(b), device_id) if with_bias else None
                        seq_dot_block = SequencerBlock(block_class=DotBlock,
                                                       params=[qW, qb],
                                                       sequences=[qx],
                                                       output_names=['output'],
                                                       reverse=reverse)
                        seq_sce_block = SequencerBlock(block_class=SoftmaxCeBlock,
                                                       params=[],
                                                       sequences=[seq_dot_block.output, qtrue_labels],
                                                       reverse=reverse)
                        qx.length = sequence_len
                        qx.fprop()
                        qtrue_labels.fprop()
                        qW.fprop()
                        if qb:
                            qb.fprop()
                        seq_dot_block.fprop()
                        seq_sce_block.fprop()
                        seq_sce_block.bprop()
                        seq_dot_block.bprop()
                        quagga_grads[processor_type] = [qW.backward_matrix.to_host()]
                        if with_bias:
                            quagga_grads[processor_type].append(qb.backward_matrix.to_host())
                        quagga_grads[processor_type].extend(e.backward_matrix.to_host() for e in qx)

                    for grad_gpu, grad_cpu in izip(quagga_grads['gpu'], quagga_grads['cpu']):
                        r.append(np.allclose(grad_gpu, grad_cpu, atol=1e-5))

        self.assertEqual(sum(r), len(r))
Author: Sandy4321, Project: quagga, Lines: 56, Source: test_SequentialDotBlock.py

Example 14: SoftmaxCeBlock

# Required import: from quagga.connector import Connector [as alias]
# Or: from quagga.connector.Connector import fprop [as alias]
class SoftmaxCeBlock(object):
    """
    Softmax nonlinearity with mean cross entropy loss
    """

    def __init__(self, x, true_labels, mask=None, device_id=None):
        self.context = Context(device_id)
        device_id = self.context.device_id
        if x.bpropagable:
            self.x, self.dL_dx = x.register_usage(device_id, device_id)
        else:
            self.x = x.register_usage(device_id)
        self.true_labels = true_labels.register_usage(device_id)
        if mask:
            self.mask = mask.register_usage(device_id)
        self.probs = Connector(Matrix.empty_like(self.x))
        self.loss = None

    def fprop(self):
        self.x.softmax(self.context, self.probs)
        self.probs.fprop()

    def bprop(self):
        if not hasattr(self, 'dL_dx'):
            return
        # error = (probs - true_labels) / M
        if self.true_labels.dtype == 'int':
            self.dL_dx.add_softmax_ce_derivative(self.context, self.probs, self.true_labels)
        else:
            self.dL_dx.add_scaled_subtraction(self.context, 1. / self.probs.nrows, self.probs, self.true_labels)
        if hasattr(self, 'mask'):
            self.dL_dx.hprod(self.context, self.mask)

    def calculate_loss(self, context):
        true_labels_np = self.true_labels.to_host(context)
        probs_np = self.probs.to_host(context)
        if hasattr(self, 'mask'):
            mask = self.mask.to_host(context)
            context.add_callback(self._calculate_ce_loss, true_labels_np, probs_np, mask)
        else:
            context.add_callback(self._calculate_ce_loss, true_labels_np, probs_np)

    def _calculate_ce_loss(self, true_labels_np, probs_np, mask=None):
        if self.true_labels.dtype == 'int':
            idxs = range(probs_np.shape[0]), true_labels_np.flatten()
            logs = np.log(probs_np[idxs] + 1e-20)
        else:
            logs = np.log(np.sum(true_labels_np * probs_np, axis=1) + 1e-20)
        if mask is not None:
            logs *= mask[:, 0]
            self.loss = - np.sum(logs) / np.sum(mask)
        else:
            self.loss = - np.mean(logs)
Author: Sandy4321, Project: quagga, Lines: 55, Source: SoftmaxCeBlock.py
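
For reference, the mean cross entropy that `_calculate_ce_loss` computes for integer labels (without a mask) can be reproduced with plain numpy; the probabilities below are made-up illustration values:

import numpy as np

probs = np.array([[0.7, 0.2, 0.1],
                  [0.1, 0.8, 0.1]], dtype=np.float32)   # softmax outputs, one row per sample
true_labels = np.array([[0], [1]], dtype=np.int32)      # one class index per row

idxs = range(probs.shape[0]), true_labels.flatten()
logs = np.log(probs[idxs] + 1e-20)                      # log-probability of the true class
loss = -np.mean(logs)                                   # ~0.29, the mean cross entropy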

Example 15: DataBlock

# Required import: from quagga.connector import Connector [as alias]
# Or: from quagga.connector.Connector import fprop [as alias]
class DataBlock(object):
    def __init__(self, word_to_idx, device_id):
        self.context = Context(device_id)
        device_id = self.context.device_id
        self.word_idx = Connector(Matrix.empty(1, 1, 'int', device_id))
        self.word_to_idx = word_to_idx
        self.word = None

    def fprop(self):
        word_npa = np.zeros((1, 1), np.int32, 'F')
        word_npa[0][0] = self.word_to_idx[self.word] if self.word in self.word_to_idx else self.word_to_idx['<UNK>']
        self.word_idx.assign_npa(self.context, word_npa)
        self.word_idx.fprop()
Author: Sandy4321, Project: quagga, Lines: 15, Source: get_codes.py


Note: The quagga.connector.Connector.fprop examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets were selected from open-source projects contributed by their respective developers; copyright in the source code remains with the original authors, and redistribution and use should follow the License of the corresponding project. Do not reproduce without permission.