

Python tensor.inc_subtensor Method Code Examples

This article collects typical usage examples of the Python method theano.tensor.inc_subtensor. If you are wondering what tensor.inc_subtensor does or how to use it in practice, the curated examples below should help. You can also explore further usage examples from the theano.tensor module.


The following 15 code examples of the tensor.inc_subtensor method are sorted by popularity by default.
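Before diving into the examples, here is a minimal sketch (assuming a standard Theano and NumPy install) contrasting inc_subtensor, which adds an increment into a slice of a tensor, with set_subtensor, which overwrites the slice:

import numpy as np
import theano
import theano.tensor as T

x = T.dvector('x')
incremented = T.inc_subtensor(x[1:3], 10.0)  # symbolic result of x[1:3] += 10
overwritten = T.set_subtensor(x[1:3], 10.0)  # symbolic result of x[1:3] = 10
f = theano.function([x], [incremented, overwritten])
print(f(np.arange(4.0)))
# [array([ 0., 11., 12.,  3.]), array([ 0., 10., 10.,  3.])]

Neither call modifies x in place; each returns a new symbolic variable representing the updated tensor, which is what makes these ops usable inside scan steps and update dictionaries throughout the examples below.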

Example 1: ctc_update_log_p

# Required import: from theano import tensor [as alias]
# Or: from theano.tensor import inc_subtensor [as alias]
def ctc_update_log_p(skip_idxs, zeros, active, log_p_curr, log_p_prev):
    active_skip_idxs = skip_idxs[(skip_idxs < active).nonzero()]
    active_next = T.cast(T.minimum(
        T.maximum(
            active + 1,
            T.max(T.concatenate([active_skip_idxs, [-1]])) + 2 + 1
        ), log_p_curr.shape[0]), 'int32')

    common_factor = T.max(log_p_prev[:active])
    p_prev = T.exp(log_p_prev[:active] - common_factor)
    _p_prev = zeros[:active_next]
    # copy over
    _p_prev = T.set_subtensor(_p_prev[:active], p_prev)
    # previous transitions
    _p_prev = T.inc_subtensor(_p_prev[1:], _p_prev[:-1])
    # skip transitions
    _p_prev = T.inc_subtensor(_p_prev[active_skip_idxs + 2], p_prev[active_skip_idxs])
    updated_log_p_prev = T.log(_p_prev) + common_factor

    log_p_next = T.set_subtensor(
        zeros[:active_next],
        log_p_curr[:active_next] + updated_log_p_prev
    )
    return active_next, log_p_next 
Developer: lingluodlut, Project: Att-ChemdNER, Lines: 26, Source: theano_backend.py

Example 2: accuracy_instance

# Required import: from theano import tensor [as alias]
# Or: from theano.tensor import inc_subtensor [as alias]
def accuracy_instance(predictions, targets, n=[1, 2, 3, 4, 5, 10], \
        nb_classes=5, nb_samples_per_class=10, batch_size=1):
    accuracy_0 = theano.shared(np.zeros((batch_size, nb_samples_per_class), \
        dtype=theano.config.floatX))
    indices_0 = theano.shared(np.zeros((batch_size, nb_classes), \
        dtype=np.int32))
    batch_range = T.arange(batch_size)
    def step_(p, t, acc, idx):
        acc = T.inc_subtensor(acc[batch_range, idx[batch_range, t]], T.eq(p, t))
        idx = T.inc_subtensor(idx[batch_range, t], 1)
        return (acc, idx)
    (raw_accuracy, _), _ = theano.foldl(step_, sequences=[predictions.dimshuffle(1, 0), \
        targets.dimshuffle(1, 0)], outputs_info=[accuracy_0, indices_0])
    accuracy = T.mean(raw_accuracy / nb_classes, axis=0)

    return accuracy 
Developer: tristandeleu, Project: ntm-one-shot, Lines: 18, Source: metrics.py

Example 3: test_wrong_broadcast

# Required import: from theano import tensor [as alias]
# Or: from theano.tensor import inc_subtensor [as alias]
def test_wrong_broadcast(self):
        a = tt.col()
        increment = tt.vector()

        # These symbolic graphs are legitimate as long as increment has
        # exactly one element, so the mismatched shapes should fail at
        # runtime, not at compile time.
        rng = numpy.random.RandomState(utt.fetch_seed())

        def rng_randX(*shape):
            return rng.rand(*shape).astype(theano.config.floatX)

        for op in (tt.set_subtensor, tt.inc_subtensor):
            for base in (a[:], a[0]):
                out = op(base, increment)
                f = theano.function([a, increment], out)
                # This one should work
                f(rng_randX(3, 1), rng_randX(1))
                # These ones should not
                self.assertRaises(ValueError,
                                  f, rng_randX(3, 1), rng_randX(2))
                self.assertRaises(ValueError,
                                  f, rng_randX(3, 1), rng_randX(3))
                self.assertRaises(ValueError,
                                  f, rng_randX(3, 1), rng_randX(0)) 
Developer: muhanzhang, Project: D-VAE, Lines: 26, Source: test_inc_subtensor.py

Example 4: test_incsubtensor_mixed

# Required import: from theano import tensor [as alias]
# Or: from theano.tensor import inc_subtensor [as alias]
def test_incsubtensor_mixed():

    # This catches a bug that occurred when incrementing
    # a float32 tensor by a float64 tensor.
    # The result is defined to be float32, so it is OK
    # to downcast the float64 increment in order to
    # transfer it to the GPU.
    # The bug was that the optimization called GpuFromHost
    # without casting first, causing the optimization to
    # fail.
    X = tensor.fmatrix()
    Y = tensor.dmatrix()
    Z = tensor.inc_subtensor(X[0:1, 0:1], Y)
    f = theano.function([X, Y], Z, mode=mode_with_gpu)
    packed, = f.maker.fgraph.inputs[1].clients
    client, idx = packed
    print(client)
    assert isinstance(client.op, tensor.Elemwise)
    assert isinstance(client.op.scalar_op, theano.scalar.Cast)
    packed, = client.outputs[0].clients
    client, idx = packed
    assert isinstance(client.op, cuda.GpuFromHost) 
Developer: muhanzhang, Project: D-VAE, Lines: 24, Source: test_opt.py
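The dtype rule this test relies on can be checked without a GPU. A minimal sketch (assuming a standard Theano install):

import theano
import theano.tensor as tensor

# inc_subtensor keeps the dtype of the tensor being written into, so
# incrementing a float32 matrix by a float64 one yields a float32 result.
X = tensor.fmatrix()
Y = tensor.dmatrix()
Z = tensor.inc_subtensor(X[0:1, 0:1], Y)
print(Z.dtype)  # 'float32'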

Example 5: test_inc_subtensor

# Required import: from theano import tensor [as alias]
# Or: from theano.tensor import inc_subtensor [as alias]
def test_inc_subtensor():
    shared = cuda.shared_constructor
    #shared = tensor.shared
    x, y = T.fmatrices('x', 'y')
    xval = numpy.asarray([[1, 2, 3], [4, 5, 6], [7, 8, 9]],
                      dtype='float32')
    yval = numpy.asarray([[10, 10, 10], [10, 10, 10], [10, 10, 10]],
                      dtype='float32')
    expr = T.inc_subtensor(x[:, 1:3], y[:, 1:3])

    f = theano.function([x, y], expr, mode=mode_with_gpu)

    assert sum([isinstance(node.op, cuda.GpuIncSubtensor) and
                node.op.set_instead_of_inc == False
                for node in f.maker.fgraph.toposort()]) == 1
    utt.assert_allclose(f(xval, yval), [[1., 12., 13.],
                                        [4., 15., 16.], [7., 18., 19.]]) 
Developer: muhanzhang, Project: D-VAE, Lines: 19, Source: test_basic_ops.py

Example 6: adagrad_update

# Required import: from theano import tensor [as alias]
# Or: from theano.tensor import inc_subtensor [as alias]
def adagrad_update(self, cost, learning_rate, eps=1e-8):
        params = [ p if p != self.slices else self.EMB for p in self.params ]
        accumulators = [ theano.shared(numpy.zeros(p.get_value(borrow=True).shape,
                                                dtype=theano.config.floatX))
                         for p in params ]
        gparams = [ T.grad(cost, param) for param in self.params ]
        self.gparams = gparams
        updates = [ ]
        for param, gparam, acc in zip(self.params, gparams, accumulators):
            if param == self.slices:
                acc_slices = acc[self.x.flatten()]
                new_acc_slices = acc_slices + gparam**2
                updates.append( (acc, T.set_subtensor(acc_slices, new_acc_slices)) )
                updates.append( (self.EMB, T.inc_subtensor(param,
                                 - learning_rate * gparam / T.sqrt(new_acc_slices+eps))) )
            else:
                new_acc = acc + gparam**2
                updates.append( (acc, new_acc) )
                updates.append( (param, param - learning_rate * gparam /
                                    T.sqrt(new_acc + eps)) )
        return updates 
Developer: taolei87, Project: text_convnet, Lines: 23, Source: model.py

Example 7: rmsprop

# Required import: from theano import tensor [as alias]
# Or: from theano.tensor import inc_subtensor [as alias]
def rmsprop(self, param, grad, updates, sample_idx=None, epsilon=1e-6):
        v1 = np.float32(self.adapt_params[0])
        v2 = np.float32(1.0 - self.adapt_params[0])
        acc = theano.shared(param.get_value(borrow=False) * 0., borrow=True)
        if sample_idx is None:
            acc_new = v1 * acc + v2 * grad ** 2
            updates[acc] = acc_new
        else:
            acc_s = acc[sample_idx]
            #            acc_new = v1 * acc_s + v2 * grad ** 2 #Faster, but inaccurate when an index occurs multiple times
            #            updates[acc] = T.set_subtensor(acc_s, acc_new) #Faster, but inaccurate when an index occurs multiple times
            updates[acc] = T.inc_subtensor(T.set_subtensor(acc_s, acc_s * v1)[sample_idx],
                                           v2 * grad ** 2)  # Slower, but accurate when an index occurs multiple times
            acc_new = updates[acc][sample_idx]  # Slower, but accurate when an index occurs multiple times
        gradient_scaling = T.cast(T.sqrt(acc_new + epsilon), theano.config.floatX)
        return grad / gradient_scaling 
Developer: mquad, Project: sars_tutorial, Lines: 18, Source: gru4rec.py
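The inline comments above note that the faster set_subtensor variant is inaccurate when sample_idx contains repeated indices. A minimal sketch (assuming a standard Theano and NumPy install) of the behavior the accurate variant depends on: with integer-array indexing, inc_subtensor accumulates every occurrence of a repeated index instead of keeping only the last write.

import numpy as np
import theano
import theano.tensor as T

acc = T.dvector('acc')
idx = np.array([0, 0, 1])         # index 0 occurs twice
vals = np.array([1.0, 2.0, 5.0])
accumulated = T.inc_subtensor(acc[idx], vals)
f = theano.function([acc], accumulated)
print(f(np.zeros(3)))  # [ 3.  5.  0.] -- both writes to index 0 are summed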

Example 8: create_adadelta_updates

# Required import: from theano import tensor [as alias]
# Or: from theano.tensor import inc_subtensor [as alias]
def create_adadelta_updates(updates, params, gparams, gsums, xsums,\
                                lr, eps, rho):
    for p, g, gacc, xacc in zip(params, gparams, gsums, xsums):
        if is_subtensor_op(p):
            origin, indexes = get_subtensor_op_inputs(p)
            gacc_slices = gacc[indexes]
            xacc_slices = xacc[indexes]
            new_gacc = rho * gacc_slices + (1.0-rho) * g**2
            d = -T.sqrt((xacc_slices + eps)/(new_gacc + eps)) * g
            new_xacc = rho * xacc_slices + (1.0-rho) * d**2
            updates[gacc] = T.set_subtensor(gacc_slices, new_gacc)
            updates[xacc] = T.set_subtensor(xacc_slices, new_xacc)
            updates[origin] = T.inc_subtensor(p, d)
        else:
            new_gacc = rho * gacc + (1.0-rho) * g**2
            d = -T.sqrt((xacc + eps)/(new_gacc + eps)) * g
            new_xacc = rho * xacc + (1.0-rho) * d**2
            updates[gacc] = new_gacc
            updates[xacc] = new_xacc
            updates[p] = p + d 
Developer: yuanzh, Project: aspect_adversarial, Lines: 22, Source: optimization.py

Example 9: fprop_step_mask

# Required import: from theano import tensor [as alias]
# Or: from theano.tensor import inc_subtensor [as alias]
def fprop_step_mask(self, state_below, mask, state_before, U):
        """
        Scan function for case using masks

        Parameters
        ----------
        : todo
        state_below : TheanoTensor
        """

        g_on = tensor.inc_subtensor(
            state_below[:, self.dim:],
            tensor.dot(state_before, U[:, self.dim:])
        )
        r_on = tensor.nnet.sigmoid(g_on[:, self.dim:2*self.dim])
        u_on = tensor.nnet.sigmoid(g_on[:, 2*self.dim:])

        z_t = tensor.tanh(
            g_on[:, :self.dim] +
            tensor.dot(r_on * state_before, U[:, :self.dim])
        )
        z_t = u_on * state_before + (1. - u_on) * z_t
        z_t = mask[:, None] * z_t + (1 - mask[:, None]) * state_before

        return z_t 
Developer: zchengquan, Project: TextDetector, Lines: 27, Source: rnn.py

Example 10: fprop_step

# Required import: from theano import tensor [as alias]
# Or: from theano.tensor import inc_subtensor [as alias]
def fprop_step(self, state_below, state_before, U):
        """
        Scan function for case without masks

        Parameters
        ----------
        : todo
        state_below : TheanoTensor
        """

        g_on = tensor.inc_subtensor(
            state_below[:, self.dim:],
            tensor.dot(state_before, U[:, self.dim:])
        )
        r_on = tensor.nnet.sigmoid(g_on[:, self.dim:2*self.dim])
        u_on = tensor.nnet.sigmoid(g_on[:, 2*self.dim:])

        z_t = tensor.tanh(
            g_on[:, :self.dim] +
            tensor.dot(r_on * state_before, U[:, :self.dim])
        )
        z_t = u_on * state_before + (1. - u_on) * z_t

        return z_t 
Developer: zchengquan, Project: TextDetector, Lines: 26, Source: rnn.py

Example 11: create_sgd_updates

# Required import: from theano import tensor [as alias]
# Or: from theano.tensor import inc_subtensor [as alias]
def create_sgd_updates(updates, params, gparams, gsums, lr, momentum):
    has_momentum = momentum.get_value() > 0.0
    for p, g, acc in zip(params, gparams, gsums):
        if is_subtensor_op(p):
            origin, indexes = get_subtensor_op_inputs(p)
            if has_momentum:
                acc_slices = get_similar_subtensor(acc, indexes, p)
                new_acc = acc_slices*momentum + g
                updates[acc] = T.set_subtensor(acc_slices, new_acc)
            else:
                new_acc = g
            updates[origin] = T.inc_subtensor(p, - lr * new_acc)
        else:
            if has_momentum:
                new_acc = acc*momentum + g
                updates[acc] = new_acc
            else:
                new_acc = g
            updates[p] = p - lr * new_acc 
Developer: taolei87, Project: rcnn, Lines: 21, Source: optimization.py

Example 12: create_adagrad_updates

# Required import: from theano import tensor [as alias]
# Or: from theano.tensor import inc_subtensor [as alias]
def create_adagrad_updates(updates, params, gparams, gsums, lr, eps):
    for p, g, acc in zip(params, gparams, gsums):
        if is_subtensor_op(p):
            origin, indexes = get_subtensor_op_inputs(p)
            #acc_slices = acc[indexes]
            acc_slices = get_similar_subtensor(acc, indexes, p)
            new_acc = acc_slices + g**2
            updates[acc] = T.set_subtensor(acc_slices, new_acc)
            updates[origin] = T.inc_subtensor(p, \
                    - lr * (g / T.sqrt(new_acc + eps)))
        else:
            new_acc = acc + g**2
            updates[acc] = new_acc
            updates[p] = p - lr * (g / T.sqrt(new_acc + eps))
            #updates[p] = p - lr * (g / (T.sqrt(new_acc) + eps))
            # which one to use? 
Developer: taolei87, Project: rcnn, Lines: 18, Source: optimization.py

Example 13: process

# Required import: from theano import tensor [as alias]
# Or: from theano.tensor import inc_subtensor [as alias]
def process(self, inputs):
        """
        Process a set of inputs and return the final state

        Params:
            input_words: List of input indices. Should be an int tensor of shape (n_batch, input_len)

        Returns: repr_vect, node_vects
            repr_vect: The final representation vector, of shape (n_batch, output_width)
            node_vects: Direct-access vects for each node id, of shape (n_batch, num_node_ids, output_width)
        """
        n_batch, input_len = inputs.shape
        valseq = inputs.dimshuffle([1,0])
        one_hot_vals = T.extra_ops.to_one_hot(inputs.flatten(), self._num_words)\
                    .reshape([n_batch, input_len, self._num_words])
        one_hot_valseq = one_hot_vals.dimshuffle([1,0,2])

        def scan_fn(idx_ipt, onehot_ipt, last_accum, last_state):
            # last_accum stores accumulated outputs per word type
            # and is of shape (n_batch, word_idx, output_width)
            gru_state = self._gru.step(onehot_ipt, last_state)
            new_accum = T.inc_subtensor(last_accum[T.arange(n_batch), idx_ipt, :], gru_state)
            return new_accum, gru_state

        outputs_info = [T.zeros([n_batch, self._num_words, self._output_width]), self._gru.initial_state(n_batch)]
        (all_accum, all_out), _ = theano.scan(scan_fn, sequences=[valseq, one_hot_valseq], outputs_info=outputs_info)
        
        # all_out is of shape (input_len, n_batch, self.output_width). We want last timestep
        repr_vect = all_out[-1,:,:]

        final_accum = all_accum[-1,:,:,:]
        # Now we also want to extract and accumulate the outputs that directly map to each word
        # We can do this by multiplying the final accum's second dimension (word_idx) through by
        # the word_node_matrix
        resh_flat_final_accum = final_accum.dimshuffle([0,2,1]).reshape([-1, self._num_words])
        resh_flat_node_mat = T.dot(resh_flat_final_accum, self._word_node_matrix)
        node_vects = resh_flat_node_mat.reshape([n_batch, self._output_width, self._num_node_ids]).dimshuffle([0,2,1])

        return repr_vect, node_vects 
Developer: hexahedria, Project: gated-graph-transformer-network, Lines: 41, Source: input_sequence_direct.py

Example 14: EmbeddingSGD

# Required import: from theano import tensor [as alias]
# Or: from theano.tensor import inc_subtensor [as alias]
def EmbeddingSGD(cost, embedding_matrix, lr=0.01, used_embeddings=None):
    new_values = OrderedDict()

    if used_embeddings:
        grads = T.grad(cost, wrt=used_embeddings)
        new_value = (used_embeddings,
                     T.inc_subtensor(used_embeddings, -lr * grads))
    else:
        new_values = SGD(cost, [embedding_matrix], lr)
        new_value = (embedding_matrix, new_values[embedding_matrix])

    return new_value 
Developer: stanfordnlp, Project: spinn, Lines: 14, Source: blocks.py

Example 15: test_simple_2d

# Required import: from theano import tensor [as alias]
# Or: from theano.tensor import inc_subtensor [as alias]
def test_simple_2d(self):
        """Increments or sets part of a tensor by a scalar using full slice and
        a partial slice depending on a scalar.
        """
        a = tt.dmatrix()
        increment = tt.dscalar()
        sl1 = slice(None)
        sl2_end = tt.lscalar()
        sl2 = slice(sl2_end)

        for do_set in [False, True]:

            if do_set:
                result_expr = tt.set_subtensor(a[sl1, sl2], increment)
            else:
                result_expr = tt.inc_subtensor(a[sl1, sl2], increment)

            f = theano.function([a, increment, sl2_end], result_expr)

            val_a = numpy.ones((5, 5))
            val_inc = 2.3
            val_sl2_end = 2

            result = f(val_a, val_inc, val_sl2_end)

            expected_result = numpy.copy(val_a)
            if do_set:
                expected_result[:, :val_sl2_end] = val_inc
            else:
                expected_result[:, :val_sl2_end] += val_inc

            utt.assert_allclose(result, expected_result) 
Developer: muhanzhang, Project: D-VAE, Lines: 34, Source: test_inc_subtensor.py


Note: The theano.tensor.inc_subtensor examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are selected from open-source projects contributed by many developers; copyright of the source code remains with the original authors. Consult each project's license before using or redistributing the code; do not reproduce without permission.