Python tensor.Rop Method Code Examples

This article collects typical usage examples of the Python theano.tensor.Rop method. If you are wondering how to use tensor.Rop, how it works, or what it looks like in practice, the curated code examples below may help. You can also explore further usage examples for the theano.tensor module.


The following presents 15 code examples of the tensor.Rop method, sorted by popularity by default.
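Before the examples, a minimal sketch of what tensor.Rop computes may be useful: Rop(y, x, v) builds a symbolic expression for the Jacobian of y with respect to x, right-multiplied by v (a Jacobian-vector product). The variable names below are illustrative only and are not taken from the examples that follow.

import numpy
import theano
import theano.tensor as tensor

x = tensor.vector('x')
v = tensor.vector('v')                      # direction of differentiation
y = tensor.tanh(x)                          # elementwise op, so the Jacobian is diagonal

jv = tensor.Rop(y, x, v)                    # symbolic J(x) . v
f = theano.function([x, v], jv)

xs = numpy.array([0.0, 1.0], dtype=theano.config.floatX)
vs = numpy.array([1.0, 1.0], dtype=theano.config.floatX)
print(f(xs, vs))                            # equals (1 - numpy.tanh(xs) ** 2) * vs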

Example 1: test_multiple_outputs

# Required module import: from theano import tensor [as alias]
# Or: from theano.tensor import Rop [as alias]
def test_multiple_outputs(self):
        m = tensor.matrix('m')
        v = tensor.vector('v')
        m_ = tensor.matrix('m_')
        v_ = tensor.vector('v_')

        mval = self.rng.uniform(size=(3, 7)).astype(theano.config.floatX)
        vval = self.rng.uniform(size=(7,)).astype(theano.config.floatX)
        m_val = self.rng.uniform(size=(3, 7)).astype(theano.config.floatX)
        v_val = self.rng.uniform(size=(7,)).astype(theano.config.floatX)

        rop_out1 = tensor.Rop([m, v, m + v], [m, v], [m_, v_])
        assert isinstance(rop_out1, list)
        assert len(rop_out1) == 3
        rop_out2 = tensor.Rop((m, v, m + v), [m, v], [m_, v_])
        assert isinstance(rop_out2, tuple)
        assert len(rop_out2) == 3

        all_outs = []
        for o in rop_out1, rop_out2:
            all_outs.extend(o)
        f = theano.function([m, v, m_, v_], all_outs)
        f(mval, vval, m_val, v_val) 
Developer: muhanzhang, Project: D-VAE, Lines of code: 25, Source: test_rop.py

Example 2: hessian_times_vector

# Required module import: from theano import tensor [as alias]
# Or: from theano.tensor import Rop [as alias]
def hessian_times_vector(gradient, parameter, vector, r_op=False):
    """Return an expression for the Hessian times a vector.

    Parameters
    ----------
    gradient : :class:`~tensor.TensorVariable`
        The gradient of a cost with respect to `parameter`.
    parameter : :class:`~tensor.TensorVariable`
        The parameter with respect to which to take the gradient.
    vector : :class:`~tensor.TensorVariable`
        The vector with which to multiply the Hessian.
    r_op : bool, optional
        Whether to use :func:`~tensor.gradient.Rop` or not. Defaults to
        ``False``. Which solution is fastest normally needs to be
        determined by profiling.

    """
    if r_op:
        return tensor.Rop(gradient, parameter, vector)
    return tensor.grad(tensor.sum(gradient * vector), parameter) 
Developer: rizar, Project: attention-lvcsr, Lines of code: 22, Source: theano_expressions.py
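A hypothetical usage sketch of hessian_times_vector (the toy cost and the names w and u are illustrative, not part of the original project): both formulations should return the same Hessian-vector product, so the r_op flag is purely a speed/profiling choice.

import theano
import theano.tensor as tensor

w = tensor.vector('w')                               # parameter
u = tensor.vector('u')                               # vector to multiply with the Hessian
cost = tensor.sum(w ** 4)                            # toy cost; Hessian is diag(12 * w ** 2)
g = tensor.grad(cost, w)

hv_grad = hessian_times_vector(g, w, u)              # default grad-of-dot formulation
hv_rop = hessian_times_vector(g, w, u, r_op=True)    # R-operator formulation
f = theano.function([w, u], [hv_grad, hv_rop])       # the two outputs should agree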

Example 3: __call__

# Required module import: from theano import tensor [as alias]
# Or: from theano.tensor import Rop [as alias]
def __call__(self, v, cost, parameters, damp):
        # compute Gauss-Newton Matrix right-multiplied by `v`
        Jv = T.Rop(self._s, parameters, v)
        HJv = T.grad(T.sum(T.grad(cost, self._s) * Jv), self._s,
                     consider_constant=[Jv])
        JHJv = T.grad(T.sum(HJv * self._s), parameters,
                      consider_constant=[HJv, Jv])

        # apply Tikhonov damping
        JHJv = [JHJvi + damp * vi for JHJvi, vi in zip(JHJv, v)]
        return JHJv 
Developer: muhanzhang, Project: D-VAE, Lines of code: 13, Source: test_scan_opt.py
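For context, a self-contained sketch of the same Gauss-Newton-vector-product pattern, with illustrative names (s plays the role of self._s, the model's intermediate output; the surrounding class is not shown above, so this setup is an assumption):

import numpy
import theano
import theano.tensor as T

x = T.vector('x')
params = theano.shared(numpy.ones(3, dtype=theano.config.floatX), 'params')
s = T.tanh(params * x)                       # intermediate model output
cost = T.sum(s ** 2)                         # scalar cost defined on s
v = T.vector('v')                            # vector to multiply with the Gauss-Newton matrix

Jv = T.Rop(s, params, v)                     # J . v (forward mode)
HJv = T.grad(T.sum(T.grad(cost, s) * Jv), s, consider_constant=[Jv])
JHJv = T.grad(T.sum(HJv * s), params, consider_constant=[HJv, Jv])   # J^T H J v
f = theano.function([x, v], JHJv)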

Example 4: check_nondiff_rop

# Required module import: from theano import tensor [as alias]
# Or: from theano.tensor import Rop [as alias]
def check_nondiff_rop(self, y):
        """ If your op is not differentiable(so you can't define Rop)
        test that an error is raised."""
        raised = False
        try:
            tensor.Rop(y, self.x, self.v)
        except ValueError:
            raised = True
        if not raised:
            self.fail((
                'Op did not raise an error even though the function'
                ' is not differentiable')) 
Developer: muhanzhang, Project: D-VAE, Lines of code: 14, Source: test_rop.py

Example 5: test_invalid_input

# Required module import: from theano import tensor [as alias]
# Or: from theano.tensor import Rop [as alias]
def test_invalid_input(self):
        success = False

        try:
            tensor.Rop(0., [tensor.matrix()], [tensor.vector()])
            success = True
        except ValueError:
            pass

        assert not success 
Developer: muhanzhang, Project: D-VAE, Lines of code: 12, Source: test_rop.py

Example 6: test_Rop_dot_bug_18Oct2013_Jeremiah

# Required module import: from theano import tensor [as alias]
# Or: from theano.tensor import Rop [as alias]
def test_Rop_dot_bug_18Oct2013_Jeremiah(self):
        # This test refers to a bug reported by Jeremiah Lowin on 18th Oct
        # 2013. The bug occurs when, through a dot operation, there is only
        # one differentiable path (i.e. there is no gradient with respect to
        # one of the inputs).
        x = tensor.arange(20.0).reshape([1, 20])
        v = theano.shared(numpy.ones([20]))
        d = tensor.dot(x, v).sum()
        tensor.Rop(tensor.grad(d, v), v, v) 
Developer: muhanzhang, Project: D-VAE, Lines of code: 11, Source: test_rop.py

Example 7: check_nondiff_rop

# Required module import: from theano import tensor [as alias]
# Or: from theano.tensor import Rop [as alias]
def check_nondiff_rop(self, y):
        """ If your op is not differentiable(so you can't define Rop)
        test that an error is raised."""
        raised = False
        try:
            tmp = tensor.Rop(y, self.x, self.v)
        except ValueError:
            raised = True
        if not raised:
            self.fail((
                'Op did not raise an error even though the function'
                ' is not differentiable')) 
Developer: rizar, Project: attention-lvcsr, Lines of code: 14, Source: test_rop.py

Example 8: _get_updates_for

# Required module import: from theano import tensor [as alias]
# Or: from theano.tensor import Rop [as alias]
def _get_updates_for(self, param, grad):
        D_tm1 = util.shared_like(param, 'D_ewma')
        v = self.rng.normal(param.shape)
        if self.hv_method == 'rop':
            Hv = TT.Rop(grad, param, v)
        if self.hv_method == 'lop':
            Hv = TT.Lop(grad, param, v)
        if self.hv_method == 'grad':
            Hv = TT.grad(TT.sum(grad * v), param)
        D_t = self.ewma * D_tm1 + (1 - self.ewma) * Hv * Hv
        denom = TT.sqrt(D_t) + self.epsilon
        yield D_tm1, D_t
        yield param, grad * self.learning_rate / denom 
Developer: lmjohns3, Project: downhill, Lines of code: 15, Source: adaptive.py
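As a sanity check (an assumed toy setup, not part of downhill): for a simple quadratic cost, all three hv_method choices, Rop, Lop, and the grad-of-dot trick, should give the same Hessian-vector product.

import theano
import theano.tensor as TT

w = TT.vector('w')
v = TT.vector('v')
grad = TT.grad(TT.sum(w ** 2), w)            # gradient = 2 * w, Hessian = 2 * I

hv_rop = TT.Rop(grad, w, v)                  # forward-mode Jacobian-vector product
hv_lop = TT.Lop(grad, w, v)                  # reverse-mode; equal here since H is symmetric
hv_grad = TT.grad(TT.sum(grad * v), w)       # grad-of-dot ("Pearlmutter") trick
f = theano.function([w, v], [hv_rop, hv_lop, hv_grad])   # all three equal 2 * v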

Example 9: test_theano_operator

# Required module import: from theano import tensor [as alias]
# Or: from theano.tensor import Rop [as alias]
def test_theano_operator():
    """Test the ODL->Theano operator wrapper."""
    # Define ODL operator
    matrix = np.random.rand(3, 2)
    odl_op = odl.MatrixOperator(matrix)

    # Define evaluation points
    x = [1., 2.]
    dy = [1., 2., 3.]

    # Create Theano placeholders
    x_theano = T.dvector()
    dy_theano = T.dvector()

    # Create Theano layer from odl operator
    odl_op_layer = odl.contrib.theano.TheanoOperator(odl_op)

    # Build computation graphs
    y_theano = odl_op_layer(x_theano)
    y_theano_func = theano.function([x_theano], y_theano)
    dy_theano_func = theano.function([x_theano, dy_theano],
                                     T.Rop(y_theano, x_theano, dy_theano))

    # Evaluate using Theano
    result = y_theano_func(x)
    expected = odl_op(x)

    assert all_almost_equal(result, expected)

    # Evaluate the adjoint of the derivative, called gradient in Theano
    result = dy_theano_func(x, dy)
    expected = odl_op.derivative(x).adjoint(dy)

    assert all_almost_equal(result, expected) 
Developer: odlgroup, Project: odl, Lines of code: 36, Source: theano_test.py

Example 10: test_rop_lop

# Required module import: from theano import tensor [as alias]
# Or: from theano.tensor import Rop [as alias]
def test_rop_lop():
    mx = tensor.matrix('mx')
    mv = tensor.matrix('mv')
    v = tensor.vector('v')
    y = matrix_inverse(mx).sum(axis=0)

    yv = tensor.Rop(y, mx, mv)
    rop_f = function([mx, mv], yv)

    sy, _ = theano.scan(lambda i, y, x, v: (tensor.grad(y[i], x) * v).sum(),
                        sequences=tensor.arange(y.shape[0]),
                        non_sequences=[y, mx, mv])
    scan_f = function([mx, mv], sy)

    rng = numpy.random.RandomState(utt.fetch_seed())
    vx = numpy.asarray(rng.randn(4, 4), theano.config.floatX)
    vv = numpy.asarray(rng.randn(4, 4), theano.config.floatX)

    v1 = rop_f(vx, vv)
    v2 = scan_f(vx, vv)

    assert _allclose(v1, v2), ('ROP mismatch: %s %s' % (v1, v2))

    raised = False
    try:
        tensor.Rop(
            theano.clone(y, replace={mx: break_op(mx)}),
            mx,
            mv)
    except ValueError:
        raised = True
    if not raised:
        raise Exception((
            'Op did not raise an error even though the function'
            ' is not differentiable'))

    vv = numpy.asarray(rng.uniform(size=(4,)), theano.config.floatX)
    yv = tensor.Lop(y, mx, v)
    lop_f = function([mx, v], yv)

    sy = tensor.grad((v * y).sum(), mx)
    scan_f = function([mx, v], sy)

    v1 = lop_f(vx, vv)
    v2 = scan_f(vx, vv)
    assert _allclose(v1, v2), ('LOP mismatch: %s %s' % (v1, v2)) 
Developer: muhanzhang, Project: D-VAE, Lines of code: 48, Source: test_linalg.py

Example 11: check_mat_rop_lop

# Required module import: from theano import tensor [as alias]
# Or: from theano.tensor import Rop [as alias]
def check_mat_rop_lop(self, y, out_shape):
        """ Test the Rop/Lop when input is a matrix and the output is a vector

        :param y: the output variable of the op applied to self.mx
        :param out_shape: Used to generate a random tensor
                          corresponding to the evaluation point of the Rop
                          (i.e. the tensor with which you multiply the
                          Jacobian). It should be a tuple of ints.

        If the Op has more than 1 input, one of them must be mx, while
        others must be shared variables / constants. We will test only
        against the input self.mx, so you must call
        check_mat_rop_lop/check_rop_lop for the other inputs.

        We expect all inputs/outputs to have dtype floatX.

        If you want to test an Op with an output matrix, add a sum
        after the Op you want to test.
        """
        vx = numpy.asarray(self.rng.uniform(size=self.mat_in_shape),
                           theano.config.floatX)
        vv = numpy.asarray(self.rng.uniform(size=self.mat_in_shape),
                           theano.config.floatX)
        yv = tensor.Rop(y, self.mx, self.mv)
        rop_f = function([self.mx, self.mv], yv, on_unused_input='ignore')
        sy, _ = theano.scan(lambda i, y, x, v:
                            (tensor.grad(y[i], x) * v).sum(),
                            sequences=tensor.arange(y.shape[0]),
                            non_sequences=[y, self.mx, self.mv])
        scan_f = function([self.mx, self.mv], sy, on_unused_input='ignore')

        v1 = rop_f(vx, vv)
        v2 = scan_f(vx, vv)

        assert numpy.allclose(v1, v2), ('ROP mismatch: %s %s' % (v1, v2))

        self.check_nondiff_rop(theano.clone(y, replace={self.mx: break_op(self.mx)}))

        vv = numpy.asarray(self.rng.uniform(size=out_shape), theano.config.floatX)
        yv = tensor.Lop(y, self.mx, self.v)
        lop_f = function([self.mx, self.v], yv)

        sy = tensor.grad((self.v * y).sum(), self.mx)
        scan_f = function([self.mx, self.v], sy)

        v1 = lop_f(vx, vv)
        v2 = scan_f(vx, vv)
        assert numpy.allclose(v1, v2), ('LOP mismatch: %s %s' % (v1, v2)) 
Developer: muhanzhang, Project: D-VAE, Lines of code: 50, Source: test_rop.py

Example 12: check_rop_lop

# Required module import: from theano import tensor [as alias]
# Or: from theano.tensor import Rop [as alias]
def check_rop_lop(self, y, out_shape):
        """
        As check_mat_rop_lop, except the input is self.x which is a
        vector. The output is still a vector.

        """
        # TEST ROP
        vx = numpy.asarray(self.rng.uniform(size=self.in_shape),
                           theano.config.floatX)
        vv = numpy.asarray(self.rng.uniform(size=self.in_shape),
                           theano.config.floatX)

        yv = tensor.Rop(y, self.x, self.v)
        rop_f = function([self.x, self.v], yv, on_unused_input='ignore')
        J, _ = theano.scan(lambda i, y, x: tensor.grad(y[i], x),
                           sequences=tensor.arange(y.shape[0]),
                           non_sequences=[y, self.x])
        sy = tensor.dot(J, self.v)

        scan_f = function([self.x, self.v], sy, on_unused_input='ignore')

        v1 = rop_f(vx, vv)
        v2 = scan_f(vx, vv)
        assert numpy.allclose(v1, v2), ('ROP mismatch: %s %s' % (v1, v2))
        known_fail = False
        try:
            self.check_nondiff_rop(theano.clone(y, replace={self.x: break_op(self.x)}))
        except AssertionError:
            known_fail = True

        # TEST LOP

        vx = numpy.asarray(self.rng.uniform(size=self.in_shape),
                           theano.config.floatX)
        vv = numpy.asarray(self.rng.uniform(size=out_shape),
                           theano.config.floatX)

        yv = tensor.Lop(y, self.x, self.v)
        lop_f = function([self.x, self.v], yv, on_unused_input='ignore')
        J, _ = theano.scan(lambda i, y, x: tensor.grad(y[i], x),
                           sequences=tensor.arange(y.shape[0]),
                           non_sequences=[y, self.x])
        sy = tensor.dot(self.v, J)

        scan_f = function([self.x, self.v], sy)

        v1 = lop_f(vx, vv)
        v2 = scan_f(vx, vv)
        assert numpy.allclose(v1, v2), ('LOP mismatch: %s %s' % (v1, v2))

        if known_fail:
            raise SkipTest('Rop does not handle non-differentiable inputs '
                           'correctly. Bug exposed by fixing Add.grad method.') 
Developer: muhanzhang, Project: D-VAE, Lines of code: 55, Source: test_rop.py

Example 13: get_grads

# Required module import: from theano import tensor [as alias]
# Or: from theano.tensor import Rop [as alias]
def get_grads(self, state_below, target, mask = None, reg = None,
                  scale=None, sum_over_time=True, use_noise=True,
                 additional_inputs=None):
        """
        This function implements both the forward and backward pass of this
        layer. The reason we do this in a single function is that, for the
        factorized softmax layer, it is hard to rely on grad and still get an
        optimized graph. For uniformity this method is implemented for this
        layer as well (though one does not need to use it).

        :param state_below: theano variable representing the input to the
            softmax layer
        :param target: theano variable representing the target for this
            layer
        :return: cost, dC_dstate_below, param_grads, new_properties
            dC_dstate_below is a computational graph representing the
            gradient of the cost wrt to state_below
            param_grads is a list containing the gradients wrt to the
            different parameters of the layer
            new_properties is a dictionary containing additional properties
            of the model; properties are theano expression that are
            evaluated and reported by the model
        """
        cost = self.get_cost(state_below,
                             target,
                             mask = mask,
                             reg = reg,
                             scale=scale,
                             sum_over_time=sum_over_time,
                             use_noise=use_noise,
                             additional_inputs=additional_inputs)
        grads = TT.grad(cost, self.params)
        if self.additional_gradients:
            for new_grads, to_replace, properties in self.additional_gradients:
                gparams, params = new_grads
                prop_expr = [x[1] for x in properties]
                replace = [(x[0], TT.grad(cost, x[1])) for x in to_replace]
                rval = theano.clone(gparams + prop_expr,
                                    replace=replace)
                gparams = rval[:len(gparams)]
                prop_expr = rval[len(gparams):]
                self.properties += [(x[0], y) for x,y in zip(properties,
                                                             prop_expr)]
                for gp, p in zip(gparams, params):
                    grads[self.params.index(p)] += gp

        self.cost = cost
        self.grads = grads
        def Gvs_fn(*args):
            w = (1 - self.model_output) * self.model_output * state_below.shape[1]
            Gvs = TT.Lop(self.model_output, self.params,
                         TT.Rop(self.model_output, self.params, args)/w)
            return Gvs
        self.Gvs = Gvs_fn
        return cost, grads 
Developer: pascanur, Project: GroundHog, Lines of code: 57, Source: cost_layers.py

Example 14: check_mat_rop_lop

# Required module import: from theano import tensor [as alias]
# Or: from theano.tensor import Rop [as alias]
def check_mat_rop_lop(self, y, out_shape):
        """ Test the Rop/Lop when input is a matrix and the output is a vector

        :param y: the output variable of the op applied to self.mx
        :param out_shape: Used to generate a random tensor
                          corresponding to the evaluation point of the Rop
                          (i.e. the tensor with which you multiply the
                          Jacobian). It should be a tuple of ints.

        If the Op has more than 1 input, one of them must be mx, while
        others must be shared variables / constants. We will test only
        against the input self.mx, so you must call
        check_mat_rop_lop/check_rop_lop for the other inputs.

        We expect all inputs/outputs to have dtype floatX.

        If you want to test an Op with an output matrix, add a sum
        after the Op you want to test.
        """
        vx = numpy.asarray(self.rng.uniform(size=self.mat_in_shape),
                           theano.config.floatX)
        vv = numpy.asarray(self.rng.uniform(size=self.mat_in_shape),
                           theano.config.floatX)
        yv = tensor.Rop(y, self.mx, self.mv)
        rop_f = function([self.mx, self.mv], yv, on_unused_input='ignore')
        sy, _ = theano.scan(lambda i, y, x, v: \
                                (tensor.grad(y[i], x) * v).sum(),
                           sequences=tensor.arange(y.shape[0]),
                           non_sequences=[y, self.mx, self.mv])
        scan_f = function([self.mx, self.mv], sy, on_unused_input='ignore')

        v1 = rop_f(vx, vv)
        v2 = scan_f(vx, vv)

        assert numpy.allclose(v1, v2), ('ROP mismatch: %s %s' % (v1, v2))

        self.check_nondiff_rop(theano.clone(y,
                                    replace={self.mx: break_op(self.mx)}))

        vv = numpy.asarray(self.rng.uniform(size=out_shape),
                           theano.config.floatX)
        yv = tensor.Lop(y, self.mx, self.v)
        lop_f = function([self.mx, self.v], yv)

        sy = tensor.grad((self.v * y).sum(), self.mx)
        scan_f = function([self.mx, self.v], sy)

        v1 = lop_f(vx, vv)
        v2 = scan_f(vx, vv)
        assert numpy.allclose(v1, v2), ('LOP mismatch: %s %s' % (v1, v2)) 
Developer: rizar, Project: attention-lvcsr, Lines of code: 52, Source: test_rop.py

Example 15: check_rop_lop

# Required module import: from theano import tensor [as alias]
# Or: from theano.tensor import Rop [as alias]
def check_rop_lop(self, y, out_shape):
        """
        As check_mat_rop_lop, except the input is self.x which is a
        vector. The output is still a vector.

        """
        # TEST ROP
        vx = numpy.asarray(self.rng.uniform(size=self.in_shape),
                           theano.config.floatX)
        vv = numpy.asarray(self.rng.uniform(size=self.in_shape),
                           theano.config.floatX)

        yv = tensor.Rop(y, self.x, self.v)
        rop_f = function([self.x, self.v], yv, on_unused_input='ignore')
        J, _ = theano.scan(lambda i, y, x: tensor.grad(y[i], x),
                           sequences=tensor.arange(y.shape[0]),
                           non_sequences=[y, self.x])
        sy = tensor.dot(J, self.v)

        scan_f = function([self.x, self.v], sy, on_unused_input='ignore')

        v1 = rop_f(vx, vv)
        v2 = scan_f(vx, vv)
        assert numpy.allclose(v1, v2), ('ROP mismatch: %s %s' % (v1, v2))
        known_fail = False
        try:
            self.check_nondiff_rop(theano.clone(y,
                                replace={self.x: break_op(self.x)}))
        except AssertionError:
            known_fail = True

        # TEST LOP

        vx = numpy.asarray(self.rng.uniform(size=self.in_shape),
                           theano.config.floatX)
        vv = numpy.asarray(self.rng.uniform(size=out_shape),
                           theano.config.floatX)

        yv = tensor.Lop(y, self.x, self.v)
        lop_f = function([self.x, self.v], yv, on_unused_input='ignore')
        J, _ = theano.scan(lambda i, y, x: tensor.grad(y[i], x),
                           sequences=tensor.arange(y.shape[0]),
                           non_sequences=[y, self.x])
        sy = tensor.dot(self.v, J)

        scan_f = function([self.x, self.v], sy)

        v1 = lop_f(vx, vv)
        v2 = scan_f(vx, vv)
        assert numpy.allclose(v1, v2), ('LOP mismatch: %s %s' % (v1, v2))

        if known_fail:
            raise SkipTest('Rop does not handle non-differentiable inputs '
                           'correctly. Bug exposed by fixing Add.grad method.') 
Developer: rizar, Project: attention-lvcsr, Lines of code: 56, Source: test_rop.py


Note: The theano.tensor.Rop examples in this article were compiled by 純淨天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets are drawn from open-source projects contributed by various developers, and copyright of the source code remains with the original authors. Please refer to the corresponding project's license before distributing or using the code, and do not reproduce this article without permission.