

Python gradient.DisconnectedType method: code examples

This article collects typical usage examples of the Python method theano.gradient.DisconnectedType. If you are wondering how gradient.DisconnectedType is used in practice, or are looking for concrete examples of it, the curated snippets below may help. You can also explore further usage examples from the containing module, theano.gradient.


Thirteen code examples of the gradient.DisconnectedType method are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
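Before the collected examples, here is a minimal, self-contained sketch of the pattern they share. The PassThrough Op, its name, and its inputs are hypothetical and are not taken from any of the projects listed below; the sketch only illustrates the convention that an input which never influences the output value (a shape, an index list, a debug flag) is reported from grad with DisconnectedType()() rather than a zero gradient, and that connection_pattern declares the same thing to the rest of the graph machinery.

import theano
import theano.tensor as tt
from theano.gradient import DisconnectedType


class PassThrough(theano.Op):
    """Return `x` unchanged; `flag` only selects a side effect and never
    influences the output value, so its gradient is disconnected."""

    __props__ = ()

    def make_node(self, x, flag):
        x = tt.as_tensor_variable(x)
        flag = tt.as_tensor_variable(flag)
        return theano.Apply(self, [x, flag], [x.type()])

    def perform(self, node, inputs, output_storage):
        x, flag = inputs
        if flag:
            print("PassThrough saw an array of shape", x.shape)
        output_storage[0][0] = x.copy()

    def connection_pattern(self, node):
        # One list per input, one bool per output: the single output is
        # connected to x but not to `flag`.
        return [[True], [False]]

    def grad(self, inputs, output_grads):
        (g_out,) = output_grads
        # x passes its output gradient straight through; `flag` contributes
        # no gradient anywhere, which is reported with DisconnectedType.
        return [g_out, DisconnectedType()()]


# Usage sketch: differentiating w.r.t. x works as usual; asking for the
# gradient w.r.t. flag with disconnected_inputs='raise' would raise
# theano.gradient.DisconnectedInputError.
x = tt.vector("x")
flag = tt.iscalar("flag")
y = PassThrough()(x, flag)
gx = theano.grad(y.sum(), x)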

Example 1: grad

# Required import: from theano import gradient [as alias]
# Or: from theano.gradient import DisconnectedType [as alias]
def grad(self, inputs, g):

        # g[1:] is all integers, so their Jacobian in this op
        # is 0. We thus don't need to worry about what their values
        # are.

        # if g[0] is disconnected, then this op doesn't contribute
        # any gradient anywhere. but we know that at least one of
        # g[1:] is connected, or this grad method wouldn't have been
        # called, so we should report zeros
        (csm,) = inputs
        if isinstance(g[0].type, DisconnectedType):
            return [csm.zeros_like()]

        data, indices, indptr, shape = csm_properties(csm)
        return [CSM(csm.format)(g[0], indices, indptr, shape)]

# don't make this a function or it breaks some optimizations below 
Author: muhanzhang | Project: D-VAE | Lines: 20 | Source: basic.py

Example 2: grad

# Required import: from theano import gradient [as alias]
# Or: from theano.gradient import DisconnectedType [as alias]
def grad(self, inputs, grads):
        g_output, = grads
        x, y = inputs[:2]
        idx_list = inputs[2:]

        if x.dtype in theano.tensor.discrete_dtypes:
            # The output dtype is the same as x
            gx = x.zeros_like(dtype=theano.config.floatX)
            if y.dtype in theano.tensor.discrete_dtypes:
                gy = y.zeros_like(dtype=theano.config.floatX)
            else:
                gy = y.zeros_like()
        elif x.dtype in theano.tensor.complex_dtypes:
            raise NotImplementedError("No support for complex grad yet")
        else:
            if self.set_instead_of_inc:
                gx = set_subtensor(
                    Subtensor(idx_list=self.idx_list)(g_output, *idx_list),
                    theano.tensor.zeros_like(y))
            else:
                gx = g_output
            gy = Subtensor(idx_list=self.idx_list)(g_output, *idx_list)
            gy = _sum_grad_over_bcasted_dims(y, gy)

        return [gx, gy] + [DisconnectedType()()] * len(idx_list) 
Author: muhanzhang | Project: D-VAE | Lines: 27 | Source: subtensor.py

Example 3: test_disconnected_cost_grad

# Required import: from theano import gradient [as alias]
# Or: from theano.gradient import DisconnectedType [as alias]
def test_disconnected_cost_grad():

        # Tests that if we say the cost is disconnected via the
        # known_grads mechanism, it is treated as such by the rest of the
        # system.
        # This is so that Ops that are built around minigraphs like OpFromGraph
        # and scan can implement Op.grad by passing ograds to known_grads

        x = theano.tensor.iscalar()
        y = theano.tensor.iscalar()
        cost = x + y
        assert cost.dtype in theano.tensor.discrete_dtypes
        try:
            theano.tensor.grad(cost, [x, y], known_grads={cost: gradient.DisconnectedType()()}, disconnected_inputs='raise')
        except theano.gradient.DisconnectedInputError:
            return
        raise AssertionError("A disconnected gradient has been ignored.") 
Author: muhanzhang | Project: D-VAE | Lines: 19 | Source: test_gradient.py

Example 4: test_disconnected_cost_grad

# Required import: from theano import gradient [as alias]
# Or: from theano.gradient import DisconnectedType [as alias]
def test_disconnected_cost_grad():

        # Tests that if we say the cost is disconnected via the
        # known_grads mechanism, it is treated as such by the rest of the
        # system.
        # This is so that Ops that are built around minigraphs like OpFromGraph
        # and scan can implement Op.grad by passing ograds to known_grads

        x = theano.tensor.iscalar()
        y = theano.tensor.iscalar()
        cost = x + y
        assert cost.dtype in theano.tensor.discrete_dtypes
        try:
            grads = theano.tensor.grad(cost, [x, y], known_grads={cost: gradient.DisconnectedType()() },
                    disconnected_inputs='raise')
        except theano.gradient.DisconnectedInputError:
            return
        raise AssertionError("A disconnected gradient has been ignored.") 
Author: rizar | Project: attention-lvcsr | Lines: 20 | Source: test_gradient.py

Example 5: grad

# Required import: from theano import gradient [as alias]
# Or: from theano.gradient import DisconnectedType [as alias]
def grad(self, inputs, g_outputs):
        """
        .. todo::

            WRITEME
        """
        hid_acts, filters, output_shape = inputs
        g_images, = g_outputs
        g_images = as_cuda_ndarray_variable(g_images)
        assert not isinstance(g_images, list)

        global FilterActs
        global WeightActs
        if FilterActs is None:
            from pylearn2.sandbox.cuda_convnet.filter_acts import FilterActs
            from pylearn2.sandbox.cuda_convnet.weight_acts import WeightActs

        g_filters = WeightActs(stride=self.stride,
                partial_sum=self.partial_sum, pad=self.pad)(
                        g_images, hid_acts, filters.shape[1:3])[0]
        assert not isinstance(g_filters, list)
        g_hid_acts = FilterActs(stride=self.stride, pad=self.pad,
                partial_sum=self.partial_sum)(g_images, filters)

        return [g_hid_acts, g_filters, DisconnectedType()()] 
Author: zchengquan | Project: TextDetector | Lines: 27 | Source: img_acts.py

Example 6: R_op

# Required import: from theano import gradient [as alias]
# Or: from theano.gradient import DisconnectedType [as alias]
def R_op(self, inputs, eval_points):
        outs = self(*inputs, **dict(return_list=True))
        rval = [None for x in outs]
        # For each output
        for idx, out in enumerate(outs):
            # make it so that _bgrad computes only the gradient of the
            # current output with respect to the inputs (and not of all outputs)
            ograds = [x.zeros_like() for x in outs]
            ograds[idx] = theano.tensor.ones_like(out)

            bgrads = self._bgrad(inputs, ograds)
            rop_out = None

            for jdx, (inp, eval_point) in enumerate(izip(inputs,
                                                    eval_points)):
                # if None, then we can just ignore this branch ..
                # what we do is to assume that for any non-differentiable
                # branch, the gradient is actually 0, which I think is not
                # the right thing to do .. have to talk to Ian and James
                # about it

                if bgrads[jdx] is None or \
                        isinstance(bgrads[jdx].type, DisconnectedType):
                    pass
                elif eval_point is not None:
                    if rop_out is None:
                        rop_out = bgrads[jdx] * eval_point
                    else:
                        rop_out = rop_out + bgrads[jdx] * eval_point

            rval[idx] = rop_out

        return rval 
Author: muhanzhang | Project: D-VAE | Lines: 35 | Source: elemwise.py

Example 7: grad

# Required import: from theano import gradient [as alias]
# Or: from theano.gradient import DisconnectedType [as alias]
def grad(self, inp, grads):
        x, b, y_idx = inp
        g_nll, g_sm, g_am = grads

        dx_terms = []
        db_terms = []
        d_idx_terms = []

        if not isinstance(g_nll.type, DisconnectedType):
            nll, sm = crossentropy_softmax_1hot_with_bias(x, b, y_idx)
            dx = crossentropy_softmax_1hot_with_bias_dx(g_nll, sm, y_idx)
            db = tensor.sum(dx, axis=[0])
            dx_terms.append(dx)
            db_terms.append(db)

        if not isinstance(g_sm.type, DisconnectedType):
            dx, db = softmax_with_bias.grad((x, b), (g_sm, ))
            dx_terms.append(dx)
            db_terms.append(db)

        if not isinstance(g_am.type, DisconnectedType):
            dx_terms.append(x.zeros_like())
            db_terms.append(b.zeros_like())
            d_idx_terms.append(y_idx.zeros_like())

        def fancy_sum(terms):
            if len(terms) == 0:
                return DisconnectedType()()
            rval = terms[0]
            for term in terms[1:]:
                rval = rval + term
            return rval

        return [fancy_sum(terms) for terms in
                [dx_terms, db_terms, d_idx_terms]] 
Author: muhanzhang | Project: D-VAE | Lines: 37 | Source: nnet.py

Example 8: grad

# Required import: from theano import gradient [as alias]
# Or: from theano.gradient import DisconnectedType [as alias]
def grad(self, inputs, g_outputs):
        z = tensor.zeros_like(inputs[0])
        gx = inc_diagonal_subtensor(z, inputs[1], inputs[2], g_outputs[0])
        return [gx, DisconnectedType()(), DisconnectedType()()] 
Author: muhanzhang | Project: D-VAE | Lines: 6 | Source: conv3d2d.py

Example 9: grad

# Required import: from theano import gradient [as alias]
# Or: from theano.gradient import DisconnectedType [as alias]
def grad(self, inputs, output_gradients):
        C, d, WShape, B = inputs
        dLdA, = output_gradients

        z = T.zeros_like(C[0, 0, 0, 0, :])
        dLdC = theano.tensor.nnet.convTransp3D(dLdA, z, d, B, C.shape[1:4])
        # d actually does affect the outputs, so it's not disconnected
        dLdd = grad_undefined(self, 1, d)
        # The shape of the weights doesn't affect the output elements
        dLdWShape = DisconnectedType()()
        dLdB = theano.tensor.nnet.conv3D(C, dLdA, T.zeros_like(B[0, 0, 0, 0, :]), d)

        return [dLdC, dLdd, dLdWShape, dLdB] 
Author: muhanzhang | Project: D-VAE | Lines: 15 | Source: ConvGrad3D.py

Example 10: grad

# Required import: from theano import gradient [as alias]
# Or: from theano.gradient import DisconnectedType [as alias]
def grad(self, inputs, output_gradients):
        W, b, d, H, RShape = inputs
        dCdR, = output_gradients
        dCdH = theano.tensor.nnet.conv3D(dCdR, W, T.zeros_like(H[0, 0, 0, 0, :]), d)
        WShape = W.shape
        dCdW = theano.tensor.nnet.convGrad3D(dCdR, d, WShape, H)
        dCdb = T.sum(dCdR, axis=(0, 1, 2, 3))
        # not differentiable, since d affects the output elements
        dCdd = grad_undefined(self, 2, d)
        # disconnected, since RShape just determines the output shape
        dCdRShape = DisconnectedType()()

        if 'name' in dir(dCdR) and dCdR.name is not None:
            dCdR_name = dCdR.name
        else:
            dCdR_name = 'anon_dCdR'

        if 'name' in dir(H) and H.name is not None:
            H_name = H.name
        else:
            H_name = 'anon_H'

        if 'name' in dir(W) and W.name is not None:
            W_name = W.name
        else:
            W_name = 'anon_W'

        if 'name' in dir(b) and b.name is not None:
            b_name = b.name
        else:
            b_name = 'anon_b'

        dCdW.name = ('ConvTransp3D_dCdW.H=' + H_name + ',dCdR=' + dCdR_name +
                     ',W=' + W_name)
        dCdb.name = ('ConvTransp3D_dCdb.H=' + H_name + ',dCdR=' + dCdR_name +
                     ',W=' + W_name + ',b=' + b_name)
        dCdH.name = 'ConvTransp3D_dCdH.H=' + H_name + ',dCdR=' + dCdR_name

        return [dCdW, dCdb, dCdd, dCdH, dCdRShape] 
Author: muhanzhang | Project: D-VAE | Lines: 41 | Source: ConvTransp3D.py

Example 11: _zero_disconnected

# Required import: from theano import gradient [as alias]
# Or: from theano.gradient import DisconnectedType [as alias]
def _zero_disconnected(outputs, grads):
    l = []
    for o, g in zip(outputs, grads):
        if isinstance(g.type, DisconnectedType):
            l.append(o.zeros_like())
        else:
            l.append(g)
    return l 
Author: muhanzhang | Project: D-VAE | Lines: 10 | Source: nlinalg.py

Example 12: grad

# Required import: from theano import gradient [as alias]
# Or: from theano.gradient import DisconnectedType [as alias]
def grad(self, inputs, output_gradients):
        return ([DisconnectedType()()] + output_gradients) 
Author: muhanzhang | Project: D-VAE | Lines: 4 | Source: breakpoint.py

Example 13: grad

# Required import: from theano import gradient [as alias]
# Or: from theano.gradient import DisconnectedType [as alias]
def grad(self, inputs, gout):

        (x, y) = inputs
        (gz,) = gout
        if y.type in continuous_types:
            # x is disconnected because the elements of x are not used
            return DisconnectedType()(), gz
        else:
            # when y is discrete, we assume the function can be extended
            # to deal with real-valued inputs by rounding them to the
            # nearest integer. f(x+eps) thus equals f(x) so the gradient
            # is zero, not disconnected or undefined
            return DisconnectedType()(), y.zeros_like() 
Author: muhanzhang | Project: D-VAE | Lines: 15 | Source: basic.py


Note: The theano.gradient.DisconnectedType examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub/MSDocs. The snippets were selected from open-source projects contributed by various developers; copyright of the source code belongs to the original authors. For distribution and use, please refer to the License of the corresponding project. Do not reproduce without permission.