This article collects typical usage examples of the Python class theano.gof.Op. If you have been wondering how gof.Op is used in practice and what working code built on it looks like, the curated examples below may help; you can also explore the enclosing module, theano.gof, for related usage.
The examples are sorted by popularity by default.
Example 1: c_code_cache_version_apply
# Required import: from theano import gof [as alias]
# Or: from theano.gof import Op [as alias]
def c_code_cache_version_apply(self, node):
    version = [12]  # the version corresponding to the C code in this Op
    # now we insert versions for the ops on which we depend...
    scalar_node = Apply(
        self.scalar_op,
        [get_scalar_type(dtype=input.type.dtype).make_variable()
         for input in node.inputs],
        [get_scalar_type(dtype=output.type.dtype).make_variable()
         for output in node.outputs])
    version.append(self.scalar_op.c_code_cache_version_apply(scalar_node))
    for i in node.inputs + node.outputs:
        version.append(get_scalar_type(dtype=i.type.dtype).c_code_cache_version())
    version.append(('openmp', self.openmp))
    if all(version):
        return tuple(version)
    else:
        return ()
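For a simpler Op whose generated C code does not depend on other Ops, c_code_cache_version is usually just a constant tuple; returning an empty tuple disables caching of the compiled code. Below is a minimal sketch of such an Op; the SimpleViewOp class is an illustrative assumption modeled on view-like Ops, not taken from the examples on this page.
class SimpleViewOp(gof.Op):
    # The output is a view of the input, so declare the aliasing.
    view_map = {0: [0]}
    __props__ = ()

    def make_node(self, x):
        return gof.Apply(self, [x], [x.type()])

    def perform(self, node, inp, out):
        x, = inp
        z, = out
        z[0] = x

    def c_code(self, node, name, inputs, outputs, sub):
        x, = inputs
        z, = outputs
        # Return the input as the output (reference-counted view).
        return """
        Py_XDECREF(%(z)s);
        %(z)s = %(x)s;
        Py_INCREF(%(z)s);
        """ % locals()

    def c_code_cache_version(self):
        # Bump this tuple whenever the C code above changes so the
        # compilation cache does not reuse a stale binary.
        return (1,)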
Example 2: register_shape_c_code
# Required import: from theano import gof [as alias]
# Or: from theano.gof import Op [as alias]
def register_shape_c_code(type, code, version=()):
    """
    Tell the Shape Op how to generate C code for a Theano Type.

    Parameters
    ----------
    type : Theano type
        It must be the Theano class itself and not an instance of the class.
    code : C code
        Returns a vector representing the shape for the Theano type 'type'.
        Use %(iname)s and %(oname)s for the input and output C variable names
        respectively.
    version
        A number indicating the version of the code, for the cache.

    """
    Shape.c_code_and_version[type] = (code, version)
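A registration is then typically a single call per Theano type. The sketch below is only illustrative: MyType and the C helper my_type_shape_vector are assumed names, not part of Theano.
# Hypothetical registration: the C snippet must assign to %(oname)s a
# vector holding the shape of the input %(iname)s.
register_shape_c_code(
    MyType,
    """
    %(oname)s = my_type_shape_vector(%(iname)s);
    """,
    version=1)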
Example 3: test_aliased_outputs_ok
# Required import: from theano import gof [as alias]
# Or: from theano.gof import Op [as alias]
def test_aliased_outputs_ok(self):
    # Here aliased outputs are ok because both of them are also aliased
    # to an input.
    class CustomOp(gof.Op):
        view_map = {0: [0], 1: [0]}

        def make_node(self, a, b):
            c = a.type()
            d = a.type()
            return gof.Apply(self, [a, b], [c, d])

        def perform(self, node, inp, out):
            a, b = inp
            c, d = out
            c[0] = a
            d[0] = a[1:]

    x = theano.tensor.dvector('x')
    y = theano.tensor.dvector('y')
    f = theano.function([x, y], CustomOp()(x, y), mode='DEBUG_MODE')
    r0, r1 = f([1, 2, 3, 4], [5, 6, 7, 8])
    assert numpy.all(r0 == [1, 2, 3, 4])
    assert numpy.all(r1 == [2, 3, 4])
Example 4: test_aliased_outputs_ok_output
# Required import: from theano import gof [as alias]
# Or: from theano.gof import Op [as alias]
def test_aliased_outputs_ok_output(self):
    # Here aliased outputs are ok because both of them are outputs of the
    # function as a whole and thus cannot be destroyed.
    class CustomOp(gof.Op):
        def make_node(self, a, b):
            c = a.type()
            d = a.type()
            return gof.Apply(self, [a, b], [c, d])

        def perform(self, node, inp, out):
            a, b = inp
            c, d = out
            r = a * 2
            c[0] = r
            d[0] = r[1:]

    x = theano.tensor.dvector()
    y = theano.tensor.dvector()
    f = theano.function([x, y], CustomOp()(x, y), mode='DEBUG_MODE')
    r0, r1 = f([1, 2, 3, 4], [5, 6, 7, 8])
    assert numpy.all(r0 == [2, 4, 6, 8])
    assert numpy.all(r1 == [4, 6, 8])
Example 5: test_aliased_outputs_ok_shadow
# Required import: from theano import gof [as alias]
# Or: from theano.gof import Op [as alias]
def test_aliased_outputs_ok_shadow(self):
    # Here the alias between outputs is ok because one of them is not used
    # for subsequent computation. This is like the case where one output is
    # used as a memory buffer to serve another output.
    class CustomOp(gof.Op):
        def make_node(self, a, b):
            c = a.type()
            d = a.type()
            return gof.Apply(self, [a, b], [c, d])

        def perform(self, node, inp, out):
            a, b = inp
            c, d = out
            r = a * 1
            c[0] = r
            d[0] = r[1:]

    x = theano.tensor.dvector('x')
    y = theano.tensor.dvector('y')
    f = theano.function([x, y], CustomOp()(x, y)[0] * 2, mode='DEBUG_MODE')
    r0 = f([1, 2, 3, 4], [5, 6, 7, 8])
    assert numpy.all(r0 == [2, 4, 6, 8])
Example 6: test_retNone1
# Required import: from theano import gof [as alias]
# Or: from theano.gof import Op [as alias]
def test_retNone1(self):
    """Test that it is not ok to return None from op.grad()"""
    class retNone(gof.op.Op):
        __props__ = ()

        def make_node(self):
            inputs = [theano.tensor.vector()]
            outputs = [theano.tensor.vector()]
            return gof.Apply(self, inputs, outputs)

        def grad(self, inp, grads):
            x, = inp
            gz, = grads
            # Implicitly returns None, which the gradient machinery must reject.
            pass

    a = retNone().make_node()
    self.assertRaises(TypeError, grad_sources_inputs, [(a.out, one)], None)
Example 7: test_wrong_rval_len1
# Required import: from theano import gof [as alias]
# Or: from theano.gof import Op [as alias]
def test_wrong_rval_len1(self):
    """Test that it is not ok to return the wrong number of gradient terms."""
    class retOne(gof.op.Op):
        __props__ = ()

        def make_node(self, *inputs):
            outputs = [theano.tensor.vector()]
            return gof.Apply(self, inputs, outputs)

        def grad(self, inputs, grads):
            # Always returns a single gradient term, regardless of how many
            # inputs the node has.
            return [inputs[0].zeros_like()]

    i = theano.tensor.vector()
    j = theano.tensor.vector()
    a1 = retOne().make_node(i)
    grad_sources_inputs([(a1.out, one)], None)
    a2 = retOne().make_node(i, j)
    self.assertRaises(ValueError, grad_sources_inputs, [(a2.out, one)], None)
Example 8: test_1in_1out
# Required import: from theano import gof [as alias]
# Or: from theano.gof import Op [as alias]
def test_1in_1out(self):
    """Test that grad is called correctly for a 1-to-1 op."""
    gval = theano.tensor.matrix()

    class O(gof.op.Op):
        __props__ = ()

        def make_node(self):
            inputs = [theano.tensor.matrix()]
            outputs = [theano.tensor.matrix()]
            return gof.Apply(self, inputs, outputs)

        def grad(self, inp, grads):
            return gval,

    a1 = O().make_node()
    g = grad_sources_inputs([(a1.outputs[0], one)], None)
    self.assertTrue(g[a1.inputs[0]] is gval)
Example 9: test_1in_Nout
# Required import: from theano import gof [as alias]
# Or: from theano.gof import Op [as alias]
def test_1in_Nout(self):
    """Test that grad is called correctly for a 1-to-many op."""
    gval = theano.tensor.matrix()

    class O(gof.op.Op):
        __props__ = ()

        def make_node(self):
            inputs = [theano.tensor.matrix()]
            outputs = [theano.tensor.scalar(), theano.tensor.scalar()]
            return gof.Apply(self, inputs, outputs)

        def grad(self, inp, grads):
            x, = inp
            gz1, gz2 = grads
            return gval,

    a1 = O().make_node()
    g = grad_sources_inputs([(a1.outputs[0], one)], None)
    self.assertTrue(g[a1.inputs[0]] is gval)
Example 10: test_Nin_Nout
# Required import: from theano import gof [as alias]
# Or: from theano.gof import Op [as alias]
def test_Nin_Nout(self):
    """Test that grad is called correctly for a many-to-many op."""
    gval0 = theano.tensor.matrix()
    gval1 = theano.tensor.matrix()

    class O(gof.op.Op):
        __props__ = ()

        def make_node(self):
            inputs = [theano.tensor.matrix(), theano.tensor.matrix()]
            outputs = [theano.tensor.matrix(), theano.tensor.matrix()]
            return gof.Apply(self, inputs, outputs)

        def grad(self, inp, grads):
            return gval0, gval1

    a1 = O().make_node()
    g = grad_sources_inputs([(a1.outputs[0], one)], None)
    self.assertTrue(g[a1.inputs[0]] is gval0)
    self.assertTrue(g[a1.inputs[1]] is gval1)
Example 11: test_unimplemented_grad_grad
# Required import: from theano import gof [as alias]
# Or: from theano.gof import Op [as alias]
def test_unimplemented_grad_grad(self):
    # Tests that unimplemented grads are caught by the grad method.
    class DummyOp(gof.Op):
        __props__ = ()

        def make_node(self, x):
            return gof.Apply(self, [x], [x.type()])

        def grad(self, inputs, output_grads):
            return [theano.gradient.grad_not_implemented(self, 0, inputs[0])]

    a = theano.tensor.scalar()
    b = DummyOp()(a)
    self.assertRaises(TypeError, theano.gradient.grad, b, a)
Example 12: test_dxdx
# Required import: from theano import gof [as alias]
# Or: from theano.gof import Op [as alias]
def test_dxdx():
    # Tests that the gradient of a scalar with respect to itself is 1.
    # I use an integer in this case because people keep changing this
    # gradient to be 0 on integers, but according to our interpretation
    # of the gradient as defined in the Op contract, it should be 1.
    # If you feel the need to change this unit test you are probably
    # modifying the Op contract and should definitely get the approval
    # of multiple people on theano-dev.
    x = theano.tensor.iscalar()
    g = theano.tensor.grad(x, x)
    g = g.eval({x: 12})
    assert np.allclose(g, 1.)
Example 13: test_undefined_cost_grad
# Required import: from theano import gof [as alias]
# Or: from theano.gof import Op [as alias]
def test_undefined_cost_grad():
    # Tests that if we declare the cost not differentiable via the
    # known_grads mechanism, it is treated as such by the rest of the
    # system.
    # This is so that Ops built around minigraphs, like OpFromGraph and
    # scan, can implement Op.grad by passing ograds to known_grads.
    x = theano.tensor.iscalar()
    y = theano.tensor.iscalar()
    cost = x + y
    assert cost.dtype in theano.tensor.discrete_dtypes
    try:
        theano.tensor.grad(cost, [x, y], known_grads={cost: NullType()()})
    except theano.gradient.NullTypeGradError:
        return
    raise AssertionError("An undefined gradient has been ignored.")
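As a rough illustration of that last point, an Op wrapping an inner graph could forward its output gradients through known_grads. The sketch below is an assumption about how such a grad method might look, not OpFromGraph's actual implementation; a real Op would additionally clone the inner graph so the result is expressed in terms of its own inputs.
def grad(self, inputs, output_grads):
    # Map each inner-graph output to the gradient arriving from outside,
    # then ask Theano for the gradients of the inner-graph inputs.
    known = dict(zip(self.inner_outputs, output_grads))
    return theano.gradient.grad(
        cost=None,
        wrt=self.inner_inputs,
        known_grads=known,
        disconnected_inputs='ignore')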