This article collects typical usage examples of the theano.gof.local_optimizer method in Python. If you are unsure how gof.local_optimizer is used in practice, the curated code samples below may help. You can also explore further usage examples from the containing module, theano.gof.
The following presents 6 code examples of the gof.local_optimizer method, sorted by popularity by default.
Example 1: test_badoptimization

# Required module: from theano import gof [as alias]
# Or: from theano.gof import local_optimizer [as alias]
# Additional imports this test relies on:
import theano
from theano import gof
from theano.compile import debugmode
def test_badoptimization():
    @gof.local_optimizer([theano.tensor.add])
    def insert_broken_add(node):
        if node.op == theano.tensor.add:
            return [off_by_half(*node.inputs)]
        return False

    edb = gof.EquilibriumDB()
    edb.register('insert_broken_add', insert_broken_add, 'all')
    opt = edb.query('+all')

    a = theano.tensor.dvector()
    b = theano.tensor.dvector()

    f = theano.function([a, b], a + b,
                        mode=debugmode.DebugMode(optimizer=opt))

    try:
        f([1.0, 2.0, 3.0], [2, 3, 4])
    except debugmode.BadOptimization as e:
        assert str(e.reason) == 'insert_broken_add'
        return  # TEST PASS

    assert False
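The test assumes an off_by_half Op defined elsewhere in Theano's DebugMode test module: an elementwise "addition" whose result is deliberately wrong, so that DebugMode's value verification flags the rewrite as a BadOptimization. A minimal sketch of such an Op follows; the class name and the 1.5 factor are illustrative assumptions, not Theano's actual helper.

import theano
import theano.tensor as tt

class OffByHalf(theano.Op):
    """Deliberately wrong elementwise add: returns 1.5 * (a + b)."""
    __props__ = ()

    def make_node(self, a, b):
        a = tt.as_tensor_variable(a)
        b = tt.as_tensor_variable(b)
        return theano.Apply(self, [a, b], [a.type()])

    def perform(self, node, inputs, output_storage):
        a, b = inputs
        # Off by a factor of 1.5, so DebugMode sees a value mismatch
        # between the optimized and unoptimized graphs.
        output_storage[0][0] = (a + b) * 1.5

off_by_half = OffByHalf()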
Example 2: test_badoptimization_opt_err

# Required module: from theano import gof [as alias]
# Or: from theano.gof import local_optimizer [as alias]
# Additional imports this test relies on:
import theano
from theano import gof
from theano.compile import debugmode
def test_badoptimization_opt_err():
    """This variant of test_badoptimization() replaces the working code
    with a new apply node that will raise an error.
    """
    @gof.local_optimizer([theano.tensor.add])
    def insert_bigger_b_add(node):
        if node.op == theano.tensor.add:
            inputs = list(node.inputs)
            if inputs[-1].owner is None:
                inputs[-1] = theano.tensor.concatenate((inputs[-1],
                                                        inputs[-1]))
                return [node.op(*inputs)]
        return False

    edb = gof.EquilibriumDB()
    edb.register('insert_bigger_b_add', insert_bigger_b_add, 'all')
    opt = edb.query('+all')

    a = theano.tensor.dvector()
    b = theano.tensor.dvector()

    f = theano.function([a, b], a + b,
                        mode=debugmode.DebugMode(optimizer=opt))

    try:
        f([1.0, 2.0, 3.0], [2, 3, 4])
    except Exception as e:
        assert 'insert_bigger_b_add' in exc_message(e)
        return  # TEST PASS

    assert False
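The exc_message helper used above comes from Theano's test utilities and normalizes exception text across Python 2 and 3. A rough sketch of the behavior the test needs (this body is an assumption, not Theano's exact implementation):

def exc_message(e):
    # Return the exception's message as text (sketch; Theano's helper
    # may differ in detail).
    msg = e.args[0] if e.args else str(e)
    if isinstance(msg, bytes):
        msg = msg.decode('utf-8', 'replace')
    return msg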
Example 3: test_stochasticoptimization

# Required module: from theano import gof [as alias]
# Or: from theano.gof import local_optimizer [as alias]
# Additional imports this test relies on:
import theano
from theano import config, gof
from theano.compile import debugmode
def test_stochasticoptimization():
    # this optimization alternates between triggering and not triggering.
    last_time_replaced = [False]

    @gof.local_optimizer([theano.tensor.add])
    def insert_broken_add_sometimes(node):
        if node.op == theano.tensor.add:
            last_time_replaced[0] = not last_time_replaced[0]
            if last_time_replaced[0]:
                return [off_by_half(*node.inputs)]
        return False

    edb = gof.EquilibriumDB()
    edb.register(
        'insert_broken_add_sometimes',
        insert_broken_add_sometimes,
        'all')
    opt = edb.query('+all')

    a = theano.tensor.dvector()
    b = theano.tensor.dvector()

    try:
        theano.function([a, b],
                        theano.tensor.add(a, b),
                        mode=debugmode.DebugMode(
                            optimizer=opt,
                            check_c_code=True,
                            stability_patience=max(2, config.DebugMode.patience)))
    except debugmode.StochasticOrder:
        return  # TEST PASS

    assert False
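DebugMode compiles the graph several times (stability_patience controls how many) and raises StochasticOrder when those passes disagree, as the alternating optimizer above guarantees. For contrast, a deterministic rewrite decides purely from the node it is given, so every pass agrees; the add(x, 0) -> x rule below is a hypothetical illustration, not part of the test above.

@gof.local_optimizer([theano.tensor.add])
def local_add_remove_zero(node):
    # Deterministic: the decision depends only on the node itself,
    # so repeated optimization passes always make the same choice.
    if node.op == theano.tensor.add and len(node.inputs) == 2:
        x, y = node.inputs
        try:
            if theano.tensor.get_scalar_constant_value(y) == 0:
                # Assumes x already has the output's type.
                return [x]
        except theano.tensor.NotScalarConstantError:
            pass
    return False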
Example 4: alpha_merge

# Required module: from theano import gof [as alias]
# Or: from theano.gof import local_optimizer [as alias]
# Other names used below come from Theano's GPU utilities, roughly:
from functools import wraps
import theano.scalar as scal
from theano.gof import local_optimizer
from theano.tensor import NotScalarConstantError, get_scalar_constant_value
from theano.gpuarray.elemwise import GpuElemwise
# find_node and grab_cpu_scalar are helper functions defined alongside
# alpha_merge in theano.gpuarray.opt_util.
def alpha_merge(cls, alpha_in, beta_in):
    def wrapper(maker):
        @local_optimizer([GpuElemwise])
        @wraps(maker)
        def opt(node):
            if (isinstance(node.op, GpuElemwise) and
                    node.op.scalar_op == scal.mul and
                    node.nin == 2):
                targ = find_node(node.inputs[0], cls)
                if targ is None:
                    targ = find_node(node.inputs[1], cls)
                    if targ is None:
                        return
                    lr = grab_cpu_scalar(node.inputs[0],
                                         nd=targ.outputs[0].ndim)
                else:
                    lr = grab_cpu_scalar(node.inputs[1],
                                         nd=targ.outputs[0].ndim)
                if lr is None or targ is None:
                    return None
                inputs = list(targ.inputs)
                try:
                    c = get_scalar_constant_value(lr)
                    if c == 0:
                        inputs[alpha_in] = lr
                        inputs[beta_in] = lr
                    elif c == 1:
                        inputs[alpha_in] = targ.inputs[alpha_in]
                        inputs[beta_in] = targ.inputs[beta_in]
                    else:
                        inputs[alpha_in] = lr * targ.inputs[alpha_in]
                        inputs[beta_in] = lr * targ.inputs[beta_in]
                except NotScalarConstantError:
                    inputs[alpha_in] = lr * targ.inputs[alpha_in]
                    inputs[beta_in] = lr * targ.inputs[beta_in]
                return maker(targ, *inputs)
        return opt
    return wrapper
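alpha_merge is a decorator factory: the wrapped maker receives the matched apply node plus its inputs, with the scalar multiplier already folded into the alpha/beta positions. A usage sketch in the style of Theano's GEMM optimizations; GpuGemm's input positions and the gpu_gemm_no_inplace name are assumptions for illustration.

@alpha_merge(GpuGemm, alpha_in=1, beta_in=4)
def local_gemm_alpha_merge(node, *inputs):
    # Rebuild the GEMM with the rescaled alpha/beta inputs.
    return [gpu_gemm_no_inplace(*inputs)]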
Example 5: output_merge

# Required module: from theano import gof [as alias]
# Or: from theano.gof import local_optimizer [as alias]
# Uses the same GPU utility imports as alpha_merge above; is_equal and
# the cached constant _one are also defined in theano.gpuarray.opt_util.
def output_merge(cls, alpha_in, beta_in, out_in):
    def wrapper(maker):
        @local_optimizer([GpuElemwise])
        @wraps(maker)
        def opt(node):
            if (isinstance(node.op, GpuElemwise) and
                    node.op.scalar_op == scal.add and
                    node.nin == 2):
                targ = find_node(node.inputs[0], cls)
                W = node.inputs[1]
                if targ is None:
                    targ = find_node(node.inputs[1], cls)
                    W = node.inputs[0]
                if targ is None:
                    return None
                if not is_equal(targ.inputs[beta_in], 0.0):
                    # other cases are too complex for now
                    return None
                if W.broadcastable != targ.inputs[out_in].broadcastable:
                    # May change later to do the broadcast, but it's
                    # under discussion.
                    return None
                inputs = list(targ.inputs)
                inputs[out_in] = W
                inputs[beta_in] = _one.clone()
                return maker(targ, *inputs)
        return opt
    return wrapper
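A matching usage sketch for output_merge, again following the GEMM pattern: it fuses an elementwise add of a tensor W into the target op when beta was 0. The input positions assume gemm(out, alpha, A, B, beta); the names are hypothetical.

@output_merge(GpuGemm, alpha_in=1, beta_in=4, out_in=0)
def local_gemm_output_merge(node, *inputs):
    # The added tensor W has become the output (accumulator) input and
    # beta has been reset to 1, so GpuGemm now computes the fused sum.
    return [gpu_gemm_no_inplace(*inputs)]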
Example 6: op_lifter

# Required module: from theano import gof [as alias]
# Or: from theano.gof import local_optimizer [as alias]
# host_from_gpu, GpuFromHost, get_context and safe_to_cpu are assumed
# from Theano's gpuarray backend (theano.gpuarray and its submodules).
def op_lifter(OP, cuda_only=False):
    """
    OP(..., host_from_gpu(), ...) -> host_from_gpu(GpuOP(...))

    gpu_from_host(OP(inp0, ...)) -> GpuOP(inp0, ...)
    """
    def f(maker):
        def local_opt(node):
            if type(node.op) in OP:
                # Either one of our inputs is on the gpu or
                # all of our clients are on the gpu
                replace = False
                # TODO: Maybe set context_name with infer_context_name()?
                context_name = None
                # We replace if any input is a host_from_gpu
                for i in node.inputs:
                    if i.owner and i.owner.op == host_from_gpu:
                        context_name = i.owner.inputs[0].type.context_name
                        replace = True
                        break

                if not replace:
                    # We replace if *all* clients are on the GPU
                    clients = [c for o in node.outputs for c in o.clients]
                    replace = len(clients) != 0
                    for c, idx in clients:
                        if (c == 'output' or
                                not isinstance(c.op, GpuFromHost)):
                            replace = False
                    # TODO: check that the clients want the same context?
                    if replace:
                        # All clients are GpuFromHost and we have at least one
                        context_name = clients[0][0].op.context_name

                # Check if we should replace
                if (not replace or
                        (cuda_only and
                         get_context(context_name).kind != 'cuda')):
                    return False

                # tag the inputs with the context in case
                # the context was derived from the outputs
                for i in node.inputs:
                    i.tag.context_name = context_name

                new_op = maker(node, context_name)
                # This is needed as sometimes new_op inherits from OP.
                if new_op and new_op != node.op:
                    if isinstance(new_op, theano.Op):
                        return [safe_to_cpu(o) for o in
                                new_op(*node.inputs, return_list=True)]
                    elif isinstance(new_op, (tuple, list)):
                        return [safe_to_cpu(o) for o in new_op]
                    else:  # suppose it is a variable on the GPU
                        return [host_from_gpu(new_op)]
            return False
        local_opt.__name__ = maker.__name__
        return local_optimizer(OP)(local_opt)
    return f
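In use, op_lifter decorates a maker that returns the GPU counterpart for a lifted node; returning an Op instance (rather than a variable or list) makes the wrapper re-apply it to node.inputs and wrap the results with safe_to_cpu. A sketch modeled on Theano's own registrations; treat the exact names (gpu_join in particular) as assumptions.

from theano import tensor

@op_lifter([tensor.Join])
def local_gpua_join(node, context_name):
    # gpu_join is the gpuarray Join Op instance; op_lifter will apply it
    # to node.inputs and move the result back to the host if needed.
    return gpu_join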