This page collects and summarizes typical usage examples of the Python method cntk.Axis.all_axes. If you are wondering what Axis.all_axes does, how to call it, or where to see it in action, the curated code samples below should help. You can also explore further uses of its containing class, cntk.Axis.
Four code examples of Axis.all_axes are shown below, sorted by popularity by default.
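Before the test-harness examples, here is a minimal self-contained sketch of the idea (a sketch only, assuming CNTK 2.x and NumPy): passing Axis.all_axes() to a reduction op collapses every axis of the input, so the result is a single scalar.

import numpy as np
import cntk as C

x = C.constant(np.arange(6, dtype=np.float32).reshape(2, 3))
# Reduce over every axis at once; each result is a scalar.
print(C.reduce_sum(x, axis=C.Axis.all_axes()).eval())   # 15.0
print(C.reduce_mean(x, axis=C.Axis.all_axes()).eval())  # 2.5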
Example 1: test_op_reduce_mean_all_constant
# Required import: from cntk import Axis [or an alias]
# Or: from cntk.Axis import all_axes [or an alias]
def test_op_reduce_mean_all_constant(input_data, axis, device_id, precision):
    # AA (an alias for np.asarray) and PRECISION_TO_TYPE come from the
    # CNTK test utilities that surround this test.
    dt = PRECISION_TO_TYPE[precision]
    value = AA(input_data, dtype=dt)
    from .. import reduce_mean
    from cntk import Axis, Constant
    a = Constant(value, name='a')
    # Axis.all_axes() collapses every axis, so the op yields a scalar mean.
    input_op = reduce_mean(a, axis=Axis.all_axes())
    expected_forward = AA(np.mean(value))
    actual_forward = input_op.eval()
    assert np.allclose(actual_forward, expected_forward)
Example 2: test_op_reduce_mean_all_constant
# Required import: from cntk import Axis [or an alias]
# Or: from cntk.Axis import all_axes [or an alias]
def test_op_reduce_mean_all_constant(input_data, axis, device_id, precision):
    # dt = PRECISION_TO_TYPE[precision]
    # FIXME: we'd like to do dt = PRECISION_TO_TYPE[precision],
    # however there seems to be an issue with actual_forward below:
    # it gets computed correctly, but by the time np.allclose executes
    # it contains garbage values. The problem goes away if one uses
    # actual_forward = np.copy(input_op.eval())
    dt = np.float32
    value = AA(input_data, dtype=dt)
    from .. import reduce_mean
    from cntk import Axis, Constant
    a = Constant(value, name='a')
    input_op = reduce_mean(a, axis=Axis.all_axes())
    expected_forward = AA(np.mean(value))
    actual_forward = input_op.eval()
    assert np.allclose(actual_forward, expected_forward)
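The FIXME in Example 2 points at a buffer-lifetime pitfall: per the comment, the array returned by eval() can be correct when produced yet hold garbage by the time np.allclose runs, and copying it immediately avoids this. A hedged sketch of that defensive pattern (variable names are illustrative):

import numpy as np
import cntk as C

x = C.constant(np.array([[1.0, 2.0], [3.0, 4.0]], dtype=np.float32))
op = C.reduce_mean(x, axis=C.Axis.all_axes())

# Snapshot the result right away; np.copy detaches it from any buffer
# the runtime might reuse later.
actual = np.copy(op.eval())
assert np.allclose(actual, 2.5)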
Example 3: test_op_reduce_all
# Required import: from cntk import Axis [or an alias]
# Or: from cntk.Axis import all_axes [or an alias]
def test_op_reduce_all(input_data, axis, device_id, precision):
    # FIXME: we'd like to do dt = PRECISION_TO_TYPE[precision],
    # however there seems to be an issue with actual_forward below:
    # it gets computed correctly, but by the time np.allclose executes
    # it contains garbage values. The problem goes away if one uses
    # actual_forward = np.copy(input_op.eval(binding))
    dt = np.float32
    data = AA(input_data, dtype=dt)
    # I is the test suite's alias for input_variable.
    a = I(shape=data.shape,
          dtype=sanitize_dtype_cntk(dt),
          needs_gradient=True,
          name='a')
    # Create a ragged batch: one sequence of length 2 and one of length 1.
    value = [AA([data, data - 0.5], dtype=dt), AA([data + 0.25], dtype=dt)]
    from .. import reduce_sum, reduce_max, reduce_min, reduce_mean, reduce_log_sum_exp, reduce_prod
    from cntk import Axis

    def max_bwd(x, f):
        # Gradient of max/min: an indicator of the entries that attain
        # the reduced value f.
        y = np.zeros_like(x)
        yr = y.ravel()
        xr = x.ravel()
        for i in range(x.size):
            if xr[i] == f:
                yr[i] = 1
        return y

    # (op, closed-form forward over the whole batch, closed-form backward)
    ops = [
        (reduce_sum,         lambda x: AA(sum(np.sum(xi) for xi in x)),                            lambda x, f: [np.ones_like(xi) for xi in x]),
        (reduce_max,         lambda x: AA(max(np.max(xi) for xi in x)),                            lambda x, f: [max_bwd(xi, f) for xi in x]),
        (reduce_min,         lambda x: AA(min(np.min(xi) for xi in x)),                            lambda x, f: [max_bwd(xi, f) for xi in x]),
        (reduce_mean,        lambda x: AA(sum(np.sum(xi) for xi in x) / sum(xi.size for xi in x)), lambda x, f: [np.ones_like(xi) / sum(xj.size for xj in x) for xi in x]),
        (reduce_log_sum_exp, lambda x: AA(np.log(sum(np.sum(np.exp(xi)) for xi in x))),            lambda x, f: [np.exp(xi - f) for xi in x]),
        (reduce_prod,        lambda x: AA(np.prod([np.prod(xi) for xi in x])),                     lambda x, f: [f / xi for xi in x]),
    ]
    for op, fwd, bwd in ops:
        input_op = op(a, axis=Axis.all_axes())
        expected_forward = fwd(value)
        expected_backward = bwd(value, expected_forward)
        binding = {a: value}
        actual_backward = input_op.grad(binding)[0]
        actual_forward = np.copy(input_op.eval(binding))
        assert np.allclose(actual_forward, expected_forward)
        for ab, eb in zip(actual_backward, expected_backward):
            assert np.allclose(ab, eb)
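The ops table in Example 3 encodes the closed-form gradients the test expects: all ones for sum, an indicator of the extremal entries for max/min, 1/N for mean, exp(x - f) for log-sum-exp, and f/x for prod (where f is the reduced value). These identities can be sanity-checked against finite differences with plain NumPy (a sketch; the step size h is arbitrary):

import numpy as np

x = np.array([0.5, 1.5, -2.0])
h = 1e-6

# log-sum-exp: d/dx_i log(sum_j exp(x_j)) == exp(x_i - f)
f = np.log(np.sum(np.exp(x)))
numeric = np.array([(np.log(np.sum(np.exp(x + h * np.eye(3)[i]))) - f) / h
                    for i in range(3)])
assert np.allclose(np.exp(x - f), numeric, atol=1e-4)

# prod: d/dx_i prod_j x_j == f / x_i (for nonzero x_i)
f = np.prod(x)
numeric = np.array([(np.prod(x + h * np.eye(3)[i]) - f) / h for i in range(3)])
assert np.allclose(f / x, numeric, atol=1e-4)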
Example 4: test_op_reduce_all
# Required import: from cntk import Axis [or an alias]
# Or: from cntk.Axis import all_axes [or an alias]
def test_op_reduce_all(input_data, axis, device_id, precision):
    dt = PRECISION_TO_TYPE[precision]
    data = AA(input_data, dtype=dt)
    # This variant uses the newer sequence API (the test module imports cntk as C).
    a = C.sequence.input_variable(shape=data.shape,
                                  dtype=sanitize_dtype_cntk(dt),
                                  needs_gradient=True,
                                  name='a')
    # Create a ragged batch: one sequence of length 2 and one of length 1.
    value = [AA([data, data - 0.5], dtype=dt), AA([data + 0.25], dtype=dt)]
    from .. import reduce_sum, reduce_max, reduce_min, reduce_mean, reduce_log_sum_exp, reduce_prod
    from cntk import Axis

    def max_bwd(x, f):
        # Gradient of max/min: an indicator of the entries that attain
        # the reduced value f.
        y = np.zeros_like(x)
        yr = y.ravel()
        xr = x.ravel()
        for i in range(x.size):
            if xr[i] == f:
                yr[i] = 1
        return y

    # (op, closed-form forward over the whole batch, closed-form backward)
    ops = [
        (reduce_sum,         lambda x: AA(sum(np.sum(xi) for xi in x)),                            lambda x, f: [np.ones_like(xi) for xi in x]),
        (reduce_max,         lambda x: AA(max(np.max(xi) for xi in x)),                            lambda x, f: [max_bwd(xi, f) for xi in x]),
        (reduce_min,         lambda x: AA(min(np.min(xi) for xi in x)),                            lambda x, f: [max_bwd(xi, f) for xi in x]),
        (reduce_mean,        lambda x: AA(sum(np.sum(xi) for xi in x) / sum(xi.size for xi in x)), lambda x, f: [np.ones_like(xi) / sum(xj.size for xj in x) for xi in x]),
        (reduce_log_sum_exp, lambda x: AA(np.log(sum(np.sum(np.exp(xi)) for xi in x))),            lambda x, f: [np.exp(xi - f) for xi in x]),
        (reduce_prod,        lambda x: AA(np.prod([np.prod(xi) for xi in x])),                     lambda x, f: [f / xi for xi in x]),
    ]
    for op, fwd, bwd in ops:
        input_op = op(a, axis=Axis.all_axes())
        expected_forward = fwd(value)
        expected_backward = bwd(value, expected_forward)
        binding = {a: value}
        actual_backward = input_op.grad(binding)
        actual_forward = input_op.eval(binding)
        assert np.allclose(actual_forward, expected_forward)
        for ab, eb in zip(actual_backward, expected_backward):
            assert np.allclose(ab, eb)
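For completeness, the gist of Examples 3 and 4 can be run outside the test harness with a standalone sketch like the following (assuming CNTK 2.x; shapes and values are illustrative):

import numpy as np
import cntk as C

a = C.sequence.input_variable(shape=(2, 2), needs_gradient=True, name='a')
op = C.reduce_sum(a, axis=C.Axis.all_axes())

data = np.arange(4, dtype=np.float32).reshape(2, 2)
# A ragged batch: one sequence of length 2, one of length 1.
batch = [np.stack([data, data - 0.5]), data[np.newaxis] + 0.25]

print(op.eval({a: batch}))  # 17.0: the sum over the entire batch
# Summing over all axes means the gradient w.r.t. every element is 1.
for g in op.grad({a: batch}):
    assert np.allclose(g, 1.0)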