This article collects typical usage examples of the Python method cntk.reduce_sum. If you have been wondering what exactly cntk.reduce_sum does, how to call it, or what it looks like in real code, the curated examples below should help. You can also explore further usage examples from the cntk module itself.
The following shows 7 code examples of cntk.reduce_sum, all drawn from open-source projects.
Example 1: test_reduce_sum
# Required module: import cntk [as alias]
# Or: from cntk import reduce_sum [as alias]
# (this example additionally uses: import numpy as np)
def test_reduce_sum():
    data = np.array([[[5, 1], [20, 2]], [[30, 1], [40, 2]], [[55, 1], [60, 2]]], dtype=np.float32)
    assert_cntk_ngraph_flat_equal(C.reduce_sum([1, 0], 0))
    assert_cntk_ngraph_flat_equal(C.reduce_sum([[1., 1.], [3., 5.]], 0))
    assert_cntk_ngraph_flat_equal(C.reduce_sum([[1., 1.], [3., 5.]], 1))
    assert_cntk_ngraph_flat_equal(C.reduce_sum([[1., 1.], [3., 5.]], -1))
    assert_cntk_ngraph_flat_equal(C.reduce_sum(data, 0))
    assert_cntk_ngraph_flat_equal(C.reduce_sum(data, 1))
    assert_cntk_ngraph_flat_equal(C.reduce_sum(data, 2))
    assert_cntk_ngraph_flat_equal(C.reduce_sum(data, -1))
    assert_cntk_ngraph_flat_equal(C.reduce_sum(data, (0, 1)))
    assert_cntk_ngraph_flat_equal(C.reduce_sum(data, (0, 2)))
    assert_cntk_ngraph_flat_equal(C.reduce_sum(data, (1, 2)))
    assert_cntk_ngraph_flat_equal(C.reduce_sum(data, (-1, -2)))
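assert_cntk_ngraph_flat_equal is a helper from the surrounding test suite (presumably it evaluates the op under both CNTK and the nGraph importer and compares flattened results). A minimal sketch of what one of these assertions effectively checks, using only CNTK and NumPy:

import numpy as np
import cntk as C

data = np.array([[[5, 1], [20, 2]], [[30, 1], [40, 2]], [[55, 1], [60, 2]]],
                dtype=np.float32)
# CNTK keeps the reduced axis with size 1, hence the ravel for a flat compare:
cntk_result = C.reduce_sum(data, 0).eval()
np.testing.assert_allclose(np.ravel(cntk_result), np.ravel(data.sum(axis=0)))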
Example 2: sum
# Required module: import cntk [as alias]
# Or: from cntk import reduce_sum [as alias]
def sum(x, axis=None, keepdims=False):
    # _normalize_axis, _reduce_on_axis and _remove_dims are private helpers
    # of the surrounding Keras CNTK backend this function is taken from.
    axis = _normalize_axis(axis, x)
    output = _reduce_on_axis(x, axis, 'reduce_sum')
    return _remove_dims(output, axis, keepdims)
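Because CNTK's reduce ops keep the reduced axis with size 1, the keepdims=False case has to squeeze that axis out afterwards. A minimal standalone sketch of the same logic for a non-negative static axis (reduce_sum_static is a hypothetical name, not part of any backend):

import numpy as np
import cntk as C

def reduce_sum_static(x, axis, keepdims=False):
    out = C.reduce_sum(x, axis=axis)  # reduced axis is kept with size 1
    if not keepdims:
        new_shape = out.shape[:axis] + out.shape[axis + 1:]
        out = C.reshape(out, new_shape)  # drop the singleton axis
    return out

x = C.constant(np.array([[1., 1.], [3., 5.]], dtype=np.float32))
print(reduce_sum_static(x, axis=0).eval())  # [4., 6.]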
Example 3: any
# Required module: import cntk [as alias]
# Or: from cntk import reduce_sum [as alias]
def any(x, axis=None, keepdims=False):
    # A nonzero sum along the axis means at least one element was truthy.
    reduce_result = sum(x, axis, keepdims=keepdims)
    any_matrix = C.element_select(
        reduce_result,
        ones_like(reduce_result),
        zeros_like(reduce_result))
    if len(reduce_result.shape) == 0 and _get_dynamic_axis_num(x) == 0:
        return C.reduce_sum(any_matrix)
    else:
        return any_matrix
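The element_select call maps every nonzero sum to 1 and every zero sum to 0. A quick standalone illustration, using CNTK's own ones_like/zeros_like ops and made-up values:

import numpy as np
import cntk as C

sums = C.constant(np.array([0., 2., 0., 3.], dtype=np.float32))
flags = C.element_select(sums, C.ones_like(sums), C.zeros_like(sums))
print(flags.eval())  # [0., 1., 0., 1.] -- 1 wherever the sum was nonzero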
Example 4: all
# Required module: import cntk [as alias]
# Or: from cntk import reduce_sum [as alias]
def all(x, axis=None, keepdims=False):
    # The product along the axis is zero as soon as any element is zero.
    reduce_result = prod(x, axis, keepdims=keepdims)
    all_matrix = C.element_select(
        reduce_result,
        ones_like(reduce_result),
        zeros_like(reduce_result))
    if len(reduce_result.shape) == 0 and _get_dynamic_axis_num(x) == 0:
        return C.reduce_sum(all_matrix)
    else:
        return all_matrix
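Example 4 mirrors Example 3: since the product is nonzero only when every factor is, element_select yields 1 exactly where all elements along the axis were truthy. In both functions the trailing C.reduce_sum over a zero-rank, dynamic-axis-free result is numerically a no-op (summing a scalar returns it unchanged); it appears to exist only so that this branch, too, returns a well-formed CNTK op.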
Example 5: l2_normalize
# Required module: import cntk [as alias]
# Or: from cntk import reduce_sum [as alias]
def l2_normalize(x, axis=None):
    axis = [axis]
    axis = _normalize_axis(axis, x)
    norm = C.sqrt(C.reduce_sum(C.square(x), axis=axis[0]))
    return x / norm
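Since the reduced axis is kept with size 1, the division broadcasts each slice's norm back over x. A concrete standalone run, normalizing each row (axis 1; values chosen so the row norms are 5 and 10):

import numpy as np
import cntk as C

x = C.constant(np.array([[3., 4.], [6., 8.]], dtype=np.float32))
norm = C.sqrt(C.reduce_sum(C.square(x), axis=1))  # shape (2, 1): one norm per row
print((x / norm).eval())  # [[0.6, 0.8], [0.6, 0.8]]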
Example 6: categorical_crossentropy
# Required module: import cntk [as alias]
# Or: from cntk import reduce_sum [as alias]
def categorical_crossentropy(target, output, from_logits=False, axis=-1):
    # Here, unlike other backends, the tensors lack a batch dimension:
    axis_without_batch = -1 if axis == -1 else axis - 1
    output_dimensions = list(range(len(output.shape)))
    if axis_without_batch != -1 and axis_without_batch not in output_dimensions:
        raise ValueError(
            '{}{}{}'.format(
                'Unexpected channels axis {}. '.format(axis_without_batch),
                'Expected to be -1 or one of the axes of `output`, ',
                'which has {} dimensions.'.format(len(output.shape))))
    # If the channels are not in the last axis, move them to be there:
    if axis_without_batch != -1 and axis_without_batch != output_dimensions[-1]:
        permutation = output_dimensions[:axis_without_batch]
        permutation += output_dimensions[axis_without_batch + 1:]
        permutation += [axis_without_batch]
        output = C.transpose(output, permutation)
        target = C.transpose(target, permutation)
    if from_logits:
        result = C.cross_entropy_with_softmax(output, target)
        # cntk's result shape is (batch, 1), while keras expects (batch,)
        return C.reshape(result, ())
    else:
        # scale preds so that the class probabilities of each sample sum to 1
        output /= C.reduce_sum(output, axis=-1)
        # avoid numerical instability with epsilon clipping
        output = C.clip(output, epsilon(), 1.0 - epsilon())
        # `sum` is the backend reduction from Example 2, not Python's builtin
        return -sum(target * C.log(output), axis=-1)
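The probability branch in isolation, on concrete values. Here eps stands in for the backend's epsilon(), and plain C.reduce_sum replaces the backend's sum from Example 2 (the result keeps a singleton axis, but the value is the same):

import numpy as np
import cntk as C

eps = 1e-7  # stand-in for the backend's epsilon()
target = C.constant(np.array([0., 1., 0.], dtype=np.float32))
output = C.constant(np.array([1., 7., 2.], dtype=np.float32))  # unnormalized scores
probs = output / C.reduce_sum(output, axis=-1)  # classes now sum to 1
probs = C.clip(probs, eps, 1.0 - eps)
loss = -C.reduce_sum(target * C.log(probs), axis=-1)
print(loss.eval())  # ~0.3567, i.e. -log(0.7) for the true class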
Example 7: categorical_crossentropy
# Required module: import cntk [as alias]
# Or: from cntk import reduce_sum [as alias]
def categorical_crossentropy(target, output, from_logits=False):
    if from_logits:
        result = C.cross_entropy_with_softmax(output, target)
        # cntk's result shape is (batch, 1), while keras expects (batch,)
        return C.reshape(result, ())
    else:
        # scale preds so that the class probabilities of each sample sum to 1
        output /= C.reduce_sum(output, axis=-1)
        # avoid numerical instability with epsilon clipping
        output = C.clip(output, epsilon(), 1.0 - epsilon())
        # `sum` is the backend reduction from Example 2, not Python's builtin
        return -sum(target * C.log(output), axis=-1)
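This earlier variant lacks the channels-axis handling of Example 6 but takes the same two paths. The from_logits path defers entirely to CNTK's fused op; a standalone run of just that branch (values made up for the demo):

import numpy as np
import cntk as C

target = C.constant(np.array([0., 1., 0.], dtype=np.float32))
logits = C.constant(np.array([1., 3., 0.], dtype=np.float32))
# softmax and cross-entropy fused into one numerically stable op:
print(C.cross_entropy_with_softmax(logits, target).eval())  # ~0.17 = -log(softmax[1])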