This article collects typical usage examples of the Python method cntk.reduce_sum. If you have been wondering what cntk.reduce_sum does, how to call it, or what real code that uses it looks like, the curated examples below should help. You can also explore further usage examples from the cntk module, where the method lives.
The seven cntk.reduce_sum code examples shown below are drawn from open-source projects.
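Before the examples, a minimal sketch of the basic semantics may help: cntk's reduction ops keep the reduced axis with size 1 (similar to numpy's keepdims=True). The data values here are purely illustrative.

import numpy as np
import cntk as C

data = np.array([[10., 20.], [30., 40.], [50., 60.]], dtype=np.float32)
print(C.reduce_sum(data, axis=0).eval())  # [[ 90. 120.]]          -- shape (1, 2)
print(C.reduce_sum(data, axis=1).eval())  # [[ 30.] [ 70.] [110.]] -- shape (3, 1)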
Example 1: test_reduce_sum
# Required imports: import cntk as C, import numpy as np
# Alternatively: from cntk import reduce_sum
def test_reduce_sum():
    data = np.array([[[5, 1], [20, 2]], [[30, 1], [40, 2]], [[55, 1], [60, 2]]], dtype=np.float32)
    # Single-axis reductions, including negative axis indices:
    assert_cntk_ngraph_flat_equal(C.reduce_sum([1, 0], 0))
    assert_cntk_ngraph_flat_equal(C.reduce_sum([[1., 1.], [3., 5.]], 0))
    assert_cntk_ngraph_flat_equal(C.reduce_sum([[1., 1.], [3., 5.]], 1))
    assert_cntk_ngraph_flat_equal(C.reduce_sum([[1., 1.], [3., 5.]], -1))
    assert_cntk_ngraph_flat_equal(C.reduce_sum(data, 0))
    assert_cntk_ngraph_flat_equal(C.reduce_sum(data, 1))
    assert_cntk_ngraph_flat_equal(C.reduce_sum(data, 2))
    assert_cntk_ngraph_flat_equal(C.reduce_sum(data, -1))
    # Reductions over tuples of axes:
    assert_cntk_ngraph_flat_equal(C.reduce_sum(data, (0, 1)))
    assert_cntk_ngraph_flat_equal(C.reduce_sum(data, (0, 2)))
    assert_cntk_ngraph_flat_equal(C.reduce_sum(data, (1, 2)))
    assert_cntk_ngraph_flat_equal(C.reduce_sum(data, (-1, -2)))
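For concreteness, here is what one of these reductions evaluates to, assuming the same data array as in the test (a hedged illustration; assert_cntk_ngraph_flat_equal is the test suite's own helper and is not needed here):

summed = C.reduce_sum(data, 2)  # sum over the last axis, kept with size 1
print(summed.eval())
# [[[ 6.] [22.]]
#  [[31.] [42.]]
#  [[56.] [62.]]]   -- shape (3, 2, 1)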
Example 2: sum
# Required import: import cntk as C
# Alternatively: from cntk import reduce_sum
def sum(x, axis=None, keepdims=False):
    # Keras-style sum on top of cntk: normalize the axis argument, reduce
    # with reduce_sum, then drop the kept size-1 axes unless keepdims is
    # requested. The underscore-prefixed helpers are Keras CNTK-backend internals.
    axis = _normalize_axis(axis, x)
    output = _reduce_on_axis(x, axis, 'reduce_sum')
    return _remove_dims(output, axis, keepdims)
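A minimal standalone sketch of the same idea, assuming a tensor with only static axes (no dynamic batch or sequence axes); C.squeeze stands in here for the backend's _remove_dims helper:

import numpy as np
import cntk as C

x = np.array([[1., 1.], [3., 5.]], dtype=np.float32)
kept = C.reduce_sum(x, axis=1)  # shape (2, 1): [[2.], [8.]]
dropped = C.squeeze(kept)       # drop size-1 static axes -> shape (2,): [2., 8.]
print(kept.eval(), dropped.eval())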
Example 3: any
# Required import: import cntk as C
# Alternatively: from cntk import reduce_sum
def any(x, axis=None, keepdims=False):
    # For the boolean (0/1) inputs `any` expects, the sum over the reduced
    # axes is nonzero iff some element is nonzero; element_select then maps
    # nonzero sums to 1 and zero sums to 0.
    reduce_result = sum(x, axis, keepdims=keepdims)
    any_matrix = C.element_select(
        reduce_result,
        ones_like(reduce_result),
        zeros_like(reduce_result))
    # Collapse a fully reduced, batch-free result to a scalar:
    if len(reduce_result.shape) == 0 and _get_dynamic_axis_num(x) == 0:
        return C.reduce_sum(any_matrix)
    else:
        return any_matrix
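A quick hedged check of the nonzero-to-one mapping, using cntk's own ones_like/zeros_like in place of the backend helpers:

import numpy as np
import cntk as C

x = np.array([[0., 0.], [0., 1.]], dtype=np.float32)
s = C.reduce_sum(x, axis=1)                                  # [[0.], [1.]]
flags = C.element_select(s, C.ones_like(s), C.zeros_like(s))
print(flags.eval())                                          # [[0.], [1.]]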
Example 4: all
# Required import: import cntk as C
# Alternatively: from cntk import reduce_sum
def all(x, axis=None, keepdims=False):
    # For the boolean (0/1) inputs `all` expects, the product over the
    # reduced axes is nonzero iff every element is nonzero.
    reduce_result = prod(x, axis, keepdims=keepdims)
    all_matrix = C.element_select(
        reduce_result,
        ones_like(reduce_result),
        zeros_like(reduce_result))
    # Collapse a fully reduced, batch-free result to a scalar:
    if len(reduce_result.shape) == 0 and _get_dynamic_axis_num(x) == 0:
        return C.reduce_sum(all_matrix)
    else:
        return all_matrix
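The same mapping with a product reduction, again a hedged sketch with raw cntk ops in place of the backend helpers:

import numpy as np
import cntk as C

x = np.array([[1., 1.], [0., 1.]], dtype=np.float32)
p = C.reduce_prod(x, axis=1)                                        # [[1.], [0.]]
print(C.element_select(p, C.ones_like(p), C.zeros_like(p)).eval())  # [[1.], [0.]]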
Example 5: l2_normalize
# Required import: import cntk as C
# Alternatively: from cntk import reduce_sum
def l2_normalize(x, axis=None):
    # Divide x by its L2 norm along the given axis; the kept size-1 axis
    # of the norm lets the division broadcast back over x.
    axis = [axis]
    axis = _normalize_axis(axis, x)
    norm = C.sqrt(C.reduce_sum(C.square(x), axis=axis[0]))
    return x / norm
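A usage sketch on a constant, normalizing each row to unit L2 norm (values illustrative):

import numpy as np
import cntk as C

x = C.constant(np.array([[3., 4.], [0., 2.]], dtype=np.float32))
norm = C.sqrt(C.reduce_sum(C.square(x), axis=1))  # per-row norms: [[5.], [2.]]
print((x / norm).eval())                          # [[0.6 0.8] [0. 1.]]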
Example 6: categorical_crossentropy
# Required import: import cntk as C
# Alternatively: from cntk import reduce_sum
def categorical_crossentropy(target, output, from_logits=False, axis=-1):
    # Here, unlike in other backends, the tensors lack a batch dimension:
    axis_without_batch = -1 if axis == -1 else axis - 1
    output_dimensions = list(range(len(output.shape)))
    if axis_without_batch != -1 and axis_without_batch not in output_dimensions:
        raise ValueError(
            '{}{}{}'.format(
                'Unexpected channels axis {}. '.format(axis_without_batch),
                'Expected to be -1 or one of the axes of `output`, ',
                'which has {} dimensions.'.format(len(output.shape))))
    # If the channels are not in the last axis, move them there:
    if axis_without_batch != -1 and axis_without_batch != output_dimensions[-1]:
        permutation = output_dimensions[:axis_without_batch]
        permutation += output_dimensions[axis_without_batch + 1:]
        permutation += [axis_without_batch]
        output = C.transpose(output, permutation)
        target = C.transpose(target, permutation)
    if from_logits:
        result = C.cross_entropy_with_softmax(output, target)
        # cntk's result shape is (batch, 1), while keras expects (batch,)
        return C.reshape(result, ())
    else:
        # Scale predictions so that the class probabilities of each sample sum to 1
        output /= C.reduce_sum(output, axis=-1)
        # Avoid numerical instability with epsilon clipping
        output = C.clip(output, epsilon(), 1.0 - epsilon())
        # `sum` here is the backend wrapper from example 2, not Python's builtin
        return -sum(target * C.log(output), axis=-1)
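A hedged check of the from_logits path on a single 4-class sample (this mirrors the example in the cntk documentation for cross_entropy_with_softmax):

import cntk as C

# A confident logit on the correct class gives a loss near zero:
print(C.cross_entropy_with_softmax([[1., 1., 1., 50.]], [[0., 0., 0., 1.]]).eval())  # [[0.]]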
Example 7: categorical_crossentropy
# Required import: import cntk as C
# Alternatively: from cntk import reduce_sum
def categorical_crossentropy(target, output, from_logits=False):
    # Same as example 6, but without support for a channels-axis argument.
    if from_logits:
        result = C.cross_entropy_with_softmax(output, target)
        # cntk's result shape is (batch, 1), while keras expects (batch,)
        return C.reshape(result, ())
    else:
        # Scale predictions so that the class probabilities of each sample sum to 1
        output /= C.reduce_sum(output, axis=-1)
        # Avoid numerical instability with epsilon clipping
        output = C.clip(output, epsilon(), 1.0 - epsilon())
        return -sum(target * C.log(output), axis=-1)
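And a hedged sketch of the probability path, with a literal 1e-7 standing in for the backend's epsilon() helper:

import numpy as np
import cntk as C

target = C.constant(np.array([0., 1., 0.], dtype=np.float32))
probs = C.constant(np.array([0.1, 0.7, 0.2], dtype=np.float32))
probs = C.clip(probs, 1e-7, 1.0 - 1e-7)
loss = -C.reduce_sum(target * C.log(probs), axis=-1)
print(loss.eval())  # ~0.3567 == -log(0.7)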