This article collects typical usage examples of the Python cntk.sqrt method. If you are wondering what cntk.sqrt does, how to call it, or what real-world uses look like, the curated code examples below should help. You can also browse further usage examples from the cntk module, which provides this method.
Eight code examples of the cntk.sqrt method are shown below, sorted by popularity by default.
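Before the project examples, here is a minimal standalone sketch of what C.sqrt computes (assuming cntk is installed and imported as C); it is not taken from any of the projects below.

import numpy as np
import cntk as C

# Elementwise square root of a constant array; .eval() runs the op eagerly.
print(C.sqrt([0., 4., 9.]).eval())  # [0. 2. 3.]

# The same op applied inside a small graph fed through an input variable.
x = C.input_variable(3)
y = C.sqrt(x)
print(y.eval({x: np.array([[1., 4., 16.]], dtype=np.float32)}))  # [[1. 2. 4.]]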
Example 1: batch_normalization
# Required module: import cntk [as alias]
# Or: from cntk import sqrt [as alias]
def batch_normalization(x, mean, var, beta, gamma, axis=-1, epsilon=1e-3):
    # mean / var / beta / gamma may have been broadcast, so they can carry an
    # extra batch axis of size 1; CNTK does not need it, so remove those
    # dummy axes.
    if ndim(mean) == ndim(x) and shape(mean)[0] == 1:
        mean = _reshape_dummy_dim(mean, [0])
    if ndim(var) == ndim(x) and shape(var)[0] == 1:
        var = _reshape_dummy_dim(var, [0])
    if gamma is None:
        gamma = ones_like(var)
    elif ndim(gamma) == ndim(x) and shape(gamma)[0] == 1:
        gamma = _reshape_dummy_dim(gamma, [0])
    if beta is None:
        beta = zeros_like(mean)
    elif ndim(beta) == ndim(x) and shape(beta)[0] == 1:
        beta = _reshape_dummy_dim(beta, [0])
    return (x - mean) / C.sqrt(var + epsilon) * gamma + beta
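The helpers used above (ndim, shape, _reshape_dummy_dim, ones_like, zeros_like) belong to the Keras CNTK backend and are not shown here. As a rough usage sketch, the same core expression can be written with raw cntk ops and hypothetical parameter values:

import numpy as np
import cntk as C

# Hypothetical per-channel statistics and affine parameters.
x = C.input_variable(4)
mean = C.constant(np.zeros(4, dtype=np.float32))
var = C.constant(np.ones(4, dtype=np.float32))
gamma = C.constant(np.ones(4, dtype=np.float32))
beta = C.constant(np.zeros(4, dtype=np.float32))
epsilon = 1e-3

# Same core expression as in the example above.
bn = (x - mean) / C.sqrt(var + epsilon) * gamma + beta
print(bn.eval({x: np.array([[1., 2., 3., 4.]], dtype=np.float32)}))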
Example 2: _layer_BatchNorm
# Required module: import cntk [as alias]
# Or: from cntk import sqrt [as alias]
def _layer_BatchNorm(self):
    self.add_body(0, """
def batch_normalization(input, name, epsilon, **kwargs):
    mean = cntk.Parameter(init = __weights_dict[name]['mean'],
                          name = name + "_mean")
    var = cntk.Parameter(init = __weights_dict[name]['var'],
                         name = name + "_var")
    layer = (input - mean) / cntk.sqrt(var + epsilon)
    if 'scale' in __weights_dict[name]:
        scale = cntk.Parameter(init = __weights_dict[name]['scale'],
                               name = name + "_scale")
        layer = scale * layer
    if 'bias' in __weights_dict[name]:
        bias = cntk.Parameter(init = __weights_dict[name]['bias'],
                              name = name + "_bias")
        layer = layer + bias
    return layer
""")
Example 3: batch_normalization
# Required module: import cntk [as alias]
# Or: from cntk import sqrt [as alias]
def batch_normalization(x, mean, var, beta, gamma, epsilon=1e-3):
    # mean / var / beta / gamma may have been broadcast, so they can carry an
    # extra batch axis of size 1; CNTK does not need it, so remove those
    # dummy axes.
    if ndim(mean) == ndim(x) and shape(mean)[0] == 1:
        mean = _reshape_dummy_dim(mean, [0])
    if ndim(var) == ndim(x) and shape(var)[0] == 1:
        var = _reshape_dummy_dim(var, [0])
    if gamma is None:
        gamma = ones_like(var)
    elif ndim(gamma) == ndim(x) and shape(gamma)[0] == 1:
        gamma = _reshape_dummy_dim(gamma, [0])
    if beta is None:
        beta = zeros_like(mean)
    elif ndim(beta) == ndim(x) and shape(beta)[0] == 1:
        beta = _reshape_dummy_dim(beta, [0])
    return (x - mean) / (C.sqrt(var) + epsilon) * gamma + beta
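Note that this variant adds epsilon outside the square root (C.sqrt(var) + epsilon), whereas Examples 1 and 4 add it inside (C.sqrt(var + epsilon)). The two stabilizers are not numerically identical when the variance is very small, as this plain-numpy sketch with assumed values shows:

import numpy as np

var, eps = np.float32(1e-6), np.float32(1e-3)
print(np.sqrt(var + eps))  # ~0.0316, epsilon inside the root (Examples 1 and 4)
print(np.sqrt(var) + eps)  # ~0.0020, epsilon outside the root (this example)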
Example 4: batch_normalization
# Required module: import cntk [as alias]
# Or: from cntk import sqrt [as alias]
def batch_normalization(x, mean, var, beta, gamma, epsilon=1e-3):
    # mean / var / beta / gamma may have been broadcast, so they can carry an
    # extra batch axis of size 1; CNTK does not need it, so remove those
    # dummy axes.
    if ndim(mean) == ndim(x) and shape(mean)[0] == 1:
        mean = _reshape_dummy_dim(mean, [0])
    if ndim(var) == ndim(x) and shape(var)[0] == 1:
        var = _reshape_dummy_dim(var, [0])
    if gamma is None:
        gamma = ones_like(var)
    elif ndim(gamma) == ndim(x) and shape(gamma)[0] == 1:
        gamma = _reshape_dummy_dim(gamma, [0])
    if beta is None:
        beta = zeros_like(mean)
    elif ndim(beta) == ndim(x) and shape(beta)[0] == 1:
        beta = _reshape_dummy_dim(beta, [0])
    return gamma * ((x - mean) / C.sqrt(var + epsilon)) + beta
Example 5: test_sqrt
# Required module: import cntk [as alias]
# Or: from cntk import sqrt [as alias]
def test_sqrt():
    assert_cntk_ngraph_isclose(C.sqrt([0., 4.]))
    assert_cntk_ngraph_isclose(C.sqrt([[1, 2], [3, 4]]))
    assert_cntk_ngraph_isclose(C.sqrt([[[1, 2], [3, 4]], [[1, 2], [3, 4]]]))
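assert_cntk_ngraph_isclose comes from the ngraph test suite and is not shown above. As a rough stand-in, the same kind of check can be done directly against numpy:

import numpy as np
import cntk as C

data = np.array([[1., 2.], [3., 4.]], dtype=np.float32)
# Evaluate the CNTK op and compare elementwise with numpy's result.
assert np.allclose(C.sqrt(data).eval(), np.sqrt(data))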
Example 6: std
# Required module: import cntk [as alias]
# Or: from cntk import sqrt [as alias]
def std(x, axis=None, keepdims=False):
    return C.sqrt(var(x, axis=axis, keepdims=keepdims))
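The var() helper is another backend function that is not shown; the sketch below inlines a (biased) variance so the C.sqrt step can be seen end to end, with shapes and values assumed for illustration.

import numpy as np
import cntk as C

x = C.input_variable(4)
mean = C.reduce_mean(x, axis=0)
variance = C.reduce_mean(C.square(x - mean), axis=0)  # biased variance
std = C.sqrt(variance)
print(std.eval({x: np.array([[1., 2., 3., 4.]], dtype=np.float32)}))  # ~1.118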
Example 7: sqrt
# Required module: import cntk [as alias]
# Or: from cntk import sqrt [as alias]
def sqrt(x):
    return C.sqrt(x)
Example 8: l2_normalize
# Required module: import cntk [as alias]
# Or: from cntk import sqrt [as alias]
def l2_normalize(x, axis=None):
    axis = [axis]
    axis = _normalize_axis(axis, x)
    norm = C.sqrt(C.reduce_sum(C.square(x), axis=axis[0]))
    return x / norm
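_normalize_axis is a backend helper that maps Keras axis conventions to CNTK axes and is not shown. A rough usage sketch with the axis resolved by hand to the only static axis:

import numpy as np
import cntk as C

x = C.input_variable(3)
norm = C.sqrt(C.reduce_sum(C.square(x), axis=0))
unit = x / norm
print(unit.eval({x: np.array([[3., 0., 4.]], dtype=np.float32)}))  # [[0.6 0. 0.8]]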