This article collects typical usage examples of the Python method mxnet.nd.sqrt. If you are wondering what nd.sqrt does, how it is called, or where to find examples of it, the hand-picked code samples below should help. You can also explore further usage examples of the module it belongs to, mxnet.nd.

Below are 11 code examples of nd.sqrt, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Python code examples.
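Before the examples, here is a minimal sketch of what nd.sqrt itself does; the array values below are made up purely for illustration:

from mxnet import nd

a = nd.array([1.0, 4.0, 9.0])
print(nd.sqrt(a))           # element-wise square root -> [1. 2. 3.]
print(nd.sqrt(nd.sum(a)))   # also works on a one-element NDArray -> [3.7416573]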
Example 1: hybrid_forward
# Required import: from mxnet import nd [as alias]
# Alternatively: from mxnet.nd import sqrt [as alias]
def hybrid_forward(self, F, X, y=None):
    X = self.net[0](X)  # Conv1
    X = self.net[1](X)  # Primary Capsules
    X = self.net[2](X)  # Digit Capsules
    # collapse the singleton axes, keeping (batch, num_capsules, capsule_dim)
    X = X.reshape((X.shape[0], X.shape[2], X.shape[4]))
    # capsule vector lengths, used for the margin loss calculation
    X_l2norm = nd.sqrt((X ** 2).sum(axis=-1))
    prob = nd.softmax(X_l2norm, axis=-1)
    if y is not None:
        max_len_indices = y
    else:
        max_len_indices = nd.argmax(prob, axis=-1)
    # select the capsule of the target (or predicted) class for reconstruction
    y_tile = nd.tile(max_len_indices.expand_dims(axis=1), reps=(1, X.shape[-1]))
    batch_activated_capsules = nd.pick(X, y_tile, axis=1, keepdims=True)
    reconstructions = self.net[3](batch_activated_capsules)
    return prob, X_l2norm, reconstructions
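The length-based classification step in the middle of this forward pass can be reproduced in isolation; the shapes below are assumptions chosen only to show how capsule lengths become class probabilities:

from mxnet import nd

caps = nd.random.normal(shape=(4, 10, 16))    # assumed (batch, num_classes, capsule_dim)
lengths = nd.sqrt((caps ** 2).sum(axis=-1))   # one length per class capsule
probs = nd.softmax(lengths, axis=-1)          # lengths interpreted as class scores
print(probs.sum(axis=-1))                     # each row sums to 1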
Example 2: global_norm
# Required import: from mxnet import nd [as alias]
# Alternatively: from mxnet.nd import sqrt [as alias]
def global_norm(arrays: Union[Generator[NDArray, NDArray, NDArray], List[NDArray], Tuple[NDArray]]) -> NDArray:
    """
    Calculate global norm on list or tuple of NDArrays using this formula:
    `global_norm = sqrt(sum([l2norm(p)**2 for p in parameters]))`

    :param arrays: list or tuple of parameters to calculate global norm on
    :return: single-value NDArray
    """
    def _norm(array):
        if array.stype == 'default':
            x = array.reshape((-1,))
            return nd.dot(x, x)
        return array.norm().square()

    total_norm = nd.add_n(*[_norm(arr) for arr in arrays])
    total_norm = nd.sqrt(total_norm)
    return total_norm
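A minimal way to exercise this helper; the array shapes and values are made up for illustration:

from mxnet import nd

arrays = [nd.ones((2, 3)), 2 * nd.ones((4,))]
print(global_norm(arrays))   # sqrt(6 * 1^2 + 4 * 2^2) = sqrt(22) ≈ 4.69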
Example 3: get_distance_matrix
# Required import: from mxnet import nd [as alias]
# Alternatively: from mxnet.nd import sqrt [as alias]
def get_distance_matrix(x):
    """Get distance matrix given a matrix. Used in testing."""
    square = nd.sum(x ** 2.0, axis=1, keepdims=True)
    distance_square = square + square.transpose() - (2.0 * nd.dot(x, x.transpose()))
    return nd.sqrt(distance_square)
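As a quick sanity check, feeding it a small matrix of row vectors (values made up) yields the pairwise Euclidean distances:

from mxnet import nd

x = nd.array([[0.0, 0.0], [3.0, 4.0]])
print(get_distance_matrix(x))   # zeros on the diagonal, 5.0 off-diagonal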
Example 4: grad_clipping
# Required import: from mxnet import nd [as alias]
# Alternatively: from mxnet.nd import sqrt [as alias]
def grad_clipping(params, clipping_norm, ctx):
    """Gradient clipping."""
    if clipping_norm is not None:
        norm = nd.array([0.0], ctx)
        for p in params:
            norm += nd.sum(p.grad ** 2)
        norm = nd.sqrt(norm).asscalar()
        if norm > clipping_norm:
            for p in params:
                p.grad[:] *= clipping_norm / norm
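A sketch of how such a helper is typically called, right after backward() and before the parameter update; the parameter shape and the threshold of 1.0 are assumptions for illustration:

import mxnet as mx
from mxnet import nd, autograd

ctx = mx.cpu()
w = nd.random.normal(shape=(3,), ctx=ctx)
w.attach_grad()
with autograd.record():
    loss = (w * w).sum()
loss.backward()
grad_clipping([w], clipping_norm=1.0, ctx=ctx)   # rescales w.grad in place if the global norm exceeds 1.0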
Example 5: _merge_bn
# Required import: from mxnet import nd [as alias]
# Alternatively: from mxnet.nd import sqrt [as alias]
def _merge_bn(net, conv_name="conv", bn_name="batchnorm", exclude=[]):
    conv_lst = []

    def _collect_conv(m):
        if isinstance(m, nn.Conv2D):
            assert not hasattr(m, "gamma"), "Don't merge bn to a conv with fake bn! ({})".format(m.name)
            conv_lst.append(m)

    net.apply(_collect_conv)
    bn_names = [c.name.replace(conv_name, bn_name) for c in conv_lst]
    for conv, bn in zip(conv_lst, bn_names):
        params = net.collect_params(bn + "_")
        if len(params.keys()) != 0 and conv not in exclude:
            print("Merge {} to {}".format(bn, conv.name))
            gamma = params[bn + "_gamma"].data()
            beta = params[bn + "_beta"].data()
            mean = params[bn + "_running_mean"].data()
            var = params[bn + "_running_var"].data()
            weight = conv.weight.data()
            w_shape = conv.weight.shape
            cout = w_shape[0]
            conv.weight.set_data((weight.reshape(cout, -1) * gamma.reshape(-1, 1)
                                  / nd.sqrt(var + 1e-10).reshape(-1, 1)).reshape(w_shape))
            if conv.bias is None:
                conv._kwargs['no_bias'] = False
                conv.bias = conv.params.get('bias',
                                            shape=(cout,), init="zeros",
                                            allow_deferred_init=True)
                conv.bias.initialize()
            bias = conv.bias.data()
            conv.bias.set_data(gamma * (bias - mean) / nd.sqrt(var + 1e-10) + beta)
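The weight and bias updates above follow the usual BatchNorm-folding identity: the convolution weights are scaled by gamma / sqrt(var + eps) and the bias is shifted so that the folded convolution reproduces conv followed by BN. A tiny single-value check of that identity, with all numbers made up:

from mxnet import nd

x, gamma, beta = nd.array([2.0]), nd.array([1.5]), nd.array([0.1])
mean, var, eps = nd.array([0.3]), nd.array([0.8]), 1e-10
bn_out = gamma * (x - mean) / nd.sqrt(var + eps) + beta
folded = (gamma / nd.sqrt(var + eps)) * x + (gamma * (0.0 - mean) / nd.sqrt(var + eps) + beta)
print(bn_out, folded)   # both print the same value (up to floating-point error)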
Example 6: squash
# Required import: from mxnet import nd [as alias]
# Alternatively: from mxnet.nd import sqrt [as alias]
def squash(x, axis):
    s_squared_norm = nd.sum(nd.square(x), axis, keepdims=True)
    # if s_squared_norm is really small, we will be in trouble,
    # so I removed the squashing scale term:
    # scale = s_squared_norm / ((1 + s_squared_norm) * nd.sqrt(s_squared_norm + 1e-9))
    # return x * scale
    scale = nd.sqrt(s_squared_norm + 1e-9)
    return x / scale
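With the scaling term commented out, this variant simply normalizes each vector to (approximately) unit length rather than applying the original squashing nonlinearity; a quick check on a dummy tensor whose shape is arbitrary:

from mxnet import nd

v = nd.random.normal(shape=(2, 8))
print(nd.sqrt(nd.sum(nd.square(squash(v, axis=-1)), axis=-1)))   # values close to 1.0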
Example 7: forward
# Required import: from mxnet import nd [as alias]
# Alternatively: from mxnet.nd import sqrt [as alias]
def forward(self, x):
    # vector length (L2 norm) along axis 1
    x = nd.sqrt(nd.sum(nd.square(x), 1))
    return x
Example 8: get_value
# Required import: from mxnet import nd [as alias]
# Alternatively: from mxnet.nd import sqrt [as alias]
def get_value(self):
    # square root of the accumulated loss divided by the sample count
    # (small epsilon avoids division by zero)
    return {self.name: nd.sqrt(self.loss / (self.cnt + 1e-8))}
Example 9: squash
# Required import: from mxnet import nd [as alias]
# Alternatively: from mxnet.nd import sqrt [as alias]
def squash(self, vectors, axis):
    epsilon = 1e-9
    vectors_l2norm = nd.square(vectors).sum(axis=axis, keepdims=True)  # squared L2 norm of each vector
    scale_factor = vectors_l2norm / (1 + vectors_l2norm)
    vectors_squashed = scale_factor * (vectors / nd.sqrt(vectors_l2norm + epsilon))  # element-wise
    return vectors_squashed
Example 10: forward
# Required import: from mxnet import nd [as alias]
# Alternatively: from mxnet.nd import sqrt [as alias]
def forward(self, x):
    # (batch_size, 1, 10, 16, 1) => (batch_size, 10, 16) => (batch_size, 10)
    x_shape = x.shape
    x = x.reshape(shape=(x_shape[0], x_shape[2], x_shape[3]))
    x_l2norm = nd.sqrt((x.square()).sum(axis=-1))
    # prob = nd.softmax(x_l2norm, axis=-1)
    return x_l2norm
Example 11: test_periodic_kernel
# Required import: from mxnet import nd [as alias]
# Alternatively: from mxnet.nd import sqrt [as alias]
def test_periodic_kernel(x1, x2, amplitude, length_scale, exact) -> None:
    tol = 1e-5
    batch_size = amplitude.shape[0]
    history_length_1 = x1.shape[0]
    history_length_2 = x2.shape[0]
    num_features = x1.shape[1]
    if batch_size > 1:
        x1 = nd.tile(x1, reps=(batch_size, 1, 1))
        x2 = nd.tile(x2, reps=(batch_size, 1, 1))
        for i in range(1, batch_size):
            x1[i, :, :] = (i + 1) * x1[i, :, :]
            x2[i, :, :] = (i - 3) * x2[i, :, :]
    else:
        x1 = x1.reshape(batch_size, history_length_1, num_features)
        x2 = x2.reshape(batch_size, history_length_2, num_features)
    amplitude = amplitude.reshape(batch_size, 1, 1)
    length_scale = length_scale.reshape(batch_size, 1, 1)
    frequency = 1 / 24 * nd.ones_like(length_scale)
    periodic = PeriodicKernel(amplitude, length_scale, frequency)
    exact = amplitude * nd.exp(
        -2 * nd.sin(frequency * math.pi * nd.sqrt(exact)) ** 2 / length_scale ** 2
    )
    res = periodic.kernel_matrix(x1, x2)
    assert nd.norm(exact - res) < tol
    # This test is based on a simple single-batch case with different history lengths
    # from gpytorch, where the exact kernel matrix is computed inside the test rather than hard-coded.
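For reference, the closed form being checked here is the standard periodic kernel, k(x, x') = amplitude * exp(-2 * sin^2(pi * frequency * d(x, x')) / length_scale^2), where d(x, x') is the Euclidean distance; `exact` is assumed to be passed in as a matrix of squared distances, hence the nd.sqrt. Evaluating that formula directly on made-up values:

import math
from mxnet import nd

amplitude, length_scale, frequency = 2.0, 0.5, 1 / 24
sq_dist = nd.array([[0.0, 4.0], [4.0, 0.0]])   # assumed squared distances between two points
k = amplitude * nd.exp(-2 * nd.sin(frequency * math.pi * nd.sqrt(sq_dist)) ** 2 / length_scale ** 2)
print(k)                                       # diagonal entries equal the amplitude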