

Python nd.square Method Code Examples

This article collects typical usage examples of the Python method mxnet.nd.square, drawn from open-source projects. If you are wondering what nd.square does, how to call it, or how it is used in practice, the curated examples below should help. You can also explore other usage examples from the mxnet.nd module.


The sections below present 9 code examples of nd.square, sorted by popularity by default.
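
Before the project examples, here is a minimal sketch of nd.square itself: the element-wise square of an NDArray (values illustrative):

from mxnet import nd

a = nd.array([1.0, -2.0, 3.0])
print(nd.square(a))  # [1. 4. 9.]: element-wise square, equivalent to a * a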

Example 1: CapLoss

# Required import: from mxnet import nd
# Or: from mxnet.nd import square
def CapLoss(y_pred, y_true):
    # Capsule-network margin loss: positive classes are pushed above 0.9,
    # negative classes below 0.1; the negative term is down-weighted by 0.5.
    L = y_true * nd.square(nd.maximum(0., 0.9 - y_pred)) + \
        0.5 * (1 - y_true) * nd.square(nd.maximum(0., y_pred - 0.1))
    return nd.mean(nd.sum(L, 1))
Author: Godricly, Project: comment_toxic_CapsuleNet, Lines: 6, Source: train_k_fold.py
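
A quick smoke test for the loss above, assuming one-hot labels of shape (batch, num_classes); the numbers are illustrative only:

from mxnet import nd

y_true = nd.array([[1, 0], [0, 1]])           # one-hot labels
y_pred = nd.array([[0.8, 0.2], [0.3, 0.7]])   # capsule lengths in [0, 1]
print(CapLoss(y_pred, y_true).asscalar())     # scalar loss averaged over the batch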

Example 2: squash

# Required import: from mxnet import nd
# Or: from mxnet.nd import square
def squash(x, axis):
    s_squared_norm = nd.sum(nd.square(x), axis, keepdims=True)
    # If s_squared_norm is very small, the standard squash scale below is
    # numerically unstable, so it was replaced by plain L2 normalization:
    # scale = s_squared_norm / ((1 + s_squared_norm) * nd.sqrt(s_squared_norm + 1e-9))
    # return x * scale
    scale = nd.sqrt(s_squared_norm + 1e-9)
    return x / scale
Author: Godricly, Project: comment_toxic_CapsuleNet, Lines: 10, Source: capsule_block.py
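
A quick check that this variant produces unit-norm vectors along the chosen axis (shapes illustrative):

from mxnet import nd

x = nd.random.normal(shape=(2, 8, 4))
v = squash(x, axis=1)
print(nd.sqrt(nd.sum(nd.square(v), axis=1)))  # all entries ~1.0 (unit L2 norm)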

Example 3: Route

# Required import: from mxnet import nd
# Or: from mxnet.nd import square
def Route(self, x):
    # Dynamic routing between capsule layers: b_mat holds the routing
    # logits, c_mat the coupling coefficients derived from them.
    b_mat = nd.zeros((x.shape[0], 1, self.num_cap, self.num_locations), ctx=x.context)
    x_expand = nd.expand_dims(nd.expand_dims(x, axis=2), 2)
    w_expand = nd.repeat(nd.expand_dims(self.w_ij.data(x.context), axis=0),
                         repeats=x.shape[0], axis=0)
    u_ = w_expand * x_expand
    u = nd.sum(u_, axis=1)
    # u_ = nd.square(w_expand - x_expand)
    # u = -nd.sum(u_, axis=1)
    # Detach u so intermediate routing iterations do not backpropagate.
    u_no_gradient = nd.stop_gradient(u)
    for i in range(self.route_num):
        # c_mat = nd.softmax(b_mat, axis=2)
        c_mat = nd.sigmoid(b_mat)
        if i == self.route_num - 1:
            # Only the final iteration uses the gradient-carrying u.
            s = nd.sum(u * c_mat, axis=-1)
        else:
            s = nd.sum(u_no_gradient * c_mat, axis=-1)
        v = squash(s, 1)
        if i != self.route_num - 1:
            v1 = nd.expand_dims(v, axis=-1)
            # Agreement between predictions and outputs updates the logits.
            update_term = nd.sum(u_no_gradient * v1, axis=1, keepdims=True)
            b_mat = b_mat + update_term
            # b_mat = update_term
    return v
Author: Godricly, Project: comment_toxic_CapsuleNet, Lines: 28, Source: capsule_block.py
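
Not part of the original class: a sketch of a single routing iteration, with shapes inferred from the broadcasting above (batch 2, capsule dim 16, 4 output capsules, 6 locations; all dimensions are assumptions for illustration):

from mxnet import nd

batch, cap_dim, num_cap, num_loc = 2, 16, 4, 6
u = nd.random.normal(shape=(batch, cap_dim, num_cap, num_loc))  # prediction vectors
b_mat = nd.zeros((batch, 1, num_cap, num_loc))                  # routing logits
c_mat = nd.sigmoid(b_mat)                                       # coupling coefficients
s = nd.sum(u * c_mat, axis=-1)                                  # (batch, cap_dim, num_cap)
v = s / nd.sqrt(nd.sum(nd.square(s), 1, keepdims=True) + 1e-9)  # squash from Example 2
b_mat = b_mat + nd.sum(u * nd.expand_dims(v, -1), axis=1, keepdims=True)
print(v.shape)  # (2, 16, 4)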

Example 4: forward

# Required import: from mxnet import nd
# Or: from mxnet.nd import square
def forward(self, x):
    # Capsule length layer: the L2 norm along axis 1 is each capsule's activation.
    x = nd.sqrt(nd.sum(nd.square(x), 1))
    return x
Author: Godricly, Project: comment_toxic_CapsuleNet, Lines: 5, Source: capsule_block.py
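
The same computation outside the block, assuming a (batch, dim, num_caps) layout; the shapes are illustrative:

from mxnet import nd

x = nd.random.normal(shape=(2, 16, 10))     # 10 capsules of dimension 16
lengths = nd.sqrt(nd.sum(nd.square(x), 1))  # shape (2, 10): one length per capsule
print(lengths.shape)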

Example 5: forward

# Required import: from mxnet import nd
# Or: from mxnet.nd import square
def forward(self, cls_pred, box_pred, cls_target, box_target):
        """Compute loss in entire batch across devices."""
        # require results across different devices at this time
        cls_pred, box_pred, cls_target, box_target = [_as_list(x) \
            for x in (cls_pred, box_pred, cls_target, box_target)]
        # cross device reduction to obtain positive samples in entire batch
        num_pos = []
        for cp, bp, ct, bt in zip(*[cls_pred, box_pred, cls_target, box_target]):
            pos_samples = (ct > 0)
            num_pos.append(pos_samples.sum())
        num_pos_all = sum([p.asscalar() for p in num_pos])
        if num_pos_all < 1:
            # no positive samples found, return dummy losses
            return nd.zeros((1,)), nd.zeros((1,)), nd.zeros((1,))

        # compute element-wise cross entropy loss and sort, then perform negative mining
        cls_losses = []
        box_losses = []
        sum_losses = []
        for cp, bp, ct, bt in zip(*[cls_pred, box_pred, cls_target, box_target]):
            pred = nd.log_softmax(cp, axis=-1)
            pos = ct > 0
            cls_loss = -nd.pick(pred, ct, axis=-1, keepdims=False)
            rank = (cls_loss * (pos - 1)).argsort(axis=1).argsort(axis=1)
            hard_negative = rank < (pos.sum(axis=1) * self._negative_mining_ratio).expand_dims(-1)
            # mask out if not positive or negative
            cls_loss = nd.where((pos + hard_negative) > 0, cls_loss, nd.zeros_like(cls_loss))
            cls_losses.append(nd.sum(cls_loss, axis=0, exclude=True) / num_pos_all)

            bp = _reshape_like(nd, bp, bt)
            box_loss = nd.abs(bp - bt)
            box_loss = nd.where(box_loss > self._rho, box_loss - 0.5 * self._rho,
                                (0.5 / self._rho) * nd.square(box_loss))
            # box loss only apply to positive samples
            box_loss = box_loss * pos.expand_dims(axis=-1)
            box_losses.append(nd.sum(box_loss, axis=0, exclude=True) / num_pos_all)
            sum_losses.append(cls_losses[-1] + self._lambd * box_losses[-1])

        return sum_losses, cls_losses, box_losses 
Author: zzdang, Project: cascade_rcnn_gluon, Lines: 41, Source: loss.py
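
The box term above is a smooth L1 loss: quadratic within ±rho and linear outside. A standalone sketch (rho = 1.0 is an assumption for illustration):

from mxnet import nd

rho = 1.0
abs_diff = nd.abs(nd.array([-3.0, -0.5, 0.0, 0.5, 3.0]))
smooth_l1 = nd.where(abs_diff > rho,
                     abs_diff - 0.5 * rho,
                     (0.5 / rho) * nd.square(abs_diff))
print(smooth_l1)  # [2.5 0.125 0. 0.125 2.5]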

Example 6: loss

# Required import: from mxnet import nd
# Or: from mxnet.nd import square
def loss(y_pred, y_true):
    # The same capsule margin loss as in Example 1.
    L = y_true * nd.square(nd.maximum(0., 0.9 - y_pred)) + \
        0.5 * (1 - y_true) * nd.square(nd.maximum(0., y_pred - 0.1))
    return nd.mean(nd.sum(L, 1))
Author: sxhxliang, Project: CapsNet_Mxnet, Lines: 8, Source: CapsNet.py

Example 7: squash

# Required import: from mxnet import nd
# Or: from mxnet.nd import square
def squash(self, vectors, axis):
    # Standard capsule squash: shrinks the vector so its length lies in (0, 1)
    # while preserving its direction.
    epsilon = 1e-9
    vectors_l2norm = nd.square(vectors).sum(axis=axis, keepdims=True)  # squared L2 norm, despite the name

    scale_factor = vectors_l2norm / (1 + vectors_l2norm)
    vectors_squashed = scale_factor * (vectors / nd.sqrt(vectors_l2norm + epsilon))  # element-wise

    return vectors_squashed
Author: sxhxliang, Project: CapsNet_Mxnet, Lines: 10, Source: CapsLayers.py
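
A sanity check of the squashing behavior with the same math inlined (shapes illustrative); every output norm stays strictly below 1:

from mxnet import nd

v = nd.random.normal(shape=(2, 10, 16))
norm_sq = nd.square(v).sum(axis=-1, keepdims=True)
squashed = (norm_sq / (1 + norm_sq)) * (v / nd.sqrt(norm_sq + 1e-9))
print(nd.sqrt(nd.square(squashed).sum(axis=-1)).max())  # < 1.0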

Example 8: forward

# Required import: from mxnet import nd
# Or: from mxnet.nd import square
def forward(self, x):
    # (batch_size, 1, 10, 16, 1) => (batch_size, 10, 16) => (batch_size, 10)
    x_shape = x.shape
    x = x.reshape(shape=(x_shape[0], x_shape[2], x_shape[3]))
    # The class score for each capsule is the L2 norm of its vector.
    x_l2norm = nd.sqrt((x.square()).sum(axis=-1))
    # prob = nd.softmax(x_l2norm, axis=-1)
    return x_l2norm
Author: sxhxliang, Project: CapsNet_Mxnet, Lines: 10, Source: CapsLayers.py

Example 9: forward

# Required import: from mxnet import nd
# Or: from mxnet.nd import square
def forward(self, cls_pred, box_pred, cls_target, box_target):
        """Compute loss in entire batch across devices."""
        # require results across different devices at this time
        cls_pred, box_pred, cls_target, box_target = [_as_list(x) \
            for x in (cls_pred, box_pred, cls_target, box_target)]
        # cross device reduction to obtain positive samples in entire batch
        num_pos = []
        for cp, bp, ct, bt in zip(*[cls_pred, box_pred, cls_target, box_target]):
            pos_samples = (ct > 0)
            num_pos.append(pos_samples.sum())
        num_pos_all = sum([p.asscalar() for p in num_pos])
        if num_pos_all < 1 and self._min_hard_negatives < 1:
            # no positive samples and no hard negatives, return dummy losses
            cls_losses = [nd.sum(cp * 0) for cp in cls_pred]
            box_losses = [nd.sum(bp * 0) for bp in box_pred]
            sum_losses = [nd.sum(cp * 0) + nd.sum(bp * 0) for cp, bp in zip(cls_pred, box_pred)]
            return sum_losses, cls_losses, box_losses


        # compute element-wise cross entropy loss and sort, then perform negative mining
        cls_losses = []
        box_losses = []
        sum_losses = []
        for cp, bp, ct, bt in zip(*[cls_pred, box_pred, cls_target, box_target]):
            pred = nd.log_softmax(cp, axis=-1)
            pos = ct > 0
            cls_loss = -nd.pick(pred, ct, axis=-1, keepdims=False)
            rank = (cls_loss * (pos - 1)).argsort(axis=1).argsort(axis=1)
            hard_negative = rank < nd.maximum(self._min_hard_negatives, pos.sum(axis=1)
                                              * self._negative_mining_ratio).expand_dims(-1)
            # mask out if not positive or negative
            cls_loss = nd.where((pos + hard_negative) > 0, cls_loss, nd.zeros_like(cls_loss))
            cls_losses.append(nd.sum(cls_loss, axis=0, exclude=True) / max(1., num_pos_all))

            bp = _reshape_like(nd, bp, bt)
            box_loss = nd.abs(bp - bt)
            box_loss = nd.where(box_loss > self._rho, box_loss - 0.5 * self._rho,
                                (0.5 / self._rho) * nd.square(box_loss))
            # box loss only apply to positive samples
            box_loss = box_loss * pos.expand_dims(axis=-1)
            box_losses.append(nd.sum(box_loss, axis=0, exclude=True) / max(1., num_pos_all))
            sum_losses.append(cls_losses[-1] + self._lambd * box_losses[-1])

        return sum_losses, cls_losses, box_losses 
Author: Angzz, Project: panoptic-fpn-gluon, Lines: 46, Source: loss.py
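
Not from the original snippet: a minimal illustration of the double-argsort trick both loss examples use to rank negatives by loss. In the source the negation also masks out positives via (pos - 1); here plain negation suffices (values illustrative):

from mxnet import nd

cls_loss = nd.array([[0.2, 0.9, 0.5, 0.1]])
rank = (-cls_loss).argsort(axis=1).argsort(axis=1)
print(rank)  # [[2. 0. 1. 3.]]: rank 0 marks the highest loss
# hard_negative = rank < k then keeps the k hardest negatives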


Note: the mxnet.nd.square examples in this article were compiled by 纯净天空 from open-source code and documentation hosted on GitHub, MSDocs, and similar platforms. The snippets were selected from community-contributed open-source projects; copyright remains with the original authors, and any redistribution or use should follow the corresponding project licenses. Please do not republish without permission.