

Python nd.softmax Method Code Examples

This article compiles typical usage examples of the Python method mxnet.nd.softmax. If you are wondering what exactly nd.softmax does, how to use it, or what real-world code that calls it looks like, the curated examples below may help. You can also explore further usage examples from the containing module, mxnet.nd.


A total of 12 code examples of nd.softmax are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
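
Before going through the examples, here is a minimal illustration of the API itself (the input values are arbitrary): nd.softmax normalizes along the given axis so that each slice sums to 1.

# Minimal nd.softmax illustration (the array values are arbitrary).
from mxnet import nd

x = nd.array([[1.0, 2.0, 3.0],
              [1.0, 1.0, 1.0]])
print(nd.softmax(x, axis=1))   # each row sums to 1
print(nd.softmax(x, axis=0))   # each column sums to 1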

Example 1: _convert_score

# Required import: from mxnet import nd [as alias]
# Alternatively: from mxnet.nd import softmax [as alias]
def _convert_score(self, score):
        """Convert the classification branch output (cls) to a score map.

        Parameters
        ----------
            score : ndarray
                network output

        Returns
        -------
            numpy.ndarray
                feature map score obtained through softmax
        """
        # Rearrange the raw two-channel classification output into (N, 2) scores,
        # take the softmax over the two classes, and keep the positive-class probability.
        score = nd.transpose(score, axes=(1, 2, 3, 0))
        score = nd.reshape(score, shape=(2, -1))
        score = nd.transpose(score, axes=(1, 0))
        score = nd.softmax(score, axis=1)
        score = nd.slice_axis(score, axis=1, begin=1, end=2)
        score = nd.squeeze(score, axis=1)
        return score.asnumpy()
Author: dmlc | Project: gluon-cv | Lines: 21 | Source: siamrpn_tracker.py

Example 2: Route

# Required import: from mxnet import nd [as alias]
# Alternatively: from mxnet.nd import softmax [as alias]
def Route(self, x):
        # Routing logits, one per (output capsule, input location) pair.
        b_mat = nd.zeros((x.shape[0], 1, self.num_cap, self.num_locations), ctx=x.context)
        x_expand = nd.expand_dims(nd.expand_dims(x, axis=2), axis=2)
        w_expand = nd.repeat(nd.expand_dims(self.w_ij.data(x.context), axis=0), repeats=x.shape[0], axis=0)
        u_ = w_expand * x_expand
        u = nd.sum(u_, axis=1)
        # Gradients flow through the prediction vectors only on the last iteration.
        u_no_gradient = nd.stop_gradient(u)
        for i in range(self.route_num):
            # Coupling coefficients via softmax over the output capsules.
            c_mat = nd.softmax(b_mat, axis=2)
            if i == self.route_num - 1:
                s = nd.sum(u * c_mat, axis=-1)
            else:
                s = nd.sum(u_no_gradient * c_mat, axis=-1)
            v = squash(s, 1)
            v1 = nd.expand_dims(v, axis=-1)
            if i != self.route_num - 1:
                # Agreement between predictions and outputs updates the routing logits.
                update_term = nd.sum(u_no_gradient * v1, axis=1, keepdims=True)
                b_mat = b_mat + update_term
        return v
Author: Godricly | Project: comment_toxic_CapsuleNet | Lines: 23 | Source: capsule_block.py

Example 3: hybrid_forward

# Required import: from mxnet import nd [as alias]
# Alternatively: from mxnet.nd import softmax [as alias]
def hybrid_forward(self, F, X, y=None):
        X = self.net[0](X)  # Conv1
        X = self.net[1](X)  # Primary capsule layer
        X = self.net[2](X)  # Digit capsule layer
        X = X.reshape((X.shape[0], X.shape[2], X.shape[4]))
        # Length of each capsule vector, used for the margin loss.
        X_l2norm = nd.sqrt((X ** 2).sum(axis=-1))
        prob = nd.softmax(X_l2norm, axis=-1)

        # Use the ground-truth labels when given (training),
        # otherwise the predicted classes (inference).
        if y is not None:
            max_len_indices = y
        else:
            max_len_indices = nd.argmax(prob, axis=-1)

        # Pick the activated capsule for each sample and reconstruct from it
        # (using max_len_indices so this also works when y is None).
        y_tile = nd.tile(max_len_indices.expand_dims(axis=1), reps=(1, X.shape[-1]))
        batch_activated_capsules = nd.pick(X, y_tile, axis=1, keepdims=True)

        reconstructions = self.net[3](batch_activated_capsules)

        return prob, X_l2norm, reconstructions
Author: tonysy | Project: CapsuleNet-Gluon | Lines: 27 | Source: CapsuleNet.py

Example 4: batch_attention

# Required import: from mxnet import nd [as alias]
# Alternatively: from mxnet.nd import softmax [as alias]
def batch_attention(encoder, decoder):
    # Attention weights from the dot products between encoder and decoder states,
    # normalized with softmax along axis 1.
    attention = nd.softmax(nd.batch_dot(encoder, nd.transpose(decoder, axes=(0, 2, 1))), axis=1)
    # Weighted combination of encoder states.
    new_decoder = nd.batch_dot(attention, encoder)
    return new_decoder
Author: NonvolatileMemory | Project: AAAI_2019_EXAM | Lines: 6 | Source: TextEXAM_multi-label.py
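
A minimal usage sketch for batch_attention (shapes are chosen for illustration; note that the second batch_dot only type-checks when encoder and decoder share the same sequence length):

# Usage sketch with hypothetical shapes: batch of 2, sequence length 5, hidden size 8.
from mxnet import nd

encoder = nd.random.uniform(shape=(2, 5, 8))
decoder = nd.random.uniform(shape=(2, 5, 8))
new_decoder = batch_attention(encoder, decoder)
print(new_decoder.shape)  # (2, 5, 8)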

Example 5: pseudo_labeling

# Required import: from mxnet import nd [as alias]
# Alternatively: from mxnet.nd import softmax [as alias]
def pseudo_labeling(self, logits, confidence=0.):
        softmax = nd.softmax(logits, axis=1)   # class probabilities
        prob = nd.max(softmax, axis=1)         # confidence of the predicted class
        p_label = nd.argmax(softmax, axis=1)   # pseudo labels
        mask = prob > confidence               # keep only confident predictions
        return p_label, mask
Author: aws-samples | Project: d-SNE | Lines: 11 | Source: training_ssda.py
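
A self-contained sketch of the same idea, with made-up logits and an illustrative confidence threshold:

# Confidence-based pseudo-labeling with nd.softmax (values and the 0.8 threshold are illustrative).
from mxnet import nd

logits = nd.array([[5.0, 0.1, 0.1],    # confident prediction (class 0)
                   [1.0, 0.9, 1.1]])   # low-confidence prediction
probs = nd.softmax(logits, axis=1)
p_label = nd.argmax(probs, axis=1)     # pseudo labels: [0, 2]
mask = nd.max(probs, axis=1) > 0.8     # keep only confident samples: [1, 0]
print(p_label.asnumpy(), mask.asnumpy())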

Example 6: get_attribute

# Required import: from mxnet import nd [as alias]
# Alternatively: from mxnet.nd import softmax [as alias]
def get_attribute(self, image):
        """Face attribute predictor.
        Parameters
        ----------
        image: NDArray.
            The NDArray data format for MXNet to process, such as (H, W, C).
        Returns
        -------
        type: tuple
            Results of the face attribute prediction:
            (str(gender), int(age), (str(expression_1), str(expression_2))).
        """
        img = transform_eval(image, resize_short=self._image_size, crop_size=self._image_size)
        img = img.as_in_context(self.ctx[0])
        tic = time.time()
        pred = self.net(img)
        toc = time.time() - tic
        print('Attribute inference time: %fms' % (toc * 1000))

        topK = 1
        topK_age = 6
        topK_exp = 2
        age = 0
        ind_1 = nd.topk(pred[0], k=topK)[0].astype('int')
        ind_2 = nd.topk(pred[1], k=topK_age)[0].astype('int')
        ind_3 = nd.topk(pred[2], k=topK_exp)[0].astype('int')
        # Age is estimated as a softmax-weighted sum over the top-6 age bins.
        for i in range(topK_age):
            age += int(nd.softmax(pred[1])[0][ind_2[i]].asscalar() * self.attribute_map2[1][ind_2[i].asscalar()])
        gender = self.attribute_map2[0][ind_1[0].asscalar()]
        if nd.softmax(pred[2])[0][ind_3[0]].asscalar() < 0.45:
            # Low softmax confidence: fall back to the default expression label
            # for both slots so the return below is always well defined.
            expression_1 = expression_2 = self.attribute_map2[2][7]
        else:
            expression_1 = self.attribute_map2[2][ind_3[0].asscalar()]
            expression_2 = self.attribute_map2[2][ind_3[1].asscalar()]

        return (gender, age, (expression_1, expression_2))
Author: becauseofAI | Project: MobileFace | Lines: 38 | Source: mobileface_attribute_predictor.py

Example 7: route

# Required import: from mxnet import nd [as alias]
# Alternatively: from mxnet.nd import softmax [as alias]
def route(self, u):
        # Routing logits between input and output capsules at each spatial position.
        b_mat = nd.zeros((u.shape[0], self.num_cap_in, self.num_cap, 1, u.shape[4], u.shape[5]), ctx=u.context)
        for i in range(self.route_num):
            c_mat = nd.softmax(b_mat, axis=2)   # coupling coefficients over output capsules
            s = nd.sum(u * c_mat, axis=1)
            v = squash(s, 2)
            if i != self.route_num - 1:
                v1 = nd.expand_dims(v, axis=1)
                update_term = nd.sum(u * v1, axis=3, keepdims=True)
                b_mat = b_mat + update_term
        return v
Author: Godricly | Project: comment_toxic_CapsuleNet | Lines: 13 | Source: conv_cap.py

Example 8: msg_reduce

# Required import: from mxnet import nd [as alias]
# Alternatively: from mxnet.nd import softmax [as alias]
def msg_reduce(self, node):
        state = node.mailbox['state']
        alpha = node.mailbox['alpha']
        # Normalize the attention scores over the incoming messages.
        alpha = nd.softmax(alpha, axis=1)

        new_state = nd.relu(nd.sum(alpha * state, axis=1))
        return { 'new_state': new_state }
Author: panzheyi | Project: ST-MetaNet | Lines: 9 | Source: graph.py

Example 9: msg_reduce

# Required import: from mxnet import nd [as alias]
# Alternatively: from mxnet.nd import softmax [as alias]
def msg_reduce(self, node):
        state = node.mailbox['state']
        alpha = node.mailbox['alpha']
        # Same attention-weighted aggregation as above, gated by a learned sigmoid weight.
        alpha = nd.softmax(alpha, axis=1)

        new_state = nd.relu(nd.sum(alpha * state, axis=1)) * nd.sigmoid(self.weight.data(state.context))
        return { 'new_state': new_state }
Author: panzheyi | Project: ST-MetaNet | Lines: 9 | Source: graph.py

Example 10: forward

# Required import: from mxnet import nd [as alias]
# Alternatively: from mxnet.nd import softmax [as alias]
def forward(self, x):
        # (batch_size, 1, 10, 16, 1) => (batch_size, 10, 16) => (batch_size, 10)
        x_shape = x.shape
        x = x.reshape(shape=(x_shape[0], x_shape[2], x_shape[3]))

        # Capsule lengths; nd.softmax(x_l2norm, axis=-1) would turn them into class probabilities.
        x_l2norm = nd.sqrt((x.square()).sum(axis=-1))
        return x_l2norm
Author: sxhxliang | Project: CapsNet_Mxnet | Lines: 10 | Source: CapsLayers.py

Example 11: _predict_tabular_data

# Required import: from mxnet import nd [as alias]
# Alternatively: from mxnet.nd import softmax [as alias]
def _predict_tabular_data(self, new_data, process=True, predict_proba=True):  # TODO ensure API lines up with tabular.Model class.
        """ Specific TabularNN method to produce predictions on new (unprocessed) data.
            Returns 1D numpy array unless predict_proba=True and task is multi-class classification (not binary).
            Args:
                new_data (pd.DataFrame or TabularNNDataset): new data to make predictions on.
                    To make a prediction for just a single row of new_data, pass in: new_data.iloc[[row_index]]
                process (bool): whether the new data should be processed (if False, new_data must be a TabularNNDataset)
                predict_proba (bool): whether to output class probabilities (not used for regression)
        """
        if process:
            new_data = self.process_test_data(new_data, batch_size=self.batch_size, num_dataloading_workers=self.num_dataloading_workers_inference, labels=None)
        if not isinstance(new_data, TabularNNDataset):
            raise ValueError("new_data must be of type TabularNNDataset if process=False")
        if self.problem_type == REGRESSION or not predict_proba:
            preds = nd.zeros((new_data.num_examples,1))
        else:
            preds = nd.zeros((new_data.num_examples, self.num_net_outputs))
        i = 0
        for batch_idx, data_batch in enumerate(new_data.dataloader):
            data_batch = new_data.format_batch_data(data_batch, self.ctx)
            preds_batch = self.model(data_batch)
            batch_size = len(preds_batch)
            if self.problem_type != REGRESSION:
                if not predict_proba: # need to take argmax
                    preds_batch = nd.argmax(preds_batch, axis=1, keepdims=True)
                else: # need to take softmax
                    preds_batch = nd.softmax(preds_batch, axis=1)
            preds[i:(i+batch_size)] = preds_batch
            i = i+batch_size
        if self.problem_type == REGRESSION or not predict_proba:
            return preds.asnumpy().flatten()  # return 1D numpy array
        elif self.problem_type == BINARY and predict_proba:
            return preds[:,1].asnumpy()  # for binary problems, only return P(Y==+1)

        return preds.asnumpy()  # return 2D numpy array 
Author: awslabs | Project: autogluon | Lines: 37 | Source: tabular_nn_model.py

Example 12: get_inception_score

# Required import: from mxnet import nd [as alias]
# Alternatively: from mxnet.nd import softmax [as alias]
def get_inception_score(images, splits=10):
    """
    Inception score function.
        The images are divided into 'splits' parts, the inception score is calculated
        for each part separately, and the mean and std of these scores are returned.
    :param images: images (num x c x w x h) for which to calculate the inception score.
    :param splits: number of parts the images are divided into.
    :return: mean and std of the inception scores.
    """
    assert (images.shape[1] == 3)

    # load inception model
    if inception_model is None:
        _init_inception()

    # resize images to fit the inception model (InceptionV3)
    if images.shape[2] != 299:
        images = resize(images, 299, 299)

    preds = []
    bs = 4
    n_batches = int(math.ceil(float(images.shape[0])/float(bs)))

    # get the inception model's predictions for each image
    for i in range(n_batches):
        sys.stdout.write(".")
        sys.stdout.flush()
        inps = images[(i * bs):min((i + 1) * bs, len(images))]
        # inps size. bs x 3 x 299 x 299
        pred = nd.softmax(inception_model(inps))
        # pred size. bs x 1000
        preds.append(pred.asnumpy())

    # list to array
    preds = np.concatenate(preds, 0)
    scores = []

    # calculate the inception score for each split
    for i in range(splits):
        # extract per split image pred
        part = preds[(i * preds.shape[0] // splits):((i + 1) * preds.shape[0] // splits), :]
        kl = part * (np.log(part) - np.log(np.expand_dims(np.mean(part, 0), 0)))
        kl = np.mean(np.sum(kl, 1))
        scores.append(np.exp(kl))

    return np.mean(scores), np.std(scores) 
Author: mlperf | Project: training_results_v0.6 | Lines: 48 | Source: inception_score.py
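
For reference, the per-split KL computation above can be reproduced on made-up predictions as follows (a rough sketch, not part of the original code): the inception score is exp(mean over images of KL(p(y|x) || p(y))).

# Rough numpy sketch of the per-split inception score on fake softmax outputs.
import numpy as np

part = np.random.dirichlet(np.ones(1000), size=32)   # 32 fake softmax outputs over 1000 classes
p_y = np.mean(part, axis=0, keepdims=True)           # marginal class distribution p(y)
kl = np.sum(part * (np.log(part) - np.log(p_y)), axis=1)
print(np.exp(np.mean(kl)))                           # inception score for this split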


Note: The mxnet.nd.softmax method examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets were selected from open-source projects contributed by various developers; copyright of the source code belongs to the original authors. For distribution and use, please refer to the license of the corresponding project. Do not reproduce without permission.