

Python logistic_sgd.LogisticRegression Code Examples

This article collects typical usage examples of the Python class logistic_sgd.LogisticRegression. If you are unsure what logistic_sgd.LogisticRegression does or how to use it in your own code, the curated examples below should help. You can also explore further usage examples from the logistic_sgd module.

The following presents 4 code examples of logistic_sgd.LogisticRegression, sorted by popularity.
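All four examples assume the LogisticRegression class from logistic_sgd.py in the Theano DeepLearningTutorials. For reference, here is a minimal sketch of that class, abridged from the tutorial (note that Theano itself is no longer maintained):

import numpy
import theano
import theano.tensor as T

class LogisticRegression(object):
    """Multi-class logistic regression: y ~ softmax(x.W + b)."""

    def __init__(self, input, n_in, n_out):
        # weight matrix W (n_in x n_out) and bias b, initialized to zeros
        self.W = theano.shared(
            value=numpy.zeros((n_in, n_out), dtype=theano.config.floatX),
            name='W', borrow=True)
        self.b = theano.shared(
            value=numpy.zeros((n_out,), dtype=theano.config.floatX),
            name='b', borrow=True)
        # class-membership probabilities for every example in the minibatch
        self.p_y_given_x = T.nnet.softmax(T.dot(input, self.W) + self.b)
        # predicted class: the most probable one
        self.y_pred = T.argmax(self.p_y_given_x, axis=1)
        self.params = [self.W, self.b]
        self.input = input

    def negative_log_likelihood(self, y):
        # mean negative log-probability of the correct labels
        return -T.mean(T.log(self.p_y_given_x)[T.arange(y.shape[0]), y])

    def errors(self, y):
        # fraction of misclassified examples in the minibatch
        return T.mean(T.neq(self.y_pred, y))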

Example 1: __init__

# Required import: import logistic_sgd
# or: from logistic_sgd import LogisticRegression
def __init__(self, rng, input, n_in, n_hidden, n_out):
        # the MLP is a hidden layer followed by a logistic regression
        # output layer that reads the hidden layer's activations
        self.hiddenLayer = HiddenLayer(rng=rng, input=input, n_in=n_in, n_out=n_hidden, activation=T.tanh)
        self.logRegressionLayer = LogisticRegression(input=self.hiddenLayer.output, n_in=n_hidden, n_out=n_out)

        # L1 norm and squared L2 norm of the weights, used as regularization terms
        self.L1 = abs(self.hiddenLayer.W).sum() + abs(self.logRegressionLayer.W).sum()
        self.L2_sqr = (self.hiddenLayer.W ** 2).sum() + (self.logRegressionLayer.W ** 2).sum()

        # the negative log likelihood of the MLP is the negative log
        # likelihood of the logistic regression output layer
        self.negative_log_likelihood = self.logRegressionLayer.negative_log_likelihood

        # same for the function computing the number of errors
        self.errors = self.logRegressionLayer.errors

        # the model's parameters are the parameters of both layers
        self.params = self.hiddenLayer.params + self.logRegressionLayer.params
Developer: aidiary, Project: deep-learning-theano, Lines: 21, Source: mlp.py
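A minimal usage sketch for this example. It assumes the __init__ above belongs to a class named MLP and that HiddenLayer is the one from the Theano tutorial's mlp.py; the symbol names and hyperparameter values mirror the tutorial's MNIST defaults and are illustrative, not part of the example above:

import numpy
import theano.tensor as T

rng = numpy.random.RandomState(1234)
x = T.matrix('x')    # minibatch of input vectors
y = T.ivector('y')   # corresponding integer class labels

classifier = MLP(rng=rng, input=x, n_in=28 * 28, n_hidden=500, n_out=10)

# regularized training objective: negative log likelihood plus penalties
L1_reg, L2_reg = 0.00, 0.0001
cost = (classifier.negative_log_likelihood(y)
        + L1_reg * classifier.L1
        + L2_reg * classifier.L2_sqr)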


Example 2: __init__

# Required import: import logistic_sgd
# or: from logistic_sgd import LogisticRegression
def __init__(self, rng, is_train, input, n_in, n_hidden, n_out, drop_p=0.5):
        # hidden layer with dropout; is_train toggles dropout on and off
        self.hiddenLayer = HiddenLayer(rng=rng, is_train=is_train, input=input,
                                       n_in=n_in, n_out=n_hidden,
                                       activation=T.tanh, p=drop_p)

        self.logRegressionLayer = LogisticRegression(
            input=self.hiddenLayer.output,
            n_in=n_hidden,
            n_out=n_out
        )

        self.L1 = (
            abs(self.hiddenLayer.W).sum()
            + abs(self.logRegressionLayer.W).sum()
        )

        self.L2_sqr = (
            (self.hiddenLayer.W ** 2).sum()
            + (self.logRegressionLayer.W ** 2).sum()
        )

        self.negative_log_likelihood = self.logRegressionLayer.negative_log_likelihood
        self.errors = self.logRegressionLayer.errors

        # project-specific additions: expose the per-class probabilities,
        # the predicted class, and the probability assigned to it
        self.pp_errors = self.logRegressionLayer.pp_errors
        self.p_y_given_x = self.logRegressionLayer.p_y_given_x
        self.y_pred = T.argmax(self.p_y_given_x, axis=1)
        self.max_prob = self.p_y_given_x[T.arange(input.shape[0]), self.y_pred]

        self.params = self.hiddenLayer.params + self.logRegressionLayer.params
        self.input = input
Developer: jiangnanhugo, Project: mlee-nce, Lines: 33, Source: mlp.py
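A sketch of how this variant might be used at test time. It assumes is_train is an int32 scalar that the project's dropout HiddenLayer checks (nonzero while training), and that the __init__ belongs to a hypothetical class named MLP; the layer sizes are illustrative:

import numpy
import theano
import theano.tensor as T

rng = numpy.random.RandomState(1234)
x = T.matrix('x')
is_train = T.iscalar('is_train')  # 1 during training, 0 at test time

classifier = MLP(rng=rng, is_train=is_train, input=x,
                 n_in=100, n_hidden=50, n_out=2, drop_p=0.5)

# compile a prediction function with dropout disabled (is_train fixed to 0)
predict = theano.function(
    inputs=[x],
    outputs=[classifier.y_pred, classifier.max_prob],
    givens={is_train: numpy.cast['int32'](0)})

max_prob pairs each prediction with the probability the model assigned to it, which is useful for thresholding low-confidence outputs.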


Example 3: __init__

# Required import: import logistic_sgd
# or: from logistic_sgd import LogisticRegression
def __init__(self, rng, input, n_in, n_hidden, n_out):

        self.hiddenLayer1 = HiddenLayer(
            rng=rng,
            input=input,
            n_in=n_in,
            n_out=n_hidden,
            activation=T.tanh
        )

        self.hiddenLayer2 = HiddenLayer(
            rng=rng,
            input=self.hiddenLayer1.output,
            n_in=n_hidden,
            n_out=n_hidden,
            activation=T.tanh
        )

        self.logRegressionLayer = LogisticRegression(
            input=self.hiddenLayer2.output,
            n_in=n_hidden,
            n_out=n_out
        )

        self.L1 = (
            abs(self.hiddenLayer1.W).sum()
            + abs(self.hiddenLayer2.W).sum()
            + abs(self.logRegressionLayer.W).sum()
        )

        self.L2_sqr = (
            (self.hiddenLayer1.W ** 2).sum()
            + (self.hiddenLayer2.W ** 2).sum()
            + (self.logRegressionLayer.W ** 2).sum()
        )

        self.negative_log_likelihood = (
            self.logRegressionLayer.negative_log_likelihood
        )

        self.errors = self.logRegressionLayer.errors
        self.params = self.hiddenLayer1.params + self.hiddenLayer2.params + self.logRegressionLayer.params
        self.input = input
        self.output = self.logRegressionLayer.p_y_given_x
Developer: domainxz, Project: top-k-rec, Lines: 46, Source: predict_by_mlp.py
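Since this example comes from a top-k recommendation project, its output attribute (the full probability vector) is what gets ranked. A sketch of scoring and taking the top k, with hypothetical sizes and the class name MLP assumed:

import numpy
import theano
import theano.tensor as T

rng = numpy.random.RandomState(1234)
x = T.matrix('x')
classifier = MLP(rng=rng, input=x, n_in=64, n_hidden=128, n_out=1000)

# compile a scoring function that returns p_y_given_x for each example
score = theano.function(inputs=[x], outputs=classifier.output)

features = numpy.random.rand(5, 64).astype(theano.config.floatX)
probs = score(features)                        # shape (5, 1000)
top_k = numpy.argsort(-probs, axis=1)[:, :10]  # indices of the 10 best items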


Example 4: __init__

# Required import: import logistic_sgd
# or: from logistic_sgd import LogisticRegression
def __init__(self, rng, input, n_in, n_hidden, n_out):
        """Initialize the parameters for the multilayer perceptron

        :type rng: numpy.random.RandomState
        :param rng: a random number generator used to initialize weights

        :type input: theano.tensor.TensorType
        :param input: symbolic variable that describes the input of the
        architecture (one minibatch)

        :type n_in: int
        :param n_in: number of input units, the dimension of the space in
        which the datapoints lie

        :type n_hidden: int
        :param n_hidden: number of hidden units

        :type n_out: int
        :param n_out: number of output units, the dimension of the space in
        which the labels lie

        """

        # Since we are dealing with a one hidden layer MLP, this will
        # translate into a TanhLayer connected to the LogisticRegression
        # layer; this can be replaced by a SigmoidalLayer, or a layer
        # implementing any other nonlinearity
        self.hiddenLayer = HiddenLayer(rng=rng, input=input,
                                       n_in=n_in, n_out=n_hidden,
                                       activation=T.tanh)

        # The logistic regression layer gets as input the hidden units
        # of the hidden layer
        self.logRegressionLayer = LogisticRegression(
            input=self.hiddenLayer.output,
            n_in=n_hidden,
            n_out=n_out)

        # L1 norm ; one regularization option is to enforce L1 norm to
        # be small
        self.L1 = abs(self.hiddenLayer.W).sum() \
                + abs(self.logRegressionLayer.W).sum()

        # square of L2 norm ; one regularization option is to enforce
        # square of L2 norm to be small
        self.L2_sqr = (self.hiddenLayer.W ** 2).sum() \
                    + (self.logRegressionLayer.W ** 2).sum()

        # negative log likelihood of the MLP is given by the negative
        # log likelihood of the output of the model, computed in the
        # logistic regression layer
        self.negative_log_likelihood = self.logRegressionLayer.negative_log_likelihood
        # same holds for the function computing the number of errors
        self.errors = self.logRegressionLayer.errors

        # the parameters of the model are the parameters of the two layers
        # it is made out of
        self.params = self.hiddenLayer.params + self.logRegressionLayer.params 
Developer: zhaoyu611, Project: DeepLearningTutorialForChinese, Lines: 60, Source: mlp.py
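To close the loop, a sketch of the SGD glue the tutorial pairs with this class. Everything here except the MLP, HiddenLayer, and LogisticRegression classes is hypothetical scaffolding; the names, shapes, and learning rate are chosen for illustration:

import numpy
import theano
import theano.tensor as T

rng = numpy.random.RandomState(1234)
x = T.matrix('x')
y = T.ivector('y')
classifier = MLP(rng=rng, input=x, n_in=28 * 28, n_hidden=500, n_out=10)
cost = classifier.negative_log_likelihood(y) + 0.0001 * classifier.L2_sqr

# vanilla SGD: each parameter steps against its gradient
learning_rate = 0.01
updates = [(param, param - learning_rate * T.grad(cost, param))
           for param in classifier.params]

train_model = theano.function(inputs=[x, y], outputs=cost, updates=updates)
test_model = theano.function(inputs=[x, y], outputs=classifier.errors(y))

# one training step on a random minibatch (shapes are illustrative only)
xs = numpy.random.rand(20, 28 * 28).astype(theano.config.floatX)
ys = numpy.random.randint(0, 10, size=20).astype('int32')
print(train_model(xs, ys))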



Note: the logistic_sgd.LogisticRegression examples in this article are collected from open-source projects hosted on GitHub, MSDocs, and similar platforms; copyright for each snippet remains with its original author. Consult the corresponding project's license before redistributing or reusing the code. Do not republish without permission.