

Python losses.mean_squared_error method code examples

This article collects typical code examples of the keras.losses.mean_squared_error method in Python. If you have been wondering what losses.mean_squared_error does, how to call it, or would simply like to see it used in practice, the curated examples below should help. You can also browse further usage examples from the enclosing keras.losses module.


Twelve code examples of losses.mean_squared_error are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Python code examples.
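Before turning to the project excerpts, here is a minimal, self-contained sketch (not taken from any of the projects below) illustrating what losses.mean_squared_error actually computes: the mean of the squared differences over the last axis, yielding one value per sample.

import numpy as np
from keras import backend as K
from keras.losses import mean_squared_error

# Two samples with three values each.
y_true = K.constant(np.array([[0., 1., 2.], [3., 4., 5.]]))
y_pred = K.constant(np.array([[0., 1., 2.], [3., 3., 3.]]))

# mean_squared_error averages the squared error over the last axis,
# so the result has shape (2,): one MSE value per sample.
per_sample_mse = mean_squared_error(y_true, y_pred)
print(K.eval(per_sample_mse))  # [0.        1.6666666]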

Example 1: __init__

# Required imports: from keras import losses [as alias]
# Or alternatively: from keras.losses import mean_squared_error [as alias]
def __init__(self, hidden_neurons=None,
                 hidden_activation='relu', output_activation='sigmoid',
                 loss=mean_squared_error, optimizer='adam',
                 epochs=100, batch_size=32, dropout_rate=0.2,
                 l2_regularizer=0.1, validation_size=0.1, preprocessing=True,
                 verbose=1, random_state=None, contamination=0.1):
        super(AutoEncoder, self).__init__(contamination=contamination)
        self.hidden_neurons = hidden_neurons
        self.hidden_activation = hidden_activation
        self.output_activation = output_activation
        self.loss = loss
        self.optimizer = optimizer
        self.epochs = epochs
        self.batch_size = batch_size
        self.dropout_rate = dropout_rate
        self.l2_regularizer = l2_regularizer
        self.validation_size = validation_size
        self.preprocessing = preprocessing
        self.verbose = verbose
        self.random_state = random_state

        # default values
        if self.hidden_neurons is None:
            self.hidden_neurons = [64, 32, 32, 64]

        # Verify the network design is valid
        if not self.hidden_neurons == self.hidden_neurons[::-1]:
            print(self.hidden_neurons)
            raise ValueError("Hidden units should be symmetric")

        self.hidden_neurons_ = self.hidden_neurons

        check_parameter(dropout_rate, 0, 1, param_name='dropout_rate',
                        include_left=True) 
Developer: yzhao062 | Project: pyod | Lines of code: 36 | Source file: auto_encoder.py
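Example 1 is the constructor of PyOD's AutoEncoder outlier detector, which takes losses.mean_squared_error as its default reconstruction loss. A minimal usage sketch follows, assuming pyod is installed; the random X_train array here is purely illustrative.

import numpy as np
from pyod.models.auto_encoder import AutoEncoder

X_train = np.random.rand(200, 10)  # illustrative (n_samples, n_features) data

# Fit the detector with the default MSE loss; hidden_neurons must be symmetric.
clf = AutoEncoder(hidden_neurons=[64, 32, 32, 64], epochs=10, verbose=0)
clf.fit(X_train)

outlier_scores = clf.decision_scores_  # reconstruction-error-based outlier scores
outlier_labels = clf.labels_           # 0 = inlier, 1 = outlier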

Example 2: l2

# Required imports: from keras import losses [as alias]
# Or alternatively: from keras.losses import mean_squared_error [as alias]
def l2(y_true, y_pred):
    """ L2 metric (MSE) """
    return losses.mean_squared_error(y_true, y_pred)


###############################################################################
# Helper Functions
############################################################################### 
Developer: voxelmorph | Project: voxelmorph | Lines of code: 10 | Source file: metrics.py
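Because l2 is just a thin wrapper around losses.mean_squared_error, it can be passed to compile() like any other Keras loss or metric. A hedged sketch follows; the toy model is illustrative and not part of voxelmorph.

from keras import losses
from keras.models import Sequential
from keras.layers import Dense

# Toy regression model; track l2 (MSE) as a metric alongside an MAE training loss.
model = Sequential([Dense(8, activation='relu', input_shape=(4,)),
                    Dense(1)])
model.compile(optimizer='adam', loss='mean_absolute_error', metrics=[l2])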

Example 3: build

# Required imports: from keras import losses [as alias]
# Or alternatively: from keras.losses import mean_squared_error [as alias]
def build(args):
    model = build_model(args)
    model.compile(loss=['categorical_crossentropy', 'mean_squared_error'],
                  optimizer=SGD(lr=args['learning_rate'], momentum=args['momentum']),
                  # optimizer='adam',
                  loss_weights=[0.5, 0.5])
    return model
Developer: witchu | Project: alphazero | Lines of code: 9 | Source file: keras_model.py

Example 4: test_updatable_model_flag_mse_adam

# Required imports: from keras import losses [as alias]
# Or alternatively: from keras.losses import mean_squared_error [as alias]
def test_updatable_model_flag_mse_adam(self):
        """
        Test to ensure that respect_trainable is honored during convert of a
        model with mean squared error loss and the Adam optimizer.
        """
        import coremltools
        from keras.layers import Dense
        from keras.losses import mean_squared_error
        from keras.optimizers import Adam

        input = ["data"]
        output = ["output"]

        # Again, this should give an updatable model.
        updatable = Sequential()
        updatable.add(Dense(128, input_shape=(16,)))
        updatable.add(Dense(10, name="foo", activation="softmax", trainable=True))
        updatable.compile(
            loss=mean_squared_error,
            optimizer=Adam(lr=1.0, beta_1=0.5, beta_2=0.75, epsilon=0.25),
            metrics=["accuracy"],
        )
        cml = coremltools.converters.keras.convert(
            updatable, input, output, respect_trainable=True
        )
        spec = cml.get_spec()
        self.assertTrue(spec.isUpdatable)
        layers = spec.neuralNetwork.layers
        self.assertIsNotNone(layers[1].innerProduct)
        self.assertTrue(layers[1].innerProduct)
        self.assertTrue(layers[1].isUpdatable)
        self.assertEqual(len(spec.neuralNetwork.updateParams.lossLayers), 1)
        adopt = spec.neuralNetwork.updateParams.optimizer.adamOptimizer
        self.assertEqual(adopt.learningRate.defaultValue, 1.0)
        self.assertEqual(adopt.beta1.defaultValue, 0.5)
        self.assertEqual(adopt.beta2.defaultValue, 0.75)
        self.assertEqual(adopt.eps.defaultValue, 0.25) 
Developer: apple | Project: coremltools | Lines of code: 39 | Source file: test_keras2.py

Example 5: objective_function_for_value

# Required imports: from keras import losses [as alias]
# Or alternatively: from keras.losses import mean_squared_error [as alias]
def objective_function_for_value(y_true, y_pred):
    return mean_squared_error(y_true, y_pred) 
Developer: Zeta36 | Project: connect4-alpha-zero | Lines of code: 4 | Source file: model_connect4.py

Example 6: latitude_weighted_loss

# Required imports: from keras import losses [as alias]
# Or alternatively: from keras.losses import mean_squared_error [as alias]
def latitude_weighted_loss(loss_function=mean_squared_error, lats=None, output_shape=(), axis=-2, weighting='cosine'):
    """
    Create a loss function that weights inputs by a function of latitude before calculating the loss.

    :param loss_function: method: Keras loss function to apply after the weighting
    :param lats: ndarray: 1-dimensional array of latitude coordinates
    :param output_shape: tuple: shape of expected model output
    :param axis: int: latitude axis in model output shape
    :param weighting: str: type of weighting to apply. Options are:
            cosine: weight by the cosine of the latitude (default)
            midlatitude: weight by the cosine of the latitude but also apply a 25% reduction to the equator and boost
                to the mid-latitudes
    :return: callable loss function
    """
    if weighting not in ['cosine', 'midlatitude']:
        raise ValueError("'weighting' must be one of 'cosine' or 'midlatitude'")
    if lats is not None:
        lat_tensor = K.zeros(lats.shape)
        lat_tensor.assign(K.cast_to_floatx(lats[:]))

        weights = K.cos(lat_tensor * np.pi / 180.)
        if weighting == 'midlatitude':
            weights = weights + 0.5 * K.pow(K.sin(lat_tensor * 2 * np.pi / 180.), 2.)

        weight_shape = output_shape[axis:]
        for d in weight_shape[1:]:
            weights = K.expand_dims(weights, axis=-1)
            weights = K.repeat_elements(weights, d, axis=-1)

    else:
        weights = K.ones(output_shape)

    def lat_loss(y_true, y_pred):
        return loss_function(y_true * weights, y_pred * weights)

    return lat_loss 
Developer: jweyn | Project: DLWP | Lines of code: 38 | Source file: custom.py
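latitude_weighted_loss is a factory: it is called once with the latitude grid and the model's output shape, and the closure it returns is handed to compile(). A hedged usage sketch follows; the 73x144 grid, the toy model, and the placement of latitude on axis -2 are all illustrative assumptions.

import numpy as np
from keras.models import Sequential
from keras.layers import Dense, Reshape
from keras.losses import mean_squared_error

lats = np.linspace(-90., 90., 73)  # 1-D latitude coordinates in degrees

# Weight the MSE by cos(latitude) before it is applied.
loss = latitude_weighted_loss(loss_function=mean_squared_error, lats=lats,
                              output_shape=(73, 144), axis=-2, weighting='cosine')

model = Sequential([Dense(73 * 144, input_shape=(10,)), Reshape((73, 144))])
model.compile(optimizer='adam', loss=loss)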

Example 7: anomaly_correlation

# Required imports: from keras import losses [as alias]
# Or alternatively: from keras.losses import mean_squared_error [as alias]
def anomaly_correlation(y_true, y_pred, mean=0., regularize_mean='mse', reverse=True):
    """
    Calculate the anomaly correlation. FOR NOW, ASSUMES THAT THE CLIMATOLOGICAL MEAN IS 0, AND THEREFORE REQUIRES DATA
    TO BE SCALED TO REMOVE SPATIALLY-DEPENDENT MEAN.

    :param y_true: Tensor: target values
    :param y_pred: Tensor: model-predicted values
    :param mean: float: subtract this global mean from all predicted and target array values. IGNORED FOR NOW.
    :param regularize_mean: str or None: if not None, also penalizes a form of mean squared error:
        global: penalize differences in the global mean
        spatial: penalize differences in spatially-averaged mean (last two dimensions)
        mse: penalize the mean squared error
        mae: penalize the mean absolute error
    :param reverse: bool: if True, inverts the loss so that -1 is the target score
    :return: float: anomaly correlation loss
    """
    if regularize_mean is not None:
        assert regularize_mean in ['global', 'spatial', 'mse', 'mae']
    a = (K.mean(y_pred * y_true)
         / K.sqrt(K.mean(K.square(y_pred)) * K.mean(K.square(y_true))))
    if regularize_mean is not None:
        if regularize_mean == 'global':
            m = K.abs((K.mean(y_true) - K.mean(y_pred)) / K.mean(y_true))
        elif regularize_mean == 'spatial':
            m = K.mean(K.abs((K.mean(y_true, axis=[-2, -1]) - K.mean(y_pred, axis=[-2, -1]))
                             / K.mean(y_true, axis=[-2, -1])))
        elif regularize_mean == 'mse':
            m = mean_squared_error(y_true, y_pred)
        elif regularize_mean == 'mae':
            m = mean_absolute_error(y_true, y_pred)
    if reverse:
        if regularize_mean is not None:
            return m - a
        else:
            return -a
    else:
        if regularize_mean:
            return a - m
        else:
            return a 
Developer: jweyn | Project: DLWP | Lines of code: 42 | Source file: custom.py

Example 8: loss_dict

# Required imports: from keras import losses [as alias]
# Or alternatively: from keras.losses import mean_squared_error [as alias]
def loss_dict(self):
        """ Return the loss dict """
        loss_dict = dict(mae=losses.mean_absolute_error,
                         mse=losses.mean_squared_error,
                         logcosh=losses.logcosh,
                         smooth_loss=generalized_loss,
                         l_inf_norm=l_inf_norm,
                         ssim=DSSIMObjective(),
                         gmsd=gmsd_loss,
                         pixel_gradient_diff=gradient_loss)
        return loss_dict 
Developer: deepfakes | Project: faceswap | Lines of code: 13 | Source file: _base.py

Example 9: discriminator_loss

# Required imports: from keras import losses [as alias]
# Or alternatively: from keras.losses import mean_squared_error [as alias]
def discriminator_loss(y_true, y_pred):
    loss = mean_squared_error(y_true, y_pred)
    is_large = k.greater(loss, k.constant(_disc_train_thresh)) # threshold
    is_large = k.cast(is_large, k.floatx())
    return loss * is_large # binary threshold the loss to prevent overtraining the discriminator 
Developer: alecGraves | Project: cyclegan_keras | Lines of code: 7 | Source file: losses.py
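The loss above relies on two names defined elsewhere in the cyclegan_keras module: k, the Keras backend, and the module-level constant _disc_train_thresh. A hedged sketch of that assumed setup; the threshold value shown is illustrative, not the project's actual setting.

from keras import backend as k
from keras.losses import mean_squared_error

_disc_train_thresh = 0.2  # illustrative threshold; the real value is configured in the project

# Typical usage, assuming a `discriminator` model defined elsewhere:
# discriminator.compile(optimizer='adam', loss=discriminator_loss)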

Example 10: objective_function_for_value

# Required imports: from keras import losses [as alias]
# Or alternatively: from keras.losses import mean_squared_error [as alias]
def objective_function_for_value(y_true, y_pred):
        return mean_squared_error(y_true, y_pred) 
Developer: bhansconnect | Project: alpha_zero_othello | Lines of code: 4 | Source file: aiplayer.py

Example 11: combined_loss

# Required imports: from keras import losses [as alias]
# Or alternatively: from keras.losses import mean_squared_error [as alias]
def combined_loss(y_true, y_pred):
    '''
    Uses a combination of mean_squared_error and an L1 penalty on the output of AE
    '''
    return mse(y_true, y_pred) + 0.01*mae(0, y_pred) 
Developer: ECP-CANDLE | Project: Benchmarks | Lines of code: 7 | Source file: helper.py
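combined_loss uses the short names mse and mae, which are presumably aliases for keras.losses.mean_squared_error and keras.losses.mean_absolute_error in the source module. A hedged sketch of that assumed setup:

from keras.losses import mean_squared_error as mse
from keras.losses import mean_absolute_error as mae

# mae(0, y_pred) acts as an L1 penalty on the autoencoder output.
# Usage, assuming an `autoencoder` model defined elsewhere:
# autoencoder.compile(optimizer='adam', loss=combined_loss)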

Example 12: anomaly_correlation_loss

# Required imports: from keras import losses [as alias]
# Or alternatively: from keras.losses import mean_squared_error [as alias]
def anomaly_correlation_loss(mean=None, regularize_mean='mse', reverse=True):
    """
    Create a Keras loss function for anomaly correlation.

    :param mean: ndarray or None: if not None, must be an array with the same shape as the expected prediction, except
        that the first (batch) axis should have a dimension of 1.
    :param regularize_mean: str or None: if not None, also penalizes a form of mean squared error:
        global: penalize differences in the global mean
        spatial: penalize differences in spatially-averaged mean (last two dimensions)
        mse: penalize the mean squared error
        mae: penalize the mean absolute error
    :param reverse: bool: if True, inverts the loss so that -1 is the (minimized) target score. Must be True if
        regularize_mean is not None.
    :return: method: anomaly correlation loss function
    """
    if mean is not None:
        assert len(mean.shape) > 1
        assert mean.shape[0] == 1
        mean_tensor = K.variable(mean, name='anomaly_correlation_mean')

    if regularize_mean is not None:
        assert regularize_mean in ['global', 'spatial', 'mse', 'mae']
        reverse = True

    def acc_loss(y_true, y_pred):
        if mean is not None:
            a = (K.mean((y_pred - mean_tensor) * (y_true - mean_tensor))
                 / K.sqrt(K.mean(K.square((y_pred - mean_tensor))) * K.mean(K.square((y_true - mean_tensor)))))
        else:
            a = (K.mean(y_pred * y_true)
                 / K.sqrt(K.mean(K.square(y_pred)) * K.mean(K.square(y_true))))
        if regularize_mean is not None:
            if regularize_mean == 'global':
                m = K.abs((K.mean(y_true) - K.mean(y_pred)) / K.mean(y_true))
            elif regularize_mean == 'spatial':
                m = K.mean(K.abs((K.mean(y_true, axis=[-2, -1]) - K.mean(y_pred, axis=[-2, -1]))
                                 / K.mean(y_true, axis=[-2, -1])))
            elif regularize_mean == 'mse':
                m = mean_squared_error(y_true, y_pred)
            elif regularize_mean == 'mae':
                m = mean_absolute_error(y_true, y_pred)
        if reverse:
            if regularize_mean is not None:
                return m - a
            else:
                return -a
        else:
            if regularize_mean:
                return a - m
            else:
                return a

    return acc_loss


# Compatibility names 
Developer: jweyn | Project: DLWP | Lines of code: 58 | Source file: custom.py
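Like Example 6, this is a factory whose return value is passed to compile(). A hedged usage sketch, assuming a pre-computed climatological mean field whose leading (batch) axis has length 1 and a model with a matching output shape; the zero field below is illustrative only.

import numpy as np

climatology = np.zeros((1, 73, 144), dtype='float32')  # illustrative mean field, batch axis of 1

# Reverse the sign so that -1 is the minimized target and add an MSE regularizer.
acc_loss = anomaly_correlation_loss(mean=climatology, regularize_mean='mse', reverse=True)
# model.compile(optimizer='adam', loss=acc_loss)  # assuming a model with output shape (73, 144)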


Note: The keras.losses.mean_squared_error examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are drawn from open-source projects contributed by their respective developers; copyright of the source code remains with the original authors, and distribution and use should follow each project's License. Please do not reproduce this article without permission.