

Python tensor.inv Method Code Examples

This article collects typical usage examples of the theano.tensor.inv method in Python. If you are wondering what tensor.inv does, how to call it, or what it looks like in real code, the curated examples below should help. You can also explore further usage examples from the theano.tensor module.


Below are 10 code examples of tensor.inv, sorted by popularity by default. You can upvote the ones you like or find useful; your votes help the system recommend better Python code examples. First, though, a minimal sketch of what T.inv itself computes is given below.
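Note that T.inv is the elementwise reciprocal 1/x of a tensor, not a matrix inverse (for that, see theano.tensor.nlinalg.matrix_inverse). The following snippet is illustrative only:

import theano
import theano.tensor as T

x = T.dvector('x')
reciprocal = theano.function([x], T.inv(x))  # elementwise 1/x
print(reciprocal([1.0, 2.0, 4.0]))           # -> [1.0, 0.5, 0.25]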

Example 1: normalize_batch_in_training

# Required import: from theano import tensor [as alias]
# Or: from theano.tensor import inv [as alias]
def normalize_batch_in_training(x, gamma, beta,
                                reduction_axes, epsilon=1e-3):
    """Computes mean and std for batch then apply batch_normalization on batch.
    """
    # TODO remove this if statement when Theano without
    # T.nnet.bn.batch_normalization_train is deprecated
    if not hasattr(T.nnet.bn, 'batch_normalization_train'):
        return _old_normalize_batch_in_training(x, gamma, beta, reduction_axes, epsilon)

    if gamma is None:
        if beta is None:
            gamma = ones_like(x)
        else:
            gamma = ones_like(beta)
    if beta is None:
        if gamma is None:
            beta = zeros_like(x)
        else:
            beta = zeros_like(gamma)

    normed, mean, stdinv = T.nnet.bn.batch_normalization_train(
        x, gamma, beta, reduction_axes, epsilon)

    return normed, mean, T.inv(stdinv ** 2) 
Developer: Relph1119 | Project: GraphicDesignPatternByPython | Lines: 25 | Source: theano_backend.py
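Why T.inv(stdinv ** 2) is the variance: batch_normalization_train returns the inverse standard deviation stdinv = 1/sqrt(var + epsilon), so the reciprocal of its square recovers var + epsilon. A plain-NumPy sanity check of this identity (the values are made up for illustration):

import numpy as np

var, epsilon = 4.0, 1e-3
stdinv = 1.0 / np.sqrt(var + epsilon)  # what batch_normalization_train returns
print(1.0 / stdinv ** 2)               # -> 4.001, i.e. var + epsilon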

Example 2: normalize_batch_in_training

# Required import: from theano import tensor [as alias]
# Or: from theano.tensor import inv [as alias]
def normalize_batch_in_training(x, gamma, beta,
                                reduction_axes, epsilon=1e-3):
    '''Computes the mean and std of the batch, then applies batch normalization to it.
    '''
    # TODO: remove this if-statement once Theano versions without
    # T.nnet.bn.batch_normalization_train are deprecated
    if not hasattr(T.nnet.bn, 'batch_normalization_train'):
        return _old_normalize_batch_in_training(x, gamma, beta, reduction_axes, epsilon)

    normed, mean, stdinv = T.nnet.bn.batch_normalization_train(
        x, gamma, beta, reduction_axes, epsilon)

    return normed, mean, T.inv(stdinv ** 2) 
Developer: lingluodlut | Project: Att-ChemdNER | Lines: 15 | Source: theano_backend.py

Example 3: _old_normalize_batch_in_training

# Required import: from theano import tensor [as alias]
# Or: from theano.tensor import inv [as alias]
def _old_normalize_batch_in_training(x, gamma, beta,
                                     reduction_axes, epsilon=1e-3):
    '''Computes the mean and std of the batch, then applies batch normalization to it.
    '''
    dev = theano.config.device
    use_cudnn = ndim(x) < 5 and reduction_axes == [0, 2, 3] and (dev.startswith('cuda') or dev.startswith('gpu'))
    if use_cudnn:
        broadcast_beta = beta.dimshuffle('x', 0, 'x', 'x')
        broadcast_gamma = gamma.dimshuffle('x', 0, 'x', 'x')
        try:
            normed, mean, stdinv = theano.sandbox.cuda.dnn.dnn_batch_normalization_train(
                x, broadcast_gamma, broadcast_beta, 'spatial', epsilon)
            var = T.inv(stdinv ** 2)
            return normed, T.flatten(mean), T.flatten(var)
        except AttributeError:
            pass

    var = x.var(reduction_axes)
    mean = x.mean(reduction_axes)

    target_shape = []
    for axis in range(ndim(x)):
        if axis in reduction_axes:
            target_shape.append(1)
        else:
            target_shape.append(x.shape[axis])
    target_shape = T.stack(*target_shape)

    broadcast_mean = T.reshape(mean, target_shape)
    broadcast_var = T.reshape(var, target_shape)
    broadcast_beta = T.reshape(beta, target_shape)
    broadcast_gamma = T.reshape(gamma, target_shape)
    normed = batch_normalization(x, broadcast_mean, broadcast_var,
                                 broadcast_beta, broadcast_gamma,
                                 epsilon)
    return normed, mean, var


# TODO: remove this if-statement once Theano versions without
# T.nnet.bn.batch_normalization_test are deprecated
Developer: lingluodlut | Project: Att-ChemdNER | Lines: 42 | Source: theano_backend.py
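The target_shape loop in this example rebuilds a broadcastable shape: every reduced axis becomes 1, so the per-channel mean and variance broadcast back against x. A NumPy sketch of the same pattern, assuming a 4D input reduced over axes [0, 2, 3]:

import numpy as np

x = np.random.randn(8, 3, 5, 5)
reduction_axes = [0, 2, 3]
mean = x.mean(axis=tuple(reduction_axes))        # shape (3,)
target_shape = [1 if ax in reduction_axes else x.shape[ax]
                for ax in range(x.ndim)]         # [1, 3, 1, 1]
centered = x - mean.reshape(target_shape)        # broadcasts cleanly
print(centered.shape)                            # (8, 3, 5, 5)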

Example 4: get_output_for

# Required import: from theano import tensor [as alias]
# Or: from theano.tensor import inv [as alias]
def get_output_for(self, input, style=None, **kwargs):

		mean = input.mean(self.axes)
		inv_std = T.inv(T.sqrt(input.var(self.axes) + self.epsilon))

		pattern = [0, 1, 'x', 'x']

		if style is None:
			pattern_params = ['x', 0, 'x', 'x']
			beta = 0 if self.beta is None else self.beta.dimshuffle(pattern_params)
			gamma = 1 if self.gamma is None else self.gamma.dimshuffle(pattern_params)
		else:
			pattern_params = pattern
			beta = 0 if self.beta is None else self.beta[style].dimshuffle(pattern_params)
			gamma = 1 if self.gamma is None else self.gamma[style].dimshuffle(pattern_params)
			# if self.beta is not None:
			# 	beta = ifelse(T.eq(style.shape[0], 1), T.addbroadcast(beta, 0), beta)
			# if self.gamma is not None:
			# 	gamma = ifelse(T.eq(style.shape[0], 1), T.addbroadcast(gamma, 0), gamma)

		mean = mean.dimshuffle(pattern)
		inv_std = inv_std.dimshuffle(pattern)

		# normalize
		normalized = (input - mean) * (gamma * inv_std) + beta
		return normalized 
Developer: joelmoniz | Project: gogh-figure | Lines: 28 | Source: layers.py
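The pattern [0, 1, 'x', 'x'] above keeps the batch and channel axes and inserts broadcastable axes for height and width, the instance-normalization layout. A short sketch of that dimshuffle, assuming self.axes == (2, 3) on a 4D NCHW input:

import theano.tensor as T

x = T.tensor4('x')                       # (batch, channel, height, width)
mean = x.mean(axis=(2, 3))               # per-sample, per-channel: (batch, channel)
mean4 = mean.dimshuffle(0, 1, 'x', 'x')  # (batch, channel, 1, 1), broadcastable
normed = x - mean4                       # broadcasts over height and width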

Example 5: get_output_for

# Required import: from theano import tensor [as alias]
# Or: from theano.tensor import inv [as alias]
def get_output_for(self, input, deterministic=False,
                       batch_norm_use_averages=None,
                       batch_norm_update_averages=None, **kwargs):
        # If the BN variables shall be updated as before, redirect to the parent
        # implementation.
        if not isinstance(batch_norm_update_averages, dict):
            return super(BatchNormLayer, self).get_output_for(
                input, deterministic, batch_norm_use_averages,
                batch_norm_update_averages, **kwargs)
        else:
            input_mean = input.mean(self.axes)
            input_inv_std = T.inv(T.sqrt(input.var(self.axes) + self.epsilon))

            # Decide whether to use the stored averages or mini-batch statistics
            if batch_norm_use_averages is None:
                batch_norm_use_averages = deterministic
            use_averages = batch_norm_use_averages

            if use_averages:
                mean = self.mean
                inv_std = self.inv_std
            else:
                mean = input_mean
                inv_std = input_inv_std

            # Instead of automatically updating the averages, we add the update
            # ops to a dictionary.
            update_averages = batch_norm_update_averages
            if isinstance(update_averages, dict):
                update_averages[self.mean] = ((1 - self.alpha) * self.mean +
                                              self.alpha * input_mean)
                update_averages[self.inv_std] = ((1 - self.alpha) *
                                                 self.inv_std + self.alpha *
                                                 input_inv_std)

            # prepare dimshuffle pattern inserting broadcastable axes as needed
            param_axes = iter(range(input.ndim - len(self.axes)))
            pattern = ['x' if input_axis in self.axes
                       else next(param_axes)
                       for input_axis in range(input.ndim)]

            # apply dimshuffle pattern to all parameters
            beta = 0 if self.beta is None else self.beta.dimshuffle(pattern)
            gamma = 1 if self.gamma is None else self.gamma.dimshuffle(pattern)
            mean = mean.dimshuffle(pattern)
            inv_std = inv_std.dimshuffle(pattern)

            # normalize
            normalized = (input - mean) * (gamma * inv_std) + beta
            return normalized 
Developer: TobyPDE | Project: FRRN | Lines: 52 | Source: layers.py
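A hedged usage sketch for the dict-based update collection above: instead of letting the layer register its running-average updates implicitly, the caller passes a dict and later merges it into the optimizer updates. BatchNormLayer here is the subclass defined in this example; the input shape and learning rate are made up:

import theano
import lasagne

l_in = lasagne.layers.InputLayer((None, 16))
l_bn = BatchNormLayer(l_in)  # the subclass from this example

bn_updates = {}
out = lasagne.layers.get_output(l_bn, batch_norm_update_averages=bn_updates)
loss = out.mean()
params = lasagne.layers.get_all_params(l_bn, trainable=True)
updates = lasagne.updates.sgd(loss, params, learning_rate=0.01)
updates.update(bn_updates)  # apply the collected BN running-average updates too
train_fn = theano.function([l_in.input_var], loss, updates=updates)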

Example 6: _old_normalize_batch_in_training

# Required import: from theano import tensor [as alias]
# Or: from theano.tensor import inv [as alias]
def _old_normalize_batch_in_training(x, gamma, beta, reduction_axes,
                                     epsilon=1e-3):  # pragma: no cover
    """Computes mean and std for batch then apply batch_normalization on batch.
    """
    if gamma is None:
        gamma = ones_like(x)
    if beta is None:
        beta = zeros_like(x)

    dev = theano.config.device
    use_cudnn = ndim(x) < 5 and reduction_axes == [0, 2, 3] and (dev.startswith('cuda') or dev.startswith('gpu'))
    if use_cudnn:
        broadcast_beta = beta.dimshuffle('x', 0, 'x', 'x')
        broadcast_gamma = gamma.dimshuffle('x', 0, 'x', 'x')
        try:
            normed, mean, stdinv = theano.sandbox.cuda.dnn.dnn_batch_normalization_train(
                x, broadcast_gamma, broadcast_beta, 'spatial', epsilon)
            normed = theano.tensor.as_tensor_variable(normed)
            mean = theano.tensor.as_tensor_variable(mean)
            stdinv = theano.tensor.as_tensor_variable(stdinv)
            var = T.inv(stdinv ** 2)
            return normed, T.flatten(mean), T.flatten(var)
        except AttributeError:
            pass

    var = x.var(reduction_axes)
    mean = x.mean(reduction_axes)

    target_shape = []
    for axis in range(ndim(x)):
        if axis in reduction_axes:
            target_shape.append(1)
        else:
            target_shape.append(x.shape[axis])
    target_shape = T.stack(*target_shape)

    broadcast_mean = T.reshape(mean, target_shape)
    broadcast_var = T.reshape(var, target_shape)
    broadcast_beta = T.reshape(beta, target_shape)
    broadcast_gamma = T.reshape(gamma, target_shape)
    normed = batch_normalization(x, broadcast_mean, broadcast_var,
                                 broadcast_beta, broadcast_gamma,
                                 epsilon)
    return normed, mean, var


# TODO: remove this if-statement once Theano versions without
# T.nnet.bn.batch_normalization_test are deprecated
Developer: Relph1119 | Project: GraphicDesignPatternByPython | Lines: 50 | Source: theano_backend.py

Example 7: _old_normalize_batch_in_training

# Required import: from theano import tensor [as alias]
# Or: from theano.tensor import inv [as alias]
def _old_normalize_batch_in_training(x, gamma, beta,
                                     reduction_axes, epsilon=1e-3):
    """Computes mean and std for batch then apply batch_normalization on batch.
    """
    if gamma is None:
        gamma = ones_like(x)
    if beta is None:
        beta = zeros_like(x)

    dev = theano.config.device
    use_cudnn = ndim(x) < 5 and reduction_axes == [0, 2, 3] and (dev.startswith('cuda') or dev.startswith('gpu'))
    if use_cudnn:
        broadcast_beta = beta.dimshuffle('x', 0, 'x', 'x')
        broadcast_gamma = gamma.dimshuffle('x', 0, 'x', 'x')
        try:
            normed, mean, stdinv = theano.sandbox.cuda.dnn.dnn_batch_normalization_train(
                x, broadcast_gamma, broadcast_beta, 'spatial', epsilon)
            normed = theano.tensor.as_tensor_variable(normed)
            mean = theano.tensor.as_tensor_variable(mean)
            stdinv = theano.tensor.as_tensor_variable(stdinv)
            var = T.inv(stdinv ** 2)
            return normed, T.flatten(mean), T.flatten(var)
        except AttributeError:
            pass

    var = x.var(reduction_axes)
    mean = x.mean(reduction_axes)

    target_shape = []
    for axis in range(ndim(x)):
        if axis in reduction_axes:
            target_shape.append(1)
        else:
            target_shape.append(x.shape[axis])
    target_shape = T.stack(*target_shape)

    broadcast_mean = T.reshape(mean, target_shape)
    broadcast_var = T.reshape(var, target_shape)
    broadcast_beta = T.reshape(beta, target_shape)
    broadcast_gamma = T.reshape(gamma, target_shape)
    normed = batch_normalization(x, broadcast_mean, broadcast_var,
                                 broadcast_beta, broadcast_gamma,
                                 epsilon)
    return normed, mean, var


# TODO: remove this if-statement once Theano versions without
# T.nnet.bn.batch_normalization_test are deprecated
Developer: hello-sea | Project: DeepLearning_Wavelet-LSTM | Lines: 50 | Source: theano_backend.py

Example 8: get_output_for

# Required import: from theano import tensor [as alias]
# Or: from theano.tensor import inv [as alias]
def get_output_for(self, input, deterministic=False,
                       batch_norm_use_averages=None,
                       batch_norm_update_averages=None, **kwargs):
        input_mean = input.mean(self.axes)
        input_inv_std = T.inv(T.sqrt(input.var(self.axes) + self.epsilon))

        # Decide whether to use the stored averages or mini-batch statistics
        if batch_norm_use_averages is None:
            batch_norm_use_averages = deterministic
        use_averages = batch_norm_use_averages

        if use_averages:
            mean = self.mean
            inv_std = self.inv_std
        else:
            mean = input_mean
            inv_std = input_inv_std

        # Decide whether to update the stored averages
        if batch_norm_update_averages is None:
            batch_norm_update_averages = not deterministic
        update_averages = batch_norm_update_averages

        if update_averages:
            # Trick: To update the stored statistics, we create memory-aliased
            # clones of the stored statistics:
            running_mean = theano.clone(self.mean, share_inputs=False)
            running_inv_std = theano.clone(self.inv_std, share_inputs=False)
            # set a default update for them:
            running_mean.default_update = ((1 - self.alpha) * running_mean +
                                           self.alpha * input_mean)
            running_inv_std.default_update = ((1 - self.alpha) *
                                              running_inv_std +
                                              self.alpha * input_inv_std)
            # and make sure they end up in the graph without participating in
            # the computation (this way their default_update will be collected
            # and applied, but the computation will be optimized away):
            mean += 0 * running_mean
            inv_std += 0 * running_inv_std

        # prepare dimshuffle pattern inserting broadcastable axes as needed
        param_axes = iter(range(input.ndim - len(self.axes)))
        pattern = ['x' if input_axis in self.axes
                   else next(param_axes)
                   for input_axis in range(input.ndim)]

        # apply dimshuffle pattern to all parameters
        beta = 0 if self.beta is None else self.beta.dimshuffle(pattern)
        gamma = 1 if self.gamma is None else self.gamma.dimshuffle(pattern)
        mean = mean.dimshuffle(pattern)
        inv_std = inv_std.dimshuffle(pattern)

        # normalize
        normalized = (input - mean) * (gamma * inv_std) + beta
        return normalized 
Developer: SBU-BMI | Project: u24_lymphocyte | Lines: 57 | Source: batch_norms.py
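The "0 * running_mean" lines above rely on Theano's default_update mechanism: any shared variable that appears in the graph gets its default_update collected by theano.function, even when its contribution is multiplied by zero and optimized away. A minimal, self-contained sketch of the trick (the EMA coefficient 0.9 is made up):

import numpy as np
import theano
import theano.tensor as T

x = T.dscalar('x')
running = theano.shared(np.float64(0.0), name='running')

clone = theano.clone(running, share_inputs=False)  # memory-aliased clone
clone.default_update = 0.9 * clone + 0.1 * x       # exponential moving average

y = x + 0 * clone  # clone enters the graph but contributes nothing
f = theano.function([x], y)

f(1.0)
f(1.0)
print(running.get_value())  # -> ~0.19: the average was updated as a side effect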

Example 9: new_update_deltas

# Required import: from theano import tensor [as alias]
# Or: from theano.tensor import inv [as alias]
def new_update_deltas(self, network):
        if not network.find_hyperparameter(["bn_update_moving_stats"], True):
            return super(AdvancedBatchNormalizationNode,
                         self).new_update_deltas(network)

        moving_var_type = network.find_hyperparameter(
            ["moving_var_type"], DEFAULT_MOVING_VAR_TYPE)
        epsilon = network.find_hyperparameter(["epsilon"], 1e-8)

        if moving_var_type == "log_var":
            moving_var_init_value = 1.0

            def transform_var(v):
                return T.log(v + epsilon)

            def untransform_var(v):
                return T.exp(v)
        elif moving_var_type == "var":
            moving_var_init_value = 0.0

            def transform_var(v):
                return v

            def untransform_var(v):
                return v
        elif moving_var_type == "inv_std":
            moving_var_init_value = 0.0

            def transform_var(v):
                return T.inv(T.sqrt(v) + epsilon)

            def untransform_var(v):
                return T.sqr(T.inv(v))
        else:
            # guard: an unknown value would otherwise hit a NameError below
            raise ValueError("unknown moving_var_type: %s" % moving_var_type)

        moving_mean = network.get_vw("mean").variable
        moving_var = network.get_vw("var").variable
        in_mean = network.get_vw("in_mean").variable
        in_var = network.get_vw("in_var").variable
        alpha = network.find_hyperparameter(["alpha"], 0.1)

        updates = [
            (moving_mean, moving_mean * (1 - alpha) + in_mean * alpha),
            (moving_var,
             moving_var * (1 - alpha) + transform_var(in_var) * alpha),
        ]

        return treeano.UpdateDeltas.from_updates(updates) 
Developer: SBU-BMI | Project: u24_lymphocyte | Lines: 49 | Source: batch_normalization.py
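A quick NumPy check that each (transform_var, untransform_var) pair above round-trips the variance, up to epsilon (the variance value 2.5 is arbitrary):

import numpy as np

v, epsilon = 2.5, 1e-8
# log_var:  exp(log(v + eps)) == v + eps
assert np.isclose(np.exp(np.log(v + epsilon)), v)
# inv_std:  (1 / (1 / (sqrt(v) + eps))) ** 2 == (sqrt(v) + eps) ** 2 ~ v
assert np.isclose((1.0 / (1.0 / (np.sqrt(v) + epsilon))) ** 2, v)
print('round-trips hold')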

Example 10: get_output_for

# Required import: from theano import tensor [as alias]
# Or: from theano.tensor import inv [as alias]
def get_output_for(self, input, deterministic=False,
                       batch_norm_use_averages=None,
                       batch_norm_update_averages=None, **kwargs):
        input_mean = input.mean(self.axes)
        input_inv_std = T.inv(T.sqrt(input.var(self.axes) + self.epsilon))

        # Decide whether to use the stored averages or mini-batch statistics
        if batch_norm_use_averages is None:
            batch_norm_use_averages = deterministic
        use_averages = batch_norm_use_averages

        if use_averages:
            mean = self.mean
            inv_std = self.inv_std
        else:
            mean = input_mean
            inv_std = input_inv_std

        # Decide whether to update the stored averages
        if batch_norm_update_averages is None:
            batch_norm_update_averages = not deterministic
        update_averages = batch_norm_update_averages

        if update_averages:
            # Trick: To update the stored statistics, we create memory-aliased
            # clones of the stored statistics:
            running_mean = theano.clone(self.mean, share_inputs=False)
            running_inv_std = theano.clone(self.inv_std, share_inputs=False)
            # set a default update for them:
            running_mean.default_update = ((1 - self.alpha) * running_mean +
                                           self.alpha * input_mean)
            running_inv_std.default_update = ((1 - self.alpha) *
                                              running_inv_std +
                                              self.alpha * input_inv_std)
            # and make sure they end up in the graph without participating in
            # the computation (this way their default_update will be collected
            # and applied, but the computation will be optimized away):
            mean += 0 * running_mean
            inv_std += 0 * running_inv_std

        # prepare dimshuffle pattern inserting broadcastable axes as needed
        param_axes = iter(range(input.ndim - len(self.axes)))
        pattern = ['x' if input_axis in self.axes
                   else next(param_axes)
                   for input_axis in range(input.ndim)]

        # apply dimshuffle pattern to all parameters
        beta = self.beta.dimshuffle(pattern)
        mean = mean.dimshuffle(pattern)
        inv_std = inv_std.dimshuffle(pattern)

        # normalize
        normalized = (input - mean) * inv_std + beta
        return normalized 
Developer: SBU-BMI | Project: u24_lymphocyte | Lines: 56 | Source: batch_norms.py


Note: the theano.tensor.inv examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets come from open-source projects whose authors retain copyright; consult each project's license before redistributing or using the code. Do not reproduce this page without permission.