

Python tensor.inv Method Code Examples

This article collects typical usage examples of the Python method theano.tensor.inv. If you are wondering what tensor.inv does, how to use it, or what real uses of it look like, the curated code examples below may help. You can also explore further usage examples from theano.tensor.


Below are 10 code examples of tensor.inv, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Python code examples.
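
A note before the examples: T.inv computes the elementwise reciprocal 1/x of a tensor; despite the name, it is not a matrix inverse. A minimal sketch of the method in isolation:

import numpy as np
import theano
import theano.tensor as T

x = T.vector('x')
reciprocal = theano.function([x], T.inv(x))  # elementwise 1/x
print(reciprocal(np.array([1.0, 2.0, 4.0], dtype=theano.config.floatX)))
# -> [1.   0.5  0.25]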

Example 1: normalize_batch_in_training

# Module to import: from theano import tensor [as alias]
# Or: from theano.tensor import inv [as alias]
def normalize_batch_in_training(x, gamma, beta,
                                reduction_axes, epsilon=1e-3):
    """Computes mean and std for batch then apply batch_normalization on batch.
    """
    # TODO remove this if statement when Theano without
    # T.nnet.bn.batch_normalization_train is deprecated
    if not hasattr(T.nnet.bn, 'batch_normalization_train'):
        return _old_normalize_batch_in_training(x, gamma, beta, reduction_axes, epsilon)

    if gamma is None:
        if beta is None:
            gamma = ones_like(x)
        else:
            gamma = ones_like(beta)
    if beta is None:
        if gamma is None:
            beta = zeros_like(x)
        else:
            beta = zeros_like(gamma)

    normed, mean, stdinv = T.nnet.bn.batch_normalization_train(
        x, gamma, beta, reduction_axes, epsilon)

    return normed, mean, T.inv(stdinv ** 2) 
Developer ID: Relph1119, Project: GraphicDesignPatternByPython, Lines: 25, Source: theano_backend.py
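
Since batch_normalization_train returns the inverse standard deviation stdinv, the expression T.inv(stdinv ** 2) recovers the variance (1/stdinv² equals std²). A quick NumPy check of that identity:

import numpy as np

std = np.array([0.5, 2.0])
stdinv = 1.0 / std
print(1.0 / stdinv ** 2)  # -> [0.25 4.  ], i.e. std ** 2, the variance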

Example 2: normalize_batch_in_training

# Module to import: from theano import tensor [as alias]
# Or: from theano.tensor import inv [as alias]
def normalize_batch_in_training(x, gamma, beta,
                                reduction_axes, epsilon=1e-3):
    '''Computes mean and std for the batch, then applies batch_normalization to the batch.
    '''
    # TODO remove this if statement when Theano without
    # T.nnet.bn.batch_normalization_train is deprecated
    if not hasattr(T.nnet.bn, 'batch_normalization_train'):
        return _old_normalize_batch_in_training(x, gamma, beta, reduction_axes, epsilon)

    normed, mean, stdinv = T.nnet.bn.batch_normalization_train(
        x, gamma, beta, reduction_axes, epsilon)

    return normed, mean, T.inv(stdinv ** 2) 
Developer ID: lingluodlut, Project: Att-ChemdNER, Lines: 15, Source: theano_backend.py

Example 3: _old_normalize_batch_in_training

# Module to import: from theano import tensor [as alias]
# Or: from theano.tensor import inv [as alias]
def _old_normalize_batch_in_training(x, gamma, beta,
                                     reduction_axes, epsilon=1e-3):
    '''Computes mean and std for the batch, then applies batch_normalization to the batch.
    '''
    dev = theano.config.device
    use_cudnn = (ndim(x) < 5 and reduction_axes == [0, 2, 3] and
                 (dev.startswith('cuda') or dev.startswith('gpu')))
    if use_cudnn:
        broadcast_beta = beta.dimshuffle('x', 0, 'x', 'x')
        broadcast_gamma = gamma.dimshuffle('x', 0, 'x', 'x')
        try:
            normed, mean, stdinv = theano.sandbox.cuda.dnn.dnn_batch_normalization_train(
                x, broadcast_gamma, broadcast_beta, 'spatial', epsilon)
            var = T.inv(stdinv ** 2)
            return normed, T.flatten(mean), T.flatten(var)
        except AttributeError:
            pass

    var = x.var(reduction_axes)
    mean = x.mean(reduction_axes)

    target_shape = []
    for axis in range(ndim(x)):
        if axis in reduction_axes:
            target_shape.append(1)
        else:
            target_shape.append(x.shape[axis])
    target_shape = T.stack(*target_shape)

    broadcast_mean = T.reshape(mean, target_shape)
    broadcast_var = T.reshape(var, target_shape)
    broadcast_beta = T.reshape(beta, target_shape)
    broadcast_gamma = T.reshape(gamma, target_shape)
    normed = batch_normalization(x, broadcast_mean, broadcast_var,
                                 broadcast_beta, broadcast_gamma,
                                 epsilon)
    return normed, mean, var


# TODO remove this if statement when Theano without
# T.nnet.bn.batch_normalization_test is deprecated 
Developer ID: lingluodlut, Project: Att-ChemdNER, Lines: 42, Source: theano_backend.py
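
The target_shape loop above keeps every non-reduced axis of x and collapses each reduced axis to 1, so the per-channel statistics broadcast against x after T.reshape. A small sketch of the shape logic, with a hypothetical (8, 16, 32, 32) NCHW input:

reduction_axes = [0, 2, 3]
x_shape = (8, 16, 32, 32)
target_shape = [1 if axis in reduction_axes else x_shape[axis]
                for axis in range(len(x_shape))]
print(target_shape)  # -> [1, 16, 1, 1]: per-channel stats broadcast over x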

Example 4: get_output_for

# Module to import: from theano import tensor [as alias]
# Or: from theano.tensor import inv [as alias]
def get_output_for(self, input, style=None, **kwargs):

		mean = input.mean(self.axes)
		inv_std = T.inv(T.sqrt(input.var(self.axes) + self.epsilon))

		pattern = [0, 1, 'x', 'x']

		if style is None:
			pattern_params = ['x', 0, 'x', 'x']
			beta = 0 if self.beta is None else self.beta.dimshuffle(pattern_params)
			gamma = 1 if self.gamma is None else self.gamma.dimshuffle(pattern_params)
		else:
			pattern_params = pattern
			beta = 0 if self.beta is None else self.beta[style].dimshuffle(pattern_params)
			gamma = 1 if self.gamma is None else self.gamma[style].dimshuffle(pattern_params)
			# if self.beta is not None:
			# 	beta = ifelse(T.eq(style.shape[0], 1), T.addbroadcast(beta, 0), beta)
			# if self.gamma is not None:
			# 	gamma = ifelse(T.eq(style.shape[0], 1), T.addbroadcast(gamma, 0), gamma)

		mean = mean.dimshuffle(pattern)
		inv_std = inv_std.dimshuffle(pattern)

		# normalize
		normalized = (input - mean) * (gamma * inv_std) + beta
		return normalized 
Developer ID: joelmoniz, Project: gogh-figure, Lines: 28, Source: layers.py
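
This layer stores beta and gamma per style (in the spirit of conditional instance normalization), and self.beta[style] selects the parameter row(s) for the requested style(s). A tiny NumPy sketch of that indexing; the shapes here are hypothetical, not taken from the project:

import numpy as np

num_styles, channels = 3, 4
beta = np.zeros((num_styles, channels))
style = np.array([1])     # select the parameters of style 1
print(beta[style].shape)  # -> (1, 4): per-style, per-channel parameters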

Example 5: get_output_for

# Module to import: from theano import tensor [as alias]
# Or: from theano.tensor import inv [as alias]
def get_output_for(self, input, deterministic=False,
                       batch_norm_use_averages=None,
                       batch_norm_update_averages=None, **kwargs):
        # If the BN variables should be updated as before, redirect to the
        # parent implementation.
        if not isinstance(batch_norm_update_averages, dict):
            return super(BatchNormLayer, self).get_output_for(
                input, deterministic, batch_norm_use_averages,
                batch_norm_update_averages, **kwargs)
        else:
            input_mean = input.mean(self.axes)
            input_inv_std = T.inv(T.sqrt(input.var(self.axes) + self.epsilon))

            # Decide whether to use the stored averages or mini-batch statistics
            if batch_norm_use_averages is None:
                batch_norm_use_averages = deterministic
            use_averages = batch_norm_use_averages

            if use_averages:
                mean = self.mean
                inv_std = self.inv_std
            else:
                mean = input_mean
                inv_std = input_inv_std

            # Instead of automatically updating the averages, we add the update
            # ops to a dictionary.
            update_averages = batch_norm_update_averages
            if isinstance(update_averages, dict):
                update_averages[self.mean] = ((1 - self.alpha) * self.mean +
                                              self.alpha * input_mean)
                update_averages[self.inv_std] = ((1 - self.alpha) *
                                                 self.inv_std + self.alpha *
                                                 input_inv_std)

            # prepare dimshuffle pattern inserting broadcastable axes as needed
            param_axes = iter(range(input.ndim - len(self.axes)))
            pattern = ['x' if input_axis in self.axes
                       else next(param_axes)
                       for input_axis in range(input.ndim)]

            # apply dimshuffle pattern to all parameters
            beta = 0 if self.beta is None else self.beta.dimshuffle(pattern)
            gamma = 1 if self.gamma is None else self.gamma.dimshuffle(pattern)
            mean = mean.dimshuffle(pattern)
            inv_std = inv_std.dimshuffle(pattern)

            # normalize
            normalized = (input - mean) * (gamma * inv_std) + beta
            return normalized 
Developer ID: TobyPDE, Project: FRRN, Lines: 52, Source: layers.py

Example 6: _old_normalize_batch_in_training

# Module to import: from theano import tensor [as alias]
# Or: from theano.tensor import inv [as alias]
def _old_normalize_batch_in_training(x, gamma, beta, reduction_axes,
                                     epsilon=1e-3):  # pragma: no cover
    """Computes mean and std for batch then apply batch_normalization on batch.
    """
    if gamma is None:
        gamma = ones_like(x)
    if beta is None:
        beta = zeros_like(x)

    dev = theano.config.device
    use_cudnn = (ndim(x) < 5 and reduction_axes == [0, 2, 3] and
                 (dev.startswith('cuda') or dev.startswith('gpu')))
    if use_cudnn:
        broadcast_beta = beta.dimshuffle('x', 0, 'x', 'x')
        broadcast_gamma = gamma.dimshuffle('x', 0, 'x', 'x')
        try:
            normed, mean, stdinv = theano.sandbox.cuda.dnn.dnn_batch_normalization_train(
                x, broadcast_gamma, broadcast_beta, 'spatial', epsilon)
            normed = theano.tensor.as_tensor_variable(normed)
            mean = theano.tensor.as_tensor_variable(mean)
            stdinv = theano.tensor.as_tensor_variable(stdinv)
            var = T.inv(stdinv ** 2)
            return normed, T.flatten(mean), T.flatten(var)
        except AttributeError:
            pass

    var = x.var(reduction_axes)
    mean = x.mean(reduction_axes)

    target_shape = []
    for axis in range(ndim(x)):
        if axis in reduction_axes:
            target_shape.append(1)
        else:
            target_shape.append(x.shape[axis])
    target_shape = T.stack(*target_shape)

    broadcast_mean = T.reshape(mean, target_shape)
    broadcast_var = T.reshape(var, target_shape)
    broadcast_beta = T.reshape(beta, target_shape)
    broadcast_gamma = T.reshape(gamma, target_shape)
    normed = batch_normalization(x, broadcast_mean, broadcast_var,
                                 broadcast_beta, broadcast_gamma,
                                 epsilon)
    return normed, mean, var


# TODO remove this if statement when Theano without
# T.nnet.bn.batch_normalization_test is deprecated 
Developer ID: Relph1119, Project: GraphicDesignPatternByPython, Lines: 50, Source: theano_backend.py

Example 7: _old_normalize_batch_in_training

# Module to import: from theano import tensor [as alias]
# Or: from theano.tensor import inv [as alias]
def _old_normalize_batch_in_training(x, gamma, beta,
                                     reduction_axes, epsilon=1e-3):
    """Computes mean and std for batch then apply batch_normalization on batch.
    """
    if gamma is None:
        gamma = ones_like(x)
    if beta is None:
        beta = zeros_like(x)

    dev = theano.config.device
    use_cudnn = (ndim(x) < 5 and reduction_axes == [0, 2, 3] and
                 (dev.startswith('cuda') or dev.startswith('gpu')))
    if use_cudnn:
        broadcast_beta = beta.dimshuffle('x', 0, 'x', 'x')
        broadcast_gamma = gamma.dimshuffle('x', 0, 'x', 'x')
        try:
            normed, mean, stdinv = theano.sandbox.cuda.dnn.dnn_batch_normalization_train(
                x, broadcast_gamma, broadcast_beta, 'spatial', epsilon)
            normed = theano.tensor.as_tensor_variable(normed)
            mean = theano.tensor.as_tensor_variable(mean)
            stdinv = theano.tensor.as_tensor_variable(stdinv)
            var = T.inv(stdinv ** 2)
            return normed, T.flatten(mean), T.flatten(var)
        except AttributeError:
            pass

    var = x.var(reduction_axes)
    mean = x.mean(reduction_axes)

    target_shape = []
    for axis in range(ndim(x)):
        if axis in reduction_axes:
            target_shape.append(1)
        else:
            target_shape.append(x.shape[axis])
    target_shape = T.stack(*target_shape)

    broadcast_mean = T.reshape(mean, target_shape)
    broadcast_var = T.reshape(var, target_shape)
    broadcast_beta = T.reshape(beta, target_shape)
    broadcast_gamma = T.reshape(gamma, target_shape)
    normed = batch_normalization(x, broadcast_mean, broadcast_var,
                                 broadcast_beta, broadcast_gamma,
                                 epsilon)
    return normed, mean, var


# TODO remove this if statement when Theano without
# T.nnet.bn.batch_normalization_test is deprecated 
Developer ID: hello-sea, Project: DeepLearning_Wavelet-LSTM, Lines: 50, Source: theano_backend.py

Example 8: get_output_for

# Module to import: from theano import tensor [as alias]
# Or: from theano.tensor import inv [as alias]
def get_output_for(self, input, deterministic=False,
                       batch_norm_use_averages=None,
                       batch_norm_update_averages=None, **kwargs):
        input_mean = input.mean(self.axes)
        input_inv_std = T.inv(T.sqrt(input.var(self.axes) + self.epsilon))

        # Decide whether to use the stored averages or mini-batch statistics
        if batch_norm_use_averages is None:
            batch_norm_use_averages = deterministic
        use_averages = batch_norm_use_averages

        if use_averages:
            mean = self.mean
            inv_std = self.inv_std
        else:
            mean = input_mean
            inv_std = input_inv_std

        # Decide whether to update the stored averages
        if batch_norm_update_averages is None:
            batch_norm_update_averages = not deterministic
        update_averages = batch_norm_update_averages

        if update_averages:
            # Trick: To update the stored statistics, we create memory-aliased
            # clones of the stored statistics:
            running_mean = theano.clone(self.mean, share_inputs=False)
            running_inv_std = theano.clone(self.inv_std, share_inputs=False)
            # set a default update for them:
            running_mean.default_update = ((1 - self.alpha) * running_mean +
                                           self.alpha * input_mean)
            running_inv_std.default_update = ((1 - self.alpha) *
                                              running_inv_std +
                                              self.alpha * input_inv_std)
            # and make sure they end up in the graph without participating in
            # the computation (this way their default_update will be collected
            # and applied, but the computation will be optimized away):
            mean += 0 * running_mean
            inv_std += 0 * running_inv_std

        # prepare dimshuffle pattern inserting broadcastable axes as needed
        param_axes = iter(range(input.ndim - len(self.axes)))
        pattern = ['x' if input_axis in self.axes
                   else next(param_axes)
                   for input_axis in range(input.ndim)]

        # apply dimshuffle pattern to all parameters
        beta = 0 if self.beta is None else self.beta.dimshuffle(pattern)
        gamma = 1 if self.gamma is None else self.gamma.dimshuffle(pattern)
        mean = mean.dimshuffle(pattern)
        inv_std = inv_std.dimshuffle(pattern)

        # normalize
        normalized = (input - mean) * (gamma * inv_std) + beta
        return normalized 
Developer ID: SBU-BMI, Project: u24_lymphocyte, Lines: 57, Source: batch_norms.py
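
The "memory-aliased clone" trick above is what lets the layer update its running statistics as a side effect of the forward pass: the clone shares storage with the original shared variable, so its default_update, once collected by theano.function, writes back into self.mean and self.inv_std. A minimal self-contained sketch of the same mechanism (all names here are illustrative, not from the project):

import numpy as np
import theano
import theano.tensor as T

mean = theano.shared(np.float32(0.0), name='mean')
batch_mean = T.fscalar('batch_mean')
alpha = np.float32(0.1)

# the clone shares its underlying storage with `mean`
running_mean = theano.clone(mean, share_inputs=False)
running_mean.default_update = ((1 - alpha) * running_mean +
                               alpha * batch_mean)

# adding 0 * running_mean pulls the clone (and its default_update) into
# the graph without changing the output
out = batch_mean + 0 * running_mean
f = theano.function([batch_mean], out)
f(np.float32(2.0))
print(mean.get_value())  # -> ~0.2 after one call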

Example 9: new_update_deltas

# Module to import: from theano import tensor [as alias]
# Or: from theano.tensor import inv [as alias]
def new_update_deltas(self, network):
        if not network.find_hyperparameter(["bn_update_moving_stats"], True):
            return super(AdvancedBatchNormalizationNode,
                         self).new_update_deltas(network)

        moving_var_type = network.find_hyperparameter(
            ["moving_var_type"], DEFAULT_MOVING_VAR_TYPE)
        epsilon = network.find_hyperparameter(["epsilon"], 1e-8)

        if moving_var_type == "log_var":
            moving_var_init_value = 1.0

            def transform_var(v):
                return T.log(v + epsilon)

            def untransform_var(v):
                return T.exp(v)
        elif moving_var_type == "var":
            moving_var_init_value = 0.0

            def transform_var(v):
                return v

            def untransform_var(v):
                return v
        elif moving_var_type == "inv_std":
            moving_var_init_value = 0.0

            def transform_var(v):
                return T.inv(T.sqrt(v) + epsilon)

            def untransform_var(v):
                return T.sqr(T.inv(v))

        moving_mean = network.get_vw("mean").variable
        moving_var = network.get_vw("var").variable
        in_mean = network.get_vw("in_mean").variable
        in_var = network.get_vw("in_var").variable
        alpha = network.find_hyperparameter(["alpha"], 0.1)

        updates = [
            (moving_mean, moving_mean * (1 - alpha) + in_mean * alpha),
            (moving_var,
             moving_var * (1 - alpha) + transform_var(in_var) * alpha),
        ]

        return treeano.UpdateDeltas.from_updates(updates) 
Developer ID: SBU-BMI, Project: u24_lymphocyte, Lines: 49, Source: batch_normalization.py
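
For the "inv_std" case above, transform_var maps a variance to an inverse standard deviation and untransform_var maps it back: sqr(inv(inv(sqrt(v) + epsilon))) = (sqrt(v) + epsilon)**2, which is approximately v for small epsilon. A minimal round-trip sketch:

import numpy as np
import theano
import theano.tensor as T

epsilon = 1e-8
v = T.vector('v')
inv_std = T.inv(T.sqrt(v) + epsilon)  # variance -> inverse std
recovered = T.sqr(T.inv(inv_std))     # inverse std -> variance
roundtrip = theano.function([v], recovered)
print(roundtrip(np.array([0.25, 1.0, 4.0], dtype=theano.config.floatX)))
# -> approximately [0.25 1.   4.  ]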

Example 10: get_output_for

# Module to import: from theano import tensor [as alias]
# Or: from theano.tensor import inv [as alias]
def get_output_for(self, input, deterministic=False,
                       batch_norm_use_averages=None,
                       batch_norm_update_averages=None, **kwargs):
        input_mean = input.mean(self.axes)
        input_inv_std = T.inv(T.sqrt(input.var(self.axes) + self.epsilon))

        # Decide whether to use the stored averages or mini-batch statistics
        if batch_norm_use_averages is None:
            batch_norm_use_averages = deterministic
        use_averages = batch_norm_use_averages

        if use_averages:
            mean = self.mean
            inv_std = self.inv_std
        else:
            mean = input_mean
            inv_std = input_inv_std

        # Decide whether to update the stored averages
        if batch_norm_update_averages is None:
            batch_norm_update_averages = not deterministic
        update_averages = batch_norm_update_averages

        if update_averages:
            # Trick: To update the stored statistics, we create memory-aliased
            # clones of the stored statistics:
            running_mean = theano.clone(self.mean, share_inputs=False)
            running_inv_std = theano.clone(self.inv_std, share_inputs=False)
            # set a default update for them:
            running_mean.default_update = ((1 - self.alpha) * running_mean +
                                           self.alpha * input_mean)
            running_inv_std.default_update = ((1 - self.alpha) *
                                              running_inv_std +
                                              self.alpha * input_inv_std)
            # and make sure they end up in the graph without participating in
            # the computation (this way their default_update will be collected
            # and applied, but the computation will be optimized away):
            mean += 0 * running_mean
            inv_std += 0 * running_inv_std

        # prepare dimshuffle pattern inserting broadcastable axes as needed
        param_axes = iter(range(input.ndim - len(self.axes)))
        pattern = ['x' if input_axis in self.axes
                   else next(param_axes)
                   for input_axis in range(input.ndim)]

        # apply dimshuffle pattern to all parameters
        beta = self.beta.dimshuffle(pattern)
        mean = mean.dimshuffle(pattern)
        inv_std = inv_std.dimshuffle(pattern)

        # normalize
        normalized = (input - mean) * inv_std + beta
        return normalized 
Developer ID: SBU-BMI, Project: u24_lymphocyte, Lines: 56, Source: batch_norms.py


Note: The theano.tensor.inv method examples in this article were compiled by 純淨天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are drawn from open-source projects contributed by various developers, and copyright remains with the original authors. Please consult each project's License before distributing or using the code; do not repost without permission.