

Python tensor.std Method Code Examples

This article collects typical usage examples of the theano.tensor.std method in Python. If you are wondering how exactly tensor.std is used, what it does, or where to find examples of it in practice, the curated code examples below may help. You can also explore further usage examples from the theano.tensor module, where this method is defined.


The following presents 13 code examples of the tensor.std method, sorted by popularity by default.
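
Before turning to the project examples, here is a minimal illustrative sketch of theano.tensor.std itself (not taken from any of the projects below; the variable names are illustrative):

import numpy as np
import theano
import theano.tensor as T

x = T.matrix('x')                        # symbolic 2-D input
std_all = T.std(x)                       # standard deviation over all elements
std_rows = T.std(x, axis=1)              # standard deviation along each row
f = theano.function([x], [std_all, std_rows])

data = np.random.randn(4, 5).astype(theano.config.floatX)
print(f(data))                           # [scalar, vector of length 4]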

Example 1: normalize_batch_in_training

# Required import: from theano import tensor [as alias]
# Or: from theano.tensor import std [as alias]
def normalize_batch_in_training(x, gamma, beta,
                                reduction_axes, epsilon=1e-3):
    """Computes mean and std for batch then apply batch_normalization on batch.
    """
    # TODO remove this if statement when Theano without
    # T.nnet.bn.batch_normalization_train is deprecated
    if not hasattr(T.nnet.bn, 'batch_normalization_train'):
        return _old_normalize_batch_in_training(x, gamma, beta, reduction_axes, epsilon)

    if gamma is None:
        if beta is None:
            gamma = ones_like(x)
        else:
            gamma = ones_like(beta)
    if beta is None:
        if gamma is None:
            beta = zeros_like(x)
        else:
            beta = zeros_like(gamma)

    normed, mean, stdinv = T.nnet.bn.batch_normalization_train(
        x, gamma, beta, reduction_axes, epsilon)

    return normed, mean, T.inv(stdinv ** 2) 
Developer: Relph1119, Project: GraphicDesignPatternByPython, Lines: 25, Source: theano_backend.py

Example 2: truncated_normal

# Required import: from theano import tensor [as alias]
# Or: from theano.tensor import std [as alias]
def truncated_normal(shape, mean=0.0, stddev=1.0, dtype=None, seed=None):
    if dtype is None:
        dtype = floatx()
    if seed is None:
        seed = np.random.randint(1, 10e6)
    rng = RandomStreams(seed=seed)

    try:
        return rng.normal(size=shape, avg=mean, std=stddev, dtype=dtype,
                          truncate=True)
    except TypeError:
        normal_t = rng.normal(size=shape, avg=mean, std=stddev, dtype=dtype)
        # Poor man's truncated normal: we literally clip the tensor
        return T.clip(normal_t, mean - 2 * stddev, mean + 2 * stddev)


# Theano implementation of CTC
# Used with permission from Shawn Tan
# https://github.com/shawntan/
# Note that TensorFlow's native CTC code is significantly
# faster than this 
Developer: Relph1119, Project: GraphicDesignPatternByPython, Lines: 23, Source: theano_backend.py
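
As a usage illustration (not part of the project above), the snippet can be exercised like this, assuming truncated_normal and floatx are defined as in the example:

import numpy as np
import theano

samples_expr = truncated_normal((1000,), mean=0.0, stddev=1.0, seed=42)
f = theano.function([], samples_expr)
samples = f()
# Whether the truncate=True path or the clipping fallback ran, no sample
# should lie more than two standard deviations from the mean.
assert np.all(np.abs(samples) <= 2.0)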

Example 3: zca_whiten

# Required import: from theano import tensor [as alias]
# Or: from theano.tensor import std [as alias]
def zca_whiten(data, W, batchsize=500, use_gpu=True, verbose=True):
    data = data.astype(np.float32)
    ncases = data.shape[0]
    nbatches = (ncases - 1) // batchsize + 1  # integer division so range() works on Python 3

    data_white = np.zeros((ncases, data.shape[1]), dtype=np.float32)
    for bidx in range(nbatches):
        start = bidx * batchsize
        end = min((bidx + 1) * batchsize, ncases)
        if use_gpu:
            data[start:end] = theano_subtract_m1(data[start:end])
            data[start:end] = theano_divide_s1(data[start:end])
            data_white[start:end] = theano_dot(data[start:end], W)
        else:
            data[start:end] -= data[start:end].mean(1)[:, None]
            s1 = data[start:end].std(1)[:, None]
            data[start:end] /= s1 + s1.mean()
            data_white[start:end] = np.dot(data[start:end], W)
    return data_white 
Developer: saebrahimi, Project: Emotion-Recognition-RNN, Lines: 21, Source: pca.py
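
The GPU branch above calls theano_subtract_m1, theano_divide_s1 and theano_dot, compiled helpers defined elsewhere in the project's pca.py. A plausible minimal sketch of such helpers, assumed here for illustration and mirroring the CPU branch:

import theano
import theano.tensor as T

_X = T.matrix('X')
_W = T.matrix('W')
_s1 = _X.std(axis=1, keepdims=True)

# Hypothetical definitions; the project's actual helpers live in pca.py.
theano_subtract_m1 = theano.function([_X], _X - _X.mean(axis=1, keepdims=True))
theano_divide_s1 = theano.function([_X], _X / (_s1 + _s1.mean()))
theano_dot = theano.function([_X, _W], T.dot(_X, _W))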

Example 4: whiten

# Required import: from theano import tensor [as alias]
# Or: from theano.tensor import std [as alias]
def whiten(data, V, m0, s0, var_fracs, retain_var, nprincomps=0, batchsize=1000, use_gpu=True, verbose=True):
    data = data.astype(np.float32)
    ncases = data.shape[0]
    nbatches = (ncases - 1) // batchsize + 1  # integer division so range() works on Python 3
    if nprincomps == 0:
        nprincomps = np.where(var_fracs > retain_var)[0][0]
    data_white = np.zeros((ncases, nprincomps), dtype=np.float32)
    for bidx in range(nbatches):
        start = bidx * batchsize
        end = min((bidx + 1) * batchsize, ncases)
        data[start:end] -= data[start:end].mean(1)[:, None]
        s1 = data[start:end].std(1)[:, None]
        data[start:end] /= s1 + s1.mean()
        data[start:end] -= m0
        data[start:end] /= s0
        if use_gpu:
            data_white[start:end] = theano_dot(
                data[start:end], V[:nprincomps].T)
        else:
            data_white[start:end] = np.dot(data[start:end], V[:nprincomps].T)
    return data_white 
Developer: saebrahimi, Project: Emotion-Recognition-RNN, Lines: 23, Source: pca.py

Example 5: truncated_normal

# Required import: from theano import tensor [as alias]
# Or: from theano.tensor import std [as alias]
def truncated_normal(shape, mean=0.0, stddev=1.0, dtype=None, seed=None):
    if dtype is None:
        dtype = floatx()
    if seed is None:
        seed = np.random.randint(1, 10e6)
    rng = RandomStreams(seed=seed)
    normal_tensor = rng.normal(size=shape, avg=mean, std=stddev, dtype=dtype)
    # Poor man's truncated normal: we literally clip the tensor
    return T.clip(normal_tensor, mean - 2 * stddev, mean + 2 * stddev)


# Theano implementation of CTC
# Used with permission from Shawn Tan
# https://github.com/shawntan/
# Note that TensorFlow's native CTC code is significantly
# faster than this 
Developer: hello-sea, Project: DeepLearning_Wavelet-LSTM, Lines: 18, Source: theano_backend.py

Example 6: normalize_batch_in_training

# Required import: from theano import tensor [as alias]
# Or: from theano.tensor import std [as alias]
def normalize_batch_in_training(x, gamma, beta,
                                reduction_axes, epsilon=0.0001):
    '''Compute the mean and std for the batch, then apply batch_normalization to the batch.
    '''
    var = x.var(reduction_axes)
    mean = x.mean(reduction_axes)

    target_shape = []
    for axis in range(ndim(x)):
        if axis in reduction_axes:
            target_shape.append(1)
        else:
            target_shape.append(x.shape[axis])
    target_shape = T.stack(*target_shape)

    broadcast_mean = T.reshape(mean, target_shape)
    broadcast_var = T.reshape(var, target_shape)
    broadcast_beta = T.reshape(beta, target_shape)
    broadcast_gamma = T.reshape(gamma, target_shape)
    normed = batch_normalization(x, broadcast_mean, broadcast_var,
                                 broadcast_beta, broadcast_gamma,
                                 epsilon)
    return normed, mean, var 
Developer: GUR9000, Project: KerasNeuralFingerprint, Lines: 25, Source: theano_backend.py
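
For reference, the batch_normalization call used here follows the standard formulation: output = gamma * (x - mean) / sqrt(var + epsilon) + beta, with mean, var, gamma and beta broadcast to the shape of x.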

Example 7: truncated_normal

# Required import: from theano import tensor [as alias]
# Or: from theano.tensor import std [as alias]
def truncated_normal(shape, mean=0.0, stddev=1.0, dtype=None, seed=None):
    if dtype is None:
        dtype = floatx()
    if seed is None:
        seed = np.random.randint(1, 10e6)
    rng = RandomStreams(seed=seed)
    normal_tensor = rng.normal(size=shape, avg=mean, std=stddev, dtype=dtype)
    # Poor man's truncated normal: we literally clip the tensor
    return T.clip(normal_tensor, mean - 2 * stddev, mean + 2 * stddev)


# Theano implementation of CTC
# Used with permission from Shawn Tan
# https://github.com/shawntan/
# Note that tensorflow's native CTC code is significantly
# faster than this 
Developer: sunilmallya, Project: keras-lambda, Lines: 18, Source: theano_backend.py

Example 8: std

# Required import: from theano import tensor [as alias]
# Or: from theano.tensor import std [as alias]
def std(x, axis=None, keepdims=False):
    return T.std(x, axis=axis, keepdims=keepdims) 
Developer: lingluodlut, Project: Att-ChemdNER, Lines: 4, Source: theano_backend.py

Example 9: normalize_batch_in_training

# Required import: from theano import tensor [as alias]
# Or: from theano.tensor import std [as alias]
def normalize_batch_in_training(x, gamma, beta,
                                reduction_axes, epsilon=1e-3):
    '''Computes the mean and std for the batch, then applies batch_normalization to the batch.
    '''
    # TODO remove this if statement when Theano without
    # T.nnet.bn.batch_normalization_train is deprecated
    if not hasattr(T.nnet.bn, 'batch_normalization_train'):
        return _old_normalize_batch_in_training(x, gamma, beta, reduction_axes, epsilon)

    normed, mean, stdinv = T.nnet.bn.batch_normalization_train(
        x, gamma, beta, reduction_axes, epsilon)

    return normed, mean, T.inv(stdinv ** 2) 
Developer: lingluodlut, Project: Att-ChemdNER, Lines: 15, Source: theano_backend.py

Example 10: _old_normalize_batch_in_training

# Required import: from theano import tensor [as alias]
# Or: from theano.tensor import std [as alias]
def _old_normalize_batch_in_training(x, gamma, beta,
                                     reduction_axes, epsilon=1e-3):
    '''Computes the mean and std for the batch, then applies batch_normalization to the batch.
    '''
    dev = theano.config.device
    use_cudnn = ndim(x) < 5 and reduction_axes == [0, 2, 3] and (dev.startswith('cuda') or dev.startswith('gpu'))
    if use_cudnn:
        broadcast_beta = beta.dimshuffle('x', 0, 'x', 'x')
        broadcast_gamma = gamma.dimshuffle('x', 0, 'x', 'x')
        try:
            normed, mean, stdinv = theano.sandbox.cuda.dnn.dnn_batch_normalization_train(
                x, broadcast_gamma, broadcast_beta, 'spatial', epsilon)
            var = T.inv(stdinv ** 2)
            return normed, T.flatten(mean), T.flatten(var)
        except AttributeError:
            pass

    var = x.var(reduction_axes)
    mean = x.mean(reduction_axes)

    target_shape = []
    for axis in range(ndim(x)):
        if axis in reduction_axes:
            target_shape.append(1)
        else:
            target_shape.append(x.shape[axis])
    target_shape = T.stack(*target_shape)

    broadcast_mean = T.reshape(mean, target_shape)
    broadcast_var = T.reshape(var, target_shape)
    broadcast_beta = T.reshape(beta, target_shape)
    broadcast_gamma = T.reshape(gamma, target_shape)
    normed = batch_normalization(x, broadcast_mean, broadcast_var,
                                 broadcast_beta, broadcast_gamma,
                                 epsilon)
    return normed, mean, var


# TODO remove this if statement when Theano without
# T.nnet.bn.batch_normalization_test is deprecated 
Developer: lingluodlut, Project: Att-ChemdNER, Lines: 42, Source: theano_backend.py

Example 11: random_normal

# Required import: from theano import tensor [as alias]
# Or: from theano.tensor import std [as alias]
def random_normal(shape, mean=0.0, std=1.0, dtype=None, seed=None):
    if dtype is None:
        dtype = floatx()
    if seed is None:
        seed = np.random.randint(1, 10e6)
    rng = RandomStreams(seed=seed)
    return rng.normal(size=shape, avg=mean, std=std, dtype=dtype) 
Developer: lingluodlut, Project: Att-ChemdNER, Lines: 9, Source: theano_backend.py

Example 12: random_normal

# Required import: from theano import tensor [as alias]
# Or: from theano.tensor import std [as alias]
def random_normal(shape, mean=0.0, std=1.0, dtype=_FLOATX, seed=None):
    if seed is None:
        seed = np.random.randint(10e6)
    rng = RandomStreams(seed=seed)
    return rng.normal(size=shape, avg=mean, std=std, dtype=dtype) 
Developer: mathDR, Project: reading-text-in-the-wild, Lines: 7, Source: theano_backend.py

Example 13: get_stats

# Required import: from theano import tensor [as alias]
# Or: from theano.tensor import std [as alias]
def get_stats(input, stat=None):
    """
    Returns a dictionary mapping the name of the statistic to the result on the input.
    Currently gets mean, var, std, min, max, l1, l2.

    Parameters
    ----------
    input : tensor
        Theano tensor to grab stats for.

    Returns
    -------
    dict
        Dictionary of all the statistics expressions {string_name: theano expression}
    """
    stats = {
        'mean': T.mean(input),
        'var': T.var(input),
        'std': T.std(input),
        'min': T.min(input),
        'max': T.max(input),
        'l1': input.norm(L=1),
        'l2': input.norm(L=2),
        #'num_nonzero': T.sum(T.nonzero(input)),
    }
    stat_list = raise_to_list(stat)
    compiled_stats = {}
    if stat_list is None:
        return stats

    for stat in stat_list:
        if isinstance(stat, string_types) and stat in stats:
            compiled_stats.update({stat: stats[stat]})
    return compiled_stats 
Developer: vitruvianscience, Project: OpenDeep, Lines: 36, Source: statistics.py
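
As a usage illustration (not part of the OpenDeep source), the symbolic expressions returned by get_stats can be compiled and evaluated with theano.function; the input shape and names below are assumed for the sketch:

import numpy as np
import theano
import theano.tensor as T

x = T.matrix('x')
stats = get_stats(x)                     # dict of symbolic expressions
f = theano.function([x], [stats['mean'], stats['std'], stats['l2']])

data = np.random.randn(10, 3).astype(theano.config.floatX)
mean_val, std_val, l2_val = f(data)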


Note: The theano.tensor.std method examples in this article were compiled by 純淨天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets are selected from open-source projects contributed by various developers, and copyright remains with the original authors; please refer to the corresponding project's license before distributing or using the code. Do not reproduce without permission.