

Python numpy.max Method Code Examples

This article collects typical usage examples of Python's numpy.max method. If you are wondering how numpy.max is used in practice, or looking for concrete examples of it, the curated code samples below may help. You can also explore further usage examples from the numpy module it belongs to.


The following presents 15 code examples of the numpy.max method, sorted by popularity by default.
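
Before the project excerpts, here is a minimal usage sketch of numpy.max itself (whole-array maximum, per-axis maximum, and keepdims); the small array is illustrative only:

import numpy as np

a = np.array([[1, 5, 3],
              [4, 2, 6]])

print(np.max(a))                         # 6 -- maximum over the flattened array
print(np.max(a, axis=0))                 # [4 5 6] -- column-wise maximum
print(np.max(a, axis=1, keepdims=True))  # [[5], [6]] -- row-wise maximum, shape (2, 1)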

Example 1: to_radians

# Required module import: import numpy [as alias]
# Or: from numpy import max [as alias]
def to_radians(arr, is_delta=False):
    """Force data with units either degrees or radians to be radians."""
    # Infer the units from embedded metadata, if it's there.
    try:
        units = arr.units
    except AttributeError:
        pass
    else:
        if units.lower().startswith('degrees'):
            warn_msg = ("Conversion applied: degrees -> radians to array: "
                        "{}".format(arr))
            logging.debug(warn_msg)
            return np.deg2rad(arr)
    # Otherwise, assume degrees if the values are sufficiently large.
    threshold = 0.1*np.pi if is_delta else 4*np.pi
    if np.max(np.abs(arr)) > threshold:
        warn_msg = ("Conversion applied: degrees -> radians to array: "
                    "{}".format(arr))
        logging.debug(warn_msg)
        return np.deg2rad(arr)
    return arr 
Author: spencerahill, Project: aospy, Lines of code: 23, Source file: vertcoord.py
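
A brief note on the np.max usage above: np.max(np.abs(arr)) is the array's peak magnitude, used here as a unit-detection heuristic; coordinate values in degrees typically exceed 4π (about 12.57) while values already in radians do not, so a maximum above the threshold is taken to mean the data is in degrees.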

Example 2: load_RSM

# Required module import: import numpy [as alias]
# Or: from numpy import max [as alias]
def load_RSM(filename):
    om, tt, psd = xu.io.getxrdml_map(filename)
    om = np.deg2rad(om)
    tt = np.deg2rad(tt)
    wavelength = 1.54056

    q_y = (1 / wavelength) * (np.cos(tt) - np.cos(2 * om - tt))
    q_x = (1 / wavelength) * (np.sin(tt) - np.sin(2 * om - tt))

    xi = np.linspace(np.min(q_x), np.max(q_x), 100)
    yi = np.linspace(np.min(q_y), np.max(q_y), 100)
    psd[psd < 1] = 1
    data_grid = griddata(
        (q_x, q_y), psd, (xi[None, :], yi[:, None]), fill_value=1, method="cubic"
    )
    nx, ny = data_grid.shape

    range_values = [np.min(q_x), np.max(q_x), np.min(q_y), np.max(q_y)]
    output_data = (
        Panel(np.log(data_grid).reshape(nx, ny, 1), minor_axis=["RSM"])
        .transpose(2, 0, 1)
        .to_frame()
    )

    return range_values, output_data 
Author: materialsproject, Project: MPContribs, Lines of code: 27, Source file: pre_submission.py

Example 3: create_mnist

# Required module import: import numpy [as alias]
# Or: from numpy import max [as alias]
def create_mnist(tfrecord_dir, mnist_dir):
    print('Loading MNIST from "%s"' % mnist_dir)
    import gzip
    with gzip.open(os.path.join(mnist_dir, 'train-images-idx3-ubyte.gz'), 'rb') as file:
        images = np.frombuffer(file.read(), np.uint8, offset=16)
    with gzip.open(os.path.join(mnist_dir, 'train-labels-idx1-ubyte.gz'), 'rb') as file:
        labels = np.frombuffer(file.read(), np.uint8, offset=8)
    images = images.reshape(-1, 1, 28, 28)
    images = np.pad(images, [(0,0), (0,0), (2,2), (2,2)], 'constant', constant_values=0)
    assert images.shape == (60000, 1, 32, 32) and images.dtype == np.uint8
    assert labels.shape == (60000,) and labels.dtype == np.uint8
    assert np.min(images) == 0 and np.max(images) == 255
    assert np.min(labels) == 0 and np.max(labels) == 9
    onehot = np.zeros((labels.size, np.max(labels) + 1), dtype=np.float32)
    onehot[np.arange(labels.size), labels] = 1.0
    
    with TFRecordExporter(tfrecord_dir, images.shape[0]) as tfr:
        order = tfr.choose_shuffled_order()
        for idx in range(order.size):
            tfr.add_image(images[order[idx]])
        tfr.add_labels(onehot[order])

#---------------------------------------------------------------------------- 
Author: zalandoresearch, Project: disentangling_conditional_gans, Lines of code: 25, Source file: dataset_tool.py
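
Here np.max plays two roles: together with np.min it sanity-checks the value ranges of the raw MNIST pixels (0-255) and labels (0-9), and np.max(labels) + 1 sizes the one-hot label matrix to the number of classes.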

Example 4: create_mnistrgb

# Required module import: import numpy [as alias]
# Or: from numpy import max [as alias]
def create_mnistrgb(tfrecord_dir, mnist_dir, num_images=1000000, random_seed=123):
    print('Loading MNIST from "%s"' % mnist_dir)
    import gzip
    with gzip.open(os.path.join(mnist_dir, 'train-images-idx3-ubyte.gz'), 'rb') as file:
        images = np.frombuffer(file.read(), np.uint8, offset=16)
    images = images.reshape(-1, 28, 28)
    images = np.pad(images, [(0,0), (2,2), (2,2)], 'constant', constant_values=0)
    assert images.shape == (60000, 32, 32) and images.dtype == np.uint8
    assert np.min(images) == 0 and np.max(images) == 255
    
    with TFRecordExporter(tfrecord_dir, num_images) as tfr:
        rnd = np.random.RandomState(random_seed)
        for idx in range(num_images):
            tfr.add_image(images[rnd.randint(images.shape[0], size=3)])

#---------------------------------------------------------------------------- 
Author: zalandoresearch, Project: disentangling_conditional_gans, Lines of code: 18, Source file: dataset_tool.py

Example 5: create_cifar100

# Required module import: import numpy [as alias]
# Or: from numpy import max [as alias]
def create_cifar100(tfrecord_dir, cifar100_dir):
    print('Loading CIFAR-100 from "%s"' % cifar100_dir)
    import pickle
    with open(os.path.join(cifar100_dir, 'train'), 'rb') as file:
        data = pickle.load(file, encoding='latin1')
    images = data['data'].reshape(-1, 3, 32, 32)
    labels = np.array(data['fine_labels'])
    assert images.shape == (50000, 3, 32, 32) and images.dtype == np.uint8
    assert labels.shape == (50000,) and labels.dtype == np.int32
    assert np.min(images) == 0 and np.max(images) == 255
    assert np.min(labels) == 0 and np.max(labels) == 99
    onehot = np.zeros((labels.size, np.max(labels) + 1), dtype=np.float32)
    onehot[np.arange(labels.size), labels] = 1.0

    with TFRecordExporter(tfrecord_dir, images.shape[0]) as tfr:
        order = tfr.choose_shuffled_order()
        for idx in range(order.size):
            tfr.add_image(images[order[idx]])
        tfr.add_labels(onehot[order])

#---------------------------------------------------------------------------- 
Author: zalandoresearch, Project: disentangling_conditional_gans, Lines of code: 23, Source file: dataset_tool.py

Example 6: convert_image

# Required module import: import numpy [as alias]
# Or: from numpy import max [as alias]
def convert_image(self, filename):
        pic = img.imread(filename)
        # Set FFT size to be double the image size so that the edge of the spectrum stays clear
        # preventing some bandfilter artifacts
        self.NFFT = 2*pic.shape[1]

        # Repeat image lines until each one comes often enough to reach the desired line time
        ffts = (np.flipud(np.repeat(pic[:, :, 0], self.repetitions, axis=0) / 16.)**2.) / 256.

        # Embed image in center bins of the FFT
        fftall = np.zeros((ffts.shape[0], self.NFFT))
        startbin = int(self.NFFT/4)
        fftall[:, startbin:(startbin+pic.shape[1])] = ffts

        # Generate random phase vectors for the FFT bins, this is important to prevent high peaks in the output
        # The phases won't be visible in the spectrum
        phases = 2*np.pi*np.random.rand(*fftall.shape)
        rffts = fftall * np.exp(1j*phases)

        # Perform the FFT per image line, then concatenate them to form the final signal
        timedata = np.fft.ifft(np.fft.ifftshift(rffts, axes=1), axis=1) / np.sqrt(float(self.NFFT))
        linear = timedata.flatten()
        linear = linear / np.max(np.abs(linear))
        return linear 
Author: polygon, Project: spectrum_painter, Lines of code: 26, Source file: spectrum_painter.py
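
The closing np.max(np.abs(linear)) is a peak-normalization step: dividing by the largest absolute sample scales the synthesized complex time-domain signal into the unit range before it is returned. The same pattern appears in the audio examples below.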

Example 7: wave2input_image

# Required module import: import numpy [as alias]
# Or: from numpy import max [as alias]
def wave2input_image(wave, window, pos=0, pad=0):
    wave_image = np.hstack([wave[pos+i*sride:pos+(i+pad*2)*sride+dif].reshape(height+pad*2, sride) for i in range(256//sride)])[:,:254]
    wave_image *= window
    spectrum_image = np.fft.fft(wave_image, axis=1)
    input_image = np.abs(spectrum_image[:,:128].reshape(1, height+pad*2, 128), dtype=np.float32)

    np.clip(input_image, 1000, None, out=input_image)
    np.log(input_image, out=input_image)
    input_image += bias
    input_image /= scale

    if np.max(input_image) > 0.95:
        print('input image max bigger than 0.95', np.max(input_image))
    if np.min(input_image) < 0.05:
        print('input image min smaller than 0.05', np.min(input_image))

    return input_image 
Author: pstuvwx, Project: Deep_VoiceChanger, Lines of code: 19, Source file: dataset.py

Example 8: forward

# Required module import: import numpy [as alias]
# Or: from numpy import max [as alias]
def forward(self, x):
        # Max-pooling forward pass over an (N, C, H, W) input.
        N, C, H, W = x.shape
        out_h = int(1 + (H - self.pool_h) / self.stride)
        out_w = int(1 + (W - self.pool_w) / self.stride)

        # Unfold each pooling window into one row, then take the row-wise maximum.
        col = im2col(x, self.pool_h, self.pool_w, self.stride, self.pad)
        col = col.reshape(-1, self.pool_h * self.pool_w)

        arg_max = np.argmax(col, axis=1)
        out = np.max(col, axis=1)
        out = out.reshape(N, out_h, out_w, C).transpose(0, 3, 1, 2)

        # Cache the input and the argmax indices for the backward pass.
        self.x = x
        self.arg_max = arg_max

        return out
Author: wdxtub, Project: deep-learning-note, Lines of code: 18, Source file: layers.py

Example 9: extract_logmel

# Required module import: import numpy [as alias]
# Or: from numpy import max [as alias]
def extract_logmel(y, sr, size=3):
    """
    extract log mel spectrogram feature
    :param y: the input signal (audio time series)
    :param sr: sample rate of 'y'
    :param size: the length (seconds) of random crop from original audio, default as 3 seconds
    :return: log-mel spectrogram feature
    """
    # normalization
    y = y.astype(np.float32)
    normalization_factor = 1 / np.max(np.abs(y))
    y = y * normalization_factor

    # random crop
    start = random.randint(0, len(y) - size * sr)
    y = y[start: start + size * sr]

    # extract log mel spectrogram
    melspectrogram = librosa.feature.melspectrogram(y=y, sr=sr, n_fft=2048, hop_length=1024, n_mels=60)
    logmelspec = librosa.power_to_db(melspectrogram)

    return logmelspec 
Author: JasonZhang156, Project: Sound-Recognition-Tutorial, Lines of code: 24, Source file: feature_extraction.py

Example 10: extract_mfcc

# Required module import: import numpy [as alias]
# Or: from numpy import max [as alias]
def extract_mfcc(y, sr, size=3):
    """
    extract MFCC feature
    :param y: np.ndarray [shape=(n,)], real-valued the input signal (audio time series)
    :param sr: sample rate of 'y'
    :param size: the length (seconds) of random crop from original audio, default as 3 seconds
    :return: MFCC feature
    """
    # normalization
    y = y.astype(np.float32)
    normalization_factor = 1 / np.max(np.abs(y))
    y = y * normalization_factor

    # random crop
    start = random.randint(0, len(y) - size * sr)
    y = y[start: start + size * sr]

    # extract MFCC features (with first- and second-order deltas)
    melspectrogram = librosa.feature.melspectrogram(y=y, sr=sr, n_fft=2048, hop_length=1024)
    mfcc = librosa.feature.mfcc(S=librosa.power_to_db(melspectrogram), n_mfcc=20)
    mfcc_delta = librosa.feature.delta(mfcc)
    mfcc_delta_delta = librosa.feature.delta(mfcc_delta)
    mfcc_comb = np.concatenate([mfcc, mfcc_delta, mfcc_delta_delta], axis=0)

    return mfcc_comb 
Author: JasonZhang156, Project: Sound-Recognition-Tutorial, Lines of code: 27, Source file: feature_extraction.py

Example 11: cortex_cmap_plot_2D

# Required module import: import numpy [as alias]
# Or: from numpy import max [as alias]
def cortex_cmap_plot_2D(the_map, zs, cmap, vmin=None, vmax=None, axes=None, triangulation=None):
    '''
    cortex_cmap_plot_2D(map, zs, cmap, axes) plots the given cortical map values zs on the given
      axes using the given color map and yields the resulting polygon collection object.
    cortex_cmap_plot_2D(map, zs, cmap) uses matplotlib.pyplot.gca() for the axes.

    The following options may be passed:
      * triangulation (None) may specify the triangulation object for the mesh if it has already
        been created; otherwise it is generated fresh.
      * axes (None) specify the axes on which to plot; if None, then matplotlib.pyplot.gca() is
        used. If Ellipsis, then a tuple (triangulation, z, cmap) is returned; to recreate the plot,
        one would call:
          axes.tripcolor(triangulation, z, cmap, shading='gouraud', vmin=vmin, vmax=vmax)
      * vmin (default: None) specifies the minimum value for scaling the property when one is passed
        as the color option. None means to use the min value of the property.
      * vmax (default: None) specifies the maximum value for scaling the property when one is passed
        as the color option. None means to use the max value of the property.
    '''
    if triangulation is None:
        triangulation = matplotlib.tri.Triangulation(the_map.coordinates[0], the_map.coordinates[1],
                                                     triangles=the_map.tess.indexed_faces.T)
    if axes is Ellipsis: return (triangulation, zs, cmap)
    return axes.tripcolor(triangulation, zs, cmap=cmap, shading='gouraud', vmin=vmin, vmax=vmax) 
Author: noahbenson, Project: neuropythy, Lines of code: 25, Source file: core.py

Example 12: db

# Required module import: import numpy [as alias]
# Or: from numpy import max [as alias]
def db(audio):
    if len(audio.shape) > 1:
        maxx = np.max(np.abs(audio), axis=1)
        return 20 * np.log10(maxx) if np.any(maxx != 0) else np.array([0])
    maxx = np.max(np.abs(audio))
    return 20 * np.log10(maxx) if maxx != 0 else np.array([0]) 
Author: rtaori, Project: Black-Box-Audio, Lines of code: 8, Source file: run_audio_attack.py
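
In this decibel helper, np.max(np.abs(audio)) extracts the peak amplitude (per row via axis=1 when the input is 2-D) and converts it to decibels as 20 * log10(peak), falling back to np.array([0]) in the all-zero case to avoid log10(0).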

Example 13: get_new_pop

# Required module import: import numpy [as alias]
# Or: from numpy import max [as alias]
def get_new_pop(elite_pop, elite_pop_scores, pop_size):
    scores_logits = np.exp(elite_pop_scores - elite_pop_scores.max()) 
    elite_pop_probs = scores_logits / scores_logits.sum()
    cand1 = elite_pop[np.random.choice(len(elite_pop), p=elite_pop_probs, size=pop_size)]
    cand2 = elite_pop[np.random.choice(len(elite_pop), p=elite_pop_probs, size=pop_size)]
    mask = np.random.rand(pop_size, elite_pop.shape[1]) < 0.5 
    next_pop = mask * cand1 + (1 - mask) * cand2
    return next_pop 
Author: rtaori, Project: Black-Box-Audio, Lines of code: 10, Source file: run_audio_attack.py

Example 14: posterior

# Required module import: import numpy [as alias]
# Or: from numpy import max [as alias]
def posterior(self, psi):
        """
        Class-posterior estimation.

        Parameters
        ----------
        psi : array
            weighted data-classifier output (N samples by K classes)

        Returns
        -------
        pyx : array
            class-posterior estimation (N samples by K classes)

        """
        # Data shape
        N, K = psi.shape

        # Preallocate array
        pyx = np.zeros((N, K))

        # Subtract maximum value for numerical stability
        psi = (psi.T - np.max(psi, axis=1).T).T

        # Loop over classes
        for k in range(K):

            # Estimate posterior p^(Y=y | x_i)
            pyx[:, k] = np.exp(psi[:, k]) / np.sum(np.exp(psi), axis=1)

        return pyx 
Author: wmkouw, Project: libTLDA, Lines of code: 33, Source file: rba.py
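
The np.max subtraction in posterior() is the standard numerical-stability trick for softmax-like expressions: exp(psi_k - m) / sum_j exp(psi_j - m) equals exp(psi_k) / sum_j exp(psi_j) for any per-row constant m, so subtracting the row maximum leaves the posteriors unchanged while keeping every exponent at or below zero and preventing overflow in np.exp.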

Example 15: predict

# Required module import: import numpy [as alias]
# Or: from numpy import max [as alias]
def predict(self, Z):
        """
        Make predictions on new dataset.

        Parameters
        ----------
        Z : array
            new data set (M samples by D features)

        Returns
        -------
        preds : array
            label predictions (M samples by 1)

        """
        # Data shape
        M, D = Z.shape

        # If classifier is trained, check for same dimensionality
        if self.is_trained:
            if not self.train_data_dim == D:
                raise ValueError('''Test data is of different dimensionality
                                 than training data.''')

        # Compute posteriors
        post = self.predict_proba(Z)

        # Predictions through max-posteriors
        preds = np.argmax(post, axis=1)

        # Map predictions back to original labels
        return self.classes[preds] 
Author: wmkouw, Project: libTLDA, Lines of code: 34, Source file: rba.py


Note: The numpy.max method examples in this article were compiled by 純淨天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets were selected from open-source projects contributed by various developers, and copyright of the source code remains with the original authors. For redistribution and use, please refer to the corresponding project's License; do not reproduce without permission.