

Python numpy.hamming Method Code Examples

This article collects typical usage examples of the numpy.hamming method in Python. If you are wondering how to use numpy.hamming, what it is for, or what working examples look like, the curated code examples below may help. You can also explore further usage examples from the numpy package it belongs to.


A total of 15 code examples of the numpy.hamming method are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
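
Before the examples, here is a minimal sketch of what numpy.hamming itself returns (the window length 51 is an arbitrary choice for illustration):

import numpy as np

# np.hamming(M) returns an M-point symmetric Hamming window:
# w[n] = 0.54 - 0.46 * cos(2*pi*n / (M - 1)),  n = 0 .. M - 1
window = np.hamming(51)

print(window.shape)   # (51,)
print(window[0])      # 0.08 at the edges
print(window[25])     # 1.0 at the center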

Example 1: _mfcc_and_labels

# Required import: import numpy [as alias]
# Or: from numpy import hamming [as alias]
def _mfcc_and_labels(audio, labels):
  """ Convert to MFCC features and corresponding (interpolated) labels.

  Returns:
    A tuple, `(mfcc_features, mfcc_labels)`. A 2-D float array of MFCC frames and a
      1-D label array with one entry per frame (the same length along the time axis).
  """
  mfcc_sample_rate = 100.0
  winfunc = lambda x: np.hamming(x)
  mfcc_features = python_speech_features.mfcc(audio, samplerate=timit.SAMPLE_RATE, winlen=0.025,
                                              winstep=1.0/mfcc_sample_rate, lowfreq=85.0,
                                              highfreq=timit.SAMPLE_RATE/2, winfunc=winfunc)
  t_audio = np.linspace(0.0, audio.shape[0] * 1.0 / timit.SAMPLE_RATE, audio.size, endpoint=False)
  t_mfcc = np.linspace(0.0, mfcc_features.shape[0] * 1.0 / mfcc_sample_rate, mfcc_features.shape[0], endpoint=False)
  interp_func = scipy.interpolate.interp1d(t_audio, labels, kind='nearest')
  mfcc_labels = interp_func(t_mfcc)
  return mfcc_features, mfcc_labels 
Author: rdipietro, Project: mist-rnns, Lines of code: 19, Source file: timitphonemerec.py
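
The MFCC extraction above needs python_speech_features and the project's TIMIT constants, but the label-resampling step can be sketched on its own with synthetic data (SAMPLE_RATE, the label track and the frame count below are placeholder assumptions, not values from the project):

import numpy as np
import scipy.interpolate

SAMPLE_RATE = 16000
mfcc_sample_rate = 100.0                          # one MFCC frame every 10 ms

audio = np.random.randn(SAMPLE_RATE)              # 1 s of fake audio
labels = np.repeat([0, 1], SAMPLE_RATE // 2)      # sample-level labels
n_frames = 100                                    # stand-in for the mfcc() frame count

# Map frame times back onto the sample-level label track, as the example
# does with scipy.interpolate.interp1d(kind='nearest').
t_audio = np.linspace(0.0, audio.shape[0] / SAMPLE_RATE, audio.size, endpoint=False)
t_mfcc = np.linspace(0.0, n_frames / mfcc_sample_rate, n_frames, endpoint=False)
frame_labels = scipy.interpolate.interp1d(t_audio, labels, kind='nearest')(t_mfcc)
print(frame_labels.shape)   # (100,)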

Example 2: plot_window

# Required import: import numpy [as alias]
# Or: from numpy import hamming [as alias]
def plot_window(self):
        """Plot the window in the time domain

        .. plot::
            :width: 80%
            :include-source:

            from spectrum.window import Window
            w = Window(64, name='hamming')
            w.plot_window()

        """
        from pylab import plot, xlim, grid, title, ylabel, axis, linspace
        x = linspace(0, 1, self.N)
        xlim(0, 1)
        plot(x, self.data)
        grid(True)
        title('%s Window (%s points)' % (self.name.capitalize(), self.N))
        ylabel('Amplitude')
        axis([0, 1, 0, 1.1]) 
Author: cokelaer, Project: spectrum, Lines of code: 22, Source file: window.py
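
For readers without the spectrum package, essentially the same plot can be sketched directly with numpy and matplotlib (N=64 mirrors the docstring example and is otherwise arbitrary):

import numpy as np
import matplotlib.pyplot as plt

N = 64
x = np.linspace(0, 1, N)

plt.plot(x, np.hamming(N))
plt.xlim(0, 1)
plt.grid(True)
plt.title('Hamming Window (%s points)' % N)
plt.ylabel('Amplitude')
plt.axis([0, 1, 0, 1.1])
plt.show()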

Example 3: window_hamming

# Required import: import numpy [as alias]
# Or: from numpy import hamming [as alias]
def window_hamming(N):
    r"""Hamming window

    :param N: window length


    The Hamming window is defined as

    .. math:: 0.54 - 0.46 \cos\left(\frac{2\pi n}{N-1}\right)
               \qquad 0 \leq n \leq N-1

    .. plot::
        :width: 80%
        :include-source:

        from spectrum import window_visu
        window_visu(64, 'hamming')

    .. seealso:: numpy.hamming, :func:`create_window`, :class:`Window`.
    """
    from numpy import hamming
    return hamming(N) 
Author: cokelaer, Project: spectrum, Lines of code: 24, Source file: window.py
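
A quick check that the formula in the docstring matches what numpy.hamming computes (window length 64 as in the docstring plot):

import numpy as np

N = 64
n = np.arange(N)
manual = 0.54 - 0.46 * np.cos(2 * np.pi * n / (N - 1))

# numpy.hamming implements exactly this definition.
assert np.allclose(manual, np.hamming(N))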

Example 4: spectrogram

# Required import: import numpy [as alias]
# Or: from numpy import hamming [as alias]
def spectrogram(audio):
    """Calculate magnitude spectrogram of an audio sequence. 
    
    Args: 
      audio: 1darray, audio sequence. 
      
    Returns:
      x: ndarray, spectrogram (n_time, n_freq)
    """
    n_window = cfg.n_window
    n_overlap = cfg.n_overlap
    
    ham_win = np.hamming(n_window)
    [f, t, x] = signal.spectrogram(
                    audio, 
                    window=ham_win,
                    nperseg=n_window, 
                    noverlap=n_overlap, 
                    detrend=False, 
                    return_onesided=True, 
                    mode='magnitude') 
    x = x.T
    x = x.astype(np.float32)
    return x 
Author: qiuqiangkong, Project: music_transcription_MAPS, Lines of code: 26, Source file: prepare_data.py
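
A hedged usage sketch of the same computation without the project's cfg module (n_window, n_overlap and the test tone are placeholder values), using scipy's public signal.spectrogram:

import numpy as np
from scipy import signal

n_window, n_overlap = 1024, 512                           # stand-ins for cfg values
fs = 44100
audio = np.sin(2 * np.pi * 440.0 * np.arange(fs) / fs)    # 1 s, 440 Hz tone

f, t, x = signal.spectrogram(audio, fs=fs,
                             window=np.hamming(n_window),
                             nperseg=n_window,
                             noverlap=n_overlap,
                             detrend=False,
                             return_onesided=True,
                             mode='magnitude')
x = x.T.astype(np.float32)
print(x.shape)   # (n_time, n_freq), here (85, 513)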

Example 5: _smooth

# Required import: import numpy [as alias]
# Or: from numpy import hamming [as alias]
def _smooth(params, win, type="HAMMING"):
    """Smooth a parameter track by convolving it with a Hamming window
    (or with a plain moving average for any other type)."""
    win = int(win + 0.5)
    if win >= len(params) - 1:
        win = len(params) - 1
    if win % 2 == 0:
        win += 1

    # Reflect the signal at both ends so the 'valid' convolution keeps full length.
    s = np.r_[params[win-1:0:-1], params, params[-1:-win:-1]]

    if type == "HAMMING":
        w = np.hamming(win)
        # third = int(win / 5)
        # w[:third] = 0
    else:
        w = np.ones(win)

    y = np.convolve(w / w.sum(), s, mode='valid')
    # Slice indices must be ints (win / 2 is a float on Python 3).
    return y[win // 2:-(win // 2)]
Author: CSTR-Edinburgh, Project: Ossian, Lines of code: 24, Source file: acoustic_feats.py

Example 6: audiofile_to_input_vector

# Required import: import numpy [as alias]
# Or: from numpy import hamming [as alias]
def audiofile_to_input_vector(audio_filename, numcep, numcontext):
    r"""
    Given a WAV audio file at ``audio_filename``, calculates ``numcep`` MFCC features
    at every 0.02s time step with a window length of 0.032s. Appends ``numcontext``
    context frames to the left and right of each time step, and returns this data
    in a numpy array.
    """
    # Load wav files
    fs, audio = wav.read(audio_filename)

    # Get mfcc coefficients
    features = mfcc(audio, samplerate=fs, numcep=numcep, winlen=0.032, winstep=0.02, winfunc=np.hamming)

    # Add empty initial and final contexts
    empty_context = np.zeros((numcontext, numcep), dtype=features.dtype)
    features = np.concatenate((empty_context, features, empty_context))

    return features 
Author: AASHISHAG, Project: deepspeech-german, Lines of code: 20, Source file: audio.py
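
The context-padding step can be illustrated on its own with fake MFCC frames (numcep, numcontext and the frame count here are arbitrary placeholders):

import numpy as np

numcep, numcontext = 13, 9
features = np.random.randn(120, numcep).astype(np.float32)   # fake MFCC frames

# Pad with numcontext all-zero frames on each side, as the helper does
# before context windows are sliced out elsewhere in the pipeline.
empty_context = np.zeros((numcontext, numcep), dtype=features.dtype)
padded = np.concatenate((empty_context, features, empty_context))
print(padded.shape)   # (138, 13)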

Example 7: phormants

# Required import: import numpy [as alias]
# Or: from numpy import hamming [as alias]
def phormants(x, sampling_rate):
    N = len(x)
    w = np.hamming(N)

    # Apply window and high pass filter.
    x1 = x * w
    x1 = lfilter([1], [1., 0.63], x1)

    # Get LPC.
    ncoeff = int(2 + sampling_rate / 1000)  # LPC order must be an integer
    A, e, k = lpc(x1, ncoeff)
    # A, e, k = lpc(x1, 8)

    # Get roots.
    rts = np.roots(A)
    rts = [r for r in rts if np.imag(r) >= 0]

    # Get angles.
    angz = np.arctan2(np.imag(rts), np.real(rts))

    # Get frequencies.
    frqs = sorted(angz * (sampling_rate / (2 * math.pi)))

    return frqs 
Author: tyiannak, Project: pyAudioAnalysis, Lines of code: 26, Source file: ShortTermFeatures.py

Example 8: __init__

# Required import: import numpy [as alias]
# Or: from numpy import hamming [as alias]
def __init__(self, window_duration, fs):
        self.dur = window_duration         # in seconds
        self.length = int(self.dur*fs+1)
        if not self.length % 2:    # force an odd window length
            self.length -= 1
        self.data = np.hamming(self.length)
        self.data2 = self.data**2
        self.N = int(self.dur*fs/2)
        self.half_len_vec = np.arange(self.N+1)
        self.len_vec = np.arange(-self.N, self.N+1)

        self.a0 = 0.54**2 + (0.46**2)/2
        self.a1 = 0.54*0.46
        self.a2 = (0.46**2)/4

        self.R0_diag = R_eq(0, g0, self)
        self.R2_diag = sum(self.data2*(self.len_vec**2)) 
Author: bjbschmitt, Project: AMFM_decompy, Lines of code: 19, Source file: pyQHM.py
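
The a0, a1 and a2 constants are the coefficients of the squared Hamming window rewritten as a short cosine series; a quick numerical check, with an arbitrary odd length standing in for self.length:

import numpy as np

L = 257                                   # arbitrary odd window length
n = np.arange(L)
theta = 2 * np.pi * n / (L - 1)

a0 = 0.54**2 + (0.46**2) / 2
a1 = 0.54 * 0.46
a2 = (0.46**2) / 4

# hamming(n)**2 == a0 - 2*a1*cos(theta) + 2*a2*cos(2*theta)
assert np.allclose(np.hamming(L)**2,
                   a0 - 2*a1*np.cos(theta) + 2*a2*np.cos(2*theta))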

Example 9: logfbank

# Required import: import numpy [as alias]
# Or: from numpy import hamming [as alias]
def logfbank(signal,samplerate=16000,winlen=0.025,winstep=0.01,
          nfilt=40,nfft=512,lowfreq=64,highfreq=None,dither=1.0,remove_dc_offset=True,preemph=0.97,wintype='hamming'):
    """Compute log Mel-filterbank energy features from an audio signal.

    :param signal: the audio signal from which to compute features. Should be an N*1 array
    :param samplerate: the samplerate of the signal we are working with.
    :param winlen: the length of the analysis window in seconds. Default is 0.025s (25 milliseconds)
    :param winstep: the step between successive windows in seconds. Default is 0.01s (10 milliseconds)
    :param nfilt: the number of filters in the filterbank, default 40.
    :param nfft: the FFT size. Default is 512.
    :param lowfreq: lowest band edge of mel filters. In Hz, default is 64.
    :param highfreq: highest band edge of mel filters. In Hz, default is samplerate/2
    :param preemph: apply preemphasis filter with preemph as coefficient. 0 is no filter. Default is 0.97.
    :returns: A numpy array of size (NUMFRAMES by nfilt) containing features. Each row holds 1 feature vector.
    """
    feat,energy = fbank(signal,samplerate,winlen,winstep,nfilt,nfft,lowfreq,highfreq,dither, remove_dc_offset,preemph,wintype)
    return numpy.log(feat) 
Author: ZitengWang, Project: python_kaldi_features, Lines of code: 19, Source file: base.py

Example 10: __init__

# Required import: import numpy [as alias]
# Or: from numpy import hamming [as alias]
def __init__(self, config=None, fs=16000, fft_size=512, frame_len=400, frame_shift=160, window='hamming', do_dither=True, dc_removal=False, use_gpu=False):
        self.fs = fs
        self.fft_size = fft_size
        self.frame_len = frame_len
        self.frame_shift = frame_shift
        self.window = window
        self.do_dither = do_dither
        self.dc_removal = dc_removal
        self.use_gpu = use_gpu

        if config is not None:
            for attr in config:
                setattr(self, attr, config[attr])

        self.n_bin = self.fft_size // 2 + 1   # number of non-negative frequency bins
        self.frame_overlap = self.frame_len - self.frame_shift 
Author: jzlianglu, Project: pykaldi2, Lines of code: 18, Source file: freq_analysis.py

Example 11: _logfbank_extractor

# Required import: import numpy [as alias]
# Or: from numpy import hamming [as alias]
def _logfbank_extractor(self, wav):
        # typical log fbank extraction for 16kHz speech data
        preemphasis = 0.96

        t1 = np.sum(self._window, 0)
        t1[t1 == 0] = -1
        inv = np.diag(1 / t1)
        mel = self._window.dot(inv).T

        wav = wav[1:] - preemphasis * wav[:-1]
        S = stft(wav, n_fft=512, hop_length=160, win_length=400, window=np.hamming(400), center=False).T

        spec_mag = np.abs(S)
        spec_power = spec_mag ** 2
        fbank_power = spec_power.T.dot(mel * 32768 ** 2) + 1
        log_fbank = np.log(fbank_power)

        return log_fbank 
Author: jzlianglu, Project: pykaldi2, Lines of code: 20, Source file: sr_dataset.py
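
Without the class's mel filterbank (self._window), the STFT front end can be sketched on its own; this assumes librosa provides the stft used above and feeds it one second of random 16 kHz audio:

import numpy as np
from librosa import stft

wav = np.random.randn(16000).astype(np.float32)
preemphasis = 0.96
wav = wav[1:] - preemphasis * wav[:-1]

# 25 ms Hamming window, 10 ms hop at 16 kHz, no center padding.
S = stft(wav, n_fft=512, hop_length=160, win_length=400,
         window=np.hamming(400), center=False).T
spec_power = np.abs(S) ** 2
print(spec_power.shape)   # (n_frames, 257)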

Example 12: __init__

# Required import: import numpy [as alias]
# Or: from numpy import hamming [as alias]
def __init__(self,
                 Fs=0.0,
                 corrorigin=0,
                 lagmininpts=0,
                 lagmaxinpts=0,
                 ncprefilter=None,
                 reftc=None,
                 detrendorder=1,
                 windowfunc='hamming',
                 corrweighting='none'):
        self.Fs = Fs
        self.corrorigin = corrorigin
        self.lagmininpts = lagmininpts
        self.lagmaxinpts = lagmaxinpts
        self.ncprefilter = ncprefilter
        self.reftc = reftc
        self.detrendorder = detrendorder
        self.windowfunc = windowfunc
        if self.windowfunc is not None:
            self.usewindowfunc = True
        else:
            self.usewindowfunc = False
        self.corrweighting = corrweighting
        if self.reftc is not None:
            self.setreftc(self.reftc) 
Author: bbfrederick, Project: rapidtide, Lines of code: 27, Source file: helper_classes.py

Example 13: smooth

# Required import: import numpy [as alias]
# Or: from numpy import hamming [as alias]
def smooth(params, win, type="HAMMING"):

    """
    gaussian type smoothing, convolution with hamming window
    """
    win = int(win+0.5)
    if win >= len(params)-1:
        win = len(params)-1

    if win % 2 == 0:
        win += 1

    s = np.r_[params[win-1:0:-1], params, params[-1:-win:-1]]

    if type == "HAMMING":
        w = np.hamming(win)
        # third = int(win/3)
        # w[:third] = 0
    else:
        w = np.ones(win)

    y = np.convolve(w/w.sum(), s, mode='valid')
    return y[int(win/2):-int(win/2)] 
Author: asuni, Project: wavelet_prosody_toolkit, Lines of code: 25, Source file: smooth_and_interp.py
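
A usage sketch, assuming the smooth function above is in scope (the noisy ramp is made-up test data):

import numpy as np

t = np.linspace(0, 1, 200)
noisy = t + 0.1 * np.random.randn(200)

# A 9-point Hamming window; the reflection padding keeps the output length unchanged.
smoothed = smooth(noisy, 9, type="HAMMING")
print(noisy.shape, smoothed.shape)   # (200,) (200,)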

Example 14: fade_in_and_out

# Required import: import numpy [as alias]
# Or: from numpy import hamming [as alias]
def fade_in_and_out(infile):
    """
    Add a fade in and out effect to the audio file.

    Args:
        - infile (str) : input filename/path.
    """
    # read input file
    fs, sig = read_file(filename=infile)

    # construct file names
    output_file_path = os.path.dirname(infile)
    name_attribute = "_augmented_fade_in_out.wav"

    # fade in and out
    window = np.hamming(len(sig))
    augmented_sig = window * sig
    augmented_sig /= np.mean(np.abs(augmented_sig))

    # export data to file
    write_file(output_file_path=output_file_path,
               input_file_name=infile,
               name_attribute=name_attribute,
               sig=augmented_sig,
               fs=fs) 
Author: SuperKogito, Project: pydiogment, Lines of code: 28, Source file: auga.py
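
Stripped of the pydiogment file helpers (read_file / write_file), the effect itself is just a signal-length Hamming window; a sketch with a synthetic tone:

import numpy as np

fs = 16000
sig = np.sin(2 * np.pi * 220.0 * np.arange(fs) / fs)   # hypothetical 1 s tone

window = np.hamming(len(sig))          # tapers both ends toward ~0.08
augmented_sig = window * sig
augmented_sig /= np.mean(np.abs(augmented_sig))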

Example 15: window_fft

# Required import: import numpy [as alias]
# Or: from numpy import hamming [as alias]
def window_fft(data,fft_size,step_size):
    window = np.hamming(fft_size)
    number_windows = (data.shape[0]-fft_size)//step_size
    output = np.ndarray((number_windows,fft_size),dtype=data.dtype)

    for i in range(number_windows):
        head = int(i*step_size)
        tail = int(head+fft_size)
        output[i] = data[head:tail]*window

    F = np.fft.rfft(output,axis=-1)
    return F 
Author: bill9800, Project: speech_separation, Lines of code: 14, Source file: simple_model.py
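
A usage sketch, assuming window_fft from above is in scope; a 1 kHz tone sampled at 8 kHz should peak in bin 64 of the one-sided spectrum:

import numpy as np

fs = 8000
data = np.sin(2 * np.pi * 1000.0 * np.arange(fs) / fs)

F = window_fft(data, fft_size=512, step_size=160)
print(F.shape)                    # (46, 257): (number_windows, fft_size // 2 + 1)
print(np.argmax(np.abs(F[0])))    # 64  -> 64 * fs / 512 = 1000 Hz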


Note: The numpy.hamming method examples in this article were compiled by 純淨天空 from GitHub, MSDocs, and other open-source code and documentation platforms. The code snippets were selected from open-source projects contributed by various developers; copyright of the source code remains with the original authors. Please consult the corresponding project's license before redistributing or using the code; do not reproduce without permission.