當前位置: 首頁>>代碼示例>>Python>>正文


Python librosa.get_duration方法代碼示例

本文整理匯總了Python中librosa.get_duration方法的典型用法代碼示例。如果您正苦於以下問題:Python librosa.get_duration方法的具體用法?Python librosa.get_duration怎麼用?Python librosa.get_duration使用的例子?那麼, 這裏精選的方法代碼示例或許可以為您提供幫助。您也可以進一步了解該方法所在librosa的用法示例。


在下文中一共展示了librosa.get_duration方法的15個代碼示例,這些例子默認根據受歡迎程度排序。您可以為喜歡或者感覺有用的代碼點讚,您的評價將有助於系統推薦出更棒的Python代碼示例。

示例1: __init__

# 需要導入模塊: import librosa [as 別名]
# 或者: from librosa import get_duration [as 別名]
def __init__(self, file: str, *, sample_rate: int = 44100):
        """Load an audio file and cache its samples, rate, and duration.

        Parameters
        ----------
        file
            Audio file to load
        sample_rate
            Sampling rate to decode at (default 44100 Hz).
        """
        self.file = file
        samples, rate = librosa.load(file, sr=sample_rate)
        self.samples = samples
        self.sample_rate = rate
        # Duration in seconds, derived from the decoded buffer
        self.duration = librosa.get_duration(y=samples, sr=rate)
開發者ID:scherroman,項目名稱:mugen,代碼行數:13,代碼來源:Audio.py

示例2: transform_audio

# 需要導入模塊: import librosa [as 別名]
# 或者: from librosa import get_duration [as 別名]
def transform_audio(self, y):
        '''Compute the tempogram of an audio buffer.

        Parameters
        ----------
        y : np.ndarray
            Audio buffer

        Returns
        -------
        data : dict
            data['tempogram'] : np.ndarray, shape=(n_frames, win_length)
                The tempogram
        '''
        # Target frame count is derived from the buffer's duration
        duration = get_duration(y=y, sr=self.sr)
        target_frames = self.n_frames(duration)

        raw = tempogram(y=y,
                        sr=self.sr,
                        hop_length=self.hop_length,
                        win_length=self.win_length)

        # Pad/trim to the expected length, then cast to the output dtype
        trimmed = fix_length(raw, target_frames)
        converted = to_dtype(trimmed, self.dtype)
        return {'tempogram': converted.T[self.idx]}
開發者ID:bmcfee,項目名稱:pumpp,代碼行數:24,代碼來源:rhythm.py

示例3: test_ir_convolution

# 需要導入模塊: import librosa [as 別名]
# 或者: from librosa import get_duration [as 別名]
def test_ir_convolution(ir_files, jam_fixture, n_fft, rolloff_value):
    """Exercise the IRConvolution deformer end to end."""
    deformer = muda.deformers.IRConvolution(ir_files=ir_files,
                                            n_fft=n_fft,
                                            rolloff_value=rolloff_value)

    jam_orig = deepcopy(jam_fixture)
    orig_duration = librosa.get_duration(**jam_orig.sandbox.muda['_audio'])

    for jam_new in deformer.transform(jam_orig):
        # The transform must yield new objects, not the original jam
        assert jam_new is not jam_orig

        # Compare the deformed audio against a shifted impulse
        __test_shifted_impulse(jam_orig, jam_new, ir_files, orig_duration,
                               n_fft=n_fft, rolloff_value=rolloff_value)

        # Deformer state and history bookkeeping must be intact
        __test_deformer_history(deformer, jam_new.sandbox.muda.history[-1])

    # Round-trip through (de)serialization preserves all parameters
    roundtrip = muda.deserialize(muda.serialize(deformer))
    __test_params(deformer, roundtrip)
開發者ID:bmcfee,項目名稱:muda,代碼行數:21,代碼來源:test_deformers.py

示例4: test_save

# 需要導入模塊: import librosa [as 別名]
# 或者: from librosa import get_duration [as 別名]
def test_save(jam_in, audio_file, strict, fmt):
    """Round-trip a jam + audio pair through muda.save / load_jam_audio."""
    jam = muda.load_jam_audio(jam_in, audio_file)

    # Temporary targets for the serialized annotation and audio
    _, jamfile = tempfile.mkstemp(suffix='.jams')
    _, audfile = tempfile.mkstemp(suffix='.wav')

    muda.save(audfile, jamfile, jam, strict=strict, fmt=fmt)

    reloaded = muda.load_jam_audio(jamfile, audfile, fmt=fmt)
    raw = jams.load(jamfile, fmt=fmt)

    # Clean up before asserting so failures don't leak temp files
    os.unlink(audfile)
    os.unlink(jamfile)

    # Only the muda-aware loader should attach the audio payload
    assert hasattr(reloaded.sandbox, 'muda')
    assert '_audio' in reloaded.sandbox.muda
    assert '_audio' not in raw.sandbox.muda

    # Stored metadata duration must agree with the actual audio
    duration = librosa.get_duration(**reloaded.sandbox.muda['_audio'])
    assert reloaded.file_metadata.duration == duration
開發者ID:bmcfee,項目名稱:muda,代碼行數:24,代碼來源:test_core.py

示例5: audio_files_to_numpy

# 需要導入模塊: import librosa [as 別名]
# 或者: from librosa import get_duration [as 別名]
def audio_files_to_numpy(audio_dir, list_audio_files, sample_rate, frame_length, hop_length_frame, min_duration):
    """Load audio files and stack their frames into a single numpy matrix.

    Each file in `list_audio_files` (relative to `audio_dir`) is decoded at
    `sample_rate`. Files shorter than `min_duration` seconds are skipped with
    a message. The remaining audio is cut into frames of `frame_length`
    samples with a sliding window of `hop_length_frame`, and all frames are
    stacked into one array of shape (nb_frames, frame_length).

    Raises
    ------
    ValueError
        If no file meets `min_duration` — previously this fell through to an
        opaque `np.vstack([])` error.
    """
    list_sound_array = []

    for file in list_audio_files:
        # open the audio file
        y, sr = librosa.load(os.path.join(audio_dir, file), sr=sample_rate)
        total_duration = librosa.get_duration(y=y, sr=sr)

        if total_duration >= min_duration:
            list_sound_array.append(audio_to_audio_frame_stack(
                y, frame_length, hop_length_frame))
        else:
            print(
                f"The following file {os.path.join(audio_dir,file)} is below the min duration")

    if not list_sound_array:
        # Fail with a clear message instead of np.vstack's confusing error
        raise ValueError("No audio file meets the minimum duration")

    return np.vstack(list_sound_array)
開發者ID:vbelz,項目名稱:Speech-enhancement,代碼行數:21,代碼來源:data_tools.py

示例6: audio_dur

# 需要導入模塊: import librosa [as 別名]
# 或者: from librosa import get_duration [as 別名]
def audio_dur(path, ext='', root=''):
    """Duration of an audio file in seconds; 0 if it cannot be read."""
    full_path = os.path.join(root, '{}{}'.format(path, ext))
    try:
        return lr.get_duration(filename=full_path)
    except Exception as ex:
        # Best-effort: report the failure and treat the file as empty
        print_err('could not read {}\n{}'.format(full_path, ex))
        return 0
開發者ID:hcmlab,項目名稱:vadnet,代碼行數:9,代碼來源:utils.py

示例7: randomPositionInAudio

# 需要導入模塊: import librosa [as 別名]
# 或者: from librosa import get_duration [as 別名]
def randomPositionInAudio(audio_path, duration):
    """Pick a uniformly random start offset so `duration` seconds fit in the file.

    Returns (0.0, None) when the file is not longer than `duration`,
    i.e. read from the start and take everything.
    """
    total_length = librosa.get_duration(filename=audio_path)
    if duration < total_length:
        start = np.random.uniform() * (total_length - duration)
        return start, duration
    return 0.0, None
開發者ID:Veleslavia,項目名稱:vimss,代碼行數:9,代碼來源:Input.py

示例8: transform_audio

# 需要導入模塊: import librosa [as 別名]
# 或者: from librosa import get_duration [as 別名]
def transform_audio(self, y):
        '''Compute relative and absolute time positional encodings.

        Parameters
        ----------
        y : np.ndarray
            Audio buffer

        Returns
        -------
        data : dict
            data['relative'] = np.ndarray, shape=(n_frames, 2)
            data['absolute'] = np.ndarray, shape=(n_frames, 2)

                Relative and absolute time positional encodings.
        '''
        duration = get_duration(y=y, sr=self.sr)
        n_frames = self.n_frames(duration)

        # Phase ramp 0..pi across the track; computed once for cos and sin
        phase = np.pi * np.linspace(0, 1, num=n_frames)

        relative = np.zeros((n_frames, 2), dtype=np.float32)
        relative[:, 0] = np.cos(phase)
        relative[:, 1] = np.sin(phase)

        # Absolute encoding scales with the square root of the duration
        absolute = relative * np.sqrt(duration)

        return {'relative': to_dtype(relative[self.idx], self.dtype),
                'absolute': to_dtype(absolute[self.idx], self.dtype)}
開發者ID:bmcfee,項目名稱:pumpp,代碼行數:30,代碼來源:time.py

示例9: transform_audio

# 需要導入模塊: import librosa [as 別名]
# 或者: from librosa import get_duration [as 別名]
def transform_audio(self, y):
        '''Compute the STFT magnitude and phase.

        Parameters
        ----------
        y : np.ndarray
            The audio buffer

        Returns
        -------
        data : dict
            data['mag'] : np.ndarray, shape=(n_frames, 1 + n_fft//2)
                STFT magnitude

            data['phase'] : np.ndarray, shape=(n_frames, 1 + n_fft//2)
                STFT phase
        '''
        target_frames = self.n_frames(get_duration(y=y, sr=self.sr))

        # STFT padded/trimmed to the expected frame count
        spec = fix_length(stft(y,
                               hop_length=self.hop_length,
                               n_fft=self.n_fft),
                          target_frames)

        mag, phase = magphase(spec)
        # Optional decibel compression of the magnitudes
        mag = amplitude_to_db(mag, ref=np.max) if self.log else mag

        return {'mag': to_dtype(mag.T[self.idx], self.dtype),
                'phase': to_dtype(np.angle(phase.T)[self.idx], self.dtype)}
開發者ID:bmcfee,項目名稱:pumpp,代碼行數:32,代碼來源:fft.py

示例10: transform_audio

# 需要導入模塊: import librosa [as 別名]
# 或者: from librosa import get_duration [as 別名]
def transform_audio(self, y):
        '''Compute the constant-Q transform (CQT).

        Parameters
        ----------
        y : np.ndarray
            The audio buffer

        Returns
        -------
        data : dict
            data['mag'] : np.ndarray, shape = (n_frames, n_bins)
                The CQT magnitude

            data['phase']: np.ndarray, shape = mag.shape
                The CQT phase
        '''
        target_frames = self.n_frames(get_duration(y=y, sr=self.sr))

        # 12 semitones per octave, oversampled by self.over_sample
        bins_per_octave = self.over_sample * 12
        spec = cqt(y=y, sr=self.sr, hop_length=self.hop_length,
                   fmin=self.fmin,
                   n_bins=self.n_octaves * bins_per_octave,
                   bins_per_octave=bins_per_octave)

        spec = fix_length(spec, target_frames)

        cqtm, phase = magphase(spec)
        if self.log:
            # Optional decibel compression of the magnitudes
            cqtm = amplitude_to_db(cqtm, ref=np.max)

        return {'mag': to_dtype(cqtm.T[self.idx], self.dtype),
                'phase': to_dtype(np.angle(phase).T[self.idx], self.dtype)}
開發者ID:bmcfee,項目名稱:pumpp,代碼行數:34,代碼來源:cqt.py

示例11: transform_audio

# 需要導入模塊: import librosa [as 別名]
# 或者: from librosa import get_duration [as 別名]
def transform_audio(self, y):
        '''Compute the Mel spectrogram.

        Parameters
        ----------
        y : np.ndarray
            The audio buffer

        Returns
        -------
        data : dict
            data['mag'] : np.ndarray, shape=(n_frames, n_mels)
                The Mel spectrogram
        '''
        target_frames = self.n_frames(get_duration(y=y, sr=self.sr))

        # sqrt of the power spectrogram -> magnitude Mel spectrogram
        power_spec = melspectrogram(y=y, sr=self.sr,
                                    n_fft=self.n_fft,
                                    hop_length=self.hop_length,
                                    n_mels=self.n_mels,
                                    fmax=self.fmax)
        mel = fix_length(np.sqrt(power_spec), target_frames)

        if self.log:
            # Optional decibel compression
            mel = amplitude_to_db(mel, ref=np.max)

        return {'mag': to_dtype(mel, self.dtype).T[self.idx]}
開發者ID:bmcfee,項目名稱:pumpp,代碼行數:33,代碼來源:mel.py

示例12: stat_acoustic

# 需要導入模塊: import librosa [as 別名]
# 或者: from librosa import get_duration [as 別名]
def stat_acoustic():
    """Print duration statistics for the VLSP acoustic (wav) dataset."""
    print("\nAcoustic Data:")
    wav_folder = join(ROOT_FOLDER, "data", "vlsp", "wav")
    paths = [join(wav_folder, name) for name in listdir(wav_folder)]
    durations = pd.Series([librosa.get_duration(filename=p) for p in paths])
    total = durations.sum()
    print(f"Total: {total:.2f} seconds ({total / 3600:.2f} hours)")
    print(durations.describe())
開發者ID:undertheseanlp,項目名稱:automatic_speech_recognition,代碼行數:11,代碼來源:eda_vlsp.py

示例13: predict

# 需要導入模塊: import librosa [as 別名]
# 或者: from librosa import get_duration [as 別名]
def predict(self, filename=None, y=None, sr=None, outputs=None):
        '''Predict annotations for an audio input.

        Parameters
        ----------
        filename : str (optional)
            Path to audio file

        y, sr : (optional)
            Audio buffer and sample rate

        outputs : (optional)
            Pre-computed model outputs as produced by `CremaModel.outputs`.
            If given, predictions are derived from these instead of
            `filename` or `(y, sr)`.


        .. note:: At least one of `filename`, `y, sr` must be provided.

        Returns
        -------
        jams.Annotation
            The predicted annotation
        '''
        # Compute model outputs unless the caller supplied them
        if outputs is None:
            outputs = self.outputs(filename=filename, y=y, sr=sr)

        # The prediction target is always the model's first output layer
        output_key = self.model.output_names[0]
        ann = self.pump[output_key].inverse(outputs[output_key])

        # Attach provenance metadata and the audio duration
        ann.annotation_metadata.version = self.version
        ann.annotation_metadata.annotation_tools = 'CREMA {}'.format(version)
        ann.annotation_metadata.data_source = 'program'
        ann.duration = librosa.get_duration(y=y, sr=sr, filename=filename)

        return ann
開發者ID:bmcfee,項目名稱:crema,代碼行數:43,代碼來源:base.py

示例14: duration_in_s

# 需要導入模塊: import librosa [as 別名]
# 或者: from librosa import get_duration [as 別名]
def duration_in_s(self) -> float:
        """Duration of `self.audio_file` in seconds; 0 if it cannot be read."""
        try:
            seconds = librosa.get_duration(filename=str(self.audio_file))
        except Exception as e:
            # Best-effort: report the failure and fall back to zero
            log("Failed to get duration of {}: {}".format(self.audio_file, e))
            return 0
        return seconds
開發者ID:JuliusKunze,項目名稱:speechless,代碼行數:8,代碼來源:labeled_example.py

示例15: get_duration

# 需要導入模塊: import librosa [as 別名]
# 或者: from librosa import get_duration [as 別名]
def get_duration(self, filename, sr):  #pylint: disable=invalid-name
    '''Return the duration of `filename` in seconds.

    `.npy` files are treated as frame-level features: duration is recovered
    from the frame count and the window step. `.wav` files are measured
    directly via librosa. Anything else raises ValueError.
    '''
    if filename.endswith('.npy'):
      # Number of feature frames -> seconds via the configured window step
      n_frames = np.load(filename).shape[0]
      return librosa.frames_to_time(
          n_frames, hop_length=self._winstep * sr, sr=sr)

    if filename.endswith('.wav'):
      return librosa.get_duration(filename=filename)

    raise ValueError("filename suffix not .npy or .wav: {}".format(
        os.path.splitext(filename)[-1]))
開發者ID:didi,項目名稱:delta,代碼行數:14,代碼來源:speech_cls_task.py


注:本文中的librosa.get_duration方法示例由純淨天空整理自Github/MSDocs等開源代碼及文檔管理平台,相關代碼片段篩選自各路編程大神貢獻的開源項目,源碼版權歸原作者所有,傳播和使用請參考對應項目的License;未經允許,請勿轉載。