

Python librosa.frames_to_time Method Code Examples

This article collects typical usage examples of the Python method librosa.frames_to_time. If you are unsure what librosa.frames_to_time does, how to call it, or what real-world usage looks like, the curated code examples below should help. You can also explore further usage examples from the librosa package.


The following presents 9 code examples of the librosa.frames_to_time method, sorted by popularity by default.
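Before the examples, here is a minimal, self-contained sketch (not taken from any of the projects below; the audio path is a placeholder) showing what librosa.frames_to_time computes: it maps frame indices to timestamps in seconds via time = frames * hop_length / sr, with librosa's defaults sr=22050 and hop_length=512.

import numpy as np
import librosa

# Load a short audio clip (the path is a placeholder for illustration).
y, sr = librosa.load('example.wav', sr=22050)

# Detect onsets as frame indices, then convert those indices to seconds.
onset_frames = librosa.onset.onset_detect(y=y, sr=sr, hop_length=512)
onset_times = librosa.frames_to_time(onset_frames, sr=sr, hop_length=512)

# The conversion is simply frames * hop_length / sr.
assert np.allclose(onset_times, onset_frames * 512 / sr)
print(onset_times)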

Example 1: _get_beats

# Required module import: import librosa [as alias]
# Or: from librosa import frames_to_time [as alias]
def _get_beats(self):
        """
        Gets beats using librosa's beat tracker.
        """
        _, beat_frames = librosa.beat.beat_track(
            y=self.analysis_samples, sr=self.analysis_sample_rate, trim=False
        )

        # pad beat times to full duration
        f_max = librosa.time_to_frames(self.duration, sr=self.analysis_sample_rate)
        beat_frames = librosa.util.fix_frames(beat_frames, x_min=0, x_max=f_max)

        # convert frames to times
        beat_times = librosa.frames_to_time(beat_frames, sr=self.analysis_sample_rate)

        # make the list of (start, duration) tuples that TimingList expects
        starts_durs = [(s, t - s) for (s, t) in zip(beat_times, beat_times[1:])]

        return starts_durs 
Author: algorithmic-music-exploration | Project: amen | Lines: 21 | Source file: audio.py
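A brief note on the pairing idiom used above (and again in Example 2): zipping beat_times with beat_times[1:] turns a list of event times into (start, duration) pairs, dropping the final event because it has no successor. A tiny illustration with made-up times:

beat_times = [0.0, 0.5, 1.0, 1.5]  # illustrative beat times in seconds
starts_durs = [(s, t - s) for (s, t) in zip(beat_times, beat_times[1:])]
print(starts_durs)  # [(0.0, 0.5), (0.5, 0.5), (1.0, 0.5)]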

Example 2: _get_segments

# Required module import: import librosa [as alias]
# Or: from librosa import frames_to_time [as alias]
def _get_segments(self):
        """
        Gets Echo Nest style segments using librosa's onset detection and backtracking.
        """

        onset_frames = librosa.onset.onset_detect(
            y=self.analysis_samples, sr=self.analysis_sample_rate, backtrack=True
        )
        segment_times = librosa.frames_to_time(
            onset_frames, sr=self.analysis_sample_rate
        )

        # make the list of (start, duration) tuples that TimingList expects
        starts_durs = [(s, t - s) for (s, t) in zip(segment_times, segment_times[1:])]

        return starts_durs 
Author: algorithmic-music-exploration | Project: amen | Lines: 18 | Source file: audio.py

Example 3: _convert_to_dataframe

# Required module import: import librosa [as alias]
# Or: from librosa import frames_to_time [as alias]
def _convert_to_dataframe(cls, feature_data, columns):
        """
        Take raw librosa feature data, convert to a pandas dataframe.

        Parameters
        ----------
        feature_data: numpy array
            an N-by-T array, where N is the number of features and T is the number of time frames

        columns: list [strings]
            a list of column names of length N, matching the N dimension of feature_data

        Returns
        -------
        pandas.DataFrame
        """
        feature_data = feature_data.transpose()
        frame_numbers = np.arange(len(feature_data))
        indexes = librosa.frames_to_time(frame_numbers)
        indexes = pd.to_timedelta(indexes, unit='s')
        data = pd.DataFrame(data=feature_data, index=indexes, columns=columns)
        return data 
Author: algorithmic-music-exploration | Project: amen | Lines: 24 | Source file: audio.py
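The same pattern can be used outside the class; the hedged sketch below builds a time-indexed pandas DataFrame from an MFCC matrix (the audio path and column names are illustrative, not from the amen project):

import numpy as np
import pandas as pd
import librosa

y, sr = librosa.load('example.wav')                  # placeholder path
mfcc = librosa.feature.mfcc(y=y, sr=sr, n_mfcc=4)    # shape (N, T)

columns = ['mfcc_{}'.format(i) for i in range(mfcc.shape[0])]
frame_numbers = np.arange(mfcc.shape[1])
index = pd.to_timedelta(librosa.frames_to_time(frame_numbers, sr=sr), unit='s')
df = pd.DataFrame(data=mfcc.T, index=index, columns=columns)
print(df.head())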

Example 4: run

# Required module import: import librosa [as alias]
# Or: from librosa import frames_to_time [as alias]
def run(self):
        while self.recorder.start_time is None:
            time.sleep(1)
        self.current_b = time.time()
        self.start_time = self.recorder.start_time
        while self.running.isSet():
            if len(self.audio_data) < 4 * self.sr:
                time.sleep(.5)
                self.logger.debug("The data is not enough...")
                continue
            start_samples = len(self.audio_data) - self.rec_size if len(self.audio_data) > self.rec_size else 0
            data = np.array(self.audio_data[start_samples:]).astype(np.float32)
            start_time = start_samples / self.sr
            tempo, _beat_frames = librosa.beat.beat_track(y=data, sr=self.sr)
            beat_times = librosa.frames_to_time(_beat_frames, sr=self.sr) + start_time + self.start_time

            if len(beat_times) < 5:
                self.logger.debug("The beats count <%d> is not enough..."%len(beat_times))
                continue
            
            self.expected_k, self.expected_b = np.polyfit(range(len(beat_times)), beat_times, 1)
Author: mhy12345 | Project: rcaudio | Lines: 23 | Source file: beat_analyzer.py
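For context, the np.polyfit call above fits beat time as a linear function of beat index, so expected_k approximates the beat period in seconds and expected_b the time of beat zero. A hedged sketch of how a future beat time could be extrapolated from that fit (the values are made up):

# Assume these came from np.polyfit(range(len(beat_times)), beat_times, 1)
expected_k, expected_b = 0.5, 12.3   # ~120 BPM, first beat near t = 12.3 s

def predict_beat_time(beat_index):
    # Linear model: beat_time ≈ expected_k * beat_index + expected_b
    return expected_k * beat_index + expected_b

print(predict_beat_time(10))   # estimated time of beat index 10 -> 17.3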

Example 5: __init__

# Required module import: import librosa [as alias]
# Or: from librosa import frames_to_time [as alias]
def __init__(self, loadedAudio):
        self.wav = loadedAudio[0]
        self.samplefreq = loadedAudio[1]
        #If imported as 16-bit, convert to floating 32-bit ranging from -1 to 1
        if (self.wav.dtype == 'int16'):
            self.wav = self.wav/(2.0**15)
        self.channels = 1  #Assumes mono, if stereo then 2 (found by self.wav.shape[1])
        self.sample_points = self.wav.shape[0]
        self.audio_length_seconds = self.sample_points/self.samplefreq
        self.time_array_seconds = np.arange(0, self.sample_points, 1)/self.samplefreq
        self.tempo_bpm, self.beat_frames = librosa.beat.beat_track(y=self.wav, sr=self.samplefreq)
        #Transform beat array into seconds (these are the times when the beat hits)
        self.beat_times = librosa.frames_to_time(self.beat_frames, sr=self.samplefreq)
        #Get the rolloff frequency - the frequency at which the loudness drops off by 90%, like a low pass filter
        self.rolloff_freq = np.mean(librosa.feature.spectral_rolloff(y=self.wav, sr=self.samplefreq, hop_length=512, roll_percent=0.9)) 
Author: nlinc1905 | Project: Convolutional-Autoencoder-Music-Similarity | Lines: 18 | Source file: 02_wav_features_and_spectrogram.py

Example 6: estimate_beats

# Required module import: import librosa [as alias]
# Or: from librosa import frames_to_time [as alias]
def estimate_beats(self):
        """Estimates the beats using librosa.

        Returns
        -------
        times: np.array
            Times of estimated beats in seconds.
        frames: np.array
            Frame indices of estimated beats.
        """
        # Compute harmonic-percussive source separation if needed
        if self._audio_percussive is None:
            self._audio_harmonic, self._audio_percussive = self.compute_HPSS()

        # Compute beats
        tempo, frames = librosa.beat.beat_track(
            y=self._audio_percussive, sr=self.sr,
            hop_length=self.hop_length)

        # To times
        times = librosa.frames_to_time(frames, sr=self.sr,
                                       hop_length=self.hop_length)

        # TODO: Is this really necessary?
        if len(times) > 0 and times[0] == 0:
            times = times[1:]
            frames = frames[1:]

        return times, frames 
Author: urinieto | Project: msaf | Lines: 31 | Source file: base.py

Example 7: _compute_framesync_times

# Required module import: import librosa [as alias]
# Or: from librosa import frames_to_time [as alias]
def _compute_framesync_times(self):
        """Computes the framesync times based on the framesync features."""
        self._framesync_times = librosa.core.frames_to_time(
            np.arange(self._framesync_features.shape[0]), sr=self.sr,
            hop_length=self.hop_length)
Author: urinieto | Project: msaf | Lines: 7 | Source file: base.py

Example 8: get_duration

# Required module import: import librosa [as alias]
# Or: from librosa import frames_to_time [as alias]
def get_duration(self, filename, sr):  #pylint: disable=invalid-name
    ''' time in second '''
    if filename.endswith('.npy'):
      nframe = np.load(filename).shape[0]
      return librosa.frames_to_time(
          nframe, hop_length=self._winstep * sr, sr=sr)

    if filename.endswith('.wav'):
      return librosa.get_duration(filename=filename)

    raise ValueError("filename suffix not .npy or .wav: {}".format(
        os.path.splitext(filename)[-1])) 
Author: didi | Project: delta | Lines: 14 | Source file: speech_cls_task.py
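In this snippet, self._winstep appears to be the feature frame step in seconds, so hop_length is passed to frames_to_time in samples. A hedged, standalone sketch of the equivalent arithmetic with made-up numbers:

import librosa

sr = 16000        # illustrative sample rate
winstep = 0.010   # assumed frame step in seconds
nframe = 500      # number of feature frames, e.g. np.load(filename).shape[0]

# With hop_length given in samples: duration = nframe * hop_length / sr
duration = librosa.frames_to_time(nframe, hop_length=winstep * sr, sr=sr)
print(duration)   # 5.0 seconds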

Example 9: beat_track

# Required module import: import librosa [as alias]
# Or: from librosa import frames_to_time [as alias]
def beat_track(infile, outfile):

    # Load the audio file
    y, sr = librosa.load(infile)

    # Compute the track duration
    track_duration = librosa.get_duration(y=y, sr=sr)

    # Extract tempo and beat estimates
    tempo, beat_frames = librosa.beat.beat_track(y=y, sr=sr)

    # Convert beat frames to time
    beat_times = librosa.frames_to_time(beat_frames, sr=sr)

    # Construct a new JAMS object and annotation records
    jam = jams.JAMS()

    # Store the track duration
    jam.file_metadata.duration = track_duration

    beat_a = jams.Annotation(namespace='beat')
    beat_a.annotation_metadata = jams.AnnotationMetadata(data_source='librosa beat tracker')

    # Add beat timings to the annotation record.
    # The beat namespace does not require value or confidence fields,
    # so we can leave those blank.
    for t in beat_times:
        beat_a.append(time=t, duration=0.0)

    # Store the new annotation in the jam
    jam.annotations.append(beat_a)

    # Add tempo estimation to the annotation.
    tempo_a = jams.Annotation(namespace='tempo', time=0, duration=track_duration)
    tempo_a.annotation_metadata = jams.AnnotationMetadata(data_source='librosa tempo estimator')

    # The tempo estimate is global, so it should start at time=0 and cover the full
    # track duration.
    # If we had a likelihood score on the estimation, it could be stored in
    # `confidence`.  Since we have no competing estimates, we'll set it to 1.0.
    tempo_a.append(time=0.0,
                   duration=track_duration,
                   value=tempo,
                   confidence=1.0)

    # Store the new annotation in the jam
    jam.annotations.append(tempo_a)

    # Save to disk
    jam.save(outfile) 
Author: marl | Project: jams | Lines: 52 | Source file: example_beat.py
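A hedged usage sketch for the function above; the input and output paths are placeholders:

if __name__ == '__main__':
    # Analyze a local audio file and write the beat and tempo annotations as a JAMS file.
    beat_track('example.wav', 'example_beats.jams')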


Note: the librosa.frames_to_time examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other open-source code and documentation platforms. The snippets are drawn from open-source projects contributed by their respective authors, who retain copyright over the source code; consult each project's License before redistributing or reusing it. Do not reproduce without permission.