

Python Stream.select Method Code Examples

This article collects typical usage examples of the Python method obspy.core.Stream.select. If you are unsure what Stream.select does, how to call it, or how it is used in practice, the curated code examples below may help. You can also explore further usage examples of obspy.core.Stream, the class this method belongs to.


Seven Stream.select code examples are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
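
Before diving into the examples, here is a minimal standalone sketch of the basic call, using ObsPy's built-in demo data (not taken from any of the examples below). Stream.select returns a new Stream containing only the matching traces and leaves the original Stream untouched:

from obspy import read

st = read()                        # ObsPy's bundled example Stream: BW.RJOB with channels EHZ, EHN, EHE
z_only = st.select(channel="*Z")   # new Stream holding only the vertical-component trace
print(len(st), len(z_only))        # 3 1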

Example 1: get_timeseries

# Required import: from obspy.core import Stream [as alias]
# Or: from obspy.core.Stream import select [as alias]
    def get_timeseries(self, starttime, endtime, observatory=None,
            channels=None, type=None, interval=None):
        """Implements get_timeseries

        Notes: Calls IMFV283Factory.parse_string in place of
            IMFV283Factory.get_timeseries.
        """
        observatory = observatory or self.observatory
        channels = channels or self.channels
        self.criteria_file_name = observatory + '.sc'
        timeseries = Stream()
        output = self._retrieve_goes_messages(starttime, endtime, observatory)
        timeseries += self.parse_string(output)
        # merge channel traces for multiple days
        timeseries.merge()
        # trim to requested start/end time
        timeseries.trim(starttime, endtime)
        # output the number of points we read for logging
        if len(timeseries):
            print("Read %s points from %s" % (timeseries[0].stats.npts,
                observatory), file=sys.stderr)

        self._post_process(timeseries)
        if observatory is not None:
            timeseries = timeseries.select(station=observatory)

        return timeseries
Developer: erigler-usgs, Project: geomag-algorithms, Lines: 29, Source file: GOESIMFV283Factory.py

Example 2: add

# Required import: from obspy.core import Stream [as alias]
# Or: from obspy.core.Stream import select [as alias]
    def add(self, stream, verbose=False):
        """
        Process all traces with compatible information and add their spectral
        estimates to the histogram containing the probabilistic psd.
        Also ensures that no piece of data is inserted twice.

        :type stream: :class:`~obspy.core.stream.Stream` or
                :class:`~obspy.core.trace.Trace`
        :param stream: Stream or trace with data that should be added to the
                probabilistic psd histogram.
        :returns: True if appropriate data were found and the ppsd statistics
                were changed, False otherwise.
        """
        # return later if any changes were applied to the ppsd statistics
        changed = False
        # prepare the list of traces to go through
        if isinstance(stream, Trace):
            stream = Stream([stream])
        # select appropriate traces
        stream = stream.select(id=self.id,
                               sampling_rate=self.sampling_rate)
        # save information on available data and gaps
        self.__insert_data_times(stream)
        self.__insert_gap_times(stream)
        # merge depending on skip_on_gaps set during __init__
        stream.merge(self.merge_method, fill_value=0)

        for tr in stream:
            # the following check should not be necessary due to the select()..
            if not self.__sanity_check(tr):
                msg = "Skipping incompatible trace."
                warnings.warn(msg)
                continue
            t1 = tr.stats.starttime
            t2 = tr.stats.endtime
            while t1 + PPSD_LENGTH <= t2:
                if self.__check_time_present(t1):
                    msg = "Already covered time spans detected (e.g. %s), " + \
                          "skipping these slices."
                    msg = msg % t1
                    warnings.warn(msg)
                else:
                    # throw warnings if trace length is different
                    # than one hour..!?!
                    slice = tr.slice(t1, t1 + PPSD_LENGTH)
                    # XXX not good, should be working in place somehow
                    # XXX how to do it with the padding, though?
                    success = self.__process(slice)
                    if success:
                        self.__insert_used_time(t1)
                        if verbose:
                            print(t1)
                        changed = True
                t1 += PPSD_STRIDE  # advance half an hour

            # enforce time limits, pad zeros if gaps
            #tr.trim(t, t+PPSD_LENGTH, pad=True)
        return changed
Developer: kasra-hosseini, Project: obspy, Lines: 60, Source file: psd.py
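
For context, a hedged sketch of how this PPSD workflow looks in current ObsPy, where the class lives in obspy.signal and takes an Inventory as metadata; the file names are placeholders, not part of the excerpt above:

from obspy import read, read_inventory
from obspy.signal import PPSD

st = read("waveforms.mseed")           # placeholder file name
inv = read_inventory("stations.xml")   # placeholder file name
tr = st[0]
ppsd = PPSD(tr.stats, metadata=inv)    # the id and sampling_rate to match are taken from tr.stats
ppsd.add(st)                           # filters incompatible traces via Stream.select, as in the excerpt above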

Example 3: _format_data

# Required import: from obspy.core import Stream [as alias]
# Or: from obspy.core.Stream import select [as alias]
    def _format_data(self, timeseries, channels, stats):
        """Format all data lines.

        Parameters
        ----------
            timeseries : obspy.core.Stream
                Stream containing traces with channel listed in channels
            channels : sequence
                List and order of channel values to output.

        Returns
        -------
        str
            A string formatted to be the data lines in a PCDCP file.
        """
        buf = []

        # create new stream
        timeseriesLocal = Stream()
        # Use a copy of the trace so that we don't modify the original.
        for trace in timeseries:
            traceLocal = trace.copy()
            if traceLocal.stats.channel == "D":
                traceLocal.data = ChannelConverter.get_minutes_from_radians(traceLocal.data)

            # TODO - we should look into multiplying the trace all at once
            # like this, but this gives an error on Windows at the moment.
            # traceLocal.data = \
            #     numpy.round(numpy.multiply(traceLocal.data, 100)).astype(int)

            timeseriesLocal.append(traceLocal)

        traces = [timeseriesLocal.select(channel=c)[0] for c in channels]
        starttime = float(traces[0].stats.starttime)
        delta = traces[0].stats.delta

        for i in range(len(traces[0].data)):
            buf.append(
                self._format_values(
                    datetime.utcfromtimestamp(starttime + i * delta), (t.data[i] for t in traces), stats
                )
            )

        return "".join(buf)
Developer: jmfee-usgs, Project: geomag-algorithms, Lines: 46, Source file: PCDCPWriter.py
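
The per-channel lookup in this example is a common Stream.select pattern; a minimal sketch, assuming a Stream st that holds exactly one trace per requested channel:

channels = ["H", "E", "Z", "F"]
# select() returns a (possibly empty) Stream, so indexing [0] raises IndexError if a channel is missing
traces = [st.select(channel=c)[0] for c in channels]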

Example 4: corrblock

# Required import: from obspy.core import Stream [as alias]
# Or: from obspy.core.Stream import select [as alias]
def corrblock(block,dir,corrname,rank,ofid=None):
    """
    Receives a block with station pairs
    Loops through those station pairs
    Checks if the required station data are already
    in memory by checking the list of ids in memory;
    if not, read the data and enter them into the list of ids in memory
    then assigns the two traces in question to dat1 and dat2.
    These are passed on to the stacking routine, which passes
    back the correlation stack and writes it to a SAC file.
    Metadata are written to the SAC header.
    
    input:
    inp, python dict object: a dictionary containing the input read in from 
    the xmlinput file
    block, python list object: a list of tuples where every tuple is two 
    station ids of stations that should be correlated
    dir: Directory to write to; needed so that every rank can write to its own 
    directory
    ofid: output file id
    verbose: talk or shut up
    
    output:
    None
    
    """
    print('Rank %g: Working on a block of station pairs...\n' %rank,file=None)
    
    
    datstr=Stream()
    idlist=list()
    verbose=inp.verbose
    
#==============================================================================
    #- Get some information needed for the cross correlation   
#============================================================================== 
    
    
    cha=inp.channel
    comp=inp.components
    mix_cha=inp.mix_cha
    

    for pair in block:
        str1=Stream()
        str2=Stream()
        id1 = pair[0]
        id2 = pair[1]
        
        if comp=='Z':
            
            id1 = [id1+cha+'Z']
            id2 = [id2+cha+'Z']
            
        elif comp=='RT' or comp=='R' or comp=='T':
            id1=[id1+cha+'E', id1+cha+'N', id1+cha+'1', id1+cha+'2']
            id2=[id2+cha+'E', id2+cha+'N', id2+cha+'1', id2+cha+'2']
            
        
#==============================================================================
        #- check if data for first station is in memory
        #- if it isn't, it needs to be read in
        #- typically it should be filtered 
#==============================================================================
        for id in id1:
            
            station = id.split('.')[1]
            channel = id.split('.')[-1]
            
            if id in idlist:
                str1 += datstr.select(station=station, channel=channel).split()
            else:
                (colltr,readsuccess) = addtr(id,rank)
        
                #- add this entire trace (which contains all data of this 
                #- station that are available in this directory) to datstr and 
                #- update the idlist
                if readsuccess:
                    datstr += colltr
                    str1 += colltr.split()
                    idlist.append(id)  # append the full id string, not its individual characters
                    
                    if verbose:
                        print('Read in traces for channel '+id,file=ofid)
                    del colltr
                else:
                    if verbose:
                        print('No traces found for channel '+id,file=ofid)
                    continue
            
        
#==============================================================================
        #- Same thing for the second station, unless it's identical to the 1st
        #- check if data is in memory
        #- if it isn't, it needs to be read in
        #- typically it should be filtered        
#==============================================================================
        if id2 == id1:
            str2 = str1
        else:
#......... the rest of this function is omitted .........
Developer: echolite, Project: ANTS, Lines: 103, Source file: ant_corr.py
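
The select-then-split idiom in this example deserves a note; a minimal sketch, assuming datstr holds merged traces whose gaps are stored as masked arrays (the station and channel values are placeholders):

cached = datstr.select(station="RJOB", channel="EHZ")
str1 += cached.split()   # split() breaks gap-masked traces back into contiguous, unmasked traces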

Example 5: test_merge_streams

# Required import: from obspy.core import Stream [as alias]
# Or: from obspy.core.Stream import select [as alias]
def test_merge_streams():
    """TimeseriesUtility_test.test_merge_streams()

    confirm merge streams treats empty channels correctly
    """
    trace1 = __create_trace('H', [1, 1, 1, 1])
    trace2 = __create_trace('E', [2, numpy.nan, numpy.nan, 2])
    trace3 = __create_trace('F', [numpy.nan, numpy.nan, numpy.nan, numpy.nan])
    trace4 = __create_trace('H', [2, 2, 2, 2])
    trace5 = __create_trace('E', [3, numpy.nan, numpy.nan, 3])
    trace6 = __create_trace('F', [numpy.nan, numpy.nan, numpy.nan, numpy.nan])
    npts1 = len(trace1.data)
    npts2 = len(trace4.data)
    timeseries1 = Stream(traces=[trace1, trace2, trace3])
    timeseries2 = Stream(traces=[trace4, trace5, trace6])
    for trace in timeseries1:
        trace.stats.starttime = UTCDateTime('2018-01-01T00:00:00Z')
        trace.stats.npts = npts1
    for trace in timeseries2:
        trace.stats.starttime = UTCDateTime('2018-01-01T00:02:00Z')
        trace.stats.npts = npts2
    merged_streams1 = TimeseriesUtility.merge_streams(timeseries1)
    # Make sure the empty 'F' was not removed from stream
    assert_equals(1, len(merged_streams1.select(channel='F')))
    # Merge multiple streams with overlapping timestamps
    timeseries = timeseries1 + timeseries2

    merged_streams = TimeseriesUtility.merge_streams(timeseries)
    assert_equals(len(merged_streams), len(timeseries1))
    assert_equals(len(merged_streams[0]), 6)
    assert_equals(len(merged_streams[2]), 6)
    assert_almost_equal(
            merged_streams.select(channel='H')[0].data,
            [1, 1, 2, 2, 2, 2])
    assert_almost_equal(
            merged_streams.select(channel='E')[0].data,
            [2, numpy.nan, 3, 2, numpy.nan, 3])
    assert_almost_equal(
            merged_streams.select(channel='F')[0].data,
            [numpy.nan] * 6)

    trace7 = __create_trace('H', [1, 1, 1, 1])
    trace8 = __create_trace('E', [numpy.nan, numpy.nan, numpy.nan, numpy.nan])
    trace9 = __create_trace('F', [numpy.nan, numpy.nan, numpy.nan, numpy.nan])
    timeseries3 = Stream(traces=[trace7, trace8, trace9])
    npts3 = len(trace7.data)
    for trace in timeseries3:
        trace.stats.starttime = UTCDateTime('2018-01-01T00:00:00Z')
        trace.stats.npts = npts3
    merged_streams3 = TimeseriesUtility.merge_streams(timeseries3)
    assert_equals(len(timeseries3), len(merged_streams3))
    assert_almost_equal(
            timeseries3.select(channel='H')[0].data,
            [1, 1, 1, 1])
    assert_equals(
            numpy.isnan(timeseries3.select(channel='E')[0].data).all(),
            True)
    assert_equals(
            numpy.isnan(timeseries3.select(channel='F')[0].data).all(),
            True)

    trace10 = __create_trace('H', [1, 1, numpy.nan, numpy.nan, 1, 1])
    trace11 = __create_trace('H', [2, 2, 2, 2])
    trace10.stats.starttime = UTCDateTime('2018-01-01T00:00:00Z')
    trace11.stats.starttime = UTCDateTime('2018-01-01T00:01:00Z')
    timeseries4 = Stream(traces=[trace10, trace11])
    merged4 = TimeseriesUtility.merge_streams(timeseries4)
    assert_equals(len(merged4[0].data), 6)
    assert_almost_equal(
        merged4.select(channel='H')[0].data,
        [1, 2, 2, 2, 1, 1])
Developer: erigler-usgs, Project: geomag-algorithms, Lines: 73, Source file: TimeseriesUtility_test.py

Example 6: coincidenceTrigger

# Required import: from obspy.core import Stream [as alias]
# Or: from obspy.core.Stream import select [as alias]
    try:
        tmp = client.getWaveform("CH", station, "", "[EH]HZ", t, t2,
                                 metadata=True)
    except Exception:
        print(station, "---")
        continue
    st += tmp

st.taper()
st.filter("bandpass", freqmin=1, freqmax=20)
triglist = coincidenceTrigger("recstalta", 10, 2, st, 4, sta=0.5, lta=10)
print(len(triglist), "events triggered.")

for trig in triglist:
    closest_sta = trig['stations'][0]
    tr = st.select(station=closest_sta)[0]
    trig['latitude'] = tr.stats.coordinates.latitude
    trig['longitude'] = tr.stats.coordinates.longitude

paz_wa = {'sensitivity': 2800, 'zeros': [0j], 'gain': 1,
          'poles': [-6.2832-4.7124j, -6.2832+4.7124j]}

for trig in triglist:
    t = trig['time']
    print "#" * 80
    print "Trigger time:", t
    mags = []

    stations = client.getStations(t, t + 300, "CH")

    for station in stations:
Developer: kasra-hosseini, Project: obspy, Lines: 33, Source file: advanced_exercise_solution_5.py
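
Example 6 uses the old camelCase trigger API; in current ObsPy the same call is spelled coincidence_trigger. A hedged sketch, keeping the parameters from the excerpt:

from obspy.signal.trigger import coincidence_trigger

triglist = coincidence_trigger("recstalta", 10, 2, st, 4, sta=0.5, lta=10)
closest = st.select(station=triglist[0]["stations"][0])[0]   # trace of the first (closest) triggering station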

Example 7: mtinv_constrained

# Required import: from obspy.core import Stream [as alias]
# Or: from obspy.core.Stream import select [as alias]
def mtinv_constrained(input_set, st_tr, st_g, fmin, fmax, nsv=1, single_force=False,
          stat_subset=[], weighting_type=2, weights=[], cache_path='',
          force_recalc=False, cache=True, constrained_sources=None):
    '''
    Not intended for direct use, use mtinv_gs instead!
    '''
    utrw, weights_l2, S0w, df, dt, nstat, ndat, ng, nfft, nfinv = input_set

    # setup greens matrix in fourier space
    if os.path.isfile(cache_path + 'gw.pickle') and not force_recalc:
        # read G-matrix from file if exists
        gw = pickle.load(open(cache_path + 'gw.pickle', 'rb'))
        if gw.shape[-1] < nfinv:
            force_recalc = True
        else:
            gw = gw[:,:,:nfinv]

    if not os.path.isfile(cache_path + 'gw.pickle') or force_recalc:
        g = np.zeros((nstat * 3, 6 + single_force * 3, ng))
        #gw = np.zeros((nstat * 3, 6 + single_force * 3, nfft/2+1)) * 0j
        gw = np.zeros((nstat * 3, 6 + single_force * 3, nfinv)) * 0j

        for k in np.arange(nstat):
            for i in np.arange(3):
                for j in np.arange(6 + single_force * 3):
                    g[k*3 + i,j,:] = st_g.select(station='%04d' % (k + 1),
                                     channel='%02d%1d' % (i,j))[0].data
                    # fill greens matrix in freq space, deconvolve S0
                    gw[k*3 + i,j,:] = np.fft.rfft(g[k*3 + i,j,:], n=nfft) \
                                                    [:nfinv] * dt / S0w
                    

        # write G-matrix to file
        if cache:
            pickle.dump(gw, open(cache_path + 'gw.pickle', 'wb'), protocol=2)

    # setup channel subset from station subset
    if stat_subset == []:
        stat_subset = np.arange(nstat)
    else:
        stat_subset = np.array(stat_subset) - 1

    chan_subset = np.zeros(stat_subset.size*3, dtype=int)
    for i in np.arange(stat_subset.size):
        chan_subset[i*3:(i+1)*3] = stat_subset[i]*3 + np.array([0,1,2])

    # setup weighting matrix (depending on weighting scheme and apriori
    # weighting)
    
    # a priori weighting   
    if weights == []:
        weights = np.ones(nstat)
    elif len(weights) == stat_subset.size:
        weights = np.array(weights)
        buf = np.ones(nstat)
        buf[stat_subset] = weights
        weights = buf
    elif len(weights) == nstat:
        weights = np.array(weights)
    else:
        raise ValueError('argument weights has wrong length')
    
    chan_weights = np.zeros(nstat*3)
    for i in np.arange(nstat):
        chan_weights[i*3:(i+1)*3] = weights[i] + np.zeros(3)

    # l2-norm weighting
    if weighting_type == 0:
        weights_l2 *= chan_weights
        weights_l2 = np.ones(nstat*3) * (weights_l2[chan_subset].sum())**.5
    elif weighting_type == 1:
        weights_l2 = weights_l2**.5
    elif weighting_type == 2:
        for k in np.arange(nstat):
            weights_l2[k*3:k*3 + 3] = (weights_l2[k*3:k*3 + 3].sum())**.5
    else:
        raise ValueError('argument weighting_type needs to be in [0,1,2]')
   
    weights_l2 = 1./weights_l2
    weightsm = np.matrix(np.diag(weights_l2[chan_subset] *
                         chan_weights[chan_subset]**.5))
    
   
    mf = np.zeros(constrained_sources.shape[0])
    stfl = []
    stl = []

    for nn, const_source in enumerate(constrained_sources):
        stf = np.zeros(nfft // 2 + 1) * 0j

        # inversion
        for w in np.arange(nfinv):
            GM = weightsm * np.matrix(gw[[chan_subset],:,w]) * np.matrix(const_source).T
            GI = np.linalg.pinv(GM, rcond=0.00001)
            m = GI * weightsm * np.matrix(utrw[[chan_subset],w]).T
            stf[w] = m[0,0]

        # back to time domain
        stf_t = np.fft.irfft(stf)[:nfft] * df
        
#......... the rest of this function is omitted .........
Developer: obspy, Project: branches, Lines: 103, Source file: mtinv_weights_gs.py


Note: The obspy.core.Stream.select examples in this article were collected by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by many developers; copyright of the source code remains with the original authors. Please consult the license of the corresponding project before redistributing or using the code, and do not reproduce this article without permission.