

Python Stream.merge Method Code Examples

This article collects typical usage examples of the Python method obspy.core.Stream.merge. If you are wondering what Stream.merge does, how to call it, or what real-world code using it looks like, the curated examples below should help. You can also explore further usage examples of the containing class, obspy.core.Stream.


The following shows 15 code examples of the Stream.merge method, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
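
Before the examples, here is a minimal, self-contained sketch of what Stream.merge does (synthetic data, made-up station metadata): traces sharing the same id are joined into one continuous trace.

import numpy as np
from obspy.core import Stream, Trace, UTCDateTime

# Two 1 Hz traces on the same channel; the second overlaps the first by 5 s.
header = {"network": "XX", "station": "TEST", "channel": "HHZ",
          "sampling_rate": 1.0, "starttime": UTCDateTime(0)}
tr1 = Trace(data=np.arange(10, dtype=np.float64), header=header)
tr2 = Trace(data=np.arange(10, dtype=np.float64),
            header=dict(header, starttime=UTCDateTime(5)))

st = Stream(traces=[tr1, tr2])
st.merge(method=1, fill_value="interpolate")
print(len(st))           # 1 -- a single continuous trace
print(st[0].stats.npts)  # 15 samples covering 0-14 s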

Example 1: merge_single

# Required import: from obspy.core import Stream [as alias]
# Or: from obspy.core.Stream import merge [as alias]
def merge_single(nch,dstart,dend):
  '''Merges traces of one channel to larger traces. Used for cross-correlation'''

  # here you load all the functions you need to use
  from obspy.seg2.seg2 import readSEG2
  from obspy.core import Stream


  dataDir2 = "/import/neptun-radler/STEINACH_feb/"
  dataDir = "/import/three-data/hadzii/STEINACH/STEINACH_longtime/"
  outdir = "/home/jsalvermoser/Desktop/Processing/out_merged"

  tr = []

  # collect the trace of the requested channel from every data file
  for k in range(dstart, dend, 1):
    fname = '%d' % k
    fileName = fname + ".dat"
    st = readSEG2(dataDir + fileName)
    #st.detrend('linear')
    tr.append(st[nch-1])

  new_stream = Stream(traces=tr)
  new_stream.merge(method=1, fill_value='interpolate')

  start = new_stream[0].stats.starttime
  end = new_stream[0].stats.endtime

  timeframe = str(nch)+ "_" + str(start.year) +'.'+ str(start.julday) +'.'+ str(start.hour) +'.'+ str(start.minute) +'.'+ str(start.second) \
      +'-'+ str(end.year) +'.'+ str(end.julday) +'.'+ str(end.hour) +'.'+ str(end.minute) +'.'+ str(end.second)

  new_stream.write(outdir + "/" + timeframe + ".mseed", format="MSEED")

  return new_stream[0]
Developer: salve-, Project: Steinach, Lines: 37, Source: merge_single.py
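
A hypothetical call of the function above might look like this (channel number and file range are made up for illustration):

# Merge files 100.dat ... 199.dat of channel 3 into one trace.
tr = merge_single(nch=3, dstart=100, dend=200)
print(tr.id, tr.stats.starttime, tr.stats.endtime)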

Example 2: merge_single

# Required import: from obspy.core import Stream [as alias]
# Or: from obspy.core.Stream import merge [as alias]
def merge_single(nch,dstart,dend):
  '''Merges traces of one channel to larger traces. Used for cross-correlation'''

  # here you load all the functions you need to use
  from obspy.seg2.seg2 import readSEG2
  from obspy.core import Stream


  # directories:
  dataDir2 = "/import/neptun-radler/STEINACH_feb/"
  dataDir = "/import/three-data/hadzii/STEINACH/STEINACH_longtime/"


  tr = []

  for k in range(dstart, dend, 1):
    fname = '%d' % k
    fileName = fname + ".dat"
    st = readSEG2(dataDir + fileName)
    tr.append(st[nch-1])

  new_stream = Stream(traces=tr)
  new_stream.merge(method=1, fill_value='interpolate')


  return new_stream
Developer: salve-, Project: Steinach, Lines: 30, Source: corr_128s_CH16-CH20.py

Example 3: get_timeseries

# Required import: from obspy.core import Stream [as alias]
# Or: from obspy.core.Stream import merge [as alias]
    def get_timeseries(self, starttime, endtime, observatory=None,
            channels=None, type=None, interval=None):
        """Implements get_timeseries

        Notes: Calls IMFV283Factory.parse_string in place of
            IMFV283Factory.get_timeseries.
        """
        observatory = observatory or self.observatory
        channels = channels or self.channels
        self.criteria_file_name = observatory + '.sc'
        timeseries = Stream()
        output = self._retrieve_goes_messages(starttime, endtime, observatory)
        timeseries += self.parse_string(output)
        # merge channel traces for multiple days
        timeseries.merge()
        # trim to requested start/end time
        timeseries.trim(starttime, endtime)
        # output the number of points we read for logging
        if len(timeseries):
            print("Read %s points from %s" % (timeseries[0].stats.npts,
                observatory), file=sys.stderr)

        self._post_process(timeseries)
        if observatory is not None:
            timeseries = timeseries.select(station=observatory)

        return timeseries
Developer: erigler-usgs, Project: geomag-algorithms, Lines: 29, Source: GOESIMFV283Factory.py
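
The merge-then-trim idiom in this factory generalizes well. A minimal sketch, with a helper name of our own choosing rather than anything from the geomag-algorithms API:

from obspy.core import Stream

def merge_and_trim(timeseries, starttime, endtime):
    """Join the per-day traces of each channel, then cut to the request."""
    timeseries.merge()                   # one trace per channel id
    timeseries.trim(starttime, endtime)  # enforce the requested window
    return timeseries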

Example 4: add

# Required import: from obspy.core import Stream [as alias]
# Or: from obspy.core.Stream import merge [as alias]
    def add(self, stream, verbose=False):
        """
        Process all traces with compatible information and add their spectral
        estimates to the histogram containing the probabilistic psd.
        Also ensures that no piece of data is inserted twice.

        :type stream: :class:`~obspy.core.stream.Stream` or
                :class:`~obspy.core.trace.Trace`
        :param stream: Stream or trace with data that should be added to the
                probabilistic psd histogram.
        :returns: True if appropriate data were found and the ppsd statistics
                were changed, False otherwise.
        """
        # track whether any changes were applied to the ppsd statistics
        changed = False
        # prepare the list of traces to go through
        if isinstance(stream, Trace):
            stream = Stream([stream])
        # select appropriate traces
        stream = stream.select(id=self.id,
                               sampling_rate=self.sampling_rate)
        # save information on available data and gaps
        self.__insert_data_times(stream)
        self.__insert_gap_times(stream)
        # merge depending on skip_on_gaps set during __init__
        stream.merge(self.merge_method, fill_value=0)

        for tr in stream:
            # the following check should not be necessary due to the select()..
            if not self.__sanity_check(tr):
                msg = "Skipping incompatible trace."
                warnings.warn(msg)
                continue
            t1 = tr.stats.starttime
            t2 = tr.stats.endtime
            while t1 + PPSD_LENGTH <= t2:
                if self.__check_time_present(t1):
                    msg = "Already covered time spans detected (e.g. %s), " + \
                          "skipping these slices."
                    msg = msg % t1
                    warnings.warn(msg)
                else:
                    # throw warnings if trace length is different
                    # than one hour..!?!
                    slice = tr.slice(t1, t1 + PPSD_LENGTH)
                    # XXX not good, should be working in place somehow
                    # XXX how to do it with the padding, though?
                    success = self.__process(slice)
                    if success:
                        self.__insert_used_time(t1)
                        if verbose:
                            print(t1)
                        changed = True
                t1 += PPSD_STRIDE  # advance half an hour

            # enforce time limits, pad zeros if gaps
            #tr.trim(t, t+PPSD_LENGTH, pad=True)
        return changed
Developer: kasra-hosseini, Project: obspy, Lines: 60, Source: psd.py
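
The core pattern here: merge with a numeric fill_value so each channel id yields a single gap-free trace, then walk over it with a fixed-length, half-overlapping window. A condensed sketch on synthetic data, with PPSD_LENGTH and PPSD_STRIDE as stand-ins for the module constants:

import numpy as np
from obspy.core import Stream, Trace

PPSD_LENGTH = 3600.0  # window length in seconds (one hour)
PPSD_STRIDE = 1800.0  # advance half a window per step

st = Stream([Trace(data=np.zeros(7200), header={"sampling_rate": 1.0})])
st.merge(method=0, fill_value=0)  # gaps become zeros, one trace per id

t1, t2 = st[0].stats.starttime, st[0].stats.endtime
while t1 + PPSD_LENGTH <= t2:
    window = st[0].slice(t1, t1 + PPSD_LENGTH)
    # ... process the one-hour window (e.g. estimate a PSD) ...
    t1 += PPSD_STRIDE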

Example 5: stream_seishub_read

# Required import: from obspy.core import Stream [as alias]
# Or: from obspy.core.Stream import merge [as alias]
def stream_seishub_read(host="localhost", port=8080, timeout=100,
                 start_time="2010-01-01 00:20:03", time_interval=30,
                 network_id="PF", station_id="", location_id="",
                 channel_id="HLE", get_paz=False, remove_mean=False,
                 remove_trend=False):
    """ Seishub server client.

    For a detailed description of how it works refer to ObsPy website
    (obspy.org)

    """

    client = Client_seis(base_url="http://" + host + ':' + str(port),
                         timeout=timeout)
    t = UTCDateTime(start_time)

    st = Stream()

    if station_id == "":
        st = client.waveform.getWaveform(network_id, str(station_id),
                                         location_id,
                                         channel_id, t, t + time_interval)
    else:
        for station in station_id:
            try:
                st += client.waveform.getWaveform(network_id, str(station),
                                                  location_id,
                                                  channel_id,
                                                  t,
                                                  t + time_interval)
            except:
                pass

    if len(st) > 0:
        if remove_trend:
            st = stream_detrend(st)

        if remove_mean:
            st = stream_demean(st)

        st.merge(method=1, fill_value=0, interpolation_samples=1)
        n_trace = len(st)
    else:
        n_trace = 0

    if get_paz:
        paz = client.station.getPAZ(network_id, station_id, t)
        return st, paz, n_trace
    else:
        return st, n_trace

Developer: ftilmann, Project: miic, Lines: 54, Source: alpha_mod.py
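
Note the extra merge parameters in this example: with method=1 a numeric fill_value closes gaps, while interpolation_samples only controls how overlaps are blended. A synthetic demonstration:

import numpy as np
from obspy.core import Stream, Trace, UTCDateTime

hdr = {"sampling_rate": 1.0, "starttime": UTCDateTime(0)}
st = Stream([Trace(data=np.ones(10), header=hdr),
             Trace(data=3 * np.ones(10),
                   header=dict(hdr, starttime=UTCDateTime(20)))])

st.merge(method=1, fill_value=0, interpolation_samples=1)
print(st[0].stats.npts)  # 30 samples: the 10 s gap was filled with zeros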

Example 6: merge_single

# Required import: from obspy.core import Stream [as alias]
# Or: from obspy.core.Stream import merge [as alias]
def merge_single(nch,dstart,dend,dataDir):
  '''Merges traces of one channel to larger traces. Used for cross-correlation'''

  # here you load all the functions you need to use
  from obspy.seg2.seg2 import readSEG2
  from obspy.core import Stream

  tr = []

  for k in range(dstart, dend, 1):
    fname = '%d' %(k)
    fileName = fname + ".dat" 
    st = readSEG2(dataDir + fileName)
    tr.append(st[nch-1])
 
  new_stream = Stream(traces=tr)
  new_stream.merge(method=1, fill_value='interpolate')
  return new_stream
Developer: salve-, Project: Steinach, Lines: 20, Source: active+cont_compare.py

Example 7: Seedlink_plotter

# Required import: from obspy.core import Stream [as alias]
# Or: from obspy.core.Stream import merge [as alias]

#......... part of the code omitted .........
            fig=self.figure, type='dayplot', interval=self.interval,
            number_of_ticks=13, tick_format='%d/%m %Hh',
            size=(args.x_size * upscale_factor, args.y_size * upscale_factor),
            x_labels_size=8,
            y_labels_size=8, title=self.title, title_size=14, linewidth=0.5, right_vertical_labels=False,
            vertical_scaling_range=self.scale,
            subplots_adjust_left=0.03, subplots_adjust_right=0.99,
            subplots_adjust_top=0.95, subplots_adjust_bottom=0.1,
            one_tick_per_line=True,
            # black, red, blue, green
            color = self.color,
            show_y_UTC_label=False)

    def packetHandler(self, count, slpack):
        """
        Processes each packet received from the SeedLinkConnection.
        :type count: int
        :param count:  Packet counter.
        :type slpack: :class:`~obspy.seedlink.SLPacket`
        :param slpack: packet to process.
        :return: Boolean true if connection to SeedLink server should be
            closed and session terminated, false otherwise.
        """

        # check if not a complete packet
        if slpack is None or (slpack == SLPacket.SLNOPACKET) or \
                (slpack == SLPacket.SLERROR):
            return False

        # get basic packet info
        type = slpack.getType()

        # process INFO packets here
        if (type == SLPacket.TYPE_SLINF):
            return False
        if (type == SLPacket.TYPE_SLINFT):
#             print "Complete INFO:\n" + self.slconn.getInfoString()
            if self.infolevel is not None:
                return True
            else:
                return False

        # process packet data
        trace = slpack.getTrace()
        if trace is None:
            print(self.__class__.__name__ + ": blockette contains no trace")
            return False

        # new samples add to the main stream
        self.stream += trace
        self.stream.merge()

        now = UTCDateTime()

        # Stop time will be the next round date
        stop_time = UTCDateTime(
            now.year, now.month, now.day, now.hour, 0, 0)+3600
        start_time = stop_time-self.backtrace

        # Limit the stream size
        self.stream = self.stream.slice(start_time, stop_time)
        self.stream.trim(start_time, stop_time)

        self.title = self.stream.traces[0].stats.station+" "+self.stream.traces[0].stats.network+" "+self.stream.traces[
            0].stats.location+" "+self.stream.traces[0].stats.channel+' scale: '+str(self.scale) + " - non filtre"

        stream_time_length = self.stream.traces[
            0].stats.endtime - self.stream.traces[0].stats.starttime

        ### Until we have print_percentage of the time span to plot, we replot after every initial_update_rate packets received
#         if (stream_time_length < (self.backtrace*self.print_percentage)):
#        if ((stream_time_length))<(self.backtrace-60.0*self.interval):
#         print str(stream_time_length)+"/"+str(self.print_max)
        if stream_time_length <= self.print_max:

            self.flip += 1

#             if ((stream_time_length))<(self.backtrace-60.0*self.interval):
            self.pbar.update(stream_time_length+1)
#             print str(stream_time_length)+"/"+str(self.print_max)
            if (self.flip > self.initial_update_rate):
                self.flip = 0
                self.figure.clear()
                self.plot_graph()

 #             self.pbar.finish()

        # Real time plotting
        # We plot each update_rate packet we received
        # if (stream_time_length >= (self.backtrace*self.print_percentage)):
        if ((stream_time_length)) > (self.print_max):
#             print str(stream_time_length)+"/"+str(self.print_max)

            self.flip += 1
            if (self.flip > self.update_rate):
                self.figure.clear()
                self.plot_graph()
                self.flip = 0

        return False
Developer: krischer, Project: seedlink_plotter, Lines: 104, Source: seedlink_plotter.py
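
Stripped of the plotting details, packetHandler implements a real-time ring buffer: append each packet's trace, merge, then keep only a fixed look-back window. As a stand-alone sketch with illustrative names:

from obspy.core import Stream, UTCDateTime

def append_and_limit(buffer_stream, new_trace, backtrace_seconds):
    """Grow a real-time buffer by one packet, then cap its length."""
    buffer_stream += new_trace  # Stream += Trace is supported
    buffer_stream.merge()       # keep one trace per channel id
    now = UTCDateTime()
    buffer_stream.trim(now - backtrace_seconds, now)
    return buffer_stream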

Example 8: len

# Required import: from obspy.core import Stream [as alias]
# Or: from obspy.core.Stream import merge [as alias]
         comps = ['Z']
         tramef_Z = np.zeros((len(stations), len(TimeVec)))
 
     j = 0
     for istation, station in enumerate(stations):
         for comp in comps:
             files = eval("datafiles%s['%s']" % (comp, station))
             if len(files) != 0:
                 logging.debug("%s.%s Reading %i Files" %
                               (station, comp, len(files)))
                 stream = Stream()
                 for file in sorted(files):
                     st = read(file, format="MSEED")
                     stream += st
                     del st
                 stream.merge()
                 stream = stream.split()
                 for trace in stream:
                     data = trace.data
                     if len(data) > 2:
                         trace.detrend("demean")
                         trace.taper(0.01)
                     else:
                         trace.data *= 0
                     del data
                 logging.debug("%s.%s Merging Stream" % (station, comp))
                 #fills gaps with 0s and gives only one 'Trace'
                 stream.merge(fill_value=0)
                 logging.debug("%s.%s Slicing Stream to %s:%s" % (station, comp, utcdatetime.UTCDateTime(
                     goal_day.replace('-', '')), utcdatetime.UTCDateTime(goal_day.replace('-', '')) + goal_duration - stream[0].stats.delta))
 
Developer: gthompson, Project: MSNoise, Lines: 32, Source: s03compute_cc.py

Example 9: enumerate

# Required import: from obspy.core import Stream [as alias]
# Or: from obspy.core.Stream import merge [as alias]
       comps = ['Z']
       tramef_Z= np.zeros((len(stations),len(TimeVec)))
 
   
   j = 0
   for istation, station in enumerate(stations):
       for comp in comps:
           files = eval("datafiles%s['%s']"%(comp,station))
           if len(files) != 0:
               logging.debug("%s.%s Reading %i Files" % (station, comp, len(files)))
               stream = Stream()
               for file in sorted(files):
                   st = read(file,format="MSEED")
                   stream += st
                   del st
               stream.merge()
               stream = stream.split()
               for trace in stream:
                   data = trace.data
                   if len(data) > 2:
                       tp = cosTaper(len(data), 0.01 )
                       data -= np.mean(data)
                       data *= tp
                       trace.data = data
                   else:
                       trace.data *= 0
                   del data
               logging.debug("%s.%s Merging Stream" % (station, comp))
               stream.merge(fill_value=0) #fills gaps with 0s and gives only one 'Trace'
               logging.debug("%s.%s Slicing Stream to %s:%s" % (station, comp,utcdatetime.UTCDateTime(goal_day.replace('-','')),utcdatetime.UTCDateTime(goal_day.replace('-',''))+goal_duration-stream[0].stats.delta))
               
Developer: pgervais, Project: MSNoise, Lines: 32, Source: 03.compute_cc.py
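
Examples 8 and 9 share a three-step idiom worth isolating: merge() exposes gaps as masked samples, split() yields contiguous segments that can be detrended and tapered safely, and a final merge(fill_value=0) produces one zero-padded trace per channel. A self-contained sketch (the file pattern is hypothetical):

import glob
from obspy.core import Stream, read

stream = Stream()
for fname in sorted(glob.glob("day/*.mseed")):
    stream += read(fname, format="MSEED")

stream.merge()             # gaps become masked samples
stream = stream.split()    # back to contiguous, unmasked segments
for trace in stream:       # clean each gap-free segment individually
    if trace.stats.npts > 2:
        trace.detrend("demean")
        trace.taper(0.01)
stream.merge(fill_value=0)  # finally one trace per id, gaps as zeros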

Example 10: WaveformPlotting

# Required import: from obspy.core import Stream [as alias]
# Or: from obspy.core.Stream import merge [as alias]
class WaveformPlotting(object):
    """
    Class that provides several solutions for plotting large and small waveform
    data sets.

    .. warning::
        This class should NOT be used directly, instead use the
        :meth:`~obspy.core.stream.Stream.plot` method of the
        ObsPy :class:`~obspy.core.stream.Stream` or
        :class:`~obspy.core.trace.Trace` objects.

    It uses matplotlib to plot the waveforms.
    """

    def __init__(self, **kwargs):
        """
        Checks some variables and maps the kwargs to class variables.
        """
        self.stream = kwargs.get('stream')
        # Check if it is a Stream or a Trace object.
        if isinstance(self.stream, Trace):
            self.stream = Stream([self.stream])
        elif not isinstance(self.stream, Stream):
            msg = 'Plotting is only supported for Stream or Trace objects.'
            raise TypeError(msg)
        # Stream object should contain at least one Trace
        if len(self.stream) < 1:
            msg = "Empty object"
            raise IndexError(msg)
        # Type of the plot.
        self.type = kwargs.get('type', 'normal')
        # Start- and endtimes of the plots.
        self.starttime = kwargs.get('starttime', None)
        self.endtime = kwargs.get('endtime', None)
        self.fig_obj = kwargs.get('fig', None)
        # If no times are given take the min/max values from the stream object.
        if not self.starttime:
            self.starttime = min([trace.stats.starttime for \
                             trace in self.stream])
        if not self.endtime:
            self.endtime = max([trace.stats.endtime for \
                           trace in self.stream])
        # Map stream object and slice just in case.
        self.stream = self.stream.slice(self.starttime, self.endtime)
        # normalize times
        if self.type == 'relative':
            dt = self.starttime
            # fix plotting boundaries
            self.endtime = UTCDateTime(self.endtime - self.starttime)
            self.starttime = UTCDateTime(0)
            # fix stream times
            for tr in self.stream:
                tr.stats.starttime = UTCDateTime(tr.stats.starttime - dt)
        # Whether to use straight plotting or the fast minmax method.
        self.plotting_method = kwargs.get('method', 'fast')
        # Below that value the data points will be plotted normally. Above it
        # the data will be plotted using a different approach (details see
        # below). Can be overwritten by the above self.plotting_method kwarg.
        self.max_npts = 400000
        # If automerge is enabled. Merge traces with the same id for the plot.
        self.automerge = kwargs.get('automerge', True)
        # Set default values.
        # The default value for the size is determined dynamically because
        # there might be more than one channel to plot.
        self.size = kwargs.get('size', None)
        # Values that will be used to calculate the size of the plot.
        self.default_width = 800
        self.default_height_per_channel = 250
        if not self.size:
            self.width = 800
            # Check the kind of plot.
            if self.type == 'dayplot':
                self.height = 600
            else:
                # One plot for each trace.
                if self.automerge:
                    count = []
                    for tr in self.stream:
                        if hasattr(tr.stats, 'preview') and tr.stats.preview:
                            tr_id = tr.id + 'preview'
                        else:
                            tr_id = tr.id
                        if not tr_id in count:
                            count.append(tr_id)
                    count = len(count)
                else:
                    count = len(self.stream)
                self.height = count * 250
        else:
            self.width, self.height = self.size
        # Interval length in minutes for dayplot.
        self.interval = 60 * kwargs.get('interval', 15)
        # Scaling.
        self.vertical_scaling_range = kwargs.get('vertical_scaling_range',
                                                 None)
        # Dots per inch of the plot. Might be useful for printing plots.
        self.dpi = kwargs.get('dpi', 100)
        # Color of the graph.
        if self.type == 'dayplot':
            self.color = kwargs.get('color', ('#000000','#B2000F', '#004C12',
#......... part of the code omitted .........
Developer: obspy, Project: branches, Lines: 103, Source: waveform.py
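
WaveformPlotting is not instantiated directly; all of the kwargs it parses arrive through Stream.plot(). A dayplot request exercising several of the options handled above might look like this (file name hypothetical):

from obspy.core import read

st = read("station_day.mseed")
st.plot(type="dayplot", interval=15, size=(800, 600),
        vertical_scaling_range=2000,
        color=("#000000", "#B2000F", "#004C12"))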

Example 11: computePSD

# Required import: from obspy.core import Stream [as alias]
# Or: from obspy.core.Stream import merge [as alias]
def computePSD(sp,net,sta,loc,chan,dateval):
	#Here we compute the PSD
	lenfft = 5000
	lenol = 2500
	#Here are the different period bands.  These could be done as a dictionary
	#if Adam B. wants to clean this up using a dictionary, that is fine with me
	permin1 = 4
	permax1 = 8
	permin2 = 18
	permax2 = 22
	permin3 = 90
	permax3 = 110
	permin4 = 200
	permax4 = 500
	
	try:

		#Get the response and compute amplitude response	
		paz=getPAZ2(sp,net,sta,loc,chan,dateval)
		respval = pazToFreqResp(paz['poles'],paz['zeros'],paz['sensitivity']*paz['gain'], \
			t_samp = 1, nfft = lenfft,freq=False)[1:]
		respval = numpy.absolute(respval*numpy.conjugate(respval))

		#Get the data to compute the PSD
		if net in set(['IW','NE','US']):
			locStr = '/xs1'
		else:
			locStr = '/xs0'
		readDataString = locStr + '/seed/' + net + '_' + sta + '/' + str(dateval.year) + \
			'/' + str(dateval.year) + '_' + str(dateval.julday).zfill(3) + '_' + net + \
			'_' + sta + '/' + loc + '_' + chan + '*.seed'
		datafiles = glob.glob(readDataString)
		st = Stream()
		for datafile in datafiles:
			st += read(datafile)
		st.merge(method=-1)

		#Compute the PSD
		cpval,fre = psd(st[0].data,NFFT=lenfft,Fs=1,noverlap=lenol,scale_by_freq=True)
		per = 1/fre[1:]
		cpval = 10*numpy.log10(((2*pi*fre[1:])**2)*cpval[1:]/respval)
		perminind1 = numpy.abs(per-permin1).argmin()
		permaxind1 = numpy.abs(per-permax1).argmin()
		perminind2 = numpy.abs(per-permin2).argmin()
		permaxind2 = numpy.abs(per-permax2).argmin()
		perminind3 = numpy.abs(per-permin3).argmin()
		permaxind3 = numpy.abs(per-permax3).argmin()
		perminind4 = numpy.abs(per-permin4).argmin()
		permaxind4 = numpy.abs(per-permax4).argmin()
		perNLNM,NLNM = get_NLNM()
		perNLNMminind1 = numpy.abs(perNLNM-permin1).argmin()
		perNLNMmaxind1 = numpy.abs(perNLNM-permax1).argmin()
		perNLNMminind2 = numpy.abs(perNLNM-permin2).argmin()
		perNLNMmaxind2 = numpy.abs(perNLNM-permax2).argmin()
		perNLNMminind3 = numpy.abs(perNLNM-permin3).argmin()
		perNLNMmaxind3 = numpy.abs(perNLNM-permax3).argmin()
		perNLNMminind4 = numpy.abs(perNLNM-permin4).argmin()
		perNLNMmaxind4 = numpy.abs(perNLNM-permax4).argmin()

		cpval1 = round(numpy.average(cpval[permaxind1:perminind1]) - \
			numpy.average(NLNM[perNLNMmaxind1:perNLNMminind1]),2)
		cpval2 = round(numpy.average(cpval[permaxind2:perminind2]) - \
			numpy.average(NLNM[perNLNMmaxind2:perNLNMminind2]),2)
		cpval3 = round(numpy.average(cpval[permaxind3:perminind3]) - \
			numpy.average(NLNM[perNLNMmaxind3:perNLNMminind3]),2)
		cpval4 = round(numpy.average(cpval[permaxind4:perminind4]) - \
			numpy.average(NLNM[perNLNMmaxind4:perNLNMminind4]),2)
	except:
		cpval1 = 0
		cpval2 = 0
		cpval3 = 0
		cpval4 = 0
	return cpval1, cpval2, cpval3,cpval4
Developer: ambaker-usgs, Project: veriSeedScan, Lines: 75, Source: veriPower.py
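
The merge(method=-1) call used above is the "cleanup" merge: it only joins traces that fit together exactly (same id and sampling rate, no gap or overlap) and never invents samples, which is desirable before estimating a PSD. In isolation (the file pattern is hypothetical):

import glob
from obspy.core import Stream, read

st = Stream()
for datafile in glob.glob("LOC_CHAN*.seed"):
    st += read(datafile)
st.merge(method=-1)  # lossless merge: seamless traces only
if len(st) > 1:
    print("data still has gaps or overlaps: %d traces remain" % len(st))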

Example 12: computeStepCal

# Required import: from obspy.core import Stream [as alias]
# Or: from obspy.core.Stream import merge [as alias]
    def computeStepCal(self):
        # cal duration needs to be divided by 10000 for step cals only.  This
        # only applies for when you are reading the cal duration from the
        # database.
        if(self.dbconn is not None):
            # divide by 10000 when getting the cal_duration from the database
            duration = self.cal_duration / 10000.0
        else:
            duration = self.cal_duration

        # Determine the type of sensor from the metadata
        sensor = self._determineSensorType()

        # ignores every location except for Z for triaxial STS-2s
        if((self.dbconn is not None) and ("Z" not in self.outChannel) and
           (sensor == "STS-2HG" or sensor == "STS-4B" or sensor == "STS-2")):
            print("Skipped " + str(self.outChannel) + ' ' + sensor)

        # get the poles values for the sensor type
        pz = self._pzvals(sensor)

        # read data for the calibration
        try:
            stOUT = Stream()
            stime = UTCDateTime(self.startdate) - 5 * 60
            stOUT = read(
                self.dataOutLoc, starttime=stime,
                endtime=stime + duration + 5 * 60 + 900
            )
            stOUT.merge()
            stIN = read(
                self.dataInLoc, starttime=stime,
                endtime=stime + duration + 5 * 60 + 900
            )
            stIN.merge()
            trIN = stIN[0]
            trOUT = stOUT[0]
            trOUT.filter('lowpass', freq=.1)
            trIN.filter('lowpass', freq=.1)
            trIN.detrend('constant')
            trIN.normalize()
            trOUT.detrend('constant')
            trOUT.normalize()
            temp = trOUT.copy()
            temp.trim(endtime=stime + int(duration / 2.))
            if temp.max() < 0.0:
                trOUT.data = -trOUT.data
        except:
            if(self.dbconn is not None):
                self.stepcal_logger.error('Unable to read data for {' +
                                          'network = ' + self.network +
                                          ', station = ' + self.station +
                                          ', sensor = ' + str(sensor) +
                                          ', location = ' + str(self.location) +
                                          ', channel = ' + str(self.outChannel) +
                                          '}')
            else:
                self.stepcal_logger.error('''(Manual Override) Unable read data
                                          for manual input file ''' +
                                          str(self.dataInLoc) +
                                          ' and output file ' +
                                          str(self.dataOutLoc))
        try:
            # compute corner (cutoff) frequency
            f = 1. / (2 * math.pi / abs(pz['poles'][0]))
            # compute damping ratio
            h = abs(pz['poles'][0].real) / abs(pz['poles'][0])
            sen = 10.0

            print (
                'Using: h=' + str(h) + ' f=' + str(f) + ' sen = ' + str(sen))

            x = numpy.array([f, h, sen])
            try:
                # compute best fit
                bf = fmin(self._resi, x, args=(trIN, trOUT),
                          xtol=10 ** -8, ftol=10 ** -3, disp=False)
            except:
                bf = x

        except:
            if(self.dbconn is not None):
                self.stepcal_logger.error('Unable to calculate {' +
                                          'network = ' + self.network +
                                          ', station = ' + self.station +
                                          ', sensor = ' + str(sensor) +
                                          ', location = ' + str(self.location) +
                                          ', channel = ' + str(self.outChannel) +
                                          '}')
            else:
                self.stepcal_logger.error('''(Manual Override) Unable to
                                          perform corner freq, damping ratio,
                                          and best fit calculations for input
                                          file ''' + str(self.dataInLoc) +
                                          ' and output file ' +
                                          str(self.dataOutLoc))
        try:
            pazNOM = cornFreq2Paz(f, h)
            pazNOM['zeros'] = [0. + 0.j]

#......... part of the code omitted .........
Developer: asl-usgs, Project: calanalyzer, Lines: 103, Source: ComputeCalibrations.py
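
The data-reading step above reduces to: read a fixed window around the calibration start, merge so that filtering and detrending see one continuous trace, then normalize. A reduced sketch (file name and start time are hypothetical):

from obspy.core import read, UTCDateTime

stime = UTCDateTime("2014-01-02T00:00:00") - 5 * 60
st = read("step_cal_output.mseed", starttime=stime, endtime=stime + 1800)
st.merge()  # one continuous trace for the whole window
tr = st[0]
tr.filter("lowpass", freq=0.1)
tr.detrend("constant")
tr.normalize()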

Example 13: read_from_SDS

# Required import: from obspy.core import Stream [as alias]
# Or: from obspy.core.Stream import merge [as alias]
    def read_from_SDS(self, sds_root, net_name, sta_name, comp_name,
                      starttime=None, endtime=None, rmean=False, taper=False,
                      pad_value=None):
        """
        Read waveform data from an SDS structured archive.  Simple overlaps and
        adjacent traces are merged if possible.

        :param sds_root: root of the SDS archive
        :param net_name: network name
        :param sta_name: station name
        :param comp_name: component name
        :param starttime: Start time of data to be read.
        :param endtime: End time of data to be read.
        :param rmean: If ``True`` removes the mean from the data upon reading.
            If data are segmented, the mean will be removed from all segments
            individually.
        :param taper: If ``True`` applies a cosine taper to the data upon
            reading.  If data are segmented, tapers are applied to all segments
            individually.
        :param pad_value: If this parameter is set, points between
            ``starttime`` and the first point in the file, and points between
            the last point in the file and ``endtime``, will be set to
            ``pad_value``.  You may want to also use the ``rmean`` and
            ``taper`` parameters, depending on the nature of the data.

        :type sds_root: string
        :type net_name: string
        :type sta_name: string
        :type comp_name: string
        :type starttime: ``obspy.core.utcdatetime.UTCDateTime`` object,
            optional
        :type endtime: ``obspy.core.utcdatetime.UTCDateTime`` object, optional
        :type rmean: boolean, optional
        :type taper: boolean, optional
        :type pad_value: float, optional

        :raises UserWarning: If there are no data between ``starttime`` and
            ``endtime``

        """

        logging.info("Reading from SDS structure %s %s %s ..." %
                     (net_name, sta_name, comp_name))

        # Get the complete file list. If a directory, get all the filenames.
        filename = os.path.join(sds_root, net_name, sta_name,
                                "%s.D" % comp_name, "*")
        logging.debug("Reading %s between %s and %s" %
                      (filename, starttime.isoformat(), endtime.isoformat()))
        if os.path.isdir(glob.glob(filename)[0]):
            filename = os.path.join(filename, "*")
        file_glob = glob.glob(filename)

        # read header from all files to keep only those within the time limits
        fnames_within_times = []
        for fname in file_glob:
            st_head = stream.read(fname, headonly=True)
            # retrieve first_start and last_end time for the stream
            # without making any assumptions on order of traces
            first_start = st_head[0].stats.starttime
            last_end = st_head[0].stats.endtime
            # find earliest start time and latest end time in stream
            for tr in st_head:
                if tr.stats.starttime < first_start:
                    first_start = tr.stats.starttime
                if tr.stats.endtime > last_end:
                    last_end = tr.stats.endtime
            # add to list if start or end time are within our requested limits
            if (first_start < endtime and last_end > starttime):
                fnames_within_times.append(fname)

        logging.debug("Found %d files to read" % len(fnames_within_times))

        # now read the full data only for the relevant files
        st = Stream()
        for fname in fnames_within_times:
            st_tmp = read(fname, starttime=starttime, endtime=endtime)
            for tr in st_tmp:
                st.append(tr)
        # and merge nicely
        st.merge(method=-1)

        if st.count() > 1:  # There are gaps after sensible cleanup merging
            logging.info("File contains gaps:")
            st.printGaps()

        # apply rmean if requested
        if rmean:
            logging.info("Removing the mean from single traces.")
            st = stream_rmean(st)

        # apply taper if requested
        if taper:
            logging.info("Tapering single traces.")
            st = stream_taper(st)

        if pad_value is not None:
            try:
                first_tr = st.traces[0]
                # save delta (to save typing)
#......... part of the code omitted .........
Developer: amaggi, Project: waveloc, Lines: 103, Source: OP_waveforms.py
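
A detail worth reusing from this example is the header-only scan: reading just the headers to decide which files intersect the requested window before loading any waveform data. As a stand-alone helper (the function name is ours, not part of the waveloc API):

from obspy.core import read

def file_overlaps(fname, starttime, endtime):
    """Decide from headers alone whether a file intersects a time window."""
    st_head = read(fname, headonly=True)  # fast: waveforms are not loaded
    first_start = min(tr.stats.starttime for tr in st_head)
    last_end = max(tr.stats.endtime for tr in st_head)
    return first_start < endtime and last_end > starttime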

Example 14: __plotStraight

# Required import: from obspy.core import Stream [as alias]
# Or: from obspy.core.Stream import merge [as alias]
    def __plotStraight(self, trace, ax, *args, **kwargs):  # @UnusedVariable
        """
        Just plots the data samples in the self.stream. Useful for smaller
        datasets up to around 1000000 samples (depending on the machine its
        being run on).

        Slow and high memory consumption for large datasets.
        """
        # Copy to avoid any changes to original data.
        trace = deepcopy(trace)
        if len(trace) > 1:
            stream = Stream(traces=trace)
            # Merge with 'interpolation'. In case of overlaps this method will
            # always use the longest available trace.
            if hasattr(trace[0].stats, 'preview') and trace[0].stats.preview:
                stream = Stream(traces=stream)
                stream = mergePreviews(stream)
            else:
                stream.merge(method=1)
            trace = stream[0]
        else:
            trace = trace[0]
        # Check if it is a preview file and adjust accordingly.
        # XXX: Will look weird if the preview file is too small.
        if hasattr(trace.stats, 'preview') and trace.stats.preview:
            # Mask the gaps.
            trace.data = np.ma.masked_array(trace.data)
            trace.data[trace.data == -1] = np.ma.masked
            # Recreate the min_max scene.
            dtype = trace.data.dtype
            old_time_range = trace.stats.endtime - trace.stats.starttime
            data = np.empty(2 * trace.stats.npts, dtype=dtype)
            data[0::2] = trace.data / 2.0
            data[1::2] = -trace.data / 2.0
            trace.data = data
            # The times are not supposed to change.
            trace.stats.delta = old_time_range / float(trace.stats.npts - 1)
        # Write to self.stats.
        calib = trace.stats.calib
        max = trace.data.max()
        min = trace.data.min()
        if hasattr(trace.stats, 'preview') and trace.stats.preview:
            tr_id = trace.id + ' [preview]'
        else:
            tr_id = trace.id
        self.stats.append([tr_id, calib * trace.data.mean(),
                           calib * min, calib * max])
        # Pad the beginning and the end with masked values if necessary. Might
        # seem like overkill but it works really fast and is a clean solution
        # to gaps at the beginning/end.
        concat = [trace]
        if self.starttime != trace.stats.starttime:
            samples = (trace.stats.starttime - self.starttime) * \
                trace.stats.sampling_rate
            temp = [np.ma.masked_all(int(samples))]
            temp.extend(concat)  # prepend the masked padding
            concat = temp
        if self.endtime != trace.stats.endtime:
            samples = (self.endtime - trace.stats.endtime) * \
                      trace.stats.sampling_rate
            concat.append(np.ma.masked_all(int(samples)))
        if len(concat) > 1:
            # Use the masked array concatenate, otherwise it will result in a
            # not masked array.
            trace.data = np.ma.concatenate(concat)
            # set starttime and calculate endtime
            trace.stats.starttime = self.starttime
        trace.data *= calib
        ax.plot(trace.data, color=self.color)
        # Set the x limit for the graph to also show the masked values at the
        # beginning/end.
        ax.set_xlim(0, len(trace.data) - 1)
Developer: obspy, Project: branches, Lines: 74, Source: waveform.py

Example 15: merge

# Required import: from obspy.core import Stream [as alias]
# Or: from obspy.core.Stream import merge [as alias]
def merge(nch,dstart,dend):
  
  ''' Read the file 'fileName' in directory 'dataDir' into a stream called 'st'.

      The i-loop runs over the channels/geophones, so the merged data of all
      nch geophones is plotted in the end.
      The k and m loops run over the data files, so traces from files 1 to 113
      are merged together, which equals one hour of measurements.

      For each channel i the corresponding trace is picked from all 113 files
      and written into a new stream 'new_stream'. In this stream all traces
      (consecutive in time) are then merged into one long trace. As new_stream
      then consists of a single trace, the index '[0]' can always be used.

      't' is an array built as a combined time axis for all the merged traces.
      The new_stream data is finally plotted against this time axis. This
      procedure is repeated for each channel, building one subplot per channel. '''

  # here you load all the functions you need to use
  from obspy.seg2.seg2 import readSEG2
  from obspy.core import Stream
  import matplotlib.pyplot as plt
  import numpy as np
  from numpy import matrix
  import time

  time_start = time.perf_counter()

  dataDir = "/home/johannes/~home/Steinach/DEC_JAN/DEC_night/"
  outdir = "/home/johannes/~home/Steinach/outmseed/"

  ax = plt.subplot(111)

  TR = []

  for a in range(0,nch):
    TR.append([])

  for k in range(dstart, dend, 1):
    fname = '%d' %(k)
    fileName = fname + ".dat" 
    st = readSEG2(dataDir + fileName)
    for i in range(0,nch):
      TR[i].append(st[i])
      
      
  for m in range(0,nch):
    new_stream = Stream(traces=TR[m])
    new_stream.merge(method=0, fill_value='interpolate')

    start = new_stream[0].stats.starttime
    end = new_stream[0].stats.endtime

    timeframe = str(m+1)+ "_" + str(start.year) +'.'+ str(start.julday) +'.'+ str(start.hour) +'.'+ str(start.minute) +'.'+ str(start.second) \
      +'-'+ str(end.year) +'.'+ str(end.julday) +'.'+ str(end.hour) +'.'+ str(end.minute) +'.'+ str(end.second)

    new_stream.write(outdir + "CH" + str(m+1) + "/" + timeframe + ".mseed", format="MSEED")

    new_stream[0].normalize()
    dt = new_stream[0].stats.starttime.timestamp


    
    t = np.linspace(new_stream[0].stats.starttime.timestamp - dt,
     new_stream[0].stats.endtime.timestamp -dt, new_stream[0].stats.npts)

    ax.plot(t, new_stream[0].data + 1.5* m)

  time_elapsed = time.perf_counter() - time_start
  print(time_elapsed)
  plt.show()
Developer: salve-, Project: Steinach, Lines: 75, Source: merge_48_cont5.py


Note: The obspy.core.Stream.merge method examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets are selected from open-source projects contributed by many developers; copyright of the source code belongs to the original authors, and distribution and use should follow the corresponding project's license. Do not reproduce without permission.