

Python core.getTime Function Code Examples

This article collects typical usage examples of the psychopy.core.getTime function in Python. If you have been wondering how getTime is actually called, what it does, or what real-world usage looks like, the curated function examples below should help.


The sections below present 15 code examples of the getTime function, ordered by popularity by default.
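
As a quick orientation before the project excerpts: core.getTime() returns a high-resolution time in seconds, so the difference between two calls gives an elapsed duration. The following minimal sketch is illustrative only and is not taken from any of the projects listed below.

    from psychopy import core

    t0 = core.getTime()                 # high-resolution timestamp, in seconds
    core.wait(0.25)                     # stand-in for any work or delay
    elapsed = core.getTime() - t0       # elapsed time between the two calls
    print('elapsed: %.4f s' % elapsed)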

Example 1: callback

 def callback(self, in_data, frame_count, time_info, status):
     data = self._wf.readframes(frame_count)
     if self._starttime is None:
         self._starttime = core.getTime()
     chunk_dur = len(data)/self.bytes_per_sample/self.sampling_rate
     self._endtime = core.getTime()+chunk_dur
     return (data, pyaudio.paContinue)
Author: g-no, Project: OpenHandWrite, Lines: 7, Source file: audio.py
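
The callback above estimates when the current chunk will finish playing by adding the chunk's duration (byte count divided by bytes per sample and by the sampling rate) to the current getTime() value. A standalone sketch of that arithmetic, using illustrative values rather than the project's actual attributes:

    from psychopy import core

    bytes_per_sample = 2                               # assumed: 16-bit audio
    sampling_rate = 44100                              # assumed: 44.1 kHz
    data = b'\x00' * (4410 * bytes_per_sample)         # 0.1 s of silence, for illustration
    chunk_dur = len(data) / bytes_per_sample / sampling_rate   # -> 0.1 s
    end_time = core.getTime() + chunk_dur              # estimated time when this chunk ends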

Example 2: shoot

 def shoot(bullet_type, color):
     self.bullet = Bullet(color)
     self.bullet.color = str(color)
     # Set the bullet so it shoots from middle of player
     self.bullet.rect.x = self.player.middle
     self.bullet.rect.y = self.player.rect.y
     # Play the bullet sound
     self.shot_sound.out()
     # Decrease the ammo supply by 1
     self.level.ammo -= 1
     # Add the bullet to the lists
     self.all_sprites_list.add(self.bullet)
     if color == GREEN:
         shot = core.getTime()
         self.Ashot_time.append(shot)
         self.A_bullet_list.add(self.bullet)
     elif color == RED:
         shot = core.getTime()
         self.Bshot_time.append(shot)
         self.B_bullet_list.add(self.bullet)
     elif color == YELLOW:
         shot = core.getTime()
         self.Cshot_time.append(shot)
         self.C_bullet_list.add(self.bullet)
     elif color == BROWN:
         shot = core.getTime()
         self.Dshot_time.append(shot)
         self.D_bullet_list.add(self.bullet)
Author: arkansasred, Project: AP_Game, Lines: 28, Source file: game_stars(don't+use).py

Example 3: run

    def run(self):
        """Starts the validation process. This function will not return
        until the validation is complete. The validation results are
        returned in dict format.

        :return: dict containing validation results.

        """

        continue_val = self._enterIntroScreen()

        if continue_val is False:
            return None

        # delay about 0.5 sec before starting validation
        ftime = self.win.flip()
        while core.getTime() - ftime < 0.5:
            self.win.flip()
            self.io.clearEvents()

        val_results = self._enterValidationSequence()

        # delay about 0.5 sec before showing validation end screen
        ftime = self.win.flip()
        while core.getTime() - ftime < 0.5:
            self.win.flip()
            self.io.clearEvents()

        self._enterFinishedScreen(val_results)
        self.io.clearEvents()
        self.win.flip()

        return val_results
Author: isolver, Project: OpenHandWrite, Lines: 33, Source file: wintabgraphics.py
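
Both ~0.5 s delays above follow the same flip-locked pattern: take the timestamp returned by win.flip() and keep flipping until getTime() has advanced by the desired amount. A reduced sketch of that pattern; win and io are assumed to be an existing psychopy.visual.Window and ioHub connection, as in the excerpt:

    from psychopy import core

    def flip_delay(win, io, duration=0.5):
        """Keep refreshing the window for `duration` seconds (sketch of the pattern above)."""
        start = win.flip()                  # flip() returns the flip timestamp
        while core.getTime() - start < duration:
            win.flip()                      # stay frame-locked while waiting
            io.clearEvents()                # discard ioHub events accumulated meanwhile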

Example 4: generate

 def generate(self):
     """ generate the enemy off screen """
     #distance for offset = desired time * velocity
     #ns.sync()
     self.offset_time = 60*random.randrange(self.offscreen_min, self.offscreen_max)  # multiply by 60 fps to convert seconds to frames
     self.offset_distance = -(self.offset_time * self.y_speed)
     self.rect.y = self.offset_distance
     if self.enemy_type == 'A':
         self.sound.out()
         #ns.send_event('SndA', timestamp = egi.ms_localtime())
         self.rect.x = self.a_pos
         time = core.getTime()
         self.enemyA_generate_time.append(time)
     elif self.enemy_type == 'B':
         self.sound.out()
         #ns.send_event('SndB', timestamp = egi.ms_localtime())
         self.rect.x = self.b_pos
         time = core.getTime()
         self.enemyB_generate_time.append(time)
     elif self.enemy_type == 'C':
         self.sound.out()
         #ns.send_event('SndC', timestamp = egi.ms_localtime())
         self.rect.x = self.c_pos
         time = core.getTime()
         self.enemyC_generate_time.append(time)
     elif self.enemy_type == 'D':
         self.sound.out()
         #ns.send_event('SndC', timestamp = egi.ms_localtime())
         self.rect.x = self.d_pos
         time = core.getTime()
         self.enemyD_generate_time.append(time)
Author: arkansasred, Project: AP_Game, Lines: 31, Source file: game_stars(don't+use).py

Example 5: update

 def update(self):
     """ Automatically called when we need to move the enemy. """
     self.rect.y += self.y_speed
     #self.rect.x += self.x_speed
     #bounce off edges
     #if self.rect.x > SCREEN_WIDTH - self.rect.width or self.rect.x <= 0:
     #    self.x_speed = -self.x_speed
     #change x direction based on probability function
     #self.random = random.random
     #if self.random < self.prob:
     #    self.x_speed = -self.x_speed
     """ Record time right when enemy fully enters screen """
     if -1<= self.rect.y <= 0:
         t_sight = core.getTime()
         #ns.send_event('Site', timestamp = egi.ms_localtime())
         if self.enemy_type=='A':
             #ns.send_event('Site', timestamp = egi.ms_localtime())
             t_sight = core.getTime()
             self.enemyA_sight_time.append(t_sight)
         if self.enemy_type =='B':
             #ns.send_event('Site', timestamp = egi.ms_localtime())
             t_sight = core.getTime()
             self.enemyB_sight_time.append(t_sight)
         if self.enemy_type=='C':
             #ns.send_event('Site', timestamp = egi.ms_localtime())
             t_sight = core.getTime()
             self.enemyC_sight_time.append(t_sight)
         if self.enemy_type=='D':
             #ns.send_event('Site', timestamp = egi.ms_localtime())
             t_sight = core.getTime()
             self.enemyD_sight_time.append(t_sight)
Author: arkansasred, Project: AP_Game, Lines: 31, Source file: game_stars(don't+use).py

Example 6: _record

    def _record(self, sec, filename='', block=True):
        while self.recorder.running:
            pass
        self.duration = float(sec)
        self.onset = core.getTime()  # for duration estimation, high precision
        self.fileOnset = core.getAbsTime()  # for log and filename, 1 sec precision
        logging.data('%s: Record: onset %d, capture %.3fs' %
                     (self.loggingId, self.fileOnset, self.duration) )
        if not filename:
            onsettime = '-%d' % self.fileOnset
            self.savedFile = onsettime.join(os.path.splitext(self.wavOutFilename))
        else:
            self.savedFile = os.path.abspath(filename).strip('.wav') + '.wav'

        t0 = core.getTime()
        self.recorder.run(self.savedFile, self.duration, **self.options)

        self.rate = sound.pyoSndServer.getSamplingRate()
        if block:
            core.wait(self.duration, 0)
            logging.exp('%s: Record: stop. %.3f, capture %.3fs (est)' %
                     (self.loggingId, core.getTime(), core.getTime() - t0) )
            while self.recorder.running:
                core.wait(.001, 0)
        else:
            logging.exp('%s: Record: return immediately, no blocking' %
                     (self.loggingId) )

        return self.savedFile
Author: DiogoamCoutinho, Project: stimulus.py, Lines: 29, Source file: microphone.py

Example 7: runTrial

 def runTrial(self,*args):
     self.babyStatus=0 # -1 no signal, 0 saccade, 1 fixation,
     self.sacPerPursuit=0
     self.pursuedAgents=False
     self.rewardIter=0
     self.nrRewards=0
     self.blinkCount=0
     self.tFix=0
     self.isFixLast=False
     self.babySawReward=False
     ende=False
     if core.getTime() > BabyExperiment.expDur*60 + self.tStart:
         ende = True
     if ende:
         print(core.getTime() - self.tStart)
         self.etController.sendMessage('Finished')
         self.etController.closeConnection()
         self.wind.close()
         core.quit()
     self.timeNotLooking=0
     self.etController.preTrial(driftCorrection=self.showAttentionCatcher>0)
     self.etController.sendMessage('Trial\t%d'%self.t)        
     self.etController.sendMessage('Phase\t%d'%self.phases[self.pi])
     if self.eeg!=None: 
         self.eeg.setData(int(self.t+1))
     Experiment.runTrial(self,*args,fixCross=False)
     self.etController.postTrial()
Author: simkovic, Project: GazeContingentChaseBaby, Lines: 25, Source file: Experiment.py

Example 8: onGazeData

    def onGazeData(self, data):
        '''
        Called whenever the Tobii eye tracker has computed and sent new gaze
        data. The data are stored in the buffer.
        '''
        self.__dataCount += 1

        if len(self.__buffer) >= self.__buffersize:
            self.__buffer.pop(0)
        self.__buffer.append((core.getTime(), data))

        if self.__storing:
            print "\n\nomg storing\n\n", core.getTime()
        if core.getTime() - self.__lastStoreTime > self.__dataSaveIntervall:
            self.__storing = True
            self.__storeData()
            self.__storing = False

        lx = data.x_gazepos_lefteye
        ly = data.y_gazepos_lefteye
        rx = data.x_gazepos_righteye
        ry = data.y_gazepos_righteye

        lx, ly = pyTetClient.tobiiToPsyCoord(lx, ly)
        rx, ry = pyTetClient.tobiiToPsyCoord(rx, ry)

        avgX = (lx + rx) / 2
        avgY = (ly + ry) / 2

        if self.__showGazePoint:
            # adjust the positions of the gaze discs
            #print "lx:%f\tly:%f\trx:%f\try:%f" % (lx, ly, rx, ry)
            self.__discAvg.setPosition(avgX, avgY)
            self.__discLeft.setPosition(lx, ly)
            self.__discRight.setPosition(rx, ry)
Author: akrv, Project: TunnelExp, Lines: 35, Source file: GazeProcessor.py

Example 9: record

    def record(self, sec, file='', block=True):
        """Capture sound input for duration <sec>, save to a file.

        Return the path/name to the new file. Uses onset time (epoch) as
        a meaningful identifier for filename and log.
        """
        while self.recorder.running:
            pass
        self.duration = float(sec)
        self.onset = core.getTime() # note: report onset time in log, and use in filename
        logging.data('%s: Record: onset %.3f, capture %.3fs' %
                     (self.loggingId, self.onset, self.duration) )
        if not file:
            onsettime = '-%.3f' % self.onset
            self.savedFile = onsettime.join(os.path.splitext(self.wavOutFilename))
        else:
            self.savedFile = os.path.abspath(file).strip('.wav') + '.wav'

        t0 = core.getTime()
        self.recorder.run(self.savedFile, self.duration, self.sampletype)
        self.rate = sound.pyoSndServer.getSamplingRate()

        if block:
            # .0008 is a fudge factor for better reporting; actual timing is done by Clean_objects
            core.wait(self.duration - .0008)
            logging.exp('%s: Record: stop. %.3f, capture %.3fs (est)' %
                     (self.loggingId, core.getTime(), core.getTime() - t0) )
        else:
            logging.exp('%s: Record: return immediately, no blocking' %
                     (self.loggingId) )

        return self.savedFile
Author: RainyJupiter, Project: psychopy, Lines: 32, Source file: microphone.py

Example 10: testWait

def testWait(duration=1.55):
    try:
        t1=getTime()
        wait(duration)
        t2=getTime()

        # Check that the actual duration of the wait was close to the requested delay.
        #
        # Note that I have had to set this to a relatively high value of
        # 50 msec, because on my Win7, i7, 16GB machine I was seeing deltas of up to
        # 35 msec when testing this.
        #
        # That is way high, and I think it is because the current wait()
        # implementation polls pyglet for events during the CPU-hog period.
        # IMO, during the hog period, which should only need to be 1 - 2 msec
        # rather than the current 200 msec default, nothing should be done but tight
        # looping waiting for the wait() to expire. That is what I do in ioHub, and on
        # this same PC I consistently get actual vs. requested duration deltas of < 100 usec.
        #
        # I have not changed wait() in psychopy until feedback is given, as I
        # may be missing a reason why the current implementation is required.
        #
        assert np.fabs((t2-t1)-duration) < 0.05

        printf(">> core.wait(%.2f) Test: PASSED"%(duration))

    except Exception:
        printf(">> core.wait(%.2f) Test: FAILED. Actual Duration was %.3f"%(duration,(t2-t1)))
        printExceptionDetails()

    printf("-------------------------------------\n")
Author: bergwiesel, Project: psychopy, Lines: 31, Source file: test_core.py
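
The long comment above argues that, during the final "hog" period, nothing should run except a tight loop waiting for the deadline. A minimal sketch of such a busy-wait built directly on getTime(); busy_wait is a hypothetical helper, not a PsychoPy API:

    from psychopy import core

    def busy_wait(duration):
        """Hypothetical tight polling loop; trades CPU time for sub-millisecond precision."""
        deadline = core.getTime() + duration
        while core.getTime() < deadline:
            pass                            # do nothing but poll the clock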

Example 11: run_main_experiment

def run_main_experiment():
    time_start = core.getTime()
    time_play = time_start
    order = Exp.make_random_stim_order()
    Nonethird = np.floor(len(order)/3)
    Ntwothird = np.floor(2*len(order)/3)

    t = 0
    for i in order:
        t = t+1
        print(core.getTime() - time_start)
        if t in [Nonethird,Ntwothird]:
            set_msg('Short Break!','MAIN')
            set_msg('Press return to continue','KEY')
            win.flip()
            event.waitKeys(keyList=['return','space'])
            core.wait(1) 

        s = sound_build.make_noisy_stim(i,Exp)
        scaled = np.int16(s/np.max(np.abs(s)) * 32767)
        write('test.wav', 44100, scaled)
        core.wait(time_play - core.getTime())        
        set_msg('Up or down?','MAIN')
        win.flip()        
        playsound(s,vol)
        core.wait(0.1) 
        #core.wait(0.5) #wait 500ms; but use a loop of x frames for more accurate timing in fullscreen
        thisResp = get_response()
        iscorrect = Exp.isRespCorrect(i,thisResp) # 1=correct, 0=incorrect, -1=missed
        time_play = core.getTime() + iti
        dataFile.write('%i,%i,%i\n' %(i, thisResp,iscorrect))
    dataFile.close()
Author: vincentadam87, Project: pitch_experiment, Lines: 34, Source file: run_experiment.py
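
Example 11 paces trials by storing a target onset time (time_play) and then sleeping only for whatever remains of the interval with core.wait(time_play - core.getTime()). A reduced sketch of that inter-trial-interval pattern, with placeholder names rather than the project's variables, plus a guard against a negative remainder:

    from psychopy import core

    iti = 1.0                               # assumed inter-trial interval, in seconds
    next_onset = core.getTime()
    for trial in range(10):
        remaining = next_onset - core.getTime()
        if remaining > 0:
            core.wait(remaining)            # sleep only for what is left of the interval
        # present_stimulus(trial)           # placeholder for the actual trial code
        next_onset = core.getTime() + iti   # schedule the next onset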

Example 12: getGLFont

    def getGLFont(font_family_name,size=32,bold=False,italic=False,dpi=72):
        """
        Return a FontAtlas object that matches the family name, style info,
        and size provided. FontAtlas objects are cached, so if multiple
        TextBox instances use the same font (with matching font properties)
        then the existing FontAtlas is returned. Otherwise, a new FontAtlas is
        created, added to the cache, and returned.
        """
        from psychopy.visual.textbox import getFontManager
        fm=getFontManager()

        if fm:
            if fm.font_store:
                # should be loading from font store if requested font settings
                # have been saved to the hdf5 file (assuming it is faster)
                pass
                #print "TODO: Check if requested font is in FontStore"
            font_infos=fm.getFontsMatching(font_family_name,bold,italic)
            if len(font_infos) == 0:
                return False
            font_info=font_infos[0]
            fid=MonospaceFontAtlas.getIdFromArgs(font_info,size,dpi)
            font_atlas=fm.font_atlas_dict.get(fid)
            if font_atlas is None:
                font_atlas=fm.font_atlas_dict.setdefault(fid,MonospaceFontAtlas(font_info,size,dpi))
                font_atlas.createFontAtlas()
            if fm.font_store:
                t1 = getTime()
                fm.font_store.addFontAtlas(font_atlas)
                t2 = getTime()
                print('font store add atlas:', t2 - t1)
        return font_atlas
Author: Gianluigi, Project: psychopy, Lines: 32, Source file: fontmanager.py

Example 13: updateStimText

def updateStimText(stim,text=None):
    stime=core.getTime()*1000.0
    if text:    
        stim.setText(text)
    stim.draw()
    gl.glFinish()
    etime=core.getTime()*1000.0 
    return etime-stime
Author: wilberth, Project: psychopy, Lines: 8, Source file: textstim_vs_textbox.py
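
Because updateStimText() multiplies getTime() (seconds) by 1000.0, it returns the draw-plus-glFinish time in milliseconds. A hedged usage sketch; stim is assumed to be an already-created visual.TextStim or TextBox with an open window:

    draw_ms = updateStimText(stim, 'Hello, world')      # stim is a pre-built text stimulus
    print('text update + glFinish took %.2f ms' % draw_ms)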

Example 14: resample

    def resample(self, newRate=16000, keep=True, log=True):
        """Re-sample the saved file to a new rate, return the full path.

        Can take several visual frames to resample a 2s recording.

        The default values for resample() are for google-speech, keeping the
        original (presumably recorded at 48kHz) to archive.
        A warning is generated if the new rate is not an integer factor or multiple of the old rate.

        To control anti-aliasing, use pyo.downsamp() or upsamp() directly.
        """
        if not self.savedFile or not os.path.isfile(self.savedFile):
            msg = "%s: Re-sample requested but no saved file" % self.loggingId
            logging.error(msg)
            raise ValueError(msg)
        if newRate <= 0 or type(newRate) != int:
            msg = "%s: Re-sample bad new rate = %s" % (self.loggingId, repr(newRate))
            logging.error(msg)
            raise ValueError(msg)

        # set-up:
        if self.rate >= newRate:
            ratio = float(self.rate) / newRate
            info = "-ds%i" % ratio
        else:
            ratio = float(newRate) / self.rate
            info = "-us%i" % ratio
        if ratio != int(ratio):
            logging.warn("%s: old rate is not an integer factor of new rate" % self.loggingId)
        ratio = int(ratio)
        newFile = info.join(os.path.splitext(self.savedFile))

        # use pyo's downsamp or upsamp based on relative rates:
        if not ratio:
            logging.warn("%s: Re-sample by %sx is undefined, skipping" % (self.loggingId, str(ratio)))
        elif self.rate >= newRate:
            t0 = core.getTime()
            downsamp(self.savedFile, newFile, ratio)  # default 128-sample anti-aliasing
            if log and self.autoLog:
                logging.exp(
                    "%s: Down-sampled %sx in %.3fs to %s" % (self.loggingId, str(ratio), core.getTime() - t0, newFile)
                )
        else:
            t0 = core.getTime()
            upsamp(self.savedFile, newFile, ratio)  # default 128-sample anti-aliasing
            if log and self.autoLog:
                logging.exp(
                    "%s: Up-sampled %sx in %.3fs to %s" % (self.loggingId, str(ratio), core.getTime() - t0, newFile)
                )

        # clean-up:
        if not keep:
            os.unlink(self.savedFile)
            self.savedFile = newFile
            self.rate = newRate

        return os.path.abspath(newFile)
Author: rpbaxter, Project: psychopy, Lines: 57, Source file: microphone.py

Example 15: loadMovie

    def loadMovie(self, filename, log=True):
        """Load a movie from file

        :Parameters:

            filename: string
                The name of the file, including path if necessary


        After the file is loaded MovieStim.duration is updated with the movie
        duration (in seconds).
        """
        filename = pathToString(filename)
        self._unload()
        self._reset()
        if self._no_audio is False:
            self._createAudioStream()

        # Create Video Stream stuff
        self._video_stream.open(filename)
        vfstime = core.getTime()
        opened = self._video_stream.isOpened()
        if not opened and core.getTime() - vfstime < 1:
            raise RuntimeError("Error when reading image file")

        if not opened:
            raise RuntimeError("Error when reading image file")

        self._total_frame_count = self._video_stream.get(
            cv2.CAP_PROP_FRAME_COUNT)
        self._video_width = int(self._video_stream.get(
            cv2.CAP_PROP_FRAME_WIDTH))
        self._video_height = int(self._video_stream.get(
            cv2.CAP_PROP_FRAME_HEIGHT))
        self._format = self._video_stream.get(
            cv2.CAP_PROP_FORMAT)
        # TODO: Read depth from video source
        self._video_frame_depth = 3

        cv_fps = self._video_stream.get(cv2.CAP_PROP_FPS)

        self._video_frame_rate = cv_fps

        self._inter_frame_interval = 1.0/self._video_frame_rate

        # Create a numpy array that can hold one video frame, as returned by
        # cv2.
        self._numpy_frame = numpy.zeros((self._video_height,
                                         self._video_width,
                                         self._video_frame_depth),
                                        dtype=numpy.uint8)
        self.duration = self._total_frame_count * self._inter_frame_interval
        self.status = NOT_STARTED

        self.filename = filename
        logAttrib(self, log, 'movie', filename)
Author: dgfitch, Project: psychopy, Lines: 56, Source file: movie2.py


Note: The psychopy.core.getTime function examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by various developers, and copyright in the source code remains with the original authors. For distribution and use, please refer to the license of the corresponding project; do not reproduce without permission.