

Python time.replace Function Code Examples

This article collects typical usage examples of the time.replace call in Python, gathered from open-source code. Note that the standard-library time module itself has no replace function: in the snippets below, time is a local variable that shadows the module name, so the call is actually either str.replace on a formatted timestamp string or datetime.replace on a datetime object. If you are wondering how replace is used in practice, the curated examples here may help.


The following presents 15 code examples of the replace call, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
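
Before diving into the collected snippets, here is a minimal, self-contained sketch of the two patterns that recur below (the variable and file names are illustrative, not taken from any of the projects): str.replace to sanitize a formatted timestamp for use in a filename, and datetime.replace to reset fields of a datetime object.

from datetime import datetime
from time import strftime

# Pattern 1: str.replace -- make a timestamp string filename-safe.
stamp = strftime("%Y-%m-%d %H:%M:%S")             # e.g. '2014-01-02 13:45:09'
safe = stamp.replace(' ', '_').replace(':', '-')  # '2014-01-02_13-45-09'
print(safe)

# Pattern 2: datetime.replace -- return a copy with some fields reset,
# here truncating the current time to midnight.
now = datetime.now()
midnight = now.replace(hour=0, minute=0, second=0, microsecond=0)
print(midnight)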

Example 1: ProcessDailyFX

def ProcessDailyFX():
   global counter
   dailyfx_start_url = 'http://www.dailyfx.com/forex_market_news/'
   dailyfx_base_url = 'http://www.dailyfx.com'
   dailyfx_write_path = '/home/ubuntu/news_scrape/dailyfx_data/'
   html = GetURLText(dailyfx_start_url)
   soup = BeautifulSoup(html, 'html5lib')

   divs = soup.findAll('div', attrs={'class': 'secondary-box-content'})

   for a_div in divs:
      for a_link in a_div.findAll('a'):
         a_url = a_link['href']
         new_html = GetURLText(dailyfx_base_url + a_url)
         # strftime() returned a plain string, so this is str.replace:
         # make the timestamp safe to embed in a filename.
         time = strftime("%Y-%m-%d %H:%M:%S")
         time = time.replace(' ', '_').replace(':', '-')
         f = open(dailyfx_write_path + str(counter) + '_' + time + '.html', 'w')
         f.write(new_html.encode('utf-8'))
         f.close()
         counter += 1
         break  # only the first link of each box

   divs = soup.findAll('div', attrs={'class': 'main-article-non-home'})

   for a_div in divs:
      for a_link in a_div.findAll('a'):
         a_url = a_link['href']
         new_html = GetURLText(dailyfx_base_url + a_url)
         time = strftime("%Y-%m-%d %H:%M:%S")
         time = time.replace(' ', '_').replace(':', '-')
         f = open(dailyfx_write_path + str(counter) + '_' + time + '.html', 'w')
         f.write(new_html.encode('utf-8'))
         f.close()
         counter += 1
         break  # only the first link of each box
Developer: ajc289, Project: ib, Lines: 35, Source: crawl.py

Example 2: getUpdate

def getUpdate(i):
    global bugidlist
    number = i
    cj = cookielib.MozillaCookieJar()
    cj.load(os.path.join(os.path.expanduser(""), "cookies.txt"))
    url1 = "https://bugs.launchpad.net/bugs/+bugs?field.searchtext=&search=Search&field.status%3Alist=NEW&field.status%3Alist=OPINION&field.status%3Alist=INVALID&field.status%3Alist=WONTFIX&field.status%3Alist=EXPIRED&field.status%3Alist=CONFIRMED&field.status%3Alist=TRIAGED&field.status%3Alist=INPROGRESS&field.status%3Alist=INCOMPLETE_WITH_RESPONSE&field.status%3Alist=INCOMPLETE_WITHOUT_RESPONSE&assignee_option=any&field.assignee=&field.bug_reporter=&field.bug_commenter=&field.subscriber=&field.tag=&field.tags_combinator=ANY&field.status_upstream-empty-marker=1&field.has_cve.used=&field.omit_dupes.used=&field.omit_dupes=on&field.affects_me.used=&field.has_patch.used=&field.has_branches.used=&field.has_branches=on&field.has_no_branches.used=&field.has_no_branches=on&field.has_blueprints.used=&field.has_blueprints=on&field.has_no_blueprints.used=&field.has_no_blueprints=on&orderby=-date_last_updated&start="
    url2 = "%d" % (number)
    url = url1 + url2
    opener = urllib2.build_opener(urllib2.HTTPCookieProcessor(cj))
    r = opener.open(url).read()
    data = r.decode("utf-8")
    soup = BeautifulSoup(data)
    tab = []
    try:
        tab = soup.findAll("div", {"class": "buglisting-row"})
    except:
        pass
    try:
        for x in range(0, len(tab)):
            table = tab[x]
            bugid = table.find("span", {"class": "bugnumber"}).renderContents().decode("utf-8").strip()
            time = table.find("span", {"class": "sprite milestone field"}).renderContents().decode("utf-8").strip()
            bugid = bugid.replace("#", "")
            bugidlist.append(bugid)
            # str.replace: strip the label words and dashes from the
            # relative-time text, e.g. "Last updated on 2014-01-02"
            # -> "20140102" (plus stray whitespace).
            time = time.replace("Last updated", "")
            time = time.replace("ago", "")
            time = time.replace("on", "")
            time = time.replace("-", "")
            out_file = open("bugidlist.txt", "a")
            term = bugid + " "
            out_file.write(term)
            out_file.close()
            lastedtime[0] = time
    except:
        pass
Developer: leexung, Project: web-crawler, Lines: 35, Source: launchpad.py

Example 3: init

 def init(self, thread=None, log_name=None):
   self.log_name = log_name
   if thread is not None:
     self.name = self.name+"_thread:"+str(thread)
   if self.log is None:
     if thread is None:
       # Creating a log file
       self.log_name = caller = self.name
       self.file_path = settings.log_file_path
       # str(datetime.now()) looks like '2014-01-02 13:45:09.123456';
       # str.replace makes it safe to embed in a file name.
       time = datetime.now()
       time = str(time)
       time = time.replace(' ', '')
       time = time.replace(':', '-')
       self.file_name = "%s_%s_%s_%s.log" % (caller, caller, caller, time)
       self.file = self.file_path+self.file_name
       f = open(self.file, "w")
       f.close()
       logging.basicConfig(filename=self.file, level=logging.DEBUG, format='%(asctime)s - %(name)s - %(levelname)s - %(message)s')
     log = logging.getLogger(self.log_name)
     log.setLevel(logging.INFO)
     # create console handler and set level to debug
     ch = logging.StreamHandler()
     ch.setLevel(logging.INFO)
     # create formatter
     formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
     # add formatter to ch
     ch.setFormatter(formatter)
     # add ch to logger
     log.addHandler(ch)
     self.log = log
   self.log.info("%s initiated" % (self.name))
Developer: lundstrj, Project: TTS, Lines: 31, Source: testcase.py

Example 4: link_parse

 def link_parse(self, html, base):
     if not html or not base: return False  # nothing to parse
     soup = BeautifulSoup(html)
     for li in soup.findAll('li'):
         try:
             li.contents[0].contents[0]
         except:
             continue
         title = li.contents[0].contents[0]
         href = li.contents[0]["href"]
         time = li.contents[1].strip()
         # str.replace: drop the parentheses around the date text
         time = time.replace(u')', "")
         time = time.replace(u'(', "")
         if not href: continue
         if href in self.suburl: continue
         href = self.normal_url(self.get_safe_utf8(urljoin(base, self.get_safe_utf8(href))))
         # keep only links posted today
         if time == self.today:
             self.suburl[href] = (title, time)
     return True
Developer: earthwu, Project: spider, Lines: 25, Source: spider.py

Example 5: ProcessCNBC

def ProcessCNBC():
   global counter
   cnbc_start_url = 'http://www.cnbc.com/id/15839121/'
   cnbc_base_url = 'http://www.cnbc.com'
   cnbc_write_path = '/home/ubuntu/news_scrape/cnbc_data/'
   html = GetURLText(cnbc_start_url)
   soup = BeautifulSoup(html, 'html5lib')

   divs = soup.findAll('div', attrs={'class': 'asset cnbcnewsstory big'})

   for a_div in divs:
      for a_link in a_div.findAll('a'):
         a_url = a_link['href']
         new_html = GetURLText(cnbc_base_url + a_url)
         # same filename-safe timestamp pattern as Example 1
         time = strftime("%Y-%m-%d %H:%M:%S")
         time = time.replace(' ', '_').replace(':', '-')
         f = open(cnbc_write_path + str(counter) + '_' + time + '.html', 'w')
         f.write(new_html.encode('utf-8'))
         f.close()
         counter += 1

   divs = soup.findAll('div', attrs={'class': 'asset cnbcnewsstory'})

   for a_div in divs:
      for a_link in a_div.findAll('a'):
         a_url = a_link['href']
         new_html = GetURLText(cnbc_base_url + a_url)
         time = strftime("%Y-%m-%d %H:%M:%S")
         time = time.replace(' ', '_').replace(':', '-')
         f = open(cnbc_write_path + str(counter) + '_' + time + '.html', 'w')
         f.write(new_html.encode('utf-8'))
         f.close()
         counter += 1
Developer: ajc289, Project: ib, Lines: 33, Source: crawl.py

Example 6: currentTime

def currentTime():
	# str(datetime.utcnow()) is '2014-01-02 13:45:09.123456';
	# squeeze it into a compact UTC stamp like '20140102T134509Z'.
	time = str(datetime.utcnow())
	time = time.replace(' ', 'T')
	time = time.replace('-', '')
	time = time.replace(':', '')
	time = time.split('.')[0]
	time = time + 'Z'
	return time
Developer: dqwiki, Project: DeltaQuadBot, Lines: 8, Source: globalfunc.py

Example 7: get_date

def get_date(time):
    """
    Get the tweeting date of the tweet.
    """
    # Tweets posted up to 30 minutes before the configured daily start
    # time still count toward the previous day's drawing.
    threshold_time = (parse(setting['start_time']) - datetime.timedelta(minutes=30)).time()
    if time.time() < threshold_time:
        # datetime.replace: truncate to midnight, then step back one day
        return time.replace(hour=0, minute=0, second=0, microsecond=0, tzinfo=None) - datetime.timedelta(days=1)
    else:
        return time.replace(hour=0, minute=0, second=0, microsecond=0, tzinfo=None)
Developer: sakuramochi0, Project: 1draw, Lines: 9, Source: onedraw.py
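
The threshold logic in Example 7 is easier to see with concrete values. A minimal runnable sketch, assuming a setting dict of the shape the snippet implies (the '22:00' start time and the inputs are invented):

import datetime
from dateutil.parser import parse

setting = {'start_time': '22:00'}  # hypothetical config value

def get_date(time):
    threshold_time = (parse(setting['start_time']) - datetime.timedelta(minutes=30)).time()
    if time.time() < threshold_time:
        return time.replace(hour=0, minute=0, second=0, microsecond=0, tzinfo=None) - datetime.timedelta(days=1)
    return time.replace(hour=0, minute=0, second=0, microsecond=0, tzinfo=None)

print(get_date(datetime.datetime(2014, 5, 1, 21, 15)))  # before 21:30 -> 2014-04-30 00:00:00
print(get_date(datetime.datetime(2014, 5, 1, 23, 5)))   # after 21:30  -> 2014-05-01 00:00:00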

Example 8: reminder_time

def reminder_time(input):
	global next_func
	next_func = None
	time = datetime.datetime.today()
	words, has_nums = parse_to_nums(input.lower().split())
	day_identifier = [i for i in words if i in weekdays]
	if "after tomorrow" in input.lower():
		time = time+datetime.timedelta(days=2)
	elif 'tomorrow' in words:
		time = time+datetime.timedelta(days=1)
	elif day_identifier:
		day = weekdays.index(day_identifier[0])
		print day
		time = time+datetime.timedelta(days=(day-time.weekday())%7)

	hour = [i for i in words if i in times]
	if hour:
		# datetime.replace: pin the time of day, keep the date
		time = time.replace(hour=times[hour[0]], minute=0, second=0, microsecond=0)
	hour = False
	minute = 0
	pmam = False
	bell = False
	for i in words:
		if hasattr(i, '__int__'):
			if type(hour)==bool and not hour:
				hour = i
			elif not minute:
				minute = i
		if i in bells:
			bell = True
			minute = bells[i]
		elif bell:
			if i in ['as', 'past', 'after']:
				pass
			elif i in ['to', 'told', 'til', 'till', 'of']:
				minute = -minute
			bell = False
		elif pmam:
			if i=='m.':
				if pmam=='pm':
					hour = hour%12+12
				elif pmam=='am':
					hour = hour%12
			pmam = False
		elif i in ['p.', 'a.']:
			pmam = {'p.':'pm','a.':'am'}[i]
	if minute<0:
		hour = (hour-1)%24
		minute = 60+minute
	if type(hour)==bool:
		hour = time.hour
	tf = tempfile.NamedTemporaryFile()
	tf.write('espeak "'+input+'"')
	tf.flush()  # make sure `at` can read the command from the file
	time = time.replace(hour=hour, minute=minute, second=0, microsecond=0)
	os.system('at -t '+time.strftime('%Y%m%d%H%M')+' -f '+tf.name)

	return str(time), None
Developer: nodevel, Project: saera, Lines: 57, Source: saera_processing.py

Example 9: getTime

def getTime(staElem):
    try:
        timeNode = staElem.find_element_by_css_selector('a.S_link2.WB_time')
        # str.replace: keep only the digits of the timestamp tooltip
        time = timeNode.get_attribute("title")
        time = time.replace('-', '')
        time = time.replace(':', '')
        time = time.replace(' ', '')
    except:
        time = "void"
    return time
Developer: B-Rich, Project: WSpi, Lines: 10, Source: PageDriver.py

Example 10: extractTime

 def extractTime(self, log_text):
     totals_line = self.extractTotalsLine(log_text)
     if not totals_line:
         return None
     # Total: X tests, X failures, X errors in [X minutes] X.Y seconds.
     time = totals_line.split(' in ')[-1]
     time = time.replace(' minutes ', 'm')
     time = time.replace(' seconds.', 's')
     time = re.sub('[.][0-9]+s', 's', time)  # drop the fractional seconds
     return time
Developer: securactive, Project: sact.buildbot.custom, Lines: 10, Source: buildout.py
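
To make the normalization in Example 10 concrete, here is the same chain of transformations run on an invented totals line:

import re

time = "Total: 3 tests, 0 failures, 0 errors in 2 minutes 13.40 seconds.".split(' in ')[-1]
time = time.replace(' minutes ', 'm')    # '2m13.40 seconds.'
time = time.replace(' seconds.', 's')    # '2m13.40s'
time = re.sub('[.][0-9]+s', 's', time)   # '2m13s'
print(time)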

Example 11: get_datetime

    def get_datetime(cls, f_timestep, meteocenter, s_timestep, hashtag):
        date = datetime.strptime(f_timestep.attrib.get('value'), '%Y%m%d')
        time = datetime.now()

        if s_timestep.attrib.get('value') == '24:00':
            # strptime cannot parse '24:00'; map end-of-day to 00:00
            # with datetime.replace instead.
            time = time.replace(hour=0)
            time = time.replace(minute=0)
        else:
            time = datetime.strptime(s_timestep.attrib.get('value'), '%H:%M')

        datestr = '%s-%s-%s %s:%s' % (date.year, date.month, date.day, time.hour, time.minute)
        return datetime.strptime(datestr, meteocenter.date_reg)
Developer: BystryakovSimon, Project: weather, Lines: 12, Source: tiempo.py

Example 12: __getIntFromTime

	def __getIntFromTime(self, time):
		pm = False
		if "PM" in time.upper():
			pm = True

		if "." in time or ":" in time:
			# keep only the hour part, e.g. "9:30 PM" -> "9"
			time = time.replace(":", ".")
			time = time[:time.find(".")]

		# str.replace: strip the meridiem suffix before converting
		time = int(time.replace("AM", "").replace("PM", "").replace("am", "").replace("pm", ""))
		if pm: time = int(time)+12

		return str(time)
Developer: alenacci, Project: BuildingRules, Lines: 13, Source: datetimeTriggerDriver.py
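
A quick usage sketch for Example 12, using a standalone copy of the method's logic (the function name and inputs are mine, for illustration only):

def get_int_from_time(time):
    pm = "PM" in time.upper()
    if "." in time or ":" in time:
        time = time.replace(":", ".")
        time = time[:time.find(".")]    # keep only the hour digits
    time = int(time.replace("AM", "").replace("PM", "").replace("am", "").replace("pm", ""))
    if pm:
        time = time + 12
    return str(time)

print(get_int_from_time("9:30 PM"))  # '21' -- minutes are discarded
print(get_int_from_time("11 AM"))    # '11'
print(get_int_from_time("12 PM"))    # '24' -- note: this logic maps noon to 24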

Example 13: init

  def init(self, url=None, nbr=None, logger=None):
    # Creating a log file
    if nbr is not None:
      self.name = self.name+"_thread_"+str(nbr)
    if logger is None:
      caller = self.name
      project_name = 'prox'
      build = '0001'
      self.file_path = settings.log_file_path
      # str.replace: make str(datetime.now()) filename-safe, as in Example 3
      time = datetime.now()
      time = str(time)
      time = time.replace(' ', '')
      time = time.replace(':', '-')
      self.file_name = "%s_%s_%s_%s.log" % (caller, project_name, build, time)
      self.file = self.file_path+self.file_name
      f = open(self.file, "w")
      f.close()
      logging.basicConfig(filename=self.file, level=logging.DEBUG, format='%(asctime)s - %(name)s - %(levelname)s - %(message)s')
    else:
      log = logger
    log = logging.getLogger(self.name)
    log.setLevel(logging.INFO)

    # create console handler and set level to debug
    ch = logging.StreamHandler()
    ch.setLevel(logging.INFO)

    # create formatter
    formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')

    # add formatter to ch
    ch.setFormatter(formatter)

    # add ch to logger
    log.addHandler(ch)

    self.prox = prox_front.ProX()
    self.prox.init(settings.prox_url)

    log.info("%s initiated" % (self.name))
    self.log = log
    logg = log
Developer: lundstrj, Project: TTS, Lines: 51, Source: simple_load_test.py

Example 14: ProcessBloomberg

def ProcessBloomberg():
   global counter
   bloomberg_start_url = 'http://www.bloomberg.com/news/'
   bloomberg_base_url = 'http://www.bloomberg.com'
   bloomberg_write_path = '/home/ubuntu/news_scrape/bloomberg_data/'

   html = GetURLText(bloomberg_start_url)
   soup = BeautifulSoup(html, 'html5lib')

   divs = soup.findAll('div', attrs={'id': 'markets_news'})

   for a_div in divs:
      for a_link in a_div.findAll('a'):
         a_url = a_link['href']
         if a_url != '/news/markets/':  # skip the section-index link
            new_html = GetURLText(bloomberg_base_url + a_url)
            time = strftime("%Y-%m-%d %H:%M:%S")
            time = time.replace(' ', '_').replace(':', '-')
            f = open(bloomberg_write_path + str(counter) + '_' + time + '.html', 'w')
            f.write(new_html.encode('utf-8'))
            f.close()
            counter += 1

   divs = soup.findAll('div', attrs={'id': 'industries_news'})

   for a_div in divs:
      for a_link in a_div.findAll('a'):
         a_url = a_link['href']
         if a_url != '/news/industries/':
            new_html = GetURLText(bloomberg_base_url + a_url)
            time = strftime("%Y-%m-%d %H:%M:%S")
            time = time.replace(' ', '_').replace(':', '-')
            f = open(bloomberg_write_path + str(counter) + '_' + time + '.html', 'w')
            f.write(new_html.encode('utf-8'))
            f.close()
            counter += 1

   divs = soup.findAll('div', attrs={'id': 'economy_news'})

   for a_div in divs:
      for a_link in a_div.findAll('a'):
         a_url = a_link['href']
         if a_url != '/news/economy/':
            new_html = GetURLText(bloomberg_base_url + a_url)
            time = strftime("%Y-%m-%d %H:%M:%S")
            time = time.replace(' ', '_').replace(':', '-')
            f = open(bloomberg_write_path + str(counter) + '_' + time + '.html', 'w')
            f.write(new_html.encode('utf-8'))
            f.close()
            counter += 1
Developer: ajc289, Project: ib, Lines: 50, Source: crawl.py

Example 15: ProcessSeekingAlpha

def ProcessSeekingAlpha():
   global counter
   seeking_alpha_start_url = 'http://www.seekingalpha.com'
   seeking_alpha_alt_start_url = 'http://seekingalpha.com/analysis/macro-view/all'
   seeking_alpha_base_url = 'http://www.seekingalpha.com'
   seeking_alpha_write_path = '/home/ubuntu/news_scrape/seekingalpha_data/'
   html = GetURLTextAuth(seeking_alpha_start_url)
   soup = BeautifulSoup(html, 'html5lib')

   divs = soup.findAll('div', attrs={'id': 'hp_news_unit'})
   for a_div in divs:
      for a_link in a_div.findAll('a'):
         a_url = a_link['href']
         new_html = GetURLTextAuth(seeking_alpha_base_url + a_url)
         print (seeking_alpha_base_url + a_url)
         time = strftime("%Y-%m-%d %H:%M:%S")
         time = time.replace(' ', '_').replace(':', '-')
         f = open(seeking_alpha_write_path + str(counter) + '_' + time + '.html', 'w')
         f.write(new_html.encode('utf-8'))
         f.close()
         counter += 1

   divs = soup.findAll('div', attrs={'class': 'articles'})
   for a_div in divs:
      for a_list in a_div.findAll('li'):
         for a_link in a_list.findAll('a'):
            a_url = a_link['href']
            new_html = GetURLTextAuth(seeking_alpha_base_url + a_url)
            print (seeking_alpha_base_url + a_url)
            time = strftime("%Y-%m-%d %H:%M:%S")
            time = time.replace(' ', '_').replace(':', '-')
            f = open(seeking_alpha_write_path + str(counter) + '_' + time + '.html', 'w')
            f.write(new_html.encode('utf-8'))
            f.close()
            counter += 1
            break  # only the first link of each list item

   html = GetURLTextAuth(seeking_alpha_alt_start_url)
   soup = BeautifulSoup(html, 'html5lib')

   for a_link in soup.findAll('a', attrs={'class': 'article_title'}):
      a_url = a_link['href']
      new_html = GetURLTextAuth(seeking_alpha_base_url + a_url)
      print (seeking_alpha_base_url + a_url)
      time = strftime("%Y-%m-%d %H:%M:%S")
      time = time.replace(' ', '_').replace(':', '-')
      f = open(seeking_alpha_write_path + str(counter) + '_' + time + '.html', 'w')
      f.write(new_html.encode('utf-8'))
      f.close()
      counter += 1
Developer: ajc289, Project: ib, Lines: 50, Source: crawl.py


Note: the time.replace examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other open-source code and documentation platforms. The snippets were selected from open-source projects contributed by many developers; copyright remains with the original authors, and any redistribution or use should follow the corresponding project's license. Do not reproduce without permission.