

Python string.find Function Code Examples

This article collects typical usage examples of the string.find function in Python. If you are wondering how the find function works, how to call it, or what real-world usage looks like, the hand-picked code examples below should help.


The following presents 15 code examples of the find function, sorted by popularity by default.
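Note that string.find here is the module-level helper from Python 2's string module; it mirrors the str.find() method, was deprecated in Python 2, and was removed in Python 3 (where only s.find(sub) remains). A minimal sketch of its behavior under Python 2 (the sample string is made up for illustration):

import string

s = 'hello world'
print string.find(s, 'world')    # 6  -- index of the first match
print string.find(s, 'xyz')      # -1 -- substring not found
print string.find(s, 'o', 5)     # 7  -- the optional start (and end) arguments bound the search
# Python 3 equivalent: s.find('world'), s.find('xyz'), s.find('o', 5)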

Example 1: countSubStringMatch

import string   # Python 2: the module-level find() mirrors str.find()

def countSubStringMatch(target, key):
    # count the matches of key in target, resuming each search at the
    # last character of the previous match
    count = 0
    initialIndex = 0
    while string.find(target, key, initialIndex) != -1:
        count += 1
        initialIndex = string.find(target, key, initialIndex) + len(key) - 1
    print count
Developer: vominhhoang308, Project: python, Lines: 7, Source: ps3.py
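A brief usage sketch (Python 2): the call below prints 2, because 'an' starts at indices 1 and 3 of 'banana' and the search resumes at the last character of each match.

countSubStringMatch('banana', 'an')    # prints 2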

Example 2: _test_changing_upstream_list

    def _test_changing_upstream_list(self):
        bus.queryenv_service = qe
        config = bus.config
        sect_name = nginx.CNF_SECTION
        nginx_incl = "/etc/nginx/app-servers.include"
        config.set(sect_name, "app_include_path",nginx_incl)

        custom_include = 'upstream backend {\n\n        server  8.8.8.8:80\tweight=5;\n\n       server  7.7.7.7:80\tdebug;\n}'
        print custom_include
        with open(nginx_incl, 'w') as fp:
            fp.write(custom_include)

        n = nginx.NginxHandler()
        n._reload_upstream()
        n._reload_upstream()

        new_incl = None
        with open(nginx_incl, 'r') as fp:
            new_incl = fp.read()
        print new_incl

        # queryenv has only 8.8.8.8 in list_roles, so 7.7.7.7 is not supposed to exist
        self.assertRaises(ValueError, string.index, *(new_incl, '7.7.7.7;'))
        # ip_hash wasn't in the original file, so after reconfiguration it is not supposed to exist either
        self.assertRaises(ValueError, string.index, *(new_incl, 'ip_hash;'))
        # 8.8.8.8 had the 'weight' option, so it is not supposed to have vanished
        self.assertNotEquals(string.find(new_incl, 'weight=5;'), -1)
        # check that there is only one include
        include_str = 'include  /etc/nginx/proxies.include;'
        self.assertNotEquals(string.find(new_incl, include_str), -1)
        self.assertEquals(string.find(new_incl, include_str), string.rfind(new_incl, include_str))
Developer: AnyBucket, Project: scalarizr, Lines: 31, Source: test_nginx.py

Example 3: getPage

    def getPage(self, url):
        r = Retrive(url)
        retval = r.download()
        if retval[0] == '*':
            print retval, 'sss'
            return
        Crawler.count += 1
        self.seen.append(url)

        links = r.parseAndGetLinks()

        for eachlink in links:
            if eachlink[:4] != 'http' and find(eachlink, '://') == -1:
                eachlink = urljoin(url, eachlink)
            print '* ',eachlink

            if eachlink not in self.seen:
                if find(eachlink, self.dom) == -1:
                    print '  ...discarded,not in domain'
                else:
                    if eachlink not in self.q:
                        self.q.append(eachlink)
                        print ' ...new, added to Q'
                    else:
                        print ' ...discarded,already in Q'
            else:
                print ' ...discarded,process'
Developer: refinedKing, Project: scripts, Lines: 27, Source: crawl.py

Example 4: piped_spawn

def piped_spawn(sh, escape, cmd, args, env, stdout, stderr):
    # There is no direct way to do that in python. What we do
    # here should work for most cases:
    #   In case stdout (stderr) is not redirected to a file,
    #   we redirect it into a temporary file tmpFileStdout
    #   (tmpFileStderr) and copy the contents of this file
    #   to stdout (stderr) given in the argument
    if not sh:
        sys.stderr.write("scons: Could not find command interpreter, is it in your PATH?\n")
        return 127
    else:
        # one temporary file for stdout and stderr
        tmpFileStdout = os.path.normpath(tempfile.mktemp())
        tmpFileStderr = os.path.normpath(tempfile.mktemp())

        # check if output is redirected
        stdoutRedirected = 0
        stderrRedirected = 0
        for arg in args:
            # are there more possibilities to redirect stdout ?
            if string.find(arg, ">", 0, 1) != -1 or string.find(arg, "1>", 0, 2) != -1:
                stdoutRedirected = 1
            # are there more possibilities to redirect stderr ?
            if string.find(arg, "2>", 0, 2) != -1:
                stderrRedirected = 1

        # redirect output of non-redirected streams to our tempfiles
        if stdoutRedirected == 0:
            args.append(">" + str(tmpFileStdout))
        if stderrRedirected == 0:
            args.append("2>" + str(tmpFileStderr))

        # actually do the spawn
        try:
            args = [sh, "/C", escape(string.join(args))]
            ret = os.spawnve(os.P_WAIT, sh, args, env)
        except OSError, e:
            # catch any error
            try:
                ret = exitvalmap[e[0]]
            except KeyError:
                sys.stderr.write("scons: unknown OSError exception code %d - %s: %s\n" % (e[0], cmd, e[1]))
            if stderr != None:
                stderr.write("scons: %s: %s\n" % (cmd, e[1]))
        # copy child output from tempfiles to our streams
        # and do clean up stuff
        if stdout != None and stdoutRedirected == 0:
            try:
                stdout.write(open(tmpFileStdout, "r").read())
                os.remove(tmpFileStdout)
            except (IOError, OSError):
                pass

        if stderr != None and stderrRedirected == 0:
            try:
                stderr.write(open(tmpFileStderr, "r").read())
                os.remove(tmpFileStderr)
            except (IOError, OSError):
                pass
        return ret
Developer: BackupTheBerlios, Project: sconsexe-svn, Lines: 60, Source: win32.py

Example 5: processFlexResLinesV4

 def processFlexResLinesV4(self, lines):
     #print "in processFlexResLinesV4: len(self.ligLines=)", len(self.ligLines)
     if self.version!=4.0:
         print "not version 4.0! RETURNING!!"
         return
     ligLINES = []
     foundRun = 0
     ind = 21
     for l in lines:
         #in clustering dlg, multiple copies of input-pdbq are present
         if find(l, 'Run')>-1 and foundRun:
             break
         elif find(l, 'Run')>-1:
             foundRun = 1
         elif find(l, '^_____________________')>-1:
             #last line is ________________-
             break
         else:
             ligLINES.append(l[ind:-1])
     #check here to remove lines of just spaces
     nl = []
     for l in ligLINES:
         if len(strip(l)):
             nl.append(l)
     self.flex_res_lines = nl
     #print "end pFRLV4: len(self.flex_res_lines)=", len(nl)
     #print "end processFlexResLinesV4: len(self.ligLines=)", len(self.ligLines)
     self.hasFlexRes = True
     self.flex_res_count = nl.count("REMARK  status: ('A' for Active; 'I' for Inactive)")
Developer: marekolsak, Project: fastgrid, Lines: 29, Source: DlgParser.py

Example 6: _load_resource_from_path

def _load_resource_from_path(app_root, service_name, conf, event_no):

    queue_name = conf['SQS_QUEUE']
    mock_root = app_root + '/../mock' 
    std_root = mock_root
    if 'MOCK_ROOT' in conf and conf['MOCK_ROOT'] is not None:
        mock_root = conf['MOCK_ROOT']
    root = mock_root
    fname = 'event'
    fpath = '/' + service_name + '/' + queue_name + '/' + fname + '.' + str(event_no)

    try:
        file_path = convert_to_platform_safe(root + fpath)
        logger.info('mock file: ' + file_path)
        handle = open(file_path)
    except IOError:
        if std_root is not mock_root:
            # a custom MOCK_ROOT was configured; fall back to the default mock directory
            try:
                file_path = convert_to_platform_safe(std_root + fpath)
                logger.info('mock file: ' + file_path)
                handle = open(file_path)
            except IOError:
                return
        else:
            # no fallback root to try, so there is no mock data for this event
            return

    data = handle.read()
    cut = string.find(data,'MOCKDATA-MOCKDATA-MOCKDATA')
    if cut>=0:
        data = data[string.find(data, '\n', cut)+1:]
    response = json.loads(data)
    return response
Developer: mattjmuw, Project: iam-messaging, Lines: 30, Source: mock.py

Example 7: check_config_h

def check_config_h():
    """Check if the current Python installation (specifically, pyconfig.h)
    appears amenable to building extensions with GCC.  Returns a tuple
    (status, details), where 'status' is one of the following constants:
      CONFIG_H_OK
        all is well, go ahead and compile
      CONFIG_H_NOTOK
        doesn't look good
      CONFIG_H_UNCERTAIN
        not sure -- unable to read pyconfig.h
    'details' is a human-readable string explaining the situation.
    
    Note there are two ways to conclude "OK": either 'sys.version' contains
    the string "GCC" (implying that this Python was built with GCC), or the
    installed "pyconfig.h" contains the string "__GNUC__".
    """
    from distutils import sysconfig
    import string
    if string.find(sys.version, 'GCC') >= 0:
        return (CONFIG_H_OK, "sys.version mentions 'GCC'")
    fn = sysconfig.get_config_h_filename()
    try:
        f = open(fn)
        try:
            s = f.read()
        finally:
            f.close()

    except IOError as exc:
        return (CONFIG_H_UNCERTAIN, "couldn't read '%s': %s" % (fn, exc.strerror))

    if string.find(s, '__GNUC__') >= 0:
        return (CONFIG_H_OK, "'%s' mentions '__GNUC__'" % fn)
    else:
        return (CONFIG_H_NOTOK, "'%s' does not mention '__GNUC__'" % fn)
Developer: webiumsk, Project: WOT-0.9.14-CT, Lines: 35, Source: emxccompiler.py
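A hedged usage sketch, assuming the module-level CONFIG_H_* constants referenced in the docstring are available:

status, details = check_config_h()
if status != CONFIG_H_OK:
    print 'pyconfig.h check:', details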

Example 8: strToEventList

def strToEventList( str_in ):
    events_idx = string.find(str_in, 'Events')  # -1 is not found
    start_idx = string.find(str_in, '[', events_idx)  # -1 is not found

    psn = start_idx+1
    square_count = 1
    curly_count = 0
    events_str_array = []
    while square_count > 0 and psn < len(str_in):  # stop before running past the end of the string
        cur = str_in[psn]

        #if args.verbose:
        #    print (psn, cur, square_count, curly_count, len(events_str_array))

        if cur == '[':
            square_count += 1
        elif cur == ']':
            square_count -= 1
        elif cur == '{':
            if curly_count == 0:
                begin_psn = psn
            curly_count += 1
        elif cur == '}':
            if curly_count == 1:
                events_str_array.append( str_in[begin_psn:psn+1] )
            curly_count -= 1
        psn += 1

    return events_str_array
Developer: rbaker-idmod, Project: EMOD, Lines: 29, Source: regression_utils.py
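A minimal usage sketch (the input string is made up; anything with a JSON-like 'Events' array works, and import string is assumed at module level as in the source file). The bracket counting collects each top-level {...} block inside the array:

campaign = '{ "Events": [ { "class": "A" }, { "class": "B" } ] }'
print strToEventList(campaign)
# prints: ['{ "class": "A" }', '{ "class": "B" }']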

Example 9: walking

def walking(skip, dirname, names):
  print
  if dirname in skip:
    print 'skipping', dirname
  else:
    print 'working in', dirname
    for name in names:
      if dirname!=os.curdir:
          filename = os.path.join(dirname, name)
      else:
          filename = name
      if os.path.isfile(filename)==1:
        if string.find(filename, ".htm")<>-1 \
               or string.find(filename, ".shtm")<>-1:
          print 'file:', filename, '  ----  ',
          # fix and validate xhtml
          print 'Tidy,'
          os.system('tidy -q -m ' + filename)
          # to be added: linbot link check
          # to be added: bobby accessibility check
        elif string.find(filename, ".css")<>-1:
          #w3c css validator
          classpath = ' E:\\lib\\validator.zip org.w3c.css.css.StyleSheetCom '
          os.system('java -classpath' + classpath + filename)
        else:
          print 'file:', filename, '  ----  ',
          print 'no processing'
Developer: AAthresh, Project: quantlib, Lines: 27, Source: webclean.py
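walking has the (arg, dirname, names) signature expected by the Python 2 os.path.walk() visitor; a hedged usage sketch (the skip list is hypothetical, and the function itself assumes os and string are imported at module level):

import os
skip = [os.path.join(os.curdir, 'build')]   # hypothetical directories to leave untouched
os.path.walk(os.curdir, walking, skip)      # Python 2 only; os.walk() replaces it in Python 3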

Example 10: find_tv_show_season

def find_tv_show_season(content, tvshow, season):
    url_found = None
    possible_matches = []
    all_tvshows = []

    h = HTMLParser.HTMLParser()
    for matches in re.finditer(movie_season_pattern, content, re.IGNORECASE | re.DOTALL):
        found_title = matches.group('title')
        found_title = h.unescape(found_title)

        log(__name__, "Found tv show season on search page: %s" % found_title)
        s = difflib.SequenceMatcher(None, string.lower(found_title + ' ' + matches.group('year')), string.lower(tvshow))
        all_tvshows.append(matches.groups() + (s.ratio() * int(matches.group('numsubtitles')),))
        if string.find(string.lower(found_title), string.lower(tvshow) + " ") > -1:
            if string.find(string.lower(found_title), string.lower(season)) > -1:
                log(__name__, "Matching tv show season found on search page: %s" % found_title)
                possible_matches.append(matches.groups())

    if len(possible_matches) > 0:
        possible_matches = sorted(possible_matches, key=lambda x: -int(x[3]))
        url_found = possible_matches[0][0]
        log(__name__, "Selecting matching tv show with most subtitles: %s (%s)" % (
            possible_matches[0][1], possible_matches[0][3]))
    else:
        if len(all_tvshows) > 0:
            all_tvshows = sorted(all_tvshows, key=lambda x: -int(x[4]))
            url_found = all_tvshows[0][0]
            log(__name__, "Selecting tv show with highest fuzzy string score: %s (score: %s subtitles: %s)" % (
                all_tvshows[0][1], all_tvshows[0][4], all_tvshows[0][3]))

    return url_found
Developer: matrixn, Project: service.subtitles.subscene, Lines: 31, Source: service.py

Example 11: shortcreateurls

def shortcreateurls(input):
	curloc = 0
	while curloc <> -1:
		curloc = string.find(input,"http://",curloc)
		if -1 <> curloc:
			maxend = string.find(input," ",curloc)
			if maxend == -1:
				maxend = len(input)
			length = maxend-curloc
			a = input[curloc+length-1]
			while a == '.' or a == ']' or a == ')' or a == ',' or a == ';':
				length -= 1
				a = input[curloc+length-1]
			firstslash = curloc+7
			while firstslash<len(input) and input[firstslash] <> "/" and firstslash<curloc+length:
				firstslash += 1
			output = '('+input[curloc+7:firstslash]+") "+'<a class="saxgray" href="'
			output = output + input[curloc:curloc+length] + '" target="_blank">[Link]</a>'
			print(output)
			newlen = len(output)
			if curloc > 0:
				output = input[0:curloc-1] + output
			if curloc + length < len(input):
				output = output + input[curloc+length:-1]
			input = output
			curloc = curloc+newlen
	return shorturls(input,0)
Developer: ElectronicsGeek, Project: gCn, Lines: 27, Source: gcnhub.py

Example 12: getspec

def getspec( infile, region='relpix,box(-2,-2,0,0)', vsource=5., hann=5, tmpfile="junk" ):
    '''dump out spectrum of selected region with imspec, return [chan, freqLSR, flux] arrays'''

  # step 1: use imlist to retrieve velocity and freq information from the header
    p= subprocess.Popen( ( shlex.split('imlist in=%s' % infile) ), \
        stdout=subprocess.PIPE,stdin=subprocess.PIPE,stderr=subprocess.STDOUT)
    result = p.communicate()[0]
    lines = result.split("\n")
    for line in lines :
      if len(line) > 1 :
        a = line.split()
        n = string.find( line, "restfreq:" )
        if n >= 0 :
          restfreq = float( line[n+9:].split()[0] )
        n = string.find( line, "crval3  :" )
        if n >= 0 :
          v1 = float( line[n+9:].split()[0] )
        n = string.find( line, "cdelt3  :" )
        if n >= 0 :
          dv = float( line[n+9:].split()[0] )
    print "restfreq = %.5f GHz; v1 = %.3f km/sec; dv = %.3f km/sec" % (restfreq,v1,dv)        

  # step 2: use imspec to dump out the spectrum for the selected region to tmpfile
    chan = []
    freq = []
    flux = []
    p= subprocess.Popen( ( shlex.split("imspec in=%s region=%s options=list,eformat,noheader,hanning,%d log=%s" % \
      (infile,region,hann,tmpfile) )), stdout=subprocess.PIPE,stdin=subprocess.PIPE,stderr=subprocess.STDOUT)
    time.sleep(1)
    result = p.communicate()[0]
    print result
    if "Fatal Error" in result :
      print " --- fatal --- "
      return

  # step 3: read velocities and flux densities from tmpfile, create arrays
    fin = open( tmpfile, "r" )
    for line in fin :
      a = line.split()
      if len(a) > 2 :
        chan.append( int(a[0]) )
        nchan = int( a[0] )
        vlsr = float( a[1] )
        flux.append( float( a[2] ) )
        vlsrcalc = v1 + (nchan - 1) * dv
        if abs(vlsrcalc-vlsr) > 0.05 :
          print "WARNING: channel %d listed vlsr = %.2f, calculated = %.2f" % (nchan,vlsr,vlsrcalc)
        fqLSR = restfreq * (1. - vlsrcalc/clight) 
        freq.append( fqLSR/(1.-vsource/clight) )
        #print nchan, vlsrcalc, fqLSR, freq[-1]
          # freq in rest frame of source
    fin.close()
    print "read in %d lines" % len(freq)

  # step 4: sort in frequency order, return arrays
    spectrum = numpy.array(sorted(zip(freq,chan,flux)))
      # this sorts the chan,freq,flux triplets in freq order
    a,b,c = numpy.split( spectrum, 3, axis=1 )
      # this returns separate freq and flux arrays
    return numpy.reshape(b,len(a)), numpy.reshape(a,len(b)), numpy.reshape(c,len(c))
Developer: richardplambeck, Project: tadpol, Lines: 60, Source: ori2.py

Example 13: query_info_Request_Structure

def query_info_Request_Structure(Query_info, fileId, *query_info_params):
        infotypefound = 0
        fileinfoclassfound = 0
        cnt = 0
        while ( cnt < len(query_info_params)):
            tmpstr = query_info_params[cnt]
            tmpstr = tmpstr.strip()
            tmpstr = tmpstr.lower()
            if( string.find(tmpstr, 'infotype') != -1):
                tmpstr = (tmpstr[(string.find(tmpstr,'=')+1):]).strip()
                infotype = eval(tmpstr)
                infotypefound = 1
            elif( string.find(tmpstr, 'fileinfoclass') !=-1):
                tmpstr = (tmpstr[(string.find(tmpstr,'=')+1):]).strip()
                fileinfoclass = eval(tmpstr)
                fileinfoclassfound = 1
                #print fileinfoclass
            cnt += 1

        if (infotypefound == 0):
            infotype = 0x0
        if (fileinfoclassfound == 0):
            fileinfoclass = 0x0

        Query_info1 = query_info_Extend_Request_Structure(Query_info, fileId, infotype, fileinfoclass)
        return Query_info1
Developer: dvenrao, Project: SMB, Lines: 26, Source: query_info.py
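A hedged call sketch: the trailing positional arguments are free-form 'name = value' strings that the function lowercases, splits on '=', and evaluates, while Query_info and fileId stand in for objects built elsewhere in the surrounding SMB code (as is query_info_Extend_Request_Structure):

req = query_info_Request_Structure(Query_info, fileId,
                                   'InfoType = 0x01',
                                   'FileInfoClass = 0x07')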

Example 14: subStringMatchExact

import string   # Python 2: the module-level find() mirrors str.find()

def subStringMatchExact(target, key):
    # collect the starting index of every match of key in target
    startingList = []
    initialIndex = 0
    while string.find(target, key, initialIndex) != -1:
        startingList.append(string.find(target, key, initialIndex))
        initialIndex = string.find(target, key, initialIndex) + len(key) - 1
    print tuple(startingList)
Developer: vominhhoang308, Project: python, Lines: 7, Source: ps3.py
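A brief usage sketch (Python 2): the call prints the tuple of starting indices of each match, here (1, 3) for 'an' in 'banana'.

subStringMatchExact('banana', 'an')    # prints (1, 3)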

Example 15: getPage

    def getPage(self, url):
        r = Retriever(url)
        retval = r.download()
        if retval[0] == '*':     # error situation, do not parse
            print retval, '... skipping parse'
            return
        Crawler.count = Crawler.count + 1
        print '\n(', Crawler.count, ')'
        print 'URL:', url
        print 'FILE:', retval[0]
        self.seen.append(url)

        links = r.parseAndGetLinks()  # get and process links
        for eachLink in links:
            if eachLink[:4] != 'http' and \
                    find(eachLink, '://') == -1:
                eachLink = urljoin(url, eachLink)
            print '* ', eachLink,

            if find(lower(eachLink), 'mailto:') != -1:
                print '... discarded, mailto link'
                continue

            if eachLink not in self.seen:
                if find(eachLink, self.dom) == -1:
                    print '... discarded, not in domain'
                else:
                    if eachLink not in self.q:
                        self.q.append(eachLink)
                        print '... new, added to Q'
                    else:
                        print '... discarded, already in Q'
            else:
                print '... discarded, already processed'
Developer: fjrti, Project: snippets, Lines: 34, Source: crawl.py


Note: The string.find examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other open-source code and documentation platforms. The snippets were selected from open-source projects contributed by their respective authors, and copyright remains with those authors. For redistribution and use, please follow the corresponding project's license; do not reproduce without permission.