

Python Literal.parseString Method Code Examples

This article collects typical usage examples of the pyparsing.Literal.parseString method in Python. If you are wondering what Literal.parseString does or how to use it, the curated examples below should help. You can also browse further usage examples of pyparsing.Literal.


The following presents 11 code examples of Literal.parseString, sorted by popularity by default. You can upvote the examples you find useful; your feedback helps the system recommend better Python code examples.

Example 1: test_disable_pyparsing_arity_trimming_works

# Required module import: from pyparsing import Literal [as alias]
# Or: from pyparsing.Literal import parseString [as alias]
from pyparsing import Literal
from pytest import raises

def test_disable_pyparsing_arity_trimming_works():
    """Tests that arity trimming has been disabled and parse actions with
    the wrong number of arguments will raise TypeErrors"""
    for func in [lambda a: None, lambda a, b: None, lambda a, b, c, d: None]:
        element = Literal('test').setParseAction(func)
        with raises(TypeError):
            element.parseString('test')
Author: borntyping, Project: python-dice, Lines: 9, Source: test_utilities.py
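
For context, pyparsing normally "trims" parse-action arity so that callables taking fewer arguments still work; with trimming disabled, only the full (s, loc, toks) signature is accepted. A minimal sketch of that full signature against stock pyparsing (trimming left enabled):

from pyparsing import Literal

def upcase(s, loc, toks):
    # full parse-action signature: original string, match location, matched tokens
    return toks[0].upper()

element = Literal('test').setParseAction(upcase)
print(element.parseString('test'))  # -> ['TEST']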

Example 2: check_unnecessary_include

# Required module import: from pyparsing import Literal [as alias]
# Or: from pyparsing.Literal import parseString [as alias]
from pyparsing import Literal, Word, alphanums, ParseException

def check_unnecessary_include(self, code):  # method excerpt from a style-checker class
    grammar = Literal('#') + Literal('include') + Literal('<') + Word(alphanums)
    try:
        grammar.parseString(code)
        begin = code.find("<")
        end = code.find(">")
        included_library = code[begin + 1:end]
        if included_library not in self.includes:
            self.add_error(label="UNNECESSARY_INCLUDE")
    except ParseException:
        return
Author: vianuevm, Project: cppStyle, Lines: 13, Source: single_line_checks.py

Example 3: check_local_include

# Required module import: from pyparsing import Literal [as alias]
# Or: from pyparsing.Literal import parseString [as alias]
from pyparsing import Literal, Word, alphanums, ParseException

def check_local_include(self, code):  # method excerpt from a style-checker class
    grammar = Literal('#') + Literal('include') + Literal('"') + Word(alphanums)
    try:
        grammar.parseString(code)
        begin = code.find('"')
        included_file = code[begin + 1:]
        end = included_file.find('"')
        included_file = included_file[:end]
        if included_file not in self.includes:
            self.local_includes[self.current_file].append(included_file)
    except ParseException:
        return
Author: vianuevm, Project: cppStyle, Lines: 14, Source: single_line_checks.py
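
Both checkers only need the grammar to match the start of the line; the header name itself is then extracted with plain string slicing. A standalone sketch of the two grammars on made-up input lines:

from pyparsing import Literal, Word, alphanums, ParseException

system_include = Literal('#') + Literal('include') + Literal('<') + Word(alphanums)
local_include = Literal('#') + Literal('include') + Literal('"') + Word(alphanums)

for grammar, line in [(system_include, '#include <iostream>'),
                      (local_include, '#include "myheader.h"')]:
    try:
        print(grammar.parseString(line).asList())
    except ParseException:
        print('no match:', line)
# -> ['#', 'include', '<', 'iostream']
# -> ['#', 'include', '"', 'myheader']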

Example 4: typeSwitch

# Required module import: from pyparsing import Literal [as alias]
# Or: from pyparsing.Literal import parseString [as alias]
from pyparsing import Literal, oneOf, ParseException

def typeSwitch(line):
    global typedversion

    typeflag = Literal("#") + "option" + Literal("=") + oneOf("untyped typed")
    res = typeflag.parseString(line)
    if res[3] == "untyped":
        typedversion = False
    elif res[3] == "typed":
        typedversion = True
    else:
        print("Cannot determine whether typed or untyped.")
        raise ParseException(line, 0, "unknown #option value")

    msg = "Detected "
    if not typedversion:
        msg += "un"
    msg += "typed version."
    print(msg)
Author: schoenb, Project: scyther, Lines: 20, Source: Ifparser.py
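
A quick check of the corrected grammar; the option-line format below is inferred from the parser itself, not taken from real input:

typeSwitch("#option=typed")    # prints "Detected typed version."
typeSwitch("#option=untyped")  # prints "Detected untyped version."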

Example 5: insertResult

# Required module import: from pyparsing import Literal [as alias]
# Or: from pyparsing.Literal import parseString [as alias]
# Copyright L.P.Klyne 2013 
# Licenced under 3 clause BSD licence 

from pyparsing import Literal, Empty, replaceWith

def insertResult(v):
    """
    Parser helper function that simply inserts a result in 
    the list of values returned.
    """
    return Empty().setParseAction( replaceWith(v) )

p1 = Literal("1")
p2 = Literal("2")+insertResult("B")     # 'AttributeError: 'NoneType' object has no attribute 'streamline''
p3 = insertResult("B")+Literal("3")   # Blows python stack

r1 = p1.parseString("1")
r2 = p2.parseString("2")
r3 = p3.parseString("3")

print(r2)

print(r3)
Author: AndyThirtover, Project: wb_gateway, Lines: 25, Source: pptest.py
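
This looks like a bug-reproduction script against an old pyparsing release; in recent releases the same combination appears to parse cleanly. A minimal sketch, assuming a modern pyparsing:

from pyparsing import Literal, Empty, replaceWith

p2 = Literal("2") + Empty().setParseAction(replaceWith("B"))
print(p2.parseString("2"))  # expected: ['2', 'B']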

Example 6: Purchased

# Required module import: from pyparsing import Literal [as alias]
# Or: from pyparsing.Literal import parseString [as alias]
import datetime

import pandas as pd
from pyparsing import Literal, Word, nums

# site_contents, begin_date, and end_date come from earlier scraping code that is not shown.
try:
    date = datetime.datetime.strptime(site_contents[begin_date:end_date].strip().replace(',',''), '%b %d %Y')
except ValueError:
    date = datetime.datetime.strptime(site_contents[begin_date:end_date].strip().replace(',',''), '%B %d %Y')
date=date-datetime.timedelta(hours=24)
start_index = site_contents.find('Sows Purchased (Live and Carcass Basis)')
labels = [ '300-399', '400-449', '450-499', '500-549', '550/up' ]
x = 0
parsed = []
# Loops through each label in labels and parses its line of data on the website.
# Then it creates a table with the parsed data elements and moves to the next label.
while x < len(labels):
    label_index = site_contents.find(labels[x], start_index) # index of labels[x] on the website
    #grammar for each line of data    
    line_grammar = Literal(labels[x]) + Word(nums+',') + Word(nums) + Word(nums+'.'+'-') + Word(nums+'.')
    line_end = site_contents.find('\r\n', label_index) # index of the end of the line to be parsed
    parsed = line_grammar.parseString(site_contents[label_index:line_end]).asList() # parses line and converts to list
    parsed.append(parsed[4]) # add the weighted average to end of the list because split on next line will overwrite parsed[4]
    [ parsed[3], parsed[4] ] = parsed[3].split('-') # split the price range into low price and high price
    headings = [ 'Date', 'Head Count', 'Avg Wgt', 'Low Price', 'High Price', 'Wtd Avg Price' ]
    data = { 'Date': [date.strftime('%Y-%m-%d')], 'Head Count': [parsed[1]], 'Avg Wgt': [parsed[2]], 'Low Price': [parsed[3]], \
           'High Price': [parsed[4]], 'Wtd Avg Price': [parsed[5]] }
    data_df = pd.DataFrame(data, columns = headings)
    data_df.index = data_df['Date']
    data_df = data_df.drop('Date', 1)
    quandl_code = 'USDA_LM_HG230_' + parsed[0].replace('-', '_').replace('/', '_') + '\r'# build unique quandl code
    reference_text = '  Historical figures from USDA can be verified using the LMR datamart located ' \
    '\n  at http://mpr.datamart.ams.usda.gov.\n' 
    print('code: ' + quandl_code + '\n')
    print('name: National Daily Sows Purchased- ' + parsed[0] + ' pounds\n')
    print('description: National daily direct sow and boar report. This dataset contains '\
    ' head count, average weight, price range, and weighted average for sows in the weight range ' + parsed[0] +\
Author: nataliemoore, Project: web_scraping, Lines: 33, Source: USDA_LM_HG230_scraper.py
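
A standalone sketch of the line grammar on an invented report line (the label is real, the figures are illustrative, not actual USDA data):

from pyparsing import Literal, Word, nums

line_grammar = Literal('300-399') + Word(nums+',') + Word(nums) + Word(nums+'.'+'-') + Word(nums+'.')
sample = '300-399   1,234   350   40.00-55.00   48.50'
print(line_grammar.parseString(sample).asList())
# -> ['300-399', '1,234', '350', '40.00-55.00', '48.50']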

Example 7: len

# Required module import: from pyparsing import Literal [as alias]
# Or: from pyparsing.Literal import parseString [as alias]
import datetime

import pandas as pd
from pyparsing import Literal, Suppress, Word, nums, printables

# site_contents, begin_date, and end_date come from earlier scraping code that is not shown.
try:
    date = datetime.datetime.strptime(site_contents[begin_date:end_date].strip().replace(',',''), '%b %d %Y')
except ValueError:
    date = datetime.datetime.strptime(site_contents[begin_date:end_date].strip().replace(',',''), '%B %d %Y')
date = date - datetime.timedelta(days = 5)
# list of each region in the report
labels = [ 'North East', 'South Atlantic', 'North Central', 'South Central', 'West', 'U.S. total' ]
# Loops through each region and uses pyparsing to find the head and average 
# live weight for the turkeys slaughtered. 
x = 0
while x < len(labels):
    suppress = Suppress(Word(printables))
    line = Literal(labels[x]) + suppress * 4 + Word(nums+',') + Word(nums+'.') # grammar for each line of data following a region
    first = site_contents.find(labels[x]) # index of label
    end = site_contents.find('\r\n', first) # index of end of the line
    line = line.parseString(site_contents[first:end]) # parse line and store in list "line"
    line = [float(y.replace(',','')) for y in line[1:]] # remove commas and convert to floats
    headings = [ 'Date','Actual Turkey Slaughter', 'Turkey Average Weight' ]
    data={ 'Date':[date.strftime('%Y-%m-%d')], 'Actual Turkey Slaughter': [line[0]], 'Turkey Average Weight': [line[1]] }
    data_df = pd.DataFrame(data, columns = headings)
    data_df.index = data_df['Date']
    data_df = data_df.drop('Date', 1)
    name = labels[x].replace(' ','_').replace('.','')
    quandl_code = 'USDA_NW_PY021_' + name.upper() + '\r'
    print('code: ' + quandl_code)
    print('name: Weekly National Turkey Slaughter- ' + labels[x].title() + '\r')
    reference_text = '  Historical figures from USDA can be verified using the LMR datamart located ' \
    '\n  at http://mpr.datamart.ams.usda.gov.\n'
    print('description:  Weekly national turkey slaughter data' \
    '\n  from the USDA NW_PY021 report published by the USDA Agricultural Marketing Service ' \
    '\n  (AMS). This dataset covers the ' + labels[x] + '.\n'\
Author: nataliemoore, Project: web_scraping, Lines: 33, Source: USDA_NW_PY021_scraper.py
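
A standalone sketch of the region-line grammar; the four suppressed fields and all figures below are invented placeholders:

from pyparsing import Literal, Suppress, Word, nums, printables

suppress = Suppress(Word(printables))
line = Literal('West') + suppress * 4 + Word(nums+',') + Word(nums+'.')
sample = 'West  1,180  17.1  1,150  16.8  1,234  17.5'
print(line.parseString(sample).asList())  # -> ['West', '1,234', '17.5']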

Example 8: Manager

# Required module import: from pyparsing import Literal [as alias]
# Or: from pyparsing.Literal import parseString [as alias]
import subprocess
from subprocess import PIPE

import salt.client
from pyparsing import Literal, Word, srange, Combine, OneOrMore, nums

# Etcd and Load are project-local helpers that are not shown in this excerpt.

class Manager(object):
  '''
    A manager to orchestrate the creation and 
    deletion of container clusters
  '''
  def __init__(self, logger):
    self.salt_client = salt.client.LocalClient()
    self.etcd = Etcd(logger)
    self.logger = logger
    # Parse out the username and formation name 
    # from the ETCD directory string
    self.formation_parser = Literal('/formations/') + \
      Word(srange("[0-9a-zA-Z_-]")).setResultsName('username') + Literal('/') + \
      Word(srange("[0-9a-zA-Z_-]")).setResultsName('formation_name')

  def fqdn_to_shortname(self, fqdn):
    if '.' in fqdn:
      return fqdn.split('.')[0]
    else:
      return fqdn

  def check_salt_key_used(self, hostname):
    self.logger.info("Checking if the key for {host} is already used".format(
      host=hostname))
    s = subprocess.Popen('salt-key', shell=True, stdout=PIPE)
    salt_list = s.communicate()[0]

    if hostname in salt_list:
      return True
    else:
      return False

  def check_port_used(self, host, port):
    self.logger.info("Checking if {port} on {host} is open with salt-client".format(
      host=host, port=port))
    results = self.salt_client.cmd(host, 'cmd.run', 
      ['netstat -an | grep %s | grep tcp | grep -i listen' % port], 
      expr_form='list')
    self.logger.debug("Salt return: {lsof}".format(lsof=results[host]))

    if results[host] != '':
      return True
    else:
      return False

  # TODO
  def check_for_existing_formation(self, formation_name):
    # If the user passed in an existing formation name lets append to it
    pass

  def get_docker_cluster(self):
    # Return a list of docker hosts
    cluster = self.etcd.get_key('docker_cluster')
    if cluster is not None:
      return cluster.split(',')
    else:
      return None

  def get_load_balancer_cluster(self):
    # Return a list of nginx hosts
    cluster = self.etcd.get_key('nginx_cluster')
    if cluster is not None:
      return cluster.split(',')
    else:
      return None

  def order_cluster_by_load(self, cluster_list):
    # Sample salt output
    # {'dlceph01.drwg.local': '0.27 0.16 0.15 1/1200 26234'}

    # define grammar
    point = Literal('.')
    number = Word(nums) 
    floatnumber = Combine( number + point + number)
    float_list = OneOrMore(floatnumber)

    results = self.salt_client.cmd(','.join(cluster_list), 'cmd.run', ['cat /proc/loadavg'], expr_form='list')
    load_list = []
    self.logger.debug("Salt load return: {load}".format(load=results))

    for host in results:
      host_load = results[host]
      match = float_list.parseString(host_load)
      if match:
        one_min = match[0]
        five_min = match[1]
        fifteen_min = match[2]
        self.logger.debug("Adding Load({host}, {one_min}, {five_min}, {fifteen_min}".format(
          host=host, one_min=one_min, five_min=five_min, fifteen_min=fifteen_min))
        load_list.append(Load(host, one_min, five_min, fifteen_min))
      else:
        self.logger.error("Could not parse host load output")

    # Sort the list by fifteen min load
    load_list = sorted(load_list, key=lambda x: x.fifteen_min_load)
    for load in load_list:
      self.logger.debug("Sorted load list: " + str(load))

    return load_list

#......... remainder of code omitted .........
Author: Mondego, Project: pyreco, Lines: 103, Source: allPythonContent.py
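
The load-average grammar can be checked on its own. OneOrMore stops at the first token that is not a float, so the '1/1200' and PID fields of /proc/loadavg are simply left unparsed (sample output invented):

from pyparsing import Literal, Word, Combine, OneOrMore, nums

point = Literal('.')
number = Word(nums)
floatnumber = Combine(number + point + number)
float_list = OneOrMore(floatnumber)

print(float_list.parseString('0.27 0.16 0.15 1/1200 26234').asList())
# -> ['0.27', '0.16', '0.15']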

Example 9: start_verifying

# Required module import: from pyparsing import Literal [as alias]
# Or: from pyparsing.Literal import parseString [as alias]
  def start_verifying(self):
    # Parse out the username and formation name 
    # from the ETCD directory string
    formation_parser = Literal('/formations/') + \
      Word(srange("[0-9a-zA-Z_-]")).setResultsName('username') + Literal('/') + \
      Word(srange("[0-9a-zA-Z_-]")).setResultsName('formation_name')

    # call out to ETCD and load all the formations
    formation_list = []

    user_list = self.etcd.list_directory('formations')
    if user_list:
      for user in user_list:
        formations = self.etcd.list_directory(user)
        for formation in formations:
          parse_results = formation_parser.parseString(formation)
          if parse_results:
            formation_name = parse_results['formation_name']
            username = parse_results['username']

            self.logger.info('Attempting to load formation: {formation_name} '
              'with username: {username}'.format(formation_name=formation_name,
                username=username))
            f = self.manager.load_formation_from_etcd(username, formation_name)
            formation_list.append(f)
          else:
            self.logger.error("Could not parse the ETCD string")

      if formation_list:
        # TODO Use background salt jobs
        # Start verifying things
        # Ask salt to do these things for me and give me back an job_id
        # results = self.salt_client.cmd_async(host, 'cmd.run', 
        #   ['netstat -an | grep %s | grep tcp | grep -i listen' % port], 
        #   expr_form='list')
        # 
        # salt-run jobs.lookup_jid <job id number>
        for f in formation_list:
          for app in f.application_list:
            # Check to make sure it's up and running
            self.logger.info("Running verification on app: "
              "{app_name}".format(app_name=app.hostname))
            self.logger.info('{server} docker ps | grep {container_id}'.format(
              server=app.host_server, 
              container_id=app.container_id))
            results = self.salt_client.cmd(app.host_server, 'cmd.run', 
              ['docker ps | grep {container_id}'.format(container_id=app.container_id)], 
              expr_form='list')
            if results:
              self.logger.debug("Salt return: {docker_results}".format(
                docker_results=results[app.host_server]))
              if results[app.host_server] == "":
                self.logger.error("App {app} is not running!".format(
                  app=app.hostname))
                # Start the app back up and run start.sh on there
                self.start_application(app)
              else:
                self.logger.info("App {app} is running.  Checking if "
                  "cron is running also".format(app=app.hostname))
                # Check if cron is running on the container and bring it back 
                # up if needed
                # Log in with ssh and check if cron is up and running
                self.logger.info("Sleeping 2 seconds while the container starts")
                time.sleep(2)
                self.check_running_application(app)
            else:
              self.logger.error("Call out to server {server} failed. Moving it".format(
                server=app.host_server))
              # move the container
              self.move_application(app)
Author: Mondego, Project: pyreco, Lines: 72, Source: allPythonContent.py
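
The formation-path grammar can be exercised on its own with a made-up ETCD key:

from pyparsing import Literal, Word, srange

formation_parser = Literal('/formations/') + \
    Word(srange("[0-9a-zA-Z_-]")).setResultsName('username') + Literal('/') + \
    Word(srange("[0-9a-zA-Z_-]")).setResultsName('formation_name')

res = formation_parser.parseString('/formations/alice/web-cluster')
print(res['username'], res['formation_name'])  # -> alice web-cluster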

Example 10: report

# Required module import: from pyparsing import Literal [as alias]
# Or: from pyparsing.Literal import parseString [as alias]
# The state_labels list is truncated in this excerpt; it ends as follows.
    'Nebraska', 'Oklahoma', 'South Dakota', 'Texas', 'Washington', 'Other States' ]
# List of the sections of data that need to be found
name_labels = [ 'Cattle on Feed', 'Cattle Placed on Feed', 'Marketed', 'Other Disappearance' ]
end = site_contents.find('Number of Cattle on Feed on 1,000+ Capacity Feedlots by Month') # set to point at beginning of report (will be changed each iteration of following loop)
new_date = date - relativedelta(months = 1) # subtract one month because the data is for the month before the report is published
n = 0
while n < len(name_labels):
    end = site_contents.find(name_labels[n], end) # store where name_label occurs (always after previous name_label)
    x = 0
    while x < len(state_labels):
        start = site_contents.find(state_labels[x], end) # find where the state name occurs
        end = site_contents.find('\r\n', start) # end is changed to end of line
        # This is the grammar for each line of data. It starts with the name of the state and is followed by a varying
        # number of periods. Then five numbers of data follow the colon.
        line_grammar = Literal(state_labels[x]) + Suppress(ZeroOrMore(Literal('.'))) + Suppress(Literal(':')) + Word(nums+',') * 5
        parsed = line_grammar.parseString(site_contents[start:end])[3] # parse the line and keep only the fourth element because it contains the most recent data
        headings = ['Date', 'Thousand Head']
        # The 'Cattle on Feed' data corresponds to the current month, so one month is added to the date and
        # the year, month, and day are converted to strings
        if n == 0:
            month = str((new_date + relativedelta(months = 1)).month)
            day = str((new_date + relativedelta(months = 1)).day)
            year = str((new_date + relativedelta(months = 1)).year)
        else:
            year = str(new_date.year)
            month = str(new_date.month)
            day = str(new_date.day)
        if len(month) == 1:
            month = '0' + month # prepend 0 to month if it is one digit
        data = {'Date': [year + month + day], 'Thousand Head': [parsed]}
        data_df = pd.DataFrame(data, columns = headings)
Author: nataliemoore, Project: web_scraping, Lines: 33, Source: CATTLE_ON_FEED_HIST_SCRAPER.py
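
A sketch of the state-line grammar on an invented line; the run of dots is the report's layout filler, which the grammar suppresses along with the colon:

from pyparsing import Literal, Suppress, Word, ZeroOrMore, nums

line_grammar = Literal('Texas') + Suppress(ZeroOrMore(Literal('.'))) + Suppress(Literal(':')) + Word(nums+',') * 5
sample = 'Texas ..........:  2,650  2,700  2,710  2,720  2,680'
print(line_grammar.parseString(sample).asList())
# -> ['Texas', '2,650', '2,700', '2,710', '2,720', '2,680']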

Example 11: QUARTERS

# Required module import: from pyparsing import Literal [as alias]
# Or: from pyparsing.Literal import parseString [as alias]
import re

import pandas as pd
from pyparsing import Literal, Word, nums

# site_contents and date come from earlier scraping code that is not shown.

# names of each cut in the report
labels = ['BREAST - B/S', 'TENDERLOINS', 'BREAST - WITH RIBS', 'BREAST - LINE RUN', 'LEGS', 'LEG QUARTERS (BULK)',\
        'DRUMSTICKS', 'THIGHS', 'B/S THIGHS', 'WINGS (WHOLE)', 'BACKS AND NECKS (STRIPPED)', 'LIVERS (5 POUND TUBS)',\
        'GIZZARDS (HEARTS)']

ending_index = 0 # initializes ending_index to 0 to be used in following loop
# Loops through each cut in labels and uses pyparsing to find the weighted average
# and volume for that cut. The date and data are formatted into a table and the
# relevant quandl data is printed.
x = 0
while x < len(labels):
    line = Literal(labels[x]) + Word(nums+'-') + Word(nums+'.') + Word(nums+',') # grammar to find each label's data
    starting_index = site_contents.find(labels[x], ending_index) # stores the index of the beginning of each label's data
    ending_index = site_contents.find('\r\n', starting_index) # stores the index of the end of the label's data
    text = site_contents[starting_index:ending_index] # the line to be parsed is from starting_index to ending_index
    parsed = line.parseString(text) # parses the line and stores it in "parsed"
    headings = ['Date', 'Weighted Average (Price)', 'Volume (Lbs)']
    data = {'Date': [date.strftime('%Y-%m-%d')], 'Weighted Average (Price)': [parsed[2]], 'Volume (Lbs)': [parsed[3].replace(',','')]}
    data_df = pd.DataFrame(data, columns = headings)
    data_df.index = data_df['Date']
    data_df = data_df.drop('Date', 1)
    replace = re.compile('[ /]') # list of characters to be replaced 
    remove = re.compile('[,%#-&()!$+<>?/\'"{}.*@]') # list of characters to be removed
    name1 = replace.sub('_', labels[x]) # replace certain characters with '_'
    name2 = remove.sub('', name1).upper() # remove certain characters and convert to upper case
    name2 = name2.replace('-', '') # ensure '-' character is removed
    quandl_code = 'USDA_AJ_PY047_' + name2 + '\r'
    print('code: ' + quandl_code)
    print('name: Daily Northeast Broiler/Fryer Parts- ' + labels[x].title() + '\r')
    reference_text = '  Historical figures from USDA can be verified using the LMR datamart located ' \
    '\n  at http://mpr.datamart.ams.usda.gov.\n' 
Author: nataliemoore, Project: web_scraping, Lines: 33, Source: USDA_AJ_PY047_scraper.py
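
A standalone check of the cut-line grammar with invented figures:

from pyparsing import Literal, Word, nums

line = Literal('TENDERLOINS') + Word(nums+'-') + Word(nums+'.') + Word(nums+',')
sample = 'TENDERLOINS  200-210  205.50  12,345'
print(line.parseString(sample).asList())
# -> ['TENDERLOINS', '200-210', '205.50', '12,345']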


Note: The pyparsing.Literal.parseString method examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by their original authors, who retain copyright in the source code. Consult each project's License before distributing or using the code; do not reproduce without permission.