

Python diag.progress Function Code Examples

This article collects typical usage examples of the swap.diag.progress function in Python. If you have been wondering how the progress function is actually used, how to call it, or what it looks like in real code, the hand-picked examples here should help.


Fifteen code examples of the progress function are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better Python code examples.
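
Before the examples, here is a minimal sketch of the pattern most of them share. It is illustrative only and not taken from any of the projects below: the file name "data.n3", the chatty_flag value, and the verbose flag are placeholders, and the imports assume the swap package (from cwm) is installed. progress() prints a diagnostic message assembled by the caller, and calls to it are usually gated on a verbosity setting such as diag.chatty_flag or a module-level verbose counter.

from swap import diag
from swap.diag import progress

diag.chatty_flag = 30      # global diagnostic verbosity used by the swap/cwm tools
verbose = 1                # module-level flag, as in several of the examples below

if verbose > 0:
    progress("Loading %s..." % "data.n3")   # printf-style formatting is done by the caller
    progress("Loaded", 1234)                # progress also accepts multiple arguments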

Example 1: loadFiles

def loadFiles(files):
    graph = myStore.formula()
    graph.setClosureMode("e")    # Implement sameAs by smushing
    if verbose>0: progress("Loading %s..." % files)
    graph = myStore.loadMany(files, openFormula=graph)
    if verbose>0: progress("Loaded", graph)
    return graph
Developer: Mchockalingam, Project: swap, Lines: 7, Source file: delta.py

Example 2: parseProduction

    def parseProduction(parser, lhs, tok, stream):
        "The parser itself."

        if tok() is None: return None
        name, thing, line = tok()
        lookupTable = parser.branchTable[lhs]
        rhs = lookupTable.get(name, None)  # Predict branch from token
        if rhs == None:
            progress("""Found %s when expecting some form of %s,
\tsuch as %s\n\t%s"""  % (tok(), lhs, lookupTable.keys(), parser.around(None, None)))
            raise SyntaxError("""Found %s when expecting some form of %s,
\tsuch as %s\n\t%s"""  % (tok(), lhs, lookupTable.keys(), parser.around(None, None)))
        if parser.verb: progress( "%i  %s means expand %s as %s" %(parser.lineNumber,tok(), lhs, rhs.value()))
        tree = [lhs]
        for term in rhs:
            lit = term.fragid
            if lit != name: # Not token
                if lit in parser.tokenSet:
                    progress("Houston, we have a problem. %s is not equal to %s" % (lit, name))
                progress("recursing on %s, which is not %s. Token is %s" % (lit, name, `tok()`))
                tree.append(parser.parseProduction(term, tok, stream))
            else:
                progress("We found %s, which matches %s" % (lit, `tok()`))
                tree.append(tok())
                tok(parser.token(stream))  # Next token
            if tok():
                name, thing, line = tok()
            else:
                name, thing = None, None
        if hasattr(parser, "p_" + lhs.fragid):
            return getattr(parser, "p_" + lhs.fragid)(tree)
        return tree
Developer: AwelEshetu, Project: cwm, Lines: 32, Source file: lexedParser.py

Example 3: internalCheck

def internalCheck():
    global kb
    global cat
    transactions = kb.each(pred=rdf.type, obj=cat_ns.Internal)
    unbalanced = []
    while len(transactions) > 0:
        x = transactions.pop()
        month = monthNumber(x)
        if month < 0 : continue

        date = str(kb.the(subj=x, pred=qu.date))
        if len(kb.each(subj=x, pred=qu.in_USD)) != 1:
            progress("Ignoring !=1 amount transaction %s" % x)
            continue
        amount = float(str(kb.the(subj=x, pred=qu.in_USD)))
        for y in transactions:
            datey = str(kb.the(subj=y, pred=qu.date))
            if 1: #  date[0:10] == datey[0:10]:  # precision one day
                usds = kb.each(subj=y, pred=qu.in_USD)
                if len(usds) == 0:continue  # No amount => must be not in this period.
                if len(usds) != 1:
                    progress("Error: Ignoring: %i != 1 USD amounts for Internal transaction %s" % (len(usds), `y`+': '+ `usds`))
                    transactions.remove(y)
                    continue
                if abs(amount +
                        float(str(kb.the(subj=y, pred=qu.in_USD)))) < 0.001:
                    transactions.remove(y)
                    break
        else:
            unbalanced.append(x)
    if unbalanced:
        print "<h2>Unbalanced internal transactions</h2>"
        print transactionTable(unbalanced);
    return
Developer: Mchockalingam, Project: swap, Lines: 34, Source file: fin.py

Example 4: saveAs

def saveAs(uri, filename):
    gifStream = urlopen(uri)
    gifData = gifStream.read()
    gifStream.close()  # close the HTTP stream once the data is read
    progress('curl "%s" > %s' % (uri, filename))
    saveStream = open(filename, "wb")  # binary mode so the image bytes are written unchanged
    saveStream.write(gifData)
    saveStream.close()
Developer: AwelEshetu, Project: cwm, Lines: 8, Source file: day.py

Example 5: getSize

    def getSize(s, atr):
        i = s.find(atr + '="') + len(atr) + 2
        val = ""
        while s[i] in "0123456789":
            val += s[i]
            i = i + 1
        x = int(val)
        progress("Found attribute %s=%i" % (atr, x))
        return x
Developer: AwelEshetu, Project: cwm, Lines: 9, Source file: day.py

Example 6: token

  def token(parser, str, i):
      """The Tokenizer:  returns (token type character, offset of token)
      Skips spaces.
      "0" means numeric
      "a" means alphanumeric
      """
      
      while 1:
          m = whiteSpace.match(str, i)
          if m == None or m.end() == i: break
          i = m.end()
      parser.countLines(str, i)
      if i == len(str):
          return "",  i # eof
      
      if parser.verb: progress( "%i) Looking at:  ...%s$%s..." % (
          parser.lineNumber, str[i-10:i],str[i:i+10]))
      for double in "=>", "<=", "^^":
          if double == str[i:i+2]: return double, i
  
      ch = str[i]
      if ch == ".": parser.keywordMode = 0 # hack
      if ch in singleCharacterSelectors:
          return ch, i
      if ch in "+-0123456789":
          return "0", i #  Numeric
      j = i+1
      if ch == "@":
          if i!=0 and whiteSpace.match(str[i-1]).end() == 0:
              return ch, i
          while str[j] not in notNameChars: j = j + 1
          if str[i+1:j] == "keywords" :
              parser.keywords = [] # Special
              parser.keywordMode = 1
          return str[i:j], i # keyword
      if ch == '"':  #"
          return '"', i #"
 
      # Alphanumeric: keyword hacks
      while str[j] not in notQNameChars: j = j+1
      word = str[i:j]
      if parser.keywordMode:
          parser.keywords.append(word)
      elif word in parser.keywords:
          if word == "keywords" :
              parser.keywords = []    # Special
              parser.keywordMode = 1
          if parser.atMode:
              return "@" + word, i  # implicit keyword
          return word, i
      return "a", i    # qname, langcode, or barename
Developer: dard12, Project: WebSearch, Lines: 51, Source file: predictiveParser.py

Example 7: removeCommon

def removeCommon(f, g, match):
    """Find common statements from f and g
    match gives the dictionary mapping bnodes in f to bnodes in g"""
    only_f, common_g = Set(), Set()
    for st in f.statements[:]:
        s, p, o = st.spo()
        assert s not in f._redirections 
        assert o not in f._redirections
        if s.generated(): sg = match.get(s, None)
        else: sg = s
        if o.generated(): og = match.get(o, None)
        else: og = o
        if og != None and sg != None:
            gsts = g.statementsMatching(subj=sg, pred=p, obj=og)
            if len(gsts) == 1:
                if verbose>4: progress("Statement in both", st)
                common_g.add(gsts[0])
                continue
        only_f.add(st)
    return only_f, Set(g.statements)-common_g
Developer: Mchockalingam, Project: swap, Lines: 20, Source file: delta.py

Example 8: consolidate

def consolidate(delta, patchVerb):
    """Consolidate patches
    
    Where the same left hand side applies to more than 1 RHS formula,
    roll those RHS formulae into one, to make the dif file more readable
    and faster to execute in some implementations
    """
    agenda = {}
    if verbose >3: progress("Consolidating %s" % patchVerb)
    for s in delta.statementsMatching(pred=patchVerb):
        list = agenda.get(s.subject(), None)
        if list == None:
            list = []
            agenda[s.subject()] = list
        list.append(s)
    for lhs, list in agenda.items():
        if verbose >3: progress("Patches lhs= %s: %s" %(lhs, list))
        if len(list) > 1:
            rhs = delta.newFormula()
            for s in list:
                delta.store.copyFormula(s.object(), rhs)
                delta.removeStatement(s)
            delta.add(subj=lhs, pred=patchVerb, obj=rhs.close())
Developer: Mchockalingam, Project: swap, Lines: 23, Source file: delta.py

Example 9: P

def P(s):
    """
    Input: a state s
    Output: possible pairs to add to the mapping
    """
    G1 = s.problem.G1
    G2 = s.problem.G2
    t1_out_size, t2_out_size, t1_in_size, t2_in_size = (len(s.t1_out), len(s.t2_out), len(s.t1_in), len(s.t2_in))
    progress("P(s) %s %s %s %s" % (t1_out_size, t2_out_size, t1_in_size, t2_in_size))
    if t1_out_size and t2_out_size:
        progress(", case 1")
        m = s.t2_out.first()
        if representsSelf(m):
            if m in s.t1_out:
                yield m, m, regular
        else:
            for n in s.t1_out:
                yield n, m, regular
    elif not t1_out_size and not t2_out_size and t1_in_size and t2_in_size:
        progress(", case 2")
        m = s.t2_in.first()
        if representsSelf(m):
            if m in s.t1_in:
                yield m, m, regular
        else:
            for n in s.t1_in:
                yield n, m, regular
    elif not t1_out_size and not t2_out_size and not t1_in_size and not t2_in_size:
        progress(", case 3")
        m = s.G2_not_taken.first()
        if representsSelf(m):
            if m in s.G1_not_taken:
                yield m, m, regular
        else:
            for n in s.G1_not_taken:
                yield n, m, regular
Developer: AwelEshetu, Project: cwm, Lines: 36, Source file: vf2.py

Example 10: parseProduction

    def parseProduction(parser, lhs, str, tok=None, here=0):
        "The parser itself."

        if tok == "": return tok, here # EOF    
        lookupTable = parser.branchTable[lhs]
        rhs = lookupTable.get(tok, None)  # Predict branch from token
        if rhs == None:
            progress("""Found %s when expecting some form of %s,
\tsuch as %s\n\t%s"""  % (tok, lhs, lookupTable.keys(), parser.around(str, here)))
            raise SyntaxError("""Found %s when expecting some form of %s,
\tsuch as %s\n\t%s"""  % (tok, lhs, lookupTable.keys(), parser.around(str, here)))
        if parser.verb: progress( "%i  %s means expand %s as %s" %(parser.lineNumber,tok, lhs, rhs.value()))
        for term in rhs:
            if isinstance(term, Literal): # CFG Terminal
                lit = term.value()
                next = here + len(lit)
                if str[here:next] == lit: pass
                elif "@"+str[here:next-1] == lit: next = next-1
                else: raise SyntaxError(
                    "Found %s where %s expected\n\t %s" %
                        (`str[here:next]`, lit, parser.around(str, here)))
            else:
                rexp = tokenRegexps.get(term, None)
                if rexp == None: # Not token
                    tok, here = parser.parseProduction(term, str, tok, here)
                    continue
                m = rexp.match(str, here)
                if m == None:
                    progress("\n\n\nToken: should match %s\n\t %s" % 
                                (rexp.pattern, parser.around(str, here)))
                    raise SyntaxError("Token: should match %s\n\t %s" % 
                                (rexp.pattern, parser.around(str, here)))
                if parser.verb: progress( "Token matched to <%s> as pattern <%s>" % (str[here:m.end()], rexp.pattern))
                next = m.end()
            tok, here = parser.token(str, next)  # Next token
        return tok, here
Developer: dard12, Project: WebSearch, Lines: 36, Source file: predictiveParser.py

Example 11: match

def match(s, extras=BindingTree()):
    """
Input: an intermediate state s
Output: the mapping between the two graphs

When a match forces a predicate match, we add that
to extras --- we go through all of those before continuing
on our regularly scheduled P(s)
    """
    progress("starting match")
    progress("s.map=%s" % s.map)
    G2 = s.problem.G2
    for choice in extras:
        if not choice:
            if set(s.map.values()) >= G2.allNodes():
                yield s.map
            elif set(s.map.values()) >= G2.nodes():
                yield finish(s, s.map)

            nodeList = P(s)

        else:
            n, m = choice[0]
            nodeList = [(n, m, choice[1:])]

        nodeList = [x for x in nodeList]
        progress("nodeList=", nodeList)
        for n, m, realExtras in nodeList:
            progress("... trying n,m=%s,%s" % (n, m))
            newExtras = BindingTree()
            newExtras.int_and(realExtras)
            if F(s, n, m, newExtras):
                s2 = s.addNode(n, m)
                for x in match(s2, newExtras):
                    yield x
                s2.undo()
Developer: AwelEshetu, Project: cwm, Lines: 36, Source file: vf2.py

Example 12: lookUp

def lookUp(predicates, assumptions=Set()):
    """Look up all the schemas for the predicates given"""
    global verbose
    schemas = assumptions
    for pred in predicates:
        if verbose > 3: progress("Predicate: %s" % `pred`)
        u = pred.uriref()
        hash = u.find("#")
        if hash <0:
            if verbose > 1: progress("Warning: Predicate <%s> looks like web resource not Property" % u)
        else:
            schemas.add(u[:hash])
    if verbose > 2:
        for r in schemas:
            progress("Metadata to be loaded: ", r) 
    if schemas:
        return loadMany([(x) for x in schemas])
    return myStore.store.newFormula() # Empty formula
Developer: Mchockalingam, Project: swap, Lines: 18, Source file: delta.py

Example 13: main

def main():
    global already, agenda, errors
    parseAs = None
    grammarFile = None
    parseFile = None
    yaccFile = None
    global verbose
    global g
    verbose = 0
    lumped = 1

    try:
        opts, args = getopt.getopt(sys.argv[1:], "ha:v:p:g:y:",
            ["help", "as=",  "verbose=", "parse=", "grammar=", "yacc="])
    except getopt.GetoptError:
        usage()
        sys.exit(2)
    output = None
    for o, a in opts:
        if o in ("-h", "--help"):
            usage()
            sys.exit()
        if o in ("-v", "--verbose"):
            verbose =int(a)
            diag.chatty_flag = int(a)
        if o in ("-a", "--as"):
            parseAs = uripath.join(uripath.base(), a)
        if o in ("-p", "--parse"):
            parseFile = uripath.join(uripath.base(), a)
        if o in ("-g", "--grammar"):
            grammarFile = uripath.join(uripath.base(), a)
        if o in ("-y", "--yacc"):
            yaccFile = uripath.join(uripath.base(), a)[5:]  # strip off file:

    

#    if testFiles == []: testFiles = [ "/dev/stdin" ]
    if not parseAs:
        usage()
        sys.exit(2)
    parseAs = uripath.join(uripath.base(), parseAs)
    if not grammarFile:
        grammarFile = parseAs.split("#")[0]   # strip off fragid
    else:
        grammarFile = uripath.join(uripath.base(), grammarFile)


    
    # The Grammar formula
    progress("Loading " + grammarFile)
    start = clock()
    g = load(grammarFile)
    taken = clock() - start + 1
    progress("Loaded %i statements in %fs, ie %f/s." %
        (len(g), taken, len(g)/taken))
    
    document = g.newSymbol(parseAs)
    
    already = []
    agenda = []
    errors = []
    doProduction(document)
    while agenda:
        x = agenda[0]
        agenda = agenda[1:]
        already.append(x)
        doProduction(x)
        
    if errors != []:
        progress("###### FAILED with %i errors." % len(errors))
        for s in errors: progress ("\t%s" % s)
        exit(-2)
    else:
        progress( "Ok for predictive parsing")
    
    #if parser.verb: progress "Branch table:", branchTable
    if verbose:
        progress( "Literal terminals: %s" %  literalTerminals.keys())
        progress("Token regular expressions:")
        for r in tokenRegexps:
            progress( "\t%s matches %s" %(r, tokenRegexps[r].pattern) )
    
    if yaccFile:
        yacc=open(yaccFile, "w")
        yaccConvert(yacc, document, tokenRegexps)
        yacc.close()

    if parseFile == None: exit(0)

    
    ip = webAccess.urlopenForRDF(parseFile, None)
    
    str = ip.read().decode('utf_8')
    sink = g.newFormula()
    keywords = g.each(pred=BNF.keywords, subj=document)
    keywords = [a.value() for a in keywords]
    p = PredictiveParser(sink=sink, top=document, branchTable= branchTable,
            tokenRegexps= tokenRegexps, keywords =  keywords)
    p.verb = verbose
    start = clock()
#......... remaining code omitted here .........
Developer: dard12, Project: WebSearch, Lines: 101, Source file: predictiveParser.py

Example 14: differences

def differences(f, g, assumptions):
    """Smush the formulae.  Compare them, generating patch instructions."""
    global lumped
    
# Cross-map nodes:

    g_bnodes, g_definitions = nailFormula(g, assumptions)
    bnodes, definitions = nailFormula(f, assumptions)
    if verbose > 1: progress("\n Done nailing")
    definitions.reverse()  # go back down list @@@ reverse the g list too? @@@
    g_definitions.reverse()     # @@ needed for the patch generation
    
    unmatched = bnodes.copy()
    match = {}  # Mapping of nodes in f to nodes in g
    for x, inverse, pred, y in definitions:
        if x in match: continue # done already

        if x in f._redirections:
            if verbose > 3: progress("Redirected %s to %s. Ignoring" % (`x`, `f._redirections[x]`))
            unmatched.discard(x)
            continue

        if verbose > 3: progress("Definition of %s = %s%s%s"% (`x`, `y` , ".!^"[inverse], `pred`))

        if y.generated():
            while y in f._redirections:
                y = f._redirections[y]
                if verbose>4: progress(" redirected to  %s = %s%s%s"% (`x`,  `y`, "!^"[inverse], `pred`))
            yg = match.get(y, None)
            if yg == None:
                if verbose>4: progress("  Had definition for %s in terms of %s which is not matched"%(`x`,`y`))
                continue
        else:
            yg = y

        if inverse:  # Inverse functional property like ssn
            matches = Set(g.each(obj=yg, pred=pred))
        else: matches = Set(g.each(subj=yg, pred=pred))
        if len(matches) == 0:
            continue   # This is normal - the node does not exist in the other graph
#           raise RuntimeError("Can't match %s" % x)

        if len(matches) > 1:
            raise RuntimeError("""Rats. Wheras in the first graph %s%s%s uniquely selects %s,
                    in the other graph there are more than 1 matches: %s""" % (`y`, "!^"[inverse], `pred`, `x`,  `matches`))
        for q in matches:  # pick only one  @@ python function?
            z = q
            break
        if verbose > 2:
            progress("Found match for %s in %s " % (`x`,`z`))
        match[x] = z
        unmatched.discard(x)

    if len(unmatched) > 0:
        if verbose >1:
            progress("Failed to match all nodes:", unmatched)
            for n in unmatched:
                debugBnode(n, f)

    # Find common parts
    only_f, only_g = removeCommon(f,g, match)

    delta = f.newFormula()
    if len(only_f) == 0 and len(only_g) == 0:
        return delta

    f = f.close()    #  We are not going to mess with them any more
    g = g.close()
    
    common = Set([match[x] for x in match])

    if verbose>2: progress("Common bnodes (as named in g)", common)
    patches(delta, f, only_f, Set(), definitions, deleting=1)
    patches(delta, g, only_g, common, g_definitions, deleting=0)
    if lumped:
        consolidate(delta, delta.store.insertion)
        consolidate(delta, delta.store.deletion)
    return delta
Developer: Mchockalingam, Project: swap, Lines: 78, Source file: delta.py

Example 15: doProduction

def doProduction(lhs):
    "Generate branch tables for one production"
    global branchTable
    if lhs is BNF.void:
        progress("\nvoid")
        return
    if lhs is BNF.eof:
        progress( "\nEOF")
        return
    if isinstance(lhs, Literal):
        literalTerminals[lhs.value()] = 1
        return

    branchDict = {}

    rhs = g.the(pred=BNF.matches, subj=lhs)
    if rhs != None:
        if chatty_flag: progress( "\nToken %s matches regexp %s" %(lhs, rhs))
        try:
            tokenRegexps[lhs] = re.compile(rhs.value(), re.U)
        except:
            print rhs.value().encode('utf-8')
            raise
        cc = g.each(subj=lhs, pred=BNF.canStartWith)
        if cc == []: progress (recordError(
            "No record of what token %s can start with" % `lhs`))
        if chatty_flag: progress("\tCan start with: %s" % cc) 
        return
    if g.contains(subj=lhs, pred=RDF.type, obj=REGEX.Regex):
        import regex
        rhs = regex.makeRegex(g, lhs)
        try:
            tokenRegexps[lhs] = re.compile(rhs, re.U)
        except:
            print rhs
            raise
        cc = g.each(subj=lhs, pred=BNF.canStartWith)
        if cc == []: progress (recordError(
            "No record of what token %s can start with" % `lhs`))
        if chatty_flag: progress("\tCan start with: %s" % cc) 
        return         
    
    rhs = g.the(pred=BNF.mustBeOneSequence, subj=lhs)
    if rhs == None:
        progress (recordError("No definition of " + `lhs`))
        return
#       raise RuntimeError("No definition of %s  in\n %s" %(`lhs`, `g`))
    options = rhs
    if chatty_flag: progress ( "\nProduction %s :: %s  ie %s" %(`lhs`, `options` , `options.value()`))
    succ = g.each(subj=lhs, pred=BNF.canPrecede)
    if chatty_flag: progress("\tCan precede ", succ)

    branches = g.each(subj=lhs, pred=BNF.branch)
    for branch in branches:
        option = g.the(subj=branch, pred=BNF.sequence)
        if chatty_flag: progress( "\toption: "+`option.value()`)
        for part in option:
            if part not in already and part not in agenda: agenda.append(part)
            y = `part`
        conditions = g.each(subj=branch, pred=BNF.condition)
        if conditions == []:
            progress(
                recordError(" NO SELECTOR for %s option %s ie %s" %
                (`lhs`, `option`, `option.value()` )))
            if option.value == []: # Void case - the tricky one
                succ = g.each(subj=lhs, pred=BNF.canPrecede)
                for y in succ:
                    if chatty_flag: progress("\t\t\tCan precede ", `y`)
        if chatty_flag: progress("\t\tConditions: %s" %(conditions))
        for str1 in conditions:
            if str1 in branchDict:
                progress(recordError(
                    "Conflict: %s is also the condition for %s" % (
                                str1, branchDict[str1].value())))
            branchDict[str1.__str__()] = option
#           break

    for str1 in branchDict:
        for str2 in branchDict:
            s1 = unicode(str1)
            s2 = unicode(str2)
# @@ check that selectors are distinct, not substrings
            if (s1.startswith(s2) or s2.startswith(s1)) and branchDict[str1] is not branchDict[str2]:
                progress("WARNING: for %s, %s indicates %s, but  %s indicates %s" % (
                            lhs, s1, branchDict[str1], s2, branchDict[str2]))
    branchTable[lhs] = branchDict
Developer: dard12, Project: WebSearch, Lines: 86, Source file: predictiveParser.py


Note: The swap.diag.progress function examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets are selected from open-source projects contributed by their respective authors; copyright of the source code remains with the original authors, and redistribution and use must follow the corresponding project's License. Do not repost without permission.