

Python Base.commit Method Code Examples

This article collects typical usage examples of the Python pydblite.Base.commit method. If you are wondering what Base.commit does, how to call it, or what real-world uses look like, the curated examples below should help. You can also explore further usage examples of pydblite.Base, the class the method belongs to.


The following presents 15 code examples of the Base.commit method, sorted by popularity by default.
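Before diving into the examples, here is a minimal, self-contained sketch of the create → insert → commit → reopen cycle that the examples below rely on. It is an illustrative sketch, not taken from any of the projects cited here: the file name demo.pdl and the fields name and score are hypothetical placeholders.

from pydblite import Base

# Open (or create) a database file; 'demo.pdl' is a placeholder name.
db = Base('demo.pdl')
# mode="open" reuses an existing file and only creates it if it does not exist yet.
db.create('name', 'score', mode="open")

# insert() returns the auto-generated record id; commit() persists changes to disk.
# Without commit(), inserts, updates and deletes stay in memory only.
record_id = db.insert(name='alice', score=42)
db.commit()

# Reopen the same file later and query records by field value.
db2 = Base('demo.pdl')
db2.open()
for rec in db2(name='alice'):
    print(rec['__id__'], rec['name'], rec['score'])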

Example 1: do_Post

# Required import: from pydblite import Base [as alias]
# Or: from pydblite.Base import commit [as alias]
    def do_Post (self, result, request, args):
        def transfomType(x):
            if isinstance(x, unicode): return str(x)
            else: return x
    
        requestBody = args['requestContent']
        
        #######    Replace this section by your logic   #######
        vTestId = transfomType(json.loads(requestBody)['testId'])
        vTestMessage = transfomType(json.loads(requestBody)['testMessage'])
    
        responseCode = 200 #ResponseCode.Ok
 
       
        db = Base('database_service6.pdl')
        db.create('testId', 'testMessage', mode="open")
        db.insert(testId = vTestId, testMessage = vTestMessage)
        db.commit()

        result = []

        responseBody = json.dumps(result, sort_keys=True, indent=4, separators=(',', ': '))
        #######    Replace this section by your logic   #######


        request.setResponseCode(responseCode)
        resp = utils.serviceResponse(responseCode, responseBody)
        
        return resp
Author: LucasFLima, Project: Mestrado, Lines: 31, Source: service6_srv.py

Example 2: __init__

# Required import: from pydblite import Base [as alias]
# Or: from pydblite.Base import commit [as alias]
class Posts:
    def __init__(self, filename, erase_db):
        self.db = Base(filename)
        self.db.create('author', 'content', 'date', mode="override" if erase_db else "open")

    def addPost(self, post):
        """ Persist a Post object in db and returns auto-generated id. """
        post.id = self.db.insert(author = post.author, content = post.content, date = post.date)
        self.db.commit()
        return post.id

    def getPost(self, id):
        """ Get a post by its id. Returns a Post object or None if id is not found. """
        db_entry = self.db[id] if id in self.db else None
        return self.__createPost(db_entry) if db_entry is not None else None

    def getPosts(self, from_date = None, to_date = None, author = None):
        """ Get all posts matching optionally provided conditions. Returns a list (can be empty). """
        iterator = self.db.filter()
        if from_date is not None:
            iterator = iterator & (self.db("date") > from_date)
        if to_date is not None:
            iterator = iterator & (self.db("date") < to_date)
        if author is not None:
            iterator = iterator & (self.db("author") == author)

        return [self.__createPost(db_entry) for db_entry in iterator]

    def getPostsCount(self):
        """ Get total number of posts in db. """
        return len(self.db)

    def __createPost(self, db_entry):
        return Post(db_entry['author'], db_entry['content'], db_entry['date'], db_entry['__id__'])
Author: aragornis, Project: vdm-ws, Lines: 36, Source: repository.py

Example 3: generateWeights

# Required import: from pydblite import Base [as alias]
# Or: from pydblite.Base import commit [as alias]
def generateWeights(graph, weightFile, param):
    pdb = Base(weightFile)
    pdb.create('pair', 'node1', 'node2','TS02','TS05','TS08', 'JC')
    pdb.create_index('pair')
    
    sortedNodes = sorted(graph.nodes())
    for node in sortedNodes:
        others = sorted(set(n for n in sortedNodes if n > node))
        for other in others:
            if graph.has_edge(node, other):
                informations = list(edge for n1, n2, edge in graph.edges([node, other], data=True) if ((n1 == node and n2 == other) or (n1 == other and n2 == node)) )
                timesofLinks = []
                for info in informations:
                    timesofLinks.append(int(info['time']))
        
                
                bagNode1 = list(eval(edge['keywords']) for n1, n2, edge in graph.edges([node], data=True) if (n1 != other and n2 != other)  )
                bagNode2 = list(eval(edge['keywords']) for n1, n2, edge in graph.edges([other], data=True) if (n1 != node and n2 != node)  )
                
                
                total_publications = len(informations)   
                k =  int(param.t0_)  - max(timesofLinks)
                decayfunction02 = (1 - 0.2) ** k
                decayfunction05 = (1 - 0.5) ** k
                decayfunction08 = (1 - 0.8) ** k
                
                pdb.insert(str(node) + ';' + str(other),node,other,(total_publications * decayfunction02) , (total_publications * decayfunction05) , (total_publications * decayfunction08), get_jacard_domain(bagNode1, bagNode2) ) 
                 
    pdb.commit()            
    return pdb
Author: AndersonChaves, Project: Predicao-de-Links, Lines: 32, Source: FullExecutionGraphRich.py

Example 4: do_Delete

# Required import: from pydblite import Base [as alias]
# Or: from pydblite.Base import commit [as alias]
    def do_Delete (self, result, request, args):
        def transfomType(x):
            if isinstance(x, unicode): return str(x)
            else: return x
    
        #######    Replace this section by your logic   #######
        db = Base('database_service6.pdl')
        db.create('testId', 'testMessage', mode="open")
        result = db(testId = int(args['testId']))
        
        if len(result) == 0:
            responseCode = 404 #ResponseCode.Ok
            responseBody = json.dumps(result, sort_keys=True, indent=4, separators=(',', ': '))
        else:
            responseCode = 200 #ResponseCode.Ok
            responseBody = json.dumps(result[0], sort_keys=True, indent=4, separators=(',', ': '))
            db.delete(result[0])
            db.commit()
        #######    Replace this section by your logic   #######


        request.setResponseCode(responseCode)
        resp = utils.serviceResponse(responseCode, responseBody)
        
        return resp
Author: LucasFLima, Project: Mestrado, Lines: 27, Source: service6_srv.py

Example 5: generateWeights

# Required import: from pydblite import Base [as alias]
# Or: from pydblite.Base import commit [as alias]
def generateWeights(graph, weightFile, param):
    pdb = Base(weightFile)
    pdb.create('pair', 'node1', 'node2','FTI01','FTI02','FTI03','FTI04','FTI05','FTI06','FTI07','FTI08','FTI09')
    pdb.create_index('pair')
    
    sortedNodes = sorted(graph.nodes())
    for node in sortedNodes:
        others = sorted(set(n for n in sortedNodes if n > node))
        for other in others:
            if graph.has_edge(node, other):
                informations = list(edge for n1, n2, edge in graph.edges([node, other], data=True) if ((n1 == node and n2 == other) or (n1 == other and n2 == node)) )
                timesofLinks = []
                for info in informations:
                    timesofLinks.append(int(info['time']))
                
                total_publications = len(informations)   
                k =  int(param.t0_)  - max(timesofLinks)
                FTI01 = total_publications * (0.1**k)
                FTI02 = total_publications * (0.2**k)
                FTI03 = total_publications * (0.3**k)
                FTI04 = total_publications * (0.4**k)
                FTI05 = total_publications * (0.5**k)
                FTI06 = total_publications * (0.6**k)
                FTI07 = total_publications * (0.7**k)
                FTI08 = total_publications * (0.8**k)
                FTI09 = total_publications * (0.9**k)
                 
                
 
                pdb.insert(str(node) + ';' + str(other),node,other, FTI01, FTI02, FTI03, FTI04, FTI05, FTI06, FTI07, FTI08, FTI09 ) 
                 
    pdb.commit()            
    return pdb
Author: cptullio, Project: Predicao-de-Links, Lines: 35, Source: FullExecutionFTI.py

Example 6: YahoourlsearcherPipeline

# Required import: from pydblite import Base [as alias]
# Or: from pydblite.Base import commit [as alias]
class YahoourlsearcherPipeline(object):
    def open_spider(self, spider):

        filename = "urls_log.txt"
        self.log_target = codecs.open(filename, 'a+', encoding='utf-8')
        self.log_target.truncate()

        self.db = Base('URL_database.pdl')
        self.db.create('url', 'date', mode="open")
        self.log_target.write("***New url scraping session started at: "+ str(datetime.datetime.strftime(datetime.datetime.now(), ' %Y-%m-%d %H:%M:%S ')) + " ***" +"\n")
        print("***New url scraping session started at: "+ str(datetime.datetime.strftime(datetime.datetime.now(), ' %Y-%m-%d %H:%M:%S ')) + " ***" +"\n")
        self.log_target.write("*** Total url in the Database BEFORE new search: "+ str(len(self.db)) + " ***" + "\n")


        dispatcher.connect(self.spider_closed, signals.spider_closed)


    def process_item(self, item, spider):
        self.db.insert(url=item['url'],
                       date=item['date']
                       )
        self.log_target.write(item['url'] + "  " + item['date'] + "\n")
        self.db.commit()
        return item

    def spider_closed(self, spider):
        url_structure = []
        print ("End of database")
        i = 1
        for r in self.db:
            #print (str(r["url"]) + " " + str(r["date"]) + " \n")
            url_structure.append(url_date(r["url"],r["date"]))
            i += 1
        print (str(i) + "Url in the DB \n")
        self.log_target.write("Session ends at: "+ str(datetime.datetime.strftime(datetime.datetime.now(), ' %Y-%m-%d %H:%M:%S ')) + "\n")
        print ("Session ends at: "+ str(datetime.datetime.strftime(datetime.datetime.now(), ' %Y-%m-%d %H:%M:%S ')) + "\n")
        self.log_target.write("*** Total url in the Database AFTER the search: "+ str(len(self.db)) + " ***" + "\n")

        print ("Elementi presenti nel database: "+ str(len(self.db)) + " in struttura: " + str(len(url_structure)))
        all_record = []
        for r in self.db:
            all_record.append(r)
        self.db.delete(all_record)
        print ("Elementi presenti nel database: "+ str(len(self.db)))

        # Deduplicate: keep only one entry per URL
        url_structure = {x.url: x for x in url_structure}.values()


        for any_url in url_structure:
            self.db.insert(any_url.url, any_url.date)


        print ("Elementi presenti nel database: "+ str(len(self.db)))
        self.db.commit()
        self.log_target.write("--- After SET operation: "+ str(len(self.db)) + " --- " + "\n" + "\n" + "\n" + "\n")

        self.log_target.close()
Author: piercolella, Project: qa-scrapers, Lines: 60, Source: pipelines.py

Example 7: DBPipeline

# Required import: from pydblite import Base [as alias]
# Or: from pydblite.Base import commit [as alias]
class DBPipeline(object):
    def __init__(self):

        #Creating log file
        filename = "session_log.txt"
        self.log_target = codecs.open(filename, 'a+', encoding='utf-8')
        self.log_target.truncate()
        self.log_target.write("***New session started at: "+ str(datetime.datetime.strftime(datetime.datetime.now(), ' %Y-%m-%d %H:%M:%S ')) + " ***" +"\n")

        #Creating database for items
        self.db = Base('QuestionThreadExtracted.pdl')
        self.db.create('uid', 'type', 'author', 'title', 'text', 'date_time',
                       'tags', 'views', 'answers', 'resolve', 'upvotes', 'url', mode="open")

        #Some data for the log file
        self.number_of_questions = 0
        self.number_of_answers = 0
        self.last_id=0
        dispatcher.connect(self.spider_closed, signals.spider_closed)

 
    def process_item(self, item, spider):

        self.db.insert(uid=item['uid'],
                       type=item['type'],
                       author=item['author'],
                       title=item['title'],
                       text=item['text'],
                       date_time=item['date_time'],
                       tags=item['tags'],
                       views=item['views'],
                       answers=item['answers'],
                       resolve=item['resolve'],
                       upvotes=item['upvotes'],
                       url=item['url']
                       )
        #Count questions and answers
        if "question" in item['type']:
            self.number_of_questions+=1
            if self.last_id<item['uid']:
                self.last_id=item['uid']
        else:
            self.number_of_answers+=1


        self.db.commit()
        return item

    def spider_closed(self, spider):
        self.log_target.write("Questions founded: "+ str(self.number_of_questions) + "\n")
        self.log_target.write("Answers founded: "+ str(self.number_of_answers) + "\n")
        self.log_target.write("Last UID: "+str(self.last_id) + "\n" + "\n")


        self.log_target.write("***Session End at: "+ str(datetime.datetime.strftime(datetime.datetime.now(), ' %Y-%m-%d %H:%M:%S ')) + " ***" +"\n")

        self.log_target.close()
Author: piercolella, Project: qa-scrapers, Lines: 59, Source: pipelines.py

Example 8: convertcsv2db

# Required import: from pydblite import Base [as alias]
# Or: from pydblite.Base import commit [as alias]
def convertcsv2db(csvpath, dbpath): #Converts a CSV file to a PyDBLite database
    db = Base(dbpath)
    try:
        csvfile = open(csvpath, 'rb')
    except csv.Error:
        print "Could not open CSV file at " + csvpath + "\n"
    reader = csv.reader(csvfile)
    header = reader.next()
    try:
        db.create(*header)
    except IOError:
        print "Existing DB at " + dbpath + "\n"
    for row in reader:
        db.insert(*row)
    db.commit()
Author: magomez96, Project: AdamTestBot, Lines: 17, Source: utils.py

Example 9: generate_finalResult

# Required import: from pydblite import Base [as alias]
# Or: from pydblite.Base import commit [as alias]
def generate_finalResult(order,topRank, TestGraph, FileNameResult ):
    pdb = Base(FileNameResult)
    pdb.create('node1', 'node2', 'value', 'sucesso','topRank')
    pdb.create_index('node1', 'node2')
 
    indice = 0 
    for nodeToCheck in order:
        indice = indice+1
        isTopRank = (indice <= topRank)
        if (TestGraph.has_edge(nodeToCheck['node1'],nodeToCheck['node2'])):
            pdb.insert(str(nodeToCheck['node1']), nodeToCheck['node2'],nodeToCheck['value'] , True, isTopRank  ) 
        else:   
            pdb.insert(str(nodeToCheck['node1']), nodeToCheck['node2'],nodeToCheck['value'] , False, isTopRank  ) 
    
    pdb.commit()
    return pdb
Author: cptullio, Project: Predicao-de-Links, Lines: 18, Source: FullExecutionFCTI.py

Example 10: checkCondition

# Required import: from pydblite import Base [as alias]
# Or: from pydblite.Base import commit [as alias]
 def checkCondition(cls, result):
     if result == "error":
         return "erro"
     else:
         
         db = Base('database_service1.pdl')
         db.create('cod', 'message', mode="open")
         db.insert(cod='1', message='valid')
         db.insert(cod='2', message='not valid')
         db.commit()
         #for rec in (db("age") > 30):
         for rec in db:
             print rec["cod"] +' '+ rec["message"]
         
         
         
         return "ok"
Author: LucasFLima, Project: Mestrado, Lines: 19, Source: tmp.py

Example 11: DBPipeline

# Required import: from pydblite import Base [as alias]
# Or: from pydblite.Base import commit [as alias]
class DBPipeline(object):
    # Pipeline to write an Item in the database
    def open_spider(self, spider):
        # Creation of DB
        self.db = Base(spider.database)
        self.db.create('uid', 'type', 'author', 'title', 'text', 'date_time',
                       'tags', 'views', 'answers', 'resolve', 'upvotes', 'url',
                       mode="override")
        dispatcher.connect(self.spider_closed, signals.spider_closed)

    def process_item(self, item, spider):
        # Writing of the item
        self.db.insert(uid=item['uid'],
                       type=item['type'],
                       author=item['author'],
                       title=item['title'],
                       text=item['text'],
                       date_time=item['date_time'],
                       tags=item['tags'],
                       views=item['views'],
                       answers=item['answers'],
                       resolve=item['resolve'],
                       upvotes=item['upvotes'],
                       url=item['url']
                       )

        self.db.commit()
        return item

    def spider_closed(self, spider):
        # Number of items saved, shown at the end
        i = 0
        j = 0
        for r in self.db:

            if r["type"] == "question":
                i += 1
            else:
                j += 1

        print ('Number of questions and answers found:')
        print (str(i) + ' questions \n')
        print (str(j) + ' answers \n')
Author: piercolella, Project: qa-scrapers, Lines: 45, Source: pipelines.py

Example 12: get_gbq_query_results

# Required import: from pydblite import Base [as alias]
# Or: from pydblite.Base import commit [as alias]
def get_gbq_query_results(gbq_service, query, prj_id='ace-amplifier-455', index_col=None,
                          col_order=None, cache=True, cache_path='', db=None):
    
    # set_trace()
    if cache:
        if db is None:
            db = Base(cache_path).create('query', 'result_df', mode='open')
            # db.create_index('query')
        final_df = extract(db, query)
        if len(final_df) > 0:
            # print 'extracted from cache ' + db.path
            return final_df

    df = gbq.read_gbq(query, prj_id)

    if cache:
        db.insert(query, df)
        db.commit()
        print 'saved to cache ' + db.path

    return df
Author: sergey-linio, Project: weekly_reports, Lines: 23, Source: auth_to_gbq2.py

Example 13: calculatingWeights

# Required import: from pydblite import Base [as alias]
# Or: from pydblite.Base import commit [as alias]
def calculatingWeights(graph, nodesnotLinked, database, calculatingFile):
    pdb = Base(calculatingFile)
    pdb.create('node1', 'node2', 'cnWTS02','cnWTS05','cnWTS08', 'aaWTS02', 'aaWTS05', 'aaWTS08')
    pdb.create_index('node1', 'node2')
                
    element = 0
    qtyofNodesToProcess = len(nodesnotLinked)
    for pair in nodesnotLinked:
        element = element+1
        FormatingDataSets.printProgressofEvents(element, qtyofNodesToProcess, "Calculating features for nodes not liked: ")
        neighbors_node1 = all_neighbors(graph, pair[0])
        neighbors_node2 = all_neighbors(graph, pair[1])
        len_neihbors_node1 = len(neighbors_node1)
        len_neihbors_node2 = len(neighbors_node2)
        CommonNeigbors = neighbors_node1.intersection(neighbors_node2)
        CNWts02Feature = 0;
        CNWts05Feature = 0;
        CNWts08Feature = 0;
        AAWts02Feature = 0;
        AAWts05Feature = 0;
        AAWts08Feature = 0;
        CNWJCFeature = 0;
        AAWJCFeature = 0;
        
        for cn in CommonNeigbors:
            item = get_partOfWeightCalculating(graph, database, pair, cn)
            CNWts02Feature = CNWts02Feature + item['cnWts02'];
            CNWts05Feature = CNWts05Feature + item['cnWts05'];
            CNWts08Feature = CNWts08Feature + item['cnWts08'];
            AAWts02Feature = AAWts02Feature + item['aaWts02'];
            AAWts05Feature = AAWts05Feature + item['aaWts05'];
            AAWts08Feature = AAWts08Feature + item['aaWts08'];
            #CNWJCFeature = CNWJCFeature + item['cnWJC'];
            #AAWJCFeature = AAWJCFeature + item['aaWJC'];
        
            
        pdb.insert(str(pair[0]), str(pair[1]), CNWts02Feature, CNWts05Feature, CNWts08Feature, AAWts02Feature, AAWts05Feature, AAWts08Feature  )   
    pdb.commit()
    return pdb;
Author: cptullio, Project: Predicao-de-Links, Lines: 41, Source: FullExecutionGraphRich_versaoTempo.py

Example 14: get_results_db

# Required import: from pydblite import Base [as alias]
# Or: from pydblite.Base import commit [as alias]
def get_results_db(clear_cache=False, skip=[]):
    cache_file = 'cache/results.pdl'
    db = Base(cache_file)

    if clear_cache or not db.exists() or os.path.getmtime(cache_file) < os.path.getmtime(results_dir):
        warnings.warn('Rebuilding results cache...')
        columns = set()
        rows = []
        p = pathlib.Path(results_dir)
        for config_file in p.glob('*.config'):
            with config_file.open() as config_fh:
                settings_hash = config_file.stem
                row = json.loads(config_fh.read())
            if settings_hash in skip:
                continue
            row['hash'] = settings_hash
            tests_count = analyze.count(config_file.parent, settings_hash)
            row['iostat_cpu'], len_cpu_values = analyze.iostat_cpu(config_file.parent, settings_hash)
            row['iperf_result'], len_iperf_values = getattr(analyze, row['iperf_name'])(config_file.parent, settings_hash, row)
            if tests_count != len_cpu_values or tests_count != len_iperf_values:
                raise analyze.AnalysisException('For test {}, mismatch in cardinality of tests between count ({}), iostat ({}) and iperf ({})'.format(settings_hash, tests_count, len_cpu_values, len_iperf_values), settings_hash)
            if len_iperf_values > 0:
                min_fairness = row['iperf_result']['fairness'][0] - row['iperf_result']['fairness'][1]
                if min_fairness < (1 - 1 / (2 * row['parallelism'])):
                    warnings.warn('For test {}, fairness has a critical value: {}.'.format(settings_hash, row['iperf_result']['fairness']), RuntimeWarning)
            columns = columns | set(row.keys())
            rows.append(row)

        db.create(*columns, mode='override')
        for r in rows:
            db.insert(**r)
        db.commit()
        warnings.warn('Results cache built.')
    else:
        warnings.warn('Reusing results cache.')
        db.open()

    return db
Author: serl, Project: topoblocktest, Lines: 40, Source: test_master.py

Example 15: likeconvert

# Required import: from pydblite import Base [as alias]
# Or: from pydblite.Base import commit [as alias]
def likeconvert(likesRoot):
    histPath = likesRoot + '/history'
    convertcsv2db(likesRoot + '/totals.csv', likesRoot + '/likes.pdl')
    db = Base(likesRoot + '/likes.pdl')
    db.open()
    db.add_field('history', "")
    db.add_field('liked', "")
    dirContents = os.listdir(histPath)
    histFiles = []

    for File in dirContents:
        if ".csv" in File:
            histFiles.append(File)
    for histFile in histFiles:
        try:
            csvfile = open(histPath + '/' + histFile, 'rb')
            reader = csv.DictReader(csvfile)
            for row in reader:
                if histFile.endswith('history.csv'):
                    recName = histFile[:-11]
                    print(recName)
                if db(userID=recName):
                    rec = db(userID=recName).pop()
                    if not rec['liked']:
                        db.update(rec, liked=row['liked'])
                    else:
                        tmpLiked = rec['liked']
                        tmpLiked += " " + row['liked']
                        db.update(rec, liked=tmpLiked)
                    if not rec['history']:
                        db.update(rec, history=row['messageID'])
                    else:
                        tmpHist = rec['history']
                        tmpHist += " " + row['messageID']
                        db.update(rec, history=tmpHist)
                db.commit()
        except csv.Error:
                print("Could not open CSV file")
Author: noisemaster, Project: AdamTestBot, Lines: 40, Source: utils.py


Note: The pydblite.Base.commit examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by their respective authors; copyright of the source code remains with the original authors, and distribution and use should follow the license of the corresponding project. Please do not reproduce this article without permission.