

Python Session.flush method code examples

This article collects typical usage examples of the Python method database.Session.flush. If you are wondering what exactly Session.flush does, how to call it, or what real uses of it look like, the curated code examples below should help. You can also use them as a starting point for exploring the broader usage of database.Session.


A total of 5 code examples of the Session.flush method are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
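Before the examples, here is a minimal, self-contained sketch of the pattern they all rely on: flush() sends the pending SQL to the database inside the current transaction, so server-generated values such as autoincrement primary keys become available on the Python objects, while only commit() makes the changes permanent (rollback() still undoes a flushed-but-uncommitted change). The sketch uses plain SQLAlchemy with an in-memory SQLite database; the database.Session object in the examples below is assumed to be a similar scoped session, and the Trajectory model and its columns here are purely illustrative.

# Illustrative stand-in for the project's database module (assumed SQLAlchemy-based)
from sqlalchemy import Column, Integer, String, create_engine
from sqlalchemy.orm import declarative_base, sessionmaker

Base = declarative_base()

class Trajectory(Base):
    __tablename__ = 'trajectories'
    id = Column(Integer, primary_key=True)    # autogenerated primary key
    name = Column(String)

engine = create_engine('sqlite://')           # throwaway in-memory database
Base.metadata.create_all(engine)
Session = sessionmaker(bind=engine)
session = Session()

traj = Trajectory(name='traj-0')
session.add(traj)
session.flush()     # INSERT is emitted now, so traj.id has been assigned...
print(traj.id)      # ...and can already be used, e.g. as a work-queue task tag
session.commit()    # make the transaction permanent; rollback() would undo it instead

Examples 2 and 4 below use exactly this idiom: the object is added and flushed so that its freshly assigned id can drive populate_default_filenames() and the task tag before the final commit.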

Example 1: on_return

# Required import: from database import Session [as alias]
# Or: from database.Session import flush [as alias]
    def on_return(self, task):
        """Called by main thread on the return of data from the workers.
        Post-processing"""
        logger.info("Retrieved task %s", task.tag)
        traj = Session.query(models.Trajectory).get(int(task.tag))

        try:
            # save lh5 version of the trajectory
            conf = load_file(self.project.pdb_topology_file)
            coordinates = msmbuilder.Trajectory.load_trajectory_file(str(traj.dry_xtc_fn), Conf=conf)
            save_file(traj.lh5_fn, coordinates)

        except Exception as e:
            logger.error("When postprocessing %s, convert to lh5 failed!", traj)
            logger.exception(e)
            raise

        # convert last_wet_snapshot to lh5
        pdb_to_lh5(traj, "last_wet_snapshot_fn")
        pdb_to_lh5(traj, "init_pdb_fn")

        traj.host = task.host
        traj.returned_time = datetime.now()
        traj.length = len(coordinates)
        Session.flush()
        Session.commit()
        logger.info("Finished converting new traj to lh5 sucessfully")
Developer ID: rmcgibbo, Project: msmaccelerator, Lines of code: 29, Source file: QMaster.py

Example 2: submit

# Required import: from database import Session [as alias]
# Or: from database.Session import flush [as alias]
    def submit(self, traj):
        """ Submit a job to the work-queue for further sampling.
        
        Parameters
        ----------
        """
        if traj.submit_time is not None:
            raise ValueError("This traj has already been submitted")
        Session.add(traj)
        Session.flush()
        traj.populate_default_filenames()

        if not hasattr(traj, "init_pdb"):
            raise ValueError("Traj is supposed to have a pdb object tacked on")
        save_file(traj.init_pdb_fn, traj.init_pdb)

        remote_driver_fn = os.path.split(str(traj.forcefield.driver))[1]
        remote_pdb_fn = "input.pdb"
        remote_output_fn = "production_dry{}".format(traj.forcefield.output_extension)

        if traj.mode is None or traj.forcefield is None:
            raise ValueError("malformed traj")

        task = Task(
            "python ./{driver} {pdb_fn} {ff} {water} {mode} {threads}".format(
                pdb_fn=remote_pdb_fn,
                mode=traj.mode,
                driver=remote_driver_fn,
                ff=traj.forcefield.name,
                water=traj.forcefield.water,
                threads=traj.forcefield.threads,
            )
        )

        # why does traj.forcefield.driver come out as unicode?
        task.specify_input_file(str(traj.forcefield.driver), remote_driver_fn)
        task.specify_output_file(traj.wqlog_fn, "logs/driver.log")
        task.specify_input_file(traj.init_pdb_fn, remote_pdb_fn)
        task.specify_output_file(traj.dry_xtc_fn, remote_output_fn)

        if self.return_wet_xtc:
            # this is the XTC file with waters, generated by the driver
            # when you're doing implicit solvent only, this stuff is not used.
            remote_wet_output_fn = "production_wet{}".format(traj.forcefield.output_extension)
            task.specify_output_file(traj.wet_xtc_fn, remote_wet_output_fn)
            task.specify_output_file(traj.last_wet_snapshot_fn, "last_wet_snapshot.pdb")
        else:
            logger.debug("Not requesting production_wet%s from driver (implicit)", traj.forcefield.output_extension)

        task.specify_tag(str(traj.id))
        task.specify_algorithm(WORK_QUEUE_SCHEDULE_FILES)  # prefer workers that already have the task's input files cached

        traj.submit_time = datetime.now()

        # need to do a commit from the qmaster, since this is a different
        # session
        Session.commit()
        self.wq.submit(task)
        logger.info("Submitted to queue: %s", traj)
Developer ID: rmcgibbo, Project: msmaccelerator, Lines of code: 61, Source file: QMaster.py

Example 3: edit_month

# Required import: from database import Session [as alias]
# Or: from database.Session import flush [as alias]
def edit_month(year, month, single_day=None):
    session = Session()
    bible_query = BibleQuery()
    lit_years = {}

    editor = Editor()

    # From http://stackoverflow.com/questions/15120346/emacs-setting-comment-character-by-file-extension
    PrependStream(editor.tempfile, '# ').write(u'-*- coding: utf-8; comment-start: "#"; -*-\n')
    editor.tempfile.write(u'\n')

    def push_day(day):
        date = datetime.date(year, month, day)
        lit_date = get_lit_date(date, lit_years, session)
        events = map(lambda x: x[1], lit_date.competitors)
        print_lit_date(lit_date, PrependStream(editor.tempfile, u'# '), with_id=True)
        editor.tempfile.write(u'\n')
        editor.tempfile.write(json.dumps(map(lambda x: x.as_dict(), events), ensure_ascii=False, indent=2, sort_keys=True) + u'\n')
        editor.tempfile.write(u'---===---\n')
        editor.tempfile.write(u'\n')

    if single_day is not None:
        push_day(single_day)
    else:
        for day in real_itermonthdays(year, month):
            push_day(day)

    editor.edit()

    while True:
        lines = filter(lambda x: not x.startswith(u'#'), editor.edited_content)
        buf = u''

        try:
            for line in lines:
                if line.strip() == u'---===---':
                    data = json.loads(buf)
                    for piece in data:
                        from_dict(piece, session)
                    buf = u''
                else:
                    buf += line
            session.flush()

        except:
            traceback.print_exc()
            sys.stdout.write("Error while parsing new content. Re-edit? [Y/n] ")
            answer = sys.stdin.readline().strip()
            if answer != '':
                answer = answer[0]
            if answer == 'n' or answer == 'N':
                sys.stdout.write("Aborting\n")
                sys.exit(0)
            else:
                sys.stdout.write("Re-editing...\n")
                session.rollback()
                edited_content = editor.edited_content
                editor = Editor()
                editor.tempfile.write("".join(edited_content))
                editor.edit()

        else:
            break

    if editor.confirmation_request(session_has_pending_commit(session)):
        #reading.text = new_text
        session.commit()
    else:
        session.rollback()
Developer ID: giomasce, Project: liturgy, Lines of code: 71, Source file: edit_month.py

Example 4: run_round

# Required import: from database import Session [as alias]
# Or: from database.Session import flush [as alias]
def run_round(checkdata=True):
    """Activate the builder and build new MSMs (if necessary)
    
    First, check to see if there is enough data to warrant building a
    new set of MSMs. Assuming yes, do a joint clustering over all of the
    data, and then build MSMs for each forcefield on that state space.
    
    Parameters
    ----------
    checkdata : boolean, optional
         If False, skip the checking process
    
    Returns
    -------
    happened : boolean
        True if we actually did a round of MSM building, False otherwise
    """
        
    if checkdata:
        logger.info("Checking if sufficient data has been acquired.")
        if not is_sufficient_new_data():
            return False
    else:
        logger.info("Skipping check for adequate data.")
        
    # use all the data together to get the cluster centers
    generators, db_trajs = joint_clustering()
        
    msmgroup = MSMGroup(trajectories=db_trajs)
    for ff in Session.query(Forcefield).all():
        trajs = filter(lambda t: t.forcefield == ff, db_trajs)
        msm = build_msm(ff, generators=generators, trajs=trajs)
        msmgroup.markov_models.append(msm)
        
    # add generators to msmgroup
    Session.add(msmgroup)
    Session.flush()
    msmgroup.populate_default_filenames()
        
    msmgroup.trajectories = db_trajs
    msmgroup.n_states = len(generators)
    save_file(msmgroup.generators_fn, generators)

        
    for msm in msmgroup.markov_models:
        msm.populate_default_filenames()
        if hasattr(msm, 'counts'):
            save_file(msm.counts_fn, msm.counts)
        if hasattr(msm, 'assignments'):
            save_file(msm.assignments_fn, msm.assignments)
        if hasattr(msm, 'distances'):
            save_file(msm.distances_fn, msm.distances)
            save_file(msm.inverse_assignments_fn, dict(MSMLib.invert_assignments(msm.assignments)))
        

    # ======================================================================#
    # HERE IS WHERE THE ADAPTIVE SAMPLING ALGORITHMS GET CALLED
    # The obligation of the adaptive_sampling routine is to set the
    # model_selection_weight on each MSM/forcefield and the microstate
    # selection weights
    # check to make sure that the right fields were populated
    try:
        Project().adaptive_sampling(Session, msmgroup)
        
        for msm in msmgroup.markov_models:
            if not isinstance(msm.model_selection_weight, numbers.Number):
                raise ValueError('model selection weight on %s not set correctly' % msm)
            if not isinstance(msm.microstate_selection_weights, np.ndarray):
                raise ValueError('microstate_selection_weights on %s not set correctly' % msm)
    except Exception as e:
        logging.error('ADAPTIVE SAMPLING ERROR')
        logging.error(e)
        sampling.default(Session, msmgroup)
        
    #=======================================================================#

        
    Session.flush()
    logger.info("Round completed successfully")
    return True
Developer ID: rmcgibbo, Project: msmaccelerator, Lines of code: 82, Source file: Builder.py

Example 5: import_from_scrape

# Required import: from database import Session [as alias]
# Or: from database.Session import flush [as alias]
def import_from_scrape(year, month):
    lit_years = {}
    session = Session()

    for day in real_itermonthdays(year, month):
        date = datetime.date(year, month, day)
        print >> sys.stderr, "Importing %s..." % (date)
        lit_date = get_lit_date(date, lit_years, session)

        # Check if we already have a mass here
        try:
            lit_date.get_masses(strict=False)
        except SelectingMassException:
            pass
        else:
            print >> sys.stderr, "  * skipping because a valid mass already exists"
            continue

        winner = lit_date.get_winner(remove_ok=True)
        if winner is None:
            print >> sys.stderr, "  * skipping because there are no masses today"
            continue

        event = winner[1]
        with open(os.path.join('scrape', '%04d-%02d-%02d.html' % (year, month, day))) as fhtml:
            quotes = scrape_file(fhtml)

        if u'auto' not in event.status.split(u' '):
            event.status += u' auto'

        mass = Mass()
        mass.order = 0
        mass.event = event
        mass.digit = lit_date.digit
        mass.letter = lit_date.letter
        mass.title = None
        mass.status = u'auto'
        session.add(mass)

        order = 0
        if len(quotes) == 4:
            titles = [u'Prima lettura', u'Salmo responsoriale', u'Seconda lettura', u'Vangelo']
        elif len(quotes) == 3:
            titles = [u'Prima lettura', u'Salmo responsoriale', u'Vangelo']
        # Domenica delle Palme
        elif len(quotes) == 5:
            titles = [u'Vangelo delle Palme', u'Prima lettura', u'Salmo responsoriale', u'Seconda lettura', u'Vangelo']
        # Pasqua
        elif len(quotes) == 17:
            titles = [u'Prima lettura',
                      u'Salmo responsoriale',
                      u'Seconda lettura',
                      u'Salmo responsoriale',
                      u'Terza lettura',
                      u'Salmo responsoriale',
                      u'Quarta lettura',
                      u'Salmo responsoriale',
                      u'Quinta lettura',
                      u'Salmo responsoriale',
                      u'Sesta lettura',
                      u'Salmo responsoriale',
                      u'Settima lettura',
                      u'Salmo responsoriale',
                      u'Epistola',
                      u'Salmo responsoriale',
                      u'Vangelo']
        else:
            raise Exception('Strange number of readings (%d)' % (len(quotes)))

        for (quote, text), title in zip(quotes, titles):
            reading = Reading()
            reading.order = order
            order += 1
            reading.alt_num = 0
            reading.mass = mass
            reading.title = title
            reading.quote = canonicalise_quote(quote)
            reading.text = text
            try:
                decode_quote(quote, allow_only_chap=True, valid_abbr=ABBR_VATICAN)
            except:
                reading.quote_status = u'auto invalid'
            else:
                reading.quote_status = u'auto'
            if text is None:
                reading.text_status = u'missing'
            else:
                reading.text_status = u'auto'
            session.add(reading)

        session.flush()

        # Write some interesting things
        #print '#'
        #print_lit_date(lit_date, PrependStream(sys.stdout, '# '))
        #print
        #print json.dumps(event.as_dict(), encoding='utf-8', ensure_ascii=False, indent=2, sort_keys=True)
        #print

    session.commit()
#......... remaining code omitted .........
Developer ID: giomasce, Project: liturgy, Lines of code: 103, Source file: import_month.py


Note: The database.Session.flush method examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets were selected from open-source projects contributed by various developers; copyright of the source code belongs to the original authors. Please consult the corresponding project's License before redistributing or using the code; do not reproduce without permission.