

Python GlobalDB.db Method Code Examples

This article collects typical usage examples of the Python method dataactcore.interfaces.db.GlobalDB.db. If you are wondering what GlobalDB.db does, how to call it, or what real-world uses look like, the curated method examples below should help. You can also explore further usage examples from its containing module, dataactcore.interfaces.db.GlobalDB.


The 15 GlobalDB.db code examples below are ordered by popularity.
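Every example follows the same pattern: obtain the shared SQLAlchemy session via GlobalDB.db().session (typically inside a Flask app context, as Example 12 shows with create_app()), query the broker's ORM models, and commit explicitly after writes. Here is a minimal sketch of that pattern; count_rows and model are hypothetical stand-ins, not part of the broker codebase:

# Minimal sketch of the shared session pattern; count_rows and the model
# argument are hypothetical, not broker code.
from dataactcore.interfaces.db import GlobalDB

def count_rows(model):
    sess = GlobalDB.db().session       # shared SQLAlchemy session
    total = sess.query(model).count()  # read through the ORM
    # ... mutate ORM objects here, then commit, as the examples below do ...
    sess.commit()
    return total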

Example 1: main

# Required import: from dataactcore.interfaces.db import GlobalDB [as alias]
# Or: from dataactcore.interfaces.db.GlobalDB import db [as alias]
def main():
    sess = GlobalDB.db().session
    start = datetime.datetime.now()
    logger.info("FPDS IDV delete started")

    # get and read the file
    del_file = get_delete_file()
    data = pd.read_csv(del_file, dtype=str, encoding='utf_8_sig')

    # Clean up the data so it's usable
    data = clean_delete_data(data)

    # Gather list of records to delete
    gather_start = datetime.datetime.now()
    logger.info("Starting gathering of records to delete.")
    delete_list, delete_dict = get_deletes(sess, data)
    gather_end = datetime.datetime.now()
    logger.info("Finished gathering records in {} seconds. Total records to delete: {}".format(gather_end-gather_start,
                                                                                               len(delete_list)))
    # Delete records
    logger.info("Deleting records")
    delete_records(sess, delete_list, delete_dict)
    sess.commit()

    end = datetime.datetime.now()
    logger.info("FPDS IDV delete finished in %s seconds", end-start)
Developer: fedspendingtransparency, Project: data-act-broker-backend, Lines: 28, Source: delete_deleted_fpds_idv.py

Example 2: insert_from_table

# Required import: from dataactcore.interfaces.db import GlobalDB [as alias]
# Or: from dataactcore.interfaces.db.GlobalDB import db [as alias]
def insert_from_table(file_type_id, submission_id):
    """ Insert the data from the base staging table into the corresponding Certified table.

        Params:
            file_type_id:  Database file type ID for files A, B, or C
            submission_id: Database ID for the submission being loaded
    """
    sess = GlobalDB.db().session
    logger.info('Copying submission {} data from base table into {}'.format(submission_id,
                                                                            FTI_TABLENAME_DICT[file_type_id]))

    table_type = FTI_BASETABLE_DICT[file_type_id].__table__.name
    column_list = [col.key for col in FTI_BASETABLE_DICT[file_type_id].__table__.columns]
    column_list.remove('created_at')
    column_list.remove('updated_at')
    column_list.remove(table_type + '_id')
    col_string = ", ".join(column_list)

    executed = sess.execute(
        "INSERT INTO certified_{} (created_at, updated_at, {}) "
        "SELECT NOW() AS created_at, NOW() AS updated_at, {} "
        "FROM {} "
        "WHERE submission_id={}".format(table_type, col_string, col_string, table_type, submission_id))
    sess.commit()

    logger.info('Loaded {} records into the {} table'.format(executed.rowcount, FTI_TABLENAME_DICT[file_type_id]))
Developer: fedspendingtransparency, Project: data-act-broker-backend, Lines: 28, Source: load_historical_certified_dabs.py
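The INSERT above interpolates submission_id directly into the SQL string, which is safe here because the value comes from the broker's own database rather than user input. Where the value is user-supplied, a bind parameter is the safer choice. A sketch using SQLAlchemy's text() construct (table and column names still have to be interpolated, since bind parameters cannot stand in for identifiers):

from sqlalchemy import text

# Hedged variant: submission_id passed as a bind parameter instead of str.format()
executed = sess.execute(
    text("INSERT INTO certified_{} (created_at, updated_at, {}) "
         "SELECT NOW() AS created_at, NOW() AS updated_at, {} "
         "FROM {} "
         "WHERE submission_id = :sub_id".format(table_type, col_string, col_string, table_type)),
    {'sub_id': submission_id})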

Example 3: update_offices

# Required import: from dataactcore.interfaces.db import GlobalDB [as alias]
# Or: from dataactcore.interfaces.db.GlobalDB import db [as alias]
def update_offices(csv_path):
    """ Load office data from the provided CSV and replace/insert any office lookups.

        Args:
            csv_path: path/url to the file to read from
    """
    sess = GlobalDB.db().session

    data = clean_office(csv_path)
    add_existing_id(data)

    old_data = data[data['existing_id'].notnull()]
    del old_data['existing_id']

    new_data = data[data['existing_id'].isnull()]
    del new_data['existing_id']

    # instead of using the pandas to_sql dataframe method like some of the other domain load processes, iterate through
    # the dataframe rows so we can load using the orm model (note: toyed with the SQLAlchemy bulk load options but
    # ultimately decided not to go outside the unit of work for the sake of a performance gain)
    for _, row in old_data.iterrows():
        sess.query(FPDSContractingOffice).filter_by(contracting_office_code=row['contracting_office_code'])\
            .update(row, synchronize_session=False)

    for _, row in new_data.iterrows():
        sess.add(FPDSContractingOffice(**row))

    sess.commit()
    logger.info('%s records in CSV, %s existing', len(data.index), sum(data['existing_id'].notnull()))
Developer: fedspendingtransparency, Project: data-act-broker-backend, Lines: 31, Source: load_offices.py

Example 4: list_all_agencies

# Required import: from dataactcore.interfaces.db import GlobalDB [as alias]
# Or: from dataactcore.interfaces.db.GlobalDB import db [as alias]
def list_all_agencies():
    sess = GlobalDB.db().session
    cgacs = sess.query(CGAC).all()
    agency_list = [{'agency_name': cgac.agency_name, 'cgac_code': cgac.cgac_code} for cgac in cgacs]
    frecs = sess.query(FREC).all()
    shared_list = [{'agency_name': frec.agency_name, 'frec_code': frec.frec_code} for frec in frecs]
    return JsonResponse.create(StatusCode.OK, {'agency_list': agency_list, 'shared_agency_list': shared_list})
Developer: tony-sappe, Project: data-act-broker-backend, Lines: 9, Source: domainRoutes.py

Example 5: retrieve_cached_file_generation

# Required import: from dataactcore.interfaces.db import GlobalDB [as alias]
# Or: from dataactcore.interfaces.db.GlobalDB import db [as alias]
def retrieve_cached_file_generation(job, agency_type, agency_code):
    """ Retrieves a cached FileGeneration for the D file request, if there is one.

        Args:
            job: Upload Job for the generation file
            agency_type: Type of Agency to generate files by: "awarding" or "funding"
            agency_code: Agency code to generate file for

        Returns:
            FileGeneration object matching the criteria, or None
    """
    sess = GlobalDB.db().session
    logger.info({'message': 'Checking for a cached FileGeneration to pull file from', 'message_type': 'BrokerInfo',
                 'submission_id': job.submission_id, 'job_id': job.job_id, 'file_type': job.file_type.letter_name})

    # find current date and date of last FPDS pull
    current_date = datetime.now().date()
    last_update = sess.query(FPDSUpdate).one_or_none()
    fpds_date = last_update.update_date if last_update else current_date

    # check if a cached FileGeneration already exists using these criteria
    file_generation = None
    file_gen = sess.query(FileGeneration).filter(
        FileGeneration.start_date == job.start_date, FileGeneration.end_date == job.end_date,
        FileGeneration.agency_code == agency_code, FileGeneration.agency_type == agency_type,
        FileGeneration.file_type == job.file_type.letter_name, FileGeneration.is_cached_file.is_(True)).one_or_none()

    if file_gen and (file_gen.file_type == 'D1' and file_gen.request_date < fpds_date):
        # Uncache expired D1 FileGeneration
        file_gen.is_cached_file = False
        sess.commit()
    elif file_gen:
        file_generation = file_gen

    return file_generation
Developer: fedspendingtransparency, Project: data-act-broker-backend, Lines: 37, Source: generation_helper.py

Example 6: get_fabs_meta

# Required import: from dataactcore.interfaces.db import GlobalDB [as alias]
# Or: from dataactcore.interfaces.db.GlobalDB import db [as alias]
def get_fabs_meta(submission_id):
    """Return the total rows, valid rows, publish date, and publish file for FABS submissions"""
    sess = GlobalDB.db().session

    # get row counts from the DetachedAwardFinancialAssistance table
    dafa = DetachedAwardFinancialAssistance
    total_rows = sess.query(dafa).filter(dafa.submission_id == submission_id)
    valid_rows = total_rows.filter(dafa.is_valid)

    # retrieve the published data and file
    submission = sess.query(Submission).filter(Submission.submission_id == submission_id).one()
    publish_date, published_file = None, None
    certify_data = get_lastest_certified_date(submission, is_fabs=True)

    try:
        iter(certify_data)
    except TypeError:
        publish_date = certify_data
    else:
        publish_date, file_path = certify_data
        if CONFIG_BROKER["use_aws"] and file_path:
            path, file_name = file_path.rsplit('/', 1)  # split by last instance of /
            published_file = S3Handler().get_signed_url(path=path, file_name=file_name,
                                                        bucket_route=CONFIG_BROKER['certified_bucket'],
                                                        url_mapping=CONFIG_BROKER["certified_bucket_mapping"],
                                                        method="get_object")
        elif file_path:
            published_file = file_path

    return {
        'valid_rows': valid_rows.count(),
        'total_rows': total_rows.count(),
        'publish_date': publish_date.strftime('%-I:%M%p %m/%d/%Y') if publish_date else None,
        'published_file': published_file
    }
Developer: fedspendingtransparency, Project: data-act-broker-backend, Lines: 37, Source: function_bag.py
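The try/iter block above is a duck-typing test: get_lastest_certified_date evidently returns either a bare date or a (date, file_path) tuple, and iter() raises TypeError only for the non-iterable date. A self-contained illustration of the idiom (the values are hypothetical):

from datetime import date

def unpack_certify_data(certify_data):
    try:
        iter(certify_data)         # tuples are iterable...
    except TypeError:
        return certify_data, None  # ...a bare date is not
    else:
        publish_date, file_path = certify_data
        return publish_date, file_path

print(unpack_certify_data(date(2019, 1, 1)))                  # (date, None)
print(unpack_certify_data((date(2019, 1, 1), 'path/a.csv')))  # (date, 'path/a.csv')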

Example 7: submit_files

# Required import: from dataactcore.interfaces.db import GlobalDB [as alias]
# Or: from dataactcore.interfaces.db.GlobalDB import db [as alias]
    def submit_files():
        file_manager = FileHandler(request, is_local=is_local, server_path=server_path)

        sess = GlobalDB.db().session

        start_date = request.json.get('reporting_period_start_date')
        end_date = request.json.get('reporting_period_end_date')
        is_quarter = request.json.get('is_quarter_format', False)

        if not (start_date is None or end_date is None):
            formatted_start_date, formatted_end_date = FileHandler.check_submission_dates(start_date,
                                                                                          end_date, is_quarter)

            submissions = sess.query(Submission).filter(
                Submission.cgac_code == request.json.get('cgac_code'),
                Submission.frec_code == request.json.get('frec_code'),
                Submission.reporting_start_date == formatted_start_date,
                Submission.reporting_end_date == formatted_end_date,
                Submission.is_quarter_format == is_quarter,  # use the flag parsed above; 'is_quarter' is not a request key
                Submission.publish_status_id != PUBLISH_STATUS_DICT['unpublished'])

            if 'existing_submission_id' in request.json:
                # Query.filter() returns a new Query, so the result must be reassigned
                submissions = submissions.filter(Submission.submission_id !=
                                                 request.json['existing_submission_id'])

            submissions = submissions.order_by(desc(Submission.created_at))

            if submissions.count() > 0:
                data = {
                        "message": "A submission with the same period already exists.",
                        "submissionId": submissions[0].submission_id
                }
                return JsonResponse.create(StatusCode.CLIENT_ERROR, data)

        return file_manager.submit(create_credentials)
Developer: tony-sappe, Project: data-act-broker-backend, Lines: 37, Source: fileRoutes.py
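The reassignment on the existing_submission_id branch matters because SQLAlchemy's Query.filter() returns a new Query object rather than mutating the receiver. A self-contained demonstration against an in-memory SQLite database (the model and names are illustrative only):

from sqlalchemy import Column, Integer, create_engine
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import sessionmaker

Base = declarative_base()

class Demo(Base):
    __tablename__ = 'demo'
    id = Column(Integer, primary_key=True)

engine = create_engine('sqlite://')
Base.metadata.create_all(engine)
sess = sessionmaker(bind=engine)()

q = sess.query(Demo)
q2 = q.filter(Demo.id != 5)
assert q is not q2  # filter() returned a new Query; q itself is unchanged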

Example 8: submission_procurements

# Required import: from dataactcore.interfaces.db import GlobalDB [as alias]
# Or: from dataactcore.interfaces.db.GlobalDB import db [as alias]
def submission_procurements(submission_id):
    """Fetch procurements and subcontracts"""
    sess = GlobalDB.db().session

    logger.debug('Starting submission procurements')

    award_proc_sub = sess.query(AwardProcurement.piid, AwardProcurement.parent_award_id,
                                AwardProcurement.naics_description, AwardProcurement.awarding_sub_tier_agency_c,
                                AwardProcurement.submission_id).\
        filter(AwardProcurement.submission_id == submission_id).distinct().cte("award_proc_sub")

    results = sess.query(award_proc_sub, FSRSProcurement, FSRSSubcontract).\
        filter(FSRSProcurement.contract_number == award_proc_sub.c.piid).\
        filter(FSRSProcurement.idv_reference_number.isnot_distinct_from(award_proc_sub.c.parent_award_id)).\
        filter(FSRSProcurement.contracting_office_aid == award_proc_sub.c.awarding_sub_tier_agency_c).\
        filter(FSRSSubcontract.parent_id == FSRSProcurement.id)

    # The cte returns a set of columns, not an AwardProcurement object, so we have to unpack each column
    for award_piid, award_parent_id, award_naics_desc, award_sub_tier, award_sub_id, proc, sub in results:
        # need to combine those columns again here so we can get a proper ModelRow
        award = AwardProcurement(piid=award_piid, parent_award_id=award_parent_id, naics_description=award_naics_desc,
                                 awarding_sub_tier_agency_c=award_sub_tier, submission_id=award_sub_id)
        yield ModelRow(award, proc, sub, naics_desc=award.naics_description)

    logger.debug('Finished submission procurements')
Developer: tony-sappe, Project: data-act-broker-backend, Lines: 27, Source: fileF.py

Example 9: main

# Required import: from dataactcore.interfaces.db import GlobalDB [as alias]
# Or: from dataactcore.interfaces.db.GlobalDB import db [as alias]
def main():
    parser = argparse.ArgumentParser(description='Update contract transaction and/or fabs rows based on updates to the '
                                                 'agency list')
    parser.add_argument('-a', '--missing_agency', help='Perform an update on 999 agency codes', action='store_true',
                        required=False, default=False)
    parser.add_argument('-s', '--subtier_code', help='Select specific subtier to update. Must be a 4-digit code',
                        type=str, required=False)
    parser.add_argument('-t', '--tables', help='Which tables (fabs, fpds, or both) to update. Defaults to both.',
                        required=False, default='both', choices=['fabs', 'fpds', 'both'])
    args = parser.parse_args()

    if not args.subtier_code and not args.missing_agency:
        logger.error('Missing either subtier_code or missing_agency argument')
    elif args.subtier_code and len(args.subtier_code) != 4:
        logger.error('Subtier not a correct format, must be 4 digits')
    else:
        sess = GlobalDB.db().session

        if args.tables in ('fpds', 'both'):
            update_table(sess, 'awarding', 'detached_award_procurement', args)
            update_table(sess, 'funding', 'detached_award_procurement', args)
            logger.info("Procurement Update Complete")

        if args.tables in ('fabs', 'both'):
            update_table(sess, 'awarding', 'published_award_financial_assistance', args)
            update_table(sess, 'funding', 'published_award_financial_assistance', args)
            logger.info("Award Financial Assistance Update Complete")
Developer: fedspendingtransparency, Project: data-act-broker-backend, Lines: 29, Source: update_transaction_agency_codes.py
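Based on the argparse definitions above, hypothetical invocations of the script would look like:

# update all 999 agency codes in both FABS and FPDS tables
python update_transaction_agency_codes.py --missing_agency

# update a single subtier code in the FPDS table only
python update_transaction_agency_codes.py -s 1234 -t fpds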

Example 10: get_submission_data

# Required import: from dataactcore.interfaces.db import GlobalDB [as alias]
# Or: from dataactcore.interfaces.db.GlobalDB import db [as alias]
def get_submission_data(submission, file_type=''):
    """ Get data for the submission specified

        Args:
            submission: submission to retrieve metadata for
            file_type: the type of job to retrieve metadata for

        Returns:
            JsonResponse containing the error information or the object containing metadata for all relevant file types
    """
    sess = GlobalDB.db().session
    file_type = file_type.lower()

    # Make sure the file type provided is valid
    if file_type and file_type not in FILE_TYPE_DICT and file_type != 'cross':
        return JsonResponse.error(ValueError(file_type + ' is not a valid file type'), StatusCode.CLIENT_ERROR)

    # Make sure the file type provided is valid for the submission type
    is_fabs = submission.d2_submission
    if file_type and ((is_fabs and file_type != 'fabs') or (not is_fabs and file_type == 'fabs')):
        return JsonResponse.error(ValueError(file_type + ' is not a valid file type for this submission'),
                                  StatusCode.CLIENT_ERROR)

    job_query = sess.query(Job).filter(Job.submission_id == submission.submission_id)
    if not file_type:
        relevant_job_types = (JOB_TYPE_DICT['csv_record_validation'], JOB_TYPE_DICT['validation'])
        job_query = job_query.filter(Job.job_type_id.in_(relevant_job_types))
    elif file_type == 'cross':
        job_query = job_query.filter(Job.job_type_id == JOB_TYPE_DICT['validation'])
    else:
        job_query = job_query.filter(Job.file_type_id == FILE_TYPE_DICT[file_type])

    job_dict = {'jobs': [job_to_dict(job) for job in job_query]}
    return JsonResponse.create(StatusCode.OK, job_dict)
Developer: fedspendingtransparency, Project: data-act-broker-backend, Lines: 36, Source: submission_handler.py

Example 11: create_generation_job

# Required import: from dataactcore.interfaces.db import GlobalDB [as alias]
# Or: from dataactcore.interfaces.db.GlobalDB import db [as alias]
def create_generation_job(file_type_name, start_date, end_date):
    """ Add details to jobs for generating files

        Args:
            file_type_name: the name of the file type being generated
            start_date: The start date for the generation job, only used for detached files
            end_date: The end date for the generation job, only used for detached files

        Returns:
            the file generation job
    """
    sess = GlobalDB.db().session

    # Create a new job for a detached generation
    job = Job(job_type_id=lookups.JOB_TYPE_DICT['file_upload'], user_id=g.user.user_id,
              file_type_id=lookups.FILE_TYPE_DICT[file_type_name], start_date=start_date, end_date=end_date)
    sess.add(job)

    # Update the job details
    job.message = None
    job.job_status_id = lookups.JOB_STATUS_DICT["ready"]
    sess.commit()
    sess.refresh(job)

    return job
Developer: fedspendingtransparency, Project: data-act-broker-backend, Lines: 28, Source: generation_helper.py

Example 12: read_zips

# Required import: from dataactcore.interfaces.db import GlobalDB [as alias]
# Or: from dataactcore.interfaces.db.GlobalDB import db [as alias]
def read_zips():
    with create_app().app_context():
        sess = GlobalDB.db().session

        # delete old values in case something changed and one is now invalid
        sess.query(Zips).delete(synchronize_session=False)
        sess.commit()

        if CONFIG_BROKER["use_aws"]:
            s3connection = boto.s3.connect_to_region(CONFIG_BROKER['aws_region'])
            s3bucket = s3connection.lookup(CONFIG_BROKER['sf_133_bucket'])
            zip_folder = CONFIG_BROKER["zip_folder"] + "/"
            for key in s3bucket.list(prefix=zip_folder):
                if key.name != zip_folder:
                    zip_4_file_path = key.generate_url(expires_in=600)
                    parse_zip4_file(urllib.request.urlopen(zip_4_file_path), sess)

            # parse remaining 5 digit zips that weren't in the first file
            citystate_file = s3bucket.get_key("ctystate.txt").generate_url(expires_in=600)
            parse_citystate_file(urllib.request.urlopen(citystate_file), sess)
        else:
            base_path = os.path.join(CONFIG_BROKER["path"], "dataactvalidator", "config", CONFIG_BROKER["zip_folder"])
            # build the file list, ignoring hidden files on macOS
            file_list = [f for f in os.listdir(base_path) if not re.match(r'^\.', f)]
            for file in file_list:
                parse_zip4_file(open(os.path.join(base_path, file)), sess)

            # parse remaining 5 digit zips that weren't in the first file
            citystate_file = os.path.join(CONFIG_BROKER["path"], "dataactvalidator", "config", "ctystate.txt")
            parse_citystate_file(open(citystate_file), sess)

        logger.info("Zipcode script complete")
Developer: tony-sappe, Project: data-act-broker-backend, Lines: 34, Source: readZips.py
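read_zips uses the legacy boto (boto2) S3 API. For comparison, a hedged sketch of the same listing and presigned-URL steps with boto3 (assuming the same CONFIG_BROKER keys; this is not the code the broker ships):

import boto3

s3 = boto3.client('s3', region_name=CONFIG_BROKER['aws_region'])
bucket = CONFIG_BROKER['sf_133_bucket']
zip_folder = CONFIG_BROKER['zip_folder'] + '/'

# list_objects_v2 returns up to 1000 keys; a paginator would be needed for more
for obj in s3.list_objects_v2(Bucket=bucket, Prefix=zip_folder).get('Contents', []):
    if obj['Key'] != zip_folder:
        url = s3.generate_presigned_url(
            'get_object', Params={'Bucket': bucket, 'Key': obj['Key']}, ExpiresIn=600)
        # parse_zip4_file(urllib.request.urlopen(url), sess)  # as above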

Example 13: start_a_generation

# Required import: from dataactcore.interfaces.db import GlobalDB [as alias]
# Or: from dataactcore.interfaces.db.GlobalDB import db [as alias]
def start_a_generation(job, start_date, end_date, agency_code):
    """ Validates the start and end dates of the generation and sends the job information to SQS.

        Args:
            job: File generation job to start
            start_date: String to parse as the start date of the generation
            end_date: String to parse as the end date of the generation
            agency_code: Agency code for A file generations
    """
    if not (StringCleaner.is_date(start_date) and StringCleaner.is_date(end_date)):
        raise ResponseException("Start or end date cannot be parsed into a date of format MM/DD/YYYY",
                                StatusCode.CLIENT_ERROR)

    # Update the Job's start and end dates
    sess = GlobalDB.db().session
    job.start_date = start_date
    job.end_date = end_date
    sess.commit()

    mark_job_status(job.job_id, "waiting")

    file_type = job.file_type.letter_name
    log_data = {'message': 'Sending {} file generation job {} to Validator in SQS'.format(file_type, job.job_id),
                'message_type': 'BrokerInfo', 'job_id': job.job_id, 'file_type': file_type}
    logger.info(log_data)

    # Set SQS message attributes
    message_attr = {'agency_code': {'DataType': 'String', 'StringValue': agency_code}}

    # Add job_id to the SQS job queue
    queue = sqs_queue()
    msg_response = queue.send_message(MessageBody=str(job.job_id), MessageAttributes=message_attr)

    log_data['message'] = 'SQS message response: {}'.format(msg_response)
    logger.debug(log_data)
Developer: fedspendingtransparency, Project: data-act-broker-backend, Lines: 37, Source: generation_helper.py

Example 14: load_frec

# Required import: from dataactcore.interfaces.db import GlobalDB [as alias]
# Or: from dataactcore.interfaces.db.GlobalDB import db [as alias]
def load_frec(file_name):
    """ Load FREC (high-level agency names) lookup table.

        Args:
            file_name: path/url to the file to be read
    """
    sess = GlobalDB.db().session
    models = {frec.frec_code: frec for frec in sess.query(FREC)}

    # read FREC values from csv
    data = pd.read_csv(file_name, dtype=str)

    # clean data
    data = clean_data(
        data,
        FREC,
        {"frec": "frec_code", "cgac_agency_code": "cgac_code", "frec_entity_description": "agency_name",
         "agency_abbreviation": "agency_abbreviation"},
        {"frec": {"keep_null": False}, "cgac_code": {"pad_to_length": 3}, "frec_code": {"pad_to_length": 4}}
    )
    # de-dupe
    data.drop_duplicates(subset=['frec_code'], inplace=True)
    # create foreign key dicts
    cgac_dict = {str(cgac.cgac_code): cgac.cgac_id for
                 cgac in sess.query(CGAC).filter(CGAC.cgac_code.in_(data["cgac_code"])).all()}

    # insert to db
    delete_missing_frecs(models, data)
    update_frecs(models, data, cgac_dict)
    sess.add_all(models.values())
    sess.commit()

    logger.info('%s FREC records inserted', len(models))
Developer: fedspendingtransparency, Project: data-act-broker-backend, Lines: 35, Source: load_agencies.py

Example 15: load_cgac

# Required import: from dataactcore.interfaces.db import GlobalDB [as alias]
# Or: from dataactcore.interfaces.db.GlobalDB import db [as alias]
def load_cgac(file_name):
    """ Load CGAC (high-level agency names) lookup table.

        Args:
            file_name: path/url to the file to be read
    """
    sess = GlobalDB.db().session
    models = {cgac.cgac_code: cgac for cgac in sess.query(CGAC)}

    # read CGAC values from csv
    data = pd.read_csv(file_name, dtype=str)
    # clean data
    data = clean_data(
        data,
        CGAC,
        {"cgac_agency_code": "cgac_code", "agency_name": "agency_name",
         "agency_abbreviation": "agency_abbreviation"},
        {"cgac_code": {"pad_to_length": 3}}
    )
    # de-dupe
    data.drop_duplicates(subset=['cgac_code'], inplace=True)

    delete_missing_cgacs(models, data)
    update_cgacs(models, data)
    sess.add_all(models.values())
    sess.commit()

    logger.info('%s CGAC records inserted', len(models))
Developer: fedspendingtransparency, Project: data-act-broker-backend, Lines: 30, Source: load_agencies.py


Note: The dataactcore.interfaces.db.GlobalDB.db method examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are drawn from community-contributed open-source projects; copyright in the source code remains with the original authors, and any redistribution or use should follow each project's License. Do not repost without permission.