This article collects typical usage examples of the Python method airflow.contrib.hooks.bigquery_hook.BigQueryHook.get_conn. If you have been wondering what BigQueryHook.get_conn does, how to call it, and what real uses of it look like, the curated examples below may help. You can also read more about its containing class, airflow.contrib.hooks.bigquery_hook.BigQueryHook.
The sections below show 11 code examples of BigQueryHook.get_conn, ordered by popularity by default.
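All of the examples share the same pattern: construct a BigQueryHook, call get_conn() to obtain a DB-API-style connection, and drive BigQuery through its cursor. The minimal sketch below illustrates that pattern on its own. It is a sketch, not part of the examples: it assumes Airflow 1.x with the contrib hook available, and 'bigquery_default' is a placeholder conn_id for a BigQuery connection you have already configured with valid Google Cloud credentials.

from airflow.contrib.hooks.bigquery_hook import BigQueryHook

# 'bigquery_default' is a hypothetical conn_id; any configured
# BigQuery connection works here.
hook = BigQueryHook(bigquery_conn_id='bigquery_default')
conn = hook.get_conn()      # PEP 249-style connection wrapping the BigQuery service
cursor = conn.cursor()      # BigQueryCursor: execute/fetch plus job helper methods
cursor.execute('SELECT 1')  # submits the statement as a BigQuery job
print(cursor.fetchone())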
Example 1: execute
# Required import: from airflow.contrib.hooks.bigquery_hook import BigQueryHook [as alias]
# Or: from airflow.contrib.hooks.bigquery_hook.BigQueryHook import get_conn [as alias]
def execute(self, context):
    bq_hook = BigQueryHook(bigquery_conn_id=self.bigquery_conn_id,
                           delegate_to=self.delegate_to)

    # Use the explicit schema if given; otherwise download a JSON schema file from GCS.
    if not self.schema_fields and self.gcs_schema_object:
        gcs_bucket, gcs_object = _parse_gcs_url(self.gcs_schema_object)
        gcs_hook = GoogleCloudStorageHook(
            google_cloud_storage_conn_id=self.google_cloud_storage_conn_id,
            delegate_to=self.delegate_to)
        schema_fields = json.loads(gcs_hook.download(
            gcs_bucket,
            gcs_object).decode("utf-8"))
    else:
        schema_fields = self.schema_fields

    conn = bq_hook.get_conn()
    cursor = conn.cursor()

    # Create the (optionally time-partitioned) table with no rows.
    cursor.create_empty_table(
        project_id=self.project_id,
        dataset_id=self.dataset_id,
        table_id=self.table_id,
        schema_fields=schema_fields,
        time_partitioning=self.time_partitioning,
        labels=self.labels
    )
Example 2: execute
# Required import: from airflow.contrib.hooks.bigquery_hook import BigQueryHook [as alias]
# Or: from airflow.contrib.hooks.bigquery_hook.BigQueryHook import get_conn [as alias]
def execute(self, context):
    gcs_hook = GoogleCloudStorageHook(
        google_cloud_storage_conn_id=self.google_cloud_storage_conn_id,
        delegate_to=self.delegate_to)
    bq_hook = BigQueryHook(bigquery_conn_id=self.bigquery_conn_id,
                           delegate_to=self.delegate_to)

    # Use the explicit schema if given; otherwise download it from GCS as JSON.
    schema_fields = (self.schema_fields if self.schema_fields
                     else json.loads(gcs_hook.download(self.bucket,
                                                       self.schema_object)))
    source_uris = ['gs://{}/{}'.format(self.bucket, source_object)
                   for source_object in self.source_objects]

    conn = bq_hook.get_conn()
    cursor = conn.cursor()
    cursor.run_load(
        destination_project_dataset_table=self.destination_project_dataset_table,
        schema_fields=schema_fields,
        source_uris=source_uris,
        source_format=self.source_format,
        create_disposition=self.create_disposition,
        skip_leading_rows=self.skip_leading_rows,
        write_disposition=self.write_disposition,
        field_delimiter=self.field_delimiter)

    # Optionally report the maximum value of a key column after the load.
    if self.max_id_key:
        cursor.execute('SELECT MAX({}) FROM {}'.format(
            self.max_id_key, self.destination_project_dataset_table))
        row = cursor.fetchone()
        max_id = row[0] if row[0] else 0
        logging.info('Loaded BQ data with max %s.%s=%s',
                     self.destination_project_dataset_table,
                     self.max_id_key, max_id)
        return max_id
Example 3: execute
# Required import: from airflow.contrib.hooks.bigquery_hook import BigQueryHook [as alias]
# Or: from airflow.contrib.hooks.bigquery_hook.BigQueryHook import get_conn [as alias]
def execute(self, context):
    self.log.info('Fetching Data from:')
    self.log.info('Dataset: %s ; Table: %s ; Max Results: %s',
                  self.dataset_id, self.table_id, self.max_results)

    hook = BigQueryHook(bigquery_conn_id=self.bigquery_conn_id,
                        delegate_to=self.delegate_to)
    conn = hook.get_conn()
    cursor = conn.cursor()
    response = cursor.get_tabledata(dataset_id=self.dataset_id,
                                    table_id=self.table_id,
                                    max_results=self.max_results,
                                    selected_fields=self.selected_fields)
    self.log.info('Total Extracted rows: %s', response['totalRows'])

    # Flatten the tabledata response ({'rows': [{'f': [{'v': value}, ...]}, ...]})
    # into a plain list of row value lists.
    rows = response['rows']
    table_data = []
    for dict_row in rows:
        single_row = []
        for fields in dict_row['f']:
            single_row.append(fields['v'])
        table_data.append(single_row)
    return table_data
Example 4: execute
# Required import: from airflow.contrib.hooks.bigquery_hook import BigQueryHook [as alias]
# Or: from airflow.contrib.hooks.bigquery_hook.BigQueryHook import get_conn [as alias]
def execute(self, context):
    logging.info('Deleting: %s', self.deletion_dataset_table)
    hook = BigQueryHook(bigquery_conn_id=self.bigquery_conn_id,
                        delegate_to=self.delegate_to)
    conn = hook.get_conn()
    cursor = conn.cursor()
    # Drop the table; ignore_if_missing suppresses the error if it does not exist.
    cursor.run_table_delete(self.deletion_dataset_table, self.ignore_if_missing)
Example 5: execute
# Required import: from airflow.contrib.hooks.bigquery_hook import BigQueryHook [as alias]
# Or: from airflow.contrib.hooks.bigquery_hook.BigQueryHook import get_conn [as alias]
def execute(self, context):
    logging.info('Executing: %s', self.bql)
    hook = BigQueryHook(bigquery_conn_id=self.bigquery_conn_id,
                        delegate_to=self.delegate_to)
    conn = hook.get_conn()
    cursor = conn.cursor()
    # Run the query as a BigQuery job, optionally writing to a destination table.
    cursor.run_query(self.bql, self.destination_dataset_table, self.write_disposition,
                     self.allow_large_results, self.udf_config, self.use_legacy_sql)
Example 6: execute
# Required import: from airflow.contrib.hooks.bigquery_hook import BigQueryHook [as alias]
# Or: from airflow.contrib.hooks.bigquery_hook.BigQueryHook import get_conn [as alias]
def execute(self, context):
    logging.info('Executing copy of %s into: %s',
                 self.source_project_dataset_tables,
                 self.destination_project_dataset_table)
    hook = BigQueryHook(bigquery_conn_id=self.bigquery_conn_id,
                        delegate_to=self.delegate_to)
    conn = hook.get_conn()
    cursor = conn.cursor()
    # Copy one or more source tables into the destination table.
    cursor.run_copy(
        self.source_project_dataset_tables,
        self.destination_project_dataset_table,
        self.write_disposition,
        self.create_disposition)
Example 7: execute
# Required import: from airflow.contrib.hooks.bigquery_hook import BigQueryHook [as alias]
# Or: from airflow.contrib.hooks.bigquery_hook.BigQueryHook import get_conn [as alias]
def execute(self, context):
    # Create the connection and cursor only once, keeping a reference
    # to the cursor on the operator.
    if self.bq_cursor is None:
        self.log.info('Executing: %s', self.bql)
        hook = BigQueryHook(bigquery_conn_id=self.bigquery_conn_id,
                            delegate_to=self.delegate_to)
        conn = hook.get_conn()
        self.bq_cursor = conn.cursor()
    self.bq_cursor.run_query(self.bql, self.destination_dataset_table,
                             self.write_disposition,
                             self.allow_large_results, self.udf_config,
                             self.use_legacy_sql, self.maximum_billing_tier,
                             self.create_disposition, self.query_params)
Example 8: execute
# Required import: from airflow.contrib.hooks.bigquery_hook import BigQueryHook [as alias]
# Or: from airflow.contrib.hooks.bigquery_hook.BigQueryHook import get_conn [as alias]
def execute(self, context):
    logging.info('Executing extract of %s into: %s',
                 self.source_project_dataset_table,
                 self.destination_cloud_storage_uris)
    hook = BigQueryHook(bigquery_conn_id=self.bigquery_conn_id,
                        delegate_to=self.delegate_to)
    conn = hook.get_conn()
    cursor = conn.cursor()
    # Export the table to one or more GCS URIs.
    cursor.run_extract(
        self.source_project_dataset_table,
        self.destination_cloud_storage_uris,
        self.compression,
        self.export_format,
        self.field_delimiter,
        self.print_header)
Example 9: execute
# Required import: from airflow.contrib.hooks.bigquery_hook import BigQueryHook [as alias]
# Or: from airflow.contrib.hooks.bigquery_hook.BigQueryHook import get_conn [as alias]
def execute(self, context):
    bq_hook = BigQueryHook(bigquery_conn_id=self.bigquery_conn_id,
                           delegate_to=self.delegate_to)

    # Download the schema from GCS unless one was passed in explicitly
    # (DATASTORE_BACKUP loads are self-describing and carry their own schema).
    if not self.schema_fields and self.schema_object \
            and self.source_format != 'DATASTORE_BACKUP':
        gcs_hook = GoogleCloudStorageHook(
            google_cloud_storage_conn_id=self.google_cloud_storage_conn_id,
            delegate_to=self.delegate_to)
        schema_fields = json.loads(gcs_hook.download(
            self.bucket,
            self.schema_object).decode("utf-8"))
    else:
        schema_fields = self.schema_fields

    source_uris = ['gs://{}/{}'.format(self.bucket, source_object)
                   for source_object in self.source_objects]
    conn = bq_hook.get_conn()
    cursor = conn.cursor()
    cursor.run_load(
        destination_project_dataset_table=self.destination_project_dataset_table,
        schema_fields=schema_fields,
        source_uris=source_uris,
        source_format=self.source_format,
        create_disposition=self.create_disposition,
        skip_leading_rows=self.skip_leading_rows,
        write_disposition=self.write_disposition,
        field_delimiter=self.field_delimiter,
        max_bad_records=self.max_bad_records,
        quote_character=self.quote_character,
        allow_quoted_newlines=self.allow_quoted_newlines,
        allow_jagged_rows=self.allow_jagged_rows,
        schema_update_options=self.schema_update_options,
        src_fmt_configs=self.src_fmt_configs,
        time_partitioning=self.time_partitioning)

    # Optionally report the maximum value of a key column after the load.
    if self.max_id_key:
        cursor.execute('SELECT MAX({}) FROM {}'.format(
            self.max_id_key,
            self.destination_project_dataset_table))
        row = cursor.fetchone()
        max_id = row[0] if row[0] else 0
        self.log.info(
            'Loaded BQ data with max %s.%s=%s',
            self.destination_project_dataset_table, self.max_id_key, max_id
        )
        return max_id
Example 10: execute
# Required import: from airflow.contrib.hooks.bigquery_hook import BigQueryHook [as alias]
# Or: from airflow.contrib.hooks.bigquery_hook.BigQueryHook import get_conn [as alias]
def execute(self, context):
    # Create the connection and cursor only once, keeping a reference
    # to the cursor on the operator.
    if self.bq_cursor is None:
        self.log.info('Executing: %s', self.bql)
        hook = BigQueryHook(
            bigquery_conn_id=self.bigquery_conn_id,
            use_legacy_sql=self.use_legacy_sql,
            delegate_to=self.delegate_to)
        conn = hook.get_conn()
        self.bq_cursor = conn.cursor()
    self.bq_cursor.run_query(
        self.bql,
        destination_dataset_table=self.destination_dataset_table,
        write_disposition=self.write_disposition,
        allow_large_results=self.allow_large_results,
        udf_config=self.udf_config,
        maximum_billing_tier=self.maximum_billing_tier,
        create_disposition=self.create_disposition,
        query_params=self.query_params,
        schema_update_options=self.schema_update_options)
Example 11: execute
# Required import: from airflow.contrib.hooks.bigquery_hook import BigQueryHook [as alias]
# Or: from airflow.contrib.hooks.bigquery_hook.BigQueryHook import get_conn [as alias]
def execute(self, context):
    logging.info('Executing: %s', str(self.bql))
    hook = BigQueryHook(bigquery_conn_id=self.bigquery_conn_id,
                        delegate_to=self.delegate_to)
    conn = hook.get_conn()
    cursor = conn.cursor()
    # Run the query, optionally writing the results to a destination table.
    cursor.run_query(self.bql, self.destination_dataset_table, self.write_disposition)