This article collects typical usage examples of the Python google.cloud.bigquery.Client method. If you are unsure what bigquery.Client does or how to call it, the curated code examples below may help. You can also explore the wider google.cloud.bigquery module for related usage.
The following 15 code examples of bigquery.Client are sorted by popularity by default. You can upvote the examples you find useful; your feedback helps the system recommend better Python code examples.
Example 1: connect
# Required import: from google.cloud import bigquery [as alias]
# Or: from google.cloud.bigquery import Client [as alias]
# Also required here: from google.oauth2 import service_account
def connect(self, params):
self.logger.info(f"Connect: Connecting...")
self.client = bigquery.Client(
project=params.get(Input.PROJECT_ID),
credentials=service_account.Credentials.from_service_account_info({
"type": "service_account",
"project_id": params.get(Input.PROJECT_ID),
"private_key_id": params.get(Input.PRIVATE_KEY_ID),
"private_key": params.get(Input.PRIVATE_KEY).get("privateKey").replace('\\n', "\n", -1),
"client_email": params.get(Input.CLIENT_EMAIL),
"client_id": params.get(Input.CLIENT_ID),
"auth_uri": params.get(Input.AUTH_URI),
"client_x509_cert_url": params.get(Input.CLIENT_X509_CERT_URL),
"token_uri": params.get(Input.TOKEN_URI, "https://oauth2.googleapis.com/token"),
"auth_provider_x509_cert_url": params.get(Input.AUTH_PROVIDER_X509_CERT_URL,
"https://www.googleapis.com/oauth2/v1/certs")
})
)
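For reference, a minimal standalone sketch of the same pattern (building a client from in-memory service-account info) could look like this; every value below is a placeholder, not a real credential:
from google.cloud import bigquery
from google.oauth2 import service_account

# Hypothetical service-account info; in the example above these values come
# from the plugin's connection parameters. private_key must be a valid
# PEM-encoded key for this to actually run.
sa_info = {
    "type": "service_account",
    "project_id": "my-project",
    "private_key_id": "placeholder-key-id",
    "private_key": "-----BEGIN PRIVATE KEY-----\n...\n-----END PRIVATE KEY-----\n",
    "client_email": "svc@my-project.iam.gserviceaccount.com",
    "token_uri": "https://oauth2.googleapis.com/token",
}
credentials = service_account.Credentials.from_service_account_info(sa_info)
client = bigquery.Client(project=sa_info["project_id"], credentials=credentials)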
Example 2: _upload_to_gcs
# Required import: from google.cloud import bigquery [as alias]
# Or: from google.cloud.bigquery import Client [as alias]
# Also required here: from google.cloud import storage, plus the standard os and logging modules
def _upload_to_gcs(self, gcs_project_id, target_bucket_name, bucket_folder, filename):
    '''Upload a CSV file to GCS.
    Args:
        gcs_project_id (str): project name
        target_bucket_name (str): name of the GCS bucket
        bucket_folder (str): name of the GCS folder
        filename (str): path of the file to upload
    Returns:
        None. Side effect: the file is uploaded to GCS.
    '''
    storage_client = storage.Client(gcs_project_id)
    bucket = storage_client.get_bucket(target_bucket_name)
    # GCS object names always use "/" separators; os.sep would break on Windows
    path = bucket_folder + "/" + filename
    logging.info("Loading to GCS: %s", path)
    blob = bucket.blob(path)  # object name in GCS
    blob.upload_from_filename(filename)
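A natural follow-up, sketched here under assumed names, is to point BigQuery at the uploaded object with a load job from the GCS URI rather than re-reading the local file:
from google.cloud import bigquery

# Sketch with placeholder project/bucket/table names.
client = bigquery.Client(project="my-project")
job_config = bigquery.LoadJobConfig(
    source_format=bigquery.SourceFormat.CSV,
    skip_leading_rows=1,
    autodetect=True,
)
uri = "gs://target-bucket/bucket-folder/data.csv"  # the path built by _upload_to_gcs
load_job = client.load_table_from_uri(uri, "my-project.my_dataset.my_table", job_config=job_config)
load_job.result()  # wait for the load to finish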
Example 3: __init__
# Required import: from google.cloud import bigquery [as alias]
# Or: from google.cloud.bigquery import Client [as alias]
def __init__(self, logging_dir, gcp_project=None, credentials=None):
"""Initialized BigQueryUploader with proper setting.
Args:
logging_dir: string, logging directory that contains the benchmark log.
gcp_project: string, the name of the GCP project that the log will be
uploaded to. The default project name will be detected from local
environment if no value is provided.
credentials: google.auth.credentials. The credential to access the
BigQuery service. The default service account credential will be
detected from local environment if no value is provided. Please use
google.oauth2.service_account.Credentials to load credential from local
file for the case that the test is run out side of GCP.
"""
self._logging_dir = logging_dir
self._bq_client = bigquery.Client(
project=gcp_project, credentials=credentials)
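As the docstring suggests, running outside GCP means passing explicit service-account credentials. A usage sketch, with an assumed key path, log directory, and project name:
from google.oauth2 import service_account

creds = service_account.Credentials.from_service_account_file("/path/to/key.json")
uploader = BigQueryUploader("/tmp/benchmark_logs", gcp_project="my-project", credentials=creds)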
Example 4: _create_client_from_credentials
# Required import: from google.cloud import bigquery [as alias]
# Or: from google.cloud.bigquery import Client [as alias]
def _create_client_from_credentials(self, credentials, default_query_job_config, project_id):
if project_id is None:
project_id = credentials.project_id
scopes = (
'https://www.googleapis.com/auth/bigquery',
'https://www.googleapis.com/auth/cloud-platform',
'https://www.googleapis.com/auth/drive'
)
credentials = credentials.with_scopes(scopes)
self._add_default_dataset_to_job_config(default_query_job_config, project_id, self.dataset_id)
return bigquery.Client(
project=project_id,
credentials=credentials,
location=self.location,
default_query_job_config=default_query_job_config,
)
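On the caller's side, the default_query_job_config argument would typically carry a default dataset, as hinted by _add_default_dataset_to_job_config. A hedged sketch, assuming a "my-project.my_dataset" dataset:
from google.cloud import bigquery

job_config = bigquery.QueryJobConfig()
job_config.default_dataset = "my-project.my_dataset"  # unqualified table names resolve here
# The method above then re-scopes the credentials (BigQuery, cloud-platform,
# and Drive, the last enabling queries over Sheets-backed external tables)
# before constructing the client.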
Example 5: __init__
# Required import: from google.cloud import bigquery [as alias]
# Or: from google.cloud.bigquery import Client [as alias]
def __init__(self, project_id, dataset_id=None, credentials=None):
"""Construct a BigQueryClient.
Parameters
----------
project_id : str
A project name
dataset_id : Optional[str]
A ``<project_id>.<dataset_id>`` string or just a dataset name
credentials : google.auth.credentials.Credentials
"""
(
self.data_project,
self.billing_project,
self.dataset,
) = parse_project_and_dataset(project_id, dataset_id)
self.client = bq.Client(
project=self.data_project, credentials=credentials
)
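A usage sketch, assuming the surrounding class is exposed as BigQueryClient (per its docstring) and that application-default credentials are available:
con = BigQueryClient("my-project", dataset_id="my_dataset")
print(con.data_project, con.billing_project, con.dataset)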
Example 6: init_gcs
# Required import: from google.cloud import bigquery [as alias]
# Or: from google.cloud.bigquery import Client [as alias]
def init_gcs():
is_user_secrets_token_set = "KAGGLE_USER_SECRETS_TOKEN" in os.environ
from google.cloud import storage
if not is_user_secrets_token_set:
return storage
from kaggle_gcp import get_integrations
if not get_integrations().has_gcs():
return storage
from kaggle_secrets import GcpTarget
from kaggle_gcp import KaggleKernelCredentials
monkeypatch_client(
storage.Client,
KaggleKernelCredentials(target=GcpTarget.GCS))
return storage
Example 7: load_to_gbq
# Required import: from google.cloud import bigquery [as alias]
# Or: from google.cloud.bigquery import Client [as alias]
def load_to_gbq(client, data, bq_configuration):
    """
    Load *data* into BigQuery using *bq_configuration* settings.
    """
    # NOTE: the *client* argument is shadowed immediately; this function always
    # builds its own client for the configured project.
    client = bigquery.Client(project=bq_configuration["project_id"])
    dataset_ref = client.dataset(bq_configuration["dataset_id"])
table_ref = dataset_ref.table(bq_configuration["table"])
# determine uploading options
job_config = bigquery.LoadJobConfig()
job_config.write_disposition = 'WRITE_TRUNCATE'
job_config.source_format = "NEWLINE_DELIMITED_JSON"
job_config.autodetect = True
    load_job = client.load_table_from_file(
        data,
        table_ref,
        job_config=job_config)  # API request
print('Starting job {}'.format(load_job.job_id))
load_job.result() # Waits for table load to complete.
print('Job finished.')
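Since the function hard-codes NEWLINE_DELIMITED_JSON, *data* must be a binary file-like object of JSON lines. A usage sketch with an in-memory buffer and a placeholder configuration (the first argument is ignored, as noted above):
import io
import json

rows = [{"id": 1, "name": "a"}, {"id": 2, "name": "b"}]
data = io.BytesIO("\n".join(json.dumps(r) for r in rows).encode("utf-8"))
bq_configuration = {"project_id": "my-project", "dataset_id": "my_dataset", "table": "my_table"}
load_to_gbq(None, data, bq_configuration)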
Example 8: load_to_gbq
# Required import: from google.cloud import bigquery [as alias]
# Or: from google.cloud.bigquery import Client [as alias]
def load_to_gbq(filename, bq_configuration):
    """
    Load the file *filename* into BigQuery using *bq_configuration* settings.
    """
    # construct a Client object pointed at the project that will store the data
    client = bigquery.Client(project=bq_configuration["project_id"])
dataset_ref = client.dataset(bq_configuration["dataset_id"])
table_ref = dataset_ref.table(bq_configuration["table"])
# determine uploading options
job_config = bigquery.LoadJobConfig()
job_config.write_disposition = 'WRITE_TRUNCATE'
job_config.source_format = bq_configuration["source_format"]
job_config.autodetect = True
if bq_configuration["source_format"].upper() == "CSV":
job_config.skip_leading_rows = 1
# upload the file to BigQuery table
    with open(filename, "rb") as source_file:
        job = client.load_table_from_file(source_file, table_ref, location=bq_configuration["location"], job_config=job_config)
    job.result()
    print("Job {} finished in state {} for table {}.{}.{}".format(
        job.job_id, job.state, bq_configuration["project_id"],
        bq_configuration["dataset_id"], bq_configuration["table"]))
os.remove(filename)
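A usage sketch with a placeholder configuration dict covering every key the function reads (note the local file is deleted after a successful load):
bq_configuration = {
    "project_id": "my-project",
    "dataset_id": "my_dataset",
    "table": "my_table",
    "source_format": "CSV",   # triggers skip_leading_rows = 1
    "location": "US",
}
load_to_gbq("export.csv", bq_configuration)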
Example 9: give_file_gbq
# Required import: from google.cloud import bigquery [as alias]
# Or: from google.cloud.bigquery import Client [as alias]
def give_file_gbq(path_to_file, bq_configuration):
"""
Download file from *path_to_file* to BigQuery table using *bq_configuration* settings.
"""
# construct Client object with the path to the table in which data will be stored
client = bigquery.Client(project = bq_configuration["project_id"])
dataset_ref = client.dataset(bq_configuration["dataset_id"])
table_ref = dataset_ref.table(bq_configuration["table_id"])
# determine uploading options
job_config = bigquery.LoadJobConfig()
job_config.source_format = bq_configuration["source_format"].upper()
job_config.write_disposition = bq_configuration["write_disposition"]
if bq_configuration["source_format"].upper() == "CSV":
job_config.field_delimiter = bq_configuration["delimiter"]
job_config.skip_leading_rows = 1
job_config.autodetect = True
# upload the file to BigQuery table
    with open(path_to_file, "rb") as source_file:
        job = client.load_table_from_file(source_file, table_ref, location=bq_configuration["location"], job_config=job_config)
    job.result()
    print("Job {} finished in state {} for table {}.{}.{}".format(
        job.job_id, job.state, bq_configuration["project_id"],
        bq_configuration["dataset_id"], bq_configuration["table_id"]))
os.remove(path_to_file)
Example 10: give_file_gbq
# Required import: from google.cloud import bigquery [as alias]
# Or: from google.cloud.bigquery import Client [as alias]
def give_file_gbq(path_to_file, bq_configuration):
"""
Download file from *path_to_file* to BigQuery table using *bq_configuration* settings.
"""
# construct Client object with the path to the table in which data will be stored
client = bigquery.Client(project = bq_configuration["project_id"])
dataset_ref = client.dataset(bq_configuration["dataset_id"])
table_ref = dataset_ref.table(bq_configuration["table_id"])
# determine uploading options
job_config = bigquery.LoadJobConfig()
job_config.source_format = "NEWLINE_DELIMITED_JSON"
job_config.write_disposition = bq_configuration["write_disposition"]
job_config.autodetect = True
# upload the file to BigQuery table
    with open(path_to_file, "rb") as source_file:
        job = client.load_table_from_file(source_file, table_ref, location=bq_configuration["location"], job_config=job_config)
    job.result()
    print("Job {} finished in state {} for table {}.{}.{}".format(
        job.job_id, job.state, bq_configuration["project_id"],
        bq_configuration["dataset_id"], bq_configuration["table_id"]))
Example 11: _try_credentials
# Required import: from google.cloud import bigquery [as alias]
# Or: from google.cloud.bigquery import Client [as alias]
def _try_credentials(project_id, credentials):
from google.cloud import bigquery
import google.api_core.exceptions
import google.auth.exceptions
if not credentials:
return None
if not project_id:
return credentials
try:
client = bigquery.Client(project=project_id, credentials=credentials)
# Check if the application has rights to the BigQuery project
client.query("SELECT 1").result()
return credentials
except google.api_core.exceptions.GoogleAPIError:
return None
except google.auth.exceptions.RefreshError:
# Sometimes (such as on Travis) google-auth returns GCE credentials,
# but fetching the token for those credentials doesn't actually work.
# See:
# https://github.com/googleapis/google-auth-library-python/issues/287
return None
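A usage sketch pairing the helper with application-default credentials from google.auth (the project name is a placeholder):
import google.auth

credentials, default_project = google.auth.default()
checked = _try_credentials("my-project", credentials)
if checked is None:
    print("Credentials cannot run queries against my-project")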
Example 12: _record_latency_to_bigquery
# Required import: from google.cloud import bigquery [as alias]
# Or: from google.cloud.bigquery import Client [as alias]
def _record_latency_to_bigquery(deploy_latency, language, is_xrt):
current_date = datetime.datetime.now()
row = [(language, current_date, deploy_latency, is_xrt)]
project = os.environ.get(DEPLOY_LATENCY_PROJECT_ENV)
    if not project:
        logging.warning('No project specified to record deployment latency!')
        logging.warning('If you wish to record deployment latency, '
                        'please set the %s env var and try again.',
                        DEPLOY_LATENCY_PROJECT_ENV)
        return 0
logging.debug('Fetching bigquery client for project %s', project)
client = bigquery.Client(project=project)
dataset = client.dataset(DATASET_NAME)
logging.debug('Writing bigquery data to table %s in dataset %s',
TABLE_NAME, dataset)
table_ref = bigquery.TableReference(dataset_ref=dataset,
table_id=TABLE_NAME)
table = client.get_table(table_ref)
return client.create_rows(table, row)
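Note that create_rows() is the streaming-insert API from google-cloud-bigquery versions before 1.0; on 1.0+ the equivalent call is insert_rows(). A sketch of the modern form:
errors = client.insert_rows(table, row)  # returns [] on success
if errors:
    logging.error('Streaming insert failed: %s', errors)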
Example 13: _record_build_times_to_bigquery
# Required import: from google.cloud import bigquery [as alias]
# Or: from google.cloud.bigquery import Client [as alias]
def _record_build_times_to_bigquery(self, build_times):
current_date = datetime.datetime.now()
logging.info('Retrieving bigquery client')
client = bigquery.Client(project=self._project)
dataset_ref = client.dataset(self._dataset)
table_ref = dataset_ref.table(self._table)
table = client.get_table(table_ref)
full_name = "{0}:{1}.{2}".format(self._project, self._dataset,
self._table)
logging.info("Adding build time data to {0}".format(full_name))
rows = [(current_date, self._description, bt[0], bt[1])
for bt in build_times]
client.create_rows(table, rows)
logging.info("Finished adding build times to {0}".format(full_name))
Example 14: query_stackoverflow
# Required import: from google.cloud import bigquery [as alias]
# Or: from google.cloud.bigquery import Client [as alias]
def query_stackoverflow():
# [START bigquery_simple_app_client]
client = bigquery.Client()
# [END bigquery_simple_app_client]
# [START bigquery_simple_app_query]
query_job = client.query("""
SELECT
CONCAT(
'https://stackoverflow.com/questions/',
CAST(id as STRING)) as url,
view_count
FROM `bigquery-public-data.stackoverflow.posts_questions`
WHERE tags like '%google-bigquery%'
ORDER BY view_count DESC
LIMIT 10""")
results = query_job.result() # Waits for job to complete.
# [END bigquery_simple_app_query]
# [START bigquery_simple_app_print]
for row in results:
print("{} : {} views".format(row.url, row.view_count))
# [END bigquery_simple_app_print]
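A related sketch, not part of the original sample: the same query with the tag filter passed as a query parameter, which avoids string interpolation:
job_config = bigquery.QueryJobConfig(
    query_parameters=[
        bigquery.ScalarQueryParameter("tag", "STRING", "%google-bigquery%"),
    ]
)
query_job = client.query(
    "SELECT COUNT(*) AS n "
    "FROM `bigquery-public-data.stackoverflow.posts_questions` "
    "WHERE tags LIKE @tag",
    job_config=job_config,
)
for row in query_job.result():
    print(row.n)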
Example 15: main
# Required import: from google.cloud import bigquery [as alias]
# Or: from google.cloud.bigquery import Client [as alias]
def main():
key_path = os.environ.get("GOOGLE_APPLICATION_CREDENTIALS")
# [START bigquery_client_json_credentials]
from google.cloud import bigquery
from google.oauth2 import service_account
# TODO(developer): Set key_path to the path to the service account key
# file.
# key_path = "path/to/service_account.json"
credentials = service_account.Credentials.from_service_account_file(
key_path,
scopes=["https://www.googleapis.com/auth/cloud-platform"],
)
client = bigquery.Client(
credentials=credentials,
project=credentials.project_id,
)
# [END bigquery_client_json_credentials]
return client
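Usage sketch: with GOOGLE_APPLICATION_CREDENTIALS pointing at a valid key file, the returned client can run a query immediately:
client = main()
for row in client.query("SELECT 1 AS x").result():
    print(row.x)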