本文整理汇总了Python中google.cloud.bigquery.Table方法的典型用法代码示例。如果您正苦于以下问题:Python bigquery.Table方法的具体用法?Python bigquery.Table怎么用?Python bigquery.Table使用的例子?那么恭喜您, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类google.cloud.bigquery
的用法示例。
在下文中一共展示了bigquery.Table方法的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: _create_table
# 需要导入模块: from google.cloud import bigquery [as 别名]
# 或者: from google.cloud.bigquery import Table [as 别名]
def _create_table(self, table_name, entity_instance):
    """Create a BigQuery table, or merge the schema into an existing one.

    Args:
        table_name: str, name of the table to be created or updated.
        entity_instance: an ndb.Model entity instance to base the schema on.
    """
    entity_schema = _generate_entity_schema(entity_instance)
    target_schema = _generate_schema(entity_schema)
    ref = bigquery.TableReference(self._dataset_ref, table_name)
    new_table = bigquery.Table(ref, schema=target_schema)
    try:
        new_table = self._client.create_table(new_table)
    except cloud.exceptions.Conflict:
        # Table exists already; merge the generated schema into it instead.
        logging.info('Table %s already exists, attempting to update it.',
                     table_name)
        new_table.schema = _merge_schemas(new_table.schema, target_schema)
        new_table = self._client.update_table(new_table, ['schema'])
        logging.info('Table %s updated.', table_name)
    else:
        logging.info('Table %s created.', table_name)
示例2: parquet
# 需要导入模块: from google.cloud import bigquery [as 别名]
# 或者: from google.cloud.bigquery import Table [as 别名]
def parquet(tables, data_directory, ignore_missing_dependency, **params):
    """Write each table's DataFrame to a Parquet file under data_directory."""
    try:
        import pyarrow as pa  # noqa: F401
        import pyarrow.parquet as pq  # noqa: F401
    except ImportError:
        msg = 'PyArrow dependency is missing'
        if not ignore_missing_dependency:
            raise click.ClickException(msg)
        # Best-effort mode: report and bail out without failing the command.
        logger.warning('Ignored: %s', msg)
        return 0

    data_directory = Path(data_directory)
    for table, df in read_tables(tables, data_directory):
        target_path = data_directory / '{}.parquet'.format(table)
        pq.write_table(pa.Table.from_pandas(df), str(target_path))
示例3: delete
# 需要导入模块: from google.cloud import bigquery [as 别名]
# 或者: from google.cloud.bigquery import Table [as 别名]
def delete(self, table_id):
    """ Delete a table in Google BigQuery

    Parameters
    ----------
    table_id : str
        Name of table to be deleted

    Raises
    ------
    NotFoundException
        If the table does not exist in the dataset.
    """
    from google.api_core.exceptions import NotFound

    if not self.exists(table_id):
        raise NotFoundException("Table does not exist")
    table_ref = self.client.dataset(self.dataset_id).table(table_id)
    try:
        self.client.delete_table(table_ref)
    except NotFound:
        # Ignore 404 error which may occur if table already deleted
        pass
    except self.http_error as ex:
        self.process_http_error(ex)
示例4: manage_tables
# 需要导入模块: from google.cloud import bigquery [as 别名]
# 或者: from google.cloud.bigquery import Table [as 别名]
def manage_tables(dataset_id, table_id):
    """Create tables in datasets in BigQuery."""
    # Make sure the target dataset exists before doing anything else.
    try:
        get_bq_dataset(dataset_id)
    except exceptions.NotFound as e:
        return flask.jsonify(error=e.code, text=e.message), e.code

    table_ref = bq_client.dataset(dataset_id).table(table_id)

    # Validate the request payload against the expected table schema shape.
    try:
        jsonschema.validate(flask.request.json, bq_table_schema)
    except jsonschema.ValidationError:
        error_msg = 'unable to validate provided payload.'
        return flask.jsonify(error=400, text=error_msg), 400

    fields = [
        bigquery.SchemaField(
            item['name'], item['type'], item.get('mode') or 'NULLABLE'
        )
        for item in flask.request.json
    ]
    try:
        bq_client.create_table(bigquery.Table(table_ref, schema=fields))
    except exceptions.GoogleAPIError as e:
        return flask.jsonify(error=e.message), 400
    return flask.jsonify(result='success'), 200
示例5: create_table
# 需要导入模块: from google.cloud import bigquery [as 别名]
# 或者: from google.cloud.bigquery import Table [as 别名]
def create_table(table_id):
    """Sample: create a BigQuery table with a two-field schema."""
    # [START bigquery_create_table]
    from google.cloud import bigquery

    # Construct a BigQuery client object.
    client = bigquery.Client()

    # TODO(developer): Set table_id to the ID of the table to create.
    # table_id = "your-project.your_dataset.your_table_name"

    schema = [
        bigquery.SchemaField("full_name", "STRING", mode="REQUIRED"),
        bigquery.SchemaField("age", "INTEGER", mode="REQUIRED"),
    ]

    created = client.create_table(
        bigquery.Table(table_id, schema=schema)
    )  # Make an API request.
    print(
        "Created table {}.{}.{}".format(
            created.project, created.dataset_id, created.table_id
        )
    )
    # [END bigquery_create_table]
示例6: test_update_table_require_partition_filter
# 需要导入模块: from google.cloud import bigquery [as 别名]
# 或者: from google.cloud.bigquery import Table [as 别名]
def test_update_table_require_partition_filter(capsys, random_table_id, client):
    """Create a time-partitioned table, run the sample, and check its output."""
    partition_field = "transaction_timestamp"
    partitioned = bigquery.Table(
        random_table_id,
        schema=[bigquery.SchemaField(partition_field, "TIMESTAMP")],
    )
    partitioned.time_partitioning = bigquery.TimePartitioning(field=partition_field)
    client.create_table(partitioned)

    update_table_require_partition_filter.update_table_require_partition_filter(
        random_table_id
    )

    captured, _ = capsys.readouterr()
    expected = "Updated table '{}' with require_partition_filter=True".format(
        random_table_id
    )
    assert expected in captured
示例7: test_get_table
# 需要导入模块: from google.cloud import bigquery [as 别名]
# 或者: from google.cloud.bigquery import Table [as 别名]
def test_get_table(capsys, random_table_id, client):
    """Create a described table, run the get_table sample, verify the output."""
    fields = [
        bigquery.SchemaField("full_name", "STRING", mode="REQUIRED"),
        bigquery.SchemaField("age", "INTEGER", mode="REQUIRED"),
    ]
    new_table = bigquery.Table(random_table_id, fields)
    new_table.description = "Sample Table"
    new_table = client.create_table(new_table)

    get_table.get_table(random_table_id)

    out, err = capsys.readouterr()
    for expected in (
        "Got table '{}'.".format(random_table_id),
        "full_name",
        "Table description: Sample Table",
        "Table has 0 rows",
    ):
        assert expected in out

    client.delete_table(new_table, not_found_ok=True)
示例8: tearDown
# 需要导入模块: from google.cloud import bigquery [as 别名]
# 或者: from google.cloud.bigquery import Table [as 别名]
def tearDown(self):
    """Delete every resource queued in ``self.to_delete``, with retries."""

    def _still_in_use(bad_request):
        # BigQuery reports "resourceInUse" while dependents still exist.
        return any(
            error["reason"] == "resourceInUse" for error in bad_request._errors
        )

    retry_in_use = RetryErrors(BadRequest, error_predicate=_still_in_use)
    retry_storage_errors_conflict = RetryErrors(
        (Conflict, TooManyRequests, InternalServerError, ServiceUnavailable)
    )
    for resource in self.to_delete:
        if isinstance(resource, storage.Bucket):
            retry_storage_errors_conflict(resource.delete)(force=True)
        elif isinstance(resource, (Dataset, bigquery.DatasetReference)):
            retry_in_use(Config.CLIENT.delete_dataset)(
                resource, delete_contents=True
            )
        elif isinstance(resource, (Table, bigquery.TableReference)):
            retry_in_use(Config.CLIENT.delete_table)(resource)
        else:
            resource.delete()
示例9: test_update_table_schema
# 需要导入模块: from google.cloud import bigquery [as 别名]
# 或者: from google.cloud.bigquery import Table [as 别名]
def test_update_table_schema(self):
    """Appending a field to a table's schema should round-trip via update_table."""
    dataset = self.temp_dataset(_make_dataset_id("update_table"))
    TABLE_NAME = "test_table"
    table_arg = Table(dataset.table(TABLE_NAME), schema=SCHEMA)
    self.assertFalse(_table_exists(table_arg))
    created = retry_403(Config.CLIENT.create_table)(table_arg)
    self.to_delete.insert(0, created)
    self.assertTrue(_table_exists(created))

    extended = created.schema
    extended.append(bigquery.SchemaField("voter", "BOOLEAN", mode="NULLABLE"))
    created.schema = extended
    updated_table = Config.CLIENT.update_table(created, ["schema"])

    self.assertEqual(len(updated_table.schema), len(extended))
    for found, expected in zip(updated_table.schema, extended):
        self.assertEqual(found.name, expected.name)
        self.assertEqual(found.field_type, expected.field_type)
        self.assertEqual(found.mode, expected.mode)
示例10: test_extract_table
# 需要导入模块: from google.cloud import bigquery [as 别名]
# 或者: from google.cloud.bigquery import Table [as 别名]
def test_extract_table(self):
    """Load a CSV into a table, extract it back to GCS, check the contents."""
    local_id = unique_resource_id()
    bucket_name = "bq_extract_test" + local_id
    source_blob_name = "person_ages.csv"
    dataset_ref = bigquery.DatasetReference(
        Config.CLIENT.project, _make_dataset_id("load_gcs_then_extract")
    )
    table_ref = dataset_ref.table("test_table")
    self.to_delete.insert(0, Table(table_ref))

    bucket = self._create_bucket(bucket_name)
    self._load_table_for_extract_table(bucket, source_blob_name, table_ref, ROWS)

    destination = bucket.blob("person_ages_out.csv")
    destination_uri = "gs://{}/person_ages_out.csv".format(bucket_name)
    job = Config.CLIENT.extract_table(table_ref, destination_uri)
    job.result(timeout=100)
    self.to_delete.insert(0, destination)

    got_bytes = retry_storage_errors(destination.download_as_string)()
    self.assertIn("Bharney Rhubble", got_bytes.decode("utf-8"))
示例11: test_job_cancel
# 需要导入模块: from google.cloud import bigquery [as 别名]
# 或者: from google.cloud.bigquery import Table [as 别名]
def test_job_cancel(self):
    """Cancelling a query job should not raise, and the job should finish."""
    DATASET_ID = _make_dataset_id("job_cancel")
    JOB_ID_PREFIX = "fetch_" + DATASET_ID
    TABLE_NAME = "test_table"
    QUERY = "SELECT * FROM %s.%s" % (DATASET_ID, TABLE_NAME)

    dataset = self.temp_dataset(DATASET_ID)
    created = retry_403(Config.CLIENT.create_table)(
        Table(dataset.table(TABLE_NAME), schema=SCHEMA)
    )
    self.to_delete.insert(0, created)

    job = Config.CLIENT.query(QUERY, job_id_prefix=JOB_ID_PREFIX)
    job.cancel()

    retry = RetryInstanceState(_job_done, max_tries=8)
    retry(job.reload)()

    # The `cancel` API doesn't leave any reliable traces on the status of
    # the job resource, so we can't really assert for them here. The best
    # we can do is note that the API call didn't raise an error, and that
    # the job completed (in the `retry()` above).
示例12: create_table
# 需要导入模块: from google.cloud import bigquery [as 别名]
# 或者: from google.cloud.bigquery import Table [as 别名]
def create_table(self, table_id, schema):
    """Creates a table in BigQuery.

    Args:
        table_id(str): Id of the table to be created.
        schema(List[google.cloud.bigquery.schema.SchemaField]): The
            schema of the table to be created in BigQuery format.

    Returns: The created table (google.cloud.bigquery.table.Table).
    """
    new_table = bigquery.Table(
        self.dataset_ref.table(table_id), schema=schema
    )
    result = self.bq_client.create_table(new_table)
    logging.info('{0:s} Created Table {1:s}'.format(
        str(datetime.datetime.now()), table_id))
    return result
示例13: create_table
# 需要导入模块: from google.cloud import bigquery [as 别名]
# 或者: from google.cloud.bigquery import Table [as 别名]
def create_table(self, table_id):
    """Creates test user tables.

    Args:
        table_id(str): ID of the user table to be created.

    Returns:
        The created table (google.cloud.bigquery.table.Table).
    """
    new_table = bigquery.Table(
        self.dataset_ref.table(table_id), schema=self.bq_schema
    )
    # Drop any stale copy first; a missing table just means nothing to drop.
    try:
        self.bq_client.delete_table(new_table)
    except exceptions.NotFound:
        pass
    return self.bq_client.create_table(new_table)
示例14: delete_table
# 需要导入模块: from google.cloud import bigquery [as 别名]
# 或者: from google.cloud.bigquery import Table [as 别名]
def delete_table(self, dataset_id, table_name):
    """Deletes BigQuery table.

    Args:
        dataset_id (str): BigQuery dataset id.
        table_name (str): BigQuery table name.
    """
    ref = self.client.dataset(dataset_id).table(table_name)
    try:
        self.client.delete_table(ref)
    except exceptions.NotFound as error:
        # Table already gone: record why and move on.
        logger.debug(error)
        logger.debug("Table %s not found in %s dataset. No need to delete",
                     table_name, dataset_id)
    else:
        logger.debug("Deleted table %s from %s dataset", table_name,
                     dataset_id)
示例15: create_table
# 需要导入模块: from google.cloud import bigquery [as 别名]
# 或者: from google.cloud.bigquery import Table [as 别名]
def create_table(self, project_id, dataset_id, table_id, schema=None):
    """Creates a BigQuery table from a schema.

    Args:
        project_id (str): Project id.
        dataset_id (str): Dataset id.
        table_id (str): Table id to create.
        schema (dict): BigQuery table schema in JSON format. Defaults to
            the module-level TABLE_SCHEMA when not provided.

    Returns:
        The created table (google.cloud.bigquery.table.Table).
    """
    # BUG FIX: the original check was `if schema is not None`, which both
    # clobbered any caller-supplied schema with TABLE_SCHEMA and crashed
    # (iterating None) when the default was used.
    if schema is None:
        schema = TABLE_SCHEMA
    pyschema = [
        bigquery.SchemaField(row['name'], row['type'], mode=row['mode'])
        for row in schema
    ]
    table_name = f"{project_id}.{dataset_id}.{table_id}"
    # BUG FIX: use lazy %-formatting; the original passed an f-string plus a
    # stray positional arg, which breaks logging's deferred formatting.
    LOGGER.info("Creating table %s", table_name)
    table = bigquery.Table(table_name, schema=pyschema)
    return self.client.create_table(table)