This article collects typical usage examples of the Python method google.cloud.bigquery.SchemaField. If you have been wondering what bigquery.SchemaField does and how to use it, the curated code examples below may help. You can also explore the broader usage of the containing module, google.cloud.bigquery.
The following 15 code examples of bigquery.SchemaField are shown below, sorted by popularity by default.
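Before diving into the examples, here is a minimal sketch of the constructor itself: a SchemaField describes one column as a name, a BigQuery type, and an optional mode and description.

from google.cloud import bigquery

# One column definition: name and type are required; mode defaults to
# 'NULLABLE' and description to None.
field = bigquery.SchemaField("full_name", "STRING", mode="REQUIRED",
                             description="The person's full name.")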
Example 1: test_table_to_dataframe
# Required import: from google.cloud import bigquery [as alias]
# Or: from google.cloud.bigquery import SchemaField [as alias]
def test_table_to_dataframe(capsys, clients):
    from google.cloud import bigquery

    bqclient, bqstorageclient = clients

    # [START bigquerystorage_pandas_tutorial_all]
    # [START bigquerystorage_pandas_tutorial_read_table]
    # Download a table.
    table = bigquery.TableReference.from_string(
        "bigquery-public-data.utility_us.country_code_iso"
    )
    rows = bqclient.list_rows(
        table,
        selected_fields=[
            bigquery.SchemaField("country_name", "STRING"),
            bigquery.SchemaField("fips_code", "STRING"),
        ],
    )
    dataframe = rows.to_dataframe(bqstorage_client=bqstorageclient)
    print(dataframe.head())
    # [END bigquerystorage_pandas_tutorial_read_table]
    # [END bigquerystorage_pandas_tutorial_all]

    out, _ = capsys.readouterr()
    assert "country_name" in out
Example 2: manage_tables
# Required import: from google.cloud import bigquery [as alias]
# Or: from google.cloud.bigquery import SchemaField [as alias]
def manage_tables(dataset_id, table_id):
    """Create tables in datasets in BigQuery."""
    try:
        get_bq_dataset(dataset_id)
    except exceptions.NotFound as e:
        return flask.jsonify(error=e.code, text=e.message), e.code

    table_ref = bq_client.dataset(dataset_id).table(table_id)
    try:
        jsonschema.validate(flask.request.json, bq_table_schema)
    except jsonschema.ValidationError:
        error_msg = 'unable to validate provided payload.'
        return flask.jsonify(error=400, text=error_msg), 400

    schema = [bigquery.SchemaField(field['name'], field['type'],
                                   field.get('mode') or 'NULLABLE')
              for field in flask.request.json]
    table = bigquery.Table(table_ref, schema=schema)
    try:
        table = bq_client.create_table(table)
    except exceptions.GoogleAPIError as e:
        return flask.jsonify(error=e.message), 400
    return flask.jsonify(result='success'), 200
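The payload this endpoint validates against bq_table_schema is not shown above; as an assumption, the request body would be a JSON array of field definitions along these lines (names and types are illustrative):

# Hypothetical request payload for manage_tables; 'mode' is optional and
# falls back to 'NULLABLE' in the list comprehension above.
payload = [
    {"name": "full_name", "type": "STRING", "mode": "REQUIRED"},
    {"name": "age", "type": "INTEGER"},
]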
Example 3: create_table
# Required import: from google.cloud import bigquery [as alias]
# Or: from google.cloud.bigquery import SchemaField [as alias]
def create_table(table_id):
    # [START bigquery_create_table]
    from google.cloud import bigquery

    # Construct a BigQuery client object.
    client = bigquery.Client()

    # TODO(developer): Set table_id to the ID of the table to create.
    # table_id = "your-project.your_dataset.your_table_name"

    schema = [
        bigquery.SchemaField("full_name", "STRING", mode="REQUIRED"),
        bigquery.SchemaField("age", "INTEGER", mode="REQUIRED"),
    ]

    table = bigquery.Table(table_id, schema=schema)
    table = client.create_table(table)  # Make an API request.
    print(
        "Created table {}.{}.{}".format(table.project, table.dataset_id, table.table_id)
    )
    # [END bigquery_create_table]
Example 4: test_update_table_require_partition_filter
# Required import: from google.cloud import bigquery [as alias]
# Or: from google.cloud.bigquery import SchemaField [as alias]
def test_update_table_require_partition_filter(capsys, random_table_id, client):
    # Make a partitioned table.
    schema = [bigquery.SchemaField("transaction_timestamp", "TIMESTAMP")]
    table = bigquery.Table(random_table_id, schema=schema)
    table.time_partitioning = bigquery.TimePartitioning(field="transaction_timestamp")
    table = client.create_table(table)

    update_table_require_partition_filter.update_table_require_partition_filter(
        random_table_id
    )
    out, _ = capsys.readouterr()
    assert (
        "Updated table '{}' with require_partition_filter=True".format(random_table_id)
        in out
    )
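The update_table_require_partition_filter sample module under test is not shown here; a plausible sketch, assuming it uses the standard Table.require_partition_filter property and an update mask:

from google.cloud import bigquery

def update_table_require_partition_filter(table_id):
    client = bigquery.Client()
    table = client.get_table(table_id)  # Fetch the current table.
    table.require_partition_filter = True
    table = client.update_table(table, ["require_partition_filter"])  # API request.
    print(
        "Updated table '{}' with require_partition_filter={}".format(
            table_id, table.require_partition_filter
        )
    )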
Example 5: test_update_table_schema
# Required import: from google.cloud import bigquery [as alias]
# Or: from google.cloud.bigquery import SchemaField [as alias]
def test_update_table_schema(self):
    dataset = self.temp_dataset(_make_dataset_id("update_table"))
    TABLE_NAME = "test_table"
    table_arg = Table(dataset.table(TABLE_NAME), schema=SCHEMA)
    self.assertFalse(_table_exists(table_arg))
    table = retry_403(Config.CLIENT.create_table)(table_arg)
    self.to_delete.insert(0, table)
    self.assertTrue(_table_exists(table))
    voter = bigquery.SchemaField("voter", "BOOLEAN", mode="NULLABLE")
    schema = table.schema
    schema.append(voter)
    table.schema = schema

    updated_table = Config.CLIENT.update_table(table, ["schema"])

    self.assertEqual(len(updated_table.schema), len(schema))
    for found, expected in zip(updated_table.schema, schema):
        self.assertEqual(found.name, expected.name)
        self.assertEqual(found.field_type, expected.field_type)
        self.assertEqual(found.mode, expected.mode)
Example 6: test_load_table_from_dataframe_w_nullable_int64_datatype_automatic_schema
# Required import: from google.cloud import bigquery [as alias]
# Or: from google.cloud.bigquery import SchemaField [as alias]
def test_load_table_from_dataframe_w_nullable_int64_datatype_automatic_schema(self):
    """Test that a DataFrame with a nullable Int64 column containing None
    values can be uploaded without specifying a schema.

    https://github.com/googleapis/python-bigquery/issues/22
    """
    dataset_id = _make_dataset_id("bq_load_test")
    self.temp_dataset(dataset_id)
    table_id = "{}.{}.load_table_from_dataframe_w_nullable_int64_datatype".format(
        Config.CLIENT.project, dataset_id
    )
    df_data = collections.OrderedDict(
        [("x", pandas.Series([1, 2, None, 4], dtype="Int64"))]
    )
    dataframe = pandas.DataFrame(df_data, columns=df_data.keys())
    load_job = Config.CLIENT.load_table_from_dataframe(dataframe, table_id)
    load_job.result()
    table = Config.CLIENT.get_table(table_id)
    self.assertEqual(tuple(table.schema), (bigquery.SchemaField("x", "INTEGER"),))
    self.assertEqual(table.num_rows, 4)
Example 7: test_list_rows_max_results_w_bqstorage
# Required import: from google.cloud import bigquery [as alias]
# Or: from google.cloud.bigquery import SchemaField [as alias]
def test_list_rows_max_results_w_bqstorage(self):
    table_ref = DatasetReference("bigquery-public-data", "utility_us").table(
        "country_code_iso"
    )
    bqstorage_client = bigquery_storage_v1.BigQueryReadClient(
        credentials=Config.CLIENT._credentials
    )

    row_iterator = Config.CLIENT.list_rows(
        table_ref,
        selected_fields=[bigquery.SchemaField("country_name", "STRING")],
        max_results=100,
    )
    dataframe = row_iterator.to_dataframe(bqstorage_client=bqstorage_client)

    self.assertEqual(len(dataframe.index), 100)
Example 8: read_bigquery_schema_from_json_recursive
# Required import: from google.cloud import bigquery [as alias]
# Or: from google.cloud.bigquery import SchemaField [as alias]
def read_bigquery_schema_from_json_recursive(json_schema):
    """
    CAUTION: Recursive function
    This method can generate BQ schemas for nested records
    """
    result = []
    for field in json_schema:
        if field.get('type').lower() == 'record' and field.get('fields'):
            schema = bigquery.SchemaField(
                name=field.get('name'),
                field_type=field.get('type', 'STRING'),
                mode=field.get('mode', 'NULLABLE'),
                description=field.get('description'),
                fields=read_bigquery_schema_from_json_recursive(field.get('fields'))
            )
        else:
            schema = bigquery.SchemaField(
                name=field.get('name'),
                field_type=field.get('type', 'STRING'),
                mode=field.get('mode', 'NULLABLE'),
                description=field.get('description')
            )
        result.append(schema)
    return result
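To see the recursion at work, here is a small, hypothetical input with one nested RECORD; the field names are made up for illustration:

# Hypothetical JSON schema; the 'address' RECORD triggers the recursive branch.
json_schema = [
    {"name": "user_id", "type": "STRING", "mode": "REQUIRED"},
    {"name": "address", "type": "RECORD", "fields": [
        {"name": "city", "type": "STRING"},
        {"name": "zip", "type": "STRING"},
    ]},
]
schema = read_bigquery_schema_from_json_recursive(json_schema)
# schema[1].fields now holds the SchemaFields for 'city' and 'zip'.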
Example 9: test_get_bq_translated_schema
# Required import: from google.cloud import bigquery [as alias]
# Or: from google.cloud.bigquery import SchemaField [as alias]
def test_get_bq_translated_schema(self):
    """Tests TableUtil.get_bq_translated_schema().

    Tests TableUtil's ability to translate a JSON schema to a BigQuery
    schema in List[google.cloud.bigquery.schema.SchemaField] format.

    Returns:
        True if test passes, else False.
    """
    expected_bq_schema = [
        bigquery.SchemaField('string1', 'STRING', 'REQUIRED',
                             'description1'),
        bigquery.SchemaField('numeric1', 'NUMERIC', 'REQUIRED',
                             'description2')
    ]
    bq_schema = self.table_util.get_bq_translated_schema()
    assert expected_bq_schema == bq_schema
Example 10: csv_in_gcs_to_table
# Required import: from google.cloud import bigquery [as alias]
# Or: from google.cloud.bigquery import SchemaField [as alias]
def csv_in_gcs_to_table(bucket_name: str, object_name: str, dataset_id: str,
                        table_id: str,
                        schema: List[bigquery.SchemaField]) -> None:
    """Upload CSV to BigQuery table.

    If the table already exists, it overwrites the table data.

    Args:
        bucket_name: Bucket name holding the object.
        object_name: Name of the object to be uploaded.
        dataset_id: Dataset id where the table is located.
        table_id: String holding the id of the table.
        schema: Schema of the table.
    """
    client = bigquery.Client()
    dataset_ref = client.dataset(dataset_id)
    job_config = bigquery.LoadJobConfig()
    job_config.schema = schema
    job_config.source_format = bigquery.SourceFormat.CSV
    job_config.write_disposition = bigquery.WriteDisposition.WRITE_TRUNCATE
    uri = "gs://{}/{}".format(bucket_name, object_name)
    load_job = client.load_table_from_uri(uri,
                                          dataset_ref.table(table_id),
                                          job_config=job_config)
    load_job.result()
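A hypothetical call, assuming a two-column CSV already sits in the bucket (all names below are placeholders):

from google.cloud import bigquery

csv_in_gcs_to_table(
    bucket_name="my-bucket",          # placeholder bucket
    object_name="exports/users.csv",  # placeholder object
    dataset_id="my_dataset",
    table_id="users",
    schema=[
        bigquery.SchemaField("full_name", "STRING", mode="REQUIRED"),
        bigquery.SchemaField("age", "INTEGER"),
    ],
)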
Example 11: _generate_schema
# Required import: from google.cloud import bigquery [as alias]
# Or: from google.cloud.bigquery import SchemaField [as alias]
def _generate_schema(entity_fields=None):
    """Generates a BigQuery schema.

    Args:
        entity_fields: list of bigquery.SchemaField objects, the fields to
            include in the entity record.

    Returns:
        A list of bigquery.SchemaField objects.
    """
    schema = [
        bigquery.SchemaField(
            'ndb_key', 'STRING', 'REQUIRED',
            description='ndb key of the entity.'),
        bigquery.SchemaField('timestamp', 'TIMESTAMP', 'REQUIRED'),
        bigquery.SchemaField(
            'actor',
            'STRING',
            'REQUIRED',
            description='User performing the action.'),
        bigquery.SchemaField(
            'method',
            'STRING',
            'REQUIRED',
            description='Method performing the action.'),
        bigquery.SchemaField(
            'summary',
            'STRING',
            'REQUIRED',
            description='User generated summary.')
    ]
    if entity_fields:
        schema.append(
            bigquery.SchemaField(
                'entity',
                'RECORD',
                'NULLABLE',
                description='Current attributes of the entity.',
                fields=entity_fields))
    return schema
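A minimal usage sketch; the single entity field passed in is hypothetical:

# Hypothetical entity field; with entity_fields set, the returned schema
# gains a NULLABLE RECORD named 'entity' wrapping it.
entity_fields = [bigquery.SchemaField('serial_number', 'STRING', 'NULLABLE')]
schema = _generate_schema(entity_fields)
assert schema[-1].name == 'entity' and schema[-1].field_type == 'RECORD'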
Example 12: setUp
# Required import: from google.cloud import bigquery [as alias]
# Or: from google.cloud.bigquery import SchemaField [as alias]
def setUp(self):
    super(BigQueryClientTest, self).setUp()
    bq_patcher = mock.patch.object(gcloud_bq, 'Client', autospec=True)
    self.addCleanup(bq_patcher.stop)
    self.bq_mock = bq_patcher.start()
    self.dataset_ref = mock.Mock(spec=gcloud_bq.DatasetReference)
    self.table = mock.Mock(spec=gcloud_bq.Table)
    self.table.schema = []
    self.dataset_ref.table.return_value = self.table
    with mock.patch.object(
            bigquery.BigQueryClient, '__init__', return_value=None):
        self.client = bigquery.BigQueryClient()
        self.client._client = self.bq_mock()
        self.client._dataset_ref = self.dataset_ref
        self.client._client.insert_rows.return_value = None
        self.client._client.get_table.return_value = self.table
    self.nested_schema = [
        gcloud_bq.SchemaField('nested_string_attribute', 'STRING', 'NULLABLE')]
    self.entity_schema = [
        gcloud_bq.SchemaField('string_attribute', 'STRING', 'NULLABLE'),
        gcloud_bq.SchemaField('integer_attribute', 'INTEGER', 'NULLABLE'),
        gcloud_bq.SchemaField('boolean_attribute', 'BOOLEAN', 'NULLABLE'),
        gcloud_bq.SchemaField(
            'nested_attribute', 'RECORD', 'NULLABLE', fields=self.nested_schema)
    ]
    test_device = device_model.Device(
        serial_number='abc123', chrome_device_id='123123')
    test_device.put()
    test_row = bigquery_row_model.BigQueryRow.add(
        test_device, datetime.datetime.utcnow(),
        loanertest.USER_EMAIL, 'Enroll', 'This is a test')
    self.test_row_dict = test_row.to_json_dict()
    self.test_table = [(self.test_row_dict['ndb_key'],
                        self.test_row_dict['timestamp'],
                        self.test_row_dict['actor'],
                        self.test_row_dict['method'],
                        self.test_row_dict['summary'],
                        self.test_row_dict['entity'])]
Example 13: test_generate_schema_no_entity
# Required import: from google.cloud import bigquery [as alias]
# Or: from google.cloud.bigquery import SchemaField [as alias]
def test_generate_schema_no_entity(self):
    generated_schema = bigquery._generate_schema()
    self.assertLen(generated_schema, 5)
    self.assertIsInstance(generated_schema[0], gcloud_bq.SchemaField)
Example 14: _populate_schema_names
# Required import: from google.cloud import bigquery [as alias]
# Or: from google.cloud.bigquery import SchemaField [as alias]
def _populate_schema_names(schema):
    """Creates a list of the field names in the schema.

    Args:
        schema: List[bigquery.SchemaField], a list of bigquery.SchemaField
            objects.

    Returns:
        A list containing the names of the schema fields.
    """
    names = []
    for field in schema:
        names.append(field.name)
    return names
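The loop is equivalent to a one-line comprehension over the SchemaField objects:

names = [field.name for field in schema]  # same result as the loop above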
Example 15: build_schema
# Required import: from google.cloud import bigquery [as alias]
# Or: from google.cloud.bigquery import SchemaField [as alias]
def build_schema(schema):
    SCHEMA = []
    for key in schema['properties'].keys():
        if not bool(schema['properties'][key]):
            # Skip properties that resolve to an empty record.
            continue
        schema_name, schema_type, schema_mode, schema_description, schema_fields = \
            define_schema(schema['properties'][key], key)
        SCHEMA.append(SchemaField(schema_name, schema_type, schema_mode,
                                  schema_description, schema_fields))
    return SCHEMA
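define_schema is not defined in this snippet; as a labeled assumption, here is a minimal sketch of the tuple it would need to return so the SchemaField call above type-checks (name, BigQuery type, mode, description, nested fields):

# Hypothetical sketch of define_schema, not the original helper: map a
# JSON Schema property to SchemaField constructor arguments.
def define_schema(prop, key):
    type_map = {'integer': 'INTEGER', 'number': 'FLOAT', 'boolean': 'BOOLEAN'}
    field_type = type_map.get(prop.get('type'), 'STRING')
    mode = 'NULLABLE'
    description = prop.get('description')
    fields = ()  # nested SchemaFields would go here for RECORD types
    return key, field_type, mode, description, fields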