This article collects typical usage examples of the Python method google.cloud.bigquery.Dataset. If you are unsure what bigquery.Dataset does, how to call it, or want to see it used in context, the curated examples below may help. You can also browse further usage examples from the google.cloud.bigquery module, where this method lives.
Below are 15 code examples of bigquery.Dataset, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Python code examples.
Example 1: initialize_tables
# Required import: from google.cloud import bigquery [as alias]
# Or: from google.cloud.bigquery import Dataset [as alias]
def initialize_tables(self):
    """Performs first-time setup by creating dataset/tables."""
    if constants.ON_LOCAL:
        logging.debug('On local, not connecting to BQ.')
        return
    logging.info('Beginning BigQuery initialization.')
    dataset = bigquery.Dataset(self._dataset_ref)
    try:
        dataset = self._client.create_dataset(dataset)
    except cloud.exceptions.Conflict:
        logging.warning('Dataset %s already exists, not creating.',
                        dataset.dataset_id)
    else:
        logging.info('Dataset %s successfully created.', dataset.dataset_id)
    self._create_table(constants.BIGQUERY_DEVICE_TABLE, device_model.Device())
    self._create_table(constants.BIGQUERY_SHELF_TABLE, shelf_model.Shelf())
    self._create_table(constants.BIGQUERY_SURVEY_TABLE,
                       survey_models.Question())
    logging.info('BigQuery successfully initialized.')
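Note: on newer versions of google-cloud-bigquery (1.12+), the try/except Conflict pattern above can be replaced with the exists_ok flag, which makes creation idempotent. A minimal standalone sketch; the project and dataset names are placeholders, not from the example:

from google.cloud import bigquery

client = bigquery.Client()
# With exists_ok=True, an "already exists" Conflict is swallowed and the
# existing dataset is fetched and returned instead of raising.
dataset = client.create_dataset(
    bigquery.Dataset('my-project.my_dataset'), exists_ok=True)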
Example 2: create
# Required import: from google.cloud import bigquery [as alias]
# Or: from google.cloud.bigquery import Dataset [as alias]
def create(self, dataset_id):
    """Create a dataset in Google BigQuery.

    Parameters
    ----------
    dataset_id : str
        Name of the dataset to be created.
    """
    from google.cloud.bigquery import Dataset

    if self.exists(dataset_id):
        raise DatasetCreationError(
            "Dataset {0} already exists".format(dataset_id))
    dataset = Dataset(self.client.dataset(dataset_id))
    if self.location is not None:
        dataset.location = self.location
    try:
        self.client.create_dataset(dataset)
    except self.http_error as ex:
        self.process_http_error(ex)
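The exists helper used above belongs to the surrounding class and is not shown. A minimal sketch of such an existence check, written against the plain client API rather than the original class (the function name is hypothetical):

from google.api_core.exceptions import NotFound
from google.cloud import bigquery

def dataset_exists(client, dataset_id):
    """Return True if the dataset can be fetched, i.e. it already exists."""
    try:
        # Accepts a dataset ID string or a DatasetReference.
        client.get_dataset(dataset_id)
        return True
    except NotFound:
        return False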
Example 3: create_dataset
# Required import: from google.cloud import bigquery [as alias]
# Or: from google.cloud.bigquery import Dataset [as alias]
def create_dataset(dataset_id):
    # [START bigquery_create_dataset]
    from google.cloud import bigquery

    # Construct a BigQuery client object.
    client = bigquery.Client()

    # TODO(developer): Set dataset_id to the ID of the dataset to create.
    # dataset_id = "{}.your_dataset".format(client.project)

    # Construct a full Dataset object to send to the API.
    dataset = bigquery.Dataset(dataset_id)

    # TODO(developer): Specify the geographic location where the dataset
    # should reside.
    dataset.location = "US"

    # Send the dataset to the API for creation, with an explicit timeout.
    # Raises google.api_core.exceptions.Conflict if the Dataset already
    # exists within the project.
    dataset = client.create_dataset(dataset, timeout=30)  # Make an API request.
    print("Created dataset {}.{}".format(client.project, dataset.dataset_id))
    # [END bigquery_create_dataset]
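To exercise this sample, pass a fully qualified dataset ID, as the TODO comment indicates. The names below are placeholders:

create_dataset("my-project.my_new_dataset")
# Prints: Created dataset my-project.my_new_dataset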
Example 4: setUp
# Required import: from google.cloud import bigquery [as alias]
# Or: from google.cloud.bigquery import Dataset [as alias]
def setUp(self):
    """Sets up resources for tests."""
    self.bq_client = bigquery.Client()
    self.dataset_id = 'bq_benchmark_test_dataset'
    self.dataset_ref = self.bq_client.dataset(self.dataset_id)
    dataset = bigquery.Dataset(self.dataset_ref)
    self.dataset = self.bq_client.create_dataset(dataset)
    self.table_id = 'test_table'
    abs_path = os.path.abspath(os.path.dirname(__file__))
    json_schema_filename = os.path.join(abs_path,
                                        'test_schemas/test_schema.json')
    self.table_util = table_util.TableUtil(
        table_id=self.table_id,
        dataset_id=self.dataset_id,
        json_schema_filename=json_schema_filename,
    )
    self.table_util.create_table()
    self.test_query_generator = query_generator.QueryGenerator(
        table_id=self.table_id, dataset_id=self.dataset_id)
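A setUp like this is normally paired with a tearDown that removes the dataset so repeated test runs start from a clean slate. The original teardown is not shown; a plausible sketch using the standard client API:

def tearDown(self):
    """Deletes the test dataset and every table it contains."""
    # delete_contents=True removes tables first; not_found_ok=True makes
    # the teardown safe even if setUp failed before creating the dataset.
    self.bq_client.delete_dataset(
        self.dataset_ref, delete_contents=True, not_found_ok=True)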
Example 5: setUp
# Required import: from google.cloud import bigquery [as alias]
# Or: from google.cloud.bigquery import Dataset [as alias]
def setUp(self):
    """Sets up resources for tests."""
    self.bq_client = bigquery.Client()
    self.dataset_id = 'bq_benchmark_test_dataset'
    self.dataset_ref = self.bq_client.dataset(self.dataset_id)
    dataset = bigquery.Dataset(self.dataset_ref)
    self.dataset = self.bq_client.create_dataset(dataset)
    self.table_id = 'test_table'
    abs_path = os.path.abspath(os.path.dirname(__file__))
    json_schema_filename = os.path.join(abs_path,
                                        'test_schemas/test_schema.json')
    self.table_util = table_util.TableUtil(
        table_id=self.table_id,
        dataset_id=self.dataset_id,
        json_schema_filename=json_schema_filename,
    )
Example 6: setup
# Required import: from google.cloud import bigquery [as alias]
# Or: from google.cloud.bigquery import Dataset [as alias]
def setup(self):
    """Sets up resources for tests."""
    self.bq_client = bigquery.Client()
    self.dataset_id = 'user_updater_test'
    self.dataset_ref = self.bq_client.dataset(self.dataset_id)
    try:
        self.dataset = self.bq_client.get_dataset(self.dataset_ref)
    except exceptions.NotFound:
        dataset = bigquery.Dataset(self.dataset_ref)
        self.dataset = self.bq_client.create_dataset(dataset)
    schema_path = 'test_schemas/test_nested_schema.json'
    abs_path = os.path.abspath(os.path.dirname(__file__))
    self.schema_path = os.path.join(abs_path, schema_path)
    schema = user_schema.UserSchema(self.schema_path)
    self.bq_schema = schema.translate_json_schema()
    self.user_info_updates_id = 'test_nested_user_info_updates'
    self.user_info_updates_table = self.create_table(
        self.user_info_updates_id)
    self.temp_user_info_updates_id = 'test_nested_temp_user_info_updates'
    self.temp_user_info_updates_table = self.create_table(
        self.temp_user_info_updates_id)
    self.user_info_final_id = 'test_nested_user_info_final'
    self.user_info_final_table = self.create_table(self.user_info_final_id)
Example 7: setup
# Required import: from google.cloud import bigquery [as alias]
# Or: from google.cloud.bigquery import Dataset [as alias]
def setup(self):
    """Sets up resources for tests."""
    self.bq_client = bigquery.Client()
    self.dataset_id = 'user_updater_test'
    self.dataset_ref = self.bq_client.dataset(self.dataset_id)
    try:
        self.dataset = self.bq_client.get_dataset(self.dataset_ref)
    except exceptions.NotFound:
        dataset = bigquery.Dataset(self.dataset_ref)
        self.dataset = self.bq_client.create_dataset(dataset)
    schema_path = 'test_schemas/test_schema.json'
    abs_path = os.path.abspath(os.path.dirname(__file__))
    self.schema_path = os.path.join(abs_path, schema_path)
    schema = user_schema.UserSchema(self.schema_path)
    self.bq_schema = schema.translate_json_schema()
    self.user_info_updates_id = 'test_user_info_updates'
    self.user_info_updates_table = self.create_table(
        self.user_info_updates_id)
    self.temp_user_info_updates_id = 'test_temp_user_info_updates'
    self.temp_user_info_updates_table = self.create_table(
        self.temp_user_info_updates_id)
    self.user_info_final_id = 'test_user_info_final'
    self.user_info_final_table = self.create_table(self.user_info_final_id)
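Examples 6 and 7 rely on a create_table helper that is not part of the snippet. A minimal sketch of what such a helper might look like, assuming it builds each table from the translated self.bq_schema (hypothetical, not the original implementation):

def create_table(self, table_id):
    """Creates an empty table with the translated schema and returns it."""
    table_ref = self.dataset_ref.table(table_id)
    table = bigquery.Table(table_ref, schema=self.bq_schema)
    return self.bq_client.create_table(table)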
Example 8: bq_create_dataset
# Required import: from google.cloud import bigquery [as alias]
# Or: from google.cloud.bigquery import Dataset [as alias]
def bq_create_dataset(bq_client):
    """Creates the BigQuery dataset.

    If the dataset already exists, the existing dataset is returned.
    The dataset is created in the location specified by DATASET_LOCATION.

    Args:
        bq_client: BigQuery client.

    Returns:
        The BigQuery dataset that will be used to store data.
    """
    dataset_id = "{}.{}".format(bq_client.project, DATASET_NAME)
    dataset = bigquery.Dataset(dataset_id)
    dataset.location = DATASET_LOCATION
    dataset = bq_client.create_dataset(dataset, exists_ok=True)
    return dataset
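DATASET_NAME and DATASET_LOCATION are module-level constants in the original source. A minimal way to exercise the function, with placeholder values for both constants:

from google.cloud import bigquery

DATASET_NAME = 'my_dataset'   # placeholder
DATASET_LOCATION = 'US'       # placeholder

dataset = bq_create_dataset(bigquery.Client())
print(dataset.full_dataset_id)  # e.g. "my-project:my_dataset"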
Example 9: format_tree
# Required import: from google.cloud import bigquery [as alias]
# Or: from google.cloud.bigquery import Dataset [as alias]
def format_tree(self, show_key=False, show_status=False):
    log.info(f"Formatting tree...")
    tree_string = ""
    key = {
        "project": (Fore.CYAN + "◉" + Fore.RESET + " = Project".ljust(12)),
        "dataset": (Fore.YELLOW + "◉" + Fore.RESET + " = Dataset".ljust(12)),
        "table": (Fore.RED + "◉" + Fore.RESET + " = Table".ljust(12)),
        "view": (Fore.GREEN + "◉" + Fore.RESET + " = View".ljust(12)),
    }
    if show_key:
        tree_string += "Key:\n{}{}\n{}{}\n\n".format(
            key["project"], key["table"], key["dataset"], key["view"]
        )
    for pre, _, node in RenderTree(self.tree):
        tree_string += "%s%s\n" % (
            pre,
            node.pretty_name(show_authorization_status=show_status),
        )
    return tree_string
Example 10: _fetch_dataset
# Required import: from google.cloud import bigquery [as alias]
# Or: from google.cloud.bigquery import Dataset [as alias]
def _fetch_dataset(self, dataset_id):
    """Fetch a BigQuery Dataset if it exists; otherwise, create a new one.

    Parameters
    ----------
    dataset_id : str
        ID to name the created Dataset.

    Returns
    -------
    :class:`google.cloud.bigquery.dataset.Dataset`
        The Dataset object to build tables from.
    """
    dataset_ref = self.client.dataset(dataset_id)
    dataset = bigquery.Dataset(dataset_ref)
    try:
        dataset = self.client.create_dataset(dataset)
    except Conflict:
        dataset = self.client.get_dataset(dataset_ref)
    return dataset
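Design note: creating first and catching Conflict, as done here, is race-free, whereas the check-then-create pattern in Examples 11 and 12 can still fail if another process creates the dataset between get_dataset and create_dataset. The exists_ok=True flag shown in Example 8 collapses this get-or-create into a single call on newer library versions.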
Example 11: create_new_dataset
# Required import: from google.cloud import bigquery [as alias]
# Or: from google.cloud.bigquery import Dataset [as alias]
def create_new_dataset(self, dataset_id):
    '''
    https://cloud.google.com/bigquery/docs/datasets#create-dataset

    :param dataset_id: dataset name
    :return: None
    '''
    dataset_ref = self.client.dataset(dataset_id)
    try:
        # Check whether a dataset with the specified ID already exists.
        self.client.get_dataset(dataset_ref)
        log.info('Dataset {} already exists! Skipping create operation.'.format(dataset_id))
        # print(f'Dataset {dataset_id} already exists! Skipping create operation.')
    except NotFound:
        # Construct a full Dataset object to send to the API.
        dataset = bigquery.Dataset(dataset_ref)
        dataset.location = 'US'
        dataset = self.client.create_dataset(dataset)  # API request
        log.info('Dataset {} created successfully in project {}.'.format(
            dataset.dataset_id, self.client.project))
        # print(f'Dataset {dataset.dataset_id} created successfully in project {self.client.project}.')
Example 12: create_new_dataset
# Required import: from google.cloud import bigquery [as alias]
# Or: from google.cloud.bigquery import Dataset [as alias]
def create_new_dataset(self, dataset_id: str) -> None:
    '''
    https://cloud.google.com/bigquery/docs/datasets#create-dataset

    :param dataset_id: dataset name
    :return: None
    '''
    dataset_ref = self.client.dataset(dataset_id)
    try:
        # Check whether a dataset with the specified ID already exists.
        self.client.get_dataset(dataset_ref)
        print(f'Dataset {dataset_id} already exists! Skipping create operation.')
    except NotFound:
        # Construct a full Dataset object to send to the API.
        dataset = bigquery.Dataset(dataset_ref)
        dataset.location = 'US'
        dataset = self.client.create_dataset(dataset)  # API request
        print(f'Dataset {dataset.dataset_id} created successfully in project {self.client.project}.')
Example 13: __init__
# Required import: from google.cloud import bigquery [as alias]
# Or: from google.cloud.bigquery import Dataset [as alias]
def __init__(self, dataset_key=None):
    self.project = settings.BQ_PROJECT
    # If this raises a DefaultCredentialsError:
    # * on a developer's machine, run `gcloud auth application-default login`
    #   to use OAuth
    # * elsewhere, ensure that GOOGLE_APPLICATION_CREDENTIALS is set and
    #   points to a valid set of credentials for a service account
    #
    # A warning is raised when authenticating with OAuth, recommending that
    # server applications use a service account. We can ignore this.
    with warnings.catch_warnings():
        warnings.simplefilter("ignore")
        self.gcbq_client = gcbq.Client(project=self.project)
    self.dataset_key = dataset_key
    if dataset_key is None:
        self.dataset_id = None
        self.dataset = None
    else:
        self.dataset_id = DATASETS[dataset_key]
        dataset_ref = self.gcbq_client.dataset(self.dataset_id)
        self.dataset = gcbq.Dataset(dataset_ref)
Example 14: __enter__
# Required import: from google.cloud import bigquery [as alias]
# Or: from google.cloud.bigquery import Dataset [as alias]
def __enter__(self):
    if not self.revalidation_dataset_id:
        client = bigquery.Client(project=self.project)
        dataset_ref = client.dataset(self.dataset_id)
        dataset = bigquery.Dataset(dataset_ref)
        dataset.location = 'US'
        _ = client.create_dataset(dataset)  # See #171, pylint: disable=no-member
    return self
Example 15: __exit__
# Required import: from google.cloud import bigquery [as alias]
# Or: from google.cloud.bigquery import Dataset [as alias]
def __exit__(self, *args):
    # See #171 for why we need: pylint: disable=no-member
    if not self._keep_tables:
        client = bigquery.Client(project=self.project)
        dataset_ref = client.dataset(self.dataset_id)
        dataset = bigquery.Dataset(dataset_ref)
        client.delete_dataset(dataset, delete_contents=True)
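As written, delete_dataset raises NotFound if the dataset no longer exists, e.g. when __enter__ skipped creation. If teardown must tolerate that state, the final call can be made forgiving; a sketch, not the original code:

client.delete_dataset(
    dataset, delete_contents=True, not_found_ok=True)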