本文整理汇总了Python中google.cloud.bigquery.ScalarQueryParameter方法的典型用法代码示例。如果您正苦于以下问题:Python bigquery.ScalarQueryParameter方法的具体用法?Python bigquery.ScalarQueryParameter怎么用?Python bigquery.ScalarQueryParameter使用的例子?那么恭喜您, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在模块google.cloud.bigquery的用法示例。
在下文中一共展示了bigquery.ScalarQueryParameter方法的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: test_client_library_query_with_parameters
# Required import: from google.cloud import bigquery
# Alias used below: from google.cloud.bigquery import ScalarQueryParameter
def test_client_library_query_with_parameters():
    """Run a named-parameter query via the client library and assert it returns rows."""
    # [START bigquery_migration_client_library_query_parameters]
    from google.cloud import bigquery

    client = bigquery.Client()

    sql = """
        SELECT name
        FROM `bigquery-public-data.usa_names.usa_1910_current`
        WHERE state = @state
        LIMIT @limit
    """
    # Bind the @state and @limit placeholders to concrete values.
    params = [
        bigquery.ScalarQueryParameter('state', 'STRING', 'TX'),
        bigquery.ScalarQueryParameter('limit', 'INTEGER', 100),
    ]
    query_config = bigquery.QueryJobConfig(query_parameters=params)

    df = client.query(sql, job_config=query_config).to_dataframe()
    # [END bigquery_migration_client_library_query_parameters]

    assert len(df) > 0
示例2: client_query_w_struct_params
# Required import: from google.cloud import bigquery
# Alias used below: from google.cloud.bigquery import ScalarQueryParameter
def client_query_w_struct_params():
    """Demonstrate passing a STRUCT query parameter built from scalar members."""
    # [START bigquery_query_params_structs]
    from google.cloud import bigquery

    # Construct a BigQuery client object.
    client = bigquery.Client()

    query = "SELECT @struct_value AS s;"
    # A StructQueryParameter groups named scalar parameters into one STRUCT value.
    struct_param = bigquery.StructQueryParameter(
        "struct_value",
        bigquery.ScalarQueryParameter("x", "INT64", 1),
        bigquery.ScalarQueryParameter("y", "STRING", "foo"),
    )
    job_config = bigquery.QueryJobConfig(query_parameters=[struct_param])

    results = client.query(query, job_config=job_config)  # Make an API request.
    for row in results:
        print(row.s)
    # [END bigquery_query_params_structs]
示例3: bq_param_timestamp
# Required import: from google.cloud import bigquery
# Alias used below: from google.cloud.bigquery import ScalarQueryParameter
def bq_param_timestamp(param, value):
    """Convert *value* into a UTC TIMESTAMP scalar query parameter named after *param*."""
    param_type = param.type()
    assert isinstance(param_type, dt.Timestamp), str(param_type)

    # TODO(phillipc): Not sure if this is the correct way to do this.
    # Normalize to a timezone-aware (UTC) python datetime via pandas.
    as_utc = pd.Timestamp(value, tz='UTC').to_pydatetime()
    return bq.ScalarQueryParameter(param.get_name(), 'TIMESTAMP', as_utc)
示例4: bq_param_string
# Required import: from google.cloud import bigquery
# Alias used below: from google.cloud.bigquery import ScalarQueryParameter
def bq_param_string(param, value):
    """Wrap *value* as a STRING scalar query parameter named after *param*."""
    name = param.get_name()
    return bq.ScalarQueryParameter(name, 'STRING', value)
示例5: bq_param_integer
# Required import: from google.cloud import bigquery
# Alias used below: from google.cloud.bigquery import ScalarQueryParameter
def bq_param_integer(param, value):
    """Wrap *value* as an INT64 scalar query parameter named after *param*."""
    name = param.get_name()
    return bq.ScalarQueryParameter(name, 'INT64', value)
示例6: bq_param_double
# Required import: from google.cloud import bigquery
# Alias used below: from google.cloud.bigquery import ScalarQueryParameter
def bq_param_double(param, value):
    """Wrap *value* as a FLOAT64 scalar query parameter named after *param*."""
    name = param.get_name()
    return bq.ScalarQueryParameter(name, 'FLOAT64', value)
示例7: bq_param_boolean
# Required import: from google.cloud import bigquery
# Alias used below: from google.cloud.bigquery import ScalarQueryParameter
def bq_param_boolean(param, value):
    """Wrap *value* as a BOOL scalar query parameter named after *param*."""
    name = param.get_name()
    return bq.ScalarQueryParameter(name, 'BOOL', value)
示例8: client_query_w_timestamp_params
# Required import: from google.cloud import bigquery
# Alias used below: from google.cloud.bigquery import ScalarQueryParameter
def client_query_w_timestamp_params():
    """Demonstrate passing a TIMESTAMP query parameter.

    Uses the stdlib ``datetime.timezone.utc`` instead of the third-party
    ``pytz.UTC`` — both denote the same UTC tzinfo, so the value sent to
    BigQuery is unchanged, but the pytz dependency is dropped.
    """
    # [START bigquery_query_params_timestamps]
    import datetime

    from google.cloud import bigquery

    # Construct a BigQuery client object.
    client = bigquery.Client()

    query = "SELECT TIMESTAMP_ADD(@ts_value, INTERVAL 1 HOUR);"
    job_config = bigquery.QueryJobConfig(
        query_parameters=[
            bigquery.ScalarQueryParameter(
                "ts_value",
                "TIMESTAMP",
                # TIMESTAMP parameters must be timezone-aware datetimes.
                datetime.datetime(2016, 12, 7, 8, 0, tzinfo=datetime.timezone.utc),
            )
        ]
    )

    query_job = client.query(query, job_config=job_config)  # Make an API request.
    for row in query_job:
        print(row)
    # [END bigquery_query_params_timestamps]
示例9: client_query_w_positional_params
# Required import: from google.cloud import bigquery
# Alias used below: from google.cloud.bigquery import ScalarQueryParameter
def client_query_w_positional_params():
    """Demonstrate positional (``?``) query parameters."""
    # [START bigquery_query_params_positional]
    from google.cloud import bigquery

    # Construct a BigQuery client object.
    client = bigquery.Client()

    query = """
        SELECT word, word_count
        FROM `bigquery-public-data.samples.shakespeare`
        WHERE corpus = ?
        AND word_count >= ?
        ORDER BY word_count DESC;
    """
    # Set the name to None to use positional parameters.
    # Note that you cannot mix named and positional parameters.
    params = [
        bigquery.ScalarQueryParameter(None, "STRING", "romeoandjuliet"),
        bigquery.ScalarQueryParameter(None, "INT64", 250),
    ]
    job_config = bigquery.QueryJobConfig(query_parameters=params)

    query_job = client.query(query, job_config=job_config)  # Make an API request.
    for row in query_job:
        print("{}: \t{}".format(row.word, row.word_count))
    # [END bigquery_query_params_positional]
示例10: client_query_w_named_params
# Required import: from google.cloud import bigquery
# Alias used below: from google.cloud.bigquery import ScalarQueryParameter
def client_query_w_named_params():
    """Demonstrate named (``@name``) query parameters."""
    # [START bigquery_query_params_named]
    from google.cloud import bigquery

    # Construct a BigQuery client object.
    client = bigquery.Client()

    query = """
        SELECT word, word_count
        FROM `bigquery-public-data.samples.shakespeare`
        WHERE corpus = @corpus
        AND word_count >= @min_word_count
        ORDER BY word_count DESC;
    """
    # Each ScalarQueryParameter name matches an @placeholder in the query.
    params = [
        bigquery.ScalarQueryParameter("corpus", "STRING", "romeoandjuliet"),
        bigquery.ScalarQueryParameter("min_word_count", "INT64", 250),
    ]
    job_config = bigquery.QueryJobConfig(query_parameters=params)

    query_job = client.query(query, job_config=job_config)  # Make an API request.
    for row in query_job:
        print("{}: \t{}".format(row.word, row.word_count))
    # [END bigquery_query_params_named]
示例11: client_query_w_array_params
# Required import: from google.cloud import bigquery
# Alias used below: from google.cloud.bigquery import ScalarQueryParameter
def client_query_w_array_params():
    """Demonstrate mixing a scalar and an ARRAY query parameter."""
    # [START bigquery_query_params_arrays]
    from google.cloud import bigquery

    # Construct a BigQuery client object.
    client = bigquery.Client()

    query = """
        SELECT name, sum(number) as count
        FROM `bigquery-public-data.usa_names.usa_1910_2013`
        WHERE gender = @gender
        AND state IN UNNEST(@states)
        GROUP BY name
        ORDER BY count DESC
        LIMIT 10;
    """
    # @states is expanded with UNNEST, so it is bound as an array parameter.
    params = [
        bigquery.ScalarQueryParameter("gender", "STRING", "M"),
        bigquery.ArrayQueryParameter("states", "STRING", ["WA", "WI", "WV", "WY"]),
    ]
    job_config = bigquery.QueryJobConfig(query_parameters=params)

    query_job = client.query(query, job_config=job_config)  # Make an API request.
    for row in query_job:
        print("{}: \t{}".format(row.name, row.count))
    # [END bigquery_query_params_arrays]
示例12: scalar_to_query_parameter
# Required import: from google.cloud import bigquery
# Alias used below: from google.cloud.bigquery import ScalarQueryParameter
def scalar_to_query_parameter(value, name=None):
    """Convert a scalar value into a query parameter.

    Args:
        value (Any):
            A scalar value to convert into a query parameter.
        name (str):
            (Optional) Name of the query parameter.

    Returns:
        google.cloud.bigquery.ScalarQueryParameter:
            A query parameter corresponding with the type and value of the plain
            Python object.

    Raises:
        google.cloud.bigquery.dbapi.exceptions.ProgrammingError:
            if the type cannot be determined.
    """
    param_type = bigquery_scalar_type(value)
    if param_type is None:
        message = "encountered parameter {} with value {} of unexpected type".format(
            name, value
        )
        raise exceptions.ProgrammingError(message)
    return bigquery.ScalarQueryParameter(name, param_type, value)
示例13: setParameter
# Required import: from google.cloud import bigquery
# Alias used below: from google.cloud.bigquery import ScalarQueryParameter
def setParameter(self, type_, value):
    """Prepare a parameter for a parameterized query.

    As documented by Google, only standard SQL syntax supports parameters
    in queries. See
    https://github.com/GoogleCloudPlatform/python-docs-samples/blob/master/bigquery/cloud-client/query_params.py
    """
    # A name of None yields a positional parameter (the ? symbol in the
    # query). Note that you cannot mix named and positional parameters.
    formatted_value = self.varToString(value)
    return bigquery.ScalarQueryParameter(None, type_, formatted_value)
示例14: querySourceItems
# Required import: from google.cloud import bigquery
# Alias used below: from google.cloud.bigquery import ScalarQueryParameter
def querySourceItems(self, convOptions):
    """Query the database for list of all source clinical items (medications, etc.)
    and yield the results one at a time. If startDate provided, only return items whose
    occurrence date is on or after that date.
    """
    # TODO need to figure out how to pass date to query in BQ using SQLQuery object
    query = "SELECT {} FROM {}".format(', '.join(self.HEADERS), SOURCE_TABLE)
    if convOptions.startDate is not None:
        # Trailing space left deliberately so a following 'AND' clause joins cleanly.
        query += ' WHERE trtmnt_tm_begin_dt_jittered >= @startDate '
    if convOptions.endDate is not None:
        # Open the WHERE here if there was no startDate clause; otherwise chain with AND.
        query += ' WHERE ' if convOptions.startDate is None else 'AND'
        query += ' trtmnt_tm_begin_dt_jittered < @endDate'
    query += ' ORDER BY trtmnt_tm_begin_dt_jittered'
    query += ';'
    # NOTE(review): both named parameters are always supplied, even when the
    # query text references neither (value is then None) — presumably the
    # queryBQ wrapper / BigQuery tolerates this; confirm.
    query_params = [
        bigquery.ScalarQueryParameter(
            'startDate',
            'TIMESTAMP',
            convOptions.startDate,
        ),
        bigquery.ScalarQueryParameter(
            'endDate',
            'TIMESTAMP',
            convOptions.endDate,
        )
    ]
    query_job = self.bqClient.queryBQ(str(query), query_params=query_params, location='US', batch_mode=False,
                                      verbose=True)
    for row in query_job:  # API request - fetches results
        rowModel = RowItemModel(list(row.values()), self.HEADERS)
        log.debug("rowModel: {}".format(rowModel))
        yield self.normalizeRowModel(rowModel, convOptions)  # Yield one row worth of data at a time to avoid having to keep the whole result set in memory
示例15: create_table
# Required import: from google.cloud import bigquery
# Alias used below: from google.cloud.bigquery import ScalarQueryParameter
def create_table(
        from_date,
        to_date,
        table_name,
        query_file,
        dataset,
        price_scaling,
        client):
    """Creates training, validation, and test tables.

    Specifies parameters to be passed to the SQL query, specifies name for the
    new table being created, generates a dynamic query and executes the query.

    Args:
        from_date: Intial date for table's data.
        to_date: Final date for table's data.
        table_name: Name for table.
        query_file: Path to file containing the SQL query.
        dataset: `BigQuery` `Dataset` in which to save the table.
        price_scaling: Float used to scale (multiply with) the labels (price)
            for scaling purposes. Given the initialization schemes and
            normalized inputs, the expected values for the outputs will be close
            to 0. This means that by scaling the labels you will not be too far
            off from the start, which helps convergence. If a target is too big,
            the mean squared error will be huge which means your gradients will
            also be huge and could lead to numerical instability.
        client: `google.cloud.bigquery.client.Client` instance.
    """
    # Named parameters consumed by the SQL template in query_file.
    query_params = [
        bigquery.ScalarQueryParameter('from_date', 'STRING', from_date),
        bigquery.ScalarQueryParameter('to_date', 'STRING', to_date),
        bigquery.ScalarQueryParameter('price_scaling', 'FLOAT64', price_scaling),
    ]

    # Write query results into the destination table instead of returning rows.
    job_config = bigquery.QueryJobConfig()
    job_config.query_parameters = query_params
    job_config.destination = client.dataset(dataset).table(table_name)

    with open(query_file, 'r') as query_source:
        inner_query = query_source.read()

    run_query(
        client,
        scalar_extraction_query(inner_query),
        job_config)