本文整理汇总了Python中dbt.logger.GLOBAL_LOGGER.debug方法的典型用法代码示例。如果您正苦于以下问题:Python GLOBAL_LOGGER.debug方法的具体用法?Python GLOBAL_LOGGER.debug怎么用?Python GLOBAL_LOGGER.debug使用的例子?那么恭喜您, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类dbt.logger.GLOBAL_LOGGER
的用法示例。
在下文中一共展示了GLOBAL_LOGGER.debug方法的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: run_sql
# 需要导入模块: from dbt.logger import GLOBAL_LOGGER [as 别名]
# 或者: from dbt.logger.GLOBAL_LOGGER import debug [as 别名]
def run_sql(self, query, fetch='None', kwargs=None, connection_name=None):
    """Render *query* and execute it against the test adapter.

    ``fetch`` selects what to return: ``'one'`` for a single row,
    ``'all'`` for every row, anything else for no result.  Empty
    queries are a no-op.  BigQuery and Presto are delegated to their
    dedicated helpers.
    """
    if connection_name is None:
        connection_name = '__test'

    # Nothing to do for a blank query.
    if not query.strip():
        return

    sql = self.transform_sql(query, kwargs=kwargs)

    # These adapters have their own execution paths.
    if self.adapter_type == 'bigquery':
        return self.run_sql_bigquery(sql, fetch)
    if self.adapter_type == 'presto':
        return self.run_sql_presto(sql, fetch, connection_name)

    conn = self.adapter.acquire_connection(connection_name)
    with conn.handle.cursor() as cur:
        logger.debug('test connection "{}" executing: {}'.format(connection_name, sql))
        try:
            cur.execute(sql)
            conn.handle.commit()
            if fetch == 'one':
                return cur.fetchone()
            if fetch == 'all':
                return cur.fetchall()
            return
        except BaseException as e:
            # Roll back and surface the failing query before re-raising.
            conn.handle.rollback()
            print(query)
            print(e)
            raise e
        finally:
            conn.transaction_open = False
示例2: open
# 需要导入模块: from dbt.logger import GLOBAL_LOGGER [as 别名]
# 或者: from dbt.logger.GLOBAL_LOGGER import debug [as 别名]
def open(cls, connection):
    """Open *connection* by creating a BigQuery client handle.

    Already-open connections are returned unchanged.  Missing default
    credentials trigger an interactive gcloud login and one retry.  Any
    other failure marks the connection failed and raises
    ``FailedToConnectException``.
    """
    if connection.state == 'open':
        logger.debug('Connection is already open, skipping open.')
        return connection

    try:
        handle = cls.get_bigquery_client(connection.credentials)
    except google.auth.exceptions.DefaultCredentialsError:
        # No ambient credentials: prompt the user to log in, then retry.
        logger.info("Please log into GCP to continue")
        dbt.clients.gcloud.setup_default_credentials()
        handle = cls.get_bigquery_client(connection.credentials)
    except Exception as e:
        # BUG FIX: the original handler executed a bare `raise` before
        # these lines, making the logging, state cleanup, and the
        # intended FailedToConnectException unreachable dead code.
        logger.debug("Got an error when attempting to create a bigquery "
                     "client: '{}'".format(e))
        connection.handle = None
        connection.state = 'fail'
        raise dbt.exceptions.FailedToConnectException(str(e))

    connection.handle = handle
    connection.state = 'open'
    return connection
示例3: expand_column_types
# 需要导入模块: from dbt.logger import GLOBAL_LOGGER [as 别名]
# 或者: from dbt.logger.GLOBAL_LOGGER import debug [as 别名]
def expand_column_types(self, goal, current, model_name=None):
    """Widen string columns of *current* so they can hold *goal*'s data.

    For every column that exists in both relations and can expand, alter
    *current*'s column to the reference column's string size.
    """
    goal_cols = {
        col.name: col
        for col in self.get_columns_in_relation(goal, model_name=model_name)
    }
    current_cols = {
        col.name: col
        for col in self.get_columns_in_relation(current, model_name=model_name)
    }

    for name, goal_col in goal_cols.items():
        cur_col = current_cols.get(name)
        # Skip columns missing from the target or that cannot expand.
        if cur_col is None or not cur_col.can_expand_to(goal_col):
            continue
        new_type = self.Column.string_type(goal_col.string_size())
        logger.debug("Changing col type from %s to %s in table %s",
                     cur_col.data_type, new_type, current)
        self.alter_column_type(current, name, new_type,
                               model_name=model_name)

    if model_name is None:
        self.release_connection('master')
示例4: clone_and_checkout
# 需要导入模块: from dbt.logger import GLOBAL_LOGGER [as 别名]
# 或者: from dbt.logger.GLOBAL_LOGGER import debug [as 别名]
def clone_and_checkout(repo, cwd, dirname=None, remove_git_dir=False,
                       branch=None):
    """Clone *repo* into *cwd* (or update it if already cloned) and check
    out *branch*.  Returns the directory name of the checkout.
    """
    exists = None
    try:
        # On success, `err` is the raw stderr from the clone subprocess.
        # NOTE(review): on this path `err` appears to be bytes (it is
        # `.decode()`d below) — confirm against `clone()`'s return type.
        _, err = clone(repo, cwd, dirname=dirname,
                       remove_git_dir=remove_git_dir)
    except dbt.exceptions.CommandResultError as exc:
        # On failure, `err` is re-bound to a decoded str.  The only
        # tolerated failure is "destination already exists".
        err = exc.stderr.decode('utf-8')
        exists = re.match("fatal: destination path '(.+)' already exists", err)
        if not exists:  # something else is wrong, raise it
            raise

    directory = None
    start_sha = None
    if exists:
        # `err` is str here (set in the except handler above).
        directory = exists.group(1)
        logger.debug('Updating existing dependency %s.', directory)
    else:
        # This branch is only reached when clone() succeeded, so `err`
        # is still the raw bytes value and must be decoded.
        matches = re.match("Cloning into '(.+)'", err.decode('utf-8'))
        directory = matches.group(1)
        logger.debug('Pulling new dependency %s.', directory)

    full_path = os.path.join(cwd, directory)
    start_sha = get_current_sha(full_path)
    checkout(full_path, repo, branch)
    end_sha = get_current_sha(full_path)

    # Report what happened, abbreviating SHAs to 7 characters.
    if exists:
        if start_sha == end_sha:
            logger.debug(' Already at %s, nothing to do.', start_sha[:7])
        else:
            logger.debug(' Updated checkout from %s to %s.',
                         start_sha[:7], end_sha[:7])
    else:
        logger.debug(' Checked out at %s.', end_sha[:7])
    return directory
示例5: patch_nodes
# 需要导入模块: from dbt.logger import GLOBAL_LOGGER [as 别名]
# 或者: from dbt.logger.GLOBAL_LOGGER import debug [as 别名]
def patch_nodes(self, patches):
    """Patch nodes with the given dict of patches. Note that this consumes
    the input!

    Patches that match no model node are reported at debug level rather
    than raised, since they may refer to disabled models.
    """
    # There is no name->node mapping, only unique-id keys, so scan every
    # node and pop its patch (if any) out of the input dict.
    for node in self.nodes.values():
        if node.resource_type != NodeType.Model:
            continue
        patch = patches.pop(node.name, None)
        if patch:
            node.patch(patch)

    # Anything left over documented a model we never found.  Patches
    # aren't nodes, so the existing target_not_found warning can't be
    # reused here.
    for leftover in patches.values():
        logger.debug((
            'WARNING: Found documentation for model "{}" which was '
            'not found or is disabled').format(leftover.name)
        )
示例6: render_profile
# 需要导入模块: from dbt.logger import GLOBAL_LOGGER [as 别名]
# 或者: from dbt.logger.GLOBAL_LOGGER import debug [as 别名]
def render_profile(cls, raw_profile, profile_name, target_override,
                   cli_vars):
    """This is a containment zone for the hateful way we're rendering
    profiles.

    Returns a ``(target_name, profile_data)`` tuple where
    ``profile_data`` is the rendered data for the selected target.
    """
    renderer = ConfigRenderer(cli_vars=cli_vars)

    # rendering profiles is a bit complex. Two constraints cause trouble:
    # 1) users should be able to use environment/cli variables to specify
    #    the target in their profile.
    # 2) Missing environment/cli variables in profiles/targets that don't
    #    end up getting selected should not cause errors.
    # so first we'll just render the target name, then we use that rendered
    # name to extract a profile that we can render.
    if target_override is not None:
        target_name = target_override
    elif 'target' in raw_profile:
        # render the target if it was parsed from yaml
        target_name = renderer.render_value(raw_profile['target'])
    else:
        target_name = 'default'
        # NOTE(review): this debug call is placed in the fallback branch
        # on the assumption it only applies when no target was specified
        # (the message says so) — confirm against upstream source.
        logger.debug(
            "target not specified in profile '{}', using '{}'"
            .format(profile_name, target_name)
        )

    raw_profile_data = cls._get_profile_data(
        raw_profile, profile_name, target_name
    )
    profile_data = renderer.render_profile_data(raw_profile_data)
    return target_name, profile_data
示例7: handle_error
# 需要导入模块: from dbt.logger import GLOBAL_LOGGER [as 别名]
# 或者: from dbt.logger.GLOBAL_LOGGER import debug [as 别名]
def handle_error(cls, error, message, sql):
    """Log a database *error* (with the offending *sql*) and re-raise it
    as a ``DatabaseException`` whose message joins all sub-error texts.
    """
    logger.debug(message.format(sql=sql))
    logger.debug(error)
    messages = [item['message'] for item in error.errors]
    raise dbt.exceptions.DatabaseException("\n".join(messages))
示例8: gcloud_installed
# 需要导入模块: from dbt.logger import GLOBAL_LOGGER [as 别名]
# 或者: from dbt.logger.GLOBAL_LOGGER import debug [as 别名]
def gcloud_installed():
    """Return True if the gcloud CLI can be invoked, False otherwise."""
    try:
        run_cmd('.', ['gcloud', '--version'])
    except OSError as exc:
        # gcloud binary missing or not executable.
        logger.debug(exc)
        return False
    return True
示例9: initialize_tracking
# 需要导入模块: from dbt.logger import GLOBAL_LOGGER [as 别名]
# 或者: from dbt.logger.GLOBAL_LOGGER import debug [as 别名]
def initialize_tracking(cookie_dir):
    """Initialize the module-global ``active_user`` for event tracking.

    Best-effort: if initialization fails for any reason, fall back to an
    anonymous ``User(None)`` instead of propagating the error — tracking
    must never break the tool.
    """
    global active_user
    # The global is assigned before initialize() runs; keep this order in
    # case initialize() (or anything it calls) reads the global.
    active_user = User(cookie_dir)
    try:
        active_user.initialize()
    except Exception:
        logger.debug('Got an exception trying to initialize tracking',
                     exc_info=True)
        active_user = User(None)
示例10: drop_schema
# 需要导入模块: from dbt.logger import GLOBAL_LOGGER [as 别名]
# 或者: from dbt.logger.GLOBAL_LOGGER import debug [as 别名]
def drop_schema(self, database, schema, model_name=None):
    """Drop *schema* in *database* by executing the drop-schema macro."""
    logger.debug('Dropping schema "%s"."%s".', database, schema)
    macro_kwargs = {
        'database_name': self.quote_as_configured(database, 'database'),
        'schema_name': self.quote_as_configured(schema, 'schema'),
    }
    self.execute_macro(
        DROP_SCHEMA_MACRO_NAME,
        kwargs=macro_kwargs,
        connection_name=model_name,
    )
示例11: compile_node
# 需要导入模块: from dbt.logger import GLOBAL_LOGGER [as 别名]
# 或者: from dbt.logger.GLOBAL_LOGGER import debug [as 别名]
def compile_node(self, node, manifest, extra_context=None):
    """Compile *node*: render its raw SQL, prepend any CTEs, and wrap
    the result according to its resource type.  Returns the injected
    (compiled) node.
    """
    if extra_context is None:
        extra_context = {}

    logger.debug("Compiling {}".format(node.get('unique_id')))

    # Start from the parsed node's data with compilation fields reset.
    data = node.to_dict()
    data.update({
        'compiled': False,
        'compiled_sql': None,
        'extra_ctes_injected': False,
        'extra_ctes': [],
        'injected_sql': None,
    })
    compiled_node = CompiledNode(**data)

    # Build the jinja rendering context, layering caller-provided extras
    # on top of the runtime context.
    context = dbt.context.runtime.generate(
        compiled_node, self.config, manifest)
    context.update(extra_context)

    compiled_node.compiled_sql = dbt.clients.jinja.get_rendered(
        node.get('raw_sql'),
        context,
        node)

    compiled_node.compiled = True

    # Inject any ephemeral-model CTEs this node depends on.
    injected_node, _ = prepend_ctes(compiled_node, manifest)

    should_wrap = {NodeType.Test, NodeType.Operation}
    if injected_node.resource_type in should_wrap:
        # data tests get wrapped in count(*)
        # TODO : move this somewhere more reasonable
        if 'data' in injected_node.tags and \
           is_type(injected_node, NodeType.Test):
            injected_node.wrapped_sql = (
                "select count(*) from (\n{test_sql}\n) sbq").format(
                    test_sql=injected_node.injected_sql)
        else:
            # don't wrap schema tests or analyses.
            injected_node.wrapped_sql = injected_node.injected_sql
    elif is_type(injected_node, NodeType.Archive):
        # unfortunately we do everything automagically for
        # archives. in the future it'd be nice to generate
        # the SQL at the parser level.
        pass
    elif (is_type(injected_node, NodeType.Model) and
            get_materialization(injected_node) == 'ephemeral'):
        # ephemeral models are only materialized as CTEs in their
        # dependents, so there is nothing to wrap here either.
        pass
    else:
        injected_node.wrapped_sql = None

    return injected_node
示例12: parse_macro_file
# 需要导入模块: from dbt.logger import GLOBAL_LOGGER [as 别名]
# 或者: from dbt.logger.GLOBAL_LOGGER import debug [as 别名]
def parse_macro_file(self, macro_file_path, macro_file_contents, root_path,
                     package_name, resource_type, tags=None, context=None):
    """Parse one macro file and return a dict of unique_id -> ParsedMacro
    for every macro in it whose type matches *resource_type*.
    """
    logger.debug("Parsing {}".format(macro_file_path))

    to_return = {}

    if tags is None:
        tags = []

    # NOTE(review): `context` is re-bound unconditionally here, so the
    # `context` parameter is effectively unused — confirm whether the
    # rebinding was meant to be guarded like `tags` above.
    context = {}

    # change these to actual kwargs
    base_node = UnparsedMacro(
        path=macro_file_path,
        original_file_path=macro_file_path,
        package_name=package_name,
        raw_sql=macro_file_contents,
        root_path=root_path,
    )

    try:
        ast = dbt.clients.jinja.parse(macro_file_contents)
    except dbt.exceptions.CompilationException as e:
        # Attach the node so the error message can identify the file.
        e.node = base_node
        raise e

    for macro_node in ast.find_all(jinja2.nodes.Macro):
        macro_name = macro_node.name

        # Only macros carrying dbt's prefix are recognized; everything
        # else leaves node_type as None and is skipped below.
        node_type = None
        if macro_name.startswith(dbt.utils.MACRO_PREFIX):
            node_type = NodeType.Macro
            name = macro_name.replace(dbt.utils.MACRO_PREFIX, '')

        if node_type != resource_type:
            continue

        unique_id = self.get_path(resource_type, package_name, name)

        merged = dbt.utils.deep_merge(
            base_node.serialize(),
            {
                'name': name,
                'unique_id': unique_id,
                'tags': tags,
                'resource_type': resource_type,
                'depends_on': {'macros': []},
            })

        new_node = ParsedMacro(**merged)

        to_return[unique_id] = new_node

    return to_return
示例13: track
# 需要导入模块: from dbt.logger import GLOBAL_LOGGER [as 别名]
# 或者: from dbt.logger.GLOBAL_LOGGER import debug [as 别名]
def track(user, *args, **kwargs):
    """Send a tracking event on behalf of *user*, honoring opt-out.

    Failures to send are logged at debug level and swallowed — tracking
    must never break the caller.
    """
    if user.do_not_track:
        return

    logger.debug("Sending event: {}".format(kwargs))
    try:
        tracker.track_struct_event(*args, **kwargs)
    except Exception:
        logger.debug(
            "An error was encountered while trying to send an event"
        )
示例14: create_schema
# 需要导入模块: from dbt.logger import GLOBAL_LOGGER [as 别名]
# 或者: from dbt.logger.GLOBAL_LOGGER import debug [as 别名]
def create_schema(self, database, schema, model_name=None):
    """Create *schema* in *database* via the create-schema macro and
    commit on that connection.  Defaults to the 'master' connection.
    """
    logger.debug('Creating schema "%s"."%s".', database, schema)

    conn_name = 'master' if model_name is None else model_name
    macro_kwargs = {
        'database_name': self.quote_as_configured(database, 'database'),
        'schema_name': self.quote_as_configured(schema, 'schema'),
    }
    self.execute_macro(
        CREATE_SCHEMA_MACRO_NAME,
        kwargs=macro_kwargs,
        connection_name=conn_name,
    )
    self.commit_if_has_connection(conn_name)
示例15: cancel
# 需要导入模块: from dbt.logger import GLOBAL_LOGGER [as 别名]
# 或者: from dbt.logger.GLOBAL_LOGGER import debug [as 别名]
def cancel(self, connection):
    """Cancel the query running on *connection* by terminating its
    postgres backend process from the 'master' connection.
    """
    name = connection.name
    backend_pid = connection.handle.get_backend_pid()
    terminate_sql = "select pg_terminate_backend({})".format(backend_pid)
    logger.debug("Cancelling query '{}' ({})".format(name, backend_pid))
    _, cursor = self.add_query(terminate_sql, 'master')
    result = cursor.fetchone()
    logger.debug("Cancel query '{}': {}".format(name, result))