This article collects and organizes typical usage examples of the config.debug method in Python. If you have been wondering what exactly config.debug does, how to use it, or what real code that uses it looks like, the curated examples below should help. You can also explore further usage examples from the config module it belongs to.
The following section presents 15 code examples of the config.debug method, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
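For orientation before the examples: in all of these projects, config.debug is simply a module-level boolean flag that other modules check before emitting diagnostic output. Below is a minimal, hypothetical sketch of that pattern; the module layout and function names are illustrative only and not taken from any of the projects shown later.

# config.py - hypothetical module holding shared runtime flags
debug = False

# app.py - consumer that gates verbose output on the flag
import config

def run():
    if config.debug:
        print("DEBUG: starting run with verbose output enabled")
    # ... actual work goes here ...

if __name__ == "__main__":
    config.debug = True   # typically set from a --debug command-line switch
    run()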
Example 1: generate_table_sql
# Required import: import config [as alias]
# Or: from config import debug [as alias]
def generate_table_sql(file_names, column_data_type):
    """Generates SQL for the table to load data.

    Parameters
    ----------
    file_names : str
        The file_names to scan for columns
    column_data_type : str
        The column data type to use
    """
    col_list = []
    for file_name in file_names:
        f.debug("Reading file {0}".format(file_name))
        with f.open_file(file_name) as file:
            reader = f.get_csv_reader(file)
            columns_to_add = f.read_header(reader)
            f.debug("Columns to add {0}".format(columns_to_add))
            # Add columns to list implicitly removing duplicates for when going over multiple files
            col_list.extend(col for col in columns_to_add if col not in col_list)
    print_table_and_columns(col_list, column_data_type)
Example 2: load_files
# Required import: import config [as alias]
# Or: from config import debug [as alias]
def load_files(file_names):
    """Loads all files into the database.

    Parameters
    ----------
    file_names : str
        All the file names to load into the database
    """
    for file_name in file_names:
        print()
        print("Loading file {0}".format(file_name))
        f.debug("Opening file handler for '{0}'".format(file_name))
        with f.open_file(file_name) as file:
            try:
                read_and_load_file(file)
                print("File loaded.")
            except StopIteration:
                print("File is empty: {0}".format(file_name))
            except Exception as err:
                f.error("Error while loading file into table: {0}".format(file.name))
                exception, traceback = f.get_exception_details()
                f.error(exception)
                f.debug(traceback)
                cfg.data_loading_error = True
                print("Skipping file.")
    print()
Example 3: search
# Required import: import config [as alias]
# Or: from config import debug [as alias]
def search(self):
    print("Search in OFD-YA...")
    url = self.url_receipt_get.format(self.kkt, self.fiscal_id)
    request = requests.get(url)
    if request.status_code == 200 and request.text != '{}':
        self.raw = json.dumps(
            request.json(), ensure_ascii=False).encode('utf8')
        self.receipt_data = json.loads(self.raw)
        filename = self.get_receipt_file_name()
        if not os.path.exists(filename):
            with open(filename, 'w') as outfile:
                # receipt_data is a dict, so serialize it back to JSON before writing
                json.dump(self.receipt_data, outfile, ensure_ascii=False)
        return True
    else:
        print("Error {} while searching in ofd-ya!".format(request.status_code))
        if config.debug:
            print(request.text)
    return False
Example 4: debug
# Required import: import config [as alias]
# Or: from config import debug [as alias]
def debug(output):
    """Print debug output.

    Parameters
    ----------
    output : Any
        The output to print
    """
    if cfg.debug:
        if isinstance(output, list):
            output = ", ".join(output)
        elif isinstance(output, dict):
            output = ", ".join(str(key) + ": " + str(value) for key, value in output.items())
        print_color(TerminalColor.YELLOW, "DEBUG: {0}: {1}".format(datetime.datetime.now(), output))
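As a quick, self-contained illustration of how such a helper behaves, here is a simplified stand-in; plain print replaces the project's print_color/TerminalColor utilities, and the inline cfg class is only a stand-in for its config module.

import datetime

class cfg:  # stand-in for the project's config module
    debug = True

def debug(output):
    """Simplified mirror of the helper above: prints only when cfg.debug is set."""
    if cfg.debug:
        if isinstance(output, list):
            output = ", ".join(output)
        elif isinstance(output, dict):
            output = ", ".join(str(k) + ": " + str(v) for k, v in output.items())
        print("DEBUG: {0}: {1}".format(datetime.datetime.now(), output))

debug(["first_name", "last_name"])   # DEBUG: <timestamp>: first_name, last_name
debug({"rows": 42})                  # DEBUG: <timestamp>: rows: 42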
Example 5: read_and_load_file
# Required import: import config [as alias]
# Or: from config import debug [as alias]
def read_and_load_file(file):
    """Reads and loads file.

    Parameters
    ----------
    file : file_object
        The file to load
    """
    reader = f.get_csv_reader(file)
    col_map = f.read_header(reader)
    f.debug("Column map: {0}".format(col_map))
    for line in reader:
        load_data(col_map, line)
    load_data(col_map, None)
Example 6: test_load_file_with_insufficient_columns
# Required import: import config [as alias]
# Or: from config import debug [as alias]
def test_load_file_with_insufficient_columns(self):
    print("test_load_file_with_insufficient_columns")
    self.assertEqual(f.ExitCodes.SUCCESS.value,
                     csv2db.run(
                         ["load",
                          "-f", "../resources/bad/201811-citibike-tripdata-not-enough-columns.csv",
                          "-u", login["user"],
                          "-p", login["password"],
                          "-d", login["database"],
                          "-t", login["table"],
                          "--debug"
                          ]
                     )
                     )
Example 7: test_exit_code_DATA_LOADING_ERROR
# Required import: import config [as alias]
# Or: from config import debug [as alias]
def test_exit_code_DATA_LOADING_ERROR(self):
    print("test_exit_code_DATA_LOADING_ERROR")
    self.assertEqual(f.ExitCodes.DATA_LOADING_ERROR.value,
                     csv2db.run(
                         ["load",
                          "-f", "../resources/201811-citibike-tripdata.csv",
                          "-u", login["user"],
                          "-p", login["password"],
                          "-d", login["database"],
                          "-t", "DOES_NOT_EXIST",
                          "--debug"
                          ]
                     )
                     )
Example 8: setUp
# Required import: import config [as alias]
# Or: from config import debug [as alias]
def setUp(self):
    # Set the default column separator for all tests
    cfg.column_separator = ","
    cfg.quote_char = '"'
    cfg.data_loading_error = False
    cfg.debug = False
Example 9: __init__
# Required import: import config [as alias]
# Or: from config import debug [as alias]
def __init__(self, old, new):
    """
    :param list old: List of 'old' lines to compare to new
    :param list new: List of 'new' lines to compare to old
    :return:
    """
    # Compares best when items are sorted
    old.sort()
    new.sort()
    self.add = []
    self.delete = []
    self.equal = []
    s = difflib.SequenceMatcher(None, old, new)
    for tag, i1, i2, j1, j2 in s.get_opcodes():
        # This helps to understand what we're adding and removing. From difflib documentation
        if config.debug:
            print("%7s a[%d:%d] (%s) b[%d:%d] (%s)" % (tag, i1, i2, old[i1:i2], j1, j2, new[j1:j2]))
        # replace takes out items from list a[i1:i2] and adds items from list b[j1:j2]
        if tag == 'replace':
            for i in old[i1:i2]:
                self.delete.append(i)
            for i in new[j1:j2]:
                self.add.append(i)
        # delete records are not seen in list b. Remove items from list a[i1:i2]
        elif tag == 'delete':
            for i in old[i1:i2]:
                self.delete.append(i)
        # insert records are not seen in list a. Add items from list b.
        elif tag == 'insert':
            for i in new[j1:j2]:
                self.add.append(i)
        elif tag == 'equal':
            for i in old[i1:i2]:
                self.equal.append(i)
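For a quick sense of what those SequenceMatcher opcodes look like in practice, here is a standalone sketch using only the standard library; it is independent of the class above and uses made-up input lists.

import difflib

old = sorted(["alpha", "bravo", "charlie"])
new = sorted(["alpha", "charlie", "delta"])

s = difflib.SequenceMatcher(None, old, new)
for tag, i1, i2, j1, j2 in s.get_opcodes():
    print("%7s a[%d:%d] (%s) b[%d:%d] (%s)" % (tag, i1, i2, old[i1:i2], j1, j2, new[j1:j2]))

# Output:
#   equal a[0:1] (['alpha']) b[0:1] (['alpha'])
#  delete a[1:2] (['bravo']) b[1:1] ([])
#   equal a[2:3] (['charlie']) b[1:2] (['charlie'])
#  insert a[3:3] ([]) b[2:3] (['delta'])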
Example 10: setUpClass
# Required import: import config [as alias]
# Or: from config import debug [as alias]
@classmethod
def setUpClass(cls):
    """ Setup """
    config.debug = False
    cls.OFD = ofd.OFDProvider(True).detect(
        "t=20170712T133051&s=32.50&fn=8710000100924702&i=1666&fp=3502055476&n=1")
Example 11: setUpClass
# Required import: import config [as alias]
# Or: from config import debug [as alias]
@classmethod
def setUpClass(cls):
    """ Setup """
    config.debug = False
    cls.OFD = ofd.OFDProvider(True).detect(
        "t=20170714T1311&s=35.00&fn=8710000100837497&i=231&fp=2921685295&n=1")
Example 12: setUpClass
# Required import: import config [as alias]
# Or: from config import debug [as alias]
@classmethod
def setUpClass(cls):
    """ Setup """
    config.debug = False
    cls.OFD = ofd.OFDProvider(True).detect(
        "t=20170305T005100&s=140.00&fn=8710000100161943&i=8018&fp=2398195357&n=1",
        "0000069245023747")
Example 13: load_data
# Required import: import config [as alias]
# Or: from config import debug [as alias]
def load_data(col_map, data):
    """Loads the data into the database.

    Parameters
    ----------
    col_map : [str,]
        The columns to load the data into
    data : [str,]
        The data to load. If data is None the array will be loaded and flushed.
    """
    if data is not None and len(data) > 0:
        # If the data has more values than the header provided, ignore the end (green data set has that)
        while len(data) > len(col_map):
            f.debug("Removing extra row value entry not present in the header.")
            data.pop()
        # tuple or dictionary only for SQL Server
        cfg.input_data.append(tuple(data))
    # If batch size has been reached or input array should be flushed
    if (len(cfg.input_data) == cfg.batch_size) or (data is None and len(cfg.input_data) > 0):
        f.debug("Executing statement:")
        stmt = generate_statement(col_map)
        f.debug(stmt)
        cur = cfg.conn.cursor()
        try:
            f.executemany(cur, stmt)
        except Exception as err:
            # Rollback old batch (needed for at least Postgres to finish transaction)
            cfg.conn.rollback()
            # If debug output is enabled, find failing record
            if cfg.debug:
                for record in cfg.input_data:
                    try:
                        cur.execute(stmt, record)
                    except Exception as err1:
                        f.debug("Error with record: {0}".format(record))
                        # Rollback old batch (needed for at least Postgres to finish transaction)
                        cfg.conn.rollback()
                        cur.close()
                        cfg.input_data.clear()
                        raise
            # Debug output is not enabled, clear current batch and raise error
            else:
                cur.close()
                cfg.input_data.clear()
                raise
        f.debug("Commit")
        cfg.conn.commit()
        cur.close()
        f.verbose("{0} rows loaded.".format(len(cfg.input_data)))
        cfg.input_data.clear()
Example 14: process
# Required import: import config [as alias]
# Or: from config import debug [as alias]
def process(db_add, db_del, db_equal):
    """ Processes RepDB entries in order for syslog, stdout, csv file, etc

    :param repDB db_add: RepDB entry to show added items
    :param repDB db_del: RepDB entry to show deleted items
    :param repDB db_equal: RepDB entry to show unchanged values
    """
    # fun toy for heatmaps later
    f = open('cache/coords.txt', 'w')
    count_add = 0
    count_del = 0
    count_equal = 0
    for line in db_add:
        for i in line:
            count_add += 1
            msg = buildcef('add', i)
            syslog(msg)
            if config.debug:
                printjson('add', i)
            f.write("%s %s\n" % (i['latitude'], i['longitude']))
    for line in db_del:
        for i in line:
            count_del += 1
            msg = buildcef('delete', i)
            if config.debug:
                printjson('delete', i)
            syslog(msg)
    for line in db_equal:
        for i in line:
            count_equal += 1
            msg = buildcef('update', i)
            syslog(msg)
            if config.debug:
                printjson('update', i)
            f.write("%s %s\n" % (i['latitude'], i['longitude']))
    f.close()
    print("Sent {0} New, {1} deleted, and {2} unchanged entries to {3}:{4}".format(
        count_add, count_del, count_equal, config.host, config.port))

# Only run code if invoked directly: This allows a user to import modules without having to run through everything
Example 15: __init__
# Required import: import config [as alias]
# Or: from config import debug [as alias]
def __init__(self, args):
    # load dictionary and embedding file
    with open(config.embedding, "rb") as f:
        embedding = pickle.load(f)
    embedding = torch.tensor(embedding,
                             dtype=torch.float).to(config.device)
    with open(config.word2idx_file, "rb") as f:
        word2idx = pickle.load(f)

    # train, dev loader
    print("load train data")
    self.train_loader = get_loader(config.train_src_file,
                                   config.train_trg_file,
                                   word2idx,
                                   use_tag=True,
                                   batch_size=config.batch_size,
                                   debug=config.debug)
    self.dev_loader = get_loader(config.dev_src_file,
                                 config.dev_trg_file,
                                 word2idx,
                                 use_tag=True,
                                 batch_size=128,
                                 debug=config.debug)

    train_dir = os.path.join("./save", "seq2seq")
    self.model_dir = os.path.join(
        train_dir, "train_%d" % int(time.strftime("%m%d%H%M%S")))
    if not os.path.exists(self.model_dir):
        os.makedirs(self.model_dir)

    self.model = Seq2seq(embedding)
    self.model = self.model.to(config.device)

    if len(args.model_path) > 0:
        print("load check point from: {}".format(args.model_path))
        state_dict = torch.load(args.model_path,
                                map_location="cpu")
        self.model.load_state_dict(state_dict)

    params = self.model.parameters()
    self.lr = config.lr
    self.optim = optim.SGD(params, self.lr, momentum=0.8)
    # self.optim = optim.Adam(params)
    self.criterion = nn.CrossEntropyLoss(ignore_index=0)
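In this last example, config.debug is forwarded into the data loaders; in setups like this the flag commonly just truncates the dataset so a full pipeline run finishes quickly. Below is a hedged sketch of that idea only; get_loader's real signature and internals are not shown in the source, so everything here is illustrative.

import torch
from torch.utils.data import DataLoader, TensorDataset

def get_loader_sketch(token_ids, batch_size, debug=False):
    # Hypothetical behavior: when the debug flag is on, keep only a small slice of the data
    if debug:
        token_ids = token_ids[:100]
    dataset = TensorDataset(torch.tensor(token_ids))
    return DataLoader(dataset, batch_size=batch_size, shuffle=True)

loader = get_loader_sketch(list(range(10000)), batch_size=32, debug=True)
print(len(loader))  # 4 batches (100 examples) instead of 313 for the full 10,000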