This article collects typical usage examples of the Python method PythonTools.Debug.printWarning. If you are wondering how Debug.printWarning is used in practice, the selected code examples below may help. You can also look further into usage examples of the class PythonTools.Debug that this method belongs to.
Below, 3 code examples of the Debug.printWarning method are shown, sorted by popularity by default.
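Before the examples, a minimal sketch of the shared call pattern may be useful. The DEBUG alias and the inspect.stack() argument follow the examples below; the parse_settings caller is hypothetical, and the exact printWarning signature is assumed from this usage rather than taken from PythonTools documentation.

import inspect
from PythonTools import Debug as DEBUG

def parse_settings(text, flag_verbose = False):
    # hypothetical caller, only to show where a warning call typically sits
    if text == "":
        # printWarning is assumed to take a message and the caller's stack, as in the examples below
        DEBUG.printWarning("settings string is empty, using defaults", inspect.stack())
        return {}
    return dict(item.split("=") for item in text.split(";"))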
Example 1: correlation
# Required import: from PythonTools import Debug [as alias]
# Or: from PythonTools.Debug import printWarning [as alias]
# The example also needs the following imports from the surrounding file;
# the DEBUG alias is inferred from the function body below.
import inspect
import itertools
import numpy
from PythonTools import Debug as DEBUG
def correlation(array, maxtau = 200, step_type = "tau", flag_normalize = True, flag_verbose = False):
    """
    Calculation of the correlation using the method Jan used.
    The method is slow and probably wrong. Use correlation_fft instead.

    For every iteration the copied array is 'rolled' (rotated) left by 1, maxtau times in total. The copied array is multiplied with the original array, but only elements a certain step apart are used. The larger the step size, the quicker the method, but the noisier the result.

    INPUT:
    array (ndarray): 1-d array with the data
    maxtau (int): the maximum shift, also the maximum to which the correlation is calculated. This directly affects the speed of the calculation (scales with N^2?).
    step_type ("1", "tau"): the step size. With "1" the step size is 1, which results in a longer calculation but less noise. With "tau" the step size is the current tau; the calculation is faster but the result noisier.
    flag_normalize (BOOL, True): if True (default), the result is divided by its value at tau = 0.

    CHANGELOG:
    20120215: introduced step_type
    20130131/RB: introduced flag_normalize
    20130204/RB: tested whether 'array2 = numpy.roll(array2, -1)' is better nested in the itertools call, but it makes no real difference to the speed of the function. No changes made.
    """
    DEBUG.verbose("Correlation Jan-style", flag_verbose)

    # remove the mean, then correlate the array with a rolled copy of itself
    array = array - numpy.mean(array)
    array2 = numpy.copy(array)

    c = numpy.zeros(maxtau)
    for i in range(0, maxtau):
        array2 = numpy.roll(array2, -1)

        if step_type == "tau":
            step = i + 1
        elif step_type == "1":
            step = 1
        else:
            DEBUG.printWarning("step_type is not recognized, will use 'tau'", inspect.stack())
            step = i + 1

        # take every 'step'-th element of the product, excluding the wrapped-around tail
        a = list(itertools.islice(array * array2, None, len(array) - i - 1, step))
        c[i] = numpy.sum(a) / len(a)

    if flag_normalize:
        return c / c[0]
    else:
        return c
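A small usage sketch (not part of the original example) shows how correlation would typically be called; the synthetic random signal is made up for illustration.

import numpy

signal = numpy.random.randn(2000)                       # synthetic 1-d data
c = correlation(signal, maxtau = 50, step_type = "1")   # step "1": slower, but less noisy
print(c[0])                                             # 1.0, because flag_normalize defaults to True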
Example 2: find_axes_indices
# Required import: from PythonTools import Debug [as alias]
# Or: from PythonTools.Debug import printWarning [as alias]
# The function below assumes the same numpy, inspect and DEBUG imports as in Example 1.
def find_axes_indices(axis, val_min, val_max, flag_verbose = False):
    """
    Find the indices of the values val_min and val_max in axis, such that val_min and val_max are included.
    Because the intended use is to slice an array as data[val_min_i:val_max_i], 1 is added to val_max_i.
    If val_min or val_max falls outside the axis, the index is 0 or -1, respectively.

    BEHAVIOR:
    axis = [3,4,5,6]
    if val_min == 4.5: val_min_i = 1
    if val_min == 5:   val_min_i = 1
    if val_min == 1:   val_min_i = 0
    if val_max == 4.5: val_max_i = 3:  axis[0:3] = [3,4,5]
    if val_max == 4:   val_max_i = 2:  axis[0:2] = [3,4]
    if val_max == 10:  val_max_i = -1: axis[0:-1] = [3,4,5]

    CHANGELOG:
    201108xx/RB: originated in contourplot
    20130213/RB: moved to separate function. Better handling of edge cases.
    """
    if val_min > val_max:
        DEBUG.printWarning("val_min > val_max ({v1} and {v2}), will give strange result.".format(v1 = val_min, v2 = val_max), inspect.stack())

    # index of the last axis element below val_min (0 if there is none)
    temp = numpy.where(axis < val_min)
    if len(temp[0]) == 0:
        val_min_i = 0
    else:
        val_min_i = temp[0][-1]

    # index just past the first axis element above val_max (-1 if there is none)
    temp = numpy.where(axis > val_max)
    if len(temp[0]) == 0:
        val_max_i = -1
    else:
        val_max_i = temp[0][0] + 1
    if val_max_i == len(axis):
        val_max_i = -1

    return val_min_i, val_max_i
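As a quick check (not part of the original example), the BEHAVIOR cases from the docstring can be reproduced directly:

import numpy

axis = numpy.array([3, 4, 5, 6])
val_min_i, val_max_i = find_axes_indices(axis, 4.5, 4.5)
print(val_min_i, val_max_i)        # 1 3
print(axis[val_min_i:val_max_i])   # [4 5]: the slice brackets the requested range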
Example 3: import_data_LV_A
# Required import: from PythonTools import Debug [as alias]
# Or: from PythonTools.Debug import printWarning [as alias]
# The function below assumes the same numpy, inspect and DEBUG imports as in Example 1;
# CONST refers to the project's constants module, which provides hene_fringe_fs.
def import_data_LV_A(path, base_filename):
    """
    Imports data for 'LV_file_format.1'
    """
    try:
        # data
        path_and_filename = path + base_filename + ".csv"
        data = numpy.loadtxt(path_and_filename, dtype = "float", delimiter = ",")
        data = data.T

        # time axis in fringes
        path_and_filename = path + base_filename + "_t1.csv"
        t1_axis = numpy.loadtxt(path_and_filename, dtype = "float", delimiter = ",")

        # frequency axis
        path_and_filename = path + base_filename + "_w3.csv"
        w3_axis = numpy.loadtxt(path_and_filename, dtype = "float", delimiter = ",")

        # phase
        path_and_filename = path + base_filename + "_phase.txt"
        f = open(path_and_filename)
        for line in f:
            temp = line
        f.close()
        if temp == "NaN":
            DEBUG.printWarning("Phase is Not-a-Number, will be set to 0 ", inspect.stack())
            phase = 0
        else:
            phase = float(temp)

        # last pump
        path_and_filename = path + base_filename + "_lastpump.txt"
        f = open(path_and_filename)
        for line in f:
            lastpump = line
        f.close()

        # determine the number of fringes and pixels
        n_fringes = int((len(t1_axis) + 1) / 2)
        n_pixels = len(w3_axis)

        # convert NaN to zeros
        data = numpy.nan_to_num(data)

        # LabView measures 4000-N to 4000+N; split the data into 4000-N to 4000 (non-rephasing) and 4000 to 4000+N (rephasing)
        R = data[n_fringes-1:, :]
        NR = numpy.flipud(data[:n_fringes, :])

        # for the FFT we don't want 4000 to be zero; the axes run from 0 to N
        # also calculate the time axis in fs
        t1fr_axis = numpy.arange(n_fringes)
        t1fs_axis = numpy.arange(n_fringes) * CONST.hene_fringe_fs

        # return everything
        return R, NR, t1fs_axis, t1fr_axis, w3_axis, phase, lastpump, n_fringes, n_pixels

    except IOError:
        DEBUG.printError("Unable to import LabView data from file " + path + base_filename, inspect.stack())
        # raise
        return False
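A usage sketch (the path and base filename are made up for illustration): on an IOError the function returns False, so that case is checked before unpacking the result.

result = import_data_LV_A("/data/2dir/", "scan_001")
if result is not False:
    R, NR, t1fs_axis, t1fr_axis, w3_axis, phase, lastpump, n_fringes, n_pixels = result
    print(n_fringes, n_pixels, phase)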