This article collects typical usage examples of numpy.Inf in Python. If you have been wondering what exactly numpy.Inf does, how to call it, or what real code that uses it looks like, the curated examples below may help. You can also explore further usage examples from the numpy module it belongs to.
Below are 15 code examples of numpy.Inf, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better Python code examples.
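For orientation before the examples: numpy.Inf is simply an alias for IEEE-754 floating-point infinity, identical to numpy.inf and float('inf'); as of NumPy 2.0 the np.Inf spelling has been removed, so new code should prefer np.inf. A minimal standalone sketch (values chosen only for illustration):

import numpy as np

# np.Inf was an alias of np.inf (removed in NumPy 2.0); both equal float("inf")
print(np.inf == float("inf"))                       # True
print(np.inf > 1e308, -np.inf < 0)                  # True True
print(np.isinf(np.array([1.0, np.inf, -np.inf])))   # [False  True  True]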

Example 1: sys_norm_h2_LMI
# Required import: import numpy [as alias]
# Or: from numpy import Inf [as alias]
def sys_norm_h2_LMI(Acl, Bdisturbance, C):
    # Does not work very well; if the problem is poorly scaled, the Riccati-based approach works better.
    # Dullerud, p. 210
    n = Acl.shape[0]
    X = cvxpy.Semidef(n)
    Y = cvxpy.Semidef(n)

    constraints = [Acl*X + X*Acl.T + Bdisturbance*Bdisturbance.T == -Y]

    obj = cvxpy.Minimize(cvxpy.trace(Y))
    prob = cvxpy.Problem(obj, constraints)
    prob.solve()

    eps = 1e-16
    if np.max(np.linalg.eigvals((-Acl*X - X*Acl.T - Bdisturbance*Bdisturbance.T).value)) > -eps:
        print('Acl*X + X*Acl.T + Bdisturbance*Bdisturbance.T is not negative definite.')
        return np.Inf
    if np.min(np.linalg.eigvals(X.value)) < eps:
        print('X is not positive definite.')
        return np.Inf

    return np.sqrt(np.trace(C*X.value*C.T))
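The snippet above uses the pre-1.0 cvxpy API (Semidef, '*' as matrix multiplication). As a sketch only, and not part of the original example, the same LMI could be expressed against cvxpy >= 1.0 roughly as follows (the function name and status handling are my own):

import numpy as np
import cvxpy as cp

def sys_norm_h2_lmi_sketch(Acl, Bdisturbance, C):
    # Same LMI with the cvxpy >= 1.0 API: PSD variables and '@' for matrix products.
    n = Acl.shape[0]
    X = cp.Variable((n, n), PSD=True)
    Y = cp.Variable((n, n), PSD=True)
    constraints = [Acl @ X + X @ Acl.T + Bdisturbance @ Bdisturbance.T == -Y]
    prob = cp.Problem(cp.Minimize(cp.trace(Y)), constraints)
    prob.solve()
    if prob.status not in ("optimal", "optimal_inaccurate"):
        return np.inf          # mirror the original: infinity signals an unusable solution
    return float(np.sqrt(np.trace(C @ X.value @ C.T)))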

Example 2: generate_final_dataset
# Required import: import numpy [as alias]
# Or: from numpy import Inf [as alias]
def generate_final_dataset(self):
    if self.sign == False:
        shift_var = 1
        self.bucket = True
    else:
        shift_var = -1
        self.bucket = False

    self.woe_summary[self.column + "_shift"] = self.woe_summary[self.column].shift(shift_var)

    if self.sign == False:
        self.woe_summary.loc[0, self.column + "_shift"] = -np.inf
        self.bins = np.sort(list(self.woe_summary[self.column]) + [np.Inf, -np.Inf])
    else:
        self.woe_summary.loc[len(self.woe_summary) - 1, self.column + "_shift"] = np.inf
        self.bins = np.sort(list(self.woe_summary[self.column]) + [np.Inf, -np.Inf])

    self.woe_summary["labels"] = self.woe_summary.apply(self.generate_bin_labels, axis=1)

    self.dataset["bins"] = pd.cut(self.dataset[self.column], self.bins, right=self.bucket, precision=0)
    self.dataset["bins"] = self.dataset["bins"].astype(str)
    self.dataset['bins'] = self.dataset['bins'].map(lambda x: x.lstrip('[').rstrip(')'))
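The ±Inf values appended to the bin edges guarantee that pd.cut places every value, however extreme, into some bucket. A minimal standalone sketch of that pattern (the data and interior edges are made up for illustration):

import numpy as np
import pandas as pd

values = pd.Series([-7.5, 0.3, 2.7, 110.0])
edges = np.sort([0.0, 1.0, 10.0] + [np.inf, -np.inf])   # open-ended outer bins
print(pd.cut(values, edges, right=True))
# every value lands in a bin: (-inf, 0.0], (0.0, 1.0], (1.0, 10.0], (10.0, inf]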

Example 3: __init__
# Required import: import numpy [as alias]
# Or: from numpy import Inf [as alias]
def __init__(self, datasets="tmp", patience=7, fname=None, clean=False, verbose=False):
    """
    Args:
        patience (int): How long to wait after the last time the validation loss improved.
            Default: 7
        verbose (bool): If True, prints a message for each validation loss improvement.
            Default: False
    """
    self.patience = patience
    self.verbose = verbose
    self.counter = 0
    self.best_score = None
    self.early_stop = False
    self.val_loss_min = np.Inf

    timestr = datetime.datetime.now().strftime("%m%d-%H%M%S")
    if fname is None:
        fname = datasets + "-" + timestr + "-" + self._random_str() + ".pt"
    self.fname = os.path.join(folder, fname)  # `folder` is assumed to be defined at module level
    self.clean = clean
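Initializing val_loss_min to np.Inf means the very first validation loss always counts as an improvement. A minimal sketch of that bookkeeping, outside the class and with made-up losses:

import numpy as np

val_loss_min = np.inf            # any finite first loss is an "improvement"
patience, counter = 3, 0
for val_loss in [0.92, 0.85, 0.88, 0.87, 0.90]:
    if val_loss < val_loss_min:
        val_loss_min = val_loss  # a checkpoint would typically be saved here
        counter = 0
    else:
        counter += 1
        if counter >= patience:
            print("early stopping")
            break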

Example 4: _reset
# Required import: import numpy [as alias]
# Or: from numpy import Inf [as alias]
def _reset(self):
    """Resets wait counter and cooldown counter."""
    if self.mode not in ['auto', 'min', 'max']:
        warnings.warn('Learning Rate Plateau Reducing mode %s is unknown, '
                      'fallback to auto mode.' % (self.mode), RuntimeWarning)
        self.mode = 'auto'
    if (self.mode == 'min' or
            (self.mode == 'auto' and 'acc' not in self.monitor)):
        self.monitor_op = lambda a, b: np.less(a, b - self.epsilon)
        self.best = np.Inf
    else:
        self.monitor_op = lambda a, b: np.greater(a, b + self.epsilon)
        self.best = -np.Inf
    self.cooldown_counter = 0
    self.wait = 0
    self.lr_epsilon = self.min_lr * 1e-4

Example 5: __init__
# Required import: import numpy [as alias]
# Or: from numpy import Inf [as alias]
def __init__(self, parent_sorting, *, unit_ids=None, renamed_unit_ids=None, start_frame=None, end_frame=None):
    SortingExtractor.__init__(self)
    start_frame, end_frame = self._cast_start_end_frame(start_frame, end_frame)
    self._parent_sorting = parent_sorting
    self._unit_ids = unit_ids
    self._renamed_unit_ids = renamed_unit_ids
    self._start_frame = start_frame
    self._end_frame = end_frame
    if self._unit_ids is None:
        self._unit_ids = self._parent_sorting.get_unit_ids()
    if self._renamed_unit_ids is None:
        self._renamed_unit_ids = self._unit_ids
    if self._start_frame is None:
        self._start_frame = 0
    if self._end_frame is None:
        self._end_frame = np.Inf
    self._original_unit_id_lookup = {}
    for i in range(len(self._unit_ids)):
        self._original_unit_id_lookup[self._renamed_unit_ids[i]] = self._unit_ids[i]
    self.copy_unit_properties(parent_sorting, unit_ids=self._renamed_unit_ids)
    self.copy_unit_spike_features(parent_sorting, unit_ids=self._renamed_unit_ids, start_frame=start_frame,
                                  end_frame=end_frame)
    self._kwargs = {'parent_sorting': parent_sorting.make_serialized_dict(), 'unit_ids': unit_ids,
                    'renamed_unit_ids': renamed_unit_ids, 'start_frame': start_frame, 'end_frame': end_frame}

Example 6: get_unit_spike_train
# Required import: import numpy [as alias]
# Or: from numpy import Inf [as alias]
def get_unit_spike_train(self, unit_id, start_frame=None, end_frame=None):
    start_frame, end_frame = self._cast_start_end_frame(start_frame, end_frame)
    if start_frame is None:
        start_frame = 0
    if end_frame is None:
        end_frame = np.Inf
    original_unit_id = self._original_unit_id_lookup[unit_id]
    sf = self._start_frame + start_frame
    ef = self._start_frame + end_frame
    if sf < self._start_frame:
        sf = self._start_frame
    if ef > self._end_frame:
        ef = self._end_frame
    if ef == np.Inf:
        ef = None
    return self._parent_sorting.get_unit_spike_train(unit_id=original_unit_id, start_frame=sf,
                                                     end_frame=ef) - self._start_frame
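In the extractor above, np.Inf stands in for "no upper frame bound" until the very last step, where it is translated to None. A small standalone sketch of the same idea (the array and bounds are made up):

import numpy as np

spike_frames = np.array([3, 17, 42, 98, 250])
start_frame, end_frame = 10, np.inf                        # np.inf means "until the end of the recording"
mask = (spike_frames >= start_frame) & (spike_frames < end_frame)   # comparisons with inf are well defined
print(spike_frames[mask])                                  # [ 17  42  98 250]
end_frame = None if np.isinf(end_frame) else end_frame     # convert before calling an API that expects None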

Example 7: __init__
# Required import: import numpy [as alias]
# Or: from numpy import Inf [as alias]
def __init__(self, params):
    self.params = params

    if self.params.hiddenRatio is not None:
        self.params.n_hidden = int(numpy.ceil(self.params.n_visible * self.params.hiddenRatio))

    # for 0-1 normalization
    self.norm_max = numpy.ones((self.params.n_visible,)) * -numpy.Inf
    self.norm_min = numpy.ones((self.params.n_visible,)) * numpy.Inf
    self.n = 0

    self.rng = numpy.random.RandomState(1234)

    a = 1. / self.params.n_visible
    self.W = numpy.array(self.rng.uniform(  # initialize W uniformly
        low=-a,
        high=a,
        size=(self.params.n_visible, self.params.n_hidden)))

    self.hbias = numpy.zeros(self.params.n_hidden)   # initialize h bias to 0
    self.vbias = numpy.zeros(self.params.n_visible)  # initialize v bias to 0
    self.W_prime = self.W.T
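Initializing the running maxima to -Inf and the minima to +Inf guarantees the first observed sample sets both. A minimal sketch of the 0-1 normalization bookkeeping, independent of the autoencoder class above (random data for illustration):

import numpy as np

n_visible = 3
norm_max = np.full(n_visible, -np.inf)   # the first sample always becomes the running max
norm_min = np.full(n_visible, np.inf)    # ... and the running min
for x in np.random.default_rng(0).normal(size=(100, n_visible)):
    norm_max = np.maximum(norm_max, x)
    norm_min = np.minimum(norm_min, x)
x01 = (x - norm_min) / (norm_max - norm_min + 1e-12)   # last sample scaled to [0, 1]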

Example 8: on_train_begin
# Required import: import numpy [as alias]
# Or: from numpy import Inf [as alias]
def on_train_begin(self):
    self.wait = 0  # Allow instances to be re-used
    self.best = np.Inf if self.monitor_op == np.less else -np.Inf

Example 9: calculate_dV
# Required import: import numpy [as alias]
# Or: from numpy import Inf [as alias]
def calculate_dV(self, TL, old_sInd, sInds, sd, slewTimes, tmpCurrentTimeAbs):
    """Finds the change in velocity needed to transfer to a new star line of sight

    This method sums the total delta-V needed to transfer from one star
    line of sight to another. It determines the change in velocity to move from
    one station-keeping orbit to a transfer orbit at the current time, then from
    the transfer orbit to the next station-keeping orbit at currentTime + dt.
    Station-keeping orbits are modeled as discrete boundary value problems.
    This method can handle multiple indices for the next target stars and
    calculates the delta-V of each trajectory from the same starting star.

    Args:
        TL (TargetList):
            TargetList class object
        old_sInd (int):
            Integer index of the most recently observed star
        sInds (int ndarray):
            Integer indices of the next star(s) of interest
        sd (astropy Quantity array):
            Angular separation between the current and next star lines of sight
        slewTimes (astropy Quantity array):
            Starshade slew times to the next star(s) of interest, in days
        tmpCurrentTimeAbs (astropy Time):
            Current absolute mission time in MJD

    Returns:
        astropy Quantity array:
            Delta-V values in units of m/s
    """
    if old_sInd is None:
        dV = np.zeros(slewTimes.shape)
    else:
        dV = np.zeros(slewTimes.shape)
        badSlews_i, badSlew_j = np.where(slewTimes.value < self.occ_dtmin.value)
        for i in range(len(sInds)):
            for t in range(len(slewTimes.T)):
                dV[i, t] = self.dV_interp(slewTimes[i, t], sd[i].to('deg'))
        dV[badSlews_i, badSlew_j] = np.Inf

    return dV * u.m / u.s
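Setting the delta-V of infeasible slews to np.Inf guarantees they lose any subsequent minimization over candidate targets. A toy sketch of that pattern (all numbers made up):

import numpy as np

dV = np.array([[3.2, 1.1],
               [0.7, 5.4]])
slew_times = np.array([[2.0, 0.1],
                       [3.0, 0.05]])
dt_min = 0.5
bad_i, bad_j = np.where(slew_times < dt_min)
dV[bad_i, bad_j] = np.inf                      # too-short slews can never be selected
best = np.unravel_index(np.argmin(dV), dV.shape)
print(best, dV[best])                          # (1, 0) 0.7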

Example 10: __init__
# Required import: import numpy [as alias]
# Or: from numpy import Inf [as alias]
def __init__(self, shape, var_axes=[(0,), (0,)], name=None, map_est=False):
    self.map_est = map_est

    # Initial variances
    self.zvar0_init = np.Inf
    self.zvar1_init = np.Inf

    nvars = 2
    dtype = np.float64
    BaseEst.__init__(self, shape=[shape, shape], var_axes=var_axes, dtype=dtype, name=name,
                     type_name='ReLUEst', nvars=nvars, cost_avail=True)

Example 11: __init__
# Required import: import numpy [as alias]
# Or: from numpy import Inf [as alias]
def __init__(self, y, shape, var_axes=(0,), thresh=0, perr=1e-6,
             name=None, var_init=np.Inf, dtype=np.float64):
    BaseEst.__init__(self, shape=shape, var_axes=var_axes, dtype=dtype,
                     name=name, type_name='BinaryQuantEst', nvars=1, cost_avail=True)
    self.y = y
    self.shape = shape
    self.thresh = thresh
    self.perr = perr
    self.cost_avail = True
    self.var_init = var_init

Example 12: register
# Required import: import numpy [as alias]
# Or: from numpy import Inf [as alias]
def register(self, model):
    self.model = model
    if self.metric in ['auc', 'accuracy'] or model.direction == 'max':
        self.monitor_op = np.greater
        self.best = -np.Inf
    else:
        self.monitor_op = np.less
        self.best = np.Inf

Example 13: _validate_mab_args
# Required import: import numpy [as alias]
# Or: from numpy import Inf [as alias]
def _validate_mab_args(arms, learning_policy, context_policy, seed, n_jobs, backend) -> NoReturn:
    """
    Validates arguments for the MAB constructor.
    """

    # Arms
    check_true(isinstance(arms, list), TypeError("The arms should be provided in a list."))
    check_true(len(arms) > 1, ValueError("The number of arms should be greater than 1."))
    check_false(None in arms, ValueError("The arm list cannot contain None."))
    check_false(np.nan in arms, ValueError("The arm list cannot contain NaN."))
    check_false(np.Inf in arms, ValueError("The arm list cannot contain Infinity."))
    check_true(len(arms) == len(set(arms)), ValueError("The list of arms cannot contain duplicate values."))

    # Learning Policy type
    check_true(isinstance(learning_policy,
                          (LearningPolicy.EpsilonGreedy, LearningPolicy.Popularity, LearningPolicy.Random,
                           LearningPolicy.Softmax, LearningPolicy.ThompsonSampling, LearningPolicy.UCB1,
                           LearningPolicy.LinTS, LearningPolicy.LinUCB)),
               TypeError("Learning Policy type mismatch."))

    # Learning policy value
    learning_policy._validate()

    # Contextual Policy
    if context_policy:
        check_true(isinstance(context_policy,
                              (NeighborhoodPolicy.KNearest, NeighborhoodPolicy.Radius,
                               NeighborhoodPolicy.Clusters)),
                   TypeError("Context Policy type mismatch."))
        context_policy._validate()

    # Seed
    check_true(isinstance(seed, int), TypeError("The seed must be an integer."))

    # Parallel jobs
    check_true(isinstance(n_jobs, int), TypeError("Number of parallel jobs must be an integer."))
    check_true(n_jobs != 0, ValueError('Number of parallel jobs cannot be zero.'))

    if backend is not None:
        check_true(isinstance(backend, str), TypeError("Parallel backend must be a string."))
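The `np.Inf in arms` check works because Python's membership test falls back to `==`, and np.Inf compares equal to float('inf'). A quick standalone illustration, separate from the validator above:

import numpy as np

print(np.inf in [1.0, 2.5, float("inf")])   # True: np.inf == float("inf")
print(np.inf in ["arm1", "arm2", 3.5])      # False: no element compares equal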

Example 14: test_axis
# Required import: import numpy [as alias]
# Or: from numpy import Inf [as alias]
def test_axis(self):
    # Vector norms.
    # Compare the use of `axis` with computing the norm of each row
    # or column separately.
    A = array([[1, 2, 3], [4, 5, 6]], dtype=self.dt)
    for order in [None, -1, 0, 1, 2, 3, np.Inf, -np.Inf]:
        expected0 = [norm(A[:, k], ord=order) for k in range(A.shape[1])]
        assert_almost_equal(norm(A, ord=order, axis=0), expected0)
        expected1 = [norm(A[k, :], ord=order) for k in range(A.shape[0])]
        assert_almost_equal(norm(A, ord=order, axis=1), expected1)

    # Matrix norms.
    B = np.arange(1, 25, dtype=self.dt).reshape(2, 3, 4)
    nd = B.ndim
    for order in [None, -2, 2, -1, 1, np.Inf, -np.Inf, 'fro']:
        for axis in itertools.combinations(range(-nd, nd), 2):
            row_axis, col_axis = axis
            if row_axis < 0:
                row_axis += nd
            if col_axis < 0:
                col_axis += nd
            if row_axis == col_axis:
                assert_raises(ValueError, norm, B, ord=order, axis=axis)
            else:
                n = norm(B, ord=order, axis=axis)

                # The logic using k_index only works for nd = 3.
                # This has to be changed if nd is increased.
                k_index = nd - (row_axis + col_axis)
                if row_axis < col_axis:
                    expected = [norm(B[:].take(k, axis=k_index), ord=order)
                                for k in range(B.shape[k_index])]
                else:
                    expected = [norm(B[:].take(k, axis=k_index).T, ord=order)
                                for k in range(B.shape[k_index])]
                assert_almost_equal(n, expected)
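As a reminder of what ord=np.Inf means for numpy.linalg.norm: for vectors it is the maximum absolute entry (and -Inf the minimum), while for matrices it is the maximum (respectively minimum) absolute row sum. A short standalone check with hand-picked values:

import numpy as np
from numpy.linalg import norm

v = np.array([1.0, -4.0, 2.0])
print(norm(v, ord=np.inf), norm(v, ord=-np.inf))   # 4.0 (max |v_i|), 1.0 (min |v_i|)

A = np.array([[1.0, 2.0], [3.0, -4.0]])
print(norm(A, ord=np.inf))    # 7.0: maximum absolute row sum
print(norm(A, ord=-np.inf))   # 3.0: minimum absolute row sum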

Example 15: test_keepdims
# Required import: import numpy [as alias]
# Or: from numpy import Inf [as alias]
def test_keepdims(self):
    A = np.arange(1, 25, dtype=self.dt).reshape(2, 3, 4)

    allclose_err = 'order {0}, axis = {1}'
    shape_err = 'Shape mismatch found {0}, expected {1}, order={2}, axis={3}'

    # check the order=None, axis=None case
    expected = norm(A, ord=None, axis=None)
    found = norm(A, ord=None, axis=None, keepdims=True)
    assert_allclose(np.squeeze(found), expected,
                    err_msg=allclose_err.format(None, None))
    expected_shape = (1, 1, 1)
    assert_(found.shape == expected_shape,
            shape_err.format(found.shape, expected_shape, None, None))

    # Vector norms.
    for order in [None, -1, 0, 1, 2, 3, np.Inf, -np.Inf]:
        for k in range(A.ndim):
            expected = norm(A, ord=order, axis=k)
            found = norm(A, ord=order, axis=k, keepdims=True)
            assert_allclose(np.squeeze(found), expected,
                            err_msg=allclose_err.format(order, k))
            expected_shape = list(A.shape)
            expected_shape[k] = 1
            expected_shape = tuple(expected_shape)
            assert_(found.shape == expected_shape,
                    shape_err.format(found.shape, expected_shape, order, k))

    # Matrix norms.
    for order in [None, -2, 2, -1, 1, np.Inf, -np.Inf, 'fro', 'nuc']:
        for k in itertools.permutations(range(A.ndim), 2):
            expected = norm(A, ord=order, axis=k)
            found = norm(A, ord=order, axis=k, keepdims=True)
            assert_allclose(np.squeeze(found), expected,
                            err_msg=allclose_err.format(order, k))
            expected_shape = list(A.shape)
            expected_shape[k[0]] = 1
            expected_shape[k[1]] = 1
            expected_shape = tuple(expected_shape)
            assert_(found.shape == expected_shape,
                    shape_err.format(found.shape, expected_shape, order, k))
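The keepdims=True behavior being tested above is easy to see on a small case; with ord=np.Inf the reduced axes are retained with length 1. A brief standalone sketch:

import numpy as np
from numpy.linalg import norm

A = np.arange(1, 25, dtype=float).reshape(2, 3, 4)
print(norm(A, ord=np.inf, axis=(1, 2)).shape)                  # (2,)
print(norm(A, ord=np.inf, axis=(1, 2), keepdims=True).shape)   # (2, 1, 1)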