This article collects typical usage examples of the numpy.full function in Python. If you are wondering how exactly to use numpy.full, what it is for, or what real-world calls look like, the curated code examples below may help.
Fifteen code examples of the full function are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Python code examples.
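Before the examples, a minimal orientation sketch of what numpy.full does: it builds an array of a given shape in which every element is the same fill value. The shapes, fill values, and dtypes below are arbitrary illustrations, not taken from any of the examples that follow.

import numpy as np

# A 2x3 float array in which every element is 7.0.
a = np.full((2, 3), 7.0)
print(a)
# [[7. 7. 7.]
#  [7. 7. 7.]]

# The dtype can be pinned explicitly; here a length-4 int32 array of -1.
b = np.full(4, -1, dtype=np.int32)
print(b)  # [-1 -1 -1 -1]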
Example 1: load_model
def load_model(nbins_sfh=7, sigma=0.3, df=2., agelims=None, objname=None, **extras):
    # we'll need this to access specific model parameters
    n = [p['name'] for p in model_params]

    # replace nbins_sfh
    nbins_sfh = 4 + (int(objname) - 1) // 9

    # create SFH bins
    zred = model_params[n.index('zred')]['init']
    tuniv = WMAP9.age(zred).value

    # now construct the nonparametric SFH
    # current scheme: six bins, four spaced equally in logarithmic time;
    # the last bin covers 15% of the age of the Universe, the first two are 0-30 and 30-100 Myr
    tbinmax = (tuniv * 0.85) * 1e9
    agelims = agelims[:2] + np.linspace(agelims[2], np.log10(tbinmax), nbins_sfh - 2).tolist() + [np.log10(tuniv * 1e9)]
    agebins = np.array([agelims[:-1], agelims[1:]])

    # load nvariables and agebins
    model_params[n.index('agebins')]['N'] = nbins_sfh
    model_params[n.index('agebins')]['init'] = agebins.T
    model_params[n.index('mass')]['N'] = nbins_sfh
    model_params[n.index('logsfr_ratios')]['N'] = nbins_sfh - 1
    model_params[n.index('logsfr_ratios')]['init'] = np.full(nbins_sfh - 1, 0.0)  # constant SFH
    model_params[n.index('logsfr_ratios')]['prior'] = priors.StudentT(mean=np.full(nbins_sfh - 1, 0.0),
                                                                      scale=np.full(nbins_sfh - 1, sigma),
                                                                      df=np.full(nbins_sfh - 1, df))
    return sedmodel.SedModel(model_params)
Example 2: testMakeTableExceptions
def testMakeTableExceptions(self):
    # Verify that contents is being type-checked and shape-checked.
    with self.assertRaises(ValueError):
        text_plugin.make_table([])

    with self.assertRaises(ValueError):
        text_plugin.make_table('foo')

    with self.assertRaises(ValueError):
        invalid_shape = np.full((3, 3, 3), 'nope', dtype=np.dtype('S3'))
        text_plugin.make_table(invalid_shape)

    # Test headers exceptions in 2d array case.
    test_array = np.full((3, 3), 'foo', dtype=np.dtype('S3'))
    with self.assertRaises(ValueError):
        # Headers is wrong type.
        text_plugin.make_table(test_array, headers='foo')
    with self.assertRaises(ValueError):
        # Too many headers.
        text_plugin.make_table(test_array, headers=['foo', 'bar', 'zod', 'zoink'])
    with self.assertRaises(ValueError):
        # Headers is 2d.
        text_plugin.make_table(test_array, headers=test_array)

    # Also make sure the column counting logic works in the 1d array case.
    test_array = np.array(['foo', 'bar', 'zod'])
    with self.assertRaises(ValueError):
        # Too many headers.
        text_plugin.make_table(test_array, headers=test_array)
Example 3: crossval_predict
def crossval_predict(predictor, X, y, prefix, n_cv=5):
    if not np.array_equal(predictor.classes_, [0, 1]):
        raise Exception("classes labels NOT match")

    can_pred_proba = common.can_predict_probability(predictor)

    n_samples = X.shape[0]
    print("totally {} samples, divided into {} folds".format(n_samples, n_cv))

    if can_pred_proba:
        datas = np.full((n_samples, 2), np.NaN)
        headers = ["{}_{}".format(prefix, t) for t in ["proba", "log_proba"]]
        yvalidates = pd.DataFrame(datas, columns=headers, index=y.index)
    else:
        datas = np.full((n_samples, 1), np.NaN)
        header = "{}_label".format(prefix)
        yvalidates = pd.DataFrame(datas, columns=[header], index=y.index)

    folds = StratifiedKFold(y, n_folds=n_cv, shuffle=True, random_state=seed)
    for index, (train_index, test_index) in enumerate(folds):
        Xtrain, Xtest = X[train_index], X[test_index]
        ytrain, ytest = y[train_index], y[test_index]

        predictor.fit(Xtrain, ytrain)
        if can_pred_proba:
            ytest_probas = predictor.predict_proba(Xtest)
            pos_proba = ytest_probas[:, 1]  # probability for label=1 (positive)
            yvalidates.iloc[test_index, 0] = pos_proba
            yvalidates.iloc[test_index, 1] = np.log(pos_proba)
        else:
            yvalidates.iloc[test_index, 0] = predictor.predict(Xtest)
        print("====== cross-validated on {}-fold ======".format(index + 1))

    return yvalidates
Example 4: run
def run(oiter):
    # ----- Variable for this run -----
    log_alpha_0 = all_log_alpha_0[oiter]
    print("Running job {0} on {1}".format(oiter + 1, socket.gethostname()))

    train_images, train_labels, _, _, _ = load_data()
    train_images = train_images[:N_data, :]
    train_labels = train_labels[:N_data, :]
    batch_idxs = BatchList(N_data, batch_size)
    iter_per_epoch = len(batch_idxs)
    N_weights, _, loss_fun, frac_err = make_nn_funs(layer_sizes, L2_reg)

    def indexed_loss_fun(w, idxs):
        return loss_fun(w, X=train_images[idxs], T=train_labels[idxs])

    V0 = npr.randn(N_weights) * velocity_scale
    losses = []
    d_losses = []
    alpha_0 = np.exp(log_alpha_0)
    for N_iters in all_N_iters:
        alphas = np.full(N_iters, alpha_0)
        betas = np.full(N_iters, beta_0)
        npr.seed(1)
        W0 = npr.randn(N_weights) * np.exp(log_param_scale)
        results = sgd(indexed_loss_fun, batch_idxs, N_iters, W0, V0, alphas, betas)
        losses.append(results['loss_final'])
        d_losses.append(d_log_loss(alpha_0, results['d_alphas']))

    return losses, d_losses
Example 5: test_window_safe
def test_window_safe(self, factor_len):
    # all-True data set of shape (days, securities)
    data = full(self.default_shape, True, dtype=bool)

    class InputFilter(Filter):
        inputs = ()
        window_length = 0

    class TestFactor(CustomFactor):
        dtype = float64_dtype
        inputs = (InputFilter(),)
        window_length = factor_len

        def compute(self, today, assets, out, filter_):
            # sum for each column
            out[:] = np_sum(filter_, axis=0)

    results = self.run_graph(
        TermGraph({'windowsafe': TestFactor()}),
        initial_workspace={InputFilter(): data},
    )

    # number of days in default_shape
    n = self.default_shape[0]
    # shape of output array
    output_shape = ((n - factor_len + 1), self.default_shape[1])
    check_arrays(
        results['windowsafe'],
        full(output_shape, factor_len, dtype=float64)
    )
Example 6: _init
def _init(self, X, lengths, params):
    init = 1. / self.n_components
    if 's' in params or not hasattr(self, "startprob_"):
        self.startprob_ = np.full(self.n_components, init)
    if 't' in params or not hasattr(self, "transmat_"):
        self.transmat_ = np.full((self.n_components, self.n_components),
                                 init)
Example 7: complete_obs_table
def complete_obs_table(obs_table, used_columns, filter_list, tolerance,
                       lim_flag, default_error=0.1, systematic_deviation=0.1):
    """Complete the observation table.

    For each filter:

    * If the corresponding error is not present in the used column list or in
      the table columns, add (or replace) an error column with the default
      error.
    * Adjust the error value.

    Parameters
    ----------
    obs_table: astropy.table.Table
        The observation table.
    used_columns: list of strings
        The list of columns to use in the observation table.
    filter_list: list of strings
        The list of filters used in the analysis.
    tolerance: float
        Tolerance threshold under which a flux error is considered to be 0.
    lim_flag: boolean
        Do we process upper limits (True) or treat them as no-data (False)?
    default_error: float
        Default error factor used when the provided error is under the
        tolerance threshold.
    systematic_deviation: float
        Systematic deviation added to the error.

    Returns
    -------
    obs_table: astropy.table.Table
        The completed observation table.

    Raises
    ------
    Exception: When a filter is not present in the observation table.

    """
    # TODO Print or log a warning when an error column is in the used column
    # list but is not present in the observation table.
    for name in filter_list:
        if name not in obs_table.columns:
            raise Exception("The filter <{}> (at least) is not present in "
                            "the observation table.".format(name))

        name_err = name + "_err"
        if name_err not in obs_table.columns:
            obs_table.add_column(Column(name=name_err,
                                        data=np.full(len(obs_table), np.nan)),
                                 index=obs_table.colnames.index(name) + 1)
        elif name_err not in used_columns:
            obs_table[name_err] = np.full(len(obs_table), np.nan)

        obs_table[name], obs_table[name_err] = adjust_data(obs_table[name],
                                                           obs_table[name_err],
                                                           tolerance,
                                                           lim_flag,
                                                           default_error,
                                                           systematic_deviation)

    return obs_table
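As a standalone illustration of the pattern used in complete_obs_table above, the sketch below seeds a NaN-filled error column right after a flux column; the table contents and the column names are hypothetical, not taken from the original package.

import numpy as np
from astropy.table import Table, Column

# Hypothetical minimal table: one filter column with three flux values.
t = Table({'FUV': [1.2, 3.4, 5.6]})

# Add a matching error column filled with NaN placeholders, positioned just after 'FUV'.
t.add_column(Column(name='FUV_err', data=np.full(len(t), np.nan)),
             index=t.colnames.index('FUV') + 1)
print(t)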
Example 8: test_many_inputs
def test_many_inputs(self):
    """
    Test adding NumericalExpressions with >10 inputs.
    """
    # Create an initial NumericalExpression by adding two factors together.
    f = self.f
    expr = f + f
    self.fake_raw_data = {f: full((5, 5), 0, float)}
    expected = 0

    # Alternate between adding and subtracting factors. Because subtraction
    # is not commutative, this ensures that we are combining factors in the
    # correct order.
    ops = (add, sub)
    for i, name in enumerate(ascii_uppercase):
        op = ops[i % 2]
        NewFactor = type(
            name,
            (Factor,),
            dict(dtype=float64_dtype, inputs=(), window_length=0),
        )
        new_factor = NewFactor()

        # Again we need a NumericalExpression, so add two factors together.
        new_expr = new_factor + new_factor

        self.fake_raw_data[new_factor] = full((5, 5), i + 1, float)
        expr = op(expr, new_expr)

        # Double the expected output since each factor is counted twice.
        expected = op(expected, (i + 1) * 2)

    self.check_output(expr, full((5, 5), expected, float))
Example 9: make_default_configuration
def make_default_configuration(self):
    self.global_register = ccpdv4['CCPD_GLOBAL'].copy()
    self.pixel_register = {
        "threshold": np.full((48, 12), 7, dtype=np.uint8),  # 16 columns (triple col) x 6 rows (double row)
        # "monitor": np.full((48, 12), 0, dtype=np.uint8),
        "injection": np.full((6,), 0, dtype=np.uint8)
    }
Example 10: _clean_timeseries
def _clean_timeseries(self, timeseries, starttime, endtime):
    """Realigns timeseries data so the start and endtimes are the same
    as what was originally asked for, even if the data was during a gap.

    Parameters
    ----------
    timeseries: obspy.core.stream
        The timeseries stream as returned by the call to getWaveform
    starttime: obspy.core.UTCDateTime
        the starttime of the requested data
    endtime: obspy.core.UTCDateTime
        the endtime of the requested data

    Notes
    -----
    The original timeseries object is changed.
    """
    for trace in timeseries:
        trace_starttime = UTCDateTime(trace.stats.starttime)
        trace_endtime = UTCDateTime(trace.stats.endtime)

        if trace.stats.starttime > starttime:
            cnt = int((trace_starttime - starttime) / trace.stats.delta)
            trace.data = numpy.concatenate([
                numpy.full(cnt, numpy.nan, dtype=numpy.float64),
                trace.data])
            trace.stats.starttime = starttime
        if trace_endtime < endtime:
            cnt = int((endtime - trace_endtime) / trace.stats.delta)
            trace.data = numpy.concatenate([
                trace.data,
                numpy.full(cnt, numpy.nan, dtype=numpy.float64)])
            trace.stats.endttime = endtime
Example 11: _recognize3
def _recognize3(self, scores, transitions):
    lengthT = scores.shape[0]
    lengthS = transitions.shape[1]
    cost = np.full((lengthT, lengthT), np.inf, 'float32')
    back = np.full((lengthT, lengthT), np.inf, 'int32')
    cost[0] = np.min(scores[0])
    back[0] = -1
    transcript = []
    attention = []
    for s in range(1, lengthT):
        for t in range(min(s * lengthS, lengthT)):
            # if s % self.nstates == 0:  # end state
            cost[s, t] = np.min(scores[s])
            q = transitions[t].copy()
            q[:min(t, lengthS)] += cost[s - 1, t - min(t, lengthS):t]
            back[s, t] = q.argmin() + 1
            cost[s, t] += q.min()
    t = lengthT - 1
    s = 1
    while t >= 0 and s < lengthT:
        if s % self.nstates == 0:
            attention.append(t)
            transcript.append(scores[t].argmin() // self.nstates)
        t -= back[-s, t]
        s += 1
    return transcript[::-1], attention[::-1]
Example 12: evaluate
def evaluate(gtdir, preddir, eval_pose=True, eval_track=True,
             eval_upper_bound=False):
    gtFramesAll, prFramesAll = load_data_dir(['', gtdir, preddir])

    print('# gt frames :', len(gtFramesAll))
    print('# pred frames:', len(prFramesAll))

    apAll = np.full((Joint().count + 1, 1), np.nan)
    preAll = np.full((Joint().count + 1, 1), np.nan)
    recAll = np.full((Joint().count + 1, 1), np.nan)
    if eval_pose:
        apAll, preAll, recAll = evaluateAP(gtFramesAll, prFramesAll)
        print('Average Precision (AP) metric:')
        printTable(apAll)

    metrics = np.full((Joint().count + 4, 1), np.nan)
    if eval_track:
        metricsAll = evaluateTracking(
            gtFramesAll, prFramesAll, eval_upper_bound)

        for i in range(Joint().count + 1):
            metrics[i, 0] = metricsAll['mota'][0, i]
        metrics[Joint().count + 1, 0] = metricsAll['motp'][0, Joint().count]
        metrics[Joint().count + 2, 0] = metricsAll['pre'][0, Joint().count]
        metrics[Joint().count + 3, 0] = metricsAll['rec'][0, Joint().count]
        print('Multiple Object Tracking (MOT) metrics:')
        printTable(metrics, motHeader=True)

    return (apAll, preAll, recAll), metrics
Example 13: test_rolling_and_nonrolling
def test_rolling_and_nonrolling(self):
    open_ = USEquityPricing.open
    close = USEquityPricing.close
    volume = USEquityPricing.volume

    # Test for thirty days up to the last day that we think all
    # the assets existed.
    dates_to_test = self.dates[-30:]

    constants = {open_: 1, close: 2, volume: 3}
    loader = PrecomputedLoader(constants=constants, dates=self.dates,
                               sids=self.asset_ids)
    engine = SimplePipelineEngine(lambda column: loader, self.dates,
                                  self.asset_finder)

    sumdiff = RollingSumDifference()

    result = engine.run_pipeline(
        Pipeline(
            columns={
                "sumdiff": sumdiff,
                "open": open_.latest,
                "close": close.latest,
                "volume": volume.latest,
            }
        ),
        dates_to_test[0],
        dates_to_test[-1],
    )
    self.assertIsNotNone(result)
    self.assertEqual({"sumdiff", "open", "close", "volume"}, set(result.columns))

    result_index = self.asset_ids * len(dates_to_test)
    result_shape = (len(result_index),)
    check_arrays(result["sumdiff"],
                 Series(index=result_index, data=full(result_shape, -3, dtype=float)))

    for name, const in [("open", 1), ("close", 2), ("volume", 3)]:
        check_arrays(result[name],
                     Series(index=result_index, data=full(result_shape, const, dtype=float)))
Example 14: plot_risk
def plot_risk(risk_local, risk_central, risk_dist, iters, name_file, label_x, label_y):
    size = iters.shape[0]

    # Create the data
    risk_local = np.full(size, risk_local)
    risk_central = np.full(size, risk_central)
    iters = np.array(iters)

    # Plot graphs
    sns.set_style("ticks")
    plt.figure()
    plt.rc('text', usetex=True)
    plt.rc('text.latex', unicode=True)
    tests = list(risk_dist.keys())
    with sns.color_palette("tab10", len(tests) + 2):
        for test in tests:
            label = "SVM distribuído com " + test
            plt.plot(iters, risk_dist[test], linewidth=2, label=label)
        plt.plot(iters, risk_local, linewidth=2.2, linestyle='-.', label='SVM Local')
        plt.plot(iters, risk_central, linewidth=2.2, linestyle='-.', label='SVM Central')
        plt.legend(loc='upper right')
    sns.despine()
    plt.xlabel(label_x)
    plt.ylabel(label_y)
    file = str(plots_path) + "/" + name_file + ".pdf"
    plt.savefig(file, transparent=True)
Example 15: initBuffers
def initBuffers(self, puzzle):
    # define lengths buffer and copy to the GPU;
    # as we will not read from this buffer later, mapping is not required
    self.lengths = np.full(self.simulations, np.iinfo(np.int16).max, dtype=np.int16)
    self.lengthsBuffer = cl.Buffer(self.context,
                                   cl.mem_flags.READ_WRITE | cl.mem_flags.COPY_HOST_PTR,
                                   hostbuf=self.lengths)

    # define buffer for aggregated lengths for each workgroup
    self.groupLengths = np.full(self.workGroups, np.iinfo(np.int16).max, dtype=np.int16)
    self.groupLengthsBuffer = cl.Buffer(self.context,
                                        cl.mem_flags.READ_WRITE | cl.mem_flags.USE_HOST_PTR,
                                        hostbuf=self.groupLengths)

    # map group lengths buffer
    cl.enqueue_map_buffer(self.queue, self.groupLengthsBuffer, cl.map_flags.READ, 0,
                          self.groupLengths.shape, self.groupLengths.dtype)

    # get the input puzzle ready for the kernel; convert to 8-bit int (char)
    p = np.array(puzzle['puzzle']).astype(np.int8)
    # subtract 1 so that -1 denotes a gap and 0 denotes a square to be filled
    p = p - np.ones_like(p, dtype=p.dtype)

    # copy the puzzle, one for each simulation
    self.puzzles = np.zeros((self.simulations, self.height, self.width), dtype=p.dtype)
    self.puzzles[:, 0:self.height, 0:self.width] = p

    # define puzzles buffer and copy data (we do not need to worry about getting data
    # out of this buffer, so mapping isn't required); this buffer contains the input
    # puzzles, one for each invocation (the puzzle is too large to hold in local or
    # shared memory)
    self.puzzlesFlattened = self.puzzles.ravel()
    self.puzzlesBuffer = cl.Buffer(self.context,
                                   cl.mem_flags.READ_WRITE | cl.mem_flags.COPY_HOST_PTR,
                                   hostbuf=self.puzzlesFlattened)

    # define output buffer for best solutions aggregated across workgroups
    self.solutions = self.puzzles[0:self.workGroups]
    self.solutionsFlattened = self.solutions.ravel()
    self.solutionsBuffer = cl.Buffer(self.context,
                                     cl.mem_flags.READ_WRITE | cl.mem_flags.USE_HOST_PTR,
                                     hostbuf=self.solutionsFlattened)

    # map solutions buffer
    cl.enqueue_map_buffer(self.queue, self.solutionsBuffer, cl.map_flags.READ, 0,
                          self.solutionsFlattened.shape, self.solutions.dtype)