This page collects typical usage examples of the numpy.expm1 function in Python. If you are wondering what expm1 does, how to call it, and what real-world usage looks like, the curated examples below should help.
Fifteen code examples of expm1 are shown, ordered by popularity.
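Before the examples, a quick note on why expm1 exists at all: for small x, computing exp(x) - 1 directly loses most significant digits to cancellation, while np.expm1(x) stays accurate, and np.log1p is its matching inverse. A minimal sketch using nothing but NumPy:

import numpy as np

x = 1e-12
print(np.exp(x) - 1.0)        # ~1.000089e-12: cancellation noise in the naive form
print(np.expm1(x))            # ~1.0e-12: accurate to machine precision
print(np.log1p(np.expm1(x)))  # recovers x, since log1p inverts expm1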
Example 1: _frank

import math
import sys

import numpy as np
from scipy.stats import logser, uniform


def _frank(M, N, alpha):
    if N < 2:
        raise ValueError('Dimensionality Argument [N] must be an integer >= 2')
    elif N == 2:
        u1 = uniform.rvs(size=M)
        p = uniform.rvs(size=M)
        if abs(alpha) > math.log(sys.float_info.max):
            u2 = (u1 < 0).astype(int) + np.sign(alpha) * u1  # u1 or 1-u1
        elif abs(alpha) > math.sqrt(np.spacing(1)):
            u2 = -1 * np.log((np.exp(-alpha * u1) * (1 - p) / p + np.exp(-alpha)) /
                             (1 + np.exp(-alpha * u1) * (1 - p) / p)) / alpha
        else:
            u2 = p
        U = np.column_stack((u1, u2))
    else:
        # Algorithm 1 described in both the SAS Copula Procedure and the paper
        # "High Dimensional Archimedean Copula Generation Algorithm"
        if alpha <= 0:
            raise ValueError('For N >= 3, alpha must be > 0 in the Frank copula')
        U = np.empty((M, N))
        for ii in range(0, M):
            p = -1.0 * np.expm1(-1 * alpha)
            if p == 1:
                # boundary case protection
                p = 1 - np.spacing(1)
            v = logser.rvs(p, size=1)
            # sample N independent uniform random variables
            x_i = uniform.rvs(size=N)
            t = -1 * np.log(x_i) / v
            U[ii, :] = -1.0 * np.log1p(np.exp(-t) * np.expm1(-1.0 * alpha)) / alpha
    return U
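The last line above pairs expm1 with log1p to evaluate the inverse of Frank's generator without losing precision when alpha is tiny. A small sanity check of that pairing (plain NumPy, values chosen for illustration):

import numpy as np

alpha = 1e-8
t = np.logspace(-3, 1, 5)
u = -np.log1p(np.exp(-t) * np.expm1(-alpha)) / alpha
print(np.all((u > 0) & (u < 1)))  # True: samples stay strictly inside (0, 1)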
Example 2: output

import numpy as np


def output(modelObj):
    test = modelObj.test
    file = modelObj.outputFile
    model = modelObj.model
    test['label'] = test['label'].astype(int)
    week10 = test[test['Semana'] == 10]
    week11 = test[test['Semana'] == 11]
    # the model was trained on log1p(demand), so map predictions back with expm1
    week10['pred'] = np.expm1(model.predict(week10.values[:, :-1]))
    file.write('id,Demanda_uni_equil\n')
    temp = week10[['label', 'pred']]
    temp.to_csv(file, index=False, header=False)  # to_csv takes sep=, not delimiter=; ',' is the default
    '''
    week10['Semana'] = week10['Semana'] + 1
    week10 = week10[['Cliente_ID', 'Producto_ID', 'Semana', 'pred']]
    week10 = week10.groupby(by=['Cliente_ID', 'Producto_ID', 'Semana'], as_index=False).mean()
    week11 = pd.merge(week11, week10, on=['Cliente_ID', 'Producto_ID', 'Semana'], how='left')
    week11['l1'] = week11['pred']
    del week11['pred']
    temp = week11[['l1', 'l2', 'l3', 'l4', 'l5']]
    temp = temp.fillna(0)
    week11['lagVar'] = np.var(temp, axis=1)
    week11['newProduct'] = np.sum(temp, axis=1) == 0
    week11['newProduct'].replace(False, 0, inplace=True)
    week11['newProduct'].replace(True, 1, inplace=True)
    '''
    # week11['lagSum'] = week11['l1'] + week11['l2'] + week11['l3'] + week11['l4'] + week11['l5']
    # week11['lagAvg'] = week11['lagSum'] / 5
    week11['pred'] = np.expm1(model.predict(week11.values[:, :-1]))
    temp = week11[['label', 'pred']]
    temp.to_csv(file, index=False, header=False)
    file.flush()
    return test.shape[0]
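Example 2 relies on a common pattern: the model was fit against np.log1p of the target, so predictions are mapped back to original units with np.expm1. A self-contained sketch of that round trip, with scikit-learn's LinearRegression standing in for the tuned model (data and names are illustrative):

import numpy as np
from sklearn.linear_model import LinearRegression

rng = np.random.default_rng(0)
X = rng.uniform(size=(100, 3))
y = np.exp(X @ np.array([1.0, 2.0, 0.5])) - 1.0  # skewed, non-negative target

model = LinearRegression().fit(X, np.log1p(y))   # train in the log domain
pred = np.expm1(model.predict(X))                # back-transform to original units
print(np.allclose(pred, y))                      # True: exact for this synthetic target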
Example 3: __init__

import numpy as np

# DAILY and _annual_factor are defined in the host module this method was taken from.
def __init__(self, daily_returns, benchmark_daily_returns, risk_free_rate, days, period=DAILY):
    assert len(daily_returns) == len(benchmark_daily_returns)
    self._portfolio = daily_returns
    self._benchmark = benchmark_daily_returns
    self._risk_free_rate = risk_free_rate
    self._annual_factor = _annual_factor(period)
    self._daily_risk_free_rate = self._risk_free_rate / self._annual_factor
    self._alpha = None
    self._beta = None
    self._sharpe = None
    # compound the daily returns via log1p/expm1 instead of a running product
    self._return = np.expm1(np.log1p(self._portfolio).sum())
    self._annual_return = (1 + self._return) ** (365 / days) - 1
    self._benchmark_return = np.expm1(np.log1p(self._benchmark).sum())
    self._benchmark_annual_return = (1 + self._benchmark_return) ** (365 / days) - 1
    self._max_drawdown = None
    self._volatility = None
    self._annual_volatility = None
    self._benchmark_volatility = None
    self._benchmark_annual_volatility = None
    self._information_ratio = None
    self._sortino = None
    self._tracking_error = None
    self._annual_tracking_error = None
    self._downside_risk = None
    self._annual_downside_risk = None
    self._calmar = None
    self._avg_excess_return = None
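The np.expm1(np.log1p(r).sum()) idiom compounds a return series in log space; algebraically it equals np.prod(1 + r) - 1, but sums of log1p are better behaved for long series. A quick check of the identity:

import numpy as np

daily = np.array([0.01, -0.005, 0.003, 0.02])
print(np.expm1(np.log1p(daily).sum()))  # compounded total return
print(np.prod(1.0 + daily) - 1.0)       # same value, via the running product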
Example 4: _gpinv

import numpy as np


def _gpinv(p, k, sigma):
    """Inverse Generalized Pareto distribution function."""
    x = np.full_like(p, np.nan)
    if sigma <= 0:
        return x
    ok = (p > 0) & (p < 1)
    if np.all(ok):
        if np.abs(k) < np.finfo(float).eps:
            x = -np.log1p(-p)
        else:
            x = np.expm1(-k * np.log1p(-p)) / k
        x *= sigma
    else:
        if np.abs(k) < np.finfo(float).eps:
            x[ok] = -np.log1p(-p[ok])
        else:
            x[ok] = np.expm1(-k * np.log1p(-p[ok])) / k
        x *= sigma
        x[p == 0] = 0
        if k >= 0:
            x[p == 1] = np.inf
        else:
            x[p == 1] = -sigma / k
    return x
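For k away from zero, the expm1/log1p branch above is the standard GPD quantile sigma * ((1 - p)^(-k) - 1) / k in disguise; the rewritten form avoids cancellation as k approaches 0. A small comparison of the two forms:

import numpy as np

p, k, sigma = np.array([0.1, 0.5, 0.9]), 0.3, 2.0
stable = sigma * np.expm1(-k * np.log1p(-p)) / k
naive = sigma * ((1.0 - p) ** (-k) - 1.0) / k
print(np.allclose(stable, naive))  # True here; the naive form degrades as k -> 0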
Example 5: expm1

import numba
import numpy as np


def expm1(a, b):
    print(numba.typeof(a))
    print(numba.typeof(np.expm1(a)))
    # result = a**2 + b**2
    # print "... :)"
    # print np.expm1(result), "..."
    return np.expm1(a ** 2) + b
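np.expm1 is among the NumPy functions Numba supports in nopython mode, so a function like the one above can also be compiled directly. A minimal hedged sketch (assumes a Numba version that supports np.expm1 on both scalars and arrays):

import numba
import numpy as np

@numba.njit
def expm1_jit(a, b):
    return np.expm1(a ** 2) + b

print(expm1_jit(0.5, 1.0))              # compiled scalar call
print(expm1_jit(np.arange(3.0), 1.0))   # elementwise on an array argument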
Example 6: rmspe_xg

import numpy as np


def rmspe_xg(y_hat, y):
    # y is an xgboost DMatrix; ToWeight is a helper defined alongside this function
    y = np.expm1(y.get_label())
    w = ToWeight(y)
    y_hat = np.expm1(y_hat)
    score = np.sqrt(np.mean(((y - y_hat) * w) ** 2))
    return "rmspe", score
Example 7: numpy_sweep

import ctypes
import math
from multiprocessing import sharedctypes

import numpy

import sumpf
import sumpf._internal as sumpf_internal  # internal helpers; import path as used in SuMPF


def numpy_sweep(start_frequency=20.0,
                stop_frequency=20000.0,
                phase=0.0,
                interval=(0, 1.0),
                sampling_rate=48000.0,
                length=2 ** 16):
    """A pure NumPy implementation of the ExponentialSweep for benchmarking.
    See the ExponentialSweep class for documentation of the parameters.
    """
    # allocate shared memory for the channels
    array = sharedctypes.RawArray(ctypes.c_double, length)
    channels = numpy.frombuffer(array, dtype=numpy.float64).reshape((1, length))
    # generate the sweep
    start, stop = sumpf_internal.index(interval, length)
    sweep_offset = float(start / sampling_rate)
    sweep_duration = (stop - start) / sampling_rate
    frequency_ratio = stop_frequency / start_frequency
    l = sweep_duration / math.log(frequency_ratio)
    a = 2.0 * math.pi * start_frequency * l
    t = numpy.linspace(-sweep_offset, (length - 1) / sampling_rate - sweep_offset, length)
    array = t
    array /= l
    numpy.expm1(array, out=array)
    array *= a
    array += phase
    numpy.sin(array, out=channels[0, :])
    # fake store some additional values, because these values are actually stored
    # in the constructor of the sweep
    _ = start_frequency * frequency_ratio ** (-sweep_offset / sweep_duration)  # noqa: F841
    _ = start_frequency * frequency_ratio ** ((sweep_duration - sweep_offset) / sweep_duration)  # noqa: F841
    return sumpf.Signal(channels=channels, sampling_rate=sampling_rate, offset=0, labels=("Sweep",))
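The expm1 call implements the exponential sweep's phase, phi(t) = a * (exp(t/l) - 1) + phase, whose instantaneous frequency a/(2*pi*l) * exp(t/l) runs from start_frequency at t = 0 up to stop_frequency at t = T. A quick numeric check of those endpoints:

import numpy as np

f0, f1, T = 20.0, 20000.0, 1.0
l = T / np.log(f1 / f0)
a = 2.0 * np.pi * f0 * l
for t in (0.0, T):
    print(a / (2.0 * np.pi * l) * np.exp(t / l))  # prints 20.0, then 20000.0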
Example 8: inverse_transform

import numpy as np


def inverse_transform(self, X):
    if self.columns:
        for column in self.columns:
            X[column] = np.expm1(X[column])
        return X
    else:
        return np.expm1(X)
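This method is the expm1 half of a log1p/expm1 transformer pair. A self-contained sketch of the full round trip, with a hypothetical minimal host class (Log1pTransformer is an illustrative name, not the original):

import numpy as np
import pandas as pd

class Log1pTransformer:
    def __init__(self, columns=None):
        self.columns = columns

    def transform(self, X):
        if self.columns:
            for column in self.columns:
                X[column] = np.log1p(X[column])
            return X
        return np.log1p(X)

    def inverse_transform(self, X):
        if self.columns:
            for column in self.columns:
                X[column] = np.expm1(X[column])
            return X
        return np.expm1(X)

df = pd.DataFrame({"a": [0.0, 1.0, 10.0], "b": [5.0, 6.0, 7.0]})
t = Log1pTransformer(columns=["a"])
print(t.inverse_transform(t.transform(df)))  # column "a" round-trips to its original values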
Example 9: predict

import logging

import numpy as np


def predict(self, train_x, train_y, test_x, parameter, times=1, validation_indexs=None, type='regression'):
    # model_select, genIndexKFold and evaluate_function are helpers from the host module
    print(parameter['model'] + " predict starting")
    train_preds = np.zeros((times, len(train_x)))
    test_preds = np.zeros((times, len(test_x)))
    for time in range(times):
        logging.info("time {}".format(time))
        validation_indexs = genIndexKFold(train_x, 5)
        test_pred = np.zeros((len(validation_indexs), len(test_x)))
        train_pred = np.zeros((len(train_x)))
        for i, (train_ind, test_ind) in enumerate(validation_indexs):
            clf = model_select(parameter)
            logging.info("start time:{} Fold:{}".format(time, i))
            print("start time:{} Fold:{}".format(time, i))
            X_train = train_x[train_ind]
            Y_train = np.log1p(train_y[train_ind])
            X_test = train_x[test_ind]
            Y_test = train_y[test_ind]
            # fit in the log1p domain, map predictions back with expm1
            clf.fit(X_train, Y_train)
            test_pred[i][:] = np.expm1(clf.predict(test_x))
            train_pred[test_ind] = np.expm1(clf.predict(X_test))
            evaluation = evaluate_function(Y_test, train_pred[test_ind], 'rmsle')
            logging.info("time:{} Fold:{} evaluation:{}".format(time, i, evaluation))
        train_preds[time] = train_pred
        test_preds[time] = np.mean(test_pred, axis=0)
    print(train_preds, test_preds)
    return np.mean(train_preds, axis=0), np.mean(test_preds, axis=0)
Example 10: hyperbolic_ratio

import numpy as np
from numpy import exp, expm1, inf


def hyperbolic_ratio(a, b, sa, sb):
    '''
    Return a ratio of hyperbolic functions
    in a way that tolerates extreme variations of the arguments.

    Parameters
    ----------
    a, b : array-like
        argument vectors of the same size
    sa, sb : scalar integers
        defining the hyperbolic function used, i.e.,
        f(x, 1) = cosh(x), f(x, -1) = sinh(x)

    Returns
    -------
    r : ndarray
        f(a, sa) / f(b, sb), ratio of hyperbolic functions of the same
        size as a and b

    Examples
    --------
    >>> x = [-2, 0, 2]
    >>> hyperbolic_ratio(x, 1, 1, 1)    # gives r=cosh(x)/cosh(1)
    array([ 2.438107  ,  0.64805427,  2.438107  ])
    >>> hyperbolic_ratio(x, 1, 1, -1)   # gives r=cosh(x)/sinh(1)
    array([ 3.20132052,  0.85091813,  3.20132052])
    >>> hyperbolic_ratio(x, 1, -1, 1)   # gives r=sinh(x)/cosh(1)
    array([-2.35040239,  0.        ,  2.35040239])
    >>> hyperbolic_ratio(x, 1, -1, -1)  # gives r=sinh(x)/sinh(1)
    array([-3.08616127,  0.        ,  3.08616127])
    >>> hyperbolic_ratio(1, x, 1, 1)    # gives r=cosh(1)/cosh(x)
    array([ 0.41015427,  1.54308063,  0.41015427])
    >>> hyperbolic_ratio(1, x, 1, -1)   # gives r=cosh(1)/sinh(x)
    array([-0.42545906,         inf,  0.42545906])
    >>> hyperbolic_ratio(1, x, -1, 1)   # gives r=sinh(1)/cosh(x)
    array([ 0.3123711 ,  1.17520119,  0.3123711 ])
    >>> hyperbolic_ratio(1, x, -1, -1)  # gives r=sinh(1)/sinh(x)
    array([-0.32402714,         inf,  0.32402714])

    See also
    --------
    tran
    '''
    ak, bk, sak, sbk = np.atleast_1d(a, b, np.sign(sa), np.sign(sb))
    # old call
    # return exp(ak-bk)*(1+sak*exp(-2*ak))/(1+sbk*exp(-2*bk))
    # TODO: does not always handle division by zero correctly
    signRatio = np.where(sak * ak < 0, sak, 1)
    signRatio = np.where(sbk * bk < 0, sbk * signRatio, signRatio)
    bk = np.abs(bk)
    ak = np.abs(ak)
    num = np.where(sak < 0, expm1(-2 * ak), 1 + exp(-2 * ak))
    den = np.where(sbk < 0, expm1(-2 * bk), 1 + exp(-2 * bk))
    iden = np.ones(den.shape) * inf
    ind = np.flatnonzero(den != 0)
    iden.flat[ind] = 1.0 / den[ind]
    val = np.where(num == den, 1, num * iden)
    return signRatio * exp(ak - bk) * val  # (sak+exp(-2*ak))/(sbk+exp(-2*bk))
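The point of this formulation is that cosh and sinh overflow float64 for arguments beyond roughly 710, while the factored form exp(ak - bk) * (...) only ever exponentiates the difference. A quick demonstration of the failure mode it avoids (expect an overflow warning from the first line):

import numpy as np

a, b = 800.0, 801.0
print(np.cosh(a) / np.cosh(b))  # nan: both cosh calls overflow to inf
print(np.exp(a - b) * (1 + np.exp(-2 * a)) / (1 + np.exp(-2 * b)))  # ~0.3679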
Example 11: updateParams

import operator as op

import numpy as np
from scipy import linalg


def updateParams(self):
    self.pop.sort(key=op.attrgetter('f'))
    self.pSigma = np.dot(1.0 - self.cSigma, self.pSigma) + np.dot(
        np.sqrt(self.cSigma * (2.0 - self.cSigma) * self.muEff),
        sum(np.dot(self.rankWeight[i], self.pop[i].z) for i in range(self.popsize)))
    rate = np.linalg.norm(self.pSigma) / self.expectationChiDistribution
    if rate >= 1.0:
        wsum = 0
        for i in range(self.popsize):
            self.weight[i] = self.hatWeight[i] * np.expm1(self.alpha * np.linalg.norm(self.pop[i].z) + 1.0)
            wsum += self.weight[i]
        for i in range(self.popsize):
            self.weight[i] = self.weight[i] / wsum - 1.0 / self.popsize
    else:
        self.weight = self.rankWeight
    if rate >= 1.0:
        self.etaB = self.etaBMove
        self.etaSigma = self.etaSigmaMove
    elif rate >= 0.1:
        self.etaB = self.etaBStag
        self.etaSigma = self.etaSigmaStag
    else:
        self.etaB = self.etaBConv
        self.etaSigma = self.etaSigmaConv
    GDelta = sum(np.dot(self.weight[i], self.pop[i].z) for i in range(self.popsize))
    GMu = sum(self.weight[i] * (np.outer(self.pop[i].z, self.pop[i].z) - np.eye(self.dim))
              for i in range(self.popsize))
    GSigma = np.trace(GMu) / self.dim
    GB = GMu - GSigma * np.eye(self.dim)
    self.mu += self.etaMu * self.sigma * np.dot(self.B, GDelta)
    self.sigma *= (np.expm1(0.5 * self.etaSigma * GSigma) + 1.0)  # i.e. *= exp(0.5*etaSigma*GSigma)
    # the original called scipy.linalg.expm3, which SciPy has since removed;
    # expm is the maintained matrix exponential
    self.B = np.dot(self.B, linalg.expm(0.5 * self.etaB * GB))
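Note that the sigma update writes exp(x) as np.expm1(x) + 1.0; the two agree to rounding for any x, so unlike most examples on this page this use of expm1 is stylistic rather than a cancellation fix:

import numpy as np

x = 0.03
print(np.expm1(x) + 1.0, np.exp(x))  # equal to machine precision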
Example 12: test_write_subregion_to_file

import struct
import tempfile

import numpy as np

# `lif` (providing SystemRegion) and `tp` (fixed-point conversion helpers) are
# imported from the host test suite this test was taken from.


def test_write_subregion_to_file(
        self, machine_timestep, dt, size_in, tau_ref, tau_rc, size_out,
        probe_spikes, vertex_slice, vertex_neurons):
    # Check that the region is correctly written to file
    region = lif.SystemRegion(size_in, size_out, machine_timestep,
                              tau_ref, tau_rc, dt, probe_spikes)
    # Create the file
    fp = tempfile.TemporaryFile()
    # Write to it
    region.write_subregion_to_file(fp, vertex_slice)
    # Read back and check that the values are sane
    fp.seek(0)
    values = fp.read()
    assert len(values) == region.sizeof()
    (n_in, n_out, n_n, m_t, t_ref, dt_over_t_rc, rec_spikes, i_dims) = struct.unpack_from("<8I", values)
    assert n_in == size_in
    assert n_out == size_out
    assert n_n == vertex_neurons
    assert m_t == machine_timestep
    assert t_ref == int(tau_ref // dt)
    # the stored value is the fixed-point encoding of 1 - exp(-dt/tau_rc),
    # allowed a +/-10% tolerance
    assert (
        tp.value_to_fix(-np.expm1(-dt / tau_rc)) * 0.9
        < dt_over_t_rc
        < tp.value_to_fix(-np.expm1(-dt / tau_rc)) * 1.1
    )
    assert (probe_spikes and rec_spikes != 0) or (not probe_spikes and rec_spikes == 0)
    assert i_dims == 1
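The quantity being checked, -np.expm1(-dt / tau_rc) = 1 - exp(-dt/tau_rc), is the per-timestep decay factor of a LIF membrane; expm1 keeps it accurate when dt is much smaller than tau_rc. A quick look at typical values:

import numpy as np

dt, tau_rc = 0.001, 0.02
print(-np.expm1(-dt / tau_rc))  # ~0.04877, slightly below the crude estimate dt/tau_rc = 0.05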
Example 13: myRMSPE_xg

import numpy as np


def myRMSPE_xg(yhat, y):
    # y is an xgboost DMatrix; myRMSPE is a helper defined alongside this function
    y = np.expm1(y.get_label())
    yhat = np.expm1(yhat)
    r = myRMSPE(yhat, y)
    return "rmspe", r
Example 14: predict

import os
import pickle

import numpy as np
from sklearn.linear_model import LassoCV
from sklearn.model_selection import KFold


def predict(self, trains_x, train_y, tests_x, parameters, times=10, isFile=True, foldername="blend-dir"):
    """
    Ensemble many feature sets and regressors by stacking.

    :params trains_x: dictionary of training feature matrices, keyed by name
    :params train_y: training target vector
    :params tests_x: dictionary of test feature matrices, keyed by name
    :params parameters: list of model configurations (model_select builds each model)
    """
    test_data_sample = list(tests_x.values())[0]
    if not os.path.exists(foldername):
        os.makedirs(foldername)
    kfold_file = foldername + "/kfold_index.pkl"
    if os.path.exists(kfold_file):
        skf = pickle.load(open(kfold_file, "rb"))
    else:
        # materialize the folds so they can be pickled and reused across runs
        skf = list(KFold(n_splits=times, shuffle=True).split(np.zeros(len(train_y))))
        pickle.dump(skf, open(kfold_file, "wb"))
    blend_train = np.zeros((len(train_y), len(parameters)))
    blend_test = np.zeros((len(test_data_sample), len(parameters)))
    for j, parameter in enumerate(parameters):
        train_x = trains_x[parameter['data']]
        test_x = tests_x[parameter['data']]
        blend_test_tmp = np.zeros((len(test_data_sample), len(skf)))  # one column per fold
        for i, (train_index, valid_index) in enumerate(skf):
            clf = model_select(parameter['parameter'])
            train = train_x[train_index]
            train_valid_y = train_y[train_index]
            kfold_filepath = "./" + foldername + "/parameter_{}_kfold_{}.pkl".format(j, i)
            if os.path.exists(kfold_filepath):
                # reuse the cached predictions instead of refitting
                blend_train_prediction, blend_test_prediction = pickle.load(open(kfold_filepath, "rb"))
            else:
                clf.fit(train, np.log1p(train_valid_y))
                blend_train_prediction = np.expm1(clf.predict(train))
                blend_test_prediction = np.expm1(clf.predict(test_x))
                pickle.dump((blend_train_prediction, blend_test_prediction), open(kfold_filepath, "wb"))
            blend_train[train_index, j] = blend_train_prediction
            blend_test_tmp[:, i] = blend_test_prediction
        blend_test[:, j] = blend_test_tmp.mean(1)
    # blending model over the per-model predictions
    # (the original also passed normalize=True, which scikit-learn removed in 1.2;
    # scale the features beforehand if that behavior is needed)
    bclf = LassoCV(n_alphas=100, alphas=None, cv=5, fit_intercept=True,
                   max_iter=10000, positive=True)
    bclf.fit(blend_train, train_y)
    y_test_predict = bclf.predict(blend_test)
    return y_test_predict
Example 15: merge_predict

import numpy as np


def merge_predict(model1, model2, test_data):
    # Combine the predictions of two separately trained models. The input
    # models predict in the log domain; the result is returned in the
    # original domain via expm1.
    p1 = np.expm1(model1.predict(test_data))
    p2 = np.expm1(model2.predict(test_data))
    p_total = (p1 + p2)
    return p_total
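A hedged usage sketch for merge_predict, with two scikit-learn regressors trained on log1p targets as illustrative stand-ins for the original models:

import numpy as np
from sklearn.linear_model import Ridge
from sklearn.tree import DecisionTreeRegressor

rng = np.random.default_rng(1)
X = rng.uniform(size=(200, 4))
y = np.exp(2.0 * X[:, 0]) - 1.0

m1 = DecisionTreeRegressor(max_depth=4, random_state=0).fit(X, np.log1p(y))
m2 = Ridge().fit(X, np.log1p(y))
print(merge_predict(m1, m2, X[:5]))  # summed predictions, back in original units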