This page collects typical usage examples of the theano.tensor.pow function in Python. If you are wondering how exactly to use the pow function, or what it is typically used for, the curated code examples below may help.
The following presents 15 code examples of the pow function, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Python code samples.
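Before the examples, here is a minimal self-contained sketch (not taken from any of the projects below) of what theano.tensor.pow does: it is the elementwise power operation, equivalent to the ** operator on Theano tensors.

import theano
import theano.tensor as T

a = T.dvector('a')
b = T.dvector('b')
power = theano.function([a, b], T.pow(a, b))   # elementwise a ** b

print(power([1.0, 2.0, 3.0], [2.0, 0.5, 3.0]))  # -> [1.0, ~1.414, 27.0]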
Example 1: get_testing_function
def get_testing_function(self, test_data, test_mask, pct_blackout=0.5):
    raise NotImplementedError("fix me!")
    i, batch_size = T.iscalars('i', 'batch_size')
    self.test_noise = T.shared_randomstreams.RandomStreams(1234).binomial(
        (self.inputs.shape), n=1, p=1 - pct_blackout,
        dtype=theano.config.floatX)
    self.test_noisy = self.test_noise * self.inputs
    self.test_active_hidden = T.nnet.sigmoid(T.dot(self.test_noisy, self.W) + self.b_in)
    self.test_output = T.nnet.sigmoid(T.dot(self.test_active_hidden, self.W.T) + self.b_out)
    # root mean squared error of unknowns only:
    # taking the original input vector's mask of which beers had no input information (no rating),
    # mask out any predicted ratings where the original beer had no rating,
    # so we aren't affecting the error in dimensions where the original input carries no meaningful information.
    # flattened output vector = (mask of which items we withheld from the network, so we only test
    # the accuracy of non-inputted answers) . (input mask . full output vector)
    self.only_originally_unknown = T.dot(1 - self.test_noise, T.dot(self.inputs_mask, self.test_output))
    self.test_error = T.pow(T.mean(T.pow(T.dot(self.inputs_mask, self.test_output) - self.inputs, 2)), 0.5)
    self.testing_function = theano.function([i, batch_size], self.test_error,
                                            givens={self.inputs: test_data[i:i + batch_size],
                                                    self.inputs_mask: test_mask[i:i + batch_size]})
    return self.testing_function
Example 2: __init__
def __init__(self, inputData, image_shape):
    self.input = inputData
    num_out = image_shape[1]
    epsilon = 0.01
    self.image_shape = image_shape
    gamma_values = numpy.ones((num_out,), dtype=theano.config.floatX)
    self.gamma_vals = theano.shared(value=gamma_values, borrow=True)
    beta_values = numpy.zeros((num_out,), dtype=theano.config.floatX)
    self.beta_vals = theano.shared(value=beta_values, borrow=True)
    batch_mean = T.mean(self.input, keepdims=True, axis=(0, 2, 3))
    batch_var = T.var(self.input, keepdims=True, axis=(0, 2, 3)) + epsilon
    self.batch_mean = self.adjustVals(batch_mean)
    batch_var = self.adjustVals(batch_var)
    self.batch_var = T.pow(batch_var, 0.5)
    batch_normalize = (inputData - self.batch_mean) / (T.pow(self.batch_var, 0.5))
    self.beta = self.beta_vals.dimshuffle('x', 0, 'x', 'x')
    self.gamma = self.gamma_vals.dimshuffle('x', 0, 'x', 'x')
    self.output = batch_normalize * self.gamma + self.beta
    # self.output = inputData - self.batch_mean
    self.params = [self.gamma_vals, self.beta_vals]
Example 3: test_0
def test_0():
    N = 16*1000*10*1
    if 1:
        aval = abs(numpy.random.randn(N).astype('float32')) + .1
        bval = numpy.random.randn(N).astype('float32')
        a = T.fvector()
        b = T.fvector()
    else:
        aval = abs(numpy.random.randn(N)) + .1
        bval = numpy.random.randn(N)
        a = T.dvector()
        b = T.dvector()
    f = theano.function([a, b], T.pow(a, b), mode='LAZY')
    theano_opencl.elemwise.swap_impls = False
    g = theano.function([a, b], T.pow(a, b), mode='LAZY')
    print('ocl time', timeit.Timer(lambda: f(aval, bval)).repeat(3, 3))
    print('gcc time', timeit.Timer(lambda: g(aval, bval)).repeat(3, 3))
    print('numpy time', timeit.Timer(lambda: aval**bval).repeat(3, 3))
    assert ((f(aval, bval) - aval**bval)**2).sum() < 1.1
    assert ((g(aval, bval) - aval**bval)**2).sum() < 1.1
Example 4: _step
def _step(self, xg_t, xo_t, xc_t, mask_tm1, h_tm1, c_tm1, u_g, u_o, u_c):
    h_mask_tm1 = mask_tm1 * h_tm1
    c_mask_tm1 = mask_tm1 * c_tm1
    act = T.tensordot(xg_t + h_mask_tm1, u_g, [[1], [2]])
    gate = T.nnet.softmax(act.reshape((-1, act.shape[-1]))).reshape(act.shape)
    c_tilda = self.activation(xc_t + T.dot(h_mask_tm1, u_c))
    sigma_se = self.k_parameters[0]
    sigma_per = self.k_parameters[1]
    sigma_b_lin = self.k_parameters[2]
    sigma_v_lin = self.k_parameters[3]
    sigma_rq = self.k_parameters[4]
    l_se = self.k_parameters[5]
    l_per = self.k_parameters[6]
    l_lin = self.k_parameters[7]
    l_rq = self.k_parameters[8]
    alpha_rq = self.k_parameters[9]
    p_per = self.k_parameters[10]
    k_se = T.pow(sigma_se, 2) * T.exp(-T.pow(c_mask_tm1 - c_tilda, 2) / (2 * T.pow(l_se, 2) + self.EPS))
    k_per = T.pow(sigma_per, 2) * T.exp(-2 * T.pow(T.sin(math.pi * (c_mask_tm1 - c_tilda) / (p_per + self.EPS)), 2) / (T.pow(l_per, 2) + self.EPS))
    k_lin = T.pow(sigma_b_lin, 2) + T.pow(sigma_v_lin, 2) * (c_mask_tm1 - l_lin) * (c_tilda - l_lin)
    k_rq = T.pow(sigma_rq, 2) * T.pow(1 + T.pow(c_mask_tm1 - c_tilda, 2) / (2 * alpha_rq * T.pow(l_rq, 2) + self.EPS), -alpha_rq)
    ops = [c_mask_tm1, c_tilda, k_se, k_per, k_lin, k_rq]
    yshuff = T.as_tensor_variable(ops, name='yshuff').dimshuffle(1, 2, 0)
    c_t = (gate.reshape((-1, gate.shape[-1])) * yshuff.reshape((-1, yshuff.shape[-1]))).sum(axis=1).reshape(gate.shape[:2])
    o_t = self.inner_activation(xo_t + T.dot(h_mask_tm1, u_o))
    h_t = o_t * self.activation(c_t)
    return h_t, c_t
Example 5: _model_setup
def _model_setup(self):
    with self._model:
        # COSMOLOGY
        omega_m = pm.Uniform("OmegaM", lower=0, upper=1.)
        # dark energy EOS
        w = pm.Normal("w", mu=-1, sd=1)
        # My custom distance modulus function to enable
        # ADVI and HMC sampling.
        dm = distmod_w_flat(omega_m, self._h0, w, self._zcmb)

        # PHILLIPS PARAMETERS
        # M0 is the location parameter for the distribution;
        # sys_scat is the scale parameter for the M0 distribution
        # rather than "unexplained variance".
        M0 = pm.Normal("M0", mu=-19.3, sd=2.)
        sys_scat = pm.HalfCauchy('sys_scat', beta=2.5)  # Gelman recommendation for variance parameter
        M_true = pm.Normal('M_true', M0, sys_scat, shape=self._n_SN)

        # following Rubin's Unity model... best idea? not sure
        taninv_alpha = pm.Uniform("taninv_alpha", lower=-.2, upper=.3)
        taninv_beta = pm.Uniform("taninv_beta", lower=-1.4, upper=1.4)

        # Transform variables
        alpha = pm.Deterministic('alpha', T.tan(taninv_alpha))
        beta = pm.Deterministic('beta', T.tan(taninv_beta))

        # Again using Rubin's Unity model.
        # After discussion with Rubin, the idea is that
        # these parameters are ideally sampled from a Gaussian,
        # but we know they are not entirely correct. So instead,
        # the Cauchy is less informative around the mean, while
        # still having informative tails.
        xm = pm.Cauchy('xm', alpha=0, beta=1)
        cm = pm.Cauchy('cm', alpha=0, beta=1)
        Rx_log = pm.Uniform('Rx_log', lower=-0.5, upper=0.5)
        Rc_log = pm.Uniform('Rc_log', lower=-1.5, upper=1.5)

        # Transformed variables
        Rx = pm.Deterministic("Rx", T.pow(10., Rx_log))
        Rc = pm.Deterministic("Rc", T.pow(10., Rc_log))
        x_true = pm.Normal('x_true', mu=xm, sd=Rx, shape=self._n_SN)
        c_true = pm.Normal('c_true', mu=cm, sd=Rc, shape=self._n_SN)

        # Do the correction
        mb = pm.Deterministic("mb", M_true + dm - alpha * x_true + beta * c_true)

        # Likelihood and measurement error
        obsc = pm.Normal("obsc", mu=c_true, sd=self._dcolor, observed=self._color)
        obsx = pm.Normal("obsx", mu=x_true, sd=self._dx1, observed=self._x1)
        obsm = pm.Normal("obsm", mu=mb, sd=self._dmb_obs, observed=self._mb_obs)
Example 6: lp_norm
def lp_norm(self, n, k, r, c, z):
    '''
    Lp = (1/n * sum(|x_i|^p, i=1..n))^(1/p) where p = 1 + ln(1 + e^P)
    :param n: example (first axis) index into self.y
    :param k: feature-map (second axis) index into self.y
    :param r: output row index
    :param c: output column index
    :param z: output tensor to write into
    :return: z with the pooled value written at [n, k, r, c]
    '''
    ds0, ds1 = self.pool_size
    st0, st1 = self.stride
    pad_h = self.pad[0]
    pad_w = self.pad[1]

    row_st = r * st0
    row_end = T.minimum(row_st + ds0, self.img_rows)
    row_st = T.maximum(row_st, self.pad[0])
    row_end = T.minimum(row_end, self.x_m2d + pad_h)

    col_st = c * st1
    col_end = T.minimum(col_st + ds1, self.img_cols)
    col_st = T.maximum(col_st, self.pad[1])
    col_end = T.minimum(col_end, self.x_m1d + pad_w)

    Lp = T.pow(
        T.mean(T.pow(
            T.abs_(T.flatten(self.y[n, k, row_st:row_end, col_st:col_end], 1)),
            1 + T.log(1 + T.exp(self.P))
        )),
        1 / (1 + T.log(1 + T.exp(self.P)))
    )
    return T.set_subtensor(z[n, k, r, c], Lp)
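The pooled value above is an Lp norm whose exponent p = 1 + ln(1 + e^P) is learned through the parameter P. Here is a standalone sketch of the same formula applied to a single flattened pooling window (the window values and the fixed P below are illustrative, not taken from the original layer):

import numpy as np
import theano
import theano.tensor as T

x = T.dvector('x')   # one flattened pooling window
P = T.dscalar('P')   # unconstrained parameter; p = 1 + log(1 + exp(P)) >= 1
p = 1 + T.log(1 + T.exp(P))
lp = T.pow(T.mean(T.pow(T.abs_(x), p)), 1 / p)
lp_norm = theano.function([x, P], lp)

window = np.array([0.5, -2.0, 1.5, 0.25])
print(lp_norm(window, 0.0))  # p ~ 1.69: result lies between mean(|x|) and max(|x|)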
Example 7: get_reg_ind
def get_reg_ind(self):
    drop_ax, drop_ay = T.pow(T.exp(self.params[-2]), 2), T.pow(T.exp(self.params[-1]), 2)
    constant = np.cast[theano.config.floatX](.5 * np.log(self.noise_lvl) + c1 * self.noise_lvl + c2 * (self.noise_lvl**2) + c3 * (self.noise_lvl**3))
    reg_indx = .5 * T.log(drop_ax) + c1 * drop_ax + c2 * T.pow(drop_ax, 2) + c3 * T.pow(drop_ax, 3) - constant
    reg_indy = .5 * T.log(drop_ay) + c1 * drop_ay + c2 * T.pow(drop_ay, 2) + c3 * T.pow(drop_ay, 3) - constant
    reg_ind = T.cast(T.prod(self.params[3].shape), theano.config.floatX) * reg_indx + T.cast(T.prod(self.params[4].shape), theano.config.floatX) * reg_indy
    return reg_ind
Example 8: finetune_cost_updates
def finetune_cost_updates(self, center, mu, learning_rate):
    """ This function computes the cost and the updates. """
    # note : we sum over the size of a datapoint; if we are using
    #        minibatches, L will be a vector, with one entry per
    #        example in the minibatch
    network_output = self.get_output()
    temp = T.pow(center - network_output, 2)
    L = T.sum(temp, axis=1)
    # Add the network reconstruction error
    z = self.get_network_reconst()
    reconst_err = T.sum(T.pow(self.x - z, 2), axis=1)
    L = self.beta * L + self.lbd * reconst_err

    cost1 = T.mean(L)
    cost2 = self.lbd * T.mean(reconst_err)
    cost3 = cost1 - cost2

    # compute the gradients of the cost of the `dA` with respect
    # to its parameters
    gparams = T.grad(cost1, self.params)
    # generate the list of updates
    updates = []
    grad_values = []
    param_norm = []
    for param, delta, gparam in zip(self.params, self.delta, gparams):
        updates.append((delta, mu * delta - learning_rate * gparam))
        updates.append((param, param + mu * mu * delta - (1 + mu) * learning_rate * gparam))
        grad_values.append(gparam.norm(L=2))
        param_norm.append(param.norm(L=2))
    grad_ = T.stack(*grad_values)
    param_ = T.stack(*param_norm)
    return ((cost1, cost2, cost3, grad_, param_), updates)
Example 9: __init__
def __init__(self, n_in):
    self.X = theanoTensor.matrix('X', dtype=theano.config.floatX)
    self.y = theanoTensor.vector('y', dtype=theano.config.floatX)
    self.W = theano.shared(name='W',
                           value=np.ones((n_in,), dtype=theano.config.floatX),
                           borrow=True)
    self.b = theano.shared(name='b',
                           value=np.cast[theano.config.floatX](0.0),
                           borrow=True)

    y_pred = theanoTensor.dot(self.X, self.W) + self.b
    self.MSe = theanoTensor.mean(theanoTensor.pow(y_pred - self.y, 2))
    self.MSy = theanoTensor.mean(theanoTensor.pow(self.y, 2))
    self.R2 = 1 - (self.MSe / self.MSy)

    paramList = [self.W, self.b]
    grad_wrtParams = theanoTensor.grad(self.getMSE(), wrt=paramList)
    learning_rate = 1e-3
    updates = [(p[0], p[0] - learning_rate * p[1]) for p in zip(paramList, grad_wrtParams)]

    self.train_model = theano.function(
        inputs=[self.X, self.y],
        outputs=[self.getMSE()],
        updates=updates
    )
    self.test_model = theano.function(
        inputs=[self.X, self.y],
        outputs=[self.getR2()],
    )
Example 10: __call__
def __call__(self, loss):
    loss += self.l1 * T.sum(T.mean(abs(self.layer.get_output(True)), axis=0))
    loss += self.l2 * T.sum(T.mean(self.layer.get_output(True) ** 2, axis=0))
    loss += self.l_bin * T.sum(
        T.mean(T.pow(self.layer.get_output(True), self.k) * T.pow(1 - self.layer.get_output(True), self.k), axis=0)
    )
    return loss
Example 11: objective
def objective(x):
    """
    objective function
    @param x: input vector
    @return: value of objective function
    """
    z = x - objective.offset
    return T.sum(T.pow(z, 4) - 16 * T.pow(z, 2) + 5 * z, axis=1) / 2
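Apart from the offset, this is the Styblinski-Tang test function, whose global minimum sits near x_i ≈ -2.903 in each dimension. A quick sketch evaluating the same expression, with the offset assumed to be zero (objective.offset is set outside the snippet above):

import numpy as np
import theano
import theano.tensor as T

x = T.dmatrix('x')                      # one candidate point per row
f = T.sum(T.pow(x, 4) - 16 * T.pow(x, 2) + 5 * x, axis=1) / 2
styblinski_tang = theano.function([x], f)

pts = np.array([[0.0, 0.0], [-2.903534, -2.903534]])
print(styblinski_tang(pts))             # second point is near the 2-D global minimum (~ -78.33)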
Example 12: evolve
def evolve(self, x, n, k, gamma):
    """ Compute time-derivative at current state
    Model: dx/dt = x^n / (x^n + K^n) - gamma*x
    This leads to single-species bistability.
    """
    dxdt = T.pow(x, n) / (T.pow(x, n) + T.pow(k, n)) - gamma * x
    return dxdt
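The right-hand side is a Hill-type production term minus linear degradation. A minimal standalone sketch that compiles and evaluates it for illustrative parameter values (n = 4, K = 1, gamma = 0.4, chosen here only for demonstration), where dx/dt changes sign more than once for x > 0, which is the bistability the docstring refers to:

import theano
import theano.tensor as T

x, n, k, gamma = T.dscalars('x', 'n', 'k', 'gamma')
dxdt = T.pow(x, n) / (T.pow(x, n) + T.pow(k, n)) - gamma * x
rate = theano.function([x, n, k, gamma], dxdt)

for xv in (0.1, 0.5, 1.0, 2.0, 3.0):
    print(xv, rate(xv, 4.0, 1.0, 0.4))  # negative, negative, positive, positive, negative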
Example 13: get_box_mask_se
def get_box_mask_se(a, b):
    '''
    return (batch_size, grid_num, box_num, 1) tensor as mask
    '''
    se = T.pow(T.pow(a - b, 2).sum(axis=-1), .5)
    sem = se.min(axis=-1, keepdims=True)  # find the box with the lowest square error
    se_mask = T.eq(se, sem).reshape((a.shape[0], a.shape[1], a.shape[2], 1))
    return se_mask
Example 14: gamma_params
def gamma_params(mode=10., sd=10.):
    '''
    Converts mode and sd to shape and rate of a gamma distribution.
    '''
    var = Tns.pow(sd, 2)
    rate = (mode + Tns.pow(Tns.pow(mode, 2) + 4 * var, 0.5)) / (2 * var)
    shape = 1 + mode * rate
    return shape, rate
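The conversion relies on a gamma distribution with shape k and rate r having mode (k - 1)/r and variance k/r^2. A quick NumPy check of the same algebra (independent of the Tns tensor alias used above; the function name below is just for illustration):

import numpy as np

def gamma_params_np(mode=10., sd=10.):
    # positive root of var*rate**2 - mode*rate - 1 = 0, then shape = 1 + mode*rate
    var = sd ** 2
    rate = (mode + np.sqrt(mode ** 2 + 4 * var)) / (2 * var)
    shape = 1 + mode * rate
    return shape, rate

shape, rate = gamma_params_np(10., 10.)
print((shape - 1) / rate)  # recovered mode     -> 10.0
print(shape / rate ** 2)   # recovered variance -> 100.0 == sd**2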
Example 15: get_input_vectors
def get_input_vectors(shape, phases, scaling, offset):
    x = T.repeat(offset[0] + T.arange(shape[0]) / scaling, shape[1] * phases).reshape(
        (shape[0], shape[1], phases)) * T.pow(2, T.arange(phases))
    y = T.repeat(T.tile(offset[1] + T.arange(shape[1]) / scaling, shape[0]).reshape(
        (shape[0], shape[1], 1)), phases, axis=2) * T.pow(2, T.arange(phases))
    z = T.tile(offset[2] + 10 * T.arange(phases), shape[0] * shape[1]).reshape((shape[0], shape[1], phases, 1))
    x = x.reshape((shape[0], shape[1], phases, 1))
    y = y.reshape((shape[0], shape[1], phases, 1))
    return T.concatenate([x, y, z], axis=3).reshape((shape[0] * shape[1] * phases, 3)).astype('float32')