This article collects typical usage examples of the Python method theano.tensor.nnet.sigmoid. If you have been wondering what nnet.sigmoid does and how to use it, the curated code examples below may help. You can also explore further usage of the containing module, theano.tensor.nnet.
The following presents 13 code examples of nnet.sigmoid, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better Python code examples.
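As a warm-up, here is a minimal, self-contained sketch (not taken from any of the projects below) showing what nnet.sigmoid computes and how it is typically compiled into a callable function:

import numpy as np
import theano
import theano.tensor as T
from theano.tensor import nnet

x = T.dmatrix('x')                          # symbolic input matrix
f = theano.function([x], nnet.sigmoid(x))   # elementwise 1 / (1 + exp(-x))
print(f(np.array([[0.0, 2.0], [-2.0, 4.0]])))  # sigmoid(0) == 0.5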
Example 1: __init__
# Required import: from theano.tensor import nnet [as alias]
# Or: from theano.tensor.nnet import sigmoid [as alias]
def __init__(self, n_in, n_out, activation_fn=sigmoid, p_dropout=0.0):
    self.n_in = n_in
    self.n_out = n_out
    self.activation_fn = activation_fn
    self.p_dropout = p_dropout
    # Initialize weights and biases
    self.w = theano.shared(
        np.asarray(
            np.random.normal(
                loc=0.0, scale=np.sqrt(1.0/n_out), size=(n_in, n_out)),
            dtype=theano.config.floatX),
        name='w', borrow=True)
    self.b = theano.shared(
        np.asarray(np.random.normal(loc=0.0, scale=1.0, size=(n_out,)),
                   dtype=theano.config.floatX),
        name='b', borrow=True)
    self.params = [self.w, self.b]
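This __init__ reads like the constructor of a fully connected layer. As a hedged companion, a forward pass could apply activation_fn as in the sketch below; the set_inpt name, the inpt argument, and the inference-time dropout scaling are assumptions for illustration, not part of the project above:

# Hypothetical companion method; names and dropout scaling are assumptions.
def set_inpt(self, inpt, mini_batch_size):
    self.inpt = inpt.reshape((mini_batch_size, self.n_in))
    # scale by (1 - p_dropout) so inference matches dropout training
    self.output = self.activation_fn(
        (1 - self.p_dropout) * T.dot(self.inpt, self.w) + self.b)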
Example 2: mean_h_given_v
# Required import: from theano.tensor import nnet [as alias]
# Or: from theano.tensor.nnet import sigmoid [as alias]
def mean_h_given_v(self, v):
    """
    Compute the mean activation of the hidden units given visible unit
    configurations for a set of training examples.

    Parameters
    ----------
    v : tensor_like or list of tensor_likes
        Theano symbolic (or list thereof) representing the visible unit
        states for a batch (or several) of training examples, with the
        first dimension indexing training examples and the second
        indexing data dimensions.

    Returns
    -------
    h : tensor_like or list of tensor_likes
        Theano symbolic (or list thereof) representing the mean
        (deterministic) hidden unit activations given the visible units.
    """
    if isinstance(v, tensor.Variable):
        return nnet.sigmoid(self.input_to_h_from_v(v))
    else:
        return [self.mean_h_given_v(vis) for vis in v]
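In equation form, assuming the usual binary RBM parameterization with weights W and hidden biases c, the mean hidden activation computed above is

    p(h_j = 1 \mid v) = \sigma\Big(c_j + \sum_i v_i W_{ij}\Big), \qquad \sigma(x) = \frac{1}{1 + e^{-x}}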
Example 3: mean_v_given_h
# Required import: from theano.tensor import nnet [as alias]
# Or: from theano.tensor.nnet import sigmoid [as alias]
def mean_v_given_h(self, h):
    """
    Compute the mean activation of the visible units given hidden unit
    configurations for a set of training examples.

    Parameters
    ----------
    h : tensor_like or list of tensor_likes
        Theano symbolic (or list thereof) representing the hidden unit
        states for a batch (or several) of training examples, with the
        first dimension indexing training examples and the second
        indexing hidden units.

    Returns
    -------
    vprime : tensor_like or list of tensor_likes
        Theano symbolic (or list thereof) representing the mean
        (deterministic) reconstruction of the visible units given the
        hidden units.
    """
    if isinstance(h, tensor.Variable):
        return nnet.sigmoid(self.input_to_v_from_h(h))
    else:
        return [self.mean_v_given_h(hid) for hid in h]
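Together, Examples 2 and 3 form the two halves of a deterministic (mean-field) reconstruction pass. A hedged sketch, assuming an rbm object exposing both methods:

# Hypothetical one-step mean reconstruction built from Examples 2 and 3.
v = tensor.matrix('v')
h_mean = rbm.mean_h_given_v(v)        # E[h | v], computed via nnet.sigmoid
v_recon = rbm.mean_v_given_h(h_mean)  # E[v | h], the mean reconstruction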
Example 4: input_to_h_from_v
# Required import: from theano.tensor import nnet [as alias]
# Or: from theano.tensor.nnet import sigmoid [as alias]
def input_to_h_from_v(self, v):
    """
    .. todo::

        WRITEME
    """
    D = self.Lambda
    alpha = self.alpha

    def sum_s(x):
        return x.reshape((
            -1,
            self.nhid,
            self.n_s_per_h)).sum(axis=2)

    return tensor.add(
        self.b,
        -0.5 * tensor.dot(v * v, D),
        sum_s(self.mu * tensor.dot(v, self.W)),
        sum_s(0.5 * tensor.sqr(tensor.dot(v, self.W)) / alpha))

#def mean_h_given_v(self, v):
#    inherited version is OK:
#    return nnet.sigmoid(self.input_to_h_from_v(v))
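Read as an equation (a reconstruction from the code above, with \Lambda = self.Lambda and sum_s pooling the n_s_per_h slab variables of each hidden unit, as in a spike-and-slab RBM), the hidden pre-activation appears to be

    I(v) = b - \tfrac{1}{2}(v \circ v)\Lambda + \mathrm{sum}_s\big(\mu \circ vW\big) + \mathrm{sum}_s\big(\tfrac{1}{2\alpha}(vW)^{\circ 2}\big)

so that, as the commented-out method notes, mean_h_given_v(v) = sigmoid(I(v)).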
Example 5: compute_sub_all_scores
# Required import: from theano.tensor import nnet [as alias]
# Or: from theano.tensor.nnet import sigmoid [as alias]
def compute_sub_all_scores(self, start_end):
    plu = softmax(T.dot(self.trained_users[start_end], self.trained_items.T))[:, :-1]  # (n_batch, n_item)
    length = T.max(T.sum(self.tes_masks[start_end], axis=1))  # e.g. 253
    cidx = T.arange(length).reshape((1, length)) + self.tra_accum_lens[start_end][:, 0].reshape((len(start_end), 1))
    cl = T.sum(self.trained_items[self.tra_context_masks[cidx]], axis=2)  # n_batch x seq_length x n_size
    cl = cl.dimshuffle(1, 2, 0)
    pb = self.trained_branch[self.routes]  # (n_item x 4 x tree_depth x n_size)
    shp0, shp1, shp2 = self.lrs.shape
    lrs = self.lrs.reshape((shp0, shp1, shp2, 1, 1))
    pr_bc = T.dot(pb, cl)
    br = sigmoid(pr_bc * lrs) * T.ceil(abs(pr_bc))  # (n_item x 4 x tree_depth x seq_length x n_batch)
    path = T.prod(br, axis=2) * self.probs.reshape((shp0, shp1, 1, 1))
    del cl, pb, br, lrs
    # paths = T.prod((T.floor(1 - path) + path), axis=1)  # (n_item x seq_length x n_batch)
    paths = T.sum(path, axis=1)
    paths = T.floor(1 - paths) + paths
    p = paths[:-1].T * plu.reshape((plu.shape[0], 1, plu.shape[1]))  # (n_batch x n_item)
    # p = plu.reshape((plu.shape[0], 1, plu.shape[1])) * T.ones((plu.shape[0], length, plu.shape[1]))
    return T.reshape(p, (p.shape[0] * p.shape[1], p.shape[2])).eval()
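The line paths = T.floor(1 - paths) + paths is a masking idiom: for values in [0, 1) it maps exact zeros to 1 and leaves everything else unchanged, so padded positions contribute a neutral factor. A small sketch to verify the idiom (not from the project above):

import numpy as np
import theano
import theano.tensor as T

x = T.dvector('x')
f = theano.function([x], T.floor(1 - x) + x)
print(f(np.array([0.0, 0.3, 0.9])))  # -> [1.0, 0.3, 0.9]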
Example 6: set_output
# Required import: from theano.tensor import nnet [as alias]
# Or: from theano.tensor.nnet import sigmoid [as alias]
def set_output(self):
    self._output = sigmoid(self._prev_layer.output)
Example 7: build_prediction
# Required import: from theano.tensor import nnet [as alias]
# Or: from theano.tensor.nnet import sigmoid [as alias]
def build_prediction(self):
    # return NN.softmax(self.activation)  # use this line to expose a slow subtensor implementation
    return NN.sigmoid(self.activation)
Example 8: __init__
# Required import: from theano.tensor import nnet [as alias]
# Or: from theano.tensor.nnet import sigmoid [as alias]
def __init__(self,
             input=tensor.dvector('input'),
             target=tensor.dvector('target'),
             n_input=1, n_hidden=1, n_output=1, lr=1e-3, **kw):
    super(NNet, self).__init__(**kw)

    self.input = input
    self.target = target
    self.lr = shared(lr, 'learning_rate')
    self.w1 = shared(numpy.zeros((n_hidden, n_input)), 'w1')
    self.w2 = shared(numpy.zeros((n_output, n_hidden)), 'w2')
    # print self.lr.type

    self.hidden = sigmoid(tensor.dot(self.w1, self.input))
    self.output = tensor.dot(self.w2, self.hidden)
    self.cost = tensor.sum((self.output - self.target)**2)

    self.sgd_updates = {
        self.w1: self.w1 - self.lr * tensor.grad(self.cost, self.w1),
        self.w2: self.w2 - self.lr * tensor.grad(self.cost, self.w2)}

    self.sgd_step = pfunc(
        params=[self.input, self.target],
        outputs=[self.output, self.cost],
        updates=self.sgd_updates)

    self.compute_output = pfunc([self.input], self.output)
    self.output_from_hidden = pfunc([self.hidden], self.output)
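A hedged usage sketch for the class above, assuming it is defined as NNet with numpy, shared, and pfunc imported as in the snippet (the toy data is for illustration only):

# Hypothetical training loop on a toy 1-D regression target.
net = NNet(n_input=1, n_hidden=4, n_output=1, lr=1e-2)
for _ in range(100):
    out, cost = net.sgd_step(numpy.array([0.5]), numpy.array([0.25]))
print(net.compute_output(numpy.array([0.5])))  # should move toward 0.25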
Example 9: exe_time
# Required import: from theano.tensor import nnet [as alias]
# Or: from theano.tensor.nnet import sigmoid [as alias]
def exe_time(func):
    def new_func(*args, **args2):
        t0 = time.time()
        print("-- @%s, {%s} start" % (time.strftime("%X", time.localtime()), func.__name__))
        back = func(*args, **args2)
        print("-- @%s, {%s} end" % (time.strftime("%X", time.localtime()), func.__name__))
        print("-- @%.3fs taken for {%s}" % (time.time() - t0, func.__name__))
        return back
    return new_func

# Training output: h*x → sigmoid(T.sum(h*(xp-xq), axis=1))
# Prediction: h*x → np.dot(h, x.T)
# ======================================================================================================================
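A hedged usage sketch of the timing decorator (the decorated function and the printed timestamps are illustrative, not from the project above):

# Hypothetical usage: time any function by decorating it.
@exe_time
def train_one_epoch():
    time.sleep(0.1)  # stand-in for real work

train_one_epoch()
# -- @14:02:10, {train_one_epoch} start   (illustrative output)
# -- @14:02:10, {train_one_epoch} end
# -- @0.100s taken for {train_one_epoch}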
Example 10: predict
# Required import: from theano.tensor import nnet [as alias]
# Or: from theano.tensor.nnet import sigmoid [as alias]
def predict(self, idxs):
    return self.seq_predict(idxs)

# Training output: h*x → sigmoid(T.sum([hx, hm]*([xp, mp] - [xq, mq])))
# Prediction: h*x → np.dot([hx, hm], [x, m].T)
# ======================================================================================================================
Example 11: __theano_train__
# Required import: from theano.tensor import nnet [as alias]
# Or: from theano.tensor.nnet import sigmoid [as alias]
def __theano_train__(self):
    """
    Run through the training sequence once during the training phase.
    """
    # self.alpha_lambda = ['alpha', 'lambda']
    uidx, pqidx = T.iscalar(), T.ivector()
    usr = self.ux[uidx]  # shape=(n_in, )
    xpq = self.lt[pqidx]
    """
    Given the positive and negative samples at time t, compute the current
    loss and update the user and the positive/negative item vectors.
    The time index t is omitted in the formulas.
    # Note that T.dot((n, ), (n, )) yields a scalar:
    uij = user * (xp - xq)
    upq = log(sigmoid(uij))
    """
    uij = T.dot(usr, xpq[0] - xpq[1])
    upq = T.log(sigmoid(uij))
    # ----------------------------------------------------------------------------
    # cost, gradients, learning rate, L2 regularization
    lr, l2 = self.alpha_lambda[0], self.alpha_lambda[1]
    bpr_l2_sqr = (
        T.sum([T.sum(par ** 2) for par in [usr, xpq]]))
    costs = (
        - upq +
        0.5 * l2 * bpr_l2_sqr)
    # One user and two items: taking gradients w.r.t. these subtensors is the
    # fastest way to update.
    pars_subs = [(self.ux, usr), (self.lt, xpq)]
    seq_updates = [(par, T.set_subtensor(sub, sub - lr * T.grad(costs, sub)))
                   for par, sub in pars_subs]
    # ----------------------------------------------------------------------------
    # Given the user index and positive/negative item indices (plus the other
    # parameters), update the variables and return the loss.
    self.bpr_train = theano.function(
        inputs=[uidx, pqidx],
        outputs=-upq,
        updates=seq_updates)
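In equation form, the cost above is the negated, L2-regularized BPR objective for a user u with positive item x_p and negative item x_q:

    \mathcal{L}(u, p, q) = -\ln \sigma\big(u^\top (x_p - x_q)\big) + \frac{\lambda}{2}\big(\lVert u \rVert^2 + \lVert x_p \rVert^2 + \lVert x_q \rVert^2\big)

Minimizing \mathcal{L} pushes the positive item's score above the negative one while keeping the embeddings small.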
Example 12: rbm_ais_gibbs_for_v
# Required import: from theano.tensor import nnet [as alias]
# Or: from theano.tensor.nnet import sigmoid [as alias]
def rbm_ais_gibbs_for_v(rbmA_params, rbmB_params, beta, v_sample, seed=23098):
    """
    .. todo::

        WRITEME

    Parameters
    ----------
    rbmA_params : list
        Parameters of the baserate model (usually infinite temperature).
        List should be of length 3 and contain numpy.ndarrays
        corresponding to model parameters (weights, visbias, hidbias).
    rbmB_params : list
        Similar to `rbmA_params`, but for model at temperature 1.
    beta : theano.shared
        Scalar, the inverse temperature at which we wish to sample.
    v_sample : theano.shared
        Matrix of shape (n_runs, nvis), state of current particles.
    seed : int, optional
        Optional seed parameter for sampling from binomial units.
    """
    (weights_a, visbias_a, hidbias_a) = rbmA_params
    (weights_b, visbias_b, hidbias_b) = rbmB_params

    theano_rng = make_theano_rng(seed, which_method='binomial')

    # equation 15 (Salakhutdinov & Murray 2008)
    ph_a = nnet.sigmoid((1 - beta) * (tensor.dot(v_sample, weights_a) +
                                      hidbias_a))
    ha_sample = theano_rng.binomial(size=(v_sample.shape[0], len(hidbias_a)),
                                    n=1, p=ph_a, dtype=config.floatX)

    # equation 16 (Salakhutdinov & Murray 2008)
    ph_b = nnet.sigmoid(beta * (tensor.dot(v_sample, weights_b) + hidbias_b))
    hb_sample = theano_rng.binomial(size=(v_sample.shape[0], len(hidbias_b)),
                                    n=1, p=ph_b, dtype=config.floatX)

    # equation 17 (Salakhutdinov & Murray 2008)
    pv_act = (1 - beta) * (tensor.dot(ha_sample, weights_a.T) + visbias_a) + \
             beta * (tensor.dot(hb_sample, weights_b.T) + visbias_b)
    pv = nnet.sigmoid(pv_act)
    new_v_sample = theano_rng.binomial(
        size=(v_sample.shape[0], len(visbias_b)),
        n=1, p=pv, dtype=config.floatX)

    return new_v_sample
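In equation form, the three sampling steps implement equations 15-17 of Salakhutdinov & Murray (2008) for the intermediate-temperature Gibbs chain:

    p(h^A = 1 \mid v) = \sigma\big((1 - \beta)(v W_A + c_A)\big)
    p(h^B = 1 \mid v) = \sigma\big(\beta (v W_B + c_B)\big)
    p(v' = 1 \mid h^A, h^B) = \sigma\big((1 - \beta)(h^A W_A^\top + b_A) + \beta(h^B W_B^\top + b_B)\big)

where (W_A, b_A, c_A) and (W_B, b_B, c_B) are the weights, visible biases, and hidden biases of the two RBMs.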
Example 13: __theano_predict__
# Required import: from theano.tensor import nnet [as alias]
# Or: from theano.tensor.nnet import sigmoid [as alias]
def __theano_predict__(self, n_in, n_hidden):
    """
    At test time, run through the training sequences once more to obtain the
    hidden layers, computing all users' representations from the full data
    in a single pass.
    """
    ui, wh = self.ui, self.wh

    tra_mask = T.imatrix()
    actual_batch_size = tra_mask.shape[0]
    seq_length = T.max(T.sum(tra_mask, axis=1))  # the longest sequence in the mini-batch becomes seq_length

    h0 = T.alloc(self.h0, actual_batch_size, n_hidden)     # shape=(n, 20)
    bi = T.alloc(self.bi, actual_batch_size, 3, n_hidden)  # shape=(n, 3, 20), original dims moved to the end
    bi = bi.dimshuffle(1, 2, 0)                            # shape=(3, 20, n)

    # The hidden layer is a single GRU unit; this uniform formulation covers it.
    pidxs = T.imatrix()
    ps = self.trained_items[pidxs]  # shape=(actual_batch_size, seq_length, n_hidden)
    ps = ps.dimshuffle(1, 0, 2)     # shape=(seq_length, batch_size, n_hidden)=(157, n, 20)

    def recurrence(p_t, h_t_pre1):
        # Features and hidden state are both shaped (batch_size, n_hidden)=(n, 20).
        z_r = sigmoid(T.dot(ui[:2], p_t.T) +
                      T.dot(wh[:2], h_t_pre1.T) + bi[:2])
        z, r = z_r[0].T, z_r[1].T                         # shape=(n, 20)
        c = tanh(T.dot(ui[2], p_t.T) +
                 T.dot(wh[2], (r * h_t_pre1).T) + bi[2])  # shape=(20, n)
        h_t = (T.ones_like(z) - z) * h_t_pre1 + z * c.T   # shape=(n, 20)
        return h_t

    h, _ = theano.scan(  # h.shape=(157, n, 20)
        fn=recurrence,
        sequences=ps,
        outputs_info=h0,
        n_steps=seq_length)

    # batch_hts.shape=(n, 20): the representation ht of each user in this batch.
    # T.sum() is required here; otherwise the expression cannot be built into
    # the Theano graph and a "length not known" error is raised.
    hs = h.dimshuffle(1, 0, 2)  # shape=(batch_size, seq_length, n_hidden)
    hts = hs[                   # shape=(n, n_hidden)
        T.arange(actual_batch_size),  # rows: fancy indexing a[[1,2,3],[2,5,6]] needs both row and column indices
        T.sum(tra_mask, axis=1) - 1]  # columns: the mask must be of dtype 'int32'
    # feed the data via givens
    start_end = T.ivector()
    self.seq_predict = theano.function(
        inputs=[start_end],
        outputs=hts,
        givens={
            pidxs: self.tra_buys_masks[start_end],  # type: TensorType(int32, matrix)
            tra_mask: self.tra_masks[start_end]})
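The recurrence above is a standard GRU step. In equation form, with U = ui, W = wh, b = bi, and p_t the item features at step t:

    [z_t; r_t] = \sigma(U_{zr}\, p_t + W_{zr}\, h_{t-1} + b_{zr})
    \tilde{h}_t = \tanh(U_c\, p_t + W_c (r_t \circ h_{t-1}) + b_c)
    h_t = (1 - z_t) \circ h_{t-1} + z_t \circ \tilde{h}_t

so hts collects h_t at each user's last valid position, giving one representation per user.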