This article collects typical usage examples of the Python function utils.sharedX. If you have been wondering what sharedX does, how to call it, or what it looks like in real code, the curated examples here should help.
Fifteen code examples of the sharedX function are shown below, drawn from open-source projects and ordered by popularity.
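All of the examples rely on the Theano helper sharedX, which wraps a numpy value in a theano.shared variable cast to theano.config.floatX so the model runs at the configured precision on CPU or GPU. The definition below is a minimal sketch of that common idiom (as found in Pylearn2-style utils modules); the exact utils.sharedX in these projects may differ in details:

import numpy
import theano

def sharedX(value, name=None, borrow=False):
    # Cast to floatX so the shared variable matches Theano's
    # configured precision (required for GPU execution).
    return theano.shared(numpy.asarray(value, dtype=theano.config.floatX),
                         name=name, borrow=borrow)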
Example 1: init_parameters
def init_parameters(self):
    # marginal precision on visible units
    self.lambd = sharedX(self.iscales['lambd'] * numpy.ones(self.n_v), name='lambd')
    # init scalar norm for each entry of Wv
    sn_val = self.iscales['scalar_norms'] * numpy.ones(self.n_f)
    self.scalar_norms = sharedX(sn_val, name='scalar_norms')
    # init weight matrices
    self.Wv = self.init_weight(1.0, (self.n_v, self.n_f), 'Wv')
    if self.sparse_gmask or self.sparse_hmask:
        assert self.sparse_gmask and self.sparse_hmask
        self.Wg = sharedX(self.sparse_gmask.mask * self.iscales.get('Wg', 1.0), name='Wg')
        self.Wh = sharedX(self.sparse_hmask.mask * self.iscales.get('Wh', 1.0), name='Wh')
    else:
        self.Wg = self.init_weight(1.0, (self.n_g, self.n_f), 'Wg')
        self.Wh = self.init_weight(1.0, (self.n_h, self.n_f), 'Wh')
    # bias parameters of g, h
    self.gbias = sharedX(self.iscales['gbias'] * numpy.ones(self.n_g), name='gbias')
    self.hbias = sharedX(self.iscales['hbias'] * numpy.ones(self.n_h), name='hbias')
    # mean (mu) and precision (alpha) parameters on s
    self.mu = sharedX(self.iscales['mu'] * numpy.ones(self.n_g), name='mu')
    self.alpha = sharedX(self.iscales['alpha'] * numpy.ones(self.n_g), name='alpha')
    # mean (eta) and precision (beta) parameters on t
    self.eta = sharedX(self.iscales['eta'] * numpy.ones(self.n_h), name='eta')
    self.beta = sharedX(self.iscales['beta'] * numpy.ones(self.n_h), name='beta')
    # optional reparametrization of precision parameters
    self.lambd_prec = T.nnet.softplus(self.lambd)
    self.alpha_prec = T.nnet.softplus(self.alpha)
    self.beta_prec = T.nnet.softplus(self.beta)
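A note on the softplus reparametrization at the end of Example 1: precisions such as lambd, alpha, and beta must stay positive, so the model stores unconstrained variables and maps them through softplus(x) = log(1 + exp(x)), which is positive for every real x. A small numpy illustration of the idea:

import numpy

raw = numpy.array([-2.0, 0.0, 3.0])   # unconstrained parameters
prec = numpy.log1p(numpy.exp(raw))    # softplus: strictly positive
assert (prec > 0).all()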
Example 2: init_parameters
def init_parameters(self):
    # init scalar norm for each entry of Wv
    sn_val = self.iscales['scalar_norms'] * numpy.ones(self.n_s)
    self.scalar_norms = sharedX(sn_val, name='scalar_norms')
    # init weight matrices
    normalize_wv = self.flags['wv_norm'] == 'unit'
    self.Wv = self.init_weight(self.iscales['Wv'], (self.n_v, self.n_s), 'Wv', normalize=normalize_wv)
    if self.sparse_gmask or self.sparse_hmask:
        assert self.sparse_gmask and self.sparse_hmask
        self.Wg = sharedX(self.sparse_gmask.mask * self.iscales.get('Wg', 1.0), name='Wg')
        self.Wh = sharedX(self.sparse_hmask.mask * self.iscales.get('Wh', 1.0), name='Wh')
    else:
        normalize_wg = self.flags['wg_norm'] == 'unit'
        normalize_wh = self.flags['wh_norm'] == 'unit'
        self.Wg = self.init_weight(self.iscales['Wg'], (self.n_g, self.n_s), 'Wg', normalize=normalize_wg)
        self.Wh = self.init_weight(self.iscales['Wh'], (self.n_h, self.n_s), 'Wh', normalize=normalize_wh)
    # avg column norm (for wgh_norm='roland')
    norm_wg = numpy.sqrt(numpy.sum(self.Wg.get_value()**2, axis=0)).mean()
    norm_wh = numpy.sqrt(numpy.sum(self.Wh.get_value()**2, axis=0)).mean()
    self.avg_norm_wg = sharedX(norm_wg, name='avg_norm_wg')
    self.avg_norm_wh = sharedX(norm_wh, name='avg_norm_wh')
    # allocate shared variables for bias parameters
    self.gbias = sharedX(self.iscales['gbias'] * numpy.ones(self.n_g), name='gbias')
    self.hbias = sharedX(self.iscales['hbias'] * numpy.ones(self.n_h), name='hbias')
    self.vbias = sharedX(self.iscales['vbias'] * numpy.ones(self.n_v), name='vbias')
    # mean (mu) and precision (alpha) parameters on s
    self.mu = sharedX(self.iscales['mu'] * numpy.ones(self.n_s), name='mu')
    self.alpha = sharedX(self.iscales['alpha'] * numpy.ones(self.n_s), name='alpha')
    self.alpha_prec = T.nnet.softplus(self.alpha)
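The init_weight method used above is not part of the listing. A plausible sketch, assuming (hypothetically) that it draws uniform noise scaled by iscale and, when normalize is true, rescales each column to unit L2 norm to match the 'unit' flags:

def init_weight(self, iscale, shape, name, normalize=False):
    # Hypothetical sketch, not the project's actual implementation.
    value = self.rng.uniform(low=-1., high=1., size=shape) * iscale
    if normalize:
        value /= numpy.sqrt(numpy.sum(value ** 2, axis=0))
    return sharedX(value, name=name)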
Example 3: init_chains
def init_chains(self):
    """ Allocate shared variable for persistent chain """
    # initialize buffers to store inference state
    self.pos_g = sharedX(numpy.zeros((self.batch_size, self.n_g)), name='pos_g')
    self.pos_h = sharedX(numpy.zeros((self.batch_size, self.n_h)), name='pos_h')
    self.pos_s1 = sharedX(numpy.zeros((self.batch_size, self.n_s)), name='pos_s1')
    self.pos_s0 = sharedX(numpy.zeros((self.batch_size, self.n_s)), name='pos_s0')
    # initialize visible unit chains
    scale = numpy.sqrt(1. / softplus(self.lambd.get_value()))
    neg_v = self.rng.normal(loc=0, scale=scale, size=(self.batch_size, self.n_v))
    self.neg_v = sharedX(neg_v, name='neg_v')
    # initialize s-chain
    loc = self.mu.get_value()
    scale = numpy.sqrt(1. / softplus(self.alpha.get_value()))
    neg_s = self.rng.normal(loc=loc, scale=scale, size=(self.batch_size, self.n_s))
    self.neg_s = sharedX(neg_s, name='neg_s')
    # initialize binary g-h chains
    pval_g = sigm(self.gbias.get_value())
    pval_h = sigm(self.hbias.get_value())
    neg_g = self.rng.binomial(n=1, p=pval_g, size=(self.batch_size, self.n_g))
    neg_h = self.rng.binomial(n=1, p=pval_h, size=(self.batch_size, self.n_h))
    self.neg_h = sharedX(neg_h, name='neg_h')
    self.neg_g = sharedX(neg_g, name='neg_g')
    # other misc.
    self.pos_counter = sharedX(0., name='pos_counter')
    self.odd_even = sharedX(0., name='odd_even')
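init_chains works on numpy arrays returned by get_value(), so it uses numpy-side helpers sigm and softplus rather than the symbolic T.nnet versions from init_parameters. Their standard definitions, assumed here, are:

import numpy

def sigm(x):
    # logistic sigmoid: turns biases into Bernoulli means in (0, 1)
    return 1. / (1. + numpy.exp(-x))

def softplus(x):
    # numpy counterpart of T.nnet.softplus
    return numpy.log1p(numpy.exp(x))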
Example 4: __init__
def __init__(self, conf, numpy_rng, W, Lambda):
    """
    :param W: a LinearTransform instance for the weights.
    :param Lambda: a LinearTransform instance parametrizing the h-dependent
        precision of the visible units.
    """
    self.conf = conf
    self.W = W
    self.Lambda = Lambda
    if Lambda:
        if W.col_shape() != Lambda.col_shape():
            raise ValueError('col_shape mismatch',
                             (W.col_shape(), Lambda.col_shape()))
        if W.row_shape() != Lambda.row_shape():
            raise ValueError('row_shape mismatch',
                             (W.row_shape(), Lambda.row_shape()))
    # Energy term has vW(sh), so s and h share the column shape of W.
    h_shp = self.h_shp = W.col_shape()
    s_shp = self.s_shp = W.col_shape()
    v_shp = self.v_shp = W.row_shape()
    logger.info("RBM shapes h_shp=%s, s_shp=%s, v_shp=%s" % (h_shp, s_shp, v_shp))
    # alpha (precision on slab variables)
    alpha_init = numpy.zeros(s_shp) + conf['alpha0']
    if conf['alpha_irange']:
        alpha_init += (2 * numpy_rng.rand(*s_shp) - 1) * conf['alpha_irange']
    if conf['alpha_logdomain']:
        self.alpha = sharedX(numpy.log(alpha_init), name='alpha')
    else:
        self.alpha = sharedX(alpha_init, name='alpha')
    # mu (mean of slab variables)
    self.mu = sharedX(
        conf['mu0'] + numpy_rng.uniform(size=s_shp,
                                        low=-conf['mu_irange'],
                                        high=conf['mu_irange']),
        name='mu')
    # b (bias of spike variables)
    self.b = sharedX(
        conf['b0'] + numpy_rng.uniform(size=h_shp,
                                       low=-conf['b_irange'],
                                       high=conf['b_irange']),
        name='b')
    # B (precision on visible variables)
    if conf['B_full_diag']:
        B_init = numpy.zeros(v_shp) + conf['B0']
    else:
        B_init = numpy.zeros(()) + conf['B0']
    if conf['B_logdomain']:
        B_init = numpy.log(B_init)
    self.B = sharedX(B_init, name='B')
    self._params = [self.mu, self.B, self.b, self.alpha]
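When conf['alpha_logdomain'] is set, the shared variable stores log(alpha), so unconstrained gradient steps can never drive the effective precision negative; downstream code must then undo the transform with an exponential. A hedged sketch of that accessor (the method name is an assumption, not from the source):

import theano.tensor as T

def effective_alpha(self):
    # Hypothetical accessor: recover alpha from log-domain storage.
    if self.conf['alpha_logdomain']:
        return T.exp(self.alpha)
    return self.alpha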
Example 5: init_parameters
def init_parameters(self):
    # init weight matrices
    self.Wv = self.init_weight(self.iscales.get('Wv', 1.0), (self.n_v, self.n_h), 'Wv', normalize=False)
    # allocate shared variables for bias parameters
    self.hbias = sharedX(self.iscales['hbias'] * numpy.ones(self.n_h), name='hbias')
    # diagonal of precision matrix of visible units
    self.lambd = sharedX(self.iscales['lambd'] * numpy.ones(self.n_v), name='lambd')
    self.lambd_prec = T.nnet.softplus(self.lambd)
Example 6: init_parameters
def init_parameters(self):
    # init weight matrices
    self.Wv = self.init_weight(self.iscales.get('Wv', 1.0), (self.n_v, self.n_h), 'Wv')
    # allocate shared variables for bias parameters
    self.vbias = sharedX(self.iscales['vbias'] * numpy.ones(self.n_v), name='vbias')
    self.hbias = sharedX(self.iscales['hbias'] * numpy.ones(self.n_h), name='hbias')
    # centering offsets on visible and hidden units
    self.cv = sharedX(numpy.zeros(self.n_v), name='cv')
    ch = numpy.ones(self.n_h) * (0.5 if self.flags['enable_centering'] else 0.)
    self.ch = sharedX(ch, name='ch')
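Example 6 allocates centering offsets: ch starts at 0.5 (the activation of a zero-input sigmoid unit) when enable_centering is on, and at 0 otherwise, which makes centering a no-op. In a centered RBM, interaction terms are computed on offset-subtracted states; the sketch below shows the general idea only, not this model's exact energy function:

import theano.tensor as T

def centered_energy(self, v, h):
    # Hypothetical sketch of a centered binary RBM energy:
    # the interaction uses (v - cv) and (h - ch) instead of raw states.
    vc, hc = v - self.cv, h - self.ch
    return (- T.sum(T.dot(vc, self.Wv) * hc, axis=1)
            - T.dot(v, self.vbias) - T.dot(h, self.hbias))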
Example 7: __init__
def __init__(self):
    rng = numpy.random.RandomState(123)
    self.Wv = sharedX(0.1 * rng.randn(14*14, 10), name='Wv')
    self.hbias = sharedX(-1 * numpy.ones(10), name='hbias')
    self.alpha = sharedX(0.1 * rng.rand(10), name='alpha')
    self.mu = sharedX(0.1 * numpy.ones(10), name='mu')
    self.lambd = sharedX(1.0 * numpy.ones(10), name='lambd')
    self.bw_s = 1
    self.n_h = 10
    self.input = T.matrix('input')
Example 8: init_chains
def init_chains(self):
    """ Allocate shared variable for persistent chain """
    # initialize visible unit chains
    scale = numpy.sqrt(1. / softplus(self.lambd.get_value()))
    neg_v = self.rng.normal(loc=0, scale=scale, size=(self.batch_size, self.n_v))
    self.neg_v = sharedX(neg_v, name='neg_v')
    # initialize s-chain
    scale = numpy.sqrt(1. / softplus(self.alpha.get_value()))
    neg_s = self.rng.normal(loc=0., scale=scale, size=(self.batch_size, self.n_s))
    self.neg_s = sharedX(neg_s, name='neg_s')
    # initialize binary h chain
    pval_h = sigm(self.hbias.get_value())
    neg_h = self.rng.binomial(n=1, p=pval_h, size=(self.batch_size, self.n_h))
    self.neg_h = sharedX(neg_h, name='neg_h')
Example 9: cd_updates
def cd_updates(self, pos_v, neg_v, lr, other_cost=0):
    grads = contrastive_grad(self.free_energy_given_v,
                             pos_v, neg_v,
                             wrt=self.params(),
                             other_cost=other_cost)
    stepsizes = lr
    if self.conf.get('momentum', 0.0):
        logger.info('Using momentum %s' % self.conf['momentum'])
        rval = dict(
            sgd_momentum_updates(
                self.params(),
                grads,
                stepsizes=stepsizes,
                momentum=self.conf['momentum']))
    else:
        rval = dict(
            sgd_updates(
                self.params(),
                grads,
                stepsizes=stepsizes))
    # DEBUG: also copy each gradient into a shared variable for inspection
    grad_shared_vars = [sharedX(0 * p.get_value(), '') for p in self.params()]
    self.grad_shared_vars = grad_shared_vars
    rval.update(dict(zip(grad_shared_vars, grads)))
    return rval
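cd_updates returns an updates dictionary mapping each parameter (and the debug gradient buffers) to its new value. Under the usual Theano workflow, the caller would compile it into a training function, roughly as follows (variable names here are illustrative):

import theano
import theano.tensor as T

pos_v = T.matrix('pos_v')   # data batch
neg_v = T.matrix('neg_v')   # samples from the persistent chain
updates = rbm.cd_updates(pos_v, neg_v, lr=0.01)
train_step = theano.function([pos_v, neg_v], [], updates=updates)
# each call applies one contrastive-divergence parameter update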
Example 10: init_chains
def init_chains(self):
    """ Allocate shared variable for persistent chain """
    # initialize s-chain
    loc = self.mu.get_value()
    scale = numpy.sqrt(1. / softplus(self.alpha.get_value()))
    neg_s = self.rng.normal(loc=loc, scale=scale, size=(self.batch_size, self.n_s))
    self.neg_s = sharedX(neg_s, name='neg_s')
    # initialize binary v chains
    pval_v = sigm(self.vbias.get_value())
    neg_v = self.rng.binomial(n=1, p=pval_v, size=(self.batch_size, self.n_v))
    self.neg_v = sharedX(neg_v, name='neg_v')
    # initialize binary h chains
    pval_h = sigm(self.hbias.get_value())
    neg_h = self.rng.binomial(n=1, p=pval_h, size=(self.batch_size, self.n_h))
    self.neg_h = sharedX(neg_h, name='neg_h')
    # moving average values for sparsity
    self.sp_pos_v = sharedX(neg_v, name='sp_pos_v')
    self.sp_pos_h = sharedX(neg_h, name='sp_pos_h')
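The sp_pos_v and sp_pos_h buffers at the end of Example 10 typically hold running averages of positive-phase activations for a sparsity penalty. A hedged sketch of how such a buffer might be refreshed with an exponential moving average (the decay value and method name are assumptions):

def update_sparsity_stats(self, pos_h, decay=0.95):
    # Hypothetical EMA update of the sparsity statistics.
    old = self.sp_pos_h.get_value()
    self.sp_pos_h.set_value(decay * old + (1. - decay) * pos_h)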
Example 11: init_chains
def init_chains(self):
    """ Allocate shared variable for persistent chain """
    self.neg_ev = sharedX(self.rng.rand(self.batch_size, self.n_v), name='neg_ev')
    self.neg_h = sharedX(self.rng.rand((self.cratio + 1) * self.batch_size, self.n_h), name='neg_h')
    self.neg_v = sharedX(self.rng.rand((self.cratio + 1) * self.batch_size, self.n_v), name='neg_v')
    self.beta = sharedX(numpy.ones((self.cratio + 1) * self.batch_size), name='betas')
    self.beta_mat = T.shape_padright(self.beta)
    ### CAST is mostly implemented in numpy ###
    # generate the range of possible inverse temperatures
    self._betas = numpy.linspace(1.0, self.min_beta, self.num_beta).astype(floatX)
    # chain i is at inverse temperature betas[beta_idx[i]]
    self.beta_idx = self.rng.random_integers(low=0,
                                             high=self.num_beta - 1,
                                             size=(self.cratio * self.batch_size))
    self.beta_logw = numpy.zeros(self.num_beta)
    self.swap_timer = 1
    # beta weights (adaptive Wang-Landau (WL) weights)
    self.update_temperatures()
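Example 11 sets up coupled adaptive simulated tempering (CAST): the first batch_size chains sample at inverse temperature beta = 1, while cratio * batch_size auxiliary chains move on a ladder of num_beta inverse temperatures between 1.0 and min_beta, with beta_logw holding the adaptive log-weights. The update_temperatures method is not shown; as one illustrative piece, a tempering chain's ladder index is usually proposed to move one rung at a time, e.g.:

import numpy

def propose_beta_move(beta_idx, num_beta, rng):
    # Hypothetical sketch: propose moving each chain one rung up or
    # down the temperature ladder, clipped to the valid index range.
    step = 2 * rng.randint(0, 2, size=beta_idx.shape) - 1   # +/- 1
    return numpy.clip(beta_idx + step, 0, num_beta - 1)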
Example 12: init_centering
def init_centering(self):
    self.avg_pos_g = sharedX(0.5 * numpy.ones(self.n_g), name='avg_pos_g')
    self.avg_pos_h = sharedX(0.5 * numpy.ones(self.n_h), name='avg_pos_h')
    self.avg_pos_v = sharedX(numpy.zeros(self.n_v), name='avg_pos_v')
    # previous-step (t-1) copies of the running averages
    self.avg_pos_g_tm1 = sharedX(0. * numpy.ones(self.n_g), name='avg_pos_g_tm1')
    self.avg_pos_h_tm1 = sharedX(0. * numpy.ones(self.n_h), name='avg_pos_h_tm1')
    self.avg_pos_v_tm1 = sharedX(numpy.zeros(self.n_v), name='avg_pos_v_tm1')
Example 13: init_parameters
def init_parameters(self):
    assert self.sparse_hmask
    # init scalar norm for each entry of Wv
    sn_val = self.iscales['scalar_norms'] * numpy.ones(self.n_s)
    self.scalar_norms = sharedX(sn_val, name='scalar_norms')
    if self.flags['igo_init']:
        print('Overriding iscales initialization with 1./sqrt(nv x nh)')
        self.iscales['Wv'] = 1. / numpy.sqrt(max(self.n_v, self.n_s))
        self.iscales['Wg'] = 1. / numpy.sqrt(max(self.n_g, self.n_s))
        self.iscales['Wh'] = 1. / numpy.sqrt(max(self.n_h, self.n_s))
    # init (visible, slab) weight matrix
    self.Wv = self.init_weight(self.iscales['Wv'], (self.n_v, self.n_s), 'Wv',
                               normalize=(self.flags['wv_norm'] == 'unit'))
    # init (slab, hidden) pooling matrix
    self.Wh = sharedX(self.sparse_hmask.mask.T * self.iscales.get('Wh', 1.0), name='Wh')
    # init (slab, g-unit) weight matrix
    if self.sparse_gmask:
        self.Wg = sharedX(self.sparse_gmask.mask.T * self.iscales.get('Wg', 1.0), name='Wg')
    else:
        self.Wg = self.init_weight(self.iscales['Wg'], (self.n_s, self.n_g), 'Wg')
    # allocate shared variables for bias parameters
    self.gbias = sharedX(self.iscales['gbias'] * numpy.ones(self.n_g), name='gbias')
    self.hbias = sharedX(self.iscales['hbias'] * numpy.ones(self.n_h), name='hbias')
    self.cg = sharedX(0.5 * numpy.ones(self.n_g), name='cg')
    self.ch = sharedX(0.5 * numpy.ones(self.n_h), name='ch')
    # mean (mu) and precision (alpha) parameters on s
    self.mu = sharedX(self.iscales['mu'] * numpy.ones(self.n_s), name='mu')
    self.alpha = sharedX(self.iscales['alpha'] * numpy.ones(self.n_s), name='alpha')
    self.alpha_prec = T.nnet.softplus(self.alpha)
    # diagonal of precision matrix of visible units
    self.lambd = sharedX(self.iscales['lambd'] * numpy.ones(self.n_v), name='lambd')
    self.lambd_prec = T.nnet.softplus(self.lambd)
Example 14: init_parameters_from_model
def init_parameters_from_model(self, model):
    self.scalar_norms = model.scalar_norms
    self.Wv = model.Wv
    self.Wg = model.Wg
    self.Wh = model.Wh
    self.avg_norm_wg = model.avg_norm_wg
    self.avg_norm_wh = model.avg_norm_wh
    self.gbias = model.gbias
    # hbias is allocated fresh rather than shared with the source model
    self.hbias = sharedX(self.iscales['hbias'] * numpy.ones(self.n_h), name='hbias')
    self.vbias = model.vbias
    self.mu = model.mu
    self.alpha = model.alpha
    self.alpha_prec = model.alpha_prec
Example 15: init_chains
def init_chains(self):
    """ Allocate shared variable for persistent chain """
    self.neg_g = sharedX(self.rng.rand(self.batch_size, self.n_g), name='neg_g')
    self.neg_s = sharedX(self.rng.rand(self.batch_size, self.n_g), name='neg_s')
    self.neg_h = sharedX(self.rng.rand(self.batch_size, self.n_h), name='neg_h')
    self.neg_t = sharedX(self.rng.rand(self.batch_size, self.n_h), name='neg_t')
    self.neg_v = sharedX(self.rng.rand(self.batch_size, self.n_v), name='neg_v')
    self.neg_ev = sharedX(self.rng.rand(self.batch_size, self.n_v), name='neg_ev')
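Across all fifteen examples, the attributes created by sharedX are ordinary theano.shared variables, so they can be inspected and modified from numpy without recompiling any Theano function. For instance (model stands for any instance whose init methods have run):

model.init_parameters()
model.init_chains()
print(model.Wv.get_value().shape)                     # numpy copy of the weights
model.hbias.set_value(0. * model.hbias.get_value())   # reset the biases in place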