本文整理汇总了Python中chainer.Variable.astype方法的典型用法代码示例。如果您正苦于以下问题:Python Variable.astype方法的具体用法?Python Variable.astype怎么用?Python Variable.astype使用的例子?那么恭喜您,这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类chainer.Variable的用法示例。
在下文中一共展示了Variable.astype方法的2个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: update
# Required import: from chainer import Variable
# Method under demonstration: chainer.Variable.astype
def update(self, trajs):
    """Refit the baseline network on a batch of trajectories.

    Builds one big observation batch (optionally augmented with a
    normalized-timestep feature), forms regression targets as a convex
    mixture of empirical returns and the previous baseline predictions,
    and minimizes the mean-squared error with up to 10 L-BFGS iterations.
    """
    # Stack per-trajectory observations into a single batch.
    observations = np.concatenate([t['observations'] for t in trajs], axis=0)
    if self.concat_time:
        # Append the normalized timestep as an extra feature column.
        time_feats = np.concatenate(
            [np.arange(len(t['observations'])) / self.env_spec.timestep_limit
             for t in trajs],
            axis=0)
        observations = np.concatenate(
            [observations, time_feats[:, None]], axis=-1)
    empirical_returns = np.concatenate([t['returns'] for t in trajs], axis=0)
    prior_baselines = np.concatenate([t['baselines'] for t in trajs], axis=0)
    # Regress toward a mixture of current returns and past predictions,
    # which damps target drift between updates.
    mix = self.mixture_fraction
    regression_targets = empirical_returns * (1. - mix) + prior_baselines * mix

    initial_params = get_flat_params(self)
    obs_var = Variable(observations)
    target_var = Variable(regression_targets.astype(np.float32))

    def objective(flat_x):
        # L-BFGS callback: must return (loss, gradient) as float64,
        # which is what scipy.optimize.fmin_l_bfgs_b expects.
        set_flat_params(self, flat_x)
        self.cleargrads()
        predicted = self.compute_baselines(obs_var)
        loss = F.mean(F.square(predicted - target_var))
        loss.backward()
        grad = get_flat_grad(self)
        return loss.data.astype(np.float64), grad.astype(np.float64)

    optimized = scipy.optimize.fmin_l_bfgs_b(
        objective, initial_params, maxiter=10)[0]
    set_flat_params(self, optimized)
示例2: sim
# Required import: from chainer import Variable
# Method under demonstration: chainer.Variable.astype
def sim():
    """Generator: integrate a 2-D vorticity (zeta) / streamfunction (psi)
    simulation with a passively advected density field (rho) on the GPU,
    yielding ``(zeta, psi, rho, i)`` every 10 steps.

    Each iteration performs a two-stage update: a half step (dt/2) to get
    midpoint estimates (``rho_``, ``zeta_``), then a full step (dt) using
    the midpoint derivatives — a predictor-corrector scheme.

    NOTE(review): relies on module-level globals not visible here
    (H, W, lap, poisson_jacobi, FTCS_X, FTCS_Y, Kawamura_X, Kawamura_Y,
    upwind, rho0, dt, nu, xp) — confirm they are defined before calling.
    """
    # Random initial vorticity field, smoothed by 100 explicit diffusion steps.
    zeta0 = (np.random.uniform(-10.0, 10.0, (1,1,H,W)).astype(np.float32))
    zeta0 = Variable(chainer.cuda.to_gpu(zeta0.astype(np.float32)), volatile=True)
    for it in range(100):
        zeta0 += 0.1*lap.forward(zeta0)
    # Addition produces a new Variable, so zeta does not alias zeta0.
    zeta = 0.0 + zeta0
    # Initial streamfunction from the vorticity via a Jacobi Poisson solve.
    psi = poisson_jacobi(zeta, num_iter=1000)
    rho = Variable(rho0, volatile=True)
    for i in range(10000):
        # --- predictor: half step (0.5*dt) from the current state ---
        # Refine psi, warm-started from the previous solution.
        psi = poisson_jacobi(zeta, x0=psi)
        dpdx = FTCS_X().forward(psi)  # d(psi)/dx = -vy
        dpdy = FTCS_Y().forward(psi)  # d(psi)/dy = vx
        # Upwind-selected vorticity gradients based on local velocity sign.
        dzdx = upwind(Kawamura_X().forward(zeta), dpdy)
        dzdy = upwind(Kawamura_Y().forward(zeta), -dpdx)
        lapz = lap.forward(zeta)
        # Advect density; the diffusion term is multiplied by 0.000 (disabled).
        rho_ = rho-0.5*dt*(dpdy*upwind(Kawamura_X().forward(rho), dpdy)-dpdx*upwind(Kawamura_Y().forward(rho), -dpdx) - 0.000*lap.forward(rho))
        # Wrap boundary column: copy column 499 into column 0.
        # NOTE(review): hard-codes index 499 — assumes W == 500; confirm.
        rho_.data[0,0,:,0] = rho_.data[0,0,:,499]
        # Renormalize so total density stays fixed at 1.
        sum_rho = chainer.functions.sum(rho_)
        rho_ = rho_/(xp.zeros_like(rho.data)+sum_rho)
        # Vorticity tendency: advection by (vx, vy) plus viscous diffusion.
        dzdt = dpdx*dzdy - dpdy*dzdx + nu*lapz
        zeta_ = zeta+0.5*dt * dzdt
        # --- corrector: full step (dt) using the midpoint state ---
        psi = poisson_jacobi(zeta_, x0=psi)
        dpdx = FTCS_X().forward(psi)  # d(psi)/dx = -vy
        dpdy = FTCS_Y().forward(psi)  # d(psi)/dy = vx
        dzdx = upwind(Kawamura_X().forward(zeta_), dpdy)
        dzdy = upwind(Kawamura_Y().forward(zeta_), -dpdx)
        lapz = lap.forward(zeta_)
        rho = rho - dt*(dpdy*upwind(Kawamura_X().forward(rho_), dpdy)-dpdx*upwind(Kawamura_Y().forward(rho_), -dpdx) - 0.000*lap.forward(rho_))
        rho.data[0,0,:,0] = rho.data[0,0,:,499]
        sum_rho = chainer.functions.sum(rho)
        rho = rho/(xp.zeros_like(rho.data)+sum_rho)
        dzdt = dpdx*dzdy - dpdy*dzdx + nu*lapz
        zeta = zeta + dt * dzdt
        # Hand the current state to the consumer every 10 steps.
        if i%10==0:
            yield zeta, psi, rho, i