This article collects typical usage examples of the Python method nengo_ocl.clraggedarray.CLRaggedArray.to_host. If you are wondering exactly how CLRaggedArray.to_host is used, or are looking for working examples of it, the curated code samples here may help. You can also explore further usage examples of the containing class, nengo_ocl.clraggedarray.CLRaggedArray.
Seven code examples of the CLRaggedArray.to_host method are shown below, sorted by popularity by default. You can upvote examples you like or find useful; your feedback helps the system recommend better Python code examples.
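Before the examples, note the pattern they all share: build a host-side RaggedArray (aliased RA below), upload it to the device as a CLRaggedArray (aliased CLRA), run an OpenCL plan, and copy the data back with to_host() to compare against the host computation. The minimal round-trip sketch below illustrates only the upload/download step; the RaggedArray import path and the context creation are assumptions based on the test code, not part of the seven examples themselves.

# Minimal round-trip sketch (assumes pyopencl is installed and that the
# RaggedArray import path below matches your nengo_ocl version).
import numpy as np
import pyopencl as cl
from nengo_ocl.raggedarray import RaggedArray as RA
from nengo_ocl.clraggedarray import CLRaggedArray as CLRA

ctx = cl.create_some_context()
queue = cl.CommandQueue(ctx)

A = RA([np.random.normal(size=n) for n in [3, 5, 7]])  # host ragged array
clA = CLRA(queue, A)                                    # copy host -> device
A2 = clA.to_host()                                      # copy device -> host

for a, a2 in zip(A, A2):
    assert np.allclose(a, a2)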
Example 1: test_lif_step
# Required import: from nengo_ocl.clraggedarray import CLRaggedArray [as alias]
# Or: from nengo_ocl.clraggedarray.CLRaggedArray import to_host [as alias]
def test_lif_step(upsample, n_elements):
    """Test the lif nonlinearity, comparing one step with the Numpy version."""
    dt = 1e-3
    # n_neurons = [3, 3, 3]
    n_neurons = [12345, 23456, 34567]
    N = len(n_neurons)
    J = RA([np.random.normal(scale=1.2, size=n) for n in n_neurons])
    V = RA([np.random.uniform(low=0, high=1, size=n) for n in n_neurons])
    W = RA([np.random.uniform(low=-5*dt, high=5*dt, size=n) for n in n_neurons])
    OS = RA([np.zeros(n) for n in n_neurons])
    ref = 2e-3
    # tau = 20e-3
    # refs = list(np.random.uniform(low=1.7e-3, high=4.2e-3, size=len(n_neurons)))
    taus = list(np.random.uniform(low=15e-3, high=80e-3, size=len(n_neurons)))

    queue = cl.CommandQueue(ctx)
    clJ = CLRA(queue, J)
    clV = CLRA(queue, V)
    clW = CLRA(queue, W)
    clOS = CLRA(queue, OS)
    # clRef = CLRA(queue, RA(refs))
    clTau = CLRA(queue, RA(taus))

    ### simulate host
    nls = [LIF(n, tau_ref=ref, tau_rc=taus[i])
           for i, n in enumerate(n_neurons)]
    for i, nl in enumerate(nls):
        if upsample <= 1:
            nl.step_math(dt, J[i], V[i], W[i], OS[i])
        else:
            s = np.zeros_like(OS[i])
            for j in xrange(upsample):
                nl.step_math(dt/upsample, J[i], V[i], W[i], s)
                OS[i] = (OS[i] > 0.5) | (s > 0.5)

    ### simulate device
    plan = plan_lif(queue, clJ, clV, clW, clV, clW, clOS, ref, clTau, dt,
                    n_elements=n_elements, upsample=upsample)
    plan()

    if 1:
        a, b = V, clV
        for i in xrange(len(a)):
            nc, _ = not_close(a[i], b[i]).nonzero()
            if len(nc) > 0:
                j = nc[0]
                print "i", i, "j", j
                print "J", J[i][j], clJ[i][j]
                print "V", V[i][j], clV[i][j]
                print "W", W[i][j], clW[i][j]
                print "...", len(nc) - 1, "more"

    n_spikes = np.sum([np.sum(os) for os in OS])
    if n_spikes < 1.0:
        logger.warn("LIF spiking mechanism was not tested!")

    assert ra.allclose(J, clJ.to_host())
    assert ra.allclose(V, clV.to_host())
    assert ra.allclose(W, clW.to_host())
    assert ra.allclose(OS, clOS.to_host())
Example 2: test_lif_rate
# Required import: from nengo_ocl.clraggedarray import CLRaggedArray [as alias]
# Or: from nengo_ocl.clraggedarray.CLRaggedArray import to_host [as alias]
def test_lif_rate(n_elements):
    """Test the `lif_rate` nonlinearity"""
    rng = np.random
    dt = 1e-3
    n_neurons = [123459, 23456, 34567]
    J = RA([rng.normal(loc=1, scale=10, size=n) for n in n_neurons])
    R = RA([np.zeros(n) for n in n_neurons])
    ref = 2e-3
    taus = list(rng.uniform(low=15e-3, high=80e-3, size=len(n_neurons)))

    queue = cl.CommandQueue(ctx)
    clJ = CLRA(queue, J)
    clR = CLRA(queue, R)
    clTau = CLRA(queue, RA(taus))

    # simulate host
    nls = [LIFRate(tau_ref=ref, tau_rc=taus[i])
           for i, n in enumerate(n_neurons)]
    for i, nl in enumerate(nls):
        nl.step_math(dt, J[i], R[i])

    # simulate device
    plan = plan_lif_rate(queue, clJ, clR, ref, clTau, dt=dt,
                         n_elements=n_elements)
    plan()

    rate_sum = np.sum([np.sum(r) for r in R])
    if rate_sum < 1.0:
        logger.warn("LIF rate was not tested above the firing threshold!")

    assert ra.allclose(J, clJ.to_host())
    assert ra.allclose(R, clR.to_host())
Example 3: test_lif_rate
# Required import: from nengo_ocl.clraggedarray import CLRaggedArray [as alias]
# Or: from nengo_ocl.clraggedarray.CLRaggedArray import to_host [as alias]
def test_lif_rate(n_elements):
    """Test the `lif_rate` nonlinearity"""
    # n_neurons = [3, 3, 3]
    n_neurons = [123459, 23456, 34567]
    N = len(n_neurons)
    J = RA([np.random.normal(loc=1, scale=10, size=n) for n in n_neurons])
    R = RA([np.zeros(n) for n in n_neurons])
    ref = 2e-3
    taus = list(np.random.uniform(low=15e-3, high=80e-3, size=len(n_neurons)))

    queue = cl.CommandQueue(ctx)
    clJ = CLRA(queue, J)
    clR = CLRA(queue, R)
    clTau = CLRA(queue, RA(taus))

    ### simulate host
    nls = [LIF(n, tau_ref=ref, tau_rc=taus[i])
           for i, n in enumerate(n_neurons)]
    for i, nl in enumerate(nls):
        nl.gain = 1
        nl.bias = 0
        R[i] = nl.rates(J[i].flatten()).reshape((-1, 1))

    ### simulate device
    plan = plan_lif_rate(queue, clJ, clR, ref, clTau, dt=1,
                         n_elements=n_elements)
    plan()

    rate_sum = np.sum([np.sum(r) for r in R])
    if rate_sum < 1.0:
        logger.warn("LIF rate was not tested above the firing threshold!")

    assert ra.allclose(J, clJ.to_host())
    assert ra.allclose(R, clR.to_host())
Example 4: test_lif_step
# Required import: from nengo_ocl.clraggedarray import CLRaggedArray [as alias]
# Or: from nengo_ocl.clraggedarray.CLRaggedArray import to_host [as alias]
def test_lif_step(upsample):
    """Test the lif nonlinearity, comparing one step with the Numpy version."""
    rng = np.random
    dt = 1e-3
    n_neurons = [12345, 23456, 34567]
    J = RA([rng.normal(scale=1.2, size=n) for n in n_neurons])
    V = RA([rng.uniform(low=0, high=1, size=n) for n in n_neurons])
    W = RA([rng.uniform(low=-5 * dt, high=5 * dt, size=n) for n in n_neurons])
    OS = RA([np.zeros(n) for n in n_neurons])
    ref = 2e-3
    taus = rng.uniform(low=15e-3, high=80e-3, size=len(n_neurons))

    queue = cl.CommandQueue(ctx)
    clJ = CLRA(queue, J)
    clV = CLRA(queue, V)
    clW = CLRA(queue, W)
    clOS = CLRA(queue, OS)
    clTaus = CLRA(queue, RA([t * np.ones(n) for t, n in zip(taus, n_neurons)]))

    # simulate host
    nls = [nengo.LIF(tau_ref=ref, tau_rc=taus[i])
           for i, n in enumerate(n_neurons)]
    for i, nl in enumerate(nls):
        if upsample <= 1:
            nl.step_math(dt, J[i], OS[i], V[i], W[i])
        else:
            s = np.zeros_like(OS[i])
            for j in range(upsample):
                nl.step_math(dt / upsample, J[i], s, V[i], W[i])
                OS[i] = (1. / dt) * ((OS[i] > 0) | (s > 0))

    # simulate device
    plan = plan_lif(
        queue, dt, clJ, clV, clW, clOS, ref, clTaus, upsample=upsample)
    plan()

    if 1:
        a, b = V, clV
        for i in range(len(a)):
            nc, _ = not_close(a[i], b[i]).nonzero()
            if len(nc) > 0:
                j = nc[0]
                print("i", i, "j", j)
                print("J", J[i][j], clJ[i][j])
                print("V", V[i][j], clV[i][j])
                print("W", W[i][j], clW[i][j])
                print("...", len(nc) - 1, "more")

    n_spikes = np.sum([np.sum(os) for os in OS])
    if n_spikes < 1.0:
        logger.warn("LIF spiking mechanism was not tested!")

    assert ra.allclose(J, clJ.to_host())
    assert ra.allclose(V, clV.to_host())
    assert ra.allclose(W, clW.to_host())
    assert ra.allclose(OS, clOS.to_host())
Example 5: test_small
# Required import: from nengo_ocl.clraggedarray import CLRaggedArray [as alias]
# Or: from nengo_ocl.clraggedarray.CLRaggedArray import to_host [as alias]
def test_small():
    n = 3
    sizes = [3] * 3
    vals = [np.random.normal(size=size) for size in sizes]
    A = RA(vals)

    queue = cl.CommandQueue(ctx)
    clA = CLRA(queue, A)
    assert ra.allclose(A, clA.to_host())
Example 6: test_slicedcopy
# Required import: from nengo_ocl.clraggedarray import CLRaggedArray [as alias]
# Or: from nengo_ocl.clraggedarray.CLRaggedArray import to_host [as alias]
def test_slicedcopy(rng):
    sizes = rng.randint(20, 200, size=10)
    A = RA([rng.normal(size=size) for size in sizes])
    B = RA([rng.normal(size=size) for size in sizes])
    incs = RA([rng.randint(0, 2) for _ in sizes])

    Ainds = []
    Binds = []
    for size in sizes:
        r = np.arange(size, dtype=np.int32)
        u = rng.choice([0, 1, 2])
        if u == 0:
            Ainds.append(r)
            Binds.append(r)
        elif u == 1:
            Ainds.append(r[:10])
            Binds.append(r[-10:])
        elif u == 2:
            n = rng.randint(2, size - 2)
            Ainds.append(rng.permutation(size)[:n])
            Binds.append(rng.permutation(size)[:n])
    Ainds = RA(Ainds)
    Binds = RA(Binds)

    queue = cl.CommandQueue(ctx)
    clA = CLRA(queue, A)
    clB = CLRA(queue, B)
    clAinds = CLRA(queue, Ainds)
    clBinds = CLRA(queue, Binds)
    clincs = CLRA(queue, incs)

    # compute on host
    for i in range(len(sizes)):
        if incs[i]:
            B[i][Binds[i]] += A[i][Ainds[i]]
        else:
            B[i][Binds[i]] = A[i][Ainds[i]]

    # compute on device
    plan = plan_slicedcopy(queue, clA, clB, clAinds, clBinds, clincs)
    plan()

    # check result
    for y, yy in zip(B, clB.to_host()):
        assert np.allclose(y, yy)
Example 7: test_elementwise_inc
# Required import: from nengo_ocl.clraggedarray import CLRaggedArray [as alias]
# Or: from nengo_ocl.clraggedarray.CLRaggedArray import to_host [as alias]
def test_elementwise_inc(rng):
    Xsizes = [(3, 3), (32, 64), (457, 342), (1, 100)]
    Asizes = [(3, 3), (1, 1), (457, 342), (100, 1)]
    A = RA([rng.normal(size=size) for size in Asizes])
    X = RA([rng.normal(size=size) for size in Xsizes])
    Y = RA([a * x for a, x in zip(A, X)])

    queue = cl.CommandQueue(ctx)
    clA = CLRA(queue, A)
    clX = CLRA(queue, X)
    clY = CLRA(queue, RA([np.zeros_like(y) for y in Y]))

    # compute on device
    plan = plan_elementwise_inc(queue, clA, clX, clY)
    plan()

    # check result
    for y, yy in zip(Y, clY.to_host()):
        assert np.allclose(y, yy)