This page collects typical usage examples of the Python method veles.memory.Array.initialize. If you are wondering what Array.initialize does, how to call it, or what real-world calls look like, the curated examples below should help. You can also explore further usage examples of the containing class, veles.memory.Array.
The following presents 7 code examples of the Array.initialize method, sorted by popularity by default.
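All seven examples follow the same pattern with Array.initialize: first bind or reset the host-side buffer through Array.mem or Array.reset(), then call initialize(device) to attach the array to a compute device. The snippets on this page also assume import numpy and the relevant veles base classes are in scope. Below is a minimal, hedged sketch of that recurring pattern; the attach_buffer helper and its arguments are illustrative, not part of the veles API:

import numpy
from veles.memory import Array

def attach_buffer(device, shape, dtype=numpy.float32):
    # Illustrative helper: the units below receive `device`
    # through their own initialize(device, **kwargs) call.
    buf = Array()
    buf.reset(numpy.zeros(shape, dtype=dtype))  # (re)bind host memory
    buf.initialize(device)                      # allocate the device-side copy
    return buf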
Example 1: MemCpy
# Required import: from veles.memory import Array [as alias]
# Alternatively: from veles.memory.Array import initialize [as alias]
class MemCpy(AcceleratedUnit):
def __init__(self, workflow, **kwargs):
super(MemCpy, self).__init__(workflow, **kwargs)
self.output = Array()
self.demand("input")
def initialize(self, device, **kwargs):
super(MemCpy, self).initialize(device, **kwargs)
if (self.output.mem is None or
self.output.mem.size != self.input.mem.size):
self.output.reset()
self.output.mem = numpy.zeros(self.input.mem.shape,
dtype=self.input.mem.dtype)
self.input.initialize(self.device)
self.output.initialize(self.device)
def cuda_init(self):
pass
def ocl_init(self):
pass
def _gpu_run(self):
self.input.unmap()
self.output.unmap()
def ocl_run(self):
self._gpu_run()
self.device.queue_.copy_buffer(self.input.devmem, self.output.devmem,
0, 0, self.input.nbytes)
def cuda_run(self):
self._gpu_run()
self.output.devmem.from_device_async(self.input.devmem)
def numpy_run(self):
self.input.map_read()
self.output.map_invalidate()
numpy.copyto(self.output.mem, self.input.mem)
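The numpy_run path above shows the host/device mapping contract that every example on this page relies on: map the source for reading, invalidate the destination mapping (its host contents are about to be fully overwritten), then copy. A hedged standalone sketch of the same step, assuming src and dst are already-initialized Array instances of matching shape and dtype:

import numpy

def host_copy(src, dst):
    # src, dst: veles.memory.Array objects bound to the same device.
    src.map_read()        # make the host view of src current
    dst.map_invalidate()  # host view of dst will be overwritten wholesale
    numpy.copyto(dst.mem, src.mem)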
Example 2: All2AllSoftmax
# Required import: from veles.memory import Array [as alias]
# Alternatively: from veles.memory.Array import initialize [as alias]
class All2AllSoftmax(All2All):
"""All2All with linear activation and softmax normalization.
Must be assigned before initialize():
Updates after run():
max_idx
Creates within initialize():
max_idx
Attributes:
krn_sm_: kernel for softmax activation calculation.
max_idx: indexes of element with maximum value for each sample.
"""
__id__ = "420219fc-3e1a-45b1-87f8-aaa0c1540de4"
MAPPING = {"softmax"}
def __init__(self, workflow, **kwargs):
super(All2AllSoftmax, self).__init__(workflow, **kwargs)
self.max_idx = Array()
self.reduce_size = 256
def init_unpickled(self):
super(All2AllSoftmax, self).init_unpickled()
self.krn_sm_ = None
self._force_gpu_apply_exp = False
def initialize(self, device, **kwargs):
self.reduce_size = min(self.reduce_size,
int(numpy.prod(self.output_sample_shape)))
self.sources_["all2all/softmax"] = {
"REDUCE_SIZE": self.reduce_size
}
retval = super(All2AllSoftmax, self).initialize(
device=device, **kwargs)
if retval:
return retval
if self.output.mem.size // self.output.mem.shape[0] <= 1:
raise error.BadFormatError(
"Output sample size should be greater than 1 for SoftMax.")
if not self.max_idx:
self.max_idx.reset(numpy.zeros(self.output.shape[0],
dtype=numpy.int32))
self.max_idx.initialize(self.device)
return retval
def numpy_apply_exp(self):
self.output.map_write()
self.max_idx.map_invalidate()
out = self.output.mem
out = reshape(out, (out.shape[0], out.size // out.shape[0]))
for i, sample in enumerate(out):
im = sample.argmax()
self.max_idx[i] = im
m = sample[im]
sample -= m
numpy.exp(sample, sample)
smm = sample.sum()
sample /= smm
def ocl_apply_exp(self):
self.unmap_vectors(self.output, self.max_idx)
global_size = (self.output.shape[0] * self.reduce_size,)
local_size = (self.reduce_size,)
self.execute_kernel(global_size, local_size, self.krn_sm_)
def cuda_apply_exp(self):
self.unmap_vectors(self.output, self.max_idx)
global_size = (self.output.shape[0], 1, 1)
local_size = (self.reduce_size, 1, 1)
self.execute_kernel(global_size, local_size, self.krn_sm_)
def numpy_run(self):
"""Forward propagation from batch on CPU only.
"""
super(All2AllSoftmax, self).numpy_run()
if not self._force_gpu_apply_exp:
self.numpy_apply_exp()
def ocl_run(self):
"""Forward propagation from batch on GPU.
"""
self._force_gpu_apply_exp = True
super(All2AllSoftmax, self).ocl_run()
self.ocl_apply_exp()
def cuda_run(self):
"""Forward propagation from batch on GPU.
"""
self._force_gpu_apply_exp = True
super(All2AllSoftmax, self).cuda_run()
self.cuda_apply_exp()
def ocl_init(self):
super(All2AllSoftmax, self).ocl_init()
self.krn_sm_ = self.get_kernel("apply_exp")
self.krn_sm_.set_args(self.output.devmem, self.max_idx.devmem)
#......... the rest of the code is omitted here .........
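numpy_apply_exp above is the standard numerically stable softmax: subtract each sample's maximum before exponentiating (so exp never overflows) and record the maximum's index in max_idx. A self-contained numpy sketch of the same computation, vectorized over the batch instead of looping; the vectorization is a restatement for clarity, not veles code:

import numpy

def softmax_with_argmax(out):
    # out: 2-D array (batch, features), modified in place like the unit does.
    out = out.reshape(out.shape[0], -1)
    max_idx = out.argmax(axis=1).astype(numpy.int32)  # winner per sample
    out -= out.max(axis=1, keepdims=True)             # stability shift
    numpy.exp(out, out)
    out /= out.sum(axis=1, keepdims=True)
    return max_idx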
Example 3: OffsetPooling
# Required import: from veles.memory import Array [as alias]
# Alternatively: from veles.memory.Array import initialize [as alias]
class OffsetPooling(Pooling):
"""Pooling by offset forward propagation.
Must be assigned before initialize():
Updates after run():
input_offset
Creates within initialize():
input_offset
Attributes:
input_offset: offsets in the input where elements are passed through.
"""
MAPPING = set()
hide_from_registry = True
def __init__(self, workflow, **kwargs):
super(OffsetPooling, self).__init__(workflow, **kwargs)
self.input_offset = Array()
self.demand("input")
def initialize(self, device, **kwargs):
super(OffsetPooling, self).initialize(device=device, **kwargs)
if self._no_output:
return
if self.input_offset:
assert self.input_offset.shape[1:] == self.output.shape[1:]
if (not self.input_offset or
self.input_offset.shape[0] != self.output.shape[0]):
self.input_offset.reset(numpy.zeros(self.output.shape,
dtype=numpy.int32))
self.input_offset.initialize(self.device)
def set_args(self, *args):
super(OffsetPooling, self).set_args(self.input, self.output,
self.input_offset, *args)
def ocl_run(self):
self.input_offset.unmap()
super(OffsetPooling, self).ocl_run()
def cuda_run(self):
self.input_offset.unmap()
super(OffsetPooling, self).cuda_run()
def numpy_run(self):
self.input_offset.map_invalidate()
super(OffsetPooling, self).numpy_run()
def numpy_run_cut(self, cut, coords):
batch, y1, x1, ch, out_y, out_x = coords
cut_index = self.numpy_run_cut_offset(
cut, numpy.ravel_multi_index((batch, out_y, out_x, ch),
self.output.shape))
i, j = numpy.unravel_index(cut_index, cut.shape)
idx = numpy.ravel_multi_index((batch, y1 + i, x1 + j, ch),
self.input.shape)
val = numpy.ravel(self.input.mem)[idx]
self.input_offset.mem[batch, out_y, out_x, ch] = idx
return val
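numpy_run_cut stores, for every output cell, the flat index of the winning input element; numpy.ravel_multi_index and numpy.unravel_index convert between 4-D (batch, y, x, channel) coordinates and those flat offsets. A short standalone sketch of that round trip, with made-up shapes:

import numpy

input_shape = (4, 8, 8, 3)  # (batch, y, x, channels), purely illustrative
coords = (1, 5, 2, 0)
flat = numpy.ravel_multi_index(coords, input_shape)  # 4-D coords -> flat offset
data = numpy.arange(numpy.prod(input_shape)).reshape(input_shape)
assert data.ravel()[flat] == data[coords]            # same element either way
assert numpy.unravel_index(flat, input_shape) == coords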
Example 4: GradientDescentBase
# Required import: from veles.memory import Array [as alias]
# Alternatively: from veles.memory.Array import initialize [as alias]
class GradientDescentBase(AcceleratedUnit):
"""Base class for gradient descent units.
Attributes:
input: input layer values.
output: output layer values.
err_output: error to backpropagate.
err_input: backpropagated error.
weights: weights.
bias: bias.
batch_size: current minibatch size.
learning_rate: gradient descent speed (positive).
learning_rate_bias
weights_decay: regularization for weights (see l1_vs_l2).
weights_decay_bias
gradient_moment: moment coefficient for weights.
gradient_moment_bias
gradient_weights_with_moment: accumulated moment.
gradient_bias_with_moment
        batch_size: effective batch size; if None, it is derived from
            err_output (see current_batch_size).
weights_transposed: assume weights matrix as a transposed one.
apply_gradient: will apply gradient.
gradient_changed: when True, slave will send gradients to master
(assigned to True just before the run call, so it can be set to
False inside ocl_run, numpy_run if necessary).
ocl_set_const_args: True when constant arguments for the kernel
had been changed and need to be set again.
"""
hide_from_registry = True
MAPPING = set()
REDUCE_SIZE = 64 # used for updating bias
def __init__(self, workflow, **kwargs):
kwargs["view_group"] = kwargs.get("view_group", "TRAINER")
super(GradientDescentBase, self).__init__(workflow, **kwargs)
self.err_input = Array(shallow_pickle=True)
self.ocl_set_const_args = True
self.weights = None
self.bias = None
self.demand("input", "err_output")
self.learning_rate = kwargs.get("learning_rate", 0.01)
self.learning_rate_bias = kwargs.get("learning_rate_bias", self.learning_rate)
self.weights_decay = kwargs.get("weights_decay", 0.00005)
self.weights_decay_bias = kwargs.get("weights_decay_bias", 0.0)
self.l1_vs_l2 = kwargs.get("l1_vs_l2", 0)
self.l1_vs_l2_bias = kwargs.get("l1_vs_l2_bias", self.l1_vs_l2)
self.gradient_moment = kwargs.get("gradient_moment", 0)
self.gradient_moment_bias = kwargs.get("gradient_moment_bias", self.gradient_moment)
self.weights_transposed = kwargs.get("weights_transposed", False)
self.need_err_input = kwargs.get("need_err_input", True)
self.include_bias = kwargs.get("include_bias", True)
self.factor_ortho = kwargs.get("factor_ortho", 0)
self.col_sums = Array() # for orthogonalization
# Current gradient as it is without applying learning_rate etc.
self.gradient_weights = Array()
self.gradient_bias = Array()
# Gradient with applied learning_rate etc.
# optionally accumulated from the previous run
self.accumulate_gradient = kwargs.get("accumulate_gradient", False)
# When accumulate_gradient set to True:
# 1. Calculate gd
# 2. acc = acc_alpha * gd + acc_beta * acc
# 3. gd = gd_alpha * acc + gd_beta * gd
# 4. Apply moments to gd
# 5. weights += gd if apply_gradient set to True
self.acc_alpha = kwargs.get("acc_alpha", 0.0)
self.acc_beta = kwargs.get("acc_beta", 0.0)
self.gd_alpha = kwargs.get("gd_alpha", 0.0)
self.gd_beta = kwargs.get("gd_beta", 1.0)
self.accumulated_gradient_weights = Array()
self.accumulated_gradient_bias = Array()
# Gradient with accumulated moments
self.gradient_weights_with_moment = Array()
self.gradient_bias_with_moment = Array()
        # Set to True when the gradient changes
self.gradient_changed = False
        # The gradient is applied to the weights immediately after computing
self.apply_gradient = kwargs.get("apply_gradient", not workflow.is_slave)
@property
def current_batch_size(self):
batch_size = getattr(self, "batch_size", None)
if batch_size is None:
return self.err_output.mem.shape[0]
return int(batch_size)
def initialize(self, device, **kwargs):
super(GradientDescentBase, self).initialize(device, **kwargs)
if self.weights:
assert len(self.weights.shape) == 2
#......... the rest of the code is omitted here .........
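The numbered comment in __init__ above is the complete accumulation algorithm. A hedged numpy sketch of steps 1-5 follows, with every coefficient a plain float and all names chosen for illustration rather than taken from veles (the unit itself keeps these in Array buffers and folds learning_rate/weights_decay into gd when computing it):

import numpy

def gd_step(weights, gd, acc, velocity,
            acc_alpha, acc_beta, gd_alpha, gd_beta, moment,
            apply_gradient=True):
    # 1. `gd` is the freshly computed gradient (learning rate already applied)
    acc[:] = acc_alpha * gd + acc_beta * acc   # 2. update the accumulator
    gd = gd_alpha * acc + gd_beta * gd         # 3. mix it back into gd
    velocity[:] = gd + moment * velocity       # 4. apply the moment
    if apply_gradient:                         # 5. optionally update weights
        weights += velocity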
Example 5: Binarization
# Required import: from veles.memory import Array [as alias]
# Alternatively: from veles.memory.Array import initialize [as alias]
class Binarization(AcceleratedUnit, EmptyDeviceMethodsMixin):
"""
    Input binarization: input and output are 2-D arrays of the same size.
    Each element A(i,j) (in row i and column j) of the input is a float
    number between 0 and 1. Each element B(i,j) of the output equals 1 with
    probability A(i,j) and 0 with probability 1 - A(i,j).
Must be assigned before initialize():
* input
Updates after run():
* output
Creates within initialize():
* output
Attributes:
input: input as batch of samples.
output: output as batch of samples.
"""
def __init__(self, workflow, **kwargs):
super(Binarization, self).__init__(workflow, **kwargs)
self.output = Array()
self.rand = kwargs.get("rand", prng.get())
self.demand("input", "batch_size")
def run(self):
"""Batch binarization on CPU only.
"""
self.output.map_invalidate()
self.input.map_read()
self.output.mem[:] = self.input.mem[:]
self.output.mem[:self.batch_size, :] = self.matlab_binornd(
1, self.input.mem[:self.batch_size, :])
def initialize(self, device, **kwargs):
super(Binarization, self).initialize(device=device, **kwargs)
if not self.output or self.output.size != self.input.size:
self.output.reset()
self.output.mem = numpy.zeros_like(self.input.mem)
self.output.initialize(self.device)
def matlab_binornd(self, n, p_in):
"""
        Analogue of MATLAB's binornd, but n must be a scalar.
        The function generates a matrix of random variables, where the
        element at position (i,j) is drawn from a binomial distribution
        with n trials and success probability p_in(i,j).
Args:
            n (int): number of trials.
            p_in (2-D numpy.ndarray): success probability matrix.
        Returns:
            res (2-D numpy.ndarray): matrix of random variables drawn
                from the binomial distribution.
"""
p = numpy.copy(p_in)
if len(p.shape) == 2:
nrow = p.shape[0]
ncol = p.shape[1]
p = numpy.transpose(p)
p = p.flatten()
dim = p.shape[0]
p = matlib.repmat(p, n, 1)
f = self.rand.rand(n, dim)
res = f < p
res = numpy.sum(res, axis=0)
res = numpy.transpose(res.reshape(ncol, nrow)).reshape(nrow, ncol)
        elif len(p.shape) == 1:
            dim = p.shape[0]
            p = matlib.repmat(p, n, 1)
            f = self.rand.rand(n, dim)
            res = f < p
            res = numpy.sum(res, axis=0)
        else:  # unsupported input shape: raise an exception
            raise ValueError("input of the Binarization class "
                             "must be a 1- or 2-dimensional array")
return res
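Since Binarization always calls matlab_binornd with n=1, the repmat/sum machinery reduces to one Bernoulli draw per element; it only does real work for n > 1. A hedged equivalent of the n=1 case, using numpy's own generator instead of veles.prng:

import numpy

rng = numpy.random.default_rng(0)
p = rng.random((5, 4))           # stand-in for the input probability matrix
binary = rng.binomial(n=1, p=p)  # B(i,j) ~ Bernoulli(A(i,j)), same as the n=1 path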
Example 6: KohonenForward
# Required import: from veles.memory import Array [as alias]
# Alternatively: from veles.memory.Array import initialize [as alias]
class KohonenForward(KohonenBase, AcceleratedUnit):
"""Kohonen forward layer.
Must be assigned before initialize():
input
weights
minibatch_offset (if total == True)
minibatch_size (if total == True)
batch_size (if total == True)
argmins speeds up run() if linked from KohonenTrainer
Updates after run():
output
Creates within initialize():
output
Attributes:
input: input as batch of samples.
weights: the weights of the neurons in Kohonen layer.
output: the list of winners.
        total: the overall winners table, created when total=True is passed
            to __init__().
"""
def __init__(self, workflow, **kwargs):
super(KohonenForward, self).__init__(workflow, **kwargs)
self.demand("input", "weights")
self.argmins = None
self._distances = Array()
self.output = Array()
self._chunk_size_ = 0
self.weights_transposed = False
self.total = Array() if kwargs.get("total", False) else None
if self.total is not None:
self.minibatch_offset = None
self.minibatch_size = None
self.batch_size = None
def init_unpickled(self):
super(KohonenForward, self).init_unpickled()
self.sources_["kohonen"] = {"FORWARD": 1}
@property
def neurons_number(self):
return self.weights.mem.shape[0]
@property
def sample_length(self):
return self.weights.mem.shape[1]
@property
def chunk_size(self):
return self._chunk_size_
def initialize(self, device, **kwargs):
super(KohonenForward, self).initialize(device=device, **kwargs)
assert self.input.mem.shape[1] == self.sample_length
batch_size = self.input.mem.shape[0]
self.output.reset(numpy.zeros(batch_size, dtype=numpy.int32))
if self.argmins is None:
self._distances.reset(numpy.zeros(
[batch_size, self.neurons_number],
dtype=self.weights.mem.dtype))
if self.total is not None:
self.total.reset(numpy.zeros(self.batch_size, dtype=numpy.int32))
self._minibatch_offset_ = numpy.zeros(1, dtype=numpy.int32)
def ocl_init(self):
batch_size = self.input.mem.shape[0]
self.output.initialize(self.device)
if self.argmins is None:
self.input.initialize(self.device)
self.weights.initialize(self.device)
self._distances.initialize(self.device)
elif self.total is None:
return
if self.total is not None:
self.total.initialize(self.device)
copy_chunk_size = int(numpy.ceil(batch_size /
self.device.max_group_size))
chunk_size = self.neurons_number // self.device.max_group_size
if chunk_size < 2:
chunk_size = self.neurons_number // 2 + 1
self.argmin_group_size = \
int(numpy.ceil(self.neurons_number / chunk_size))
block_size, vector_opt = self.device.device_info.get_kernel_bs_vo(
kernel="matrix_multiplication", dtype=self.input.dtype)
defines = {
'BLOCK_SIZE': block_size,
'VECTOR_OPT': int(bool(vector_opt)),
'BATCH': batch_size,
'SAMPLE_LENGTH': self.sample_length,
'NEURONS_NUMBER': self.neurons_number,
'CHUNK_SIZE': chunk_size,
'COPY_CHUNK_SIZE': copy_chunk_size,
#......... the rest of the code is omitted here .........
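On the CPU path (omitted above) the forward pass reduces to computing each sample's distance to every neuron's weight vector and taking the argmin; the OpenCL defines above merely tile that computation. A self-contained numpy sketch of the core, assuming squared Euclidean distance (KohonenBase may configure a different metric):

import numpy

def kohonen_winners(inputs, weights):
    # inputs: (batch, sample_length); weights: (neurons, sample_length)
    dist = ((inputs[:, None, :] - weights[None, :, :]) ** 2).sum(axis=2)
    return dist.argmin(axis=1).astype(numpy.int32)  # winning neuron per sample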
Example 7: KohonenTrainer
# Required import: from veles.memory import Array [as alias]
# Alternatively: from veles.memory.Array import initialize [as alias]
class KohonenTrainer(KohonenBase, AcceleratedUnit):
"""KohonenForward train pass.
Must be assigned before initialize():
input
shape
Creates within initialize():
weights
winners
argmins
_distances
_coords
Updates after run():
weights
Attributes:
weights: weights of the current layer.
input: input of the current layer as batch of 1D samples.
krn_dist_: computes distances between input and neuron weights.
_krn_argmin_: finds indexes of minimal computed distances.
krn_gravity_: computes gravity to the winner neuron.
krn_apply_gradients_: applies gradient to weights.
"""
def __init__(self, workflow, **kwargs):
super(KohonenTrainer, self).__init__(workflow, **kwargs)
self._distances = Array()
self.argmins = Array()
self._coords = Array()
self.weights = Array()
self.winners = Array()
self.weights_filling = kwargs.get("weights_filling", "uniform")
self.weights_stddev = kwargs.get("weights_stddev", None)
self.weights_transposed = kwargs.get("weights_transposed", False)
self.time = 0
self._sigma = 0
self.gradient_decay = kwargs.get("gradient_decay",
lambda t: 0.1 / (1.0 + t * 0.05))
self.radius_decay = kwargs.get("radius_decay",
lambda t: 1.0 / (1.0 + t * 0.05))
self.demand("input", "shape")
self._shape = kwargs.get("shape")
def init_unpickled(self):
super(KohonenTrainer, self).init_unpickled()
self.sources_["kohonen"] = {"TRAIN": 1}
self._krn_distances_ = None
self._krn_argmin_ = None
self._krn_gravity_ = None
self._krn_compute_gradients_ = None
self._krn_apply_gradients_ = None
@property
def gravity_radius(self):
return self.radius_decay(self.time) * self._sigma
@property
def gradient_multiplier(self):
return self.gradient_decay(self.time)
@property
def shape(self):
return self._shape
@shape.setter
def shape(self, value):
self._shape = value
def initialize(self, device, **kwargs):
super(KohonenTrainer, self).initialize(device=device, **kwargs)
self._neurons_number = self.shape[0] * self.shape[1]
self._sample_length = self.input.mem.size // self.input.mem.shape[0]
# Initialize weights
if self.weights_stddev is None:
# Get weights magnitude and cap it to 0.05
self.weights_stddev = min(self._get_weights_magnitude(), 0.05)
weights_size = (self._sample_length * self._neurons_number)
if not self.weights:
self.weights.reset(numpy.zeros(weights_size,
dtype=self.input.mem.dtype))
filling = {
"uniform": lambda rand: rand.fill(
self.weights.mem, -self.weights_stddev,
self.weights_stddev),
"gaussian": lambda rand: rand.fill_normal_real(
self.weights.mem, 0, self.weights_stddev)
}
filling[self.weights_filling](prng.get())
self.weights.mem = self.weights.mem.reshape((
self._neurons_number, self._sample_length))
else:
assert self.weights.shape == (self._neurons_number,
self._sample_length)
if self.weights_transposed:
# Reshape weights as a matrix:
wtrncopy = self.weights.mem.transpose().copy()
self.weights.mem.shape = wtrncopy.shape
#......... the rest of the code is omitted here .........
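The gradient_decay and radius_decay defaults above are simple hyperbolic annealing schedules: both the learning step and the neighborhood radius shrink as 1/(1 + 0.05*t) with the unit's time counter t. A quick sketch of how gradient_multiplier and gravity_radius evolve under these defaults (sigma here is a stand-in for the trainer's internal _sigma):

gradient_decay = lambda t: 0.1 / (1.0 + t * 0.05)
radius_decay = lambda t: 1.0 / (1.0 + t * 0.05)
sigma = 2.0  # illustrative; the trainer derives _sigma from the map shape
for t in (0, 10, 100):
    print(t, gradient_decay(t), radius_decay(t) * sigma)
# t=0: multiplier 0.1, radius 2.0; t=10: ~0.067, ~1.33; t=100: ~0.017, ~0.33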