This article collects typical usage examples of the theano.compile.SharedVariable method in Python. If you have been wondering what compile.SharedVariable does and how to use it, the curated code examples below may help. You can also read further about its containing module, theano.compile.
The following presents 15 code examples of compile.SharedVariable, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better Python code examples.
Example 1: sparse_constructor

# Required import: from theano import compile [as alias]
# Or: from theano.compile import SharedVariable [as alias]
# This snippet also uses: copy, scipy.sparse, and SparseType /
# SparseTensorSharedVariable from theano.sparse.

def sparse_constructor(value, name=None, strict=False, allow_downcast=None,
                       borrow=False, format=None):
    """
    SharedVariable constructor for SparseType.
    """
    if not isinstance(value, scipy.sparse.spmatrix):
        raise TypeError("Expected a sparse matrix in the sparse shared "
                        "variable constructor. Received: %s" % value.__class__)
    if format is None:
        format = value.format
    type = SparseType(format=format, dtype=value.dtype)
    if not borrow:
        value = copy.deepcopy(value)
    return SparseTensorSharedVariable(type=type, value=value, name=name,
                                      strict=strict, allow_downcast=allow_downcast)
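A minimal usage sketch (assuming Theano with SciPy support installed): theano.shared dispatches to this constructor when handed a scipy.sparse matrix, and the format is inferred from the input.

import numpy as np
import scipy.sparse
import theano

# Wrap a CSR matrix in a shared variable; the 'csr' format is inferred.
mat = scipy.sparse.csr_matrix(np.eye(3, dtype='float32'))
s = theano.shared(mat, name='s')
print(s.get_value().format)  # 'csr'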
Example 2: __init__

# Required import: from theano import compile [as alias]
# Or: from theano.compile import SharedVariable [as alias]
# This snippet also uses: multiverso as mv.

def __init__(self, svobj):
    '''Constructor of MVSharedVariable.

    The constructor creates an ArrayTableHandler and associates the shared
    variable with it. The initial value of the ArrayTableHandler will be
    the same as the value of the SharedVariable. If different initial
    values are used in different processes, their average is used as the
    initial value.
    '''
    assert isinstance(svobj, SharedVariable)
    self._svobj = svobj
    self._mv_array = mv.ArrayTableHandler(self._svobj.get_value().size,
                                          init_value=self._svobj.get_value().reshape((-1,)))

    mv.barrier()  # barrier to make sure the initial values have taken effect

    # _last_mv_data stores a copy of the value; it is used to compute the
    # update sent to multiverso when mv_sync is called.
    self._last_mv_data = self._mv_array.get().reshape(self._svobj.get_value().shape)
    self._svobj.set_value(self._last_mv_data, borrow=False)
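A hedged construction sketch (requires a running multiverso setup; mv.init() and mv.shutdown() follow the multiverso Python binding, and MVSharedVariable is the wrapper class this constructor belongs to):

import numpy as np
import theano
import multiverso as mv

mv.init()
sv = theano.shared(np.zeros(4, dtype='float32'), name='sv')
mv_sv = MVSharedVariable(sv)  # wraps sv and registers it with the server
mv.shutdown()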
Example 3: tensor_constructor

# Required import: from theano import compile [as alias]
# Or: from theano.compile import SharedVariable [as alias]
# This snippet also uses: numpy, and TensorType / TensorSharedVariable
# from theano.tensor.

def tensor_constructor(value, name=None, strict=False, allow_downcast=None,
                       borrow=False, broadcastable=None, target='cpu'):
    """
    SharedVariable constructor for TensorType.

    Notes
    -----
    Regarding the inference of the broadcastable pattern: the default is
    to assume that the value might be resized in any dimension, so the
    default broadcastable is ``(False,) * len(value.shape)``. The optional
    `broadcastable` argument overrides this default.
    """
    if target != 'cpu':
        raise TypeError('not for cpu')

    if not isinstance(value, numpy.ndarray):
        raise TypeError('a numpy.ndarray is required')

    # If no broadcastable is given, the default is to assume that the
    # value might be resized in any dimension in the future.
    if broadcastable is None:
        broadcastable = (False,) * len(value.shape)
    type = TensorType(value.dtype, broadcastable=broadcastable)
    return TensorSharedVariable(type=type,
                                value=numpy.array(value, copy=(not borrow)),
                                name=name,
                                strict=strict,
                                allow_downcast=allow_downcast)

# TensorSharedVariable brings in the tensor operators; this is not ideal,
# but it works as long as we don't do purely scalar-scalar operations.
# _tensor_py_operators comes first so that its version of
# __{gt,ge,lt,le}__ takes precedence.
#
# N.B. there is another class called ScalarSharedVariable, in
# theano/scalar/sharedvar.py. That one is not registered as a
# shared_constructor; this one is.
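A minimal usage sketch of the broadcastable inference described above (standard Theano API):

import numpy as np
import theano

# Default: every dimension is assumed resizable, hence non-broadcastable.
x = theano.shared(np.zeros((3, 1), dtype='float32'), name='x')
print(x.broadcastable)  # (False, False)

# Override the default so the second dimension broadcasts like a column.
y = theano.shared(np.zeros((3, 1), dtype='float32'),
                  broadcastable=(False, True))
print(y.broadcastable)  # (False, True)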
Example 4: scalar_constructor

# Required import: from theano import compile [as alias]
# Or: from theano.compile import SharedVariable [as alias]
# This snippet also uses: numpy, traceback, theano, TensorType, and
# ScalarSharedVariable from theano.tensor.sharedvar; integer_types comes
# from theano.compat.

def scalar_constructor(value, name=None, strict=False, allow_downcast=None,
                       borrow=False, target='cpu'):
    """
    SharedVariable constructor for scalar values. Default: int64 or float64.

    Notes
    -----
    We implement this using 0-d tensors for now.

    We ignore the borrow parameter, as we convert ``value`` to an ndarray
    (a new object). This respects the semantics of borrow: it is only a
    hint to Theano that the value may be reused.
    """
    if target != 'cpu':
        raise TypeError('not for cpu')

    if not isinstance(value, (numpy.number, float, integer_types, complex)):
        raise TypeError('a scalar value is required')
    try:
        dtype = value.dtype
    except AttributeError:
        dtype = numpy.asarray(value).dtype

    dtype = str(dtype)
    value = theano._asarray(value, dtype=dtype)
    tensor_type = TensorType(dtype=str(value.dtype), broadcastable=[])

    try:
        # Do not pass the dtype to asarray: we want this to fail if strict
        # is True and the types do not match.
        rval = ScalarSharedVariable(type=tensor_type,
                                    value=numpy.array(value, copy=True),
                                    name=name,
                                    strict=strict,
                                    allow_downcast=allow_downcast)
        return rval
    except Exception:
        traceback.print_exc()
        raise
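A minimal usage sketch (standard Theano API): theano.shared routes plain Python scalars through this constructor, producing 0-d tensors with the default dtypes named in the docstring.

import theano

a = theano.shared(3)    # int64, 0-d tensor
b = theano.shared(3.0)  # float64, 0-d tensor
print(a.type)           # TensorType(int64, scalar)
print(b.type)           # TensorType(float64, scalar)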
Example 5: __init__

# Required import: from theano import compile [as alias]
# Or: from theano.compile import SharedVariable [as alias]
# This snippet also uses: gof (theano.gof), rebuild_collect_shared from
# theano.compile.pfunc, and izip from theano.compat.

def __init__(self, inputs, outputs, **kwargs):
    if not isinstance(outputs, list):
        raise TypeError('outputs must be a list', outputs)
    for i in inputs + outputs:
        if not isinstance(i, gof.Variable):
            raise TypeError(
                'inputs and outputs must be Variable instances', i)
    if 'updates' in kwargs or 'givens' in kwargs:
        raise TypeError('updates and givens are not allowed in kwargs')

    # To support shared variables correctly, the inner function should not
    # see them; otherwise there are problems with the gradient.
    self.shared_inputs = [var for var in gof.graph.inputs(outputs)
                          if isinstance(var, SharedVariable)]
    shared_vars = [var.type() for var in self.shared_inputs]

    new = rebuild_collect_shared(outputs, inputs=inputs + shared_vars,
                                 replace=dict(izip(self.shared_inputs,
                                                   shared_vars)),
                                 copy_inputs_over=False)
    (new_inputs, new_outputs,
     [clone_d, update_d, update_expr, shared_inputs]) = new
    assert len(new_inputs) == len(inputs) + len(self.shared_inputs)
    assert len(new_outputs) == len(outputs)
    assert not update_d
    assert not update_expr
    assert not shared_inputs

    self.new_inputs = new_inputs
    self.new_outputs = new_outputs
    self.inputs = inputs
    self.outputs = outputs
    self.kwargs = kwargs
    self.input_types = [input.type for input in inputs]
    self.output_types = [output.type for output in outputs]
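This constructor appears to be theano.compile.builders.OpFromGraph.__init__. A minimal usage sketch (standard Theano API):

import theano
import theano.tensor as T
from theano.compile.builders import OpFromGraph

x, y = T.vectors('x', 'y')
op = OpFromGraph([x, y], [x + 2 * y])  # reusable graph-backed Op

a, b = T.vectors('a', 'b')
f = theano.function([a, b], op(a, b))
print(f([1, 2], [3, 4]))  # [ 7. 10.]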
Example 6: get_value

# Required import: from theano import compile [as alias]
# Or: from theano.compile import SharedVariable [as alias]
# This snippet also uses: copy and numpy.

def get_value(self, borrow=False, return_internal_type=False):
    """
    Return the value of this SharedVariable's internal array.

    Parameters
    ----------
    borrow
        Permit the return of internal storage, when used in conjunction
        with ``return_internal_type=True``.
    return_internal_type
        True to return the internal ``cuda_ndarray`` instance rather than
        a ``numpy.ndarray`` (default False).

    By default, ``get_value()`` copies from the GPU to a ``numpy.ndarray``
    and returns that host-allocated array. ``get_value(False, True)``
    returns a GPU-allocated copy of the original GPU array.
    ``get_value(True, True)`` returns the original GPU-allocated array
    without any copying.
    """
    if return_internal_type or not self.get_value_return_ndarray:
        # Return a cuda_ndarray.
        if borrow:
            return self.container.value
        else:
            return copy.deepcopy(self.container.value)
    else:
        # Return a host-side numpy.ndarray copy.
        return numpy.asarray(self.container.value)
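A short usage sketch of the two flags (the CPU calls below are standard Theano API; return_internal_type only differs from a plain copy when a GPU backend is active):

import numpy as np
import theano

w = theano.shared(np.zeros(4, dtype='float32'))
a = w.get_value()             # independent copy; safe to mutate
b = w.get_value(borrow=True)  # may alias internal storage; do not mutate
# On the CUDA backend, this returns the device-side array itself:
# d = w.get_value(borrow=True, return_internal_type=True)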
Example 7: cuda_shared_constructor

# Required import: from theano import compile [as alias]
# Or: from theano.compile import SharedVariable [as alias]
# This snippet also uses: numpy, theano, and CudaNdarrayType /
# CudaNdarraySharedVariable from theano.sandbox.cuda.

def cuda_shared_constructor(value, name=None, strict=False,
                            allow_downcast=None, borrow=False,
                            broadcastable=None, target='gpu'):
    """
    SharedVariable constructor for CudaNdarrayType.
    """
    if target != 'gpu':
        raise TypeError('not for gpu')

    # This constructor tries to cast the value to float32, which then goes
    # onto the card, so int shared vars, float64 shared vars, etc. all end
    # up on the card. That is not the default behaviour we want; see
    # float32_shared_constructor.
    # TODO: what should strict mean in this context, since we always have
    # to make a copy?
    if strict:
        _value = value
    else:
        _value = theano._asarray(value, dtype='float32')

    if not isinstance(_value, numpy.ndarray):
        raise TypeError('a numpy.ndarray is required')
    if _value.dtype.num != CudaNdarrayType.typenum:
        raise TypeError('a float32 ndarray is required')
    if broadcastable is None:
        broadcastable = (False,) * len(value.shape)
    type = CudaNdarrayType(broadcastable=broadcastable)
    return CudaNdarraySharedVariable(type=type, value=_value, name=name,
                                     strict=strict)
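A hedged usage sketch (assumes Theano configured with the legacy CUDA backend, e.g. THEANO_FLAGS=device=gpu,floatX=float32; the exact constructor that fires depends on the backend configuration):

import numpy as np
import theano

# With the legacy CUDA backend active, float32 shared variables live on
# the card; get_value() copies back to a host-side numpy array.
w = theano.shared(np.ones((2, 2), dtype='float32'))
print(w.get_value().sum())  # 4.0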
Example 8: gpuarray_shared_constructor

# Required import: from theano import compile [as alias]
# Or: from theano.compile import SharedVariable [as alias]
# This snippet also uses: numpy, pygpu, and GpuArrayType / get_context /
# ContextNotDefined from Theano's gpuarray backend.

def gpuarray_shared_constructor(value, name=None, strict=False,
                                allow_downcast=None, borrow=False,
                                broadcastable=None, target=None):
    """
    SharedVariable constructor for GpuArrayType.

    See :func:`theano.shared`.
    """
    if target in ('gpu', 'cpu'):
        raise TypeError('not for me')

    if not isinstance(value, (numpy.ndarray, pygpu.gpuarray.GpuArray)):
        raise TypeError('an ndarray or GpuArray is required')

    try:
        get_context(target)
    except ContextNotDefined:
        # Don't make this a hard error if we attempt to create a shared
        # variable while there is no default context.
        if target is None:
            raise TypeError('No default context and no context specified')
        raise

    if broadcastable is None:
        broadcastable = (False,) * value.ndim
    type = GpuArrayType(value.dtype, broadcastable, context_name=target)
    deviceval = pygpu.gpuarray.array(value, copy=(not borrow),
                                     context=type.context)
    return GpuArraySharedVariable(type=type, value=deviceval, name=name,
                                  strict=strict)
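A hedged usage sketch of the context-name dispatch (assumes the libgpuarray backend with a named context, e.g. THEANO_FLAGS=contexts=dev0->cuda0; the 'dev0' name is illustrative):

import numpy as np
import theano

# target selects the GPU context the shared variable is placed in.
w = theano.shared(np.ones(3, dtype='float32'), target='dev0')
print(w.get_value())  # copied back from the 'dev0' context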
Example 9: shared

# Required import: from theano import compile [as alias]
# Or: from theano.compile import SharedVariable [as alias]
# This snippet also uses: numpy, and Scalar / ScalarSharedVariable from
# theano.scalar.

def shared(value, name=None, strict=False, allow_downcast=None):
    """
    SharedVariable constructor for scalar values. Default: int64 or float64.

    Notes
    -----
    We implement this using 0-d tensors for now.
    """
    if not isinstance(value, (numpy.number, float, integer_types, complex)):
        raise TypeError('a scalar value is required')
    try:
        dtype = value.dtype
    except AttributeError:
        dtype = numpy.asarray(value).dtype

    dtype = str(dtype)
    value = getattr(numpy, dtype)(value)
    scalar_type = Scalar(dtype=dtype)
    rval = ScalarSharedVariable(
        type=scalar_type,
        value=value,
        name=name,
        strict=strict,
        allow_downcast=allow_downcast)
    return rval
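A hedged usage sketch: unlike theano.shared, this constructor (from theano/scalar/sharedvar.py, per the note in Example 3) builds a true Scalar-typed shared variable rather than a 0-d tensor; the import path below assumes that module layout.

from theano.scalar.sharedvar import shared as scalar_shared

s = scalar_shared(3.0)  # ScalarSharedVariable with a Scalar(float64) type
print(s.get_value())    # 3.0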
Example 10: __init__

# Required import: from theano import compile [as alias]
# Or: from theano.compile import SharedVariable [as alias]
# This snippet also uses: np (numpy) and the source project's _InputLayer
# base class.

def __init__(self, shape, input_var=None, name=None, testval=None, **kwargs):
    _InputLayer.__init__(self, shape, input_var=input_var, name=name, **kwargs)
    if testval is not None:
        self.input_var.tag.test_value = testval
    if (not isinstance(self.input_var, SharedVariable)
            and not hasattr(self.input_var.tag, 'test_value')):
        # Substitute 2 for unknown dimensions so a test value can be drawn.
        shape = [s if s is not None else 2 for s in self.shape]
        dtype = self.input_var.dtype
        self.input_var.tag.test_value = np.random.uniform(size=shape).astype(dtype)
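The snippet relies on Theano's test-value mechanism; shared variables already carry a value, so only plain symbolic inputs need one. A minimal standalone sketch of that mechanism (standard Theano API):

import numpy as np
import theano
import theano.tensor as T

theano.config.compute_test_value = 'warn'  # evaluate graphs eagerly
x = T.matrix('x')
x.tag.test_value = np.random.uniform(size=(2, 5)).astype(theano.config.floatX)
y = x.sum()  # shape/dtype problems now surface at graph-construction time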
Example 11: scalar_constructor

# Required import: from theano import compile [as alias]
# Or: from theano.compile import SharedVariable [as alias]
# A variant of Example 4 from another project; it tests against the
# built-in int instead of integer_types.

def scalar_constructor(value, name=None, strict=False, allow_downcast=None,
                       borrow=False, target='cpu'):
    """
    SharedVariable constructor for scalar values. Default: int64 or float64.

    Notes
    -----
    We implement this using 0-d tensors for now.

    We ignore the borrow parameter, as we convert ``value`` to an ndarray
    (a new object). This respects the semantics of borrow: it is only a
    hint to Theano that the value may be reused.
    """
    if target != 'cpu':
        raise TypeError('not for cpu')

    if not isinstance(value, (numpy.number, float, int, complex)):
        raise TypeError('a scalar value is required')
    try:
        dtype = value.dtype
    except AttributeError:
        dtype = numpy.asarray(value).dtype

    dtype = str(dtype)
    value = theano._asarray(value, dtype=dtype)
    tensor_type = TensorType(dtype=str(value.dtype), broadcastable=[])

    try:
        # Do not pass the dtype to asarray: we want this to fail if strict
        # is True and the types do not match.
        rval = ScalarSharedVariable(type=tensor_type,
                                    value=numpy.array(value, copy=True),
                                    name=name,
                                    strict=strict,
                                    allow_downcast=allow_downcast)
        return rval
    except Exception:
        traceback.print_exc()
        raise
Example 12: shared

# Required import: from theano import compile [as alias]
# Or: from theano.compile import SharedVariable [as alias]
# A variant of Example 9 that tests against the built-in int instead of
# integer_types.

def shared(value, name=None, strict=False, allow_downcast=None):
    """
    SharedVariable constructor for scalar values. Default: int64 or float64.

    Notes
    -----
    We implement this using 0-d tensors for now.
    """
    if not isinstance(value, (numpy.number, float, int, complex)):
        raise TypeError('a scalar value is required')
    try:
        dtype = value.dtype
    except AttributeError:
        dtype = numpy.asarray(value).dtype

    dtype = str(dtype)
    value = getattr(numpy, dtype)(value)
    scalar_type = Scalar(dtype=dtype)
    rval = ScalarSharedVariable(
        type=scalar_type,
        value=value,
        name=name,
        strict=strict,
        allow_downcast=allow_downcast)
    return rval
Example 13: mv_sync

# Required import: from theano import compile [as alias]
# Or: from theano.compile import SharedVariable [as alias]

def mv_sync(self):
    '''Sync values with the multiverso server.

    mv_sync adds the delta of the SharedVariable -- usually the gradient
    in typical examples -- to the parameter server, then fetches the
    latest value from multiverso.
    '''
    # Because multiverso always uses the add method to sync values, the
    # delta is the difference between the current value and the last
    # synced value.
    self._mv_array.add(self._svobj.get_value() - self._last_mv_data)

    self._svobj.set_value(self._mv_array.get().reshape(self._svobj.get_value().shape))
    self._last_mv_data = self._svobj.get_value(borrow=False)
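A numpy-only sketch of the delta bookkeeping above (no multiverso required; the `server` array stands in for the remote table, all names are illustrative):

import numpy as np

server = np.zeros(3)                       # remote table
last = server.copy()                       # _last_mv_data
local = last + np.array([0.1, 0.2, 0.3])   # local updates since last sync

server += local - last                     # _mv_array.add(delta)
local = server.copy()                      # set_value(self._mv_array.get())
last = local.copy()                        # refresh _last_mv_data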
Example 14: __getattribute__

# Required import: from theano import compile [as alias]
# Or: from theano.compile import SharedVariable [as alias]

def __getattribute__(self, attr):
    '''Make MVSharedVariable act the same as SharedVariable.'''
    if attr in ['_svobj', '_mv_array', '_last_mv_data']:
        # For attributes stored on self, use the parent __getattribute__
        # to fetch them from the object directly; anything else would fall
        # into an infinite loop.
        return object.__getattribute__(self, attr)
    elif attr in ['mv_sync', '__getstate__']:
        # For methods defined on MVSharedVariable, fetch the method
        # directly and bind it to the self object.
        return getattr(MVSharedVariable, attr).__get__(self)
    else:
        # Otherwise, fetch the attribute from the wrapped object.
        return getattr(self._svobj, attr)
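A minimal standalone sketch of the same delegation pattern, stripped of the multiverso specifics (plain Python, illustrative names):

class Wrapper(object):
    def __init__(self, obj):
        self._obj = obj  # __setattr__ is untouched, so this is safe

    def __getattribute__(self, attr):
        if attr == '_obj':
            # Fetch our own storage without recursing into this method.
            return object.__getattribute__(self, attr)
        # Everything else is forwarded to the wrapped object.
        return getattr(object.__getattribute__(self, '_obj'), attr)

w = Wrapper([1, 2, 3])
print(w.count(2))  # 1 -- resolved on the wrapped list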
Example 15: mv_shared

# Required import: from theano import compile [as alias]
# Or: from theano.compile import SharedVariable [as alias]

def mv_shared(*args, **kwargs):
    '''mv_shared works the same way as `theano.shared`.

    It calls `theano.shared` to create the SharedVariable and uses
    MVSharedVariable to wrap it.
    '''
    var = shared(*args, **kwargs)
    mv_shared.shared_vars.append(MVSharedVariable(var))
    return var
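A hedged end-to-end sketch (assumes the multiverso Python binding with its theano_ext module; the sync helper name follows the multiverso examples and should be checked against your installed version):

import numpy as np
import multiverso as mv
from multiverso.theano_ext import sharedvar

mv.init()
W = sharedvar.mv_shared(np.zeros((3, 3), dtype='float32'), name='W')
# ... run training steps that update W locally ...
sharedvar.sync_all_mv_shared_vars()  # push deltas, pull latest values
mv.shutdown()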