This article collects typical usage examples of the get_scalar_constant_value function from the Python module theano.tensor.basic. If you are wondering what get_scalar_constant_value does, how to call it, or what real uses look like, the curated code examples below should help.
The following presents 10 code examples of the get_scalar_constant_value function, drawn from open-source projects and ordered by popularity.
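Before the examples, here is a minimal sketch of the basic call pattern, assuming a standard Theano installation: get_scalar_constant_value returns the value of a variable that is known to be a scalar constant at graph-construction time, and raises NotScalarConstantError otherwise. Nearly every example below wraps the call in exactly this try/except.

import theano.tensor as T
from theano.tensor.basic import get_scalar_constant_value, NotScalarConstantError

c = T.constant(2)   # a compile-time scalar constant
x = T.scalar('x')   # a symbolic scalar with no fixed value

print(get_scalar_constant_value(c))  # -> 2

try:
    get_scalar_constant_value(x)
except NotScalarConstantError:
    print('x is not a compile-time constant')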
Example 1: local_concatenateGrad_mkl
def local_concatenateGrad_mkl(node):
    if not mkl_available():
        return

    if not isinstance(node.op, Split):
        return

    if node.inputs[0].type.ndim != 4:
        return

    try:
        gz, axis, splits = node.inputs
        if not isinstance(axis, integer_types):
            try:
                axis = int(get_scalar_constant_value(axis))
            except NotScalarConstantError:
                return

        if isinstance(axis, integer_types):
            # MKL Concatenate only supports axis=1
            if axis != 1:
                return

        # Retrieve the inputs to the Join op. The chain being matched:
        #   Splits <- MakeVector <- [Subtensor...] <- Shape <- inputs
        #   (inp_0 = Subtensor output, inp_1 = Shape output, inp = input)
        if not isinstance(splits.owner.op, theano.tensor.opt.MakeVector):
            return

        tensors = []
        for inp_0 in splits.owner.inputs:
            if not isinstance(inp_0.owner.op, theano.tensor.subtensor.Subtensor):
                return

            inp_1 = inp_0.owner.inputs[0]
            if not isinstance(inp_1.owner.op, theano.compile.ops.Shape):
                return

            inp = inp_1.owner.inputs[0]
            tensors.append(inp)

        tensors_internal = [U2IConcatenate()(x) for x in tensors]
        new_inputs = [axis] + tensors_internal
        z_internal = mkl_concatenate.Concatenate()(*new_inputs)
        gz_internal = I2UGrad()(z_internal, gz)

        concatenateGradOut = mkl_concatenate.ConcatenateGrad()(gz_internal, axis, *tensors_internal)
        gx_user = [U2IGrad()(_x, _gz)
                   for _x, _gz in zip(tensors, concatenateGradOut)]
        rval = gx_user
        return rval
    except Exception as e:
        msg = ('Failed to apply local opt to Op %s. '
               'Exception message: %s\n') % (node.op, str(e))
        _logger.warning(msg)
        return
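The axis handling at the top of this optimizer is an idiom that recurs in Example 9: a symbolic but constant axis is folded to a plain Python int before the axis != 1 check. A standalone sketch of just that idiom, assuming a standard Theano installation:

import theano.tensor as T
from theano.tensor.basic import get_scalar_constant_value, NotScalarConstantError

axis = T.constant(1)               # symbolic, but constant
if not isinstance(axis, int):
    try:
        axis = int(get_scalar_constant_value(axis))
    except NotScalarConstantError:
        axis = None                # unknown at compile time; the optimizer bails out
print(axis)                        # 1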
Example 2: local_0_dot_x
def local_0_dot_x(node):
    if not isinstance(node.op, T.Dot):
        return False

    x = node.inputs[0]
    y = node.inputs[1]
    replace = False
    try:
        if get_scalar_constant_value(x) == 0:
            replace = True
    except NotScalarConstantError:
        pass

    try:
        if get_scalar_constant_value(y) == 0:
            replace = True
    except NotScalarConstantError:
        pass

    if replace:
        constant_zero = T.constant(0, dtype=node.outputs[0].type.dtype)
        if x.ndim == 2 and y.ndim == 2:
            constant_zero = assert_(constant_zero,
                                    T.eq(x.shape[1], y.shape[0]))
            return [T.alloc(constant_zero, x.shape[0], y.shape[1])]
        elif x.ndim == 1 and y.ndim == 2:
            constant_zero = assert_(constant_zero,
                                    T.eq(x.shape[0], y.shape[0]))
            return [T.alloc(constant_zero, y.shape[1])]
        elif x.ndim == 2 and y.ndim == 1:
            constant_zero = assert_(constant_zero,
                                    T.eq(x.shape[1], y.shape[0]))
            return [T.alloc(constant_zero, x.shape[0])]
        elif x.ndim == 1 and y.ndim == 1:
            constant_zero = assert_(constant_zero,
                                    T.eq(x.shape[0], y.shape[0]))
            return [constant_zero]
        else:
            _logger.warning("Optimization Warning: "
                            "Optimization theano/opt.py:local_0_dot_x Found "
                            "that it could apply, but was not implemented "
                            "for dot product with these input types:\n"
                            "(%s, %s)",
                            x.type, y.type)
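For reference, a minimal sketch of the replacement graph this optimizer builds in the matrix-by-matrix case, assuming a standard Theano installation: a zero constant broadcast with T.alloc to the shape the dot product would have had.

import theano
import theano.tensor as T

x = T.matrix('x')
y = T.matrix('y')
zero = T.constant(0, dtype='float64')
out = T.alloc(zero, x.shape[0], y.shape[1])   # same shape as T.dot(x, y)
f = theano.function([x, y], out)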
Example 3: scalarconsts_rest
def scalarconsts_rest(inputs):
    """Partition a list of variables into two kinds:
    scalar constants, and the rest."""
    consts = []
    origconsts = []
    nonconsts = []
    for i in inputs:
        try:
            v = get_scalar_constant_value(i)
            consts.append(v)
            origconsts.append(i)
        except NotScalarConstantError:
            nonconsts.append(i)
    return consts, origconsts, nonconsts
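A small usage sketch, assuming the function above and a standard Theano installation are in scope:

import theano.tensor as T

x = T.vector('x')
consts, origconsts, nonconsts = scalarconsts_rest([T.constant(3), x, T.constant(0.5)])
print(consts)      # [array(3), array(0.5)], the extracted Python-level values
print(nonconsts)   # [x], everything that is not a scalar constant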
Example 4: local_max_and_argmax
def local_max_and_argmax(node):
    """
    If we don't use the argmax, change it to a max only.
    """
    if node.op == T._max_and_argmax:
        if len(node.outputs[1].clients) == 0:
            # MaxAndArgmax supports a variable axis,
            # but CAReduce supports only a constant axis.
            try:
                axis = get_scalar_constant_value(node.inputs[1])
            except NotScalarConstantError:
                return False
            new = CAReduce(scal.maximum, axis)(node.inputs[0])
            return [new, None]
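The rewrite target is a plain CAReduce over the same axis. A minimal sketch of the graph this optimizer produces, assuming a standard Theano installation (CAReduce lives in theano.tensor.elemwise):

import theano
import theano.tensor as T
import theano.scalar as scal
from theano.tensor.elemwise import CAReduce

x = T.matrix('x')
m = CAReduce(scal.maximum, 0)(x)   # max over axis 0, no argmax output
f = theano.function([x], m)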
Example 5: shape_dim_i
def shape_dim_i(x, i):
    # print('shape keys', shape_of.keys())
    # print('args (x, i):', x, i)
    try:
        return x.data.shape[i]
    except AttributeError:
        pass
    try:
        return int(get_scalar_constant_value(shape_of[x][i]))
    except NotScalarConstantError:
        pass
    try:
        return shape_of[x][i].eval()
    except Exception:
        return -1  # an unsatisfiable shape
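A hypothetical driver for shape_dim_i, assuming the function above is defined at module scope so it can see a global shape_of; shape_of stands in for the variable-to-symbolic-shape mapping that Theano's ShapeFeature normally maintains:

import theano.tensor as T

x = T.constant([[1, 2], [3, 4]])
shape_of = {x: x.shape}      # stand-in for the ShapeFeature mapping
print(shape_dim_i(x, 0))     # 2, read directly from x.data.shape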
Example 6: apply
def apply(self, fgraph):
    did_something = True
    while did_something:
        nodelist = fgraph.toposort()
        did_something = False
        for node in nodelist:
            if node.op == T._max_and_argmax:
                if len(node.outputs[1].clients) == 0:
                    try:
                        axis = get_scalar_constant_value(node.inputs[1])
                    except NotScalarConstantError:
                        return False
                    new = CAReduce(scal.maximum, axis)(node.inputs[0])
                    try:
                        fgraph.replace_all_validate(
                            ((node.outputs[0], new),),
                            reason=self.__class__.__name__)
                        did_something = True
                        break
                    except InconsistencyError:
                        pass
Example 7: make_node
def make_node(self, x, repeats):
    x = basic.as_tensor_variable(x)
    repeats = basic.as_tensor_variable(repeats)

    if repeats.dtype not in tensor.integer_dtypes:
        raise TypeError("repeats.dtype must be an integer.")

    # Some dtypes are not supported by numpy's implementation of repeat.
    # Until another one is available, we should fail at graph construction
    # time, not wait for execution.
    ptr_bitwidth = theano.configdefaults.local_bitwidth()
    if ptr_bitwidth == 64:
        numpy_unsupported_dtypes = ("uint64",)
    if ptr_bitwidth == 32:
        numpy_unsupported_dtypes = ("uint32", "int64", "uint64")

    if repeats.dtype in numpy_unsupported_dtypes:
        raise TypeError(
            ("dtypes %s are not supported by numpy.repeat "
             "for the 'repeats' parameter, "
             % str(numpy_unsupported_dtypes)),
            repeats.dtype)

    if self.axis is None:
        broadcastable = [False]
    else:
        try:
            const_reps = basic.get_scalar_constant_value(repeats)
        except basic.NotScalarConstantError:
            const_reps = None
        if const_reps == 1:
            broadcastable = x.broadcastable
        else:
            broadcastable = list(x.broadcastable)
            broadcastable[self.axis] = False

    out_type = theano.tensor.TensorType(x.dtype, broadcastable)
    return theano.Apply(self, [x, repeats], [out_type()])
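The interesting branch is the broadcastable computation: when repeats is the compile-time constant 1, repeating is a no-op, so the output keeps x's broadcastable pattern; otherwise the repeated axis must be marked non-broadcastable. A hypothetical sketch of just that decision, assuming a standard Theano installation:

import theano.tensor as T
from theano.tensor import basic

repeats = T.constant(1)
try:
    const_reps = basic.get_scalar_constant_value(repeats)
except basic.NotScalarConstantError:
    const_reps = None
print(const_reps == 1)   # True: the output can reuse x.broadcastable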
Example 8: isNaN_or_Inf_or_None
def isNaN_or_Inf_or_None(x):
    isNone = x is None
    try:
        isNaN = numpy.isnan(x)
        isInf = numpy.isinf(x)
        isStr = isinstance(x, string_types)
    except Exception:
        isNaN = False
        isInf = False
        isStr = False
    if not isNaN and not isInf:
        try:
            val = get_scalar_constant_value(x)
            isInf = numpy.isinf(val)
            isNaN = numpy.isnan(val)
        except Exception:
            isNaN = False
            isInf = False
    # Note: this re-check overrides the isStr computed above, so only string
    # Constants in the graph (not raw Python strings) end up counting.
    if isinstance(x, gof.Constant) and isinstance(x.data, string_types):
        isStr = True
    else:
        isStr = False
    return isNone or isNaN or isInf or isStr
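A hypothetical usage sketch, assuming the helper above and its imports (numpy, six's string_types, theano.gof) are in scope:

import numpy

print(isNaN_or_Inf_or_None(None))        # True
print(isNaN_or_Inf_or_None(numpy.inf))   # True
print(isNaN_or_Inf_or_None(1.0))         # False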
Example 9: local_concatenate_mkl
def local_concatenate_mkl(node):
    if not mkl_available():
        return

    if not isinstance(node.op, Join):
        return

    if node.inputs[1].type.ndim != 4:
        return

    try:
        axis, tensors = node.inputs[0], node.inputs[1:]
        if not isinstance(axis, integer_types):
            try:
                axis = int(get_scalar_constant_value(axis))
            except NotScalarConstantError:
                return

        if isinstance(axis, integer_types):
            # MKL Concatenate only supports axis=1
            if axis != 1:
                return

        tensors_internal = [U2IConcatenate()(x) for x in tensors]
        new_inputs = [axis] + tensors_internal
        concatenateOut = mkl_concatenate.Concatenate()(*new_inputs)
        z_user = I2U()(concatenateOut)
        rval = z_user
        return [rval]
    except Exception as e:
        msg = ('Failed to apply local opt to Op %s. '
               'Exception message: %s\n') % (node.op, str(e))
        _logger.warning(msg)
        return
Example 10: local_max_and_argmax
def local_max_and_argmax(node):
    """
    If we don't use the argmax, change it to a max only.
    """
    if node.op == T._max_and_argmax:
        if len(node.outputs[1].clients) == 0:
            # MaxAndArgmax supports a variable axis,
            # but CAReduce supports only a constant axis.
            if node.inputs[1].data is None:
                axis = None
            else:
                try:
                    axis = get_scalar_constant_value(node.inputs[1])
                except NotScalarConstantError:
                    # Not a scalar: it may still be a constant vector of axes.
                    axis = node.inputs[1]
                    if not isinstance(axis, T.TensorConstant):
                        return False
                    axis = axis.data
            new = CAReduce(scal.maximum, axis)(node.inputs[0])
            return [new, None]

        if len(node.outputs[0].clients) == 0:
            return [None, T._argmax(node.inputs[0], node.inputs[1])]