

Python scalar.upcast Function Code Examples

This article collects typical usage examples of the Python function theano.scalar.upcast. If you have been wondering what upcast does, how to call it, or what it looks like in real code, the hand-picked examples below should help.


Fifteen code examples of the upcast function are shown below, sorted by popularity by default.
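Before diving in, it may help to see what upcast itself computes: given one or more dtype names, it returns the name of the dtype that NumPy-style promotion produces (Theano's config.cast_policy setting can additionally keep results in float32). A minimal sketch, assuming default settings:

import theano.scalar as scal

# upcast returns the dtype name able to hold values of every input dtype.
print(scal.upcast('int8', 'int32'))     # 'int32'
print(scal.upcast('int32', 'float64'))  # 'float64'
# int64 mixed with float32 promotes to float64 under plain NumPy rules;
# with cast_policy='numpy+floatX' and floatX='float32' it stays float32.
print(scal.upcast('int64', 'float32'))  # 'float64' by default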

Example 1: filter_inplace

    def filter_inplace(self, data, old_data, strict=False, allow_downcast=None):
        if strict or allow_downcast or isinstance(data, cuda.CudaNdarray):
            return cuda.filter(data, self.broadcastable, strict, old_data)

        else:  # (not strict) and (not allow_downcast)
            # Check if data.dtype can be accurately cast to self.dtype
            if isinstance(data, numpy.ndarray):
                up_dtype = scal.upcast(self.dtype, data.dtype)
                if up_dtype == self.dtype:
                    return cuda.filter(data, self.broadcastable, strict, old_data)
                else:
                    raise TypeError(
                        "%s, with dtype %s, cannot store a value of "
                        "dtype %s without risking loss of precision. "
                        "If you do not mind, please cast your data to %s."
                        % (self, self.dtype, data.dtype, self.dtype),
                        data,
                    )
            else:
                converted_data = theano._asarray(data, self.dtype)

                if allow_downcast is None and type(data) is float and self.dtype == theano.config.floatX:
                    return cuda.filter(converted_data, self.broadcastable, strict, old_data)
                elif numpy.all(data == converted_data):
                    return cuda.filter(converted_data, self.broadcastable, strict, old_data)
                else:
                    raise TypeError(
                        "%s, with dtype %s, cannot store accurately value %s, "
                        "it would be represented as %s. If you do not mind, "
                        "you can cast your data to %s." % (self, self.dtype, data, converted_data, self.dtype),
                        data,
                    )
Author: repos-python | Project: Theano | Lines: 31 | Source: type.py
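The precision check in filter_inplace is an idiom that recurs in Examples 8 and 13 below: upcast the target dtype together with the incoming data's dtype, and accept the data only if the result is still the target dtype, i.e. only if the cast cannot lose precision. A standalone sketch of that test (the can_store_safely helper is hypothetical, not part of Theano):

import theano.scalar as scal

def can_store_safely(target_dtype, data_dtype):
    # Safe exactly when promoting the two dtypes lands back on the target.
    return scal.upcast(target_dtype, data_dtype) == target_dtype

print(can_store_safely('float64', 'int32'))    # True: int32 fits losslessly
print(can_store_safely('float32', 'float64'))  # False: would downcast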

Example 2: local_gpua_advanced_incsubtensor

def local_gpua_advanced_incsubtensor(node, context_name):

    # This is disabled on non-cuda contexts
    if get_context(context_name).kind != "cuda":
        return None

    x, y, ilist = node.inputs

    # Gpu Ops needs both inputs to have the same dtype
    if x.type.dtype != y.type.dtype:
        dtype = scalar.upcast(x.type.dtype, y.type.dtype)
        if x.type.dtype != dtype:
            x = tensor.cast(x, dtype)
        if y.type.dtype != dtype:
            y = tensor.cast(y, dtype)

    set_instead_of_inc = node.op.set_instead_of_inc
    active_device_no = theano.sandbox.cuda.active_device_number()
    device_properties = theano.sandbox.cuda.device_properties

    compute_capability = device_properties(active_device_no)["major"]

    if compute_capability < 2 or x.ndim != 2 or y.ndim != 2:
        return GpuAdvancedIncSubtensor1(set_instead_of_inc=set_instead_of_inc)
    else:
        return GpuAdvancedIncSubtensor1_dev20(set_instead_of_inc=set_instead_of_inc)
Author: rollingstone | Project: Theano | Lines: 26 | Source: opt.py
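Examples 2, 6 and 10 are different revisions of the same optimizer, and all share the dtype-equalization step above: when the two inputs disagree, each is cast to their common upcast dtype so the GPU kernel only ever sees one dtype. A minimal sketch of just that step, assuming two symbolic inputs of different dtypes:

import theano.tensor as tensor
from theano import scalar

x = tensor.matrix(dtype='float32')
y = tensor.matrix(dtype='int64')

# Bring both operands to the common promoted dtype before the GPU op.
if x.type.dtype != y.type.dtype:
    dtype = scalar.upcast(x.type.dtype, y.type.dtype)
    if x.type.dtype != dtype:
        x = tensor.cast(x, dtype)
    if y.type.dtype != dtype:
        y = tensor.cast(y, dtype)

print(x.dtype, y.dtype)  # both report the upcast dtype ('float64' here)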

Example 3: test_reduce_custom_acc_dtype

    def test_reduce_custom_acc_dtype(self):
        # Test the ability to provide your own accumulator dtype for a reduce.

        # We try multiple axis combinations even though axis should not matter.
        idx = 0
        for method in self.methods:
            for input_dtype in self.dtypes:
                x = tensor.matrix(dtype=input_dtype)
                for acc_dtype in self.dtypes:
                    # If the accumulator is a complex, the gradient of the reduce will
                    # cast the complex to the input dtype. We can't call the normal
                    # cast on a complex to a not complex as this is ambiguous.
                    if not input_dtype.startswith("complex") and acc_dtype.startswith("complex"):
                        continue

                    axis = self.axes[idx % len(self.axes)]
                    # If acc_dtype would force a downcast, we expect a TypeError
                    # We always allow int/uint inputs with float/complex outputs.
                    upcasted_dtype = scalar.upcast(input_dtype, acc_dtype)
                    if acc_dtype == upcasted_dtype or (
                        input_dtype in tensor.discrete_dtypes and acc_dtype in tensor.continuous_dtypes
                    ):
                        var = getattr(x, method)(acc_dtype=acc_dtype, axis=axis)
                        assert var.owner.op.acc_dtype == acc_dtype

                        if "complex" in input_dtype:
                            continue
                        # Check that we can take the gradient
                        tensor.grad(var.sum(), x, disconnected_inputs="ignore")
                    else:
                        self.assertRaises(TypeError, getattr(x, method), acc_dtype=acc_dtype, axis=axis)

                    idx += 1
Author: souravsingh | Project: Theano | Lines: 33 | Source: test_elemwise.py
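This test (like Examples 4, 7, 9 and 11) encodes one acceptance rule: a requested accumulator or output dtype is legal when it equals the upcast of itself with the input dtype (so nothing is downcast), or when the input is discrete (int/uint) and the request is continuous (float/complex). A sketch of that predicate in isolation (dtype_is_acceptable is a hypothetical name):

from theano import scalar, tensor

def dtype_is_acceptable(input_dtype, requested_dtype):
    # Legal if no downcast occurs, or if going int/uint -> float/complex.
    return (requested_dtype == scalar.upcast(input_dtype, requested_dtype) or
            (input_dtype in tensor.discrete_dtypes and
             requested_dtype in tensor.continuous_dtypes))

print(dtype_is_acceptable('int32', 'float32'))    # True: discrete -> continuous
print(dtype_is_acceptable('float64', 'float32'))  # False: would downcast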

Example 4: test_prod_without_zeros_custom_acc_dtype

    def test_prod_without_zeros_custom_acc_dtype(self):
        """
        Test ability to provide your own acc_dtype for a ProdWithoutZeros().
        """
        # We try multiple axis combinations even though axis should not matter.
        axes = [None, 0, 1, [], [0], [1], [0, 1]]
        idx = 0
        for input_dtype in imap(str, theano.scalar.all_types):
            x = tensor.matrix(dtype=input_dtype)
            for acc_dtype in imap(str, theano.scalar.all_types):
                axis = axes[idx % len(axes)]
                # If acc_dtype would force a downcast, we expect a TypeError
                # We always allow int/uint inputs with float/complex outputs.
                upcasted_dtype = scalar.upcast(input_dtype, acc_dtype)
                if (acc_dtype == upcasted_dtype or
                        (input_dtype in tensor.discrete_dtypes and
                            acc_dtype in tensor.continuous_dtypes)
                        ):
                    prod_woz_var = ProdWithoutZeros(
                            axis=axis, acc_dtype=acc_dtype)(x)
                    assert prod_woz_var.owner.op.acc_dtype == acc_dtype

                    if (acc_dtype.startswith('complex') and
                        input_dtype != acc_dtype):
                        continue
                    f = theano.function([x], prod_woz_var)
                    data = numpy.random.rand(2, 3) * 3
                    data = data.astype(input_dtype)
                    f(data)
                else:
                    self.assertRaises(TypeError,
                            ProdWithoutZeros(axis=axis, acc_dtype=acc_dtype),
                            x)

                idx += 1
Author: AI-Cdrone | Project: Theano | Lines: 35 | Source: test_elemwise.py

Example 5: make_node

    def make_node(self, a, val, offset):
        a = tensor.as_tensor_variable(a)
        val = tensor.as_tensor_variable(val)
        offset = tensor.as_tensor_variable(offset)
        if a.ndim != 2:
            raise TypeError('%s: first parameter must have exactly'
                            ' two dimensions' % self.__class__.__name__)
        elif val.ndim != 0:
            raise TypeError('%s: second parameter must be a scalar'\
                            % self.__class__.__name__)
        elif offset.ndim != 0:
            raise TypeError('%s: third parameter must be a scalar'\
                            % self.__class__.__name__)
        val = tensor.cast(val, dtype=scalar.upcast(a.dtype, val.dtype))
        if val.dtype != a.dtype:
            raise TypeError('%s: type of second parameter must be the same'
                            ' as the first\'s' % self.__class__.__name__)
        elif offset.dtype[:3] != 'int':
            raise TypeError("%s: type of third parameter must be an integer;"
                            " use theano.tensor.cast(input, 'int32'/'int64')"
                            % self.__class__.__name__)

        return gof.Apply(self, [a, val, offset], [a.type()])
Author: MLevinson-OR | Project: Theano | Lines: 25 | Source: extra_ops.py
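Examples 5, 12 and 15 use upcast inside make_node the other way around: the scalar val is first cast up to upcast(a.dtype, val.dtype), and the op then rejects the combination if that promoted dtype no longer equals a.dtype, i.e. if accepting val would silently promote the array argument. A sketch of just that guard, assuming a float64 matrix and a float32 scalar:

import numpy
import theano.tensor as tensor
from theano import scalar

a = tensor.matrix(dtype='float64')
val = tensor.as_tensor_variable(numpy.float32(0.5))  # float32 scalar variable

# Promote the scalar toward the common dtype with the array...
val = tensor.cast(val, dtype=scalar.upcast(a.dtype, val.dtype))
# ...then reject the pair only if the array itself would need promotion.
if val.dtype != a.dtype:
    raise TypeError("scalar dtype %s incompatible with array dtype %s"
                    % (val.dtype, a.dtype))
print(val.dtype)  # 'float64': val was upcast to match a, so it is accepted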

Example 6: local_gpua_advanced_incsubtensor

def local_gpua_advanced_incsubtensor(node, context_name):
    context = get_context(context_name)
    # This is disabled on non-cuda contexts
    if context.kind != 'cuda':
        return None

    x, y, ilist = node.inputs

    # Gpu Ops needs both inputs to have the same dtype
    if (x.type.dtype != y.type.dtype):
        dtype = scalar.upcast(x.type.dtype, y.type.dtype)
        if x.type.dtype != dtype:
            x = tensor.cast(x, dtype)
        if y.type.dtype != dtype:
            y = tensor.cast(y, dtype)

    set_instead_of_inc = node.op.set_instead_of_inc

    compute_capability = int(context.bin_id[-2])

    if (compute_capability < 2 or x.ndim != 2 or y.ndim != 2):
        return GpuAdvancedIncSubtensor1(
            set_instead_of_inc=set_instead_of_inc)
    else:
        return GpuAdvancedIncSubtensor1_dev20(
            set_instead_of_inc=set_instead_of_inc)
Author: GeorgyKonoplich | Project: vehicle_detection | Lines: 26 | Source: opt.py

Example 7: test_prod_custom_dtype

    def test_prod_custom_dtype(self):
        """
        Test the ability to provide your own output dtype for a prod.
        """
        # We try multiple axis combinations even though axis should not matter.
        axes = [None, 0, 1, [0], [1], [0, 1]]
        idx = 0
        for input_dtype in imap(str, theano.scalar.all_types):
            x = tensor.matrix(dtype=input_dtype)
            for output_dtype in imap(str, theano.scalar.all_types):
                axis = axes[idx % len(axes)]
                # If output_dtype would force a downcast, we expect a TypeError
                # We always allow int/uint inputs with float/complex outputs.
                upcasted_dtype = scalar.upcast(input_dtype, output_dtype)
                if (output_dtype == upcasted_dtype or
                        (input_dtype in tensor.discrete_dtypes and
                            output_dtype in tensor.continuous_dtypes)
                        ):
                    prod_var = x.prod(dtype=output_dtype, axis=axis)
                    assert prod_var.dtype == output_dtype

                    if "complex" in output_dtype:
                        continue
                    # Check that we can take the gradient
                    grad_var = tensor.grad(prod_var.sum(), x,
                            disconnected_inputs='ignore')
                else:
                    self.assertRaises(TypeError,
                            x.prod, dtype=output_dtype, axis=axis)

                idx += 1
Author: jaberg | Project: Theano | Lines: 31 | Source: test_elemwise.py

Example 8: filter

    def filter(self, data, strict=False, allow_downcast=None):
        if strict:
            if not isinstance(data, gpuarray.GpuArray):
                raise TypeError("%s expected a GpuArray object." % self,
                                data, type(data))
            if self.typecode != data.typecode:
                raise TypeError("%s expected typecode %d (dtype %s), "
                                "got %d (dtype %s)." %
                                (self, self.typecode, self.dtype,
                                 data.typecode, str(data.dtype)))
            # fallthrough to ndim check
        elif allow_downcast:
            data = gpuarray.array(data, dtype=self.typecode, copy=False,
                                  ndmin=len(self.broadcastable))
        else:
            up_dtype = scalar.upcast(self.dtype, data.dtype)
            if up_dtype == self.dtype:
                data = gpuarray.array(data, dtype=self.dtype, copy=False)
            else:
                raise TypeError("%s cannot store a value of dtype %s "
                                "without risking loss of precision." %
                                (self, data.dtype))

        if self.ndim != data.ndim:
            raise TypeError("Wrong number of dimensions: expected %s, "
                            "got %s with shape %s." % (self.ndim, data.ndim,
                                                       data.shape), data)
        shp = data.shape
        for i, b in enumerate(self.broadcastable):
            if b and shp[i] != 1:
                raise TypeError("Non-unit value on shape on a broadcastable"
                                " dimension.", shp, self.broadcastable)
        return data
Author: Jackwangyang | Project: Theano | Lines: 33 | Source: type.py

Example 9: test_sum_custom_dtype

    def test_sum_custom_dtype(self):
        """
        Test the ability to provide your own output dtype for a sum.
        """
        # We try multiple axis combinations even though axis should not matter.
        axes = [None, 0, 1, [0], [1], [0, 1]]
        idx = 0
        for input_dtype in imap(str, theano.scalar.all_types):
            x = tensor.matrix(dtype=input_dtype)
            for output_dtype in imap(str, theano.scalar.all_types):
                # If the output is a complex, the gradient of the sum will
                # cast the complex to the input dtype. We can't call the normal
                # cast on a complex to a not complex as this is ambiguous.
                if not input_dtype.startswith("complex") and output_dtype.startswith("complex"):
                    continue

                axis = axes[idx % len(axes)]
                # If output_dtype would force a downcast, we expect a TypeError
                # We always allow int/uint inputs with float/complex outputs.
                upcasted_dtype = scalar.upcast(input_dtype, output_dtype)
                if output_dtype == upcasted_dtype or (
                    input_dtype in tensor.discrete_dtypes and output_dtype in tensor.continuous_dtypes
                ):
                    sum_var = x.sum(dtype=output_dtype, axis=axis)
                    assert sum_var.dtype == output_dtype

                    if "complex" in input_dtype:
                        continue
                    # Check that we can take the gradient
                    grad_var = tensor.grad(sum_var.sum(), x, disconnected_inputs="ignore")
                else:
                    self.assertRaises(TypeError, x.sum, dtype=output_dtype, axis=axis)

                idx += 1
Author: repos-python | Project: Theano | Lines: 34 | Source: test_elemwise.py

Example 10: local_gpua_advanced_incsubtensor

def local_gpua_advanced_incsubtensor(node):

    # This optimization is disabled if cuda is not active
    if pygpu.get_default_context().kind != "cuda":
        return None

    x, y, ilist = node.inputs

    # Gpu Ops needs both inputs to have the same dtype
    if (x.type.dtype != y.type.dtype):
        dtype = scalar.upcast(x.type.dtype, y.type.dtype)
        if x.type.dtype != dtype:
            x = tensor.cast(x, dtype)
        if y.type.dtype != dtype:
            y = tensor.cast(y, dtype)

    set_instead_of_inc = node.op.set_instead_of_inc
    active_device_no = theano.sandbox.cuda.active_device_number()
    device_properties = theano.sandbox.cuda.device_properties

    compute_capability = device_properties(active_device_no)['major']

    if (compute_capability < 2 or x.ndim != 2 or y.ndim != 2):
        return [GpuAdvancedIncSubtensor1(
                set_instead_of_inc=set_instead_of_inc)(x, y, ilist)]
    else:
        return [GpuAdvancedIncSubtensor1_dev20(
                set_instead_of_inc=set_instead_of_inc)(x, y, ilist)]
Author: ballasn | Project: Theano | Lines: 28 | Source: opt.py

Example 11: test_prod_without_zeros_custom_dtype

    def test_prod_without_zeros_custom_dtype(self):
        """
        Test the ability to provide your own output dtype for a ProdWithoutZeros().
        """
        # We try multiple axis combinations even though axis should not matter.
        axes = [None, 0, 1, [0], [1], [0, 1]]
        idx = 0
        for input_dtype in imap(str, theano.scalar.all_types):
            x = tensor.matrix(dtype=input_dtype)
            for output_dtype in imap(str, theano.scalar.all_types):
                axis = axes[idx % len(axes)]
                # If output_dtype would force a downcast, we expect a TypeError
                # We always allow int/uint inputs with float/complex outputs.
                upcasted_dtype = scalar.upcast(input_dtype, output_dtype)
                if (output_dtype == upcasted_dtype or
                        (input_dtype in tensor.discrete_dtypes and
                            output_dtype in tensor.continuous_dtypes)
                        ):
                    prod_woz_var = ProdWithoutZeros(
                            axis=axis, dtype=output_dtype)(x)
                    assert prod_woz_var.dtype == output_dtype
                else:
                    self.assertRaises(TypeError,
                            ProdWithoutZeros(axis=axis, dtype=output_dtype),
                            x)

                idx += 1
Author: jaberg | Project: Theano | Lines: 27 | Source: test_elemwise.py

Example 12: make_node

    def make_node(self, A, b):
        A_ = tensor.as_tensor_variable(A)
        b_ = tensor.as_tensor_variable(b)
        if A_.broadcastable != (False, False):
            raise TypeError("A must be a matrix", A_.type)
        if b_.broadcastable not in ((False,), (True, False), (False, False)):
            raise TypeError("b must be a matrix or vector", b_.type)
        odtype = scalar.upcast(A_.dtype, b_.dtype)
        otype = tensor.TensorType(broadcastable=b_.broadcastable, dtype=odtype)
        return gof.Apply(op=self, inputs=[A_, b_], outputs=[otype()])
Author: harlouci | Project: Theano | Lines: 10 | Source: solve.py

Example 13: filter

    def filter(self, data, strict=False, allow_downcast=None):
        if (isinstance(data, gpuarray.GpuArray) and
                data.typecode == self.typecode):
            # This is just to make this condition not enter the
            # following branches
            pass
        elif strict:
            if not isinstance(data, gpuarray.GpuArray):
                raise TypeError("%s expected a GpuArray object." % self,
                                data, type(data))
            if self.typecode != data.typecode:
                raise TypeError("%s expected typecode %d (dtype %s), "
                                "got %d (dtype %s)." %
                                (self, self.typecode, self.dtype,
                                 data.typecode, str(data.dtype)))
            if self.context != data.context:
                raise TypeError("data context does not match type context")
            # fallthrough to ndim check
        elif (allow_downcast or
              (allow_downcast is None and
               type(data) == float and
               self.dtype == config.floatX)):
            data = gpuarray.array(data, dtype=self.typecode, copy=False,
                                  ndmin=len(self.broadcastable),
                                  context=self.context)
        else:
            if not hasattr(data, 'dtype'):
                converted_data = theano._asarray(data, self.dtype)
                # We use the `values_eq` static function from TensorType
                # to handle NaN values.
                if TensorType.values_eq(numpy.asarray(data),
                                        converted_data,
                                        force_same_dtype=False):
                    data = converted_data
                    data = gpuarray.array(data, context=self.context)

            up_dtype = scalar.upcast(self.dtype, data.dtype)
            if up_dtype == self.dtype:
                data = gpuarray.array(data, dtype=self.dtype, copy=False,
                                      context=self.context)
            else:
                raise TypeError("%s cannot store a value of dtype %s "
                                "without risking loss of precision." %
                                (self, data.dtype))

        if self.ndim != data.ndim:
            raise TypeError("Wrong number of dimensions: expected %s, "
                            "got %s with shape %s." % (self.ndim, data.ndim,
                                                       data.shape), data)
        shp = data.shape
        for i, b in enumerate(self.broadcastable):
            if b and shp[i] != 1:
                raise TypeError("Non-unit value on shape on a broadcastable"
                                " dimension.", shp, self.broadcastable)
        return data
Author: wgapl | Project: Theano | Lines: 55 | Source: type.py

Example 14: make_node

    def make_node(self, x, y, p):
        x = tensor.as_tensor_variable(x)
        y = tensor.as_tensor_variable(y)

        if not _is_sparse_variable(p):
            raise TypeError(p)

        # TODO: use it.
        dtype_out = scalar.upcast(x.type.dtype, y.type.dtype, p.type.dtype)

        return gof.Apply(self, [x, y, p], [p.type()])
Author: lberrada | Project: Theano | Lines: 11 | Source: sp2.py

Example 15: make_node

    def make_node(self, a, val):
        a = tensor.as_tensor_variable(a)
        val = tensor.as_tensor_variable(val)
        if a.ndim < 2:
            raise TypeError("%s: first parameter must have at least"
                            " two dimensions" % self.__class__.__name__)
        elif val.ndim != 0:
            raise TypeError("%s: second parameter must be a scalar"
                            % self.__class__.__name__)
        val = tensor.cast(val, dtype=scalar.upcast(a.dtype, val.dtype))
        if val.dtype != a.dtype:
            raise TypeError("%s: type of second parameter must be the same as"
                            " the first's" % self.__class__.__name__)
        return gof.Apply(self, [a, val], [a.type()])
Author: Theano | Project: Theano | Lines: 11 | Source: extra_ops.py


Note: The theano.scalar.upcast examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are selected from community-contributed open-source projects; copyright remains with the original authors, and any use or redistribution should follow the corresponding project's license. Please do not reproduce without permission.