

Python pytools.any Function Code Examples

This article collects typical usage examples of the pytools.any function in Python. If you have been wondering what pytools.any does, how to call it, or what real-world uses look like, the hand-picked examples below should help.


The following presents 15 code examples of the any function, drawn from open-source projects and ordered by popularity.
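
Before the excerpts, here is a minimal, self-contained sketch of the pattern they all share (the list contents below are made up purely for illustration). pytools.any mirrors the built-in any(): it returns True as soon as one element of the iterable is truthy, and False for an empty iterable.

from pytools import any

# Hypothetical dtype "kind" codes, used only to illustrate the call pattern.
kinds = ["i", "f", "i"]

# True because at least one element satisfies the condition.
has_float = any(kind == "f" for kind in kinds)
print(has_float)  # prints: True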

Example 1: add_watches

    def add_watches(self, watches):
        """Add quantities that are printed after every time step."""

        from pytools import Record

        class WatchInfo(Record):
            pass

        for watch in watches:
            if isinstance(watch, tuple):
                display, expr = watch
            else:
                display = watch
                expr = watch

            parsed = self._parse_expr(expr)
            parsed, dep_data = self._get_expr_dep_data(parsed)

            from pytools import any
            self.have_nonlocal_watches = self.have_nonlocal_watches or \
                    any(dd.nonlocal_agg for dd in dep_data)

            from pymbolic import compile
            compiled = compile(parsed, [dd.varname for dd in dep_data])

            watch_info = WatchInfo(display=display, parsed=parsed, dep_data=dep_data,
                    compiled=compiled)

            self.watches.append(watch_info)
Author: inducer, Project: pytools, Lines of code: 29, Source: log.py

Example 2: get_binary_minmax_kernel

def get_binary_minmax_kernel(func, dtype_x, dtype_y, dtype_z):
    if np.float64 not in [dtype_x, dtype_y]:
        func = func + "f"

    from pytools import any
    if any(dt.kind == "f" for dt in [dtype_x, dtype_y, dtype_z]):
        func = "f"+func

    return get_binary_func_kernel(func, dtype_x, dtype_y, dtype_z)
Author: aaahexing, Project: pycuda, Lines of code: 9, Source: elementwise.py

Example 3: is_affine

    def is_affine(self):
        from pytools import any

        has_high_order_geometry = any(
            sum(mid) >= 2 and abs(mc) >= 1e-13
            for mc_along_axis in self.modal_coeff.T
            for mid, mc in zip(self.ldis.generate_mode_identifiers(), mc_along_axis)
        )

        return not has_high_order_geometry
Author: binho58, Project: meshpy, Lines of code: 10, Source: test_gmsh_reader_hedge.py

Example 4: get_binary_minmax_kernel

def get_binary_minmax_kernel(func, dtype_x, dtype_y, dtype_z, use_scalar):
    if np.float64 not in [dtype_x, dtype_y]:
        func = func + "f"

    from pytools import any
    if any(dt.kind == "f" for dt in [dtype_x, dtype_y, dtype_z]):
        func = "f"+func

    if use_scalar:
        return get_binary_func_scalar_kernel(func, dtype_x, dtype_y, dtype_z)
    else:
        return get_binary_func_kernel(func, dtype_x, dtype_y, dtype_z)
Author: EelcoHoogendoorn, Project: pycuda, Lines of code: 12, Source: elementwise.py

Example 5: has_barrier_within

def has_barrier_within(kernel, sched_index):
    sched_item = kernel.schedule[sched_index]

    if isinstance(sched_item, EnterLoop):
        loop_contents, _ = gather_schedule_subloop(kernel.schedule, sched_index)
        from pytools import any

        return any(isinstance(subsched_item, Barrier) for subsched_item in loop_contents)
    elif isinstance(sched_item, Barrier):
        return True
    else:
        return False
Author: navjotk, Project: loopy, Lines of code: 12, Source: schedule.py

Example 6: get_lpot_applier

    def get_lpot_applier(self, kernels):
        # needs to be separate method for caching

        from pytools import any
        if any(knl.is_complex_valued for knl in kernels):
            value_dtype = self.density_discr.complex_dtype
        else:
            value_dtype = self.density_discr.real_dtype

        from sumpy.qbx import LayerPotential
        return LayerPotential(self.cl_context,
                    [self.expansion_getter(knl, self.qbx_order)
                        for knl in kernels],
                    value_dtypes=value_dtype)
Author: sj90101, Project: pytential, Lines of code: 14, Source: __init__.py

Example 7: __init__

    def __init__(self, dtype_out,
                 neutral, reduce_expr, arguments=None,
                 map_exprs=[None],
                 name="reduce_kernel", options=[], preamble=""):

        ctx = get_device().context
        dtype_out = self.dtype_out = np.dtype(dtype_out)

        max_group_size = None
        trip_count = 0

        self.n_exprs = len(map_exprs)
        assert self.n_exprs>0

        while True:
            self.stage_1_inf = get_reduction_kernel(1, ctx,
                                                    dtype_out,
                                                    neutral, reduce_expr, arguments,
                                                    name=name+"_stage1", options=options, preamble=preamble,
                                                    map_exprs=map_exprs,
                                                    max_group_size=max_group_size)


            kernel_max_wg_size = self.stage_1_inf.kernel.get_work_group_info(
                cl.kernel_work_group_info.WORK_GROUP_SIZE,
                ctx.devices[0])

            if self.stage_1_inf.group_size<=kernel_max_wg_size:
                break
            else:
                max_group_size = kernel_max_wg_size

            trip_count += 1
            assert trip_count<=2

        self.stage_2_inf = get_reduction_kernel(2, ctx,
                                dtype_out,
                                neutral, reduce_expr, arguments=arguments,
                                name=name+"_stage2", options=options,
                                map_exprs=map_exprs,
                                preamble=preamble,
                                max_group_size=max_group_size)

        from pytools import any
        from pyopencl.tools import VectorArg
        assert any(
            isinstance(arg_tp, VectorArg)
            for arg_tp in self.stage_1_inf.arg_types), \
            "ReductionKernel can only be used with functions " \
            "that have at least one vector argument"
Author: spaghettisort, Project: gputools, Lines of code: 50, Source: oclmultireduction.py

Example 8: get_p2p

    def get_p2p(self, kernels):
        # needs to be separate method for caching

        from pytools import any
        if any(knl.is_complex_valued for knl in kernels):
            value_dtype = self.density_discr.complex_dtype
        else:
            value_dtype = self.density_discr.real_dtype

        from sumpy.p2p import P2P
        p2p = P2P(self.cl_context,
                    kernels, exclude_self=False, value_dtypes=value_dtype)

        return p2p
Author: sj90101, Project: pytential, Lines of code: 14, Source: __init__.py

Example 9: get_lpot_applier_on_tgt_subset

    def get_lpot_applier_on_tgt_subset(self, kernels):
        # needs to be separate method for caching

        from pytools import any
        if any(knl.is_complex_valued for knl in kernels):
            value_dtype = self.density_discr.complex_dtype
        else:
            value_dtype = self.density_discr.real_dtype

        from pytential.qbx.direct import LayerPotentialOnTargetAndCenterSubset
        from sumpy.expansion.local import VolumeTaylorLocalExpansion
        return LayerPotentialOnTargetAndCenterSubset(
                self.cl_context,
                [VolumeTaylorLocalExpansion(knl, self.qbx_order)
                    for knl in kernels],
                value_dtypes=value_dtype)
Author: inducer, Project: pytential, Lines of code: 16, Source: __init__.py

Example 10: finalize_multi_assign

    def finalize_multi_assign(self, names, exprs, do_not_return, priority):
        from pytools import any
        from hedge.tools import is_zero

        has_zero_assignees = any(is_zero(expr) for expr in exprs)
        if has_zero_assignees:
            if len(exprs) > 1:
                raise RuntimeError("found aggregated zero constant assignment")

        from hedge.optemplate import FlopCounter

        flop_count = sum(FlopCounter()(expr) for expr in exprs)

        if has_zero_assignees or flop_count == 0:
            return Assign(names, exprs, priority=priority, dep_mapper_factory=self.dep_mapper_factory)
        else:
            return VectorExprAssign(
                names=names,
                exprs=exprs,
                do_not_return=do_not_return,
                dep_mapper_factory=self.dep_mapper_factory,
                priority=priority,
            )
Author: paulcazeaux, Project: hedge, Lines of code: 23, Source: compiler.py

Example 11: __init__

    def __init__(self, ctx, dtype_out,
            neutral, reduce_expr, map_expr=None, arguments=None,
            name="reduce_kernel", options=[], preamble=""):

        dtype_out = self.dtype_out = np.dtype(dtype_out)

        self.stage_1_inf = get_reduction_kernel(ctx,
                dtype_to_ctype(dtype_out), dtype_out.itemsize,
                neutral, reduce_expr, map_expr, arguments,
                name=name+"_stage1", options=options, preamble=preamble)

        # stage 2 has only one input and no map expression
        self.stage_2_inf = get_reduction_kernel(ctx,
                dtype_to_ctype(dtype_out), dtype_out.itemsize,
                neutral, reduce_expr,
                name=name+"_stage2", options=options, preamble=preamble)

        from pytools import any
        from pyopencl.tools import VectorArg
        assert any(
                isinstance(arg_tp, VectorArg)
                for arg_tp in self.stage_1_inf.arg_types), \
                "ReductionKernel can only be used with functions that have at least one " \
                "vector argument"
Author: Almclean, Project: pyopencl, Lines of code: 24, Source: reduction.py

Example 12: aggregate_assignments

    def aggregate_assignments(self, instructions, result):
        from pymbolic.primitives import Variable

        # aggregation helpers -------------------------------------------------
        def get_complete_origins_set(insn, skip_levels=0):
            if skip_levels < 0:
                skip_levels = 0

            result = set()
            for dep in insn.get_dependencies():
                if isinstance(dep, Variable):
                    dep_origin = origins_map.get(dep.name, None)
                    if dep_origin is not None:
                        if skip_levels <= 0:
                            result.add(dep_origin)
                        result |= get_complete_origins_set(
                                dep_origin, skip_levels-1)

            return result

        var_assignees_cache = {}
        def get_var_assignees(insn):
            try:
                return var_assignees_cache[insn]
            except KeyError:
                result = set(Variable(assignee)
                        for assignee in insn.get_assignees())
                var_assignees_cache[insn] = result
                return result

        def aggregate_two_assignments(ass_1, ass_2):
            names = ass_1.names + ass_2.names

            from pymbolic.primitives import Variable
            deps = (ass_1.get_dependencies() | ass_2.get_dependencies()) \
                    - set(Variable(name) for name in names)

            return Assign(
                    names=names, exprs=ass_1.exprs + ass_2.exprs,
                    _dependencies=deps,
                    dep_mapper_factory=self.dep_mapper_factory,
                    priority=max(ass_1.priority, ass_2.priority))

        # main aggregation pass -----------------------------------------------
        origins_map = dict(
                    (assignee, insn)
                    for insn in instructions
                    for assignee in insn.get_assignees())

        from pytools import partition
        unprocessed_assigns, other_insns = partition(
                lambda insn: isinstance(insn, Assign),
                instructions)

        # filter out zero-flop-count assigns--no need to bother with those
        processed_assigns, unprocessed_assigns = partition(
                lambda ass: ass.flop_count() == 0,
                unprocessed_assigns)

        # filter out zero assignments
        from pytools import any
        from hedge.tools import is_zero

        i = 0

        while i < len(unprocessed_assigns):
            my_assign = unprocessed_assigns[i]
            if any(is_zero(expr) for expr in my_assign.exprs):
                processed_assigns.append(unprocessed_assigns.pop())
            else:
                i += 1

        # greedy aggregation
        while unprocessed_assigns:
            my_assign = unprocessed_assigns.pop()

            my_deps = my_assign.get_dependencies()
            my_assignees = get_var_assignees(my_assign)

            agg_candidates = []
            for i, other_assign in enumerate(unprocessed_assigns):
                other_deps = other_assign.get_dependencies()
                other_assignees = get_var_assignees(other_assign)

                if ((my_deps & other_deps
                        or my_deps & other_assignees
                        or other_deps & my_assignees)
                        and my_assign.priority == other_assign.priority):
                    agg_candidates.append((i, other_assign))

            did_work = False

            if agg_candidates:
                my_indirect_origins = get_complete_origins_set(
                        my_assign, skip_levels=1)

                for other_assign_index, other_assign in agg_candidates:
                    if self.max_vectors_in_batch_expr is not None:
                        new_assignee_count = len(
                                set(my_assign.get_assignees())
#......... the rest of this code is omitted .........
Author: felipeh, Project: hedge, Lines of code: 101, Source: compiler.py

Example 13: match_dtype_to_c_struct

def match_dtype_to_c_struct(device, name, dtype, context=None):
    """Return a tuple `(dtype, c_decl)` such that the C struct declaration
    in `c_decl` and the structure :class:`numpy.dtype` instance `dtype`
    have the same memory layout.

    Note that *dtype* may be modified from the value that was passed in,
    for example to insert padding.

    (As a remark on implementation, this routine runs a small kernel on
    the given *device* to ensure that :mod:`numpy` and C offsets and
    sizes match.)

    .. versionadded: 2013.1

    This example explains the use of this function::

        >>> import numpy as np
        >>> import pyopencl as cl
        >>> import pyopencl.tools
        >>> ctx = cl.create_some_context()
        >>> dtype = np.dtype([("id", np.uint32), ("value", np.float32)])
        >>> dtype, c_decl = pyopencl.tools.match_dtype_to_c_struct(
        ...     ctx.devices[0], 'id_val', dtype)
        >>> print c_decl
        typedef struct {
          unsigned id;
          float value;
        } id_val;
        >>> print dtype
        [('id', '<u4'), ('value', '<f4')]
        >>> cl.tools.get_or_register_dtype('id_val', dtype)

    As this example shows, it is important to call
    :func:`get_or_register_dtype` on the modified `dtype` returned by this
    function, not the original one.
    """

    fields = sorted(dtype.fields.iteritems(),
            key=lambda (name, (dtype, offset)): offset)

    c_fields = []
    for field_name, (field_dtype, offset) in fields:
        c_fields.append("  %s %s;" % (dtype_to_ctype(field_dtype), field_name))

    c_decl = "typedef struct {\n%s\n} %s;\n\n" % (
            "\n".join(c_fields),
            name)

    cdl = _CDeclList(device)
    for field_name, (field_dtype, offset) in fields:
        cdl.add_dtype(field_dtype)

    pre_decls = cdl.get_declarations()

    offset_code = "\n".join(
            "result[%d] = pycl_offsetof(%s, %s);" % (i+1, name, field_name)
            for i, (field_name, (field_dtype, offset)) in enumerate(fields))

    src = r"""
        #define pycl_offsetof(st, m) \
                 ((size_t) ((__local char *) &(dummy.m) \
                 - (__local char *)&dummy ))

        %(pre_decls)s

        %(my_decl)s

        __kernel void get_size_and_offsets(__global size_t *result)
        {
            result[0] = sizeof(%(my_type)s);
            __local %(my_type)s dummy;
            %(offset_code)s
        }
    """ % dict(
            pre_decls=pre_decls,
            my_decl=c_decl,
            my_type=name,
            offset_code=offset_code)

    if context is None:
        context = cl.Context([device])

    queue = cl.CommandQueue(context)

    prg = cl.Program(context, src)
    knl = prg.build(devices=[device]).get_size_and_offsets

    import pyopencl.array  # noqa
    result_buf = cl.array.empty(queue, 1+len(fields), np.uintp)
    knl(queue, (1,), (1,), result_buf.data)
    queue.finish()
    size_and_offsets = result_buf.get()

    size = int(size_and_offsets[0])

    from pytools import any
    offsets = size_and_offsets[1:]
    if any(ofs >= size for ofs in offsets):
        # offsets not plausible

#......... the rest of this code is omitted .........
Author: DirkHaehnel, Project: pyopencl, Lines of code: 101, Source: tools.py

Example 14: do_not_vectorize

    def do_not_vectorize(self):
        from pytools import any

        return self.complex_kernel and any(dev.type == cl.device_type.CPU for dev in self.context.devices)
Author: braincorp, Project: pyopencl, Lines of code: 4, Source: algorithm.py

Example 15: is_name_conflicting

    def is_name_conflicting(self, name):
        from pytools import any
        return any(
                _is_var_name_conflicting(name, other_name)
                for other_name in self.existing_names)
Author: cmsquared, Project: loopy, Lines of code: 5, Source: __init__.py


Note: The pytools.any function examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets were selected from open-source projects contributed by their respective developers; copyright of the source code remains with the original authors. Please consult the corresponding project's license before distributing or using the code, and do not reproduce this article without permission.