

Python numpy.iterable Function Code Examples

This article collects typical usage examples of the numpy.iterable function in Python. If you are wondering what numpy.iterable does, how to call it, or what real-world code that uses it looks like, the curated examples below should help.


A total of 15 code examples of the iterable function are shown below, sorted by popularity by default.
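Before turning to the project code, here is a minimal standalone sketch (not taken from any of the projects below) showing what numpy.iterable reports for a few common inputs:

import numpy as np

# np.iterable(obj) simply tests whether iter(obj) succeeds.
print(np.iterable([1, 2, 3]))      # True  -- list
print(np.iterable(np.arange(4)))   # True  -- 1-d ndarray
print(np.iterable("abc"))          # True  -- strings are iterable too
print(np.iterable(3.5))            # False -- Python scalar
print(np.iterable(np.float64(2)))  # False -- 0-d NumPy scalar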

Example 1: __init__

    def __init__(self, mesh, material1=None, material2=None, surface=None, color=0x33ffffff):
        self.mesh = mesh

        if np.iterable(material1):
            if len(material1) != len(mesh.triangles):
                raise ValueError('shape mismatch')
            self.material1 = np.array(material1, dtype=object)
        else:
            self.material1 = np.tile(material1, len(self.mesh.triangles))

        if np.iterable(material2):
            if len(material2) != len(mesh.triangles):
                raise ValueError('shape mismatch')
            self.material2 = np.array(material2, dtype=object)
        else:
            self.material2 = np.tile(material2, len(self.mesh.triangles))

        if np.iterable(surface):
            if len(surface) != len(mesh.triangles):
                raise ValueError('shape mismatch')
            self.surface = np.array(surface, dtype=object)
        else:
            self.surface = np.tile(surface, len(self.mesh.triangles))

        if np.iterable(color):
            if len(color) != len(mesh.triangles):
                raise ValueError('shape mismatch')
            self.color = np.array(color, dtype=np.uint32)
        else:
            self.color = np.tile(color, len(self.mesh.triangles)).astype(np.uint32)

        self.unique_materials = \
            np.unique(np.concatenate([self.material1, self.material2]))

        self.unique_surfaces = np.unique(self.surface)
Developer: BenLand100, Project: chroma, Lines: 35, Source: geometry.py
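Note: the constructor above repeats one idiom four times: accept either a single value or a per-triangle sequence, validate the length, and otherwise tile the scalar. A minimal standalone sketch of that idiom (hypothetical helper name, not part of chroma):

import numpy as np

def per_element(value, n):
    # Tile a scalar to length n, or validate that a sequence already has length n.
    if np.iterable(value):
        if len(value) != n:
            raise ValueError('shape mismatch')
        return np.array(value, dtype=object)
    return np.tile(value, n)

print(per_element('glass', 3))          # ['glass' 'glass' 'glass']
print(per_element(['a', 'b', 'c'], 3))  # ['a' 'b' 'c']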

Example 2: Min3

def Min3(a,b,c):
    code = """
    for(int i=0;i<ntot;i++) {
        double av = *(a+i);
    """
    if iterable(b):
        code += """
        double bv = *(b+i);
        """
    else:
        code += """
        double bv = b;
        """
    if iterable(c):
        code += """
        double cv = *(c+i);
        """
    else:
        code += """
        double cv = c;
        """
    code += """
        *(result+i) = (av<bv) ? ((cv<av) ? cv:av) : ((cv<bv) ? cv:bv);
    }
    """
    ntot = a.size
    result = GridArray.GridArray.empty(a.shape)
    W.inline(code, ['a','b','c','result','ntot'], extra_compile_args=["-w"])
    return result
Developer: mattbierbaum, Project: cuda-plasticity, Lines: 29, Source: NumericalMethodsOptimized.py
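Note: Min3 depends on scipy.weave (the W.inline call), which is Python 2 only. For reference, a pure NumPy equivalent of the same three-way minimum; np.minimum broadcasts scalars against arrays, so the explicit iterable checks become unnecessary (a sketch, not from the original project):

import numpy as np

def min3(a, b, c):
    # b and c may each be an array of a.shape or a plain float; broadcasting handles both.
    return np.minimum(np.minimum(a, b), c)

a = np.array([1.0, 5.0, 2.0])
print(min3(a, 3.0, np.array([0.5, 9.0, 9.0])))  # [0.5 3.  2. ]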

Example 3: importance_sample_var

def importance_sample_var(x, est, p_gen, p_tar, log_weight_lim=float('inf'),
                          normalize=False):
    # TODO normalize not used?
    x = np.asarray(x)
    N = len(x)
    log_weight_lim = float(log_weight_lim)    

    if not np.iterable(p_gen):
        p_gen = np.full(N, p_gen)
    else:
        p_gen = np.asarray(p_gen)

    if not np.iterable(p_tar):
        p_tar = np.full(N, p_tar)
    else:
        p_tar = np.asarray(p_tar)

    log_weights = p_tar - p_gen
    valid = np.logical_and(log_weights > -log_weight_lim,
                           log_weights < log_weight_lim)
    if not np.any(valid):
        return float('inf'), 0

    weights = np.exp(log_weights[valid])
    x = x[valid]

    deltas = x - est
    outers = np.asarray([np.outer(d, d) for d in deltas])
    norm_weights = weights / np.sum(weights)

    est_var = np.sum((norm_weights * norm_weights) * outers.T, axis=-1).T
    ess = np.sum(norm_weights ** 2) ** 2 / np.sum(norm_weights ** 4)
    return est_var, ess
Developer: Humhu, Project: percepto, Lines: 33, Source: sampling.py
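Note: the numpy.iterable-specific part of this function is promoting scalar densities to per-sample arrays with np.full; everything else is ordinary importance-weight bookkeeping. The promotion step in isolation (independent of percepto):

import numpy as np

p_gen, N = 0.25, 4          # a single log-density applied to all samples
if not np.iterable(p_gen):
    p_gen = np.full(N, p_gen)
print(p_gen)                # [0.25 0.25 0.25 0.25]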

Example 4: mag2fluxcal

def mag2fluxcal( mag, magerr=0 ):
    """ convert magnitudes into SNANA-style FLUXCAL units
    (fixed zero point of 27.5 for all bands) """
    from numpy import iterable, abs, array, zeros, any
    
    if not iterable( mag ) : 
        mag = array( [ mag ] )
        magerr = array( [ magerr ] )
    if not iterable( magerr ) : 
        magerr = zeros( len(mag) ) 

    fluxcal, fluxcalerr = [],[]
    for m,me in zip( mag, magerr) : 
        if me < 0 : 
            fluxcal.append( 0 ) 
            fluxcalerr.append( 10**(-0.4*(m-27.5)) )
        else : 
            fluxcal.append( 10**(-0.4*(m-27.5)) )
            fluxcalerr.append( 0.92103 * me * fluxcal[-1] )
    fluxcal = array( fluxcal )
    fluxcalerr = array( fluxcalerr )

    if len(mag)==1 : 
        fluxcal = fluxcal[0]
        fluxcalerr = fluxcalerr[0] 

    if any( magerr ) : return( fluxcal, fluxcalerr ) 
    else : return( fluxcal )
Developer: srodney, Project: hstsntools, Lines: 28, Source: hstsnphot.py
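Note: the conversion is FLUXCAL = 10**(-0.4*(mag - 27.5)), so a magnitude equal to the 27.5 zero point maps to a flux of exactly 1, and the error factor 0.92103 is 0.4*ln(10). A quick numeric check (independent of the hstsntools code):

for mag in (27.5, 25.0, 30.0):
    print(mag, 10 ** (-0.4 * (mag - 27.5)))
# 27.5 -> 1.0, 25.0 -> 10.0, 30.0 -> 0.1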

Example 5: dict_diff

def dict_diff(dict1, dict2):
    """Return the difference between two dictionaries as a dictionary of key: [val1, val2] pairs.
    Keys unique to either dictionary are included as key: [val1, '-'] or key: ['-', val2]."""
    diff_keys = []
    common_keys = np.intersect1d(list(dict1.keys()), list(dict2.keys()))
    for key in common_keys:
        if np.iterable(dict1[key]) or np.iterable(dict2[key]):
            if not np.array_equal(dict1[key], dict2[key]):
                diff_keys.append(key)
        else:
            if dict1[key] != dict2[key]:
                diff_keys.append(key)

    dict1_unique = [key for key in dict1.keys() if key not in common_keys]
    dict2_unique = [key for key in dict2.keys() if key not in common_keys]

    diff = {}
    for key in diff_keys:
        diff[key] = [dict1[key], dict2[key]]

    for key in dict1_unique:
        diff[key] = [dict1[key], '-']

    for key in dict2_unique:
        diff[key] = ['-', dict2[key]]

    return diff
Developer: specialforcea, Project: labscriptsuite, Lines: 27, Source: __init__.py
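Note: a short usage sketch of dict_diff as defined above (assuming the function and numpy are imported); array-valued entries are compared with np.array_equal, so only genuinely differing or unique keys appear in the result:

import numpy as np

d1 = {'rate': 1.0, 'mask': np.array([1, 2, 3]), 'name': 'run1'}
d2 = {'rate': 2.0, 'mask': np.array([1, 2, 3]), 'label': 'x'}
print(dict_diff(d1, d2))
# {'rate': [1.0, 2.0], 'name': ['run1', '-'], 'label': ['-', 'x']}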

Example 6: point_displ

def point_displ(pt1, pt2):
    """ Calculate the displacement vector between two n-D points.

    pt2 - pt1

    .. todo:: Complete point_displ docstring

    """

    #Imports
    import numpy as np

    # Make iterable
    if not np.iterable(pt1):
        pt1 = np.float64(np.array([pt1]))
    else:
        pt1 = np.float64(np.array(pt1).squeeze())
    ## end if
    if not np.iterable(pt2):
        pt2 = np.float64(np.array([pt2]))
    else:
        pt2 = np.float64(np.array(pt2).squeeze())
    ## end if

    # Calculate the displacement vector and return
    displ = np.matrix(np.subtract(pt2, pt1)).reshape(3,1)
    return displ
Developer: bskinn, Project: opan, Lines: 27, Source: symm.py

Example 7: test_kernel_clone

def test_kernel_clone():
    """ Test that sklearn's clone works correctly on kernels. """
    for kernel in kernels:
        kernel_cloned = clone(kernel)

        assert_equal(kernel, kernel_cloned)
        assert_not_equal(id(kernel), id(kernel_cloned))
        for attr in kernel.__dict__.keys():
            attr_value = getattr(kernel, attr)
            attr_value_cloned = getattr(kernel_cloned, attr)
            if attr.startswith("hyperparameter_"):
                assert_equal(attr_value.name, attr_value_cloned.name)
                assert_equal(attr_value.value_type,
                             attr_value_cloned.value_type)
                assert_array_equal(attr_value.bounds,
                                   attr_value_cloned.bounds)
                assert_equal(attr_value.n_elements,
                             attr_value_cloned.n_elements)
            elif np.iterable(attr_value):
                for i in range(len(attr_value)):
                    if np.iterable(attr_value[i]):
                        assert_array_equal(attr_value[i],
                                           attr_value_cloned[i])
                    else:
                        assert_equal(attr_value[i], attr_value_cloned[i])
            else:
                assert_equal(attr_value, attr_value_cloned)
            if not isinstance(attr_value, Hashable):
                # modifiable attributes must not be identical
                assert_not_equal(id(attr_value), id(attr_value_cloned))
Developer: AlexanderFabisch, Project: scikit-learn, Lines: 30, Source: test_kernels.py

Example 8: _set_values_to_brick

    def _set_values_to_brick(self, brick_guid, brick_slice, values, value_slice=None):
        brick_file_path = os.path.join(self.brick_path, '{0}.hdf5'.format(brick_guid))
        log.trace('Brick slice to fill: %s', brick_slice)
        log.trace('Value slice to extract: %s', value_slice)

        # Create the HDF5 dataset that represents one brick
        bD = tuple(self.brick_domains[1])
        cD = self.brick_domains[2]
        if value_slice is not None:
            vals = values[value_slice]
        else:
            vals = values

        if values.ndim == 0 and len(values.shape) == 0 and np.iterable(vals): # Prevent single value strings from being iterated
            vals = [vals]

        # Check for object type
        data_type = self.dtype
        fv = self.fill_value

        # Check for object type
        if data_type == '|O8':
            if np.iterable(vals):
                vals = [pack(x) for x in vals]
            else:
                vals = pack(vals)

        if self.inline_data_writes:
            if data_type == '|O8':
                data_type = h5py.special_dtype(vlen=str)
            if 0 in cD or 1 in cD:
                cD = True
            with HDFLockingFile(brick_file_path, 'a') as f:
                # TODO: Due to usage concerns, currently locking chunking to "auto"
                f.require_dataset(brick_guid, shape=bD, dtype=data_type, chunks=None, fillvalue=fv)
                f[brick_guid][brick_slice] = vals
        else:
            work_key = brick_guid
            work = (brick_slice, vals)
            work_metrics = (brick_file_path, bD, cD, data_type, fv)
            log.trace('Work key: %s', work_key)
            log.trace('Work metrics: %s', work_metrics)
            log.trace('Work[0]: %s', work[0])

            # If the brick file doesn't exist, 'touch' it to make sure it's immediately available
            if not os.path.exists(brick_file_path):
                if data_type == '|O8':
                    data_type = h5py.special_dtype(vlen=str)
                if 0 in cD or 1 in cD:
                    cD = True
                with HDFLockingFile(brick_file_path, 'a') as f:
                    # TODO: Due to usage concerns, currently locking chunking to "auto"
                    f.require_dataset(brick_guid, shape=bD, dtype=data_type, chunks=None, fillvalue=fv)

            if self.auto_flush:
                # Immediately submit work to the dispatcher
                self.brick_dispatcher.put_work(work_key, work_metrics, work)
            else:
                # Queue the work for later flushing
                self._queue_work(work_key, work_metrics, work)
Developer: emilyhahn, Project: coverage-model, Lines: 60, Source: persistence.py

Example 9: broadcast_indices

def broadcast_indices(indices):
    """ if any array index is present, broadcast all arrays and integer indices on the same shape
    """
    aindices = []

    # convert all booleans, and scan the indices to get the size
    size = None
    for i, ix in enumerate(indices):

        if np.iterable(ix) and np.asarray(ix).dtype is np.dtype(bool):
            ix = np.where(ix)[0]

        if np.iterable(ix):

            if size is None:
                size = np.size(ix)

            # consistency check
            elif size != np.size(ix):
                print(size, np.size(ix))
                raise ValueError(
                    "array-indices could not be broadcast on the same shape (got {} and {}, try box[...] or take(..., broadcast_arrays=False) if you intend to sample values along several dimensions independently)".format(
                        size, np.size(ix)
                    )
                )

        aindices.append(ix)

    # Now convert all integers to the same size, if applicable
    if size is not None:
        for i, ix in enumerate(aindices):
            if not np.iterable(ix) and not type(ix) is slice:
                aindices[i] = np.zeros(size, dtype=type(ix)) + ix

    return aindices
Developer: koenvo, Project: dimarray, Lines: 35, Source: indexing.py
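Note: the first branch turns a boolean mask into integer positions via np.where, and the final loop expands any remaining scalar index to the common size. Both steps in isolation (a standalone illustration, not dimarray code):

import numpy as np

mask = np.array([True, False, True, True])
print(np.where(mask)[0])                             # [0 2 3]  boolean mask -> integer indices

size, scalar = 3, 5
print(np.zeros(size, dtype=type(scalar)) + scalar)   # [5 5 5]  scalar index broadcast to size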

Example 10: clone_with_param

    def clone_with_param(self, new_length_scale):
        if np.iterable(self.length_scale):
            if not np.iterable(new_length_scale):
                raise Exception("new_length_scale is not iterable")
            if len(self.length_scale) != len(new_length_scale):
                raise Exception("new_length_scale length mismatch")
        return RBFKernel(new_length_scale, self.length_scale_bounds)
Developer: cheng-w-liu, Project: computational-notes, Lines: 7, Source: kernels.py

Example 11: integrate_prop_odeint

    def integrate_prop_odeint(self, D, eps, x,t1,t2):
        '''
        Integrate the lineage propagator, accounting for non-branching. This uses the SciPy ODE integrator (odeint).
        parameters:
        D   --  dimensionless diffusion constant
        eps --  initial condition for the generating function, corresponding to the sampling probability
        x   --  fitness at the "closer to the present" end of the branch
        t1  --  time closer to the present
        t2  --  times after which to evaluate the propagator, either a float or iterable of floats
        '''
        if not np.iterable(t2):  # if only one value is provided, produce a list with this value
            t2=[t2]
        else:                    # otherwise, cast to list. This is necessary to concatenate with t1
            t2=list(t2)

        if np.iterable(x):
            xdim = len(x)
        else:
            xdim = 1
            x=[x]
        
        # allocate array for solution: dimensions: #time points, #late fitness values, #fitness grid points
        sol = np.zeros((len(t2)+1, len(x), self.L))
        # loop over late fitness values
        for ii, x_val in enumerate(x):
            # find index in fitness grid
            xi = np.argmin(x_val > self.fitness_grid)
            # init as delta function, normalized
            prop0 = np.zeros(self.L)
            prop0[xi] = self.dxinv
            # propagate backwards and save in correct row
            sol[:,ii,:] = odeint(self.dprop_backward, prop0, [t1]+t2,args=((D,eps),), 
                                 rtol = 0.001,atol = 1e-5, h0=1e-2,hmin = 1e-4, printmessg=False)

        return np.maximum(non_negativity_cutoff,sol)
Developer: neherlab, Project: FitnessInference, Lines: 35, Source: solve_survival.py
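Note: the iterable check on t2 exists only so that [t1] + t2 concatenates cleanly; a bare float or an ndarray would fail that concatenation. The cast in isolation (independent of the FitnessInference code):

import numpy as np

t1, t2 = 0.0, 5.0
if not np.iterable(t2):   # a single evaluation time -> wrap it in a list
    t2 = [t2]
else:
    t2 = list(t2)         # tuples/arrays -> list, so that [t1] + t2 works
print([t1] + t2)          # [0.0, 5.0]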

Example 12: _get_k_variables

    def _get_k_variables(self, k, m, c=None, coord="k"):
        """
        From a raw array in k, mass, returns concentration,
        kappa.

        Returns
        -------
        c : same shape as m
            concentration

        K : 1d or 2d array
            Dimensionless scale parameter, shape (r,[m]).
        """
        if c is None:
            c = self.cm_relation(m)
        r_s = self._rs_from_m(m, c)

        if coord == "k":
            if np.iterable(k) and np.iterable(r_s):
                K = np.outer(k, r_s)
            else:
                K = k*r_s
        elif coord == "kappa":
            K = k

        return c, K
Developer: steven-murray, Project: halomod, Lines: 26, Source: profiles.py
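Note: when both k and r_s are iterable, np.outer builds the full (len(k), len(r_s)) grid of products K[i, j] = k[i] * r_s[j]; with a scalar on either side the code falls back to an ordinary product. A tiny illustration of that distinction (not halomod code):

import numpy as np

k = np.array([0.1, 1.0, 10.0])
r_s = np.array([2.0, 4.0])
print(np.outer(k, r_s))   # shape (3, 2): K[i, j] = k[i] * r_s[j]
print(k * 2.0)            # scalar r_s: plain elementwise product, shape (3,)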

Example 13: write_arrays

def write_arrays(filename, args, fields=None, sep=" ", comment="#", clobber=False, linebreak="\n", format="%g"):

    if os.path.isfile(filename) and not clobber:
        raise IOErr("filefound", filename)

    if not numpy.iterable(args) or len(args) == 0:
        raise IOErr("noarrayswrite")

    if not numpy.iterable(args[0]):
        raise IOErr("noarrayswrite")

    size = len(args[0])
    for arg in args:
        if not numpy.iterable(arg):
            raise IOErr("noarrayswrite")
        elif len(arg) != size:
            raise IOErr("arraysnoteq")

    args = numpy.column_stack(numpy.asarray(args))

    f = open(filename, "w")

    if fields is not None:
        f.write(comment + sep.join(fields) + linebreak)

    lines = []
    for arg in args:
        line = [format % elem for elem in arg]
        lines.append(sep.join(line))

    f.write(linebreak.join(lines))

    # add a newline at end
    f.write(linebreak)
    f.close()
Developer: taldcroft, Project: sherpa-old, Lines: 35, Source: io.py
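Note: assuming the module above is importable (and its sherpa IOErr dependency is available), a call like the following writes a two-column ASCII table; a hypothetical usage sketch, not part of the original project:

import numpy as np

x = np.arange(3)
write_arrays('table.txt', [x, x ** 2], fields=['x', 'y'], clobber=True)
# table.txt then contains:
# #x y
# 0 0
# 1 1
# 2 4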

Example 14: _values_equal

def _values_equal(a, b):
    "Test equality, taking into account array values"
    if a is b:
        return True
    elif type(a) is not type(b):
        return False
    a_iterable = np.iterable(a)
    b_iterable = np.iterable(b)
    if a_iterable != b_iterable:
        return False
    elif not a_iterable:
        return a == b
    elif len(a) != len(b):
        return False
    elif isinstance(a, np.ndarray):
        if a.shape == b.shape:
            return (a == b).all()
        else:
            return False
    elif isinstance(a, (tuple, list)):
        return all(_values_equal(a_, b_) for a_, b_ in zip(a, b))
    elif isinstance(a, dict):
        if a.keys() == b.keys():
            return all(_values_equal(a[k], b[k]) for k in a)
        else:
            return False
    elif isinstance(a, mne.io.BaseRaw):
        return isinstance(b, a.__class__) and _values_equal(a.info, b.info)
    else:
        return a == b
Developer: christianbrodbeck, Project: Eelbrain, Lines: 30, Source: _info.py
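Note: the dedicated ndarray branch exists because == on arrays is elementwise, so its result cannot be used directly as a single truth value. A short demonstration of the problem the helper works around (not Eelbrain code):

import numpy as np

a = np.array([1, 2, 3])
b = np.array([1, 2, 3])
print(a == b)           # [ True  True  True] -- elementwise, not one bool
print((a == b).all())   # True -- what _values_equal returns for equal-shaped arrays
# bool(a == b) would raise ValueError: the truth value of an array is ambiguous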

Example 15: __init__

    def __init__(self):
        # Seed RNG if specified
        seed = rospy.get_param('~random_seed', None)
        if seed is None:
            rospy.loginfo('No random seed specified. Using default behavior.')
        else:
            rospy.loginfo('Initializing with random seed: ' + str(seed))
            np.random.seed(seed)

        self.save_period = rospy.get_param('~save_period', 1)

        self.input_dim = rospy.get_param('~input_dimension')
        self.input_lower = rospy.get_param('~input_lower_bound')
        self.input_upper = rospy.get_param('~input_upper_bound')
        if not np.iterable(self.input_lower):
            self.input_lower = [self.input_lower]*self.input_dim
        self.input_lower = np.asarray(self.input_lower)
        if not np.iterable(self.input_upper):
            self.input_upper = [self.input_upper]*self.input_dim
        self.input_upper = np.asarray(self.input_upper)

        checker_func = self.check_input

        self.prog_path = rospy.get_param('~progress_path', None)
        self.out_path = rospy.get_param('~output_path')

        crossover_rate = rospy.get_param('~crossover_rate', 0.5)
        crossover_func = lambda x, y: optgen.uniform_crossover(x, y, crossover_rate)

        mutate_cov = float(rospy.get_param('~mutate_cov', 0.1))
        mutate_func = lambda x: optgen.gaussian_mutate(x, mutate_cov)

        selection_k = rospy.get_param('~selection_k', None)
        selection_func = lambda N, w: optgen.tournament_selection(N, w, selection_k)

        crossover_prob = rospy.get_param('~crossover_prob', 0.6)
        init_popsize = rospy.get_param('~init_popsize')
        run_popsize = rospy.get_param('~run_popsize', init_popsize)
        elitist = rospy.get_param('~elitist', False)
        verbose = rospy.get_param('~verbose', False)

        self.max_iters = rospy.get_param('~convergence/max_iters', 100)
        self.iter_counter = 0

        self.optimizer = optgen.GeneticOptimizer(crossover_func=crossover_func,
                                                 mutate_func=mutate_func,
                                                 selection_func=selection_func,
                                                 checker_func=checker_func,
                                                 prob_cx=crossover_prob,
                                                 popsize=run_popsize,
                                                 # elitist=elitist,
                                                 verbose=verbose)
        
        initial_pop = [self.sample_input() for i in range(init_popsize)]
        self.optimizer.initialize(initial_pop)

        self.rounds = []
        self.prog_path = rospy.get_param('~progress_path', None)
        self.out_path = rospy.get_param('~output_path')
Developer: Humhu, Project: percepto, Lines: 59, Source: GeneticOptimization.py


Note: The numpy.iterable examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other open-source code and documentation platforms. The snippets are drawn from open-source projects contributed by their respective authors; copyright of the source code remains with the original authors, and any distribution or use should follow the corresponding project's License. Please do not reproduce this article without permission.