

Python numpy.ix_ Function Code Examples

This article collects typical usage examples of the Python function numpy.ix_. If you are wondering what numpy.ix_ does, how to call it, or what real code that uses it looks like, the curated examples below should help.


The following presents 15 code examples of the ix_ function, ordered by popularity by default.
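Before the examples, a quick orientation (standard NumPy behavior, not taken from any example below): np.ix_ converts index lists into an open mesh of broadcastable arrays, so that a[np.ix_(rows, cols)] selects the cross product of the given rows and columns rather than paired elements.

import numpy as np

a = np.arange(25).reshape(5, 5)
mesh = np.ix_([0, 2], [1, 3])   # tuple of arrays with shapes (2, 1) and (1, 2)
print(a[mesh])                  # the 2x2 submatrix at rows {0, 2} x cols {1, 3}
# [[ 1  3]
#  [11 13]]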

Example 1: Barabasi_Albert

def Barabasi_Albert(m0, m, N):
    if m > m0:
        raise ValueError('m must be smaller than or equal to m0')
    # adjacency matrix; the m0 seed nodes start fully connected
    K = np.eye(N, dtype=bool)
    K[np.ix_(np.arange(m0), np.arange(m0))] = True
    for ix in np.arange(m0, N):
        selected = np.zeros((ix,), dtype=bool)
        for conn in np.arange(m):
            free = np.nonzero(np.logical_not(selected))[0]
            # preferential attachment: sample one free node with
            # probability proportional to its degree
            p = np.sum(K[:, free], axis=0).astype(float)
            cdf = np.cumsum(p) / np.sum(p)
            r = np.random.uniform()
            link = np.searchsorted(cdf, r)
            K[ix, free[link]] = True
            K[free[link], ix] = True
            selected[free[link]] = True
    rp = np.random.permutation(N)
    return K[np.ix_(rp, rp)]
Author: rphlypo, Project: connectivity, Source: graph_models.py

Example 2: test_chol_add_remove

def test_chol_add_remove():
    N = 5
    X = np.random.randn(10,N)
    A = X.T.dot(X)
    L = np.linalg.cholesky(A)

    Am = A[:-1,:-1]
    bm = A[:-1,-1]
    cm = A[-1,-1]
    Lm = np.linalg.cholesky(Am)

    # Get chol by adding row
    assert np.allclose(L, chol_add_row(Lm, bm, cm))

    # Now get chol by removing a row
    def to_range(start, stop):
        return np.setdiff1d(np.arange(N), np.arange(start,stop))
    assert np.allclose(
        np.linalg.cholesky(A[np.ix_(to_range(4,5),
                                    to_range(4,5))]),
                           chol_remove_row(L,4,5))

    assert np.allclose(
        np.linalg.cholesky(A[np.ix_(to_range(1,3),
                                    to_range(1,3))]),
                           chol_remove_row(L,1,3))
Author: sheqi, Project: pyglm, Source: test_linalg.py
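The helpers chol_add_row and chol_remove_row are not included in this excerpt. As a hypothetical sketch of what chol_add_row could look like (my own construction, assuming it appends one row/column to a lower-triangular Cholesky factor; not the pyglm implementation):

import numpy as np

def chol_add_row(Lm, b, c):
    # Given Lm with Lm @ Lm.T == Am, return the Cholesky factor of the
    # bordered matrix [[Am, b], [b.T, c]].
    l = np.linalg.solve(Lm, b)      # forward solve: Lm @ l = b
    d = np.sqrt(c - l.dot(l))       # new diagonal entry
    n = Lm.shape[0]
    L = np.zeros((n + 1, n + 1))
    L[:n, :n] = Lm
    L[n, :n] = l
    L[n, n] = d
    return L

This follows from the block identity [[Lm, 0], [l.T, d]] @ [[Lm, 0], [l.T, d]].T == [[Am, b], [b.T, c]].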

Example 3: subset

    def subset(self, variables=None, samples=None):
        """Returns a subset of the dataset (and metadata).
        
        Specify the variables and samples for creating a subset of the data.
        variables and samples should be a list of ids. If not specified, it is
        assumed to be all variables or samples. 

        Some examples:
        
            - d.subset([3], [4])
            - d.subset([3,1,2])
            - d.subset(samples=[5,2,7,1])
        
        Note: order matters! d.subset([3,1,2]) != d.subset([1,2,3])

        """

        variables = variables if variables is not None else range(self.variables.size)
        samples = samples if samples is not None else range(self.samples.size)
        skip_stats = not (self.has_interventions or self.has_missing)
        d = Dataset(
            self.observations[N.ix_(samples,variables)],
            self.missing[N.ix_(samples,variables)],
            self.interventions[N.ix_(samples,variables)],
            self.variables[variables],
            self.samples[samples],
            skip_stats = skip_stats
        )
        
        # if self does not have interventions or missing, the subset can't.
        if skip_stats:
            d._has_interventions = False
            d._has_missing = False

        return d
Author: Alwnikrotikz, Project: pebl-project, Source: data.py

Example 4: _safe_split

def _safe_split(estimator, X, y, indices, train_indices=None):
    """Create subset of dataset and properly handle kernels."""
    from ..gaussian_process.kernels import Kernel as GPKernel

    if (hasattr(estimator, 'kernel') and callable(estimator.kernel) and
            not isinstance(estimator.kernel, GPKernel)):
        # cannot compute the kernel values with custom function
        raise ValueError("Cannot use a custom kernel function. "
                         "Precompute the kernel matrix instead.")

    if not hasattr(X, "shape"):
        if getattr(estimator, "_pairwise", False):
            raise ValueError("Precomputed kernels or affinity matrices have "
                             "to be passed as arrays or sparse matrices.")
        X_subset = [X[index] for index in indices]
    else:
        if getattr(estimator, "_pairwise", False):
            # X is a precomputed square kernel matrix
            if X.shape[0] != X.shape[1]:
                raise ValueError("X should be a square kernel matrix")
            if train_indices is None:
                X_subset = X[np.ix_(indices, indices)]
            else:
                X_subset = X[np.ix_(indices, train_indices)]
        else:
            X_subset = safe_indexing(X, indices)

    if y is not None:
        y_subset = safe_indexing(y, indices)
    else:
        y_subset = None

    return X_subset, y_subset
Author: IsaacHaze, Project: scikit-learn, Source: metaestimators.py

Example 5: _split

def _split(estimator, X, y, indices, train_indices=None):
    """Create subset of dataset."""
    if hasattr(estimator, 'kernel') and callable(estimator.kernel):
        # cannot compute the kernel values with custom function
        raise ValueError("Cannot use a custom kernel function. "
                         "Precompute the kernel matrix instead.")

    if not hasattr(X, "shape"):
        if getattr(estimator, "_pairwise", False):
            raise ValueError("Precomputed kernels or affinity matrices have "
                             "to be passed as arrays or sparse matrices.")
        X_subset = [X[idx] for idx in indices]
    else:
        if getattr(estimator, "_pairwise", False):
            # X is a precomputed square kernel matrix
            if X.shape[0] != X.shape[1]:
                raise ValueError("X should be a square kernel matrix")
            if train_indices is None:
                X_subset = X[np.ix_(indices, indices)]
            else:
                X_subset = X[np.ix_(indices, train_indices)]
        else:
            X_subset = X[safe_mask(X, indices)]

    if y is not None:
        y_subset = y[safe_mask(y, indices)]
    else:
        y_subset = None

    return X_subset, y_subset
Author: Idan-M, Project: scikit-learn, Source: grid_search.py

Example 6: runTest

    def runTest(self):
        F=lambda x,y: 100.0*((x>=0.4)&(x<=0.6)&(y>=0.4)&(y<=0.6))
        G=lambda x,y: (y==0)*1.0+(y==1)*(-1.0)

        a=fasm.AssemblerElement(self.mesh,felem.ElementTriP1())

        dudv=lambda du,dv: du[0]*dv[0]+du[1]*dv[1]
        K=a.iasm(dudv)

        uv=lambda u,v: u*v
        B=a.fasm(uv)
        
        fv=lambda v,x: F(x[0],x[1])*v
        f=a.iasm(fv)

        gv=lambda v,x: G(x[0],x[1])*v
        g=a.fasm(gv)

        D=np.nonzero(self.mesh.p[0,:]==0)[0]
        I=np.setdiff1d(np.arange(0,self.mesh.p.shape[1]),D)

        x=np.zeros(K.shape[0])
        x[I]=scipy.sparse.linalg.spsolve(K[np.ix_(I,I)]+B[np.ix_(I,I)],
                                         f[I]+g[I])

        self.assertAlmostEqual(np.max(x),1.89635971369,places=2)
Author: kinnala, Project: sp.fem, Source: test_asm.py

Example 7: penalty_function

def penalty_function(vocab_indices, summary_indices, sentence_similarity, config):
    """
        This is the penalty function that is described in the paper
        Graph-Based Submodular selection for extractive Summarization
    Args:
        vocab_indices: list
        summary_indices: list
        sentence_similarity: ndarray
        config: dictionary
        Some of the methods require some hyper parameters
        to be set

        This penalises redundancy
    Returns: The value of the graph cut function
    """
    penalty_lambda = config["penalty_lambda"]
    sentence_similartiy_ = np.copy(sentence_similarity)
    np.fill_diagonal(sentence_similartiy_, 0.0)

    if len(summary_indices) == 0:
        fn_value = 0.0
    else:
        v_not_in_s = list(set(vocab_indices) - set(summary_indices))
        rows = v_not_in_s
        cols = summary_indices
        # USING THE ADVANCED INDEXING OF THE NUMPY ARRAY
        fn_value = np.sum(sentence_similarity[np.ix_(rows, cols)]) - \
                   penalty_lambda * np.sum(sentence_similartiy_[np.ix_(summary_indices, summary_indices)])

    return fn_value
Author: abhinavkashyap92, Project: extractive_summarisation, Source: sub_modular_functions.py
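Reading the code above as a formula (my interpretation; the cited paper is not reproduced here): for vocabulary V, summary S, similarity weights $w_{ij}$, and penalty $\lambda$, the function evaluates the graph-cut objective

$$f(S) = \sum_{i \in V \setminus S} \sum_{j \in S} w_{ij} \;-\; \lambda \sum_{\substack{i, j \in S \\ i \neq j}} w_{ij},$$

where the $i \neq j$ restriction in the second term comes from zeroing the diagonal of the copied similarity matrix.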

Example 8: load_weight_files

def load_weight_files(weights_files, genes, patients, typeToGeneIndex, typeToPatientIndex, masterGeneToIndex, masterPatientToIndex):
    # Master matrix of all weights
    P = np.zeros((len(genes), len(patients)))
    for i, weights_file in enumerate(weights_files):
        # Load the weights matrix for this cancer type and update the entries appropriately.
        # Note that since genes/patients can be measured in multiple types, we need to map
        # each patient to the "master" index.
        type_P                 = np.load(weights_file)

        ty_genes               = set(typeToGeneIndex[i].keys()) & genes
        ty_gene_indices        = [ typeToGeneIndex[i][g] for g in ty_genes ]
        master_gene_indices    = [ masterGeneToIndex[g] for g in ty_genes ]

        ty_patients            = set(typeToPatientIndex[i].keys()) & patients
        ty_patient_indices     = [ typeToPatientIndex[i][p] for p in ty_patients ]
        master_patient_indices = [ masterPatientToIndex[p] for p in ty_patients ]

        master_mesh            = np.ix_(master_gene_indices, master_patient_indices)
        ty_mesh                = np.ix_(ty_gene_indices, ty_patient_indices)

        if np.any( P[master_mesh] > 0 ):
            raise ValueError("Different weights for same gene-patient pair")
        else:
            P[ master_mesh ] = type_P[ ty_mesh  ]

    # Set any zero entries to the minimum (pseudocount). The only reason for zeros is if
    #  a gene wasn't mutated at all in a particular dataset.
    P[P == 0] = np.min(P[P > 0])

    return dict( (g, P[masterGeneToIndex[g]]) for g in genes )
Author: raphael-group, Project: wext, Source: find_sets.py

Example 9: normal_eq_comb

def normal_eq_comb(AtA, AtB, PassSet = None):
    num_cholesky = 0
    num_eq = 0
    if AtB.size == 0:
        Z = np.zeros([])

    elif (PassSet is None) or np.all(PassSet):
        Z = nla.solve(AtA, AtB)
        num_cholesky = 1
        num_eq = AtB.shape[1]

    else:
        Z = np.zeros(AtB.shape) #(n, k)
        if PassSet.shape[1] == 1:
            if np.any(PassSet):
                cols = np.nonzero(PassSet)[0]
                Z[cols] = nla.solve(AtA[np.ix_(cols, cols)], AtB[cols])
                num_cholesky = 1
                num_eq = 1
        else:
            groups = column_group(PassSet)

            for g in groups:
                cols = np.nonzero(PassSet[:, g[0]])[0]

                if cols.size > 0:
                    ix1 = np.ix_(cols, g)
                    ix2 = np.ix_(cols, cols)

                    Z[ix1] = nla.solve(AtA[ix2], AtB[ix1])
                    num_cholesky += 1
                    num_eq += len(g)
    return Z, num_cholesky, num_eq
Author: crcrpar, Project: DataAnalysis, Source: function.py
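column_group is an external helper not shown in this excerpt. Judging from how it is used above (only PassSet[:, g[0]] is inspected per group, so every column in a group must share one boolean pattern), a plausible sketch is:

def column_group(PassSet):
    # Group column indices that share an identical boolean pattern, so
    # that each group can be solved with a single factorization.
    groups = {}
    for j in range(PassSet.shape[1]):
        key = tuple(PassSet[:, j].tolist())
        groups.setdefault(key, []).append(j)
    return list(groups.values())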

Example 10: classify_binomial

def classify_binomial(x, data, counts, y):
    classes, y = np.unique(y, return_inverse=True)
    max_label = None
    max = None
    for class_label in np.nditer(classes):
        class_examples = data[np.ix_(y == class_label)]
        class_counts = counts[np.ix_(y == class_label)]
        total_class_counts = sum(class_counts)
        alfas = (class_examples.sum(axis=0) + 0.01)/(total_class_counts + 0.01)

        prior = len(class_examples) / len(data)
        membership = getMembershipBinomial(x,alfas, prior, class_counts, total_class_counts)
        if(max_label is None):
            max_label = class_label
            max = membership
        else:
            if(class_label == 0):
                if membership>max:
                    max = membership
                    max_label = class_label
            else:
                if membership>(max+8.5):
                    max = membership
                    max_label = class_label
    return max_label
Author: marrosenfeld, Project: Machine-Learning, Source: naive_bayes.py

Example 11: conditional

    def conditional(self, in_dims, out_dims):
        conditionals = []

        for k, (weight_k, mean_k, covar_k) in enumerate(self):
            conditionals.append(conditional(mean_k, covar_k,
                                            in_dims, out_dims,
                                            self.covariance_type))

        cond_weights = lambda v: [(weight_k * Gaussian(mean_k[in_dims].reshape(-1,),
                                  covar_k[ix_(in_dims, in_dims)]).normal(v.reshape(-1,)))
                                  for k, (weight_k, mean_k, covar_k) in enumerate(self)]

        def res(v):
            gmm = GMM(n_components=self.n_components,
                      covariance_type=self.covariance_type,
                      random_state=self.random_state, thresh=self.thresh,
                      min_covar=self.min_covar, n_iter=self.n_iter, n_init=self.n_init,
                      params=self.params, init_params=self.init_params)
            gmm.weights_ = cond_weights(v)
            means_covars = [f(v) for f in conditionals]
            gmm.means_ = array([mc[0] for mc in means_covars]).reshape(self.n_components,
                                                                       -1)
            gmm._set_covars(array([mc[1] for mc in means_covars]))
            return gmm

        return res

Author: flowersteam, Project: explauto, Source: gmminf.py

Example 12: dctt1

def dctt1(a):
    """ dct  Discrete cosine transform.
    y = dct(a) returns the discrete cosine transform of a.
    The vector y is the same size as `a` and contains the
    discrete cosine transform coefficients.
    """
    if len(a.shape)==1:
        a = a.reshape(a.size,1)
    n,m = a.shape
    aa = a[:,:]
    #Compute weights to multiply DFT coefficients
    ww = arrayexp(n)
    if n%2 == 1:
        y = np.zeros([2*n,m])
        y[:n,:] = aa
        y[n:2*n,:] = np.flipud(aa)
        # Compute the FFT and keep the appropriate portion:
        yy = np.fft.fft(y,axis=0)
        yy = yy[:n,:]
    else:
        # Re-order the elements of the columns of x
        y = np.concatenate((aa[np.ix_(range(0,n,2))],\
                            aa[np.ix_(range(1,n,2)[::-1])]), axis=0)
        yy = np.fft.fft(y,axis=0)
        ww = 2*ww  # Double the weights for even-length case 

    wy = np.empty([n,m], complex)
    for j in range(m):
        wy[:,j]  = ww
    # Multiply FFT by weights:
    b = np.multiply(wy,yy)
    
    return b[:n,:m].real
Author: modeha, Project: lsq_solver, Source: dctt.py

Example 13: comp

        def comp(self,mean,var,covar,resp):

            # Store the indices to the missing and observed responses.
            miss=numpy.isnan(resp)
            obs=numpy.logical_not(miss)

            if miss.all():
                return mean,var,covar

            # Store the size of the model.
            numresp,numpred=numpy.shape(self.gain)

            kalmgain=numpy.eye(numresp)
            josgain=numpy.eye(numresp)

            # Fill in the Kalman and Joseph gain matrices.
            ind=numpy.ix_(miss,obs)
            kalmgain[ind]=linalg.solve(self.noise[numpy.ix_(obs,obs)],
                self.noise[ind].transpose()).transpose()
            josgain[:,obs]=josgain[:,obs]-kalmgain[:,obs]

            # Compute the predictor/response co-variance.
            covar=covar.dot(josgain.transpose())

            # Condition the response mean/variance on the observations.
            mean=josgain.dot(mean)+numpy.dot(kalmgain[:,obs],resp[obs])
            var=numpy.dot(josgain,numpy.dot(var,josgain.transpose()))

            return mean,var,covar
Author: gabrieag, Project: glds, Source: glds.py
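For orientation, here is my reading of the algebra above (not text from the glds project): with observed indices $o$ and missing indices $m$, the linalg.solve call fills in the standard Gaussian conditioning gain, and josgain applies it in Joseph form:

$$K_{mo} = \Sigma^{\mathrm{noise}}_{mo} \bigl(\Sigma^{\mathrm{noise}}_{oo}\bigr)^{-1}, \qquad \mu' = J\mu + K_{\cdot o}\, y_o, \qquad \Sigma' = J\,\Sigma\,J^{\top},$$

where $J$ is the identity with the gain subtracted on the observed columns, and the predictor/response cross-covariance is updated as $\mathrm{covar}\,J^{\top}$.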

Example 14: consgpattern

        def consgpattern():
            """
            binary pattern of the sparse nonlinear constraint gradient

            """

            vfdxpat = ( self.vfielddxpattern
                        if self.vfielddxpattern is not None
                        else np.ones( (self.Nstates,self.Nstates) ) )
            vfdupat = ( self.vfielddupattern
                        if self.vfielddupattern is not None
                        else np.ones( (self.Nstates,self.Ninputs) ) )
            if( self.Ncons > 0 ):
                consdxpat = ( self.consdxpattern
                              if self.consdxpattern is not None
                              else np.ones( (self.Ncons,self.Ninputs) ) )

            out = np.zeros( ( feuler.Ncons, feuler.N ), dtype=int )

            for k in range( Nsamples ):
                out[ np.ix_( dconsidx[:,k+1], stidx[:,k] ) ] = vfdxpat
                out[ np.ix_( dconsidx[:,k+1], uidx[:,k] ) ] = vfdupat

                if( self.Ncons > 0 ):
                    out[ np.ix_( iconsidx[:,k], stidx[:,k+1] ) ] = consdxpat

            return out
Author: hgonzale, Project: optwrapper, Source: ocp.py

Example 15: get_corr_pred

    def get_corr_pred( self, sctx, u, du, tn, tn1 ):

        n_ip_arr, ip_coords_arr, ip_weights_arr = self.ip_scheme

        self.F_int[:] = 0.0
        self.k_arr[...] = 0.0

        B_mtx_grid = None
        J_det_grid = None

        ip_offset = 0
        k_list = []
        for e_id, ( elem, n_ip ) in enumerate( zip( self.sdomain.elements, n_ip_arr ) ):
            ip_coords = ip_coords_arr[ ip_offset : ip_offset + n_ip ]
            ip_weights = ip_weights_arr[ ip_offset : ip_offset + n_ip ]
            ix = elem.get_dof_map()
            sctx.elem = elem
            sctx.elem_state_array = self.state_array[ ip_offset : ip_offset + n_ip ].flatten()
            sctx.X = elem.get_X_mtx()
            if self.cache_geo_matrices:
                B_mtx_grid = self.B_mtx_grid[ e_id, ... ]
                J_det_grid = self.J_det_grid[ e_id, ... ]
            f, k = self.fets_eval.get_corr_pred( sctx, u[ix_( ix )], du[ix_( ix )],
                                                 tn, tn1,
                                                 B_mtx_grid = B_mtx_grid,
                                                 J_det_grid = J_det_grid,
                                                 ip_coords = ip_coords,
                                                 ip_weights = ip_weights )

            self.k_arr[ e_id ] = k
            self.F_int[ ix_( ix ) ] += f
            ip_offset += n_ip

        return self.F_int, SysMtxArray( mtx_arr = self.k_arr, dof_map_arr = self.sdomain.elem_dof_map )
Author: simvisage, Project: simvisage, Source: dots_unstructured_eval.py


Note: the numpy.ix_ examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are taken from open-source projects contributed by their authors; copyright remains with the original authors, and use or redistribution should follow each project's License. Do not reproduce without permission.