

Python numpy.array_split Function Code Examples

This article collects and summarizes typical usage examples of the numpy.array_split function in Python. If you have been wondering what exactly array_split does, how to call it, or what real-world usage looks like, the curated examples below should help.


The following presents 15 code examples of the array_split function, drawn from open-source projects and ordered by popularity.
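
Before diving into the project code, here is a minimal sketch of what array_split itself does. Unlike np.split, it tolerates a chunk count that does not divide the array length evenly; the leading chunks simply absorb the remainder.

import numpy as np

x = np.arange(10)

# An uneven split: the extra elements go to the earlier chunks.
chunks = np.array_split(x, 3)
print([c.tolist() for c in chunks])  # [[0, 1, 2, 3], [4, 5, 6], [7, 8, 9]]

# np.split would raise ValueError here, because 10 is not divisible by 3.
# Both functions accept an axis argument for multidimensional arrays:
m = np.arange(12).reshape(3, 4)
cols = np.array_split(m, 3, axis=1)
print([c.shape for c in cols])  # [(3, 2), (3, 1), (3, 1)]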

Example 1: solar_position_numba

def solar_position_numba(unixtime, lat, lon, elev, pressure, temp, delta_t,
                         atmos_refract, numthreads, sst=False):
    """Calculate the solar position using the numba compiled functions
    and multiple threads. Very slow if functions are not numba compiled.
    """
    loc_args = np.array([lat, lon, elev, pressure, temp, delta_t,
                         atmos_refract, sst])
    ulength = unixtime.shape[0]
    result = np.empty((6, ulength), dtype=np.float64)
    if unixtime.dtype != np.float64:
        unixtime = unixtime.astype(np.float64)

    if ulength < numthreads:
        pvl_logger.warning('The number of threads is more than the length of'
                           ' the time array. Only using %s threads.',
                           ulength)
        numthreads = ulength

    if numthreads <= 1:
        pvl_logger.debug('Only using one thread for calculation')
        solar_position_loop(unixtime, loc_args, result)
        return result

    split0 = np.array_split(unixtime, numthreads)
    split2 = np.array_split(result, numthreads, axis=1)
    chunks = [[a0, loc_args, split2[i]] for i, a0 in enumerate(split0)]
    # Spawn one thread per chunk
    threads = [threading.Thread(target=solar_position_loop, args=chunk)
               for chunk in chunks]
    for thread in threads:
        thread.start()
    for thread in threads:
        thread.join()
    return result
Developer: TylrA, Project: pvlib-python, Lines of code: 34, Source file: spa.py
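
The key trick in Example 1 is that array_split returns views into the parent array, so each thread fills its slice of result in place and no merge step is needed. Below is a minimal sketch of the same pattern; the worker function and the sine computation are invented stand-ins for illustration, not part of pvlib-python.

import threading
import numpy as np

def worker(times, out):
    # out is a view into the shared result array, so writing through it
    # fills the corresponding columns of the parent array in place.
    out[:] = np.sin(times)  # stand-in for the real per-chunk computation

unixtime = np.linspace(0.0, 10.0, 1000)
result = np.empty((1, unixtime.shape[0]))
numthreads = 4

time_chunks = np.array_split(unixtime, numthreads)
result_chunks = np.array_split(result, numthreads, axis=1)

threads = [threading.Thread(target=worker, args=(t, r))
           for t, r in zip(time_chunks, result_chunks)]
for t in threads:
    t.start()
for t in threads:
    t.join()

assert np.allclose(result[0], np.sin(unixtime))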

Example 2: vocode

 def vocode(self, segment_voice, segment_gen):
     """This is the vocoder.  It multiplies the amplitudes of two seperate signals
     to produce a singular response""" 
     temp_final = []
     for j in range(self.num_channels):
         saw_spec = segment_gen[j].make_spectrum()
         input_spec = segment_voice[j].make_spectrum()
     
         input_hs = input_spec.hs
         saw_hs = saw_spec.hs
     
         saw_bands = np.array_split(saw_hs, self.num_bands)
         input_bands = np.array_split(input_hs, self.num_bands)
     
         final_bands = np.empty_like(saw_bands)  # assumes the bands split evenly; a plain list would also work
         for i in range(self.num_bands):
             amp_multi = np.abs(saw_bands[i])*np.abs(input_bands[i])
             phase_multi = np.angle(saw_bands[i])
             final_bands[i] = amp_multi*(np.cos(phase_multi)+(np.sin(phase_multi)*1j))
             
         temp_final.append(np.ma.concatenate(final_bands).data)
     final_wave = []
     for i in range(len(temp_final)):
         final_wave.append(thinkdsp.Spectrum(hs=temp_final[i], framerate = self.framerate).make_wave())
     output = final_wave[0]
     for i in range(1,len(final_wave)):
         output |= final_wave[i]
     return output
Developer: jabb1123, Project: Vocoder_project-SigSys, Lines of code: 28, Source file: test.py
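
Example 2 uses array_split to carve a spectrum into roughly equal frequency bands and np.concatenate to reassemble them. Because the chunks are views, in-place edits propagate back to the original array; a minimal sketch (the FFT input is invented for illustration):

import numpy as np

hs = np.fft.rfft(np.random.rand(1024))  # complex spectrum, 513 bins
bands = np.array_split(hs, 16)          # 16 nearly equal frequency bands

# Per-band processing; here we simply silence the upper half of the bands.
for band in bands[8:]:
    band[:] = 0                          # chunks are views into hs

# concatenate restores the original layout exactly.
assert np.concatenate(bands).shape == hs.shape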

Example 3: distribute_nodes

 def distribute_nodes(self, path_index):
     path = self.paths[path_index]
     if path.type == 'linear':
         digits = int(np.ceil(np.log10(path.ne)))
         base = path.index * 10 ** digits
         energies = np.linspace(path.begin, path.end, path.ne)
         weights = path.weights2 + [1] * (path.ne - 6) + path.weights3
         weights = np.array(weights) * path.int_step
         nids = np.arange(path.ne) + base + 1
     
     elif path.type == 'poles':
         base = path.index * 100
         nids0 = base + 10 + np.arange(path.poles_num) + 1
         nids1 = base + 20 + np.arange(path.poles_num) + 1
         nids = np.append(nids0, nids1)
         energies0 = path.begin + (np.arange(path.poles_num) * 2 - 1) * np.pi * 1.j
         energies1 = path.end + (np.arange(path.poles_num) * 2 - 1) * np.pi * 1.j
         weights0 = [-1] * path.poles_num
         weights1 = [1] * path.poles_num
         weights = np.append(weights0, weights1)
     
     loc_nids = np.array_split(nids, self.comm.size)[self.comm.rank]
     loc_energies = np.array_split(energies,
                                   self.comm.size)[self.comm.rank]
     loc_weights = np.array_split(weights, self.comm.size)[self.comm.rank]
     return loc_nids, loc_energies, loc_weights
Developer: eojons, Project: gpaw-scme, Lines of code: 28, Source file: contour.py
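
Example 3 relies on a common data-parallel idiom: every rank evaluates the same deterministic split and keeps only the chunk indexed by its own rank, so no communication is needed to agree on the partition. A minimal sketch, with rank and size as plain integers in place of a real communicator:

import numpy as np

def my_chunk(data, size, rank):
    # Every process computes the identical split and takes its own piece.
    return np.array_split(data, size)[rank]

nids = np.arange(10)
for rank in range(3):
    print(rank, my_chunk(nids, 3, rank))
# 0 [0 1 2 3]
# 1 [4 5 6]
# 2 [7 8 9]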

Example 4: score

    def score(self, X, y):
        """Returns the score obtained for each estimators/data slice couple.

        Parameters
        ----------
        X : array, shape (n_samples, n_features, n_estimators)
            The input samples. For each data slice, the corresponding estimator
            scores the prediction: e.g. [estimators[ii].score(X[..., ii], y)
                                        for ii in range(n_estimators)]
        y : array, shape (n_samples,) | (n_samples, n_targets)
            The target values.

        Returns
        -------
        score : array, shape (n_samples, n_estimators)
            Score for each estimator / data slice couple.
        """
        self._check_Xy(X)
        if X.shape[-1] != len(self.estimators_):
            raise ValueError('The number of estimators does not match '
                             'X.shape[2]')
        # For predictions/transforms the parallelization is across the data and
        # not across the estimators to avoid memory load.
        parallel, p_func, n_jobs = parallel_func(_sl_score, self.n_jobs)
        X_splits = np.array_split(X, n_jobs, axis=-1)
        est_splits = np.array_split(self.estimators_, n_jobs)
        score = parallel(p_func(est, x, y)
                         for (est, x) in zip(est_splits, X_splits))

        if n_jobs > 1:
            score = np.concatenate(score, axis=0)
        else:
            score = score[0]
        return score
Developer: chrismullins, Project: mne-python, Lines of code: 34, Source file: search_light.py
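
Example 4 keeps estimators and data aligned by splitting both with the same n_jobs: chunk i of the estimator list always corresponds to chunk i of the data along the last axis. A stripped-down sketch of that pairing (the arrays are invented placeholders):

import numpy as np

n_jobs = 3
X = np.random.rand(8, 5, 7)          # (n_samples, n_features, n_estimators)
estimators = np.arange(X.shape[-1])  # stand-ins for fitted estimators

X_splits = np.array_split(X, n_jobs, axis=-1)
est_splits = np.array_split(estimators, n_jobs)

# The chunks line up index-for-index because both splits use the same count.
for est, x in zip(est_splits, X_splits):
    assert len(est) == x.shape[-1]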

Example 5: process

    def process(self, data, output, processes, process):
        """
        """
        print "in the process function"
        if data.center_of_rotation is None:
            centre_of_rotation = np.ones(data.get_number_of_sinograms())
            centre_of_rotation = centre_of_rotation * self.parameters["center_of_rotation"]
        else:
            centre_of_rotation = data.center_of_rotation[:]

        if centre_of_rotation is None:
            centre_of_rotation = np.ones(data.get_number_of_sinograms())
            centre_of_rotation = centre_of_rotation * self.parameters["center_of_rotation"]

        sinogram_frames = np.arange(data.get_number_of_sinograms())

        frames = np.array_split(sinogram_frames, len(processes))[process]
        centre_of_rotations = np.array_split(centre_of_rotation, len(processes))[process]

        angles = data.rotation_angle.data[:]

        for i in range(len(frames)):
            frame_centre_of_rotation = centre_of_rotations[i]
            sinogram = data.data[:, frames[i], :]
            reconstruction = self.reconstruct(
                sinogram,
                frame_centre_of_rotation,
                angles,
                (output.data.shape[0], output.data.shape[2]),
                (output.data.shape[0] / 2, output.data.shape[2] / 2),
            )
            output.data[:, frames[i], :] = reconstruction
            self.count += 1
            print(self.count)
Developer: yskashyap, Project: Savu, Lines of code: 34, Source file: base_recon.py

Example 6: filter_params

 def filter_params(self, p_sets, p_fmins, nkeep=5, method='best'):
     # rank inits by costfx error low-to-high
     fmin_series = pd.Series(p_fmins)
     rankorder = fmin_series.sort_values()
     # eliminate extremely bad parameter sets
     rankorder = rankorder[rankorder<=5.0]
     if method=='random':
         # return nkeep from randomly sampled inits
         inits = p_sets[:nkeep]
         inits_err = p_fmins[:nkeep]
     elif method=='best':
         # return nkeep from inits with lowest err
         inits = [p_sets[i] for i in rankorder.index[:nkeep]]
         inits_err = rankorder.values[:nkeep]
     elif method=='lmh':
         # split index for low, med, and high err inits
         # if nkeep is odd, will sample more low than high
         if nkeep<3: nkeep=3
         ix = rankorder.index.values
         nl, nm, nh = [arr.size for arr in np.array_split(np.arange(nkeep), 3)]
         # extract indices roughly equal numbers of parameter sets with low, med, hi err
         keep_ix = np.hstack([ix[:nl], np.array_split(ix,2)[0][-nm:], ix[-nh:]])
         inits = [p_sets[i] for i in keep_ix]
         inits_err = [fmin_series[i] for i in keep_ix]
     return inits, np.min(inits_err)
Developer: dunovank, Project: radd_kd, Lines of code: 25, Source file: theta.py
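
The nl, nm, nh line in Example 6 is a compact way to divide nkeep into three nearly equal group sizes: array_split places any remainder in the earlier chunks, which is why odd values of nkeep favor the low-error group. For instance:

import numpy as np

for nkeep in (5, 6, 7):
    nl, nm, nh = [arr.size for arr in np.array_split(np.arange(nkeep), 3)]
    print(nkeep, (nl, nm, nh))
# 5 (2, 2, 1)
# 6 (2, 2, 2)
# 7 (3, 2, 2)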

Example 7: transform

    def transform(self, pts, verbose=None):
        """Apply the warp.

        Parameters
        ----------
        pts : shape (n_transform, 3)
            Source points to warp to the destination.

        Returns
        -------
        dest : shape (n_transform, 3)
            The transformed points.
        """
        logger.info('Transforming %s points' % (len(pts),))
        from scipy.spatial.distance import cdist
        assert pts.shape[1] == 3
        # for memory reasons, we should do this in ~100 MB chunks
        out = np.zeros_like(pts)
        n_splits = max(int((pts.shape[0] * self._destination.shape[0]) /
                           (100e6 / 8.)), 1)
        for this_out, this_pts in zip(np.array_split(out, n_splits),
                                      np.array_split(pts, n_splits)):
            dists = _tps(cdist(this_pts, self._destination, 'sqeuclidean'))
            L = np.hstack((dists, np.ones((dists.shape[0], 1)), this_pts))
            this_out[:] = np.dot(L, self._weights)
        assert not (out == 0).any()
        return out
Developer: jhouck, Project: mne-python, Lines of code: 27, Source file: transforms.py
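
Example 7 zips two parallel splits, one over the preallocated output (whose chunks are writable views) and one over the input points, so only one distance block has to exist in memory at a time. A minimal sketch of the same chunked-write pattern, with a trivial stand-in for the expensive per-chunk step:

import numpy as np

pts = np.random.rand(1000, 3)
out = np.zeros_like(pts)
n_splits = 4

for this_out, this_pts in zip(np.array_split(out, n_splits),
                              np.array_split(pts, n_splits)):
    # this_out is a view into out: assigning through it fills the
    # matching rows of the full output array.
    this_out[:] = this_pts * 2.0  # stand-in for the real computation

assert np.allclose(out, pts * 2.0)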

Example 8: split_data

from math import ceil
from numpy import array_split

def split_data(ras, decs):
	"""
	Split the RAs and DECs into smaller chunks, which gives better
	cache locality.
	"""
	size = int(ceil(len(ras) / 256.0))
	return zip(array_split(ras, size), array_split(decs, size))
Developer: DarwinSenior, Project: COSMO, Lines of code: 7, Source file: binSortCountMapReduce.py
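
Example 8 derives the chunk count from a target chunk size (about 256 elements) rather than fixing the number of chunks, and zip then pairs the RA and DEC chunks positionally. A quick sketch under the same assumptions:

import numpy as np
from math import ceil

ras = np.random.rand(1000)
decs = np.random.rand(1000)

size = int(ceil(len(ras) / 256.0))  # number of ~256-element chunks: 4
pairs = list(zip(np.array_split(ras, size), np.array_split(decs, size)))
print(len(pairs), [len(r) for r, _ in pairs])  # 4 [250, 250, 250, 250]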

Example 9: parallelMorton

def parallelMorton(iMortonRanges, xMortonRanges, childMethod, numProcessesQuery):
    if iMortonRanges is not None:
        numMRanges = max((len(iMortonRanges), len(xMortonRanges)))
        if numMRanges > numProcessesQuery:
            numChunks = numProcessesQuery
        else:
            numChunks = numMRanges
        ichunks = numpy.array_split(iMortonRanges, numChunks)
        xchunks = numpy.array_split(xMortonRanges, numChunks)
    else:
        numMRanges = len(xMortonRanges)
        if numMRanges > numProcessesQuery:
            numChunks = numProcessesQuery
        else:
            numChunks = numMRanges
        ichunks = numpy.array_split([], numChunks)
        xchunks = numpy.array_split(xMortonRanges, numChunks)
    children = []
    for i in range(numChunks):
        children.append(multiprocessing.Process(target=childMethod, 
            args=(ichunks[i],xchunks[i])))
        children[-1].start()  
    # wait for all children to finish their execution
    for i in range(numChunks):
        children[i].join()
Developer: ZheLI0319, Project: pointcloud-benchmark, Lines of code: 25, Source file: dbops.py
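
A detail Example 9 depends on: numpy.array_split accepts an empty sequence and still returns the requested number of (empty) chunks, so ichunks[i] is always a valid argument for the child process even when there are no inclusive ranges:

import numpy as np

chunks = np.array_split([], 3)
print([c.size for c in chunks])  # [0, 0, 0]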

Example 10: gp2

def gp2(data, block_size = 100, nugget = 0.005):

	c = data[0]
	s = data[1]
	s_2 = np.array_split(s, len(s)//block_size + 1)
	c_2 = np.array_split(c, len(s)//block_size + 1)
	
	sapflux_pred = []
	
	nug = nugget
	for a in range(0,len(s_2)):
	
		t0 = time.time()
		X = np.atleast_2d(c_2[a]).T
		y = np.atleast_2d(s_2[a]).T
	
		gproc = gaussian_process.GaussianProcess(theta0=0.01, thetaL=1e-4, thetaU=1e-1,nugget=nug)
	
		
		gproc.fit(X, y)
		y_pred, sigma2_pred = gproc.predict(X, eval_MSE=True)
		sapflux_pred.extend(y_pred.ravel())
		t1 = time.time()
		print(t1 - t0)
	
	return np.array([c, s, np.array(sapflux_pred)])
Developer: dmuley, Project: lc-simulation, Lines of code: 26, Source file: pipeline.py

Example 11: ensemble_maker_inner

def ensemble_maker_inner(train_mat,labels,model_gen_function, info_dict,num=10):
    ## contains core functions to make ensemble models
    ## from training data and labels
    ## model_gen_function is a function that takes NO arguments and returns a keras model
    ## info_dict is a dictionary of training info 
    train_mat, labels = shuffle(train_mat, labels)
    train_mat = np.array_split(train_mat, num, axis=0)
    labels = np.array_split(labels, num, axis=0)
    earlystop = EarlyStopping(monitor=info_dict['monitor'], min_delta=info_dict['min_delta'],
                              patience=info_dict['patience'],
                              verbose=0,
                              mode='auto')
    callbacks_list = [earlystop]
    model_list = []
    for ii in range(num):
        train_feature = array_stack(train_mat, ii)
        train_labels = array_stack(labels, ii)
        loaded_model = model_gen_function() # note the call to gen new model
        current_model = reset_weights(loaded_model)
        history = current_model.fit(train_feature, train_labels,
                                    epochs=info_dict['epochs'], verbose=0,
                                    batch_size=info_dict['batch_size'],
                                    callbacks=callbacks_list)
        model_list.append(current_model)
    return model_list
Developer: hjkgrp, Project: molSimplify, Lines of code: 25, Source file: ensemble_test.py

Example 12: generateTrainAndTest

    def generateTrainAndTest(self):
        """
        Generate train and test data and then yield
        :return:
        """
        partitions = np.array_split(self.dataset, self.numOfFolds)
        labels_partitions = np.array_split(self.labels, self.numOfFolds)
        for fold in range(self.numOfFolds):
            self.test = partitions[fold]
            self.labels_test = labels_partitions[fold]

            fold_left = partitions[:fold]
            fold_right = partitions[fold + 1:]

            labels_fold_left = labels_partitions[:fold]
            labels_fold_right = labels_partitions[fold + 1:]

            if len(fold_left) == 0:
                self.train = np.concatenate(fold_right)
                self.labels_train = np.concatenate(labels_fold_right)
            elif len(fold_right) == 0:
                self.train = np.concatenate(fold_left)
                self.labels_train = np.concatenate(labels_fold_left)
            else:
                self.train = np.concatenate((np.concatenate(fold_left), np.concatenate(fold_right)))
                self.labels_train = np.concatenate(
                        (np.concatenate(labels_fold_left), np.concatenate(labels_fold_right)))
            yield
Developer: mehtadeepen, Project: Image-Classification-in-ML, Lines of code: 28, Source file: CrossValidation.py
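
Example 12 builds k folds by splitting the dataset and the labels with the same numOfFolds, then concatenating everything except the held-out fold. The core of that loop can be sketched in a few lines; a list comprehension over the remaining folds avoids the empty-list edge cases the example handles explicitly:

import numpy as np

data = np.arange(10)
k = 3
folds = np.array_split(data, k)

for i in range(k):
    test = folds[i]
    # All folds except fold i form the training set.
    train = np.concatenate([f for j, f in enumerate(folds) if j != i])
    print(i, train.tolist(), test.tolist())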

Example 13: get_gradient

def get_gradient(theta):
    global fractional_counts, event_index, feature_index, event_grad, rc, N
    assert len(theta) == len(feature_index)
    event_grad = {}
    cpu_count = multiprocessing.cpu_count()
    pool = Pool(processes=cpu_count)  # uses all available CPUs
    batches_fractional_counts = np.array_split(range(len(event_index)), cpu_count)
    events_to_split = list(events_to_features.keys())
    batches_events_to_features = np.array_split(events_to_split, cpu_count)
    # for batch_of_fc in batches_fractional_counts:
    for batch_of_fc in batches_events_to_features:
        pool.apply_async(batch_gradient, args=(theta, batch_of_fc), callback=batch_accumilate_gradient)
    pool.close()
    pool.join()
    # grad = np.zeros_like(theta)
    grad = -2 * rc * theta  # l2 regularization with lambda 0.5
    for e in event_grad:
        feats = events_to_features.get(e, [])
        for f in feats:
            grad[feature_index[f]] += event_grad[e]

    # for s in seen_index:
    # grad[s] += -theta[s]  # l2 regularization with lambda 0.5
    assert len(grad) == len(feature_index)
    return -grad
Developer: arendu, Project: Featurized-Word-Alignment, Lines of code: 25, Source file: hybrid_model1_mp.py
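
Example 13 splits the flat list of event keys into one batch per CPU before dispatching the batches to the pool. Note that array_split converts its input to an ndarray, so the string keys come back as numpy string arrays; a minimal sketch of the batching step, with invented keys:

import numpy as np

events_to_features = {'event_%d' % i: [] for i in range(10)}
cpu_count = 4

batches = np.array_split(list(events_to_features.keys()), cpu_count)
print([len(b) for b in batches])  # [3, 3, 2, 2]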

Example 14: make_batches

def make_batches(x, y, batch_size=128, shuffle=True, nest=True):
    for i in range(len(x)):
        x[i] = atleast_4d(x[i])
    y = atleast_4d(y)
    num_batches = (y.shape[0] // batch_size)
    if y.shape[0] % batch_size != 0:
        num_batches += 1
    if shuffle:
        shuffled_arrays = sk.utils.shuffle(*x, y)
        x = shuffled_arrays[:len(x)]
        y = shuffled_arrays[-1]
    x_batches_list = []
    for i in range(len(x)):
        x_batches_list.append(np.array_split(x[i], num_batches))
    if nest:
        x_batches = []
        for i in range(num_batches):
            x_batch = []
            for x_input in x_batches_list:
                x_batch.append(x_input[i])
            x_batches.append(x_batch)
    else:
        x_batches = x_batches_list
    y_batches = np.array_split(y, num_batches)
    return x_batches, y_batches, num_batches
Developer: wanqizhu, Project: Neural-Network-Dev, Lines of code: 25, Source file: Core.py
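
Example 14 computes num_batches by ceiling division and lets array_split balance the remainder, so batches differ in length by at most one. Note that this yields batch sizes 4, 3, 3 for ten samples with batch_size 4, not 4, 4, 2. A quick check:

import numpy as np

y = np.arange(10)
batch_size = 4
num_batches = -(-y.shape[0] // batch_size)  # ceiling division: 3
batches = np.array_split(y, num_batches)
print([len(b) for b in batches])  # [4, 3, 3]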

Example 15: ModelSelectionTest01

def ModelSelectionTest01():
	from sklearn import datasets, svm
	import numpy as np
	digits = datasets.load_digits()
	X_digits = digits.data
	Y_digits = digits.target
	svc = svm.SVC(C = 1, kernel = 'linear')
	score = svc.fit(X_digits[:-100], Y_digits[:-100]).score(X_digits[-100:], Y_digits[-100:])

	#print score

	X_folds = np.array_split(X_digits, 3)
	Y_folds = np.array_split(Y_digits, 3)

	#print len(X_folds[0])

	scores = list()

	for k in range(3):
		X_train = list(X_folds) # X_folds is a list with 3 elements
		X_test = X_train.pop(k) # the test set is the k-th element popped from the train list
		X_train = np.concatenate(X_train) # leaving X_train as everything except X_test
		#print len(X_train)
		Y_train = list(Y_folds)
		Y_test = Y_train.pop(k)
		Y_train = np.concatenate(Y_train)

		scores.append(svc.fit(X_train, Y_train).score(X_test, Y_test))

	#print scores


	from sklearn import cross_validation
	k_fold = cross_validation.KFold(n = 6, n_folds = 3)
	for train_indices, test_indices in k_fold:
		print(train_indices, test_indices)

	k_fold = cross_validation.KFold(len(X_digits), n_folds = 3)
	scores = [svc.fit(X_digits[train], Y_digits[train]).score(X_digits[test], Y_digits[test]) for train , test in k_fold]

	#print scores

	scores = cross_validation.cross_val_score(svc, X_digits, Y_digits, cv = k_fold, n_jobs = 1)
	#print scores

	from sklearn.grid_search import GridSearchCV
	gammas = np.logspace(-6, -1, 10)
	clf = GridSearchCV(estimator = svc, param_grid = dict(gamma = gammas), n_jobs = 1)
	clf.fit(X_digits[:1000], Y_digits[:1000])
	print(clf.best_score_)
	print(clf.best_estimator_.gamma)

	from sklearn import linear_model, datasets
	lasso = linear_model.LassoCV()    # how does LassoCV differ from plain Lasso?
	diabetes = datasets.load_diabetes()
	X_diabetes = diabetes.data
	Y_diabetes = diabetes.target
	lasso.fit(X_diabetes, Y_diabetes)

	print(lasso.alpha_)
Developer: hyliu0302, Project: scikit-learn-notes, Lines of code: 60, Source file: myScikitLearnFcns.py


Note: the numpy.array_split examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by their respective authors, who retain copyright over the source code; for distribution and use, refer to the corresponding project's License. Please do not reproduce without permission.