Python scipy.var Function Code Examples

This article collects typical usage examples of the Python scipy.var function from open-source projects. If you are unsure what scipy.var does or how to call it, the examples below should help.


The following presents 15 code examples of the var function, ordered by popularity by default.
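
A quick orientation before the examples: in the SciPy versions these projects target, scipy.var is a re-export of numpy.var and returns the population variance (ddof=0) by default; the alias has since been deprecated, so new code should call numpy.var directly. A minimal sketch:

import scipy  # these examples target older SciPy; prefer numpy.var in new code

data = [2.0, 4.0, 4.0, 4.0, 5.0, 5.0, 7.0, 9.0]
print(scipy.var(data))           # population variance (ddof=0): 4.0
print(scipy.var(data, ddof=1))   # sample variance (ddof=1): ~4.571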

Example 1: infer_diag_post

 def infer_diag_post(self, X_ii, D_i):
     
     X_i = dc(X_ii)  # deep copy so the caller's array is not resized in place
     ns = len(D_i)
     
     X_i.resize([ns, self.D])
     [m, V] = self.infer_diag(X_i, D_i)
     if sp.amin(V) <= 0.:
         class MJMError(Exception):
             pass
         print("negative/zero variance")
         print([m, V, X_i, D_i])
         print("_______________")
         #self.printc()
         raise MJMError()
     if sp.amin(sp.var(m, axis=0)) < 0.:
         class MJMError(Exception):
             pass
         print("negative variance of mean")
         print(X_i.shape)
         print([m, V, sp.var(m, axis=0), X_i, D_i])
         print("_______________")
         #self.printc()
         raise MJMError()
     
     # posterior: mean of the means, plus total variance (law of total variance)
     return [sp.mean(m, axis=0).reshape([1, ns]),
             (sp.mean(V, axis=0) + sp.var(m, axis=0)).reshape([1, ns])]
Author: markm541374, Project: GPc, Lines of code: 26, Source file: GPdc.py
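
The returned posterior variance adds sp.mean(V, axis=0) to sp.var(m, axis=0), which is the law of total variance, Var[y] = E[Var[y|θ]] + Var[E[y|θ]]. A toy check of the identity (values are illustrative, not from the project):

import scipy as sp

# per-hyperparameter-sample posterior means and variances, as in infer_diag_post
m = sp.array([[1.0, 2.0], [1.2, 1.8], [0.9, 2.1]])  # one row of means per sample
V = sp.array([[0.5, 0.4], [0.6, 0.5], [0.4, 0.6]])  # one row of variances per sample

total_var = sp.mean(V, axis=0) + sp.var(m, axis=0)  # law of total variance
print(total_var)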

Example 2: generate_train_test_phenotypes

def generate_train_test_phenotypes(betas, train_snps, test_snps, h2=0.01):
    """
    Generate genotypes given betas and SNPs
    """
    (m, n) = train_snps.shape
    (test_m, test_n) = test_snps.shape
    assert len(betas) == m == test_m, 'betas and SNP matrices must have the same number of markers'
    
    #Training phenotypes
    phen_noise = stats.norm.rvs(0, sp.sqrt(1.0 - h2), size=n) 
    phen_noise = sp.sqrt((1.0 - h2) / sp.var(phen_noise)) * phen_noise
    genetic_part = sp.dot(train_snps.T, betas)
    genetic_part = sp.sqrt(h2 / sp.var(genetic_part)) * genetic_part
    train_phen = genetic_part + phen_noise
#        print('Herit:', sp.var(genetic_part) / sp.var(train_phen))
    ret_dict = {}
    ret_dict['phen'] = train_phen
    betas_marg = (1. / n) * sp.dot(train_phen, train_snps.T)
    ret_dict['betas_marg'] = betas_marg
    
    #Testing phenotypes
    phen_noise = stats.norm.rvs(0, sp.sqrt(1.0 - h2), size=test_n) 
    phen_noise = sp.sqrt((1.0 - h2) / sp.var(phen_noise)) * phen_noise
    genetic_part = sp.dot(test_snps.T, betas)
    genetic_part = sp.sqrt(h2 / sp.var(genetic_part)) * genetic_part
    test_phen = genetic_part + phen_noise
    ret_dict['test_phen'] = test_phen
    return ret_dict
Author: bvilhjal, Project: phensim, Lines of code: 28, Source file: phenotypes.py
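
The repeated pattern above, sp.sqrt(target / sp.var(x)) * x, rescales a component so it contributes exactly its intended share of the phenotypic variance (h2 for the genetic part, 1 - h2 for the noise). A self-contained sketch of the trick (names are illustrative, not from the project):

import scipy as sp
from scipy import stats

def scale_to_variance(x, target_var):
    """Rescale x so that sp.var(x) equals target_var."""
    return sp.sqrt(target_var / sp.var(x)) * x

h2 = 0.01  # heritability: the fraction of variance that should be genetic
noise = scale_to_variance(stats.norm.rvs(0, 1, size=1000), 1.0 - h2)
print(sp.var(noise))  # == 1.0 - h2 up to floating point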

Example 3: generate_test_data_w_sum_stats

def generate_test_data_w_sum_stats(h2=0.5, n=100000, n_sample=100, m=50000, model='gaussian', 
                                         p=1.0, conseq_r2=0, m_ld_chunk_size=100):
    """
    Generate 
    """
    #Get LD sample matrix
    D_sample = genotypes.get_sample_D(200,conseq_r2=conseq_r2,m=m_ld_chunk_size)
    
    #Simulate beta_hats
    ret_dict = simulate_beta_hats(h2=h2, n=n, n_sample=n_sample, m=m, model=model, p=p, 
                                    conseq_r2=conseq_r2, m_ld_chunk_size=m_ld_chunk_size, D_sample=D_sample)
    
    #Simulate test genotypes
    test_snps = genotypes.simulate_genotypes_w_ld(n_sample=n_sample, m=m, conseq_r2=conseq_r2, 
                                                  m_ld_chunk_size=m_ld_chunk_size)
    ret_dict['test_snps'] = test_snps
    
    #Simulate test phenotypes
    phen_noise = stats.norm.rvs(0, sp.sqrt(1.0 - h2), size=n_sample) 
    phen_noise = sp.sqrt((1.0 - h2) / sp.var(phen_noise)) * phen_noise
    genetic_part = sp.dot(test_snps.T, ret_dict['betas'])
    genetic_part = sp.sqrt(h2 / sp.var(genetic_part)) * genetic_part
    test_phen = genetic_part + phen_noise
    ret_dict['test_phen'] = test_phen
    return ret_dict
Author: bvilhjal, Project: phensim, Lines of code: 25, Source file: phenotypes.py

Example 4: _sqr_transform

 def _sqr_transform(self,  method='standard'):
     a = sp.array(self.values)
     if method == 'standard':
         shifted = (a - min(a)) + 0.1 * sp.var(a)  # shift so all values are positive
         vals = shifted * shifted
     else:
         vals = a * a
     self._perform_transform(vals,"sqr")
     return True
Author: timeu, Project: PyGWAS, Lines of code: 8, Source file: phenotype.py

Example 5: prnt

def prnt(filename, type, duration, run):
    sptp = EL.G.sig_per_turtle_p  # Sig per turtle based on percent of total significance
    sptn = EL.G.sig_per_turtle_n  # Sig per turtle based on number of significant patches visited
    row = [type, EL.G.NUM_TURTLES[type], duration, run,
           EL.G.epprog, EL.G.total_prog, EL.G.percent_progress,
           EL.G.agents_peak, EL.G.agents_hill, EL.G.wasted_effort,
           '',  # spacer column between the two statistics groups
           min(sptp), max(sptp), mean(sptp), median(sptp), var(sptp), skew(sptp),
           '',  # spacer column
           min(sptn), max(sptn), mean(sptn), median(sptn), var(sptn), skew(sptn)]
    with open(filename, 'a') as f:
        f.write(','.join(str(v) for v in row) + '\n')
Author: brettc, Project: epistemic-landscapes, Lines of code: 10, Source file: Batch_Binary.py

Example 6: ftest

def ftest(X, Y):
    ''' F-test for equality of variances.
    :param X: data 1
    :param Y: data 2
    :return: F statistic and upper-tail p-value of the F-test
    '''
    F = scipy.var(X, ddof=1) / scipy.var(Y, ddof=1)  # sample variances (ddof=1)
    df1, df2 = len(X) - 1, len(Y) - 1  # degrees of freedom are n - 1, not n
    pval = stats.f.sf(F, df1, df2)  # upper-tail probability
    return (F, pval)
Author: alorchhota, Project: asenet, Lines of code: 10, Source file: analyze_hist_diff_of_ase_covariate_corr2.py
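
Note the fixes above: the degrees of freedom of the F statistic are n - 1, and stats.f.sf gives the upper-tail probability. For a two-sided test of variance equality one usually doubles the smaller tail; a sketch of that variant (an assumption on my part, not part of the original project):

import scipy
from scipy import stats

def ftest_two_sided(X, Y):
    """Two-sided F-test for equality of variances (assumes roughly normal data)."""
    F = scipy.var(X, ddof=1) / scipy.var(Y, ddof=1)
    df1, df2 = len(X) - 1, len(Y) - 1
    p_upper = stats.f.sf(F, df1, df2)
    return F, 2 * min(p_upper, 1.0 - p_upper)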

Example 7: power

    def power(self, currentSource, active, inactive, histories, discMesh):
        """
        power is the main method for the power method

        currentSource: Initial source for power method
        active: Number of active iterations
        inactive: Number of inactive iterations
        histories: Number of histories per iteration
        discMesh: Mesh for discretization of FissionSource
        """
        self.k = 1

        self.eigEstI = []   # Estimates of eigenvalue from inactive iterations
        self.meanEigI = []  # Mean of the eigenvalues from inactive iterations
        self.varEigI = []   # Variance of the eigenvalues from inactive iterations
        self.eigEst = []    # Estimates of eigenvalue from active iterations
        self.meanEig = []   # Mean of the eigenvalues from active iterations
        self.varEig = []    # Variance of the eigenvalues from active iterations

        self.eigVector = [] # Eigenvector estimate for active iterations

        start = time.time()
        for i in range(inactive):
            nextSource = self.Markov_Transport(currentSource, histories)
            self.k = self.k*(len(currentSource)/float(histories))

            self.eigEstI.append(self.k)
            self.meanEigI.append(scipy.mean(self.eigEstI))  # Running mean of inactive estimates
            self.varEigI.append(scipy.var(self.eigEstI))    # Running variance of inactive estimates

            print("I: %5i, eigenvalue = %8.6f, time: %8.3f sec"
                  % (i, self.k, time.time() - start))

            currentSource = nextSource

        print("------------------ACTIVE ITERATIONS------------------")
        for self.i in range(active):
            nextSource = self.Markov_Transport(currentSource, histories)
            self.k = self.k*(len(currentSource)/float(histories))

            self.eigEst.append(self.k)
            self.meanEig.append(scipy.mean(self.eigEst))  # Running mean of active estimates
            self.varEig.append(scipy.var(self.eigEst))    # Running variance of active estimates

            print("A: %5i, eigenvalue = %8.6f, mean = %6.4f, std.dev = %6.4f, time: %8.3f sec"
                  % (self.i, self.k, self.meanEig[-1],
                     math.sqrt(self.varEig[-1]), time.time() - start))

            # Discretized fissionSource
            discSource = nextSource.discretized(discMesh)
            discSource = discSource/sum(discSource)
            self.eigVector.append(discSource)

            currentSource = nextSource
Author: jlconlin, Project: PhDThesis, Lines of code: 55, Source file: Power.py
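
The bookkeeping above is the standard Monte Carlo power method: inactive iterations let the source converge, then active iterations accumulate eigenvalue estimates whose running mean and variance give the reported value and its uncertainty. A deterministic analogue on a plain matrix, showing the same mean/variance tracking without the transport machinery (illustrative only):

import scipy
import numpy as np

A = np.array([[2.0, 1.0], [1.0, 3.0]])
v = np.ones(2)
estimates = []
for i in range(50):
    w = A @ v
    estimates.append(np.linalg.norm(w) / np.linalg.norm(v))  # eigenvalue estimate
    v = w / np.linalg.norm(w)

active = estimates[10:]  # discard the first iterations as "inactive" burn-in
print(scipy.mean(active), scipy.var(active))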

Example 8: regress_erp

def regress_erp(y, test_idx, predictor, events,  ns):
    event_types = events['uniqueLabel']
    labels = events['label']
    latencies = events['latencyInFrame']

    train_idx = ~test_idx
    ytrn = matrix(y[train_idx].tolist()).T

    #There is a specific test_set to use
    if (len(np.where(test_idx)[0])!=0):
        tst_start_idx = min(np.where(test_idx)[0])
        tst_end_idx = max(np.where(test_idx)[0])

    #Test on all the data
    else:
        tst_start_idx = min(np.where(~test_idx)[0])
        tst_end_idx = max(np.where(~test_idx)[0])

    train_idx_list = np.where(train_idx == 1)[0]
    train_idx_list = array(train_idx_list, dtype=int).tolist()  # np.int is removed in newer NumPy

    #Solve the system of equations y = Ax
    P = predictor[train_idx_list,:].T*predictor[train_idx_list,:]
    q = -predictor[train_idx_list, :].T*ytrn
    rerp_vec = solvers.coneqp(P, q)['x']

    yestimate = array(predictor*rerp_vec)
    y_temp = matrix(y.tolist()).T
    noise = y_temp-yestimate


    events_to_test = np.where((array(latencies)<tst_end_idx) & (array(latencies)>tst_start_idx))[0]
    gc.disable()
    #Compute performance stats
    stats = np.empty((len(event_types),2))
    for i, this_type in enumerate(event_types):
        this_stat = np.empty((0,2))
        for j, event_idx in enumerate(events_to_test):
            this_event=labels[event_idx]
            if this_event==this_type:
                start_idx = latencies[event_idx]
                end_idx = np.minimum(tst_end_idx, start_idx+ns)

                yblock = y[start_idx:end_idx]
                noiseblock = noise[start_idx:end_idx]
                this_stat = np.append(this_stat, array([[sp.var(yblock)], [sp.var(noiseblock)]]).T, axis=0)

        rov_raw = this_stat[:,0]-this_stat[:,1]
        rov_nor = rov_raw/this_stat[:,0]
        rov = array([sp.mean(rov_raw), sp.mean(rov_nor)])
        stats[i,:] =  rov

    gc.enable()
    return stats, np.reshape(array(rerp_vec),(-1, ns)).T
Author: ankaniisc, Project: EEGLAB2Hadoop, Lines of code: 54, Source file: process.py

Example 9: variance_explained

def variance_explained(spikes, means=None, noise=None):
    """ Returns the fraction of variance in each channel that is explained
    by the means.

    Values below 0 or above 1 for large data sizes indicate
    that some assumptions were incorrect (e.g. about channel noise) and
    the results should not be trusted.

    :param dict spikes: Dictionary, indexed by unit, of
        :class:`neo.core.SpikeTrain` objects (where the ``waveforms``
        member includes the spike waveforms) or lists of
        :class:`neo.core.Spike` objects.
    :param dict means: Dictionary, indexed by unit, of lists of
        spike waveforms as :class:`neo.core.Spike` objects or numpy arrays.
        Means for units that are not in this dictionary will be estimated
        using the spikes.
        Default: None - means will be estimated from given spikes.
    :type noise: Quantity 1D
    :param noise: The known noise levels (as variance) per channel of the
        original data. This should be estimated from the signal periods
        that do not contain spikes, otherwise the explained variance
        could be overestimated. If None, the estimate of explained variance
        is done without regard for noise.
        Default: None
    :return dict: A dictionary of arrays, both indexed by unit. If ``noise``
        is ``None``, the  dictionary contains
        the fraction of explained variance per channel without taking noise
        into account. If ``noise`` is given, it contains the fraction of
        variance per channel explained by the means and given noise level
        together.
    """
    ret = {}
    if means is None:
        means = {}
    for u, spks in spikes.items():
        train = spks
        if not isinstance(train, neo.SpikeTrain):
            train = spikes_to_spike_train(spks)
        if u in means and means[u].waveform.shape[0] == train.waveforms.shape[1]:
            spike = means[u]
        else:
            spike = neo.Spike(0)
            spike.waveform = sp.mean(train.waveforms, axis=0)

        orig = sp.mean(sp.var(train.waveforms, axis=1), axis=0)
        waves = train.waveforms - spike.waveform
        new = sp.mean(sp.var(waves, axis=1), axis=0)

        if noise is not None:
            ret[u] = sp.asarray(1 - (new - noise) / orig)
        else:
            ret[u] = sp.asarray(1 - new / orig)

    return ret
Author: amchagas, Project: spykeutils, Lines of code: 54, Source file: sorting_quality_assesment.py
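
Stripped of the neo bookkeeping, the per-unit quantity is 1 - var(residual) / var(original), averaged over time within each channel. A minimal sketch under the assumption that waveforms are laid out as (spikes, samples, channels):

import scipy as sp
import numpy as np

waveforms = np.random.randn(200, 30, 4)  # (spikes, samples, channels), assumed layout
mean_wave = sp.mean(waveforms, axis=0)

orig = sp.mean(sp.var(waveforms, axis=1), axis=0)               # per-channel variance
resid = sp.mean(sp.var(waveforms - mean_wave, axis=1), axis=0)  # after subtracting the mean
explained = 1 - resid / orig  # fraction of variance explained by the mean waveform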

Example 10: _measureColorEntryMonitor

 def _measureColorEntryMonitor(self, colorentry, n=5):
     xyY_list = self.calibmonitor.measureGratingStimColor(
             colorentry.patch_stim_value, n)
     colorentry.monitor_xyY = (
             scipy.mean([xyY[0] for xyY in xyY_list]),
             scipy.mean([xyY[1] for xyY in xyY_list]),
             scipy.mean([xyY[2] for xyY in xyY_list]))
     colorentry.monitor_xyY_sd = (
             math.sqrt(scipy.var([xyY[0] for xyY in xyY_list])),
             math.sqrt(scipy.var([xyY[1] for xyY in xyY_list])),
             math.sqrt(scipy.var([xyY[2] for xyY in xyY_list])))
Author: derNarr, Project: achrolab, Lines of code: 11, Source file: calibrate.py

Example 11: MLE_iteration_constrain

def MLE_iteration_constrain(i1, i2, s1, s2, effective_inclusion_length, effective_skipping_length):
    psi1 = vec2psi(i1, s1, effective_inclusion_length, effective_skipping_length)
    psi2 = vec2psi(i2, s2, effective_inclusion_length, effective_skipping_length)
    iter_cutoff = 1
    iter_maxrun = 100
    count = 0
    previous_sum = 0
    beta_0 = sum(psi1) / len(psi1)
    beta_1 = sum(psi2) / len(psi2)
    var1 = 10 * scipy.var(numpy.array(psi1) - beta_0)
    var2 = 10 * scipy.var(numpy.array(psi2) - beta_1)
    if var1 <= 0.01:
        var1 = 0.01
    if var2 <= 0.01:
        var2 = 0.01
    print('var1'); print(var1); print('var2'); print(var2)
    while (iter_cutoff > 0.01) & (count <= iter_maxrun):
        count += 1
        # iteration of beta
        beta_0 = sum(psi1) / len(psi1)
        beta_1 = sum(psi2) / len(psi2)
        print('var1'); print(var1); print('var2'); print(var2)
        #if abs(sum(psi1)/len(psi1)-sum(psi2)/len(psi2))>cutoff:
        if (sum(psi1) / len(psi1)) > (sum(psi2) / len(psi2)):  # minimize psi2 in this case
            xopt = fmin_l_bfgs_b(myfunc_1, [sum(psi2) / len(psi2)], myfunc_der_1,
                                 args=[psi1, psi2, var1, var2],
                                 bounds=[[0.001, 0.999 - cutoff]], iprint=-1)
            theta2 = max(min(float(xopt[0]), 1 - cutoff), 0)
            theta1 = theta2 + cutoff
        else:  # minimize psi1 in this case
            xopt = fmin_l_bfgs_b(myfunc_2, [sum(psi1) / len(psi1)], myfunc_der_2,
                                 args=[psi1, psi2, var1, var2],
                                 bounds=[[0.001, 0.999 - cutoff]], iprint=-1)
            theta1 = max(min(float(xopt[0]), 1 - cutoff), 0)
            theta2 = theta1 + cutoff
        print('constrain_1xopt'); print('theta'); print(theta1); print(theta2); print(xopt)
        #else:
        #    theta1=sum(psi1)/len(psi1);theta2=sum(psi2)/len(psi2);
        beta_0 = theta1
        beta_1 = theta2
        # iteration of psi
        new_psi1 = []
        new_psi2 = []
        current_sum = 0
        likelihood_sum = 0
        print('constrain_2xopt')
        for i in range(len(psi1)):
            xopt = fmin_l_bfgs_b(myfunc_individual, [psi1[i]], myfunc_individual_der,
                                 args=[i1[i], s1[i], beta_0, var1,
                                       effective_inclusion_length, effective_skipping_length],
                                 bounds=[[0.01, 0.99]], iprint=-1)
            new_psi1.append(float(xopt[0]))
            current_sum += float(xopt[1])
            print(xopt)
            #likelihood_sum += myfunc_marginal(new_psi1[i], [i1[i], s1[i], beta_0, var1, effective_inclusion_length, effective_skipping_length])
        for i in range(len(psi2)):
            xopt = fmin_l_bfgs_b(myfunc_individual, [psi2[i]], myfunc_individual_der,
                                 args=[i2[i], s2[i], beta_1, var2,
                                       effective_inclusion_length, effective_skipping_length],
                                 bounds=[[0.01, 0.99]], iprint=-1)
            new_psi2.append(float(xopt[0]))
            current_sum += float(xopt[1])
            print(xopt)
            #likelihood_sum += myfunc_marginal(new_psi2[i], [i2[i], s2[i], beta_1, var2, effective_inclusion_length, effective_skipping_length])
        print('new_psi[0]'); print(new_psi1[0]); print(new_psi2[0])
        psi1 = new_psi1
        psi2 = new_psi2
        print('count'); print(count); print('previous_sum'); print(previous_sum); print('current_sum'); print(current_sum)
        if count > 1:
            iter_cutoff = abs(previous_sum - current_sum)
        previous_sum = current_sum
    #print('constrain');print(theta1);print(theta2);print(psi1);print(psi2);print(current_sum);print(likelihood_sum);
    #print(xopt);
    return [current_sum, [psi1, psi2, beta_0, beta_1, var1, var2]]
Author: CBIIT, Project: mats-nih, Lines of code: 49, Source file: GLM_MS_unpaired.py

Example 12: _box_cox_transform

 def _box_cox_transform(self, verbose=False, method='standard'):
     """
     Performs the Box-Cox transformation, over different ranges, picking the optimal one w. respect to normality.
     """
     from scipy import stats
     a = sp.array(self.values)
     if method == 'standard':
         vals = (a - min(a)) + 0.1 * sp.var(a)
     else:
         vals = a
     sw_pvals = []
     lambdas = sp.arange(-2.0, 2.1, 0.1)
     for l in lambdas:
         if l == 0:
             vs = sp.log(vals)
         else:
             vs = ((vals ** l) - 1) / l
         r = stats.shapiro(vs)
         if sp.isfinite(r[0]):
             pval = r[1]
         else:
             pval = 0.0
         sw_pvals.append(pval)
     i = sp.argmax(sw_pvals)
     l = lambdas[i]
     if l == 0:
         vs = sp.log(vals)
     else:
         vs = ((vals ** l) - 1) / l
     self._perform_transform(vs,"box_cox")
     log.debug('optimal lambda was %0.1f' % l)
     return True
Author: timeu, Project: PyGWAS, Lines of code: 32, Source file: phenotype.py
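
For comparison, scipy.stats ships a Box-Cox implementation that chooses lambda by maximum likelihood rather than by the Shapiro-Wilk grid search above; a sketch (the 0.1 * sp.var(a) shift mirrors the method above, since Box-Cox requires strictly positive input):

import scipy as sp
from scipy import stats

a = sp.array([2.3, 0.5, 1.7, 3.1, 0.9, 2.2])
vals = (a - min(a)) + 0.1 * sp.var(a)          # shift to strictly positive values
transformed, best_lambda = stats.boxcox(vals)  # lambda chosen by maximum likelihood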

Example 13: DataArrayStatisticsReport

def DataArrayStatisticsReport(parent, titleString, tempdata):
    scrolledText = tk_stxt.ScrolledText(parent, width=textboxWidth, height=textboxHeight, wrap=tk.NONE)
    scrolledText.insert(tk.END, titleString + '\n\n')
    
    # must at least have max and min
    minData = min(tempdata)
    maxData = max(tempdata)
    
    if maxData == minData:
        scrolledText.insert(tk.END, 'All data has the same value,\n')
        scrolledText.insert(tk.END, "value = %-.16E\n" % (minData))
        scrolledText.insert(tk.END, 'statistics cannot be calculated.')
    else:
        scrolledText.insert(tk.END, "max = %-.16E\n" % (maxData))
        scrolledText.insert(tk.END, "min = %-.16E\n" % (minData))
        
        # each statistic is reported independently so one failure does not stop the report
        statistics = [('mean', scipy.mean),
                      ('standard error of mean', scipy.stats.sem),
                      ('median', scipy.median),
                      ('variance', scipy.var),
                      ('std. deviation', scipy.std),
                      ('skew', scipy.stats.skew),
                      ('kurtosis', scipy.stats.kurtosis)]
        for name, func in statistics:
            try:
                temp = func(tempdata)
                scrolledText.insert(tk.END, "%s = %-.16E\n" % (name, temp))
            except Exception:
                scrolledText.insert(tk.END, "%s gave error in calculation\n" % name)
            
    return scrolledText
Author: tuyendothanh, Project: tkInterFit, Lines of code: 59, Source file: IndividualReports.py

Example 14: findAccessAnomalies

def findAccessAnomalies(data):
	# bucket accesses into short time intervals
	intervalDict = {}
	for access in data:
		# breaks to 10-second intervals
		seconds = int(access[3])
		seconds = seconds - (seconds%10)

		key = (int(access[1]), int(access[2]), seconds)
		if key in intervalDict:
			intervalDict[key].append(access)
		else:
			intervalDict[key] = [access]

	totAccess = [len(intervalDict[key]) for key in intervalDict]
	totAccessMean = sc.mean(totAccess)
	totAccessVar = sc.var(totAccess)
	# print totAccessMean
	# print totAccessVar

	clientAccess = []
	clientDict = {}
	for key in intervalDict:
		count = Counter([access[10] for access in intervalDict[key]])
		for ckey in count:
			clientAccess.append(count[ckey])
			clientDict[(key[0], key[1], key[2], ckey)] = count[ckey]

	clientAccessMean = sc.mean(clientAccess)
	clientAccessVar = sc.var(clientAccess)
	# print clientAccessMean
	# print clientAccessVar

	clientAttackProb = {}
	for key in clientDict:
		totProb = totAccessVar/pow((totAccessMean-len(intervalDict[(key[0],key[1],key[2])])),2)
		clientProb = clientAccessVar/pow((clientAccessMean-clientDict[key]),2)
		prob = (totProb + clientProb)/2
		clientAttackProb[key] = prob

	arr = []
	for i in range(10):
		minKey = min(clientAttackProb, key=clientAttackProb.get)
		arr.append((minKey, clientAttackProb[minKey]))
		clientAttackProb.pop(minKey, None)
	return arr
Author: chstan, Project: CS259D, Lines of code: 46, Source file: log.py
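
The score totAccessVar / (totAccessMean - count)**2 is Chebyshev's bound P(|X - mu| >= |count - mu|) <= sigma^2 / (count - mu)^2, so small values flag counts far outside the usual access pattern. A standalone sketch of the idea (names are illustrative, not from the project):

import scipy as sc

def chebyshev_anomaly_score(counts, x):
    """Upper bound on the probability of an observation at least as extreme as x."""
    mu, var = sc.mean(counts), sc.var(counts)
    return var / (mu - x) ** 2  # smaller means more anomalous

history = [12, 9, 11, 10, 13, 10]
print(chebyshev_anomaly_score(history, 95))  # tiny score: 95 accesses is anomalous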

Example 15: dumpSeries

 def dumpSeries(self):
     for series in self.series:
         print("name:", series.getFullName())
         
         for index, value in enumerate(series):
             print(value)
             #print("index=", index, ", value=", value)
         print("avg=", scipy.average(series), ", variance=", scipy.var(series),
               ", stddev=", scipy.std(series))
Author: mhrems, Project: welcomerain, Lines of code: 8, Source file: statManager.py


Note: The scipy.var function examples above were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are selected from open-source projects contributed by their developers; copyright in the source code belongs to the original authors, and redistribution or use should follow the corresponding project's license. Do not reproduce without permission.