

Python numpy.float Function Code Examples

This article collects typical usage examples of the numpy.float function in Python. If you are wondering what numpy.float does, how to call it, or what real-world code that uses it looks like, the curated examples below may help.


The 15 code examples of numpy.float shown below are sorted by popularity by default.
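
Before going through the examples, it helps to know what numpy.float actually is. In NumPy releases before 1.20, np.float is nothing more than an alias for Python's built-in float (a 64-bit double), distinct from the sized scalar types np.float32 and np.float64. The alias was deprecated in NumPy 1.20 and removed in 1.24, so the snippets below, which come from older code bases, will not run unchanged on current NumPy. A minimal sketch of the old usage and its modern replacement:

import numpy as np

# On NumPy < 1.20, np.float is the built-in float, so np.float("3.5") == 3.5.
# On NumPy >= 1.24 the alias is gone; use the built-in float instead,
# or np.float64 when an explicit 64-bit NumPy scalar is wanted.
x = float("3.5")                       # replacement for np.float("3.5")
y = np.float64("3.5")                  # explicit 64-bit NumPy scalar
print(x, y, np.float64 is np.double)   # 3.5 3.5 True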

Example 1: update_batch_cd1

def update_batch_cd1(para, data_v, layer=1):
    eta = para['eta']
    max_bsize = data_v.shape[0]
    if layer == 0: # input layer, otherwise they are binary
        data_h, gibbs_v, gibbs_h = sampling_nb(para, data_v)
    else:
        data_h, gibbs_v, gibbs_h = sampling(para, data_v)
    
    pos_delta_w = np.zeros((para['v_num'], para['h_num']))
    neg_delta_w = np.zeros((para['v_num'], para['h_num']))
    for i in range(max_bsize):
        pos_delta_w += matu.matrix_times(data_v[i], data_h[i])
        neg_delta_w += matu.matrix_times(gibbs_v[i], gibbs_h[i])    
    delta_w_pos = eta * pos_delta_w/np.float(max_bsize)
    delta_w_neg = eta * neg_delta_w/np.float(max_bsize)
    para['w'] += delta_w_pos
    para['w'] -= delta_w_neg
    delta_a = data_v - gibbs_v
    delta_b = data_h - gibbs_h
    delta_a = eta * np.average(delta_a,0)
    delta_b = eta * np.average(delta_b,0)
    para['a'] += delta_a
    para['b'] += delta_b
    #print delta_w_pos.max(), delta_w_neg.max()
    return para
Author: qian-liu, Project: iconip2016, Lines: 25, Source: sigmoid_utils.py

Example 2: fit

    def fit(self, X, y):
        n_samples = X.shape[0]
        n_features = X.shape[1]
        n_classes = 2
        n_fvalues = 2

        if n_samples != len(y):
            raise ValueError('Mismatched number of samples.')

        nY = np.zeros(n_classes, dtype=np.int)
        for i in range(n_samples):
            nY[y[i]] += 1

        self.pY_ = np.empty(n_classes, dtype=np.float)
        for i in range(n_classes):
            self.pY_[i] = nY[i] / np.float(n_samples)

        nXY = np.zeros((n_features, n_fvalues, n_classes), dtype=np.int)
        for i in range(n_samples):
            for j in range(n_features):
                nXY[j, X[i, j], y[i]] += 1

        self.pXgY_ = np.empty((n_features, n_fvalues, n_classes), dtype=np.float)
        for j in range(n_features):
            for xi in range(n_fvalues):
                for yi in range(n_classes):
                    self.pXgY_[j, xi, yi] = nXY[j, xi, yi] / np.float(nY[yi])
Author: inazo1115, Project: snippet, Lines: 27, Source: naive_bayes.py
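
The fit method above estimates the class priors pY_ and the per-feature conditional probabilities pXgY_ by plain counting. The surrounding class is not part of the excerpt, so the sketch below only reproduces the prior computation on a made-up binary dataset:

import numpy as np

# Toy data: 4 samples, 2 binary features, binary labels (values are invented).
X = np.array([[0, 1], [1, 1], [1, 0], [0, 0]])
y = np.array([0, 1, 1, 0])

nY = np.bincount(y, minlength=2)   # class counts, as in the nY loop above
pY = nY / float(X.shape[0])        # class priors, i.e. pY_
print(pY)                          # [0.5 0.5]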

Example 3: _compute_pvalue

    def _compute_pvalue(obs_val, sim):
        """
        Compute the p-value given an observed value of a test statistic
        and some simulations of that same test statistic.

        Parameters
        ----------
        obs_val : float
            The observed value of the test statistic in question

        sim: iterable
            A list or array of simulated values for the test statistic

        Returns
        -------
        pval : float [0, 1]
            The p-value for the test statistic given the simulations.

        """

        # cast the simulations as a numpy array
        sim = np.array(sim)

        # find all simulations that are larger than
        # the observed value
        ntail = sim[sim > obs_val].shape[0]

        # divide by the total number of simulations
        pval = np.float(ntail) / np.float(sim.shape[0])

        return pval
Author: matteobachetti, Project: stingray, Lines: 31, Source: parameterestimation.py
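
The p-value here is simply the fraction of simulated statistics that exceed the observed value. A self-contained check with invented numbers:

import numpy as np

rng = np.random.default_rng(0)
sim = rng.standard_normal(10000)   # stand-in for the simulated statistics
obs_val = 2.0                      # stand-in for the observed statistic

ntail = sim[sim > obs_val].shape[0]
pval = float(ntail) / float(sim.shape[0])
print(pval)                        # roughly 0.02 for a standard normal

Note that with this estimator a p-value of exactly 0 is possible when no simulation exceeds the observed value; some implementations add 1 to both the numerator and the denominator to avoid reporting 0.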

Example 4: getTimeseriesNemData

	def getTimeseriesNemData(self, state, startDate, endDate):
		# AEMO data is in AEST - GMT + 10
		tz = timezone.SydneyTimezone()
		startDate = startDate.astimezone(tz)
		endDate = endDate.astimezone(tz)

		folderPath = "./nemData"
		
		data = np.loadtxt(folderPath+"/"+state+".csv", delimiter=',', dtype=str, skiprows=1, usecols=None, unpack=False)
		timeseries = None
		for i in np.arange(data.shape[0]):
			date = datetime.datetime(year=int(data[i][0]), month = int(data[i][1]), day = int(data[i][2]), hour = int(data[i][3]), minute=int(data[i][4]), tzinfo=timezone.SydneyTimezone())
			if date >= startDate and date <= endDate:
				timePeriod = np.zeros(shape=(7))
				timePeriod[0] = int(data[i][0])
				timePeriod[1] = int(data[i][1])
				timePeriod[2] = int(data[i][2])
				timePeriod[3] = int(data[i][3])
				timePeriod[4] = int(data[i][4])
				timePeriod[5] = np.float(data[i][5])
				timePeriod[6] = np.float(data[i][6])
				if timeseries is None:
					timeseries = timePeriod
				else:
					timeseries = np.vstack((timeseries, timePeriod))

		return timeseries
Author: lukasmarshall, Project: solar-data-processing, Lines: 27, Source: databaseManager.py

Example 5: __init__

 def __init__(self, coefficients, p1=None, p2=None, p3=None):
     """
     Initializes a plane from the 4 coefficients a, b, c and d of ax + by + cz + d = 0
     :param coefficients: abcd coefficients of the plane
     """
     #Initializes the normal vector
     self.normal_vector = np.array([coefficients[0], coefficients[1], coefficients[2]], np.float)
     normv = np.linalg.norm(self.normal_vector)
     self.normal_vector /= normv
     nonzeros = np.argwhere(self.normal_vector != 0.0).flatten()
     zeros = list(set(range(3))-set(nonzeros))
     if len(nonzeros) == 0:
         raise ValueError("Normal vector is equal to 0.0")
     if self.normal_vector[nonzeros[0]] < 0.0:
         self.normal_vector = -self.normal_vector
         dd = -np.float(coefficients[3]) / normv
     else:
         dd = np.float(coefficients[3]) / normv
     self._coefficients = np.array([self.normal_vector[0],
                                   self.normal_vector[1],
                                   self.normal_vector[2],
                                   dd], np.float)
     self._crosses_origin = np.isclose(dd, 0.0, atol=1e-7, rtol=0.0)
     self.p1 = p1
     self.p2 = p2
     self.p3 = p3
     #Initializes 3 points belonging to the plane (useful for some methods)
     if self.p1 is None:
         self.init_3points(nonzeros, zeros)
     self.vector_to_origin = dd * self.normal_vector
Author: adozier, Project: pymatgen, Lines: 30, Source: coordination_geometry_utils.py
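
Assuming the rest of the pymatgen Plane class is available (the excerpt shows only __init__, which also calls init_3points defined elsewhere in the class), a hypothetical instantiation for the plane x = 2, i.e. 1*x + 0*y + 0*z - 2 = 0, would look like this:

# Requires the full Plane class and an older NumPy where np.float still exists.
plane = Plane([1.0, 0.0, 0.0, -2.0])
print(plane.normal_vector)      # [1. 0. 0.]
print(plane.vector_to_origin)   # [-2. 0. 0.]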

Example 6: jaccard

def jaccard(*mhs):
    '''
    Compute the Jaccard similarity measure for multiple MinHash objects.
    '''
    if len(mhs) < 2:
        raise ValueError("Less than 2 MinHash objects were given")
    seed = mhs[0].seed
    if any(seed != m.seed for m in mhs):
        raise ValueError("Cannot compare MinHash objects with\
                different seeds")
    num_perm = mhs[0].hashvalues.size
    if any(num_perm != m.hashvalues.size for m in mhs):
        raise ValueError("Cannot compare MinHash objects with\
                different numbers of permutation functions")
    if len(mhs) == 2:
        m1, m2 = mhs
        return np.float(np.count_nonzero(m1.hashvalues == m2.hashvalues)) /\
                np.float(m1.hashvalues.size)
    # TODO: find a way to compute intersection for more than 2 using numpy
    intersection = 0
    for i in range(num_perm):
        phv = mhs[0].hashvalues[i]
        if all(phv == m.hashvalues[i] for m in mhs):
            intersection += 1
    return float(intersection) / float(num_perm)
Author: GitManager, Project: datasketch, Lines: 25, Source: minhash.py
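
For two MinHash objects the estimate is just the fraction of positions at which the two hashvalues arrays agree. A standalone sketch of that comparison using plain arrays in place of real MinHash objects (the values are invented):

import numpy as np

hv1 = np.array([3, 7, 1, 9, 4], dtype=np.uint64)   # stand-in for m1.hashvalues
hv2 = np.array([3, 2, 1, 9, 8], dtype=np.uint64)   # stand-in for m2.hashvalues

estimate = np.count_nonzero(hv1 == hv2) / float(hv1.size)
print(estimate)                                    # 0.6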

Example 7: incentive

def incentive(array, weight_factor):
    """Calculate the incentivization factor, for encouraging Tor exit relay
    operators in countries with less exit relays to run more nodes.

    :param array: A two-dimensional 3xN array of country codes, exit
        probabilities, and factors.
    :param float weight_factor: Should be winsorized standard deviation of
        exit probabilities, or trimmed standard deviation of exit
        probabilities.
    """
    array_copy  = numpy.asarray(array[:,1], dtype=numpy.float)
    main_stddev = numpy.float(array_copy.std())

    incentivized = list()
    for ccname, pexit, _ in array[::]:
        ccname = numpy.string_(ccname)  ## oh, Python2.x, how i despise you…
        pexit  = numpy.float(pexit)

        weighted = main_stddev - weight_factor + pexit
        inverted = 1. / (abs(weighted)**2)
        shifted  = inverted * 10.
        factor   = shifted

        incentivized.append({'cc': ccname,
                             'p_exit': pexit,
                             'incentive_factor': factor})
    return incentivized
Author: torservers, Project: exit-funding, Lines: 27, Source: exit-probability-factors.py

Example 8: define_power_sweep_vec

    def define_power_sweep_vec(self, pow_vec, cwfrequency, BW, time, set_time='dwell'):
        '''
        Define a sweep in power with a power vector.

        Input:
            pow_vec [dBm] : define the power vector
            cwfrequency [GHz]: constant wave frequency of the VNA
            time [s]: if set_time==dwell it is a delay for each partial measurement in the segment
                      if set_time==sweeptime, we define the duration of the sweep in the segment
            BW [Hz]: define the Bandwidth

        Output:
            None
        '''
        logging.debug(__name__ + ' : making a sweep in power')

        #Delete all the remaining segments from previous measurement
        self._visainstrument.write('SEGM:DEL:ALL')

        if np.float(self._visainstrument.query('SEGM:COUNT?')) != 0:
            print('Error: segments not deleted')

        point = len(pow_vec)
        for i in np.arange(point):
            self.define_segment(i+1, cwfrequency, cwfrequency,1, pow_vec[i],time, BW, set_time )

        if np.float(self._visainstrument.query('SEGM:COUNT?')) != point:
            print('Error: not the number of segment wanted')
Author: nicolas-experiment, Project: python_drivers, Lines: 28, Source: ZNB20.py

Example 9: find_extrema

		def find_extrema():
			import numpy as np
			extrema_1 = np.float(self.x_initial + (- 2*self.c[0] + (4*self.c[0]**2 - 12*self.b[0]*self.d[0])**.5)/(6*self.d[0]))
			extrema_2 = np.float(self.x_initial + (- 2*self.c[0] - (4*self.c[0]**2 - 12*self.b[0]*self.d[0])**.5)/(6*self.d[0]))
			extrema_3 = np.float(self.x_break + (- 2*self.c[1] + (4*self.c[1]**2 - 12*self.b[1]*self.d[1])**.5)/(6*self.d[1]))
			extrema_4 = np.float(self.x_break + (- 2*self.c[1] - (4*self.c[1]**2 - 12*self.b[1]*self.d[1])**.5)/(6*self.d[1]))
			return(extrema_1,extrema_2,extrema_3,extrema_4)
Author: danhagen, Project: basketball, Lines: 7, Source: plot_three_configuration_space_trajectory.py
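
Each ± pair above gives the stationary points of one cubic segment f(x) = a + b*(x - x0) + c*(x - x0)**2 + d*(x - x0)**3, obtained by solving f'(x) = b + 2*c*(x - x0) + 3*d*(x - x0)**2 = 0 with the quadratic formula. A quick standalone check of the same expression with invented coefficients:

import numpy as np

b, c, d = 1.0, -3.0, 1.0   # hypothetical spline coefficients
x0 = 0.0
extrema = x0 + (-2 * c + np.array([1.0, -1.0]) * np.sqrt(4 * c**2 - 12 * b * d)) / (6 * d)
print(extrema)                           # approximately [1.816 0.184]
print(np.roots([3 * d, 2 * c, b]) + x0)  # the same values from the derivative's roots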

Example 10: _percentage_distance

def _percentage_distance(canny_in, canny_out, r):
    diamond = np.array([[0, 1, 0], [1, 1, 1], [0, 1, 0]])

    E_1 = scipy.ndimage.morphology.binary_dilation(canny_in, structure=diamond, iterations=r)
    E_2 = scipy.ndimage.morphology.binary_dilation(canny_out, structure=diamond, iterations=r)

    return 1.0 - np.float(np.sum(E_1 & E_2))/np.float(np.sum(E_1))
Author: scikit-video, Project: scikit-video, Lines: 7, Source: scene.py
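
_percentage_distance dilates both edge maps with a diamond structuring element and measures how much of the dilated first map is left uncovered by the dilated second one. A self-contained sketch on two small synthetic edge maps; scipy.ndimage.binary_dilation is the current spelling of the same function used above:

import numpy as np
import scipy.ndimage

canny_in = np.zeros((20, 20), dtype=bool)
canny_out = np.zeros((20, 20), dtype=bool)
canny_in[5:15, 10] = True    # a vertical edge
canny_out[5:15, 12] = True   # the same edge shifted two pixels to the right

diamond = np.array([[0, 1, 0], [1, 1, 1], [0, 1, 0]])
E_1 = scipy.ndimage.binary_dilation(canny_in, structure=diamond, iterations=2)
E_2 = scipy.ndimage.binary_dilation(canny_out, structure=diamond, iterations=2)
print(1.0 - np.sum(E_1 & E_2) / float(np.sum(E_1)))   # fraction of E_1 not covered by E_2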

Example 11: define_power_sweep

    def define_power_sweep(self, startpow, stoppow, steppow, cwfrequency, BW, time, set_time='dwell'):
        '''
        Make a sweep in power where startpow can be greater than stoppow

        Input:
            startpow [dBm] : define the power at which begin the sweep
            stoppow [dBm]: define the power at which finish the sweep
            steppow [dBm]: define the step of the sweep
            cwfrequency [GHz]: constant wave frequency of the VNA
            time [s]: if set_time==dwell it is a delay for each partial measurement in the segment
                      if set_time==sweeptime, we define the duration of the sweep in the segment
            BW [Hz]: define the Bandwidth

        Output:
            None
        '''
        logging.debug(__name__ + ' : making a sweep in power from %s to %s with a step of %s' % (startpow, stoppow, steppow))

        #Destroy all the remaining segments from previous measurement
        self._visainstrument.write('SEGM:DEL:ALL')

        if np.float(self._visainstrument.query('SEGM:COUNT?'))!=0:
            print('Error: segments not deleted')

        pow_vec=np.arange(startpow, stoppow + steppow, steppow)
        point=len(pow_vec)
        for i in np.arange(point):
            self.define_segment(i+1, cwfrequency, cwfrequency,1, pow_vec[i],time, BW, set_time )

        if np.float(self._visainstrument.query('SEGM:COUNT?'))!=point:
            print('Error: not the number of segment wanted')
Author: nicolas-experiment, Project: python_drivers, Lines: 31, Source: ZNB20.py

Example 12: buildMatrixExplicit

def buildMatrixExplicit(ratings):  ## Build the matrix in a binary way
    u = ratings['userid'].drop_duplicates() ## we extract the userid's of this region
    u.index = range(0,len(u))   ## we change the index so it will go from 0 to the number of users
    b = ratings['band'].drop_duplicates()  ## An array with the names of the bands
    b.index = range(0,len(b))   ## We change the index of the array so each band has an unique number
    pairs = ratings.loc[:,["userid","band","rating"]] ## For this method we need the userid, and the band. Later on we will count the number of times a band appears for each user profile
    pairs.loc[pairs.rating=="Yes",'rating'] = np.float(5.0) ## We change the implicit values
    pairs.loc[pairs.rating=="Maybe",'rating'] = np.float(4.0)
    pairs['rating'] = pairs['rating'].astype(float) ## We change the column of the ratings to float
    g = pairs.groupby(['userid'])
    rows = []
    cols = []
    rat = []
    for name, group in g:   ## name is the userid by which we group before, group is all the band names and its ratings
        ### We are going to group each of the user groups by band to calculate the mean ratings for each band
        g2 = group.loc[:,['band','rating']].groupby('band')
        meanRatings = g2['rating'].mean()
        z  = list(group['band']) ## A list with the bands that the user have been to. The names can be repeated
        d = Counter(z)  ## A dictionary with the distinct names of bands and the number of occurences of each one
        for band, count in d.items(): ## Eg. band "Arctic monkeys" count = 3
            cols.append(b[b==band].index[0]) ## We append the position of the band in the matrix
            freq = len(list(c for c in d.values() if c <= count)) ## The number of bands which count <= (current band count)
            r = (meanRatings[band] * freq)/len(d) ## We do this in a scale [0,5]
            rat.append(r)
        userNo = (u[u==name].index[0])### name is the user
        rows.extend([userNo]*len(d)) ## We extend with the row position of the user repeated n times where n is the number of columns
    result = csr_matrix((list(map(float, rat)), (rows, cols)), shape=(len(u), len(b)))
    return(result)
Author: Brandonage, Project: ConcertTweets, Lines: 28, Source: contextRec.py

Example 13: retick

def retick(ax, axname):
  if axname == 'x':
    rng = ax.get_xlim()
  elif axname == 'y':
    rng = ax.get_ylim()
  else:
    rng = ax.get_zlim()

  mn = np.int(np.floor(rng[0]))
  mx = np.int(np.ceil(rng[1]))
  ticks = []
  ticklabels = []
  for i in range(mn, mx):
    if np.float(i) >= rng[0]:
      ticks.append(np.float(i))
      ticklabels.append('$10^{' + ("%d" % i) + '}$')

  if axname == 'x':
    ax.set_xticks(ticks)
    ax.set_xticklabels(ticklabels)
  elif axname == 'y':
    ax.set_yticks(ticks)
    ax.set_yticklabels(ticklabels)
  else:
    ax.set_zticks(ticks)
    ax.set_zticklabels(ticklabels)

  return
Author: erikleitch, Project: basho-perf-scripts, Lines: 28, Source: erltestutil.py
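
retick assumes the plotted data are already log10 values and relabels the integer tick positions as powers of ten. A hypothetical matplotlib usage, on a NumPy version where np.int and np.float still exist:

import numpy as np
import matplotlib.pyplot as plt

fig, ax = plt.subplots()
ax.plot(np.log10([1, 10, 100, 1000]), [1, 2, 3, 4])   # x data already in log10
retick(ax, 'x')                                       # integer ticks relabeled as 10^0 ... 10^3
plt.show()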

Example 14: mkKernel

def mkKernel(ks, sig, th , om, ps, gm):
        """ Check the kernel size"""
        if not ks%2:
            exit(1)

        """ Definition of the varibles"""
        theta = th 
        psi = ps 
        sigma = np.float(sig)
        omega = np.float(om)
        gamma = gm
        
        """Creating the kernel size"""        
#        xs=np.linspace(-1*ks,1*ks/10.,ks)
#        ys=np.linspace(-1*ks/10.,1*ks/10.,ks)
        xs=np.linspace(-1*ks,1*ks,ks)
        ys=np.linspace(-1*ks,1*ks,ks)

        """Creating the kernel"""        
        x,y = np.meshgrid(xs,ys)        

 
        #return np.array( np.exp(-0.5*(x_theta**2+gamma * y_theta**2)/sigma**2)*np.cos(2.*np.pi*x_theta/lmbd + psi),dtype=np.float32)
        gabor_kernel =  np.array( np.exp(-(x**2 + y**2)/(2*sigma**2) )*np.cos(2.*np.pi*(x*np.cos(theta)+y*np.sin(theta) ) * omega), dtype=np.float32)     
        return gabor_kernel
        """  Return the kernel                  The sigma signal                                           The sinus wave                                   """
Author: frederikhagel, Project: NormalVectorTransformFunctions, Lines: 26, Source: testing_gabor.py
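
A hypothetical call to mkKernel, on a NumPy version where np.float still exists (the parameter values are invented; ks must be odd to pass the size check):

import numpy as np

kernel = mkKernel(ks=21, sig=4.0, th=np.pi / 4, om=0.2, ps=0.0, gm=1.0)
print(kernel.shape, kernel.dtype)   # (21, 21) float32

Note that psi and gamma are assigned in the excerpt but do not appear in the expression that is finally returned; only the commented-out variant uses them.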

Example 15: _compute_rdiff_stats

    def _compute_rdiff_stats(self, var1, var2):
        """Compute the relative difference statistics of var1 and var2.

        vars_differ must already be set for self."""
        if (not self.vars_differ() or len(var1) == 0):
            rdiff_max = np.float('nan')
            rdiff_maxloc = -1
            rdiff_logavg = np.float('nan')
        else:
            differences = self._compute_diffs(var1, var2) != 0
            diff_vals = self._compute_diffs(var1, var2)[differences]
            maxvals = np.maximum(np.abs(var1), np.abs(var2))[differences]
            rdiff = np.abs(diff_vals) / maxvals.astype(np.float)
            rdiff_max = np.max(rdiff)
            rdiff_maxloc = self._compute_max_loc(rdiff, differences)
            numDiffs = np.sum(differences)
            if numDiffs > 0:
                # Compute the sum of logs by taking the products of the logands; +1 if the logand is 0
                # Then take the log of the result
                # Since the log(1) is 0, this does not affect the final sum
                rdiff_prod = np.prod(rdiff)
                if rdiff_prod != np.float('inf') and rdiff_prod > 0.0:
                    rdiff_logsum = -math.log10(rdiff_prod)
                else:
                    # We need to use a different (slower, less accurate) method of computing this,
                    # the product either overflowed or underflowed due to the small exponent
                    rdiff_logs = np.log10(rdiff)
                    rdiff_logsum = -np.sum(rdiff_logs)
                rdiff_logavg = rdiff_logsum / np.sum(differences)
            else:
                rdiff_logavg = np.float('nan')
        return rdiff_max, rdiff_maxloc, rdiff_logavg
Author: mfdeakin-sandia, Project: cprnc_python, Lines: 32, Source: vardiffs.py


Note: The numpy.float examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by various developers, and the copyright of the source code belongs to the original authors; please consult the corresponding project's license before distributing or reusing it. Do not reproduce this article without permission.