

Python numpy.add Function Code Examples

This article collects typical usage examples of the numpy.add function in Python. If you are wondering how numpy.add works, how to call it, or what it looks like in real code, the curated examples below should help.


The following presents 15 code examples of the add function, ordered by popularity by default.
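
numpy.add performs element-wise addition with NumPy broadcasting and, like every ufunc, accepts an out= argument so the result can be written in place; many of the examples below rely on that pattern. A minimal sketch:

import numpy as np

a = np.array([1.0, 2.0, 3.0])
b = np.array([10.0, 20.0, 30.0])

np.add(a, b)            # array([11., 22., 33.]) -- equivalent to a + b
np.add(a, 5.0, out=a)   # in-place: a becomes array([6., 7., 8.])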

Example 1: __call__

 def __call__(self, values, clip=True, out=None):
     values = _prepare(values, clip=clip, out=out)
     np.multiply(values, self.exp, out=values)
     np.add(values, 1., out=values)
     np.log(values, out=values)
     np.true_divide(values, np.log(self.exp + 1.), out=values)
     return values
Author: AustereCuriosity, Project: astropy, Lines: 7, Source: stretch.py
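
The chained out= calls in Example 1 evaluate log(a * x + 1) / log(a + 1) without allocating temporaries. A standalone sketch of the same stretch (the exponent value here is hypothetical):

import numpy as np

a = 1000.0                     # hypothetical stretch exponent (self.exp above)
x = np.linspace(0.0, 1.0, 5)   # values assumed already clipped to [0, 1]
stretched = np.log(a * x + 1.0) / np.log(a + 1.0)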

Example 2: movementCompute

  def movementCompute(self, displacement, noiseFactor = 0):
    """
    Shift the current active cells by a vector.

    @param displacement (pair of floats)
    A translation vector [di, dj].
    """

    if noiseFactor != 0:
      displacement = copy.deepcopy(displacement)
      xnoise = np.random.normal(0, noiseFactor)
      ynoise = np.random.normal(0, noiseFactor)
      displacement[0] += xnoise
      displacement[1] += ynoise


    # Calculate delta in the module's coordinates.
    phaseDisplacement = (np.matmul(self.rotationMatrix, displacement) *
                         self.phasesPerUnitDistance)

    # Shift the active coordinates.
    np.add(self.activePhases, phaseDisplacement, out=self.activePhases)

    # In Python, (x % 1.0) can return 1.0 because of floating point goofiness.
    # Generally this doesn't cause problems, it's just confusing when you're
    # debugging.
    np.round(self.activePhases, decimals=9, out=self.activePhases)
    np.mod(self.activePhases, 1.0, out=self.activePhases)

    self._computeActiveCells()
    self.phaseDisplacement = phaseDisplacement
Author: dubing12, Project: htmresearch, Lines: 31, Source: location_modules.py

Example 3: morphological_laplace

def morphological_laplace(input, size = None, footprint = None,
                          structure = None, output = None,
                          mode = "reflect", cval = 0.0, origin = 0):
    """Multi-dimensional morphological laplace.

    Either a size or a footprint, or the structure must be provided. An
    output array can optionally be provided. The origin parameter
    controls the placement of the filter. The mode parameter
    determines how the array borders are handled, where cval is the
    value when mode is equal to 'constant'.
    """
    tmp1 = grey_dilation(input, size, footprint, structure, None, mode,
                         cval, origin)
    if isinstance(output, numpy.ndarray):
        grey_erosion(input, size, footprint, structure, output, mode,
                     cval, origin)
        numpy.add(tmp1, output, output)
        del tmp1
        numpy.subtract(output, input, output)
        return numpy.subtract(output, input, output)
    else:
        tmp2 = grey_erosion(input, size, footprint, structure, None, mode,
                            cval, origin)
        numpy.add(tmp1, tmp2, tmp2)
        del tmp1
        numpy.subtract(tmp2, input, tmp2)
        numpy.subtract(tmp2, input, tmp2)
        return tmp2
Author: AndreI11, Project: SatStressGui, Lines: 28, Source: morphology.py
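
Both branches of Example 3 compute the same quantity, dilation + erosion - 2 * input; the branch with a caller-supplied array simply reuses it as the work buffer. A minimal check of that identity using scipy.ndimage directly, assuming the function above is importable and called with a plain size argument:

import numpy as np
from scipy.ndimage import grey_dilation, grey_erosion

img = np.arange(9, dtype=float).reshape(3, 3)
expected = grey_dilation(img, size=3) + grey_erosion(img, size=3) - 2 * img
assert np.allclose(morphological_laplace(img, size=3), expected)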

Example 4: makeFeatureVec

def makeFeatureVec(words, model, num_features):

	# Pre-initialize an empty numpy array (for speed)
	featureVec = np.zeros((num_features,),dtype="float32")

	# Count number of words
	nwords = 0.

	# Loop over word by word
	# If in vocabulary, add its feature vector to the total
	for word in words.split():

		if word in model: 
			nwords += 1.
			featureVec = np.add(featureVec,model[word])
		else:
			missingWord = handleMissingWord(word, model, num_features)
			featureVec = np.add(featureVec, missingWord)
			nwords += 1.

	# Divide the result by the number of words to get the average
	featureVec = np.divide(featureVec,nwords)
	
	# If number of words zero
	if nwords == 0:
		featureVec = characterVec(words, model, num_features)
	
	return featureVec
Author: tazeek, Project: BullyDetect, Lines: 28, Source: avg_words.py
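
The running np.add accumulation in Example 4 is equivalent to stacking the per-word vectors and taking their mean. A minimal sketch of that simpler form, without the missing-word fallback, assuming model behaves like a word-to-vector mapping that supports `in` and indexing (e.g. gensim's KeyedVectors):

import numpy as np

def average_vector(words, model, num_features):
    # Stack the vectors of in-vocabulary words and average them.
    vecs = [model[w] for w in words.split() if w in model]
    if not vecs:
        return np.zeros(num_features, dtype="float32")
    return np.mean(vecs, axis=0).astype("float32")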

Example 5: test_pi_ops_nat

    def test_pi_ops_nat(self):
        idx = PeriodIndex(['2011-01', '2011-02', 'NaT', '2011-04'],
                          freq='M', name='idx')
        expected = PeriodIndex(['2011-03', '2011-04', 'NaT', '2011-06'],
                               freq='M', name='idx')

        self._check(idx, lambda x: x + 2, expected)
        self._check(idx, lambda x: 2 + x, expected)
        self._check(idx, lambda x: np.add(x, 2), expected)

        self._check(idx + 2, lambda x: x - 2, idx)
        self._check(idx + 2, lambda x: np.subtract(x, 2), idx)

        # freq with mult
        idx = PeriodIndex(['2011-01', '2011-02', 'NaT', '2011-04'],
                          freq='2M', name='idx')
        expected = PeriodIndex(['2011-07', '2011-08', 'NaT', '2011-10'],
                               freq='2M', name='idx')

        self._check(idx, lambda x: x + 3, expected)
        self._check(idx, lambda x: 3 + x, expected)
        self._check(idx, lambda x: np.add(x, 3), expected)

        self._check(idx + 3, lambda x: x - 3, idx)
        self._check(idx + 3, lambda x: np.subtract(x, 3), idx)
Author: brianholland, Project: pandas, Lines: 25, Source: test_period.py

Example 6: numeric_gemm_var1_flat

def numeric_gemm_var1_flat(A, B, C, mc, kc, nc, mr=1, nr=1):
  M, N = C.shape
  K = A.shape[0]

  mc = min(mc, M)
  kc = min(kc, K)
  nc = min(nc, N)  

  tA = numpy.zeros((mc, kc), dtype = numpy.float)
  tB = numpy.zeros((kc, N), dtype = numpy.float)  

  for k in range(0, K, kc):
    # Pack B into tB
    tB[:,:] = B[k:k+kc:,:]
    
    for i in range(0, M, mc):
      imc = i+mc
      # Pack A into tA
      tA[:,:] = A[i:imc,k:k+kc]
      
      for j in range(0, N): # , nc):
        # Cj += ABj + Cj
        # jnc = j+nc
        ABj = numpy.matrixmultiply(tA, tB[:,j])
        numpy.add(C[i:imc:,j], ABj, C[i:imc:,j])
        
        # Store Caux into memory
  return
Author: KapilRijhwani, Project: corepy, Lines: 28, Source: gemm.py
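
Note that numpy.matrixmultiply in Example 6 appears to be the old Numeric-era name and is not available in current NumPy releases. A hedged sketch of the inner update written with numpy.dot instead (same shapes: tA is (mc, kc), tB[:, j] is (kc,)):

ABj = numpy.dot(tA, tB[:, j])
numpy.add(C[i:imc, j], ABj, C[i:imc, j])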

Example 7: wavefunction

def wavefunction(coords, mocoeffs, gbasis, volume):
    """Calculate the magnitude of the wavefunction at every point in a volume.
    
    Attributes:
        coords -- the coordinates of the atoms
        mocoeffs -- mocoeffs for one eigenvalue
        gbasis -- gbasis from a parser object
        volume -- a template Volume object (will not be altered)
    """
    bfs = getbfs(coords, gbasis)
    
    wavefn = copy.copy(volume)
    wavefn.data = numpy.zeros( wavefn.data.shape, "d")

    conversion = convertor(1,"bohr","Angstrom")
    x = numpy.arange(wavefn.origin[0], wavefn.topcorner[0]+wavefn.spacing[0], wavefn.spacing[0]) / conversion
    y = numpy.arange(wavefn.origin[1], wavefn.topcorner[1]+wavefn.spacing[1], wavefn.spacing[1]) / conversion
    z = numpy.arange(wavefn.origin[2], wavefn.topcorner[2]+wavefn.spacing[2], wavefn.spacing[2]) / conversion

    for bs in range(len(bfs)):
        data = numpy.zeros( wavefn.data.shape, "d")
        for i,xval in enumerate(x):
            for j,yval in enumerate(y):
                for k,zval in enumerate(z):
                    data[i, j, k] = bfs[bs].amp(xval,yval,zval)
        numpy.multiply(data, mocoeffs[bs], data)
        numpy.add(wavefn.data, data, wavefn.data)
    
    return wavefn
Author: brianwolfe, Project: RMG-Py, Lines: 29, Source: volume.py
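
In Example 7, the nested loops evaluate each basis function on the grid, and numpy.multiply / numpy.add accumulate the orbital in place as a linear combination: wavefn.data = Σ_b mocoeffs[b] · φ_b(x, y, z).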

Example 8: average_perceptron

def average_perceptron(feature_matrix, labels, T):
    theta = np.empty_like(feature_matrix[0])
    theta.fill(0.)
    theta_sum = theta  
    theta_0 = 0.0
    theta_0_sum = theta_0
    ticker = 0
    update_track = 0
    
    while ticker < T:
        
        for i in range(len(feature_matrix)):        

            check_before_label = np.add(np.dot(theta, feature_matrix[i]),theta_0)

            check_mult_label = np.multiply(labels[i], check_before_label)
            if check_mult_label == 0 or check_mult_label < 0:
                update_track += 1                
                (theta, theta_0) = perceptron_single_step_update(feature_matrix[i], labels[i], theta, theta_0)
                theta_sum = np.add(theta, theta_sum)
                theta_0_sum += theta_0

        ticker += 1
        
    theta_average = np.divide(theta_sum, update_track)
    theta_0_average = theta_0_sum/update_track
        
    return (theta_average, theta_0_average)
Author: jiseokk, Project: review-classification, Lines: 28, Source: project_tool_functions.py
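
The two-part condition in Example 8 simply tests labels[i] * (theta · x + theta_0) <= 0, i.e. the sample is misclassified or lies on the decision boundary. A compact sketch of that mistake check:

import numpy as np

def needs_update(x, label, theta, theta_0):
    # Perceptron mistake condition: non-positive margin.
    return label * (np.dot(theta, x) + theta_0) <= 0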

Example 9: _wrapx

def _wrapx(input, output, nx):
    """
    Wrap the X format column Boolean array into an ``UInt8`` array.

    Parameters
    ----------
    input
        input Boolean array of shape (`s`, `nx`)

    output
        output ``Uint8`` array of shape (`s`, `nbytes`)

    nx
        number of bits
    """

    output[...] = 0  # reset the output
    nbytes = ((nx - 1) // 8) + 1
    unused = nbytes * 8 - nx
    for i in range(nbytes):
        _min = i * 8
        _max = min((i + 1) * 8, nx)
        for j in range(_min, _max):
            if j != _min:
                np.left_shift(output[..., i], 1, output[..., i])
            np.add(output[..., i], input[..., j], output[..., i])

    # shift the unused bits
    np.left_shift(output[..., i], unused, output[..., i])
Author: MQQ, Project: astropy, Lines: 29, Source: column.py
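
_wrapx packs the booleans most-significant-bit first, padding the unused low bits of the last byte with zeros. A small sketch of the expected packing for a hypothetical 10-bit row:

import numpy as np

bits = np.array([[1, 0, 0, 0, 0, 0, 0, 1, 1, 0]], dtype=bool)  # nx = 10
out = np.zeros((1, 2), dtype=np.uint8)                          # nbytes = 2
_wrapx(bits, out, 10)
# out[0, 0] == 0b10000001 (129); out[0, 1] == 0b10000000 (128), low 6 bits unused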

Example 10: discretize

    def discretize(self, time_slice_length):
        self.time_slice_length = time_slice_length

        # compute the total number of time-slices
        time_delta = (self.end_date - self.start_date)
        time_delta = time_delta.total_seconds()/60
        self.time_slice_count = int(time_delta // self.time_slice_length) + 1

        # parallelize tweet partitioning using a pool of processes (number of processes = number of cores).
        nb_processes = cpu_count()
        nb_tweets_per_process = self.size // nb_processes
        portions = []
        for i in range(0, self.size, nb_tweets_per_process):
            j = i + nb_tweets_per_process if i + nb_tweets_per_process < self.size else self.size
            portions.append((i, j))
        p = Pool()
        results = p.map(self.discretize_job, portions)
        results.sort(key=lambda x: x[0])

        # insert the time-slices number in the data frame and compute the final frequency matrices
        time_slices = []
        self.tweet_count = np.zeros(self.time_slice_count, dtype=np.int)
        self.global_freq = csr_matrix((len(self.vocabulary), self.time_slice_count), dtype=np.short)
        self.mention_freq = csr_matrix((len(self.vocabulary), self.time_slice_count), dtype=np.short)
        for a_tuple in results:
            time_slices.extend(a_tuple[1])
            self.tweet_count = np.add(self.tweet_count, a_tuple[2])
            self.global_freq = np.add(self.global_freq, a_tuple[3])
            self.mention_freq = np.add(self.mention_freq, a_tuple[4])
        self.df['time_slice'] = np.array(time_slices)
Author: CATS-Project, Project: CATS-TextMiningServices, Lines: 30, Source: corpus.py

Example 11: __add__

 def __add__(self, other):
     if isinstance(other, Raster):
         result = np.add(self.data, other.data)
     else:
         result = np.add(self.data, other)
     
     return Raster(None, result, self.nodata, self.driver, self.georef, self.proj)
Author: MingzeGao2, Project: lispy-parser, Lines: 7, Source: parser.py

Example 12: plot_selfish_cooperative

def plot_selfish_cooperative(num_runs):
	import seaborn as sns
	for exp in range(num_runs):
		fig  = plt.figure()
		cooperators = [3,4,5]
		selfish = [0,1,2]

		
		fname = Parameters.dirname + '/%i_assembly_%i.dat' % (exp, 0)
		t, pop = np.loadtxt(fname, unpack = True)

		cooperative_pop = [0.0]*len(pop[:-5])
		selfish_pop = [0.0]*len(pop[:-5])

		for c in cooperators:
			fname = Parameters.dirname + '/%i_assembly_%i.dat' % (exp, c)
			t, pop = np.loadtxt(fname, unpack = True)
			cooperative_pop = np.add(cooperative_pop,pop[:-5] )

		for s in selfish:
			fname = Parameters.dirname + '/%i_assembly_%i.dat' % (exp, s)
			t, pop = np.loadtxt(fname, unpack = True)
			selfish_pop = np.add(selfish_pop,pop[:-5] )

		ax = fig.add_subplot(1,1,1)
		ax.plot(t[:-5], selfish_pop, label = 'selfish', color = 'r')
		ax.plot(t[:-5], cooperative_pop, label = 'cooperative', color = 'g')
		ax.legend(loc = 'upper left')
		plt.xlabel('System Time')
		plt.ylabel('Total Abundance')

		#plt.show()
		plt.savefig(Parameters.dirname + '/%i_cooperative_vs_selfish.png' % exp)
		plt.close()
Author: SES591, Project: RNA_Cooperators, Lines: 34, Source: Analysis.py

Example 13: createChord

 def createChord(self,*args):
   if len(args) == 1:
     self.notesCombined = args[0]
   if len(args) == 2:
     self.notesCombined = np.add(args[0],args[1])
   if len(args) == 3:
     self.notesCombined = np.add(args[0],np.add(args[1],args[2]))
Author: dbouman1, Project: iccp-assignment-4, Lines: 7, Source: vibration.py

Example 14: __init__

	def __init__(self, limitsLow, limitsHigh, status = None):
		# Either merge two clusters (bottom-up)
		# or create a new micro-cluster (top-down)
		if status is "merging":
			# The first two input parameters are two newCluster() objects
			first = limitsLow
			second = limitsHigh
			self.limitsLow = [None]*ndim
			self.limitsHigh = [None]*ndim
			for i in xrange(ndim):
				self.limitsLow[i] = min(first.limitsLow[i], second.limitsLow[i])
				self.limitsHigh[i] = max(first.limitsHigh[i], second.limitsHigh[i])
			self.findKeys()
			self.weight = first.weight + second.weight
			self.Sum = np.add(first.Sum, second.Sum)
			self.sqSum = np.add(first.sqSum, second.sqSum)#self.computeSqSum()
			self.computeSSQ()
			self.computeCoG()
		else:
			# The first two parameters are the edges of the cluster
			self.limitsLow = limitsLow
			self.limitsHigh = limitsHigh
			self.findKeys()
			self.computeWeight()
			self.computeSum()
			self.computeSqSum()
			self.computeSSQ()
			self.computeCoG()
Author: elgicse, Project: clubs, Lines: 28, Source: CLUBS.py

Example 15: scale_samples

def scale_samples(params, bounds):
    '''
    Rescales samples in 0-to-1 range to arbitrary bounds.

    Arguments:
        bounds - list of lists of dimensions num_params-by-2
        params - numpy array of dimensions num_params-by-N,
        where N is the number of samples
    '''
    # Check bounds are legal (upper bound is greater than lower bound)
    b = np.array(bounds)
    lower_bounds = b[:, 0]
    upper_bounds = b[:, 1]

    if np.any(lower_bounds >= upper_bounds):
        raise ValueError("Bounds are not legal")

    # This scales the samples in-place, by using the optional output
    # argument for the numpy ufunctions
    # The calculation is equivalent to:
    #   sample * (upper_bound - lower_bound) + lower_bound
    np.add(np.multiply(params,
                       (upper_bounds - lower_bounds),
                       out=params),
           lower_bounds,
           out=params)
Author: eccentricaomy, Project: SALib, Lines: 26, Source: __init__.py
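
A minimal usage sketch for Example 15 with hypothetical values: three samples of two parameters drawn from the unit hypercube. For the broadcasting inside the function to work, params needs one row per sample and one column per parameter, i.e. shape (N, num_params), and a floating-point dtype so the in-place out= writes succeed:

import numpy as np

params = np.array([[0.0, 0.50],
                   [0.5, 0.25],
                   [1.0, 1.00]])
bounds = [[-1.0, 1.0], [10.0, 20.0]]
scale_samples(params, bounds)
# params is rescaled in place:
# first column now spans [-1, 1], second column spans [10, 20]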


Note: The numpy.add examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are drawn from open-source projects contributed by many developers, and copyright of the source code remains with the original authors; please consult each project's License before redistributing or reusing the code. Do not reproduce this article without permission.