

Python numpy.arr Function Code Examples

This article collects typical usage examples of Python's numpy.arr function. If you are wondering how the arr function is used in practice, how to call it, or what real-world code using it looks like, the curated examples below should help.


A total of 15 code examples of the arr function are shown below, sorted by popularity by default.
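
Note: NumPy itself does not export a public function named arr. In the projects excerpted below, arr is an alias created at import time, most commonly for numpy.asarray or numpy.array. Below is a minimal sketch of that convention (the exact alias each project uses is an assumption):

from numpy import asarray as arr   # assumed alias; some projects instead use "from numpy import array as arr"

a = arr([1, 2, 3], float)          # same as numpy.asarray([1, 2, 3], dtype=float)
print(a, a.dtype)                  # [1. 2. 3.] float64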

Example 1: drawing_housing_units

def drawing_housing_units(db, frequencies, weights, index_matrix, sp_matrix, pumano=0):

    dbc = db.cursor()
    dbc.execute("select hhlduniqueid from hhld_pums group by hhlduniqueid")
    hhld_colno = dbc.rowcount
    dbc.execute("select gquniqueid from gq_pums group by gquniqueid")
    gq_colno = dbc.rowcount

    hh_colno = hhld_colno + gq_colno
    synthetic_population = []
    j = 0
    for i in index_matrix[:hh_colno, :]:
        if i[1] == i[2] and frequencies[j] > 0:
            synthetic_population.append([sp_matrix[i[1] - 1, 2] + 1, frequencies[j], i[0]])
            print "hhid single", sp_matrix[i[1] - 1, 2]
        else:
            cumulative_weights = weights[sp_matrix[i[1] - 1 : i[2], 2]].cumsum()
            probability_distribution = cumulative_weights / cumulative_weights[-1]
            probability_lower_limit = probability_distribution[:-1].tolist()
            probability_lower_limit.insert(0, 0)
            probability_lower_limit = arr(probability_lower_limit)
            random_numbers = random.rand(frequencies[j])
            freq, probability_lower_limit = histogram(random_numbers, probability_lower_limit)
            hhldid_by_type = sp_matrix[i[1] - 1 : i[2], 2]

            for k in range(len(freq)):
                if freq[k] != 0:
                    synthetic_population.append([hhldid_by_type[k] + 1, freq[k], i[0]])
        j = j + 1

    dbc.close()
    db.commit()
    return arr(synthetic_population)
Author: psrc | Project: urbansim | Lines: 33 | Source file: drawing_households.py

Example 2: create_adjusted_frequencies

def create_adjusted_frequencies(db, synthesis_type, control_variables, pumano, tract= 0, bg= 0):
    dbc = db.cursor()
    dummy_order_string = create_aggregation_string(control_variables)
    puma_table = ('%s_%s_joint_dist'%(synthesis_type, pumano))
    pums_table = ('%s_%s_joint_dist'%(synthesis_type, 0))

    dbc.execute('select * from %s where tract = %s and bg = %s order by %s' %(puma_table, tract, bg, dummy_order_string))
    puma_joint = arr(dbc.fetchall(), float)
    puma_prob = puma_joint[:,-2] / sum(puma_joint[:,-2])
    upper_prob_bound = 0.5 / sum(puma_joint[:,-2])

    dbc.execute('select * from %s order by %s' %(pums_table, dummy_order_string))
    pums_joint = arr(dbc.fetchall(), float)
    pums_prob = pums_joint[:,-2] / sum(pums_joint[:,-2])


    puma_adjustment = (pums_prob <= upper_prob_bound) * pums_prob + (pums_prob > upper_prob_bound) * upper_prob_bound
    correction = 1 - sum((puma_prob == 0) * puma_adjustment)
    puma_prob = ((puma_prob != 0) * correction * puma_prob +
                 (puma_prob == 0) * puma_adjustment)
    puma_joint[:,-2] = sum(puma_joint[:,-2]) * puma_prob

    dbc.execute('delete from %s where tract = %s and bg = %s'%(puma_table, tract, bg))
    puma_joint_dummy = str([tuple(i) for i in puma_joint])
    dbc.execute('insert into %s values %s' %(puma_table, puma_joint_dummy[1:-1]))
    dbc.close()
    db.commit()
Author: sfcta | Project: popgen1 | Lines: 27 | Source file: adjusting_sample_joint_distribution.py

Example 3: predictSoft

    def predictSoft(self, X):
        """
        This method makes a "soft" nearest-neighbor prediction on test data.

        Parameters
        ----------
        X : M x N numpy array 
            M = number of testing instances; N = number of features.  
        """
        mtr,ntr = arr(self.X_train).shape      # get size of training data
        mte,nte = arr(X).shape                 # get size of test data
        if nte != ntr:
            raise ValueError('Training and prediction data must have same number of features')
        
        num_classes = len(self.classes)
        prob = np.zeros((mte,num_classes))     # allocate memory for class probabilities
        K = min(self.K, mtr)                   # (can't use more neighbors than training data points)
        for i in range(mte):                   # for each test example...
            # ...compute sum of squared differences...
            dist = np.sum(np.power(self.X_train - arr(X)[i,:], 2), axis=1)
            # ...find nearest neighbors over training data and keep nearest K data points
            sorted_dist = np.sort(dist, axis=0)[0:K]                
            indices = np.argsort(dist, axis=0)[0:K]             
            wts = np.exp(-self.alpha * sorted_dist)
            count = []
            for c in range(len(self.classes)):
                # total weight of instances of that class
                count.append(np.sum(wts[self.Y_train[indices] == self.classes[c]]))
            count = np.asarray(count)
            prob[i,:] = np.divide(count, np.sum(count))       # save (soft) results
        return prob
Author: TullyHanson | Project: YelpChallenge | Lines: 31 | Source file: knn.py

Example 4: predict

    def predict(self, X):
        """
        This method makes a nearest neighbor prediction on test data X.
    
        Parameters
        ----------
        X : numpy array 
            N x M numpy array that contains N data points with M features. 
        """
        ntr,mtr = arr(self.X_train).shape              # get size of training data
        nte,mte = arr(X).shape                         # get size of test data

        if mtr != mte:
            raise ValueError('knnRegress.predict: training and prediction data must have the same number of features')

        Y_te = np.tile(self.Y_train[0], (nte, 1))      # make Y_te the same data type as Y_train
        K = min(self.K, ntr)                           # can't have more than ntr neighbors

        for i in range(nte):
            dist = np.sum(np.power((self.X_train - X[i]), 2), axis=1)  # compute sum of squared differences
            sorted_dist = np.sort(dist, axis=0)[:K]           # find nearest neighbors over X_train and...
            sorted_idx = np.argsort(dist, axis=0)[:K]         # ...keep nearest K data points
            wts = np.exp(-self.alpha * sorted_dist)
            Y_te[i] = arr(wts) * arr(self.Y_train[sorted_idx]).T / np.sum(wts)  # weighted average

        return Y_te
Author: TullyHanson | Project: YelpChallenge | Lines: 26 | Source file: knn.py

Example 5: drawing_housing_units_nogqs

def drawing_housing_units_nogqs(db, frequencies, weights, index_matrix, sp_matrix, pumano = 0):

    dbc = db.cursor()
    dbc.execute('select hhlduniqueid from hhld_sample group by hhlduniqueid')
    hhld_colno = dbc.rowcount

    hh_colno = hhld_colno
    synthetic_population=[]
    j = 0
    for i in index_matrix[:hh_colno,:]:
        if i[1] == i[2] and frequencies[j]>0:
            synthetic_population.append([sp_matrix[i[1]-1, 2] , frequencies[j], i[0]])
        else:
            cumulative_weights = weights[sp_matrix[i[1]-1:i[2], 2]].cumsum()
            probability_distribution = cumulative_weights / cumulative_weights[-1]
            probability_lower_limit = probability_distribution.tolist()
            probability_lower_limit.insert(0,0)
            probability_lower_limit = arr(probability_lower_limit)
            random_numbers = random.rand(frequencies[j])
            freq, probability_lower_limit = histogram(random_numbers, probability_lower_limit)
            hhldid_by_type = sp_matrix[i[1]-1:i[2],2]

            for k in range(len(freq)):
                if freq[k] != 0:
                    #hhid = hhidRowDict[hhldid_by_type[k]]
                    # storing the matrix row no, freq, type
                    synthetic_population.append([hhldid_by_type[k], freq[k], i[0]])
        j = j + 1

    dbc.close()
    db.commit()
    return arr(synthetic_population, int)
Author: christianurich | Project: VIBe2UrbanSim | Lines: 32 | Source file: drawing_households.py

Example 6: init_weights

	def init_weights(self, sizes, init='zeros', X=None, Y=None):
		"""
		This method initializes the weights of the neural network.
		Set layer sizes to S = [Ninput, N1, N2, ... Noutput] and set
		using 'fast' method ('none', 'random', 'zeros'). Refer to
		constructor doc string for argument descriptions.

		TODO:
			implement autoenc
			implement regress
		"""
		init = init.lower()

		if init == 'none':
			pass				# no init: do nothing

		elif init == 'zeros':
			self.wts = arr([np.zeros((sizes[i + 1], sizes[i] + 1)) for i in range(len(sizes) - 1)], dtype=object)

		elif init == 'random':
			self.wts = arr([.25 * np.random.randn(sizes[i + 1], sizes[i] + 1) for i in range(len(sizes) - 1)], dtype=object)

		elif init == 'autoenc':
			pass

		elif init == 'regress':
			pass

		else:
			raise ValueError('NNetRegress.init_weights: \'' + init + '\' is not a valid argument for init')
Author: austinsherron | Project: Python-Machine-Learning | Lines: 30 | Source file: nnet_regress.py

Example 7: plot_lum

def plot_lum():
    clf()
    j_3min = [8052.06, 3050.04, 324.251, 20082.0, 1443.05, 1070.26, 1879.54, 3210.33, 312.932, 233.877, 714.423, 112.846, 126.616]
    j_3min2 = [8052.06, 3050.04, 324.251, 1443.05, 1070.26, 1879.54, 3210.33, 312.932, 233.877, 714.423, 112.846, 126.616]
    j_3min3 = [3050.04, 324.251, 1443.05, 1070.26, 1879.54, 3210.33, 312.932, 233.877, 714.423, 112.846, 126.616]

    j_3min = [8052.06, 3050.04, 324.251, 20082.0, 1443.05, 1070.26, 1879.54, 3210.33, 312.932, 233.877, 714.423, 188.211, 1594, 57.29, 833466.82317]

    #convert to cgs from microjansky:
    j_3min = arr(j_3min)*10**(-29)

    #convert to AB magnitude:
    j_3min = -2.5*numpy.log10(j_3min) - 48.60
    
    hist(j_3min,13)
    xlabel('$m_j$', fontsize=28)
    ylabel('Number', fontsize=28)
    yticks(arr([0, 1., 2., 3., 4.]))
    ax = matplotlib.pyplot.gca()
    ax.set_xlim(ax.get_xlim()[::-1]) # reversing the xlimits
    savefig('Lum_dist.eps')

    clf()
 #   hist(j_3min,20,cumulative=True, histtype='step')
  #  hist(j_3min2,20,cumulative=True, histtype='step')
   # hist(j_3min3,20,cumulative=True, histtype='step')
    #ylim(0,14)
    #xlim(-1000,22000)
    #xlabel('J Flux at 3 Minutes (Micro Jansky)')
  #  savefig('lum_dist.eps')
    return j_3min
Author: qmorgan | Project: qsoft | Lines: 31 | Source file: scatter_plotter.py

Example 8: __init__

    def __init__(self, *args, **kwargs):
        """
        Constructor for treeRegress (decision tree regression model)

        Parameters: see "train" function; calls "train" if arguments passed

        Properties (internal use only)
           L,R : indices of left & right child nodes in the tree
           F,T : feature index & threshold for decision (left/right) at this node
                 for leaf nodes, T[n] holds the prediction for leaf node n
        """
        self.L = arr([0])           # indices of left children
        self.R = arr([0])           # indices of right children
        self.F = arr([0])           # feature to split on (-1 = leaf = predict)
        self.T = arr([0])           # threshold to split on (prediction value if leaf)
        
        
        self.information_gain = dict()
        self.nX = dict() #keeps track of remaining data on that branch
        self.nY = dict() #left branch and right branch
#        self.bestval = dict()
        self.div = defaultdict(list) #        [best_feat,best_thresh]
        self.gain = defaultdict(int) #        best_val

         
        if len(args) or len(kwargs):     # if we were given optional arguments,
            self.train(*args, **kwargs)    #  just pass them through to "train"
Author: exzacktlee | Project: ml_final_project | Lines: 27 | Source file: dtree_maxLeaves27.py

Example 9: load_data_from_csv

def load_data_from_csv(csv_path, label_index, trans_func=lambda x: x):
	"""
	Function that loads from a CSV into main memory.

	Parameters
	----------
	csv_path : str
		Path to CSV file that contains data.
	label_index : int
		The index in the CSV rows that contains the label
		for each data point.
	trans_func : function object
		Function that transform values in CSV, i.e.: str -> int.

	Returns
	-------
	data,labels : (list)
		Tuple that contains a list of data points (index 0) and
		a list of labels corresponding to those data points (index 1).
	"""
	data = []
	labels = []

	with open(csv_path) as f:
		csv_data = reader(f)
	
		for row in csv_data:
			row = list(map(trans_func, row))

			labels.append(row.pop(label_index))
			data.append(row)

	return arr(data),arr(labels)
Author: austinsherron | Project: Python-Machine-Learning | Lines: 33 | Source file: data.py
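
As an editorial illustration only (not part of the original project), a hypothetical call could look like the following, assuming a file named features.csv whose rows are numeric and whose label sits in column 0:

data, labels = load_data_from_csv('features.csv', 0, float)   # hypothetical file name and layout
print(data.shape, labels.shape)                                # both come back as numpy arrays via arr(...)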

Example 10: addVars

 def addVars(self):
     bus,branch,_,_, n,nl,_,_,_,_,gens = self.data + self.aux
     if self.verbose: print('defining variables')
     INF = 1e100
     if self.solver == 'cplex':
         p = ['p_%d'%i for i in gens]
         a = ['a_%d'%i for i in gens]
         D = ['D_%d'%i for i in bus]
         t = ['t_%d'%i for i in bus]
         m = ['m{}'.format(i['id']) for i in branch] 
         s = ['s{}'.format(i['id']) for i in branch]
         self.M.variables.add(names = p + a)
         self.M.variables.add(names = D + t, lb = [-INF]*2*n)
         #self.M.variables.add(names = m, lb = [-INF]*nl)
         #self.M.variables.add(names = s)
         self.M.variables.add(names = m + s, lb = [-INF]*2*nl)
         D, t = arr(D), arr(t)
         self.var = (p, a, D, t, m, s)
     else:
         p = {i: self.M.addVar(name='pbar_%d'%i) for i in gens}
         a = {i: self.M.addVar(name='alpha_%d'%i) for i in gens}
         D = {i: self.M.addVar(lb=-INF, name='delta_%d'%i) for i in bus}
         t = {i: self.M.addVar(lb=-INF, name='theta_%d'%i) for i in bus}
         m = {i['id']: self.M.addVar(lb=-INF, name='fbar{}'.format(i['id'])) for 
                 i in branch}
         s = {i['id']: self.M.addVar(lb=-INF, name='std{}'.format(i['id'])) for 
                 i in branch}
         self.var = (p, a, D, t, m, s)
         self.M.update()
Author: sharnett | Project: ccopf | Lines: 29 | Source file: ccopf.py

Example 11: __dectree_train

    def __dectree_train(self, X, Y, L, R, F, T, next, depth, minParent, maxDepth, minScore, nFeatures):
        """
        This is a recursive helper method that trains the decision tree. Used in:
            train

        TODO:
            compare for numerical tolerance
        """
        n,d = mat(X).shape

        # check leaf conditions...
        if n < minParent or depth >= maxDepth or np.var(Y) < minScore:
            assert n != 0, ('TreeRegress.__dectree_train: tried to create size zero node')
            return self.__output_leaf(Y, n, L, R, F, T, next)

        best_val = np.inf
        best_feat = -1
        try_feat = np.random.permutation(d)

        # ...otherwise, search over (allowed) features
        for i_feat in try_feat[0:nFeatures]:
            dsorted = arr(np.sort(X[:,i_feat].T)).ravel()                       # sort data...
            pi = np.argsort(X[:,i_feat].T)                                      # ...get sorted indices...
            tsorted = Y[pi].ravel()                                             # ...and sort targets by feature ID
            can_split = np.append(arr(dsorted[:-1] != dsorted[1:]), 0)          # which indices are valid split points?

            if not np.any(can_split):          # no way to split on this feature?
                continue

            # find min weighted variance among split points
            val,idx = self.__min_weighted_var(tsorted, can_split, n)

            # save best feature and split point found so far
            if val < best_val:
                best_val = val
                best_feat = i_feat
                best_thresh = (dsorted[idx] + dsorted[idx + 1]) / 2

        # if no split possible, output leaf (prediction) node
        if best_feat == -1:         
            return self.__output_leaf(Y, n, L, R, F, T, next)

        # split data on feature i_feat, value (tsorted[idx] + tsorted[idx + 1]) / 2
        F[next] = best_feat
        T[next] = best_thresh
        go_left = X[:,F[next]] < T[next]
        my_idx = next
        next += 1

        # recur left
        L[my_idx] = next    
        L,R,F,T,next = self.__dectree_train(X[go_left,:], Y[go_left], L, R, F, T, 
            next, depth + 1, minParent, maxDepth, minScore, nFeatures)

        # recur right
        R[my_idx] = next    
        L,R,F,T,next = self.__dectree_train(X[np.logical_not(go_left),:], Y[np.logical_not(go_left)], L, R, F, T, 
            next, depth + 1, minParent, maxDepth, minScore, nFeatures)

        return (L,R,F,T,next)
Author: exzacktlee | Project: ml_final_project | Lines: 60 | Source file: dtree.py

Example 12: MixedN

def MixedN(ls):
    """
    ls: a list of either lists or dictionaries.
    """
    
    if (len(ls)==1):
        if type(ls[0])==list:
            return [item/float(sum(ls[0])) for item in ls[0]]
        elif type(ls[0])==dict:
            return {key:value/float(sum(ls[0].values())) for key, value in ls[0].items()}

    lamb = 1.0/len(ls)
    if (sum([type(it)==list for it in ls])==len(ls)):
        total=arr([0]*len(ls[0]));
        for it in ls:
            total= total + arr([n/float(sum(it)) for n in it])
        mix = total*lamb
        return mix

    elif (sum([type(it)==dict for it in ls])==len(ls)):
        keys=set([])
        for it in ls:
            keys.update(set(it.keys()))
        mix={key:sum([(float(1)/sum(it.values()))*it.get(key, 0)*lamb for it in ls]) for key in keys}
        return mix
Author: wazaahhh | Project: bayesLearn | Lines: 25 | Source file: Entro.py
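
As a quick editorial check of the behavior above (assuming arr is the NumPy asarray/array alias noted earlier): each count list is normalized to probabilities and the results are averaged with equal weight.

# [1, 1, 2] and [2, 2, 4] both normalize to [0.25, 0.25, 0.5],
# so their equal-weight mixture is that same vector.
print(MixedN([[1, 1, 2], [2, 2, 4]]))   # -> approximately [0.25, 0.25, 0.5]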

Example 13: __init__

	def __init__(self, X=None, Y=None, min_parent=2, max_depth=np.inf, min_score=-1, n_features=None):
		"""
		Constructor for TreeRegressor (decision tree regression model).

		Parameters
		----------
		X : numpy array 
			N x M numpy array which contains N data points with M features.
		Y : numpy array 
			1 x N numpy array that contains values that relate to the data
		  	points in X.
		min_parent : int 
			Minimum number of data required to split a node. 
		min_score : int 
			Minimum value of score improvement to split a node.
		max_depth : int 
			Maximum depth of the decision tree. 
		n_features : int 
			Number of available features for splitting at each node.
		"""
		self.L = arr([0])			# indices of left children
		self.R = arr([0])			# indices of right children
		self.F = arr([0])			# feature to split on (-1 = leaf = predict)
		self.T = arr([0])			# threshold to split on (prediction value if leaf)
	
		if type(X) is np.ndarray and type(Y) is np.ndarray:					
			self.train(X, Y, min_parent, max_depth, min_score, n_features)	# train if data is provided
Author: austinsherron | Project: Python-Machine-Learning | Lines: 27 | Source file: tree_regress.py

Example 14: __min_weighted_var

	def __min_weighted_var(self, tsorted, can_split, n):
		"""
		This is a helper method that finds the minimum weighted variance
		among all split points. Used in:
			__dectree_train
		"""
		# compute mean up to and past position j (for j = 0..n)
		y_cum_to = np.cumsum(tsorted, axis=0)
		y_cum_pa = y_cum_to[-1] - y_cum_to
		mean_to = y_cum_to / arr(range(1, n + 1))		
		mean_pa = y_cum_pa / arr(list(range(n - 1, 0, -1)) + [1])

		# compute variance up to, and past position j (for j = 0..n)
		y2_cum_to = np.cumsum(np.power(tsorted, 2), axis=0)
		y2_cum_pa = y2_cum_to[-1] - y2_cum_to
		var_to = (y2_cum_to - 2 * mean_to * y_cum_to + list(range(1, n + 1)) * np.power(mean_to, 2)) / list(range(1, n + 1))
		var_pa = (y2_cum_pa - 2 * mean_pa * y_cum_pa + list(range(n - 1, -1, -1)) * np.power(mean_pa, 2)) / arr(list(range(n - 1, 0, -1)) + [1])
		var_pa[-1] = np.inf

		# find minimum weighted variance among all split points
		weighted_variance = arr(range(1, n + 1)) / n * var_to + arr(range(n - 1, -1, -1)) / n * var_pa
		val = np.nanmin((weighted_variance + 1) / (can_split + 1e-100))			# nan versions of min functions must be used to ignore nans
		idx = np.nanargmin((weighted_variance + 1) / (can_split + 1e-100))		# find only splittable points

		return (val,idx)
Author: austinsherron | Project: Python-Machine-Learning | Lines: 25 | Source file: tree_regress.py

Example 15: test_bothModels

	def test_bothModels(self):
		fun1 = functions.DistanceToCircle(arr([ 10,  10]), .5)
		fun2 = functions.DistanceToCircle(arr([-10, -10]), 5)
		set = dfo_model.MultiFunctionModel([fun1, fun2], self.b, self.center, self.radius)
		set.improve(None)
		center = arr([3,4])

		for i in range(50):
			print("testing " + str(i) + " of " + str(50))
			rFactor = self.getRFactor()
			newRadius = set.modelRadius * rFactor
			center = center + set.modelRadius / newRadius
			set.testNewModelCenter(center)
			set.setNewModelCenter(center)
			set.multiplyRadius(rFactor)
			set.improve('images/test_both_%04d_improve.png' % i)

			quadmod1 = set.getQuadraticModels(arr([0, 1], int))
			quadmod2 = set.getQuadraticModels2(arr([0, 1], int))
			for j in range(10):
				x = center + 10 * (2 * random.random(2) - 1)
				y1 = quadmod1.evaluate(x)
				y2 = quadmod2.evaluate(x)

				self.assertTrue(norm(y1 - y2) < self.tolerance)

				y1 = quadmod1.jacobian(x)
				y2 = quadmod2.jacobian(x)

				self.assertTrue(norm(y1 - y2) < self.tolerance)
Author: tlhallock | Project: line-search-dfo | Lines: 30 | Source file: run_Model.py


Note: The numpy.arr function examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets are drawn from open-source projects contributed by their respective authors; copyright in the source code remains with the original authors, and redistribution or use should follow the corresponding project's license. Please do not repost without permission.