

Python numpy.zeros_like Function Code Examples

This article collects typical usage examples of Python's numpy.zeros_like function. If you have been wondering what exactly zeros_like does, how to call it, or what real-world code that uses it looks like, the curated examples below should help.


Fifteen code examples of the zeros_like function are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
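
Before the examples, here is a minimal illustration of what numpy.zeros_like does: it returns a new array of zeros with the same shape and dtype as its argument, and the dtype can be overridden.

import numpy as np

a = np.arange(6, dtype=np.float64).reshape(2, 3)
z = np.zeros_like(a)               # zeros, same shape (2, 3) and dtype float64 as a
zi = np.zeros_like(a, dtype=int)   # same shape, but with an overridden dtype

print(z.shape, z.dtype)            # (2, 3) float64
print(zi.dtype)                    # platform default integer dtype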

Example 1: dateRange

def dateRange(first, last):

    # Type check, float --> int
    if isinstance(first[0], float):
        temp = np.zeros_like(first, dtype='int')
        for i in range(temp.size):
            temp[i] = first[i]
        first = tuple(temp)

    if isinstance(last[0], float):
        temp = np.zeros_like(last, dtype='int')
        for i in range(temp.size):
            temp[i] = last[i]
        last = tuple(temp)

    # Initialize date dictionary
    dateList = {}

    # Populate dictionary
    first = dt.datetime(*first[:6])
    last = dt.datetime(*last[:6])
    n = (last + dt.timedelta(days=1) - first).days
    dateList['year'] = np.array([(first + dt.timedelta(days=i)).year for i in range(n)])
    dateList['month'] = np.array([(first + dt.timedelta(days=i)).month for i in range(n)])
    dateList['day'] = np.array([(first + dt.timedelta(days=i)).day for i in range(n)])

    return dateList
Author: DAESCG, Project: example_sql_repo, Lines: 27, Source: write_rmm_db.py
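
A quick, made-up call (not from the original repository) showing the returned dictionary; it assumes numpy and datetime are imported as np and dt, as in the snippet above.

import datetime as dt
import numpy as np

dates = dateRange((2000, 1, 1, 0, 0, 0), (2000, 1, 3, 0, 0, 0))
print(dates['year'])    # [2000 2000 2000]
print(dates['month'])   # [1 1 1]
print(dates['day'])     # [1 2 3]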

Example 2: viterbi_decode

def viterbi_decode(score, transition_params):
  """Decode the highest scoring sequence of tags outside of TensorFlow.

  This should only be used at test time.

  Args:
    score: A [seq_len, num_tags] matrix of unary potentials.
    transition_params: A [num_tags, num_tags] matrix of binary potentials.

  Returns:
    viterbi: A [seq_len] list of integers containing the highest scoring tag
        indices.
    viterbi_score: A float containing the score for the Viterbi sequence.
  """
  trellis = np.zeros_like(score)
  backpointers = np.zeros_like(score, dtype=np.int32)
  trellis[0] = score[0]

  for t in range(1, score.shape[0]):
    v = np.expand_dims(trellis[t - 1], 1) + transition_params
    trellis[t] = score[t] + np.max(v, 0)
    backpointers[t] = np.argmax(v, 0)

  viterbi = [np.argmax(trellis[-1])]
  for bp in reversed(backpointers[1:]):
    viterbi.append(bp[viterbi[-1]])
  viterbi.reverse()

  viterbi_score = np.max(trellis[-1])
  return viterbi, viterbi_score
Author: AlbertXiebnu, Project: tensorflow, Lines: 30, Source: crf.py
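
A toy sanity check (illustrative input, not from the original repository): with transition potentials that penalize switching tags, the decoded path stays on a single tag.

import numpy as np

score = np.array([[1.0, 0.0],     # unary potentials, 3 steps x 2 tags
                  [0.0, 1.0],
                  [1.0, 0.0]])
transition_params = np.array([[ 2.0, -2.0],   # strongly discourages tag switches
                              [-2.0,  2.0]])

viterbi, viterbi_score = viterbi_decode(score, transition_params)
print(viterbi)          # decoded path stays on tag 0 at every step
print(viterbi_score)    # 6.0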

Example 3: reg

def reg(psf_model, parms):
    """
    Regularization and derivative.
    """
    eps = parms.eps
    if (eps is None):
        return np.zeros_like(psf_model)

    psf_shape = psf_model.shape
    d = np.zeros_like(psf_model)
    r = np.zeros_like(psf_model)
    for i in range(psf_shape[0]):
        for j in range(psf_shape[1]): 
            if i > 0:
                r[i, j] += (psf_model[i, j] - psf_model[i - 1, j]) ** 2.
                d[i, j] += 2. * (psf_model[i, j] - psf_model[i - 1, j]) 
            if j > 0:
                r[i, j] += (psf_model[i, j] - psf_model[i, j - 1]) ** 2.
                d[i, j] += 2. * (psf_model[i, j] - psf_model[i, j - 1]) 
            if i < psf_shape[0] - 1:
                r[i, j] += (psf_model[i, j] - psf_model[i + 1, j]) ** 2.
                d[i, j] += 2. * (psf_model[i, j] - psf_model[i + 1, j]) 
            if j < psf_shape[1] - 1:
                r[i, j] += (psf_model[i, j] - psf_model[i, j + 1]) ** 2.
                d[i, j] += 2. * (psf_model[i, j] - psf_model[i, j + 1]) 
    r *= eps
    d *= eps
    return r, d
Author: rossfadely, Project: wfc3psf, Lines: 28, Source: derivatives.py
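
A hedged usage sketch: parms here is a stand-in object with only an eps attribute (an assumption; the real parms class lives elsewhere in the wfc3psf repository). Note that when eps is None the function returns a single zero array rather than an (r, d) pair, so callers have to treat that case separately.

import numpy as np
from types import SimpleNamespace

psf_model = np.random.rand(5, 5)
parms = SimpleNamespace(eps=0.1)   # stand-in for the repository's parms object

r, d = reg(psf_model, parms)       # regularization value and derivative per pixel
print(r.shape, d.shape)            # (5, 5) (5, 5)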

Example 4: finalize

    def finalize(self):
        """Calculates the flux, inverse variance and resolution for this spectrum.

        Uses the accumulated data from all += operations so far but does not prevent
        further accumulation.  This is the expensive step in coaddition so we make
        it something that you have to call explicitly.  If you forget to do this,
        the flux,ivar,resolution attributes will be None.

        If the coadded resolution matrix is not invertible, a warning message is
        printed and the returned flux vector is zero (but ivar and resolution are
        still valid).
        """
        # Convert to a dense matrix if necessary.
        if scipy.sparse.issparse(self.Cinv):
            self.Cinv = self.Cinv.todense()
        # What pixels are we using?
        mask = (np.diag(self.Cinv) > 0)
        keep = np.arange(len(self.Cinv_f))[mask]
        keep_t = keep[:,np.newaxis]
        # Initialize the results to zero.
        self.flux = np.zeros_like(self.Cinv_f)
        self.ivar = np.zeros_like(self.Cinv_f)
        R = np.zeros_like(self.Cinv)
        # Calculate the deconvolved flux,ivar and resolution for ivar > 0 pixels.
        self.ivar[mask],R[keep_t,keep] = decorrelate(self.Cinv[keep_t,keep])
        try:
            R_it = scipy.linalg.inv(R[keep_t,keep].T)
            self.flux[mask] = R_it.dot(self.Cinv_f[mask])/self.ivar[mask]
        except np.linalg.linalg.LinAlgError:
            self.log.warning('resolution matrix is singular so no coadded fluxes available.')
        # Convert R from a dense matrix to a sparse one.
        self.resolution = desispec.resolution.Resolution(R)
Author: desihub, Project: desispec, Lines: 32, Source: coaddition.py

Example 5: totalvalue

def totalvalue(cash_ini,orderform,valueform):
    
    trades = pd.read_csv(orderform,header=None,sep=',')
    trades = trades.dropna(axis = 1, how='all')
    trades.columns = ['Year','Month','Day','Symbol','Order','Share']
    dateall = []
    for i in np.arange(len(trades.Year)):
        dateall.append(dt.datetime(trades['Year'][i],trades['Month'][i],trades['Day'][i],16))
    dateall = pd.to_datetime(dateall)
    trades=trades.drop(['Year','Month','Day'],axis=1)
    trades['Date']=dateall
    trades.set_index('Date',inplace=True)
    
    ls_symbols = []
    for symbol in trades.Symbol:
        if symbol not in ls_symbols:
            ls_symbols.append(symbol)
            
    startdate = dateall[0]
    enddate = dateall[-1]
    dt_timeofday = dt.timedelta(hours=16)
    ldt_timestamps = du.getNYSEdays(startdate,enddate+dt_timeofday,dt_timeofday)
    ls_keys = 'close'
    c_dataobj = da.DataAccess('Yahoo')
    price = c_dataobj.get_data(ldt_timestamps, ls_symbols, ls_keys)
    orders = price*np.NaN
    orders = orders.fillna(0)
    for i in np.arange(len(trades.index)):
        ind = trades.index[i]
        if trades.ix[i,'Order']=='Buy':
            orders.loc[ind,trades.ix[i,'Symbol']]+=trades.ix[i,'Share']
        else:
            orders.loc[ind,trades.ix[i,'Symbol']]+=-trades.ix[i,'Share']
    #    keys = ['price','orders']
    #    trading_table = pd.concat([ldf_data,orders],keys=keys,axis=1)
    cash = np.zeros(np.size(price[ls_symbols[0]]),dtype=np.float)
    cash[0] = cash_ini
    # updating the cash value
    for i in np.arange(len(orders.index)):
        if i == 0: 
            cash[i] = cash[i] - pd.Series.sum(price.ix[i,:]*orders.ix[i,:])
        else:
            cash[i] = cash[i-1] - pd.Series.sum(price.ix[i,:]*orders.ix[i,:])
    # updating ownership
    ownership = orders*np.NaN
    for i in np.arange(len(orders.index)):
        ownership.ix[i,:]=orders.ix[:i+1,:].sum(axis=0) 
        
    # updating total portfolio value
    value = np.zeros_like(cash)
    for i in np.arange(len(ownership.index)):
        value[i] = pd.Series.sum(price.ix[i,:]*ownership.ix[i,:]) 
    keys = ['price','orders','ownership']
    trading_table = pd.concat([price,orders,ownership],keys = keys, axis=1)
    trading_table[('value','CASH')]=cash
    trading_table[('value','STOCK')]=value
    total = np.zeros_like(cash)
    total = cash + value
    trading_table[('value','TOTAL')]=total
    trading_table[('value','TOTAL')].to_csv(valueform)
Author: yesufeng, Project: computational-investing, Lines: 60, Source: trading.py

Example 6: fit_deriv

    def fit_deriv(cls, x, amplitude, x_0, width):
        """One dimensional Box model derivative with respect to parameters"""

        d_amplitude = cls.evaluate(x, 1, x_0, width)
        d_x_0 = np.zeros_like(x)
        d_width = np.zeros_like(x)
        return [d_amplitude, d_x_0, d_width]
Author: robcross, Project: astropy, Lines: 7, Source: functional_models.py

Example 7: dataVtk_3dMatrix

def dataVtk_3dMatrix(points,bounds,vectors):
    """
    Function that turns VTK-formatted output data into 3D field matrix data
    from [(x1,y1,z1),...,(xn,yn,zn)]
    to [[[[x1,y1,z1],[...],[x3,y1,z1]],[[x1,y2,z1],[...],[...]],[[x1,y3,z1],[...],[...]]]
    ,[[[x1,y1,z2],[...],[...]],[...],[...]] , [.........]]    
    -points => list of the coordinates of the points where the data is located.
    -bounds => bounds of the data (Xmin, Xmax, Ymin, Ymax, Zmin, Zmax).
    -vectors => vector data of the field at the 'points'
    """
    # assign variables
    (xmin,xmax,ymin,ymax,zmin,zmax) = bounds
        
    #generate the output arrays
    grid3d = N.mgrid[zmin:zmax+1, ymin:ymax+1, xmin:xmax+1]
    pnts3d = N.zeros_like(grid3d[0],dtype= N.ndarray)
    vect3d = N.zeros_like(grid3d[0],dtype= N.ndarray)
    
    # loop and rearrange
    for i in range(len(points)):
        x_t = points[i][0]
        y_t = points[i][1]
        z_t = points[i][2]
        pnts3d[z_t+zmax][y_t+ymax][x_t+xmax] = points[i]
        vect3d[z_t+zmax][y_t+ymax][x_t+xmax] = vectors[i]
        
    return {'points':pnts3d,'vectors':vect3d}
Author: carlosloslas, Project: PyVortexInfoVisualisation, Lines: 27, Source: VTK_ReadVf3d.py
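
A small, made-up input illustrating the expected shapes; the point coordinates are assumed to be integer grid positions, which is what the indexing inside the function requires.

import numpy as N

points = [(-1, 0, 0), (0, 0, 0), (1, 1, 1)]            # integer grid coordinates
vectors = [(0.0, 0.0, 1.0), (1.0, 0.0, 0.0), (0.0, 1.0, 0.0)]
bounds = (-1, 1, -1, 1, -1, 1)                          # (xmin, xmax, ymin, ymax, zmin, zmax)

out = dataVtk_3dMatrix(points, bounds, vectors)
print(out['points'].shape)      # (3, 3, 3), indexed as [z][y][x]
print(out['vectors'][1][1][0])  # vector attached to the point at x=-1, y=0, z=0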

Example 8: __init__

    def __init__(self, network, **kwargs):
        # due to the way that theano handles updates, we cannot update a
        # parameter twice during the same function call. so, instead of handling
        # everything in the updates for self.f_learn(...), we split the
        # parameter updates into two function calls. the first "prepares" the
        # parameters for the gradient computation by moving the entire model one
        # step according to the current velocity. then the second computes the
        # gradient at that new model position and performs the usual velocity
        # and parameter updates.

        self.params = network.params(**kwargs)
        self.momentum = kwargs.get('momentum', 0.5)

        # set up space for temporary variables used during learning.
        self._steps = []
        self._velocities = []
        for param in self.params:
            v = param.get_value()
            n = param.name
            self._steps.append(theano.shared(np.zeros_like(v), name=n + '_step'))
            self._velocities.append(theano.shared(np.zeros_like(v), name=n + '_vel'))

        # step 1. move to the position in parameter space where we want to
        # compute our gradient.
        prepare = []
        for param, step, velocity in zip(self.params, self._steps, self._velocities):
            prepare.append((step, self.momentum * velocity))
            prepare.append((param, param + step))

        logging.info('compiling NAG adjustment function')
        self.f_prepare = theano.function([], [], updates=prepare)

        super(NAG, self).__init__(network, **kwargs)
Author: majidaldo, Project: theano-nets, Lines: 33, Source: trainer.py

Example 9: get_jk_coulomb

def get_jk_coulomb(mol, dm, hermi=1, coulomb_allow='SSSS',
                   opt_llll=None, opt_ssll=None, opt_ssss=None):
    if coulomb_allow.upper() == 'LLLL':
        logger.info(mol, 'Coulomb integral: (LL|LL)')
        j1, k1 = _call_veff_llll(mol, dm, hermi, opt_llll)
        n2c = j1.shape[1]
        vj = numpy.zeros_like(dm)
        vk = numpy.zeros_like(dm)
        vj[...,:n2c,:n2c] = j1
        vk[...,:n2c,:n2c] = k1
    elif coulomb_allow.upper() == 'SSLL' \
      or coulomb_allow.upper() == 'LLSS':
        logger.info(mol, 'Coulomb integral: (LL|LL) + (SS|LL)')
        vj, vk = _call_veff_ssll(mol, dm, hermi, opt_ssll)
        j1, k1 = _call_veff_llll(mol, dm, hermi, opt_llll)
        n2c = j1.shape[1]
        vj[...,:n2c,:n2c] += j1
        vk[...,:n2c,:n2c] += k1
    else: # coulomb_allow == 'SSSS'
        logger.info(mol, 'Coulomb integral: (LL|LL) + (SS|LL) + (SS|SS)')
        vj, vk = _call_veff_ssll(mol, dm, hermi, opt_ssll)
        j1, k1 = _call_veff_llll(mol, dm, hermi, opt_llll)
        n2c = j1.shape[1]
        vj[...,:n2c,:n2c] += j1
        vk[...,:n2c,:n2c] += k1
        j1, k1 = _call_veff_ssss(mol, dm, hermi, opt_ssss)
        vj[...,n2c:,n2c:] += j1
        vk[...,n2c:,n2c:] += k1
    return vj, vk
Author: pengdl, Project: pyscf, Lines: 29, Source: dhf.py

Example 10: sor

def sor(A, b):
    sol = []
    
    n = len(A)
    D = np.zeros_like(A)
    L = np.zeros_like(A)
    
    for i in range(0,n):
        D[i][i] = A[i][i];
        
    for i in range(0,n):
        for j in range(0,i):
            L[i][j] = -A[i][j];
    
    omega = omegafind(A,D)   
    Q = D/omega -L
    Tj = np.linalg.inv(Q).dot(Q-A)
    c = np.linalg.inv(Q).dot(b)
    x = np.zeros_like(b)
    

    for itr in range(ITERATION_LIMIT):
        x=Tj.dot(x) + c;

    sol = x
    
    return list(sol)
Author: chaikt12, Project: UECM3033_assign2, Lines: 27, Source: task1.py
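
The function above depends on omegafind and ITERATION_LIMIT defined elsewhere in task1.py. Below is a minimal self-contained sketch of the same splitting iteration x <- Tx + c, with a fixed relaxation factor standing in for omegafind (an assumption, for illustration only).

import numpy as np

ITERATION_LIMIT = 100
omega = 1.1                          # stand-in for omegafind(A, D)

A = np.array([[4.0, 1.0],
              [2.0, 5.0]])
b = np.array([1.0, 2.0])

D = np.diag(np.diag(A))
L = -np.tril(A, k=-1)                # negated strict lower triangle, as in sor()
Q = D / omega - L
T = np.linalg.inv(Q).dot(Q - A)
c = np.linalg.inv(Q).dot(b)

x = np.zeros_like(b)
for _ in range(ITERATION_LIMIT):
    x = T.dot(x) + c

print(x)                             # converges to np.linalg.solve(A, b)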

Example 11: filter_frames

    def filter_frames(self, data):
        data = data[0]
        lp = gaussian_filter(data, 100)
        hp = data - lp  # poor man's background subtraction
        hp -= np.min(hp)
        sh = hp.shape
        print("here")
        hp = hp.astype('uint32')
        hp = flex.int(hp)
        print("here now")

        mask = flex.bool(np.ones_like(hp).astype('bool'))
        print("here now")
        result1 = flex.bool(np.zeros_like(hp).astype('bool'))
        spots = np.zeros_like(hp).astype('bool')
        print("here now")

        for i in range(3, self.parameters['spotsize'], 5):
            print("here now")
            algorithm = DispersionThreshold(sh, (i, i), 1, 1, 0, -1)
            print("here now")
            print(type(hp), type(mask), type(result1))
            thing = algorithm(hp, mask, result1)
            print("here now")
            spots = spots + result1.as_numpy_array()
        return [data, spots * data]
Author: rcatwood, Project: Savu, Lines: 26, Source: dials_find_spots.py

Example 12: initialize_adam

def initialize_adam(parameters):
    '''
    Initializes v and s, two dictionaries that both contain:
    - keys: 'dW1', 'db1', ..., 'dWL', 'dbL'
    - values: numpy arrays of zeros with the same shape as the corresponding gradients/parameters

    :param parameters: dictionary containing the parameters
            parameters['W' + str(l)] = Wl
            parameters['b' + str(l)] = bl
    :return:
    v - exponentially weighted averages of the gradients, with fields:
        v['dW' + str(l)] = ...
        v['db' + str(l)] = ...
    s - exponentially weighted averages of the squared gradients, with fields:
        s['dW' + str(l)] = ...
        s['db' + str(l)] = ...
    '''

    L = len(parameters)//2
    v= {}
    s = {}

    for l in range(L):
        v['dW'+str(l+1)] = np.zeros_like(parameters['W'+str(l+1)])
        v['db'+str(l+1)] = np.zeros_like(parameters['b'+str(l+1)])

        s['dW'+str(l+1)] = np.zeros_like(parameters['W'+str(l+1)])
        s['db'+str(l+1)] = np.zeros_like(parameters['b'+str(l+1)])

    return(v,s)
Author: 491811030, Project: hellow-world, Lines: 30, Source: work_1.py
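
A quick check with a tiny two-layer parameter dictionary (illustrative shapes only):

import numpy as np

parameters = {
    'W1': np.random.randn(3, 2), 'b1': np.zeros((3, 1)),
    'W2': np.random.randn(1, 3), 'b2': np.zeros((1, 1)),
}

v, s = initialize_adam(parameters)
print(v['dW1'].shape, s['db2'].shape)   # (3, 2) (1, 1)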

Example 13: compute_normals

def compute_normals(im_pos, n_offset=3):
    """
    Converts an XYZ image to a Normal Image
    --Input--
    im_pos : ndarray (NxMx3)
        Image with x/y/z values for each pixel
    n_offset : int
        Smoothness factor for calculating the gradient
    --Output--
    normals : ndarray (NxMx3)
        Image with normal vectors for each pixel
    """
    gradients_x = np.zeros_like(im_pos)
    gradients_y = np.zeros_like(im_pos)
    for i in range(3):
        gradients_x[:, :, i], gradients_y[:, :, i] = np.gradient(im_pos[:, :, i], n_offset)

    gradients_x /= np.sqrt(np.sum(gradients_x**2, -1))[:, :, None]
    gradients_y /= np.sqrt(np.sum(gradients_y**2, -1))[:, :, None]

    normals = np.cross(gradients_x.reshape([-1, 3]),
                       gradients_y.reshape([-1, 3])).reshape(im_pos.shape)
    normals /= np.sqrt(np.sum(normals**2, -1))[:, :, None]
    normals = np.nan_to_num(normals)

    return normals
Author: MerDane, Project: pyKinectTools, Lines: 26, Source: Normals.py
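
To see it in action, the sketch below (made-up input) builds a synthetic XYZ image of a tilted plane; the recovered normals should point in roughly the same direction everywhere.

import numpy as np

ys, xs = np.mgrid[0:32, 0:32].astype(float)          # pixel grid
im_pos = np.dstack([xs, ys, 0.5 * xs + 0.25 * ys])   # plane z = 0.5x + 0.25y

normals = compute_normals(im_pos)
print(normals.shape)     # (32, 32, 3)
print(normals[16, 16])   # approximately the plane's unit normal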

Example 14: calculate_feed_tonnage_constrain

    def calculate_feed_tonnage_constrain(self,schedule,opening = None,closing = None):
        if opening is None:
            opening = np.zeros(self.ndp,dtype=np.int)
        else:
            assert(schedule.shape[0] == opening.shape[0])
            
        if closing is None:
            closing = np.zeros(self.ndp,dtype=np.int)
            closing[:] = self.nperiods - 1
        else:
            assert(schedule.shape[0] == closing.shape[0])


        production_period = self.calculate_feed_tonnage(schedule,opening,closing)

        #calculate the deviation from feed targets
        #logger.debug("minimum_feed_production=%f",self.minimum_feed_production)
        #logger.debug("maximum_feed_production=%f",self.maximum_feed_production)

        minp = np.zeros_like(production_period)
        indices = np.where(production_period < self.minimum_feed_production)[0]
        if len(indices) > 0:
            minp[indices] = self.minimum_feed_production - production_period[indices]

        maxp = np.zeros_like(production_period)
        indices = np.where(production_period > self.maximum_feed_production)[0]
        if len(indices) > 0:
            maxp[indices] = production_period[indices] - self.maximum_feed_production

            
        return tuple(maxp) + tuple(minp)
Author: exepulveda, Project: phd_coding, Lines: 31, Source: bcproblem.py

Example 15: predictionToPosition

	def predictionToPosition(self,pi, dim = 64):
		pirescale = np.expand_dims(pi, axis=1)
		pirescale = np.append(pirescale, np.zeros_like(pirescale), axis=1)
		positions = np.zeros_like(pirescale)
		positions[:,0] = pirescale[:,0] // dim
		positions[:,1] = pirescale[:,0] % dim
		return positions
Author: bruzat, Project: starcraft-reinforcement-learning, Lines: 7, Source: beacon.py
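
The method just unpacks flat indices into (row, column) positions on a dim x dim grid; np.divmod over a made-up input reproduces the same mapping.

import numpy as np

pi = np.array([0, 63, 64, 4095])        # flat indices on a 64x64 grid
rows, cols = np.divmod(pi, 64)          # same as pi // 64 and pi % 64 in the method
print(np.stack([rows, cols], axis=1))   # [[0 0] [0 63] [1 0] [63 63]]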


Note: the numpy.zeros_like examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other open-source code and documentation platforms. The snippets were selected from open-source projects contributed by various developers; copyright of the source code belongs to the original authors, and redistribution or use should follow the corresponding project's license. Please do not republish without permission.