

Python tensor.as_tensor Function Code Examples

This article collects typical usage examples of the theano.tensor.as_tensor function in Python. If you are wondering what as_tensor does, how to call it, or what real-world code that uses it looks like, the hand-picked examples below should help.


The following shows 15 code examples of the as_tensor function, ordered by popularity by default.

Example 1: maxpool_3D

def maxpool_3D(input, ds, ignore_border=False):
   
    # input.dimshuffle(0, 2, 1, 3, 4) would move the video axis to the back;
    # no reshuffle is needed here.
    if input.ndim < 3:
        raise NotImplementedError('max_pool_3d requires a dimension >= 3')

    # extract nr dimensions
    vid_dim = input.ndim
    # max pool in two steps, so we can reuse the 2D implementation of
    # DownsampleFactorMax. First maxpool over rows/cols as usual, then shift
    # the time dimension to the last position and maxpool over it with the
    # same 2D op.


    # extract dimensions
    frame_shape = input.shape[-2:]
    
    # collapse all leading dimensions into a single batch size (kept as a length-1 vector for the join below)
    batch_size = T.prod(input.shape[:-2])
    batch_size = T.shape_padright(batch_size,1)
    
    # store as 4D tensor with shape: (batch_size,1,height,width)
    new_shape = T.cast(T.join(0, batch_size,
                                        T.as_tensor([1,]), 
                                        frame_shape), 'int32')
    input_4D = T.reshape(input, new_shape, ndim=4)

    # downsample mini-batch of videos in rows and cols
    op = DownsampleFactorMax((ds[1],ds[2]), ignore_border)          # so second and third dimensions of ds are for height and width
    output = op(input_4D)
    # restore to original shape                                     
    outshape = T.join(0, input.shape[:-2], output.shape[-2:])
    out = T.reshape(output, outshape, ndim=input.ndim)

    # now maxpool time
    # output (time, rows, cols), reshape so that time is in the back
    shufl = (list(range(vid_dim-3)) + [vid_dim-2]+[vid_dim-1]+[vid_dim-3])
    input_time = out.dimshuffle(shufl)
    # reset dimensions
    vid_shape = input_time.shape[-2:]
    
    # collapse all leading dimensions into a single batch size (kept as a length-1 vector for the join below)
    batch_size = T.prod(input_time.shape[:-2])
    batch_size = T.shape_padright(batch_size,1)
    
    # store as 4D tensor with shape: (batch_size,1,width,time)
    new_shape = T.cast(T.join(0, batch_size,
                                        T.as_tensor([1,]), 
                                        vid_shape), 'int32')
    input_4D_time = T.reshape(input_time, new_shape, ndim=4)
    # downsample mini-batch of videos in time
    op = DownsampleFactorMax((1,ds[0]), ignore_border)            # Here the time dimension is downsampled. 
    outtime = op(input_4D_time)
    # output 
    # restore to original shape (xxx, rows, cols, time)
    outshape = T.join(0, input_time.shape[:-2], outtime.shape[-2:])
    shufl = (list(range(vid_dim-3)) + [vid_dim-1]+[vid_dim-3]+[vid_dim-2])
    #rval = T.reshape(outtime, outshape, ndim=input.ndim).dimshuffle(shufl)
    return T.reshape(outtime, outshape, ndim=input.ndim).dimshuffle(shufl)
Developer: kli-nlpr | Project: Convolutional-Neural-Networks | Lines: 60 | Source: core.py
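
A minimal usage sketch for maxpool_3D above, assuming the function and its DownsampleFactorMax dependency (theano.tensor.signal.downsample in older Theano) are already in scope; the shapes are illustrative:

import numpy
import theano.tensor as T

# toy video batch: (batch, feature maps, time, rows, cols)
video = T.as_tensor(numpy.random.rand(2, 3, 8, 16, 16).astype('float32'))
pooled = maxpool_3D(video, ds=(2, 2, 2))   # halve time, rows and cols
print(pooled.eval().shape)                 # expected: (2, 3, 4, 8, 8)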

Example 2: _initial_part_matrix

 def _initial_part_matrix(self, part, size, deterministic):
     if size is None:
         size = 1
     length, dist_name, dist_map = self._choose_alternative(
         part,
         (self.local_size, self.initial_dist_local_name, self.initial_dist_local_map),
         (self.global_size, self.initial_dist_global_name, self.initial_dist_global_map)
     )
     dtype = self.symbolic_initial_global_matrix.dtype
     if length == 0:  # in this case theano fails to compute sample of correct size
         return tt.ones((size, 0), dtype)
     length = tt.as_tensor(length)
     size = tt.as_tensor(size)
     shape = tt.stack((size, length))
     # apply optimizations if possible
     if not isinstance(deterministic, tt.Variable):
         if deterministic:
             return tt.ones(shape, dtype) * dist_map
         else:
             return getattr(self._rng, dist_name)(shape)
     else:
         sample = getattr(self._rng, dist_name)(shape)
         initial = tt.switch(
             deterministic,
             tt.ones(shape, dtype) * dist_map,
             sample
         )
         return initial
Developer: aasensio | Project: pymc3 | Lines: 28 | Source: opvi.py
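
Stripped of the pymc3 plumbing, the key trick above is the tt.switch between a deterministic fill (ones * dist_map) and a random sample of the same shape. A standalone sketch with illustrative names and values, not pymc3 API:

import numpy as np
import theano.tensor as tt
from theano.tensor.shared_randomstreams import RandomStreams

srng = RandomStreams(seed=42)
shape = (4, 3)                        # (size, length), here as plain ints
dist_map = 0.5                        # stand-in for the deterministic initial value
deterministic = tt.as_tensor(np.int8(1))
initial = tt.switch(deterministic,
                    tt.ones(shape) * dist_map,   # deterministic branch
                    srng.normal(shape))          # sampled branch
print(initial.eval())                 # all 0.5 because deterministic is 1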

Example 3: theano_scan_color

def theano_scan_color(writer, draw_fn):
	with writer as writer_buf:
		writer_buf_reshaped = writer_buf.reshape((Screen.screen_vane_count, Screen.screen_max_magnitude, 3))
		vane_matrix = [[[float(vane), float(vane), float(vane)] for px in range(Screen.screen_max_magnitude)]
					   for vane in range(Screen.screen_vane_count)]
		px_matrix =   [[[float(px),float(px),float(px)] for px in range(Screen.screen_max_magnitude)]
					   for vane in range(Screen.screen_vane_count)]
		col_matrix =  [[[float(0), float(1), float(2)] for px in range(Screen.screen_max_magnitude)]
					   for vane in range(Screen.screen_vane_count)]
		vane_vec = T.as_tensor(vane_matrix)
		px_vec = T.as_tensor(px_matrix)
		col_vec = T.as_tensor(col_matrix)
		step = T.fscalar('step')

		draw_fn_with_step = draw_fn(step)
		f, _ = theano.map(draw_fn_with_step, [vane_vec, px_vec, col_vec])

		fn_actual = theano.function([step], f, allow_input_downcast=True, on_unused_input='ignore')

		step_actual = 0
		while True:
			writer.frame_ready()
			start = time.time()
			writer_buf_reshaped[:] = fn_actual(step_actual)
			step_actual -= 1
			done = time.time()
			fps = 1.0/(done - start)
			if fps < TARGET_FPS:
				logging.warning('Frame rate is %f, which is lower than target %d', fps, TARGET_FPS)
Developer: jarrahl | Project: skyscreen | Lines: 29 | Source: theano_examples.py
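
A self-contained sketch of the theano.map pattern used above, with a toy draw function and without the Screen/writer machinery (all names and shapes here are illustrative):

import theano
import theano.tensor as T

step = T.fscalar('step')
vane_vec = T.as_tensor([[0., 0., 0.], [1., 1., 1.]])        # one row per vane
col_vec = T.as_tensor([[0., 1., 2.], [0., 1., 2.]])         # colour channel indices
draw_fn_with_step = lambda vane, col: (vane + col) * step   # toy per-pixel function
out, _ = theano.map(draw_fn_with_step, [vane_vec, col_vec])
fn = theano.function([step], out, allow_input_downcast=True)
print(fn(2.0))   # [[0. 2. 4.] [2. 4. 6.]]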

Example 4: get_aggregator

    def get_aggregator(self):
        initialized = shared_like(0.)
        numerator_acc = shared_like(self.numerator)
        denominator_acc = shared_like(self.denominator)

        # Dummy default expression to use as the previously-aggregated
        # value, that has the same shape as the new result
        numerator_zeros = tensor.as_tensor(self.numerator).zeros_like()
        denominator_zeros = tensor.as_tensor(self.denominator).zeros_like()

        conditional_update_num = self.numerator + ifelse(initialized,
                                                         numerator_acc,
                                                         numerator_zeros)
        conditional_update_den = self.denominator + ifelse(initialized,
                                                           denominator_acc,
                                                           denominator_zeros)

        initialization_updates = [(numerator_acc,
                                   tensor.zeros_like(numerator_acc)),
                                  (denominator_acc,
                                   tensor.zeros_like(denominator_acc)),
                                  (initialized, 0.)]
        accumulation_updates = [(numerator_acc,
                                 conditional_update_num),
                                (denominator_acc,
                                 conditional_update_den),
                                (initialized, 1.)]
        aggregator = Aggregator(aggregation_scheme=self,
                                initialization_updates=initialization_updates,
                                accumulation_updates=accumulation_updates,
                                readout_variable=(numerator_acc /
                                                  denominator_acc))
        return aggregator
Developer: AdityoSanjaya | Project: blocks | Lines: 33 | Source: aggregation.py
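
The conditional accumulation can be reproduced with plain Theano shared variables; below is a minimal sketch of the same ifelse pattern, not the blocks Aggregator API (names and shapes are illustrative):

import numpy
import theano
import theano.tensor as T
from theano.ifelse import ifelse

x = T.vector('x')
initialized = theano.shared(numpy.float64(0.), name='initialized')
acc = theano.shared(numpy.zeros(3, dtype=theano.config.floatX), name='acc')

# use zeros as the "previous" value until the accumulator has been initialized
new_acc = x + ifelse(initialized, acc, T.as_tensor(x).zeros_like())
accumulate = theano.function([x], [],
                             updates=[(acc, new_acc),
                                      (initialized, T.constant(numpy.float64(1.)))],
                             allow_input_downcast=True)
accumulate(numpy.arange(3.))
accumulate(numpy.ones(3))
print(acc.get_value())   # [1. 2. 3.]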

Example 5: test_chunk_unchunk_grad2

def test_chunk_unchunk_grad2():
  n_time = 101
  n_batch = 3
  n_dim = 5
  numpy.random.seed(1234)
  _x = numpy.random.randn(n_time, n_batch, n_dim).astype(f32)
  _Dx2 = numpy.random.randn(n_time, n_batch, n_dim).astype(f32)
  _index = numpy.ones((n_time, n_batch), dtype="int8")
  x = T.as_tensor(_x)
  Dx2 = T.as_tensor(_Dx2)
  index = T.as_tensor(_index)
  chunk_size = 11
  chunk_step = 7

  out, oindex = chunk(x, index=index, chunk_size=chunk_size, chunk_step=chunk_step)
  chunk_op = NativeOp.Chunking().make_op()
  assert type(out.owner.op) is type(chunk_op)

  x2, index2, factors = unchunk(out, index=oindex, chunk_size=chunk_size, chunk_step=chunk_step, n_time=x.shape[0], n_batch=x.shape[1])
  unchunk_op = NativeOp.UnChunking().make_op()
  assert type(x2.owner.op) is type(unchunk_op)

  Dout, _, _, _, _, _ = unchunk_op.grad(x2.owner.inputs, (Dx2, None, None))
  Dx, _, _, _, _ = chunk_op.grad(out.owner.inputs, (Dout, None))
  _Dx = Dx.eval()
  assert_almost_equal(_Dx, _Dx2)
Developer: rwth-i6 | Project: returnn | Lines: 26 | Source: test_NativeOp_chunk.py

Example 6: dynamic_kmaxPooling

    def dynamic_kmaxPooling(self, curConv_out, k):
        neighborsForPooling = TSN.images2neibs(ten4=curConv_out, neib_shape=(1,curConv_out.shape[3]), mode='ignore_borders')
        self.neighbors = neighborsForPooling

        neighborsArgSorted = T.argsort(neighborsForPooling, axis=1)
        kNeighborsArg = neighborsArgSorted[:,-k:]
        #self.bestK = kNeighborsArg
        kNeighborsArgSorted = T.sort(kNeighborsArg, axis=1)

        ii = T.repeat(T.arange(neighborsForPooling.shape[0]), k)
        jj = kNeighborsArgSorted.flatten()
        pooledkmaxTmp = neighborsForPooling[ii, jj]
        new_shape = T.cast(T.join(0, 
                           T.as_tensor([neighborsForPooling.shape[0]]),
                           T.as_tensor([k])),
                           'int64')
        pooledkmax_matrix = T.reshape(pooledkmaxTmp, new_shape, ndim=2)

        rightWidth=self.unifiedWidth-k            
        right_padding = T.zeros((neighborsForPooling.shape[0], rightWidth), dtype=theano.config.floatX)
        matrix_padded = T.concatenate([pooledkmax_matrix, right_padding], axis=1)      
        #recover tensor form
        new_shape = T.cast(T.join(0, curConv_out.shape[:-2],
                           T.as_tensor([curConv_out.shape[2]]),
                           T.as_tensor([self.unifiedWidth])),
                           'int64')

        curPooled_out = T.reshape(matrix_padded, new_shape, ndim=4)
                
        return curPooled_out
Developer: rgtjf | Project: DeepLearning | Lines: 30 | Source: HKDefined.py
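
Stripped of the layer context, the k-max selection trick above (argsort each row, keep the indices of the k largest values, then re-sort those indices so the original left-to-right order is preserved) can be sketched on its own; the data and k are illustrative:

import numpy
import theano.tensor as T

X = T.as_tensor(numpy.array([[3., 1., 4., 1., 5.],
                             [9., 2., 6., 5., 3.]]))
k = 3
argsorted = T.argsort(X, axis=1)
kBestArg = T.sort(argsorted[:, -k:], axis=1)    # columns of the k largest, original order kept
ii = T.repeat(T.arange(X.shape[0]), k)
jj = kBestArg.flatten()
kmax = T.reshape(X[ii, jj], (X.shape[0], k), ndim=2)
print(kmax.eval())   # [[3. 4. 5.]
                     #  [9. 6. 5.]]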

Example 7: max_pool

def max_pool(images, imgshp, maxpoolshp):
    """Implements a max pooling layer

    Takes as input a 2D tensor of shape batch_size x img_size and
    performs max pooling.  Max pooling downsamples by taking the max
    value in a given area, here defined by maxpoolshp. Outputs a 2D
    tensor of shape batch_size x output_size.

    :param images: 2D tensor containing images on which to apply convolution.
                   Assumed to be of shape batch_size x img_size
    :param imgshp: tuple containing image dimensions
    :param maxpoolshp: tuple containing shape of area to max pool over

    :return: out1, symbolic result (2D tensor)
    :return: out2, logical shape of the output
    """
    N = numpy
    poolsize = N.int64(N.prod(maxpoolshp))

    # imgshp contains either 2 entries (height,width) or 3 (nfeatures,h,w)
    # in the first case, default nfeatures to 1
    if N.size(imgshp) == 2:
        imgshp = (1,) + imgshp

    # construct indices and index pointers for sparse matrix, which,
    # when multiplied with input images will generate a stack of image
    # patches
    indices, indptr, spmat_shape, sptype, outshp = \
            convolution_indices.conv_eval(imgshp, maxpoolshp,
                                          maxpoolshp, mode='valid')

#    print 'XXXXXXXXXXXXXXXX MAX POOLING LAYER XXXXXXXXXXXXXXXXXXXX'
#    print 'imgshp = ', imgshp
#    print 'maxpoolshp = ', maxpoolshp
#    print 'outshp = ', outshp

    # build sparse matrix, then generate stack of image patches
    csc = theano.sparse.CSM(sptype)(N.ones(indices.size), indices,
                                    indptr, spmat_shape)
    patches = sparse.structured_dot(csc, images.T).T

    pshape = tensor.stack([images.shape[0] *\
                               tensor.as_tensor(N.prod(outshp)),
                           tensor.as_tensor(imgshp[0]),
                           tensor.as_tensor(poolsize)])
    patch_stack = tensor.reshape(patches, pshape, ndim=3)

    out1 = tensor.max(patch_stack, axis=2)

    pshape = tensor.stack([images.shape[0],
                           tensor.as_tensor(N.prod(outshp)),
                           tensor.as_tensor(imgshp[0])])
    out2 = tensor.reshape(out1, pshape, ndim=3)

    out3 = tensor.DimShuffle(out2.broadcastable, (0, 2, 1))(out2)

    return tensor.flatten(out3, 2), outshp
Developer: 12190143 | Project: Theano | Lines: 57 | Source: sp.py
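
A hedged usage sketch, assuming this max_pool is the one shipped in theano.sparse.sandbox.sp (the Theano project name and the sp.py source file suggest so); the image and pooling shapes are illustrative:

import numpy
import theano
from theano import tensor
from theano.sparse.sandbox import sp

images = tensor.dmatrix('images')                         # (batch_size, img_size), float64
pooled, outshp = sp.max_pool(images, imgshp=(8, 8), maxpoolshp=(2, 2))
f = theano.function([images], pooled)
print(f(numpy.random.rand(3, 64)).shape)                  # (3, 16): 4x4 pooled map, flattened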

Example 8: _grad_single

    def _grad_single(self, ct, s, lnC2, GAMMI2):
        lnC = lnC2
        GAMMI = GAMMI2
        v = self.v#T.as_tensor(self.v)[:,ct:]
        v0 = T.as_tensor(v[v[:,0]==0, :])
        v1 = T.as_tensor(v[v[:,0]==1, :])

        cnp = v.shape[0]

        # Gradient of fE wrt the priors over final state
        [ofE, oxS], upd_fE_single = th.scan(fn=self._free_energy,
                                   sequences=v,
                                   non_sequences=[s,self.h,lnC,self.b])
        ofE0 = ofE[v0].sum()
        ofE1 = ofE[v1].sum()

        dFE0dlnC = T.jacobian(ofE0, lnC)
        dFE1dlnC = T.jacobian(ofE1, lnC)
        dFEdlnC  = T.jacobian(ofE,  lnC)
        ofE_ = T.vector()
        ofE_.tag.test_value = ofE.tag.test_value

        # Gradient of Gamma with respect to its initial condition:
        GAMMA, upd_GAMMA = th.scan(fn=self._upd_gamma,
               outputs_info=[GAMMI],
               non_sequences=[ofE, self.lambd, self.alpha, self.beta, cnp],
               n_steps=4)
        dGdg = T.grad(GAMMA[-1], GAMMI)

        dGdfE = T.jacobian(GAMMA[-1], ofE)
        dGdlnC = dGdfE.dot(dFEdlnC)

        out1 = ofE0
        out2 = ofE1
        maxout = T.max([out1, out2])

        exp_out1 = T.exp(GAMMA[-1]*(out1 - maxout))
        exp_out2 = T.exp(GAMMA[-1]*(out2 - maxout))
        norm_const = exp_out1 + exp_out2

        # Derivative wrt the second output (gammi):
        Jac1_gammi = (-(out1-out2)*dGdg*
                T.exp(GAMMA[-1]*(out1+out2 - 2*maxout))/(norm_const**2))
        Jac2_gammi = -Jac1_gammi
#        dfd1_tZ = Jac1_gammi*dCdf[1][0]+ Jac2_gammi*dCdf[1][1]

        # Derivative wrt first input (lnc)
        Jac1_lnC = (T.exp(GAMMA[-1]*(out1 + out2 - 2*maxout))/(norm_const**2)*
                  (-dGdlnC*(out1 - out2) - GAMMA[-1]*(dFE0dlnC - dFE1dlnC)))
        Jac2_lnC = -Jac1_lnC

        Jac1 = T.concatenate([T.stack(Jac1_gammi), Jac1_lnC])
        Jac2 = T.concatenate([T.stack(Jac2_gammi), Jac2_lnC])
        self.debug = [Jac1_lnC, Jac2_lnC, Jac2_gammi, Jac1_gammi, dFE0dlnC,
                      dFE1dlnC, dGdg, out1, out2, v0, v1, v, ct]
        return Jac1, Jac2
Developer: dcuevasr | Project: actinf | Lines: 56 | Source: actinfThClass.py

Example 9: symbolic_g

 def symbolic_g(self, symbolic_X_list,t):
     '''
     the gx for every state x must be a matrix with dimensions [number_of_rollouts,x_dim, control_dim]
     with x.shape = [number_of_rollouts, x_dim]
     '''
     x = symbolic_X_list[0]
     y = symbolic_X_list[1]
     gx = T.as_tensor(np.ones([1,self.control_dimensions]))
     gy = T.as_tensor(np.ones([1,self.control_dimensions]))
     return [gx,gy]
Developer: DoTha | Project: ParallelPice | Lines: 10 | Source: Theano_LQ1.py
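
The essential point of symbolic_g is simply that T.as_tensor wraps a plain NumPy array as a constant Theano tensor that can be combined with symbolic state variables; a tiny sketch:

import numpy as np
import theano.tensor as T

gx = T.as_tensor(np.ones([1, 2]))
print(gx.ndim, gx.dtype)   # 2 float64
print(gx.eval())           # [[1. 1.]]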

Example 10: __init__

 def __init__(self, n_units, **kwargs):
   super(LSTMS, self).__init__(
     n_units=n_units,
     n_in=n_units * 4,  # input gate, forget gate, output gate, net input
     n_out=n_units,
     n_re=n_units * 4,
     n_act=2  # output, cell state
   )
   self.o_output = T.as_tensor(numpy.ones((n_units,), dtype='float32'))
   self.o_h = T.as_tensor(numpy.ones((n_units,), dtype='float32'))
Developer: rwth-i6 | Project: returnn | Lines: 10 | Source: NetworkRecurrentLayer.py

Example 11: max_pool_3d

def max_pool_3d(input, ds, ignore_border=False):
	"""
		Takes as input a N-D tensor, where N >= 3. It downscales the input video by
		the specified factor, by keeping only the maximum value of non-overlapping
		patches of size (ds[0],ds[1],ds[2]) (time, height, width)  
		
		:type input: N-D theano tensor of input images.
		:param input: input images. Max pooling will be done over the 3 last dimensions.
		:type ds: tuple of length 3
		:param ds: factor by which to downscale. (2,2,2) will halve the video in each dimension.
		:param ignore_border: boolean value. Example when True, (5,5,5) input with ds=(2,2,2) will generate a
		(2,2,2) output. (3,3,3) otherwise.
	"""
	if input.ndim < 3:
		raise NotImplementedError('max_pool_3d requires a dimension >= 3')
		
	vid_dim = input.ndim
	#Maxpool frame
	frame_shape = input.shape[-2:]

	# collapse all leading dimensions into a single batch size (kept as a length-1 vector for the join below)
	batch_size = T.prod(input.shape[:-2])
	batch_size = T.shape_padright(batch_size,1)
	new_shape = T.cast(T.join(0, batch_size,T.as_tensor([1,]),frame_shape), 'int32')
	
	input_4D = T.reshape(input, new_shape, ndim=4)
	# downsample mini-batch of videos in rows and cols
	op = DownsampleFactorMax((ds[1],ds[2]), ignore_border)
	output = op(input_4D)
	# restore to original shape
	outshape = T.join(0, input.shape[:-2], output.shape[-2:])
	out = T.reshape(output, outshape, ndim=input.ndim)
	
	#Maxpool time 
	# output (time, rows, cols), reshape so that time is in the back
	shufl = (list(range(vid_dim-4)) + list(range(vid_dim-3,vid_dim))+[vid_dim-4])
	input_time = out.dimshuffle(shufl)
	# reset dimensions
	vid_shape = input_time.shape[-2:]
	# collapse all leading dimensions into a single batch size (kept as a length-1 vector for the join below)
	batch_size = T.prod(input_time.shape[:-2])
	batch_size = T.shape_padright(batch_size,1)
	# store as 4D tensor with shape: (batch_size,1,width,time)
	new_shape = T.cast(T.join(0, batch_size,T.as_tensor([1,]),vid_shape), 'int32')
	input_4D_time = T.reshape(input_time, new_shape, ndim=4)
	# downsample mini-batch of videos in time
	op = DownsampleFactorMax((1,ds[0]), ignore_border)
	outtime = op(input_4D_time)
	# restore to original shape (xxx, rows, cols, time)
	outshape = T.join(0, input_time.shape[:-2], outtime.shape[-2:])
	shufl = (list(range(vid_dim-4)) + [vid_dim-1] + list(range(vid_dim-4,vid_dim-1)))
	#shufl = (list(range(vid_dim-3)) + [vid_dim-1]+[vid_dim-3]+[vid_dim-2])
	return T.reshape(outtime, outshape, ndim=input.ndim).dimshuffle(shufl)
Developer: IITM-DONLAB | Project: python-dnn | Lines: 53 | Source: max_pool.py

Example 12: make_node

 def make_node(self, frames, n, axis):
     """ compute an n-point fft of frames along given axis """
     _frames = tensor.as_tensor(frames, ndim=2)
     _n = tensor.as_tensor(n, ndim=0)
     _axis = tensor.as_tensor(axis, ndim=0)
     if self.half and _frames.type.dtype.startswith('complex'):
         raise TypeError('Argument to HalfFFT must not be complex', frames)
     spectrogram = tensor.zmatrix()
     buf = generic()
     # The `buf` output is present for future work
     # when we call FFTW directly and re-use the 'plan' that FFTW creates.
     # In that case, buf would store a CObject encapsulating the plan.
     rval = Apply(self, [_frames, _n, _axis], [spectrogram, buf])
     return rval
Developer: ChienliMa | Project: Theano | Lines: 14 | Source: fourier.py
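
A short side sketch of the ndim argument used in make_node above: tensor.as_tensor(x, ndim=k) normalizes the rank of the returned variable (values are illustrative):

import numpy
from theano import tensor

_frames = tensor.as_tensor(numpy.zeros((4, 8)), ndim=2)   # already 2-D, returned as-is
_n = tensor.as_tensor(8, ndim=0)                          # plain int becomes a 0-D tensor
_axis = tensor.as_tensor(0, ndim=0)
print(_frames.ndim, _n.ndim, _axis.ndim)                  # 2 0 0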

Example 13: updates

    def updates(self, gradients):
        """
        Return symbolic updates to apply given a set of gradients
        on the parameters being optimized.

        Parameters
        ----------
        gradients : list of tensor_likes
            List of symbolic gradients for the parameters contained
            in self.params, in the same order as in self.params.

        Returns
        -------
        updates : dict
            A dictionary with the shared variables in self.params as keys
            and a symbolic expression of how they are to be updated each
            SGD step as values.

        Notes
        -----
        `cost_updates` is a convenient helper function that takes all
        necessary gradients with respect to a given symbolic cost.
        """
        ups = {}
        # Add the learning rate/iteration updates
        l_ups, learn_rates = self.learning_rate_updates()
        safe_update(ups, l_ups)

        # Get the updates from sgd_updates, a PyLearn library function.
        p_up = dict(sgd_updates(self.params, gradients, learn_rates))

        # Add the things in p_up to ups
        safe_update(ups, p_up)

        # Clip the values if needed.
        # We do not want the clipping values to force an upcast
        # of the update: updates should have the same type as params
        for param, (p_min, p_max) in self.clipping_values.iteritems():
            p_min = tensor.as_tensor(p_min)
            p_max = tensor.as_tensor(p_max)
            dtype = param.dtype
            if p_min.dtype != dtype:
                p_min = tensor.cast(p_min, dtype)
            if p_max.dtype != dtype:
                p_max = tensor.cast(p_max, dtype)
            ups[param] = tensor.clip(ups[param], p_min, p_max)

        # Return the updates dictionary.
        return ups
Developer: jaberg | Project: pylearn | Lines: 49 | Source: optimizer.py
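
The clipping logic at the end can be exercised on its own; a minimal sketch with a single shared parameter and a dummy update (names, bounds and the update rule are illustrative, not the pylearn optimizer API):

import numpy
import theano
from theano import tensor

param = theano.shared(numpy.arange(5, dtype='float32'), name='param')
proposed = param - numpy.float32(0.5)        # dummy "SGD step"
p_min, p_max = tensor.as_tensor(1.0), tensor.as_tensor(3.0)
if p_min.dtype != param.dtype:
    p_min = tensor.cast(p_min, param.dtype)  # avoid upcasting the update
if p_max.dtype != param.dtype:
    p_max = tensor.cast(p_max, param.dtype)
step = theano.function([], [], updates={param: tensor.clip(proposed, p_min, p_max)})
step()
print(param.get_value())                     # [1.  1.  1.5 2.5 3. ]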

Example 14: test_2

def test_2():
  n_time = 11
  n_dim = 5
  numpy.random.seed(1234)
  _x = numpy.random.randn(n_time, n_dim).astype(f32)
  _idx = numpy.random.randint(0, n_dim, (n_time,))
  assert _idx.shape == (n_time,)
  x = T.as_tensor(_x)
  idx = T.as_tensor(_idx)
  y = subtensor_batched_index(x, idx)
  ts = T.arange(x.shape[0])
  y2 = x[ts, idx[ts]]
  _y = y.eval()
  _y2 = y2.eval()
  assert_almost_equal(_y, _y2)
Developer: rwth-i6 | Project: returnn | Lines: 15 | Source: test_NativeOp_subtensor_batched_index.py
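
The pure-Theano reference expression from this test, x[ts, idx[ts]], runs on its own without the RETURNN NativeOp; a minimal sketch:

import numpy
import theano.tensor as T

_x = numpy.random.randn(11, 5).astype('float32')
_idx = numpy.random.randint(0, 5, (11,))
x, idx = T.as_tensor(_x), T.as_tensor(_idx)
y2 = x[T.arange(x.shape[0]), idx]   # pick one column per time step
print(y2.eval().shape)              # (11,)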

Example 15: grad

    def grad(self, inputs, dCdf):
        CT = T.as_tensor(self.ct)
        S = T.as_tensor(self.s)
        (jac1, jac2), _ = th.scan(fn=self._grad_single,
                                  sequences=[CT, S],
                                  non_sequences=[inputs[0][1:], inputs[0][0]])

#        for t in self.ct:
#            out = self._grad_single(t, s)

#        Jac1 = T.reshape(jac1, newshape=(1,-1))
#        Jac2 = T.reshape(jac2, newshape=(1,-1))
        Jac = T.concatenate([jac1, jac2], axis=0)
#        return Jac1*dCdf[0][0] + Jac2*dCdf[0][1],
        return Jac.T.dot(dCdf[0]),
Developer: dcuevasr | Project: actinf | Lines: 15 | Source: actinfThClass.py


Note: The theano.tensor.as_tensor examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by their respective developers; copyright of the source code remains with the original authors. Please consult each project's license before distributing or using the code, and do not reproduce this article without permission.