

Python FunctionSet.embed Method Code Examples

This article collects typical usage examples of the Python method chainer.FunctionSet.embed. If you have been wondering what FunctionSet.embed does, how to call it, or where to find working examples, the curated code samples below should help. You can also follow up with the broader usage examples for chainer.FunctionSet.


Below are 3 code examples of the FunctionSet.embed method, sorted by popularity by default.
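All three examples share the same pattern: an F.EmbedID layer is attached to a FunctionSet under the name embed and then used to look up word vectors. As a minimal orientation sketch (the vocabulary size and unit count below are placeholders, not values taken from the examples):

import numpy as np
import chainer
from chainer import FunctionSet
import chainer.functions as F

n_vocab, n_units = 1000, 512  # placeholder sizes
model = FunctionSet()
model.embed = F.EmbedID(n_vocab, n_units)  # one learnable lookup matrix W

word_ids = chainer.Variable(np.array([3, 41, 7], dtype=np.int32))
vectors = model.embed(word_ids)  # Variable of shape (3, n_units)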

Example 1: FunctionSet

# Required import: from chainer import FunctionSet [as alias]
# Or equivalently: from chainer.FunctionSet import embed [as alias]
    rawim = np.copy(im).astype('uint8')
    
    # Shuffle axes to c01
    im = np.swapaxes(np.swapaxes(im, 1, 2), 0, 1)
    
    # Convert to BGR
    im = im[::-1, :, :]

    im = im - MEAN_VALUES
    # Return the preprocessed image (CHW, BGR, mean-subtracted) as float32
    return im.astype(np.float32)
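# Note: MEAN_VALUES is defined outside this excerpt. For a Caffe GoogLeNet it
# is commonly the ILSVRC BGR channel means broadcast over the image, e.g.
# (an assumption, not taken from the source):
# MEAN_VALUES = np.array([104.0, 117.0, 123.0]).reshape((3, 1, 1))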

# Model Preparation
print "preparing caption generation models"
model = FunctionSet()
model.img_feature2vec = F.Linear(image_feature_dim, n_units)  # corresponds to the last layer of CNN(I); parameters W, b
model.embed = F.EmbedID(len(vocab), n_units)  # corresponds to W_e * S_t; parameter W
model.l1_x = F.Linear(n_units, 4 * n_units)  # parameters W, b
model.l1_h = F.Linear(n_units, 4 * n_units)  # parameters W, b
model.out = F.Linear(n_units, len(vocab))  # parameters W, b

serializers.load_hdf5(model_place, model)

# To GPU
if gpu_id >= 0:
    model.to_gpu()
print "done"

# Define Network (Forward)

# forward_one_step runs after the CNN layer;
# h0 is an n_units-dimensional vector (the word embedding)
Developer: TakumiIwata, Project: chainer_caption_generation, Lines of code: 33, Source file: generate_caption.py
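The excerpt above stops before forward_one_step is defined, and it never shows how the recurrent state is initialized. A minimal sketch of an initial state, assuming the zero-initialized 'c1'/'h1' keys that the forward code in Example 3 reads (make_initial_state is a hypothetical helper, not part of the source):

def make_initial_state(batchsize=1, volatile='on'):
    # Hypothetical helper: zero LSTM cell and hidden vectors matching the
    # state['c1'] / state['h1'] keys used by the forward steps.
    zeros = np.zeros((batchsize, n_units), dtype=np.float32)
    return {'c1': chainer.Variable(zeros, volatile=volatile),
            'h1': chainer.Variable(zeros, volatile=volatile)}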

Example 2: FunctionSet

# Required import: from chainer import FunctionSet [as alias]
# Or equivalently: from chainer.FunctionSet import embed [as alias]
    
    rawim = np.copy(im).astype('uint8')
    
    # Shuffle axes to c01
    im = np.swapaxes(np.swapaxes(im, 1, 2), 0, 1)
    
    # Convert to BGR
    im = im[::-1, :, :]

    im = im - MEAN_VALUES
    # Return the preprocessed image (CHW, BGR, mean-subtracted) as float32
    return im.astype(np.float32)

# Model Preparation
model = FunctionSet()
model.img_feature2vec = F.Linear(image_feature_dim, n_units)
model.embed = F.EmbedID(len(vocab), n_units)
model.l1_x = F.Linear(n_units, 4 * n_units)  # parameters W, b
model.l1_h = F.Linear(n_units, 4 * n_units)  # parameters W, b
model.out = F.Linear(n_units, len(vocab))  # parameters W, b

serializers.load_hdf5(model_place, model)

# To GPU
if gpu_id >= 0:
    model.to_gpu()

# Define Network (Forward)

# forward_one_step runs after the CNN layer;
# h0 is an n_units-dimensional vector (the word embedding)
def forward_one_step(cur_word, state, volatile='on'):
Developer: acharyaomkar01, Project: VidScribe, Lines of code: 33, Source file: generate_caption_googlenet.py
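The listing cuts off at the forward_one_step header. Judging from the full method in Example 3 below, its body presumably continues along these lines (a sketch adapted from Example 3, not the verbatim source):

def forward_one_step(cur_word, state, volatile='on'):
    # One decoding step: embed the current word, advance the LSTM,
    # and return the new state plus a softmax over the vocabulary.
    x = chainer.Variable(cur_word, volatile)
    h0 = model.embed(x)
    h1_in = model.l1_x(F.dropout(h0, train=False)) + model.l1_h(state['h1'])
    c1, h1 = F.lstm(state['c1'], h1_in)
    y = model.out(F.dropout(h1, train=False))
    state = {'c1': c1, 'h1': h1}
    return state, F.softmax(y)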

Example 3: Caption_generator

# Required import: from chainer import FunctionSet [as alias]
# Or equivalently: from chainer.FunctionSet import embed [as alias]
class Caption_generator(object):
    def __init__(self, caption_model_place, cnn_model_place, index2word_place, gpu_id=-1, beamsize=3):
        # basic parameters you may need to modify
        self.gpu_id = gpu_id  # GPU ID; use -1 for CPU
        self.beamsize = beamsize

        # GPU Setting
        global xp
        if self.gpu_id >= 0:
            xp = cuda.cupy
            cuda.get_device(gpu_id).use()
        else:
            xp = np

        # Prepare dataset
        with open(index2word_place, 'r') as f:
            self.index2word = pickle.load(f)
        vocab=self.index2word

        # Load Caffe Model
        with open(cnn_model_place, 'r') as f:
            self.func = pickle.load(f)

        # Model Preparation
        image_feature_dim = 1024  # dimension of the image feature
        self.n_units = 512  # number of units per layer
        n_units = 512
        self.model = FunctionSet()
        self.model.img_feature2vec = F.Linear(image_feature_dim, n_units)  # corresponds to the last layer of CNN(I); parameters W, b
        self.model.embed = F.EmbedID(len(vocab), n_units)  # corresponds to W_e * S_t; parameter W
        self.model.l1_x = F.Linear(n_units, 4 * n_units)  # parameters W, b
        self.model.l1_h = F.Linear(n_units, 4 * n_units)  # parameters W, b
        self.model.out = F.Linear(n_units, len(vocab))  # parameters W, b
        serializers.load_hdf5(caption_model_place, self.model)  # load the pre-trained model

        # To GPU
        if gpu_id >= 0:
            self.model.to_gpu()
            self.func.to_gpu()

        # To avoid overflow: for some reason this model overflows on the
        # first run with CPU only, so we intentionally trigger it once here
        # so that it never happens afterwards.
        if gpu_id < 0:
            numpy_image = np.ones((3, 224,224), dtype=np.float32)
            self.generate(numpy_image)

    def feature_exractor(self, x_chainer_variable):  # extract the image feature with the CNN
        y, = self.func(inputs={'data': x_chainer_variable}, outputs=['pool5/7x7_s1'],
                      disable=['loss1/ave_pool', 'loss2/ave_pool','loss3/classifier'],
                      train=False)
        return y

    def forward_one_step_for_image(self, img_feature, state, volatile='on'):
        x = img_feature  # img_feature is a chainer.Variable
        h0 = self.model.img_feature2vec(x)
        h1_in = self.model.l1_x(F.dropout(h0, train=False)) + self.model.l1_h(state['h1'])
        c1, h1 = F.lstm(state['c1'], h1_in)
        y = self.model.out(F.dropout(h1, train=False))  # dropout must stay in non-train mode at inference
        state = {'c1': c1, 'h1': h1}
        return state, F.softmax(y)

    # forward_one_step runs after the CNN layer;
    # h0 is an n_units-dimensional vector (the word embedding)
    def forward_one_step(self, cur_word, state, volatile='on'):
        x = chainer.Variable(cur_word, volatile)
        h0 = self.model.embed(x)
        h1_in = self.model.l1_x(F.dropout(h0, train=False)) + self.model.l1_h(state['h1'])
        c1, h1 = F.lstm(state['c1'], h1_in)
        y = self.model.out(F.dropout(h1, train=False))
        state = {'c1': c1, 'h1': h1}
        return state, F.softmax(y)

    def beam_search(self, sentence_candidates, final_sentences, depth=1, beamsize=3):
        volatile = True
        next_sentence_candidates_temp = list()
        for sentence_tuple in sentence_candidates:
            cur_sentence = sentence_tuple[0]
            cur_index = sentence_tuple[0][-1]  # last word index of the candidate so far
            cur_index_xp = xp.array([cur_index], dtype=np.int32)
            cur_state = sentence_tuple[1]
            cur_log_likely = sentence_tuple[2]

            state, predicted_word = self.forward_one_step(cur_index_xp, cur_state, volatile=volatile)
            predicted_word_np = cuda.to_cpu(predicted_word.data)
            top_indexes = (-predicted_word_np).argsort()[0][:beamsize]

            for index in np.nditer(top_indexes):
                index = int(index)
                probability = predicted_word_np[0][index]
                next_sentence = copy.deepcopy(cur_sentence)
                next_sentence.append(index)
                log_likely = math.log(probability)
                next_log_likely = cur_log_likely + log_likely
                next_sentence_candidates_temp.append((next_sentence, state, next_log_likely))  # each candidate is a (sentence, state, log-likelihood) tuple

        prob_np_array = np.array([sentence_tuple[2] for sentence_tuple in next_sentence_candidates_temp])
        top_candidates_indexes = (-prob_np_array).argsort()[:beamsize]
        next_sentence_candidates = list()
        for i in top_candidates_indexes:
# ......... part of this code omitted .........
Developer: yuyay, Project: chainer_caption_generation, Lines of code: 103, Source file: caption_generator.py
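To round off Example 3, a hypothetical usage sketch: the file paths below are placeholders, and generate() is the method that __init__ calls on the dummy image but that the excerpt omits.

gen = Caption_generator(caption_model_place='caption_model.hdf5',
                        cnn_model_place='googlenet_caffe.pkl',
                        index2word_place='index2word.pkl',
                        gpu_id=-1, beamsize=3)
image = np.ones((3, 224, 224), dtype=np.float32)  # dummy CHW image
caption = gen.generate(image)  # generate() is not shown in the excerpt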

