This page collects typical usage examples of the Python method feature_extractor.FeatureExtractor.featurize. If you are unsure what FeatureExtractor.featurize does, how to call it, or where it is used, the curated example below may help. You can also look into the enclosing class, feature_extractor.FeatureExtractor, for more context.
The section below presents 1 code example of the FeatureExtractor.featurize method.
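Before the full example, a minimal sketch of the call pattern may help: featurize maps a natural-language sentence to a fixed-length float vector of size n, which callers then copy into a larger input vector. The FeatureExtractor below is a hypothetical stand-in (a plain bag-of-words over a fixed vocabulary), not the real feature_extractor module, whose constructor and feature set may differ.

import numpy as np

class FeatureExtractor:
    ''' hypothetical stand-in: bag-of-words counts over a fixed vocabulary '''
    def __init__(self, vocab):
        self.vocab = {w: i for i, w in enumerate(vocab)}
        self.n = len(vocab)  # feature-vector size; Example 1 reads this as feat_extractor.n

    def featurize(self, sentence):
        vec = np.zeros((self.n,), dtype='float32')
        for w in sentence.lower().split():
            if w in self.vocab:
                vec[self.vocab[w]] += 1.
        return vec

feat_extractor = FeatureExtractor(['i', 'want', 'a', 'cheap', 'restaurant'])
p = feat_extractor.featurize('I want a cheap restaurant downtown')
print(p.shape)  # (5,) -- Example 1 copies this into p_vector[:feat_extractor.n]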
Example 1: AgentE2ERLAllAct
# Module required: from feature_extractor import FeatureExtractor [as alias]
# Or: from feature_extractor.FeatureExtractor import featurize [as alias]
#......... (part of the code omitted here) .........
        # (elided branch above: pre-policy supervised updates also report
        # the belief-tracker losses kl_loss and x_loss)
        if self.num_updates%DISPF==0: self._print_progress(loss, t_elap, kl_loss, x_loss)
    else:
        # reinforcement-learning phase: policy-gradient update
        loss = self.update(regime='RL')
        t_elap = time.time() - tst
        if self.num_updates%DISPF==0: self._print_progress(loss, t_elap)
    if self.num_updates%SAVEF==0: self.save_model(dialog_config.MODEL_PATH+self._name)
# (enclosing method header elided in the original listing: this block resets
# the per-episode dialog state)
self.state = {}
# deep copy of the database, so in-episode updates don't mutate the original
self.state['database'] = pkl.loads(pkl.dumps(self.database, -1))
self.state['prevact'] = 'begin@begin'  # previous agent action, encoded as 'act@slot'
self.state['inform_slots'] = self._init_beliefs()
self.state['turn'] = 0
self.state['num_requests'] = {s: 0 for s in self.state['database'].slots}
self.state['slot_tracker'] = set()
self.state['dont_care'] = set()
# uniform prior over the N database rows
p_db_i = (1./self.state['database'].N)*np.ones((self.state['database'].N,))
self.state['init_entropy'] = calc_entropies(self.state['inform_slots'], p_db_i,
        self.state['database'])
# per-episode rollout buffers for training
self.state['inputs'] = []
self.state['actions'] = []
self.state['rewards'] = []
self.state['indices'] = []
self.state['ptargets'] = []
self.state['phitargets'] = []
# recurrent hidden states: one belief-tracker state per informable slot,
# plus the policy network's state
self.state['hid_state'] = [np.zeros((1,self.r_hid)).astype('float32') \
        for s in dialog_config.inform_slots]
self.state['pol_state'] = np.zeros((1,self.n_hid)).astype('float32')
''' get next action based on rules '''
def next(self, user_action, verbose=False):
    self.state['turn'] += 1

    # featurize the user's utterance (first feat_extractor.n dims), then append
    # a one-hot encoding of the slot requested on the previous turn
    p_vector = np.zeros((self.in_size,)).astype('float32')
    p_vector[:self.feat_extractor.n] = self.feat_extractor.featurize( \
            user_action['nl_sentence'])
    if self.state['turn']>1:
        pr_act = self.state['prevact'].split('@')
        assert pr_act[0]!='inform', 'Agent called after informing!'
        act_id = dialog_config.inform_slots.index(pr_act[1])
        p_vector[self.feat_extractor.n+act_id] = 1
    p_vector = np.expand_dims(np.expand_dims(p_vector, axis=0), axis=0)
    p_vector = standardize(p_vector)

    p_targets = []
    phi_targets = []
    if self.training and self.num_updates<self.pol_start:
        # pre-policy phase: act on policy but train on expert
        self._update_state(user_action['nl_sentence'], upd=self.upd, verbose=verbose)
        db_probs = self._check_db()
        H_db = tools.entropy_p(db_probs)
        H_slots = calc_entropies(self.state['inform_slots'], db_probs, self.state['database'])
        # uncertainty profile: per-slot belief entropies plus the database
        # posterior entropy in the last position
        pp = np.zeros((len(dialog_config.inform_slots)+1,))
        for i,s in enumerate(dialog_config.inform_slots):
            pp[i] = H_slots[s]
        pp[-1] = H_db
        pp = np.expand_dims(np.expand_dims(pp, axis=0), axis=0)
        _, action = self._rule_act(pp, db_probs)
        act, _, p_out, hid_out, p_db = self._prob_act(p_vector, mode='sample')
        # supervision targets: normalized slot beliefs and binary don't-care flags
        for s in dialog_config.inform_slots:
            p_s = self.state['inform_slots'][s]/self.state['inform_slots'][s].sum()
            p_targets.append(p_s)
            if s in self.state['dont_care']:
                phi_targets.append(np.ones((1,)).astype('float32'))
            else:
                phi_targets.append(np.zeros((1,)).astype('float32'))
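The listing is truncated above, but the rule ("expert") branch is worth unpacking: pp stacks the entropy of each slot's belief distribution, with the entropy of the posterior over database rows (H_db) in the last position, and _rule_act turns that uncertainty profile into an action, presumably requesting an uncertain slot or informing once the database posterior is peaked. A minimal, self-contained sketch of that signal, where entropy_p and calc_entropies are simplified stand-ins for the versions imported in the full source:

import numpy as np

def entropy_p(p):
    # Shannon entropy of a discrete distribution (stand-in for tools.entropy_p)
    p = np.asarray(p, dtype='float64')
    p = p[p > 0]
    return float(-(p * np.log(p)).sum())

def calc_entropies(beliefs, db_probs, database=None):
    # entropy of each slot's normalized belief vector (simplified stand-in)
    return {s: entropy_p(b / b.sum()) for s, b in beliefs.items()}

beliefs = {
    'cuisine': np.array([0.70, 0.20, 0.10]),   # fairly certain
    'area':    np.array([0.34, 0.33, 0.33]),   # near-uniform, i.e. most uncertain
}
db_probs = np.array([0.05, 0.05, 0.80, 0.10])  # posterior over database rows

H_slots = calc_entropies(beliefs, db_probs)
H_db = entropy_p(db_probs)
pp = np.array([H_slots['cuisine'], H_slots['area'], H_db])
print(max(H_slots, key=H_slots.get), pp.round(3))  # 'area' is the slot to query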