This page collects typical usage examples of the Python class alex.components.slu.da.DialogueActConfusionNetwork. If you are unsure what DialogueActConfusionNetwork is for or how to use it, the examples selected here should help.
The following shows 15 code examples of the DialogueActConfusionNetwork class, ordered by popularity by default.
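Before the examples, here is a minimal sketch of the API surface the excerpts below rely on: building an empty network, adding DialogueActItems with confidence scores, pruning, sorting, and iterating over (probability, item) pairs. The import line and the comments are inferences from the examples on this page, not authoritative documentation.

# Minimal sketch based on the examples below; the import path follows the module name in the title.
from alex.components.slu.da import DialogueActConfusionNetwork, DialogueActItem

dacn = DialogueActConfusionNetwork()
dacn.add(0.7, DialogueActItem(dai='inform(food=czech)'))       # one weighted dialogue act item per call
dacn.add(0.2, DialogueActItem(dai='inform(food=chinese)'))
dacn.add(0.00005, DialogueActItem(dai='inform(food=russian)'))

dacn.prune()   # drops very unlikely items (0.00005 is pruned in Example 3 while 0.05 survives)
dacn.sort()    # orders items from most to least probable (see Example 5)

for prob, dai in dacn:   # iteration yields (probability, item) pairs
    print prob, dai      # Python 2 print, matching the excerpts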
Example 1: _build_confusion_network
def _build_confusion_network(self, sampled_da_items):
    confusion_net = DialogueActConfusionNetwork()
    for da_items, probs in sampled_da_items:
        for dai, prob in zip(da_items, probs):
            confusion_net.add_merge(prob, dai)
    return confusion_net
Example 2: _build_confusion_network
def _build_confusion_network(self, sampled_da_items):
    '''Build a confusion network from a list of DialogueActItems and their observation probabilities.'''
    confusion_net = DialogueActConfusionNetwork()
    for da_items, probs in sampled_da_items:
        for dai, prob in zip(da_items, probs):
            confusion_net.add_merge(prob, dai)
    return confusion_net
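Examples 1 and 2 are methods taken out of their class, so the page does not show how they are called. The following standalone sketch is hypothetical: it reuses the same body (and the import from the sketch above) and shows one plausible shape of sampled_da_items, a list of (items, probabilities) pairs; duplicate items end up merged by add_merge.

# Hypothetical, standalone restatement of Examples 1 and 2 with made-up input data.
def build_confusion_network(sampled_da_items):
    confusion_net = DialogueActConfusionNetwork()
    for da_items, probs in sampled_da_items:
        for dai, prob in zip(da_items, probs):
            confusion_net.add_merge(prob, dai)
    return confusion_net

sampled_da_items = [
    ([DialogueActItem(dai='inform(food=czech)'),
      DialogueActItem(dai='inform(food=chinese)')], [0.7, 0.2]),
    ([DialogueActItem(dai='inform(food=czech)')], [0.4]),
]
cn = build_confusion_network(sampled_da_items)   # the duplicate czech item is merged, not duplicated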
Example 3: test_prune
def test_prune(self):
    dacn = DialogueActConfusionNetwork()
    dacn.add(0.05, DialogueActItem(dai='inform(food=chinese)'))
    dacn.add(0.9, DialogueActItem(dai='inform(food=czech)'))
    dacn.add(0.00005, DialogueActItem(dai='inform(food=russian)'))

    # Russian food should be pruned.
    self.assertEqual(len(dacn), 3)
    dacn.prune()
    self.assertEqual(len(dacn), 2)
    self.assertTrue(DialogueActItem(dai='inform(food=russian)') not in dacn)
Example 4: test_get_prob
def test_get_prob(self):
    dacn = DialogueActConfusionNetwork()
    dacn.add(0.2, DialogueActItem(dai='inform(food=chinese)'))
    dacn.add(0.7, DialogueActItem(dai='inform(food=czech)'))
    dacn.add(0.1, DialogueActItem(dai='inform(food=russian)'))

    self.assertAlmostEqual(dacn._get_prob([0, 1, 1]), 0.2 * 0.3 * 0.9)
    self.assertAlmostEqual(dacn._get_prob([0, 0, 0]), 0.2 * 0.7 * 0.1)
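One way to read the assertions above (an interpretation of the test, not official documentation of the private _get_prob method): the index vector picks, for every item in the network, either its probability (index 0) or its complement (index 1), and the picks are multiplied together.

# Reproducing the first assertion's arithmetic under that reading.
probs = [0.2, 0.7, 0.1]        # chinese, czech, russian
indices = [0, 1, 1]            # keep p for the first item, take 1 - p for the other two
joint = 1.0
for p, i in zip(probs, indices):
    joint *= p if i == 0 else (1.0 - p)
print joint                    # 0.054 == 0.2 * 0.3 * 0.9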
Example 5: test_sort
def test_sort(self):
    dacn = DialogueActConfusionNetwork()
    dacn.add(0.05, DialogueActItem(dai='inform(food=chinese)'))
    dacn.add(1.0, DialogueActItem(dai='inform(food=czech)'))
    dacn.add(0.00005, DialogueActItem(dai='inform(food=russian)'))
    dacn.sort()

    cn = list(dacn)
    self.assertEqual(cn[0][1], DialogueActItem(dai='inform(food=czech)'))
    self.assertEqual(cn[1][1], DialogueActItem(dai='inform(food=chinese)'))
    self.assertEqual(cn[2][1], DialogueActItem(dai='inform(food=russian)'))
Example 6: test_add_merge
def test_add_merge(self):
    dai = DialogueActItem(dai='inform(food=chinese)')
    dacn = DialogueActConfusionNetwork()

    dacn.add_merge(0.5, dai, combine='add')
    self.assertEqual(dacn._get_prob([0]), 0.5)

    dacn.add_merge(0.5, dai, combine='add')
    self.assertEqual(dacn._get_prob([0]), 1.0)
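Example 6 suggests that add_merge(..., combine='add') folds the new probability into the existing hypothesis for the same item rather than appending a duplicate entry (0.5 + 0.5 gives a single hypothesis at 1.0). How sums above 1.0 are handled is not shown on this page, so the sketch below stays within that range.

# Summing behaviour implied by Example 6; values chosen to stay below 1.0.
dai = DialogueActItem(dai='inform(food=chinese)')
dacn = DialogueActConfusionNetwork()
dacn.add_merge(0.3, dai, combine='add')
dacn.add_merge(0.4, dai, combine='add')   # same item: its probability becomes 0.3 + 0.4 = 0.7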
Example 7: last_talked_about
def last_talked_about(self, user_da, system_da):
    """This adds dialogue act items to support inference of the last slots the user talked about."""
    old_user_da = deepcopy(user_da)
    new_user_da = DialogueActConfusionNetwork()

    for prob, user_dai in user_da:
        new_user_dais = []
        lta_tsvs = self.ontology.last_talked_about(user_dai.dat, user_dai.name, user_dai.value)

        for name, value in lta_tsvs:
            new_user_dais.append(DialogueActItem("inform", name, value))

        if new_user_dais:
            for nudai in new_user_dais:
                new_user_da.add(prob, nudai)

    old_user_da.extend(new_user_da)
    return old_user_da
Example 8: _infer_last_talked_about_slots
def _infer_last_talked_about_slots(self, user_da, system_da):
    """This adds dialogue act items to support inference of the last slots the user talked about."""
    old_user_da = deepcopy(user_da)
    new_user_da = DialogueActConfusionNetwork()

    colliding_slots = {}
    done_slots = set()
    for prob, user_dai in user_da:
        new_user_dais = []
        lta_tsvs = self.ontology.last_talked_about(user_dai.dat, user_dai.name, user_dai.value)

        for name, value in lta_tsvs:
            new_user_dais.append(DialogueActItem("inform", name, value))
            if name in done_slots:
                if not name in colliding_slots:
                    colliding_slots[name] = set()
                colliding_slots[name].add(value)
            else:
                done_slots.add(name)

        if new_user_dais:
            for nudai in new_user_dais:
                if not nudai in new_user_da:
                    new_user_da.add(prob, nudai)

    # In case of collisions, prefer the current last-talked-about value if it is one of the
    # colliding values. If there is a collision and the current last-talked-about value is not
    # among the colliding values, do not consider the colliding DAs at all.
    invalid_das = set()
    for prob, da in set(new_user_da):
        if da.name in colliding_slots and self[da.name].mpv() in colliding_slots[da.name]:
            if not da.value == self[da.name].mpv():
                invalid_das.add(da)
        elif da.name in colliding_slots:
            invalid_das.add(da)

    for invalid_da in invalid_das:
        new_user_da.remove(invalid_da)

    old_user_da.merge(new_user_da, combine='max')
    return old_user_da
Example 9: _resolve_user_da_in_context
def _resolve_user_da_in_context(self, user_da, system_da):
    """Resolves and converts meaning of some user dialogue acts
    given the context."""
    old_user_da = deepcopy(user_da)
    new_user_da = DialogueActConfusionNetwork()

    if isinstance(system_da, DialogueAct):
        for system_dai in system_da:
            for prob, user_dai in user_da:
                new_user_dai = None

                if system_dai.dat == "confirm" and user_dai.dat == "affirm":
                    new_user_dai = DialogueActItem("inform", system_dai.name, system_dai.value)
                elif system_dai.dat == "confirm" and user_dai.dat == "negate":
                    new_user_dai = DialogueActItem("deny", system_dai.name, system_dai.value)
                elif system_dai.dat == "request" and user_dai.dat == "inform" and \
                        user_dai.name in self.ontology['context_resolution'] and \
                        system_dai.name in self.ontology['context_resolution'][user_dai.name] and \
                        user_dai.value == "dontcare":
                    new_user_dai = DialogueActItem("inform", system_dai.name, system_dai.value)
                elif system_dai.dat == "request" and user_dai.dat == "inform" and \
                        user_dai.name in self.ontology['context_resolution'] and \
                        system_dai.name in self.ontology['context_resolution'][user_dai.name] and \
                        self.ontology.slot_has_value(system_dai.name, user_dai.value):
                    new_user_dai = DialogueActItem("inform", system_dai.name, user_dai.value)
                elif system_dai.dat == "request" and system_dai.name != "" and \
                        user_dai.dat == "affirm" and self.ontology.slot_is_binary(system_dai.name):
                    new_user_dai = DialogueActItem("inform", system_dai.name, "true")
                elif system_dai.dat == "request" and system_dai.name != "" and \
                        user_dai.dat == "negate" and self.ontology.slot_is_binary(system_dai.name):
                    new_user_dai = DialogueActItem("inform", system_dai.name, "false")

                if new_user_dai:
                    new_user_da.add(prob, new_user_dai)

    old_user_da.merge(new_user_da, combine='max')
    return old_user_da
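A concrete reading of the first two branches in Example 9: after the system asks confirm(food=chinese), a bare user affirm() is rewritten to inform(food=chinese), and a bare negate() to deny(food=chinese). The sketch below only shows the input shapes; the act strings are illustrative, and actually calling the method requires the surrounding class and its ontology, so the expected output is given in comments.

# Illustrative inputs for Example 9; DialogueAct is assumed to live in the same module as the other classes.
from alex.components.slu.da import DialogueAct

system_da = DialogueAct('confirm(food=chinese)')

user_da = DialogueActConfusionNetwork()
user_da.add(0.8, DialogueActItem(dai='affirm()'))   # would be merged in as 0.8  inform(food=chinese)
user_da.add(0.2, DialogueActItem(dai='negate()'))   # would be merged in as 0.2  deny(food=chinese)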
Example 10: parse_nblist
def parse_nblist(self, obs, *args, **kwargs):
    """
    Parses an observation featuring an utterance n-best list using the
    parse_1_best method.

    Arguments:
        obs -- a dictionary of observations
            :: observation type -> observed value
            where observation type is one of the values for `obs_type' used in
            `ft_props', and the observed value is the corresponding observed
            value for the input
        args -- further positional arguments that should be passed to the
            `parse_1_best' method call
        kwargs -- further keyword arguments that should be passed to the
            `parse_1_best' method call
    """
    nblist = obs['utt_nbl']
    if len(nblist) == 0:
        return DialogueActConfusionNetwork()

    obs_wo_nblist = copy.deepcopy(obs)
    del obs_wo_nblist['utt_nbl']

    dacn_list = []
    for prob, utt in nblist:
        if "_other_" == utt:
            dacn = DialogueActConfusionNetwork()
            dacn.add(1.0, DialogueActItem("other"))
        elif "_silence_" == utt:
            dacn = DialogueActConfusionNetwork()
            dacn.add(1.0, DialogueActItem("silence"))
        else:
            obs_wo_nblist['utt'] = utt
            dacn = self.parse_1_best(obs_wo_nblist, *args, **kwargs)
        dacn_list.append((prob, dacn))

    dacn = merge_slu_confnets(dacn_list)
    dacn.prune()
    dacn.sort()
    return dacn
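Example 10 leaves the actual combination of the per-hypothesis networks to merge_slu_confnets, which takes a list of (probability, confusion network) pairs, with the probabilities coming from the ASR n-best list. A minimal sketch of that call, assuming the helper is importable from alex.components.slu.da alongside the classes:

# Minimal sketch; the import location of merge_slu_confnets is an assumption.
from alex.components.slu.da import merge_slu_confnets

dacn1 = DialogueActConfusionNetwork()
dacn1.add(1.0, DialogueActItem(dai='inform(food=czech)'))

dacn2 = DialogueActConfusionNetwork()
dacn2.add(1.0, DialogueActItem(dai='inform(food=chinese)'))

merged = merge_slu_confnets([(0.6, dacn1), (0.4, dacn2)])   # weights taken from the n-best list
merged.prune()
merged.sort()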
Example 11: main
def main():
    # initialize tracker and state
    slots = ["food", "location"]
    tr = DSTCTracker(slots)
    state = DSTCState(slots)
    state.pprint()

    # try to update state with some information
    print '---'
    cn = DialogueActConfusionNetwork()
    cn.add(0.3, DialogueActItem("inform", "food", "chinese"))
    cn.add(0.1, DialogueActItem("inform", "food", "indian"))
    tr.update_state(state, cn)
    state.pprint()

    # try to deny some information
    print '---'
    cn.add(0.9, DialogueActItem("deny", "food", "chinese"))
    cn.add(0.1, DialogueActItem("deny", "food", "indian"))
    tr.update_state(state, cn)
    state.pprint()
Example 12: test_get_platform_res_da
def test_get_platform_res_da(self):
    hdc_policy = self._build_policy()
    state = DeterministicDiscriminativeDialogueState(self.cfg, self.ontology)
    system_input = DialogueActConfusionNetwork()
    res = hdc_policy.get_da(state)

    user_input = DialogueActConfusionNetwork()
    user_input.add(1.0, DialogueActItem(dai='info(task=find_platform)'))
    user_input.add(1.0, DialogueActItem(dai='inform(from_stop=Praha)'))
    user_input.add(1.0, DialogueActItem(dai='inform(to_stop=Brno)'))
    state.update(user_input, system_input)

    res = hdc_policy.get_da(state)
    self.assert_('inform(not_supported)' in res)
Example 13: process_pending_commands
def process_pending_commands(self):
    """Process all pending commands.

    Available commands:
        stop()  - stop processing and exit the process
        flush() - flush input buffers.
                  Now it only flushes the input connection.

    Return True if the process should terminate.
    """
    while self.commands.poll():
        command = self.commands.recv()
        if self.cfg['DM']['debug']:
            self.cfg['Logging']['system_logger'].debug(command)

        if isinstance(command, Command):
            # Thanh:
            if command.parsed['__name__'] == 'print_log_dir':
                print '===***===session-log-dir:', command.source

            if command.parsed['__name__'] == 'stop':
                return True

            if command.parsed['__name__'] == 'flush':
                # discard all data in the input buffers
                while self.slu_hypotheses_in.poll():
                    data_in = self.slu_hypotheses_in.recv()
                self.dm.end_dialogue()
                self.commands.send(Command("flushed()", 'DM', 'HUB'))
                return False

            # if command.parsed['__name__'] == 'prepare_new_dialogue':
            #     self.dm.new_dialogue()

            if command.parsed['__name__'] == 'new_dialogue':
                self.dm.new_dialogue()  # thanh change???
                self.epilogue_state = None
                self.cfg['Logging']['session_logger'].turn("system")
                self.dm.log_state()

                # I should generate the first DM output
                da = self.dm.da_out()
                if self.cfg['DM']['debug']:
                    s = []
                    s.append("DM Output")
                    s.append("-" * 60)
                    s.append(unicode(da))
                    s.append("")
                    s = '\n'.join(s)
                    self.cfg['Logging']['system_logger'].debug(s)
                self.cfg['Logging']['session_logger'].dialogue_act("system", da)
                self.commands.send(DMDA(da, 'DM', 'HUB'))
                return False

            if command.parsed['__name__'] == 'end_dialogue':
                self.dm.end_dialogue()
                return False

            if command.parsed['__name__'] == 'timeout':
                # check whether there is a looong silence
                # if yes, then inform the DM
                silence_time = command.parsed['silence_time']

                cn = DialogueActConfusionNetwork()
                cn.add(1.0, DialogueActItem('silence', 'time', silence_time))

                # process the input DA
                self.dm.da_in(cn)
                self.cfg['Logging']['session_logger'].turn("system")
                self.dm.log_state()
                print '----Time out: ', self.epilogue_state, silence_time
                '''Thanh
                if self.epilogue_state == 'give_code':
                    # a cant_apply act has been chosen
                    self.cfg['Logging']['session_logger'].dialogue_act("system", self.epilogue_da)
                    self.commands.send(DMDA(self.epilogue_da, 'DM', 'HUB'))
                    self.commands.send(Command('hangup()', 'DM', 'HUB'))
                    return False
                #'''
                if self.epilogue_state and float(silence_time) > 5.0:
                    if self.epilogue_state == 'final_question':  # and self.final_question_repeated < 16:
                        da = DialogueAct('say(text="{text}")'.format(text="Sorry, did you get the correct information?"))
                        # self.final_question_repeated += 1
                        self.cfg['Logging']['session_logger'].dialogue_act("system", da)
                        self.commands.send(DMDA(da, 'DM', 'HUB'))
# ... (the rest of this example is omitted on the source page) ...
Example 14: parse_1_best
def parse_1_best(self, obs, verbose=False, *args, **kwargs):
    """Parse an utterance into a dialogue act.

    :rtype: DialogueActConfusionNetwork
    """
    utterance = obs['utt']

    if isinstance(utterance, UtteranceHyp):
        # Parse just the utterance and ignore the confidence score.
        utterance = utterance.utterance

    if verbose:
        print 'Parsing utterance "{utt}".'.format(utt=utterance)

    res_cn = DialogueActConfusionNetwork()

    dict_da = self.utt2da.get(unicode(utterance), None)
    if dict_da:
        for dai in DialogueAct(dict_da):
            res_cn.add(1.0, dai)
        return res_cn

    utterance = self.preprocessing.normalise_utterance(utterance)
    abutterance, category_labels = self.abstract_utterance(utterance)

    if verbose:
        print 'After preprocessing: "{utt}".'.format(utt=abutterance)
        print category_labels

    self.parse_non_speech_events(utterance, res_cn)

    utterance = utterance.replace_all(['_noise_'], '').replace_all(['_laugh_'], '').replace_all(['_ehm_hmm_'], '').replace_all(['_inhale_'], '')
    abutterance = abutterance.replace_all(['_noise_'], '').replace_all(['_laugh_'], '').replace_all(['_ehm_hmm_'], '').replace_all(['_inhale_'], '')

    abutterance = self.handle_false_abstractions(abutterance)
    category_labels.add('CITY')
    category_labels.add('VEHICLE')
    category_labels.add('NUMBER')

    if len(res_cn) == 0:
        if 'STOP' in category_labels:
            self.parse_stop(abutterance, res_cn)
        if 'CITY' in category_labels:
            self.parse_city(abutterance, res_cn)
        if 'NUMBER' in category_labels:
            self.parse_number(abutterance)
            if any([word.startswith("TIME") for word in abutterance]):
                category_labels.add('TIME')
        if 'TIME' in category_labels:
            self.parse_time(abutterance, res_cn)
        if 'DATE_REL' in category_labels:
            self.parse_date_rel(abutterance, res_cn)
        if 'AMPM' in category_labels:
            self.parse_ampm(abutterance, res_cn)
        if 'VEHICLE' in category_labels:
            self.parse_vehicle(abutterance, res_cn)
        if 'TASK' in category_labels:
            self.parse_task(abutterance, res_cn)

        self.parse_meta(utterance, res_cn)

    res_cn.merge()
    return res_cn
Example 15: parse_1_best
def parse_1_best(self, obs, verbose=False):
    """Parse an utterance into a dialogue act."""
    utterance = obs['utt']

    if isinstance(utterance, UtteranceHyp):
        # Parse just the utterance and ignore the confidence score.
        utterance = utterance.utterance

    # print 'Parsing utterance "{utt}".'.format(utt=utterance)
    if verbose:
        print 'Parsing utterance "{utt}".'.format(utt=utterance)

    if self.preprocessing:
        # the text normalisation
        utterance = self.preprocessing.normalise_utterance(utterance)

        abutterance, category_labels = self.abstract_utterance(utterance)
        if verbose:
            print 'After preprocessing: "{utt}".'.format(utt=abutterance)
            print category_labels
    else:
        category_labels = dict()

    # handle false positive alarms of abstraction
    abutterance = abutterance.replace(('STOP=Metra',), ('metra',))
    abutterance = abutterance.replace(('STOP=Nádraží',), ('nádraží',))
    abutterance = abutterance.replace(('STOP=SME',), ('sme',))
    abutterance = abutterance.replace(('STOP=Bílá Hora', 'STOP=Železniční stanice',), ('STOP=Bílá Hora', 'železniční stanice',))
    abutterance = abutterance.replace(('TIME=now', 'bych', 'chtěl'), ('teď', 'bych', 'chtěl'))

    abutterance = abutterance.replace(('STOP=Čím', 'se'), ('čím', 'se',))
    abutterance = abutterance.replace(('STOP=Lužin', 'STOP=Na Chmelnici',), ('STOP=Lužin', 'na', 'STOP=Chmelnici',))
    abutterance = abutterance.replace(('STOP=Konečná', 'zastávka'), ('konečná', 'zastávka',))
    abutterance = abutterance.replace(('STOP=Konečná', 'STOP=Anděl'), ('konečná', 'STOP=Anděl',))
    abutterance = abutterance.replace(('STOP=Konečná stanice', 'STOP=Ládví'), ('konečná', 'stanice', 'STOP=Ládví',))
    abutterance = abutterance.replace(('STOP=Výstupní', 'stanice', 'je'), ('výstupní', 'stanice', 'je'))
    abutterance = abutterance.replace(('STOP=Nová', 'jiné'), ('nové', 'jiné',))
    abutterance = abutterance.replace(('STOP=Nová', 'spojení'), ('nové', 'spojení',))
    abutterance = abutterance.replace(('STOP=Nová', 'zadání'), ('nové', 'zadání',))
    abutterance = abutterance.replace(('STOP=Nová', 'TASK=find_connection'), ('nový', 'TASK=find_connection',))
    abutterance = abutterance.replace(('z', 'CITY=Liberk',), ('z', 'CITY=Liberec',))
    abutterance = abutterance.replace(('do', 'CITY=Liberk',), ('do', 'CITY=Liberec',))
    abutterance = abutterance.replace(('pauza', 'hrozně', 'STOP=Dlouhá',), ('pauza', 'hrozně', 'dlouhá',))
    abutterance = abutterance.replace(('v', 'STOP=Praga',), ('v', 'CITY=Praha',))
    abutterance = abutterance.replace(('na', 'STOP=Praga',), ('na', 'CITY=Praha',))
    abutterance = abutterance.replace(('po', 'STOP=Praga', 'ale'), ('po', 'CITY=Praha',))
    abutterance = abutterance.replace(('jsem', 'v', 'STOP=Metra',), ('jsem', 'v', 'VEHICLE=metro',))

    category_labels.add('CITY')
    category_labels.add('VEHICLE')

    # print 'After preprocessing: "{utt}".'.format(utt=abutterance)
    # print category_labels

    res_cn = DialogueActConfusionNetwork()
    self.parse_non_speech_events(utterance, res_cn)

    if len(res_cn) == 0:
        # remove non speech events, they are not relevant for SLU
        abutterance = abutterance.replace_all('_noise_', '').replace_all('_laugh_', '').replace_all('_ehm_hmm_', '').replace_all('_inhale_', '')

        if 'STOP' in category_labels:
            self.parse_stop(abutterance, res_cn)
        if 'CITY' in category_labels:
            self.parse_city(abutterance, res_cn)
        if 'TIME' in category_labels:
            self.parse_time(abutterance, res_cn)
        if 'DATE_REL' in category_labels:
            self.parse_date_rel(abutterance, res_cn)
        if 'AMPM' in category_labels:
            self.parse_ampm(abutterance, res_cn)
        if 'VEHICLE' in category_labels:
            self.parse_vehicle(abutterance, res_cn)
        if 'TASK' in category_labels:
            self.parse_task(abutterance, res_cn)

        self.parse_meta(utterance, res_cn)

    res_cn.merge()
    return res_cn