-
Notifications
You must be signed in to change notification settings - Fork 4
/
Copy pathAgent.py
1693 lines (1538 loc) · 99.5 KB
/
Agent.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565
566
567
568
569
570
571
572
573
574
575
576
577
578
579
580
581
582
583
584
585
586
587
588
589
590
591
592
593
594
595
596
597
598
599
600
601
602
603
604
605
606
607
608
609
610
611
612
613
614
615
616
617
618
619
620
621
622
623
624
625
626
627
628
629
630
631
632
633
634
635
636
637
638
639
640
641
642
643
644
645
646
647
648
649
650
651
652
653
654
655
656
657
658
659
660
661
662
663
664
665
666
667
668
669
670
671
672
673
674
675
676
677
678
679
680
681
682
683
684
685
686
687
688
689
690
691
692
693
694
695
696
697
698
699
700
701
702
703
704
705
706
707
708
709
710
711
712
713
714
715
716
717
718
719
720
721
722
723
724
725
726
727
728
729
730
731
732
733
734
735
736
737
738
739
740
741
742
743
744
745
746
747
748
749
750
751
752
753
754
755
756
757
758
759
760
761
762
763
764
765
766
767
768
769
770
771
772
773
774
775
776
777
778
779
780
781
782
783
784
785
786
787
788
789
790
791
792
793
794
795
796
797
798
799
800
801
802
803
804
805
806
807
808
809
810
811
812
813
814
815
816
817
818
819
820
821
822
823
824
825
826
827
828
829
830
831
832
833
834
835
836
837
838
839
840
841
842
843
844
845
846
847
848
849
850
851
852
853
854
855
856
857
858
859
860
861
862
863
864
865
866
867
868
869
870
871
872
873
874
875
876
877
878
879
880
881
882
883
884
885
886
887
888
889
890
891
892
893
894
895
896
897
898
899
900
901
902
903
904
905
906
907
908
909
910
911
912
913
914
915
916
917
918
919
920
921
922
923
924
925
926
927
928
929
930
931
932
933
934
935
936
937
938
939
940
941
942
943
944
945
946
947
948
949
950
951
952
953
954
955
956
957
958
959
960
961
962
963
964
965
966
967
968
969
970
971
972
973
974
975
976
977
978
979
980
981
982
983
984
985
986
987
988
989
990
991
992
993
994
995
996
997
998
999
1000
#!/usr/bin/env python
__author__ = 'jesse'
import copy
import math
import numpy as np
import operator
import os
import pickle
import random
import signal
class Agent:
# Takes an instantiated, trained parser, a knowledge base grounder, an input/output instance, and
# a (possibly empty) list of oidxs to be considered active for perceptual dialog questions.
# The optional no_clarify argument can be a list of roles which the agent does not use clarification
# dialog questions to confirm, but rather takes the current maximum belief as the correct argument
# when sampling from the belief state.
def __init__(self, parser, grounder, io, active_train_set, no_clarify=None,
use_shorter_utterances=False, # useful for rendering speech on robot
word_neighbors_to_consider_as_synonyms=3, # how many lexicon items to beam through for new pred
max_perception_subdialog_qs=3, # based on CORL17 control condition; vetted down from 5
max_ask_before_enumeration=2): # max times to ask the same question before using an enumeration backoff
# random.seed(27) # (adj for demo)
# np.random.seed(seed=27) # (adj for demo)
self.parser = parser
self.grounder = grounder
self.io = io
self.active_train_set = active_train_set
self.use_shorter_utterances = use_shorter_utterances
self.no_clarify = [] if no_clarify is None else no_clarify
# hyperparameters
self.parse_beam = 1
self.threshold_to_accept_role = 1.0 # include role filler in questions above this threshold
self.belief_update_rate = 0.5 # interpolation between belief state and grounding state
self.threshold_to_accept_perceptual_conf = 0.7 # per perceptual predicate, e.g. 0.7*0.7 for two
self.none_start_mass_factor = 1 # how much None mass per role versus all else; if 1, 50/50, if 9, 9 None to 1 else
self.max_perception_subdialog_qs = max_perception_subdialog_qs
self.max_ask_before_enumeration = max_ask_before_enumeration
self.word_neighbors_to_consider_as_synonyms = word_neighbors_to_consider_as_synonyms
self.budget_for_parsing = 15 # how many seconds we allow the parser
self.budget_for_grounding = 10 # how many seconds we allow the grounder
self.latent_forms_to_consider_for_induction = 32 # maximum parses to consider for grounding during induction
# self.get_novel_question_beam = 10 # how many times to sample for a new question before giving up if identical
# static information about expected actions and their arguments
self.roles = ['action', 'patient', 'recipient', 'source', 'goal']
self.actions = ['walk', 'bring', 'move']
# expected argument types per action
self.action_args = {'walk': {'goal': ['l']},
'bring': {'patient': ['i'], 'recipient': ['p']},
'move': {'patient': ['i'], 'source': ['l'], 'goal': ['l']}}
self.action_belief_state = None # maintained during action dialogs to track action, patient, recipient
# pairs of [utterance, grounded SemanticNode] induced from conversations
self.induced_utterance_grounding_pairs = []
# pairs of (pred, oidx, label) gathered during perceptual sub-dialogs
# We use pred as a str instead of its id to make collapsing labels across users easier later.
self.new_perceptual_labels = []
# pairs of (pred, pred, syn) for syn bool gathered during perceptual sub-dialogs
self.perceptual_pred_synonymy = []
# track parsing and grounding timeouts.
self.parser_timeouts = 0
self.grounder_timeouts = 0
# Start a new action dialog from utterance u given by a user.
# Clarifies the arguments of u until the action is confirmed by the user.
# perception_labels_requested - pairs of (pidx, oidx) labels already requested from user; modified in-place.
# perform_action - whether to execute the action through IO or just return it.
def start_action_dialog(self, perception_labels_requested,
                        perform_action=True):
    """Run a full clarification dialog from a user request until an action is confirmed.

    Builds a fresh belief distribution over each role's possible fillers, then loops:
    sample an action from the belief state, ask a confirmation or open-ended question,
    update beliefs from the user's answer (possibly detouring into perceptual
    sub-dialogs), until the chosen action and all of its required arguments are
    confirmed.

    :param perception_labels_requested: list of (pidx, oidx) label pairs already
        requested from the user; modified in place as sub-dialogs gather more.
    :param perform_action: if True, execute the confirmed action through self.io.
    :return: (action_confirmed, user_utterances_by_role, parser_timeouts, grounder_timeouts).

    NOTE(review): relies on helpers not visible in this chunk
    (sample_action_from_belief, get_question_from_sampled_action,
    update_action_belief_from_confirmation, update_action_belief_from_groundings,
    parse_and_ground_utterance); their contracts are assumed from call sites.
    """
    debug = False

    # Start with a count of 1.0 on each role being empty (of which only recipient can remain empty in the end).
    # Perform belief updates modeled after IJCAI'15 paper based on presence/absence of arguments in answers.
    # Action beliefs are sampled from a probability distribution drawn from this histogram of beliefs.
    # For the patient role, explicitly limit 'i' types to those in the grounder's active test set to avoid
    # sampling irrelevant items (the grounder only thinks about whether predicates apply to test set).
    self.action_belief_state = {'action': {a: 1.0 for a in self.actions},
                                'patient': {p: 1.0 for p in self.parser.ontology.preds
                                            if ((self.parser.ontology.types[self.parser.ontology.entries[
                                                self.parser.ontology.preds.index(p)]] in
                                                self.action_args['bring']['patient'] or
                                                self.parser.ontology.types[self.parser.ontology.entries[
                                                    self.parser.ontology.preds.index(p)]] in
                                                self.action_args['move']['patient']) and
                                                (self.parser.ontology.types[self.parser.ontology.entries[
                                                    self.parser.ontology.preds.index(p)]] != 'i' or
                                                 int(p.split('_')[1]) in self.grounder.active_test_set))},
                                'recipient': {r: 1.0 for r in self.parser.ontology.preds
                                              if self.parser.ontology.types[self.parser.ontology.entries[
                                                  self.parser.ontology.preds.index(r)]] in
                                              self.action_args['bring']['recipient']},
                                'source': {r: 1.0 for r in self.parser.ontology.preds
                                           if self.parser.ontology.types[self.parser.ontology.entries[
                                               self.parser.ontology.preds.index(r)]] in
                                           self.action_args['move']['source']},
                                'goal': {r: 1.0 for r in self.parser.ontology.preds
                                         if (self.parser.ontology.types[self.parser.ontology.entries[
                                             self.parser.ontology.preds.index(r)]] in
                                             self.action_args['walk']['goal'] or
                                             self.parser.ontology.types[self.parser.ontology.entries[
                                                 self.parser.ontology.preds.index(r)]] in
                                             self.action_args['move']['goal'])}}
    # question generation supports None action, but I think it's weird maybe so I removed it here
    for r in ['patient', 'recipient', 'source', 'goal']:
        # None starts with chunk of the probability mass to encourage clarification over instance checks.
        self.action_belief_state[r][None] = self.none_start_mass_factor * len(self.action_belief_state[r])
    for r in self.roles:
        self.action_belief_state[r] = self.make_distribution_from_positive_counts(self.action_belief_state[r])
    if debug:
        print ("start_action_dialog starting with blank action belief state: "
               + str(self.action_belief_state))

    # Ask a follow up question based on the new belief state.
    # This continues until an action is chosen.
    user_utterances_by_role = {r: [] for r in self.roles + ['all']}  # to later induce grounding matches
    action_confirmed = {r: None for r in self.roles}
    first_utterance = True
    perception_subdialog_qs = 0  # track how many have been asked so far to disallow more.
    asked_role_repeat = {}  # count the number of times we've requested a role in an open-ended question.
    last_q = None
    self.parser_timeouts = 0
    self.grounder_timeouts = 0
    # last_rvs = None
    # Loop until the action and every argument that action requires are confirmed.
    while (action_confirmed['action'] is None or
           None in [action_confirmed[r] for r in self.action_args[action_confirmed['action']].keys()]):

        # Determine what question to ask based on missing arguments in chosen action.
        if not first_utterance:
            # q = last_q
            # rvs = last_rvs
            action_chosen = self.sample_action_from_belief(action_confirmed, arg_max=True)
            q, role_asked, _, roles_in_q = self.get_question_from_sampled_action(
                action_chosen, self.threshold_to_accept_role)
            rvs = {r: action_chosen[r][0] for r in self.roles if r in roles_in_q}

            # If we are about to ask a second instance confirmation, sample a new question
            # that asks for an open-ended clarification instead.
            if role_asked is not None and q == last_q:
                action_chosen_open = {r: action_chosen[r] for r in self.roles}
                action_chosen_open[role_asked] = (None, 0)
                q, role_asked, _, roles_in_q = self.get_question_from_sampled_action(
                    action_chosen_open, self.threshold_to_accept_role)
                rvs = {r: action_chosen[r][0] for r in self.roles if r in roles_in_q}

            # If our question is identical to the last, apologize to encourage rewording.
            if q == last_q:
                self.io.say_to_user("Sorry, I didn't understand that.")

            # Implementation to get a novel question to avoid asking annoying repeats.
            # With enumeration backoff, allowing annoying repeats speeds up convergence, so it's fine.
            # times_sampled = 0
            # while (q == last_q and last_rvs == rvs and (q is None or "rephrase" not in q) and
            #        times_sampled < self.get_novel_question_beam):
            #     action_chosen = self.sample_action_from_belief(action_confirmed,
            #                                                    arg_max=True if times_sampled == 0 else False)
            #     q, role_asked, _, roles_in_q = self.get_question_from_sampled_action(
            #         action_chosen, self.threshold_to_accept_role)
            #     rvs = {r: action_chosen[r][0] for r in self.roles if r in roles_in_q}
            #     times_sampled += 1
            #     if debug:
            #         print "sampled q " + str(q)
            # last_q = q
            # last_rvs = rvs
            # if times_sampled == self.get_novel_question_beam:
            #     self.io.say_to_user("Sorry, I didn't understand that.")
        else:
            # First turn: no belief evidence yet, so ask for the whole command.
            action_chosen = self.sample_action_from_belief(action_confirmed, arg_max=True)
            q = "What should I do?"
            role_asked = None
            roles_in_q = []
            rvs = {}
            first_utterance = False
        last_q = q

        # Ask question and get user response.
        if role_asked is None or (action_chosen[role_asked][0] is None or role_asked not in roles_in_q):
            conf_q = False
            # If this is not a confirmation question, note that we're asking an open-ended one since
            # we only allow up to a max number of open-ended repeats for a role before backing off to
            # an enumeration strategy to avoid annoying the user too much.
            if role_asked is not None:
                if role_asked not in asked_role_repeat:
                    asked_role_repeat[role_asked] = 0
                asked_role_repeat[role_asked] += 1
        else:
            conf_q = True

        # Confirmation yes/no question.
        if conf_q:
            ur = self.get_yes_no_from_user(q, rvs)
            action_confirmed = self.update_action_belief_from_confirmation(ur, action_confirmed,
                                                                           action_chosen, roles_in_q)

        # Open-ended response question.
        else:
            # Back off to an enumeration strategy if we're about to ask an open-ended question for the Nth time.
            if role_asked is not None and asked_role_repeat[role_asked] >= self.max_ask_before_enumeration + 1:
                # We actually only need the strings of the belief state to do a confirmation update, which is nice!
                # Enumerate possible choices in current order of belief.
                enum_candidates_strs = [k for k, _ in sorted(self.action_belief_state[role_asked].items(),
                                                             key=operator.itemgetter(1),
                                                             reverse=True)
                                        if k is not None]
                # Present these as options to user.
                # Ask same open-ended question but show enumeration instead of open-ended text resp.
                self.io.say_to_user_with_referents(q, rvs)
                # Show enumeration to user and have them select exactly one.
                enum_ur = self.io.get_from_user_enum(enum_candidates_strs, role_asked)
                enum_chosen = {role_asked: [enum_ur, 1.]}  # Full confidence to the selected choice.
                # Do a confirmation update based on the user selection to solidify their choice
                # in the action belief space.
                action_confirmed = self.update_action_belief_from_confirmation('yes', action_confirmed,
                                                                               enum_chosen, [role_asked])
            else:
                self.io.say_to_user_with_referents(q, rvs)
                ur = self.io.get_from_user()

                # Possible sub-dialog to clarify whether new words are perceptual and, possibly,
                # synonyms of existing neighbor words.
                self.preprocess_utterance_for_new_predicates(ur)

                # Get groundings and latent parse from utterance.
                gprs, pr = self.parse_and_ground_utterance(ur)

                # Start a sub-dialog to ask clarifying perceptual questions before continuing with
                # slot-filling. If sub-dialog results in fewer than the maximum number of questions,
                # allow asking off-topic questions in the style of CORL'17 paper to improve future
                # interactions.
                if role_asked == 'patient' or role_asked is None:
                    num_new_qs = 0
                    if self.active_train_set is not None:
                        if perception_subdialog_qs < self.max_perception_subdialog_qs:
                            num_new_qs += self.conduct_perception_subdialog(gprs, pr,
                                                                            self.max_perception_subdialog_qs,
                                                                            perception_labels_requested)
                            perception_subdialog_qs += num_new_qs
                        if perception_subdialog_qs < self.max_perception_subdialog_qs:
                            preface_msg = True if perception_subdialog_qs == 0 else False
                            num_new_qs += self.conduct_perception_subdialog(gprs, pr,
                                                                            self.max_perception_subdialog_qs -
                                                                            perception_subdialog_qs,
                                                                            perception_labels_requested,
                                                                            allow_off_topic_preds=True,
                                                                            preface_msg=preface_msg)
                            perception_subdialog_qs += num_new_qs
                    if num_new_qs > 0:
                        self.io.say_to_user("Thanks. Now, back to business.")
                        # Reground utterance from fresh classifier information.
                        if pr is not None:
                            gprs = self.ground_semantic_form(pr.node)

                if role_asked is None:  # asked to repeat whole thing
                    user_utterances_by_role['all'].append(ur)
                    self.update_action_belief_from_groundings(gprs, self.roles)
                # asked an open-ended question for a particular role (e.g. "where should i go?")
                elif action_chosen[role_asked][0] is None or role_asked not in roles_in_q:
                    user_utterances_by_role[role_asked].append(ur)
                    self.update_action_belief_from_groundings(gprs, [role_asked])

        # Fill current_confirmed with any no_clarify roles by taking the current non-None max.
        # As in other max operations, considers all tied values and chooses one at random.
        # These role slots are re-filled after every confidence update, so they are the only
        # clarification slots that can be changed.
        for r in self.no_clarify:
            valid_entries = [entry for entry in self.action_belief_state[r]
                             if entry is not None]
            dist = [self.action_belief_state[r][entry] for entry in valid_entries]
            s = sum(dist)
            dist = [dist[idx] / s for idx in range(len(dist))]
            max_idxs = [idx for idx in range(len(dist)) if dist[idx] == max(dist)]
            c = np.random.choice([valid_entries[idx] for idx in max_idxs], 1)
            action_confirmed[r] = c[0]

        if debug:
            print("start_action_dialog: updated action belief state: " + str(self.action_belief_state))

    # Induce utterance/grounding pairs from this conversation.
    # new_i_pairs = self.induce_utterance_grounding_pairs_from_conversation(user_utterances_by_role,
    #                                                                      action_confirmed)
    # self.induced_utterance_grounding_pairs.extend(new_i_pairs)

    # TODO: update SVMs with positive example from active test set
    # TODO: this is tricky since in live experiment these labels still have to be ignored
    # TODO: will have to do fold-by-fold training as usual
    # TODO: also tricky because without explicitly asking, the labels come from reverse-grounding,
    # TODO: which can be noisy and should be overwritten later on by explicit human conversation.

    # Perform the chosen action.
    if perform_action:
        self.io.perform_action(action_confirmed)

    # Return the chosen action and the user utterances by role from this dialog.
    return action_confirmed, user_utterances_by_role, self.parser_timeouts, self.grounder_timeouts
# Given a dictionary of key -> value for positive values, return a dictionary over the same keys
# with the values summing to 1.
def make_distribution_from_positive_counts(self, d):
    """Normalize a histogram of non-negative counts into a probability distribution.

    :param d: dict mapping keys to non-negative numeric counts.
    :return: dict over the same keys with values summing to 1.
    :raises ValueError: if any count is negative, or if the histogram is empty or
        all-zero. (The previous implementation used `assert`, which is stripped
        under `python -O`, and would raise an opaque ZeroDivisionError on zero
        total mass.)
    """
    if any(d[k] < 0 for k in d):
        raise ValueError("counts must be non-negative: " + str(d))
    s = float(sum(d.values()))
    if s == 0:
        raise ValueError("cannot normalize an empty or all-zero histogram: " + str(d))
    return {k: d[k] / s for k in d}
# While top grounding confidence does not pass perception threshold, ask a question that
# strengthens an SVM involved in the current parse.
# In effect, this can start a sub-dialog about perception, which, when resolved, returns to
# the existing slot-filling dialog.
# ur - the user response to the last question, which may contain perceptual predicates
# gprs - groundings of parse from last response
# pr - the associated latent parse
# max_questions - the maximum number of questions to ask in this sub-dialog
# labeled_tuples - a list of (pidx, oidx) tuples labeled by the user; modified in-place with new entries
# allow_off_topic_preds - if flipped to true, considers all predicates, not just those in parse of utterance
# returns - an integer, the number of questions asked
def conduct_perception_subdialog(self, gprs, pr, max_questions, labeled_tuples,
                                 allow_off_topic_preds=False, preface_msg=True):
    """Run a perception sub-dialog that gathers labels to strengthen perceptual classifiers.

    While the top grounding confidence does not pass the perception threshold (or
    unconditionally, when off-topic predicates are allowed), pick a low-confidence
    predicate, ask the user a labeling question about the active training objects,
    update the classifiers with the answer, and re-ground.

    :param gprs: groundings of the parse from the last response; gprs[0][1] is read
        as the top grounding's confidence.
    :param pr: the associated latent parse (pr.node is re-grounded after updates).
    :param max_questions: maximum number of questions to ask in this sub-dialog.
    :param labeled_tuples: list of (pidx, oidx) tuples already labeled by the user;
        modified in place with new entries.
    :param allow_off_topic_preds: if True, consider all predicates, not just those
        in the parse of the utterance.
    :param preface_msg: if True, preface the first question with an explanation.
    :return: the number of questions asked (inflated past max_questions on repeated
        identical answers to force the loop to stop).
    """
    debug = False

    num_qs = 0
    if len(gprs) > 0:
        perception_above_threshold = False
        top_conf = gprs[0][1]
        perceptual_pred_trees = self.get_parse_subtrees(pr.node, self.grounder.kb.perceptual_preds)
        if debug:
            print ("conduct_perception_subdialog: perceptual confidence " + str(top_conf) + " versus " +
                   "threshold " + str(self.threshold_to_accept_perceptual_conf) + " across " +
                   str(len(perceptual_pred_trees)) + " predicates")
        while (allow_off_topic_preds or not perception_above_threshold) and num_qs < max_questions:
            # The joint threshold is per-predicate confidence raised to the number of predicates.
            if (allow_off_topic_preds or
                    top_conf < math.pow(self.threshold_to_accept_perceptual_conf,
                                        len(perceptual_pred_trees))):
                if debug:
                    print ("conduct_perception_subdialog: perceptual confidence " + str(top_conf) +
                           " below threshold or we are allowing off-topic predicates; " +
                           "entering dialog to strengthen perceptual classifiers")

                # Sub-dialog to ask perceptual predicate questions about objects in the active training
                # set until confidence threshold is reached or no more information can be gained
                # from the objects in the active training set.

                # For current SVMs, calculate the least-reliable predicates when applied to test objects.
                # Additionally record current confidences against active training set objects.
                pred_test_conf = {}  # from predicates to confidence sums
                pred_train_conf = {}  # from predicates to active training idx oidx to confidences
                if allow_off_topic_preds:
                    preds_to_consider = self.grounder.kb.pc.predicates[:]
                else:
                    preds_to_consider = [self.parser.ontology.preds[root.idx]
                                         for root in perceptual_pred_trees]
                if len(preds_to_consider) == 0:  # no further preds to consider
                    return num_qs

                pred_to_surface = {}
                for pred in preds_to_consider:
                    pidx = self.grounder.kb.pc.predicates.index(pred)

                    # Calculate the surface form to use with this pred.
                    # We look at the raw count score for the sem | sf since the parameter vector is
                    # normalized given the surface form, but we're looking for the max across
                    # surface forms.
                    sems_with_pred = [sem_idx for sem_idx in range(len(self.parser.lexicon.semantic_forms))
                                      if len(self.get_parse_subtrees(self.parser.lexicon.semantic_forms[sem_idx],
                                                                     [pred])) > 0]
                    sfs_with_sems = [(sf_idx, sem_idx) for sf_idx in range(len(self.parser.lexicon.surface_forms))
                                     for sem_idx in self.parser.lexicon.entries[sf_idx]
                                     if sem_idx in sems_with_pred]
                    sf_sem_scores = [self.parser.theta._lexicon_entry_given_token_counts[(sem_idx, sf_idx)] if
                                     (sem_idx, sf_idx) in self.parser.theta._lexicon_entry_given_token_counts else 0
                                     for sem_idx, sf_idx in sfs_with_sems]
                    best_sf_sem_score = max(sf_sem_scores)
                    pred_to_surface[pred] = self.parser.lexicon.surface_forms[
                        sfs_with_sems[sf_sem_scores.index(best_sf_sem_score)][0]]

                    # Average confidence of this predicate's classifier over the active test set.
                    test_conf = 0
                    for oidx in self.grounder.active_test_set:
                        pos_conf, neg_conf = self.grounder.kb.query((pred, 'oidx_' + str(oidx)))
                        test_conf += max(pos_conf, neg_conf)
                    pred_test_conf[pred] = test_conf / len(self.grounder.active_test_set)

                    # Per-object confidences over the active training set; already-labeled
                    # objects get full confidence (1) so they are never re-asked.
                    pred_train_conf[pred] = []
                    for oidx in self.active_train_set:
                        if (pidx, oidx) not in labeled_tuples:
                            pos_conf, neg_conf = self.grounder.kb.query((pred, 'oidx_' + str(oidx)))
                            pred_train_conf[pred].append(max(pos_conf, neg_conf))
                            # Clamp confidence into [0, 1].
                            if pred_train_conf[pred][-1] < 0:
                                pred_train_conf[pred][-1] = 0.
                            elif pred_train_conf[pred][-1] > 1:
                                pred_train_conf[pred][-1] = 1.
                        else:
                            pred_train_conf[pred].append(1)
                if debug:
                    print ("conduct_perception_subdialog: examined classifiers to get pred_test_conf: " +
                           str(pred_test_conf) + " and pred_train_conf: " + str(pred_train_conf) +
                           " for active train set " + str(self.active_train_set))

                # Examine preds probabilistically for least test confidence until we reach one for which
                # we can formulate a useful question against the active training set objects. If all of
                # the active training set objects have been labeled or have total confidence already
                # for every predicate, the sub-dialog can't be productive and ends.
                q = None
                rvs = {}
                q_type = None
                perception_pidx = None
                pred = None

                # Order predicates weighted by their negative test confidence as a probability.
                sum_inv_test_conf = sum([1 - pred_test_conf[pred] for pred in preds_to_consider])
                pred_probs = [(1 - pred_test_conf[pred]) / sum_inv_test_conf for pred in preds_to_consider]
                # Clamp the probabilities to [0, 1] by hand since the floating point ops sometimes
                # drift outside.
                pred_probs = [prob if 0. < prob < 1. else (0. if prob < 0. else 1.)
                              for prob in pred_probs]
                if debug:
                    print ("conduct_perception_subdialog: resulting pred probs " + str(pred_probs) +
                           " for predicates " + str(preds_to_consider))
                nonzero_to_consider = [preds_to_consider[idx] for idx in range(len(pred_probs))
                                       if pred_probs[idx] > 0]
                nonzero_probs = [pred_probs[idx] for idx in range(len(pred_probs))
                                 if pred_probs[idx] > 0]
                sampled_preds_to_ask = np.random.choice(nonzero_to_consider, len(nonzero_to_consider),
                                                        replace=False, p=nonzero_probs)
                for pred in sampled_preds_to_ask:
                    if debug:
                        print ("conduct_perception_subdialog: sampled pred '" + pred +
                               "' with pred_train_conf " + str(pred_train_conf[pred]))

                    # If at least one active training object is unlabeled or unconfident
                    if sum(pred_train_conf[pred]) < len(self.active_train_set):
                        perception_pidx = self.grounder.kb.pc.predicates.index(pred)

                        # If all objects are below the perception threshold, ask for label we have
                        # least of.
                        if min(pred_train_conf[pred]) < self.threshold_to_accept_perceptual_conf:
                            ls = [l for _p, _o, l in self.grounder.kb.pc.labels if _p == perception_pidx
                                  and _o not in self.grounder.active_test_set]
                            if ls.count(1) <= ls.count(0):  # more negative labels or labels are equal
                                if self.use_shorter_utterances:
                                    q = ("Show me an object you could use the word '" + pred_to_surface[pred] +
                                         "' when describing, or shake your head.")
                                else:
                                    q = ("Among these nearby objects, could you show me one you would use the " +
                                         "word '" + pred_to_surface[pred] + "' when describing, or shake your " +
                                         "head if there are none?")
                                q_type = 'pos'
                            else:  # more positive labels
                                if self.use_shorter_utterances:
                                    q = ("Show me an object you could not use the word '" + pred_to_surface[pred] +
                                         "' when describing, or shake your head.")
                                else:
                                    q = ("Among these nearby objects, could you show me one you could not use " +
                                         "the word '" + pred_to_surface[pred] + "' when describing, or shake " +
                                         "your head if you could use " + "'" + pred_to_surface[pred] +
                                         "' when describing all of them?")
                                q_type = 'neg'

                        # Else, ask for the label of the (sampled) least-confident object.
                        else:
                            sum_inv_train_conf = sum([1 - pred_train_conf[pred][idx]
                                                      for idx in range(len(pred_train_conf[pred]))])
                            pred_train_conf_idx = np.random.choice(range(len(pred_train_conf[pred])), 1,
                                                                   p=[(1 - pred_train_conf[pred][idx]) /
                                                                      sum_inv_train_conf
                                                                      for idx in
                                                                      range(len(pred_train_conf[pred]))])[0]
                            if debug:
                                print ("conduct_perception_subdialog: sampled idx " + str(pred_train_conf_idx) +
                                       " out of confidences " + str(pred_train_conf[pred]))
                            oidx = self.active_train_set[pred_train_conf_idx]
                            q = ("Would you use the word '" + pred_to_surface[pred] +
                                 "' when describing <p>this</p> object?")
                            rvs['patient'] = 'oidx_' + str(oidx)
                            q_type = oidx

                        # Ask the question we settled on.
                        break

                    # Nothing more to be gained by asking questions about the active training set
                    # with respect to this predicate.
                    else:
                        continue

                # If we didn't settle on a question, all active training set objects have been labeled
                # for every predicate of interest, so this sub-dialog can't get us anywhere.
                if q is None:
                    break

                # If q is not None, we're going to engage in the sub-dialog.
                if num_qs == 0 and preface_msg:
                    if self.use_shorter_utterances:
                        self.io.say_to_user("I'm still learning the meanings of some words.")
                    else:
                        self.io.say_to_user("I'm still learning the meanings of some words. I'm going to ask you " +
                                            "a few questions about these nearby objects before we continue.")

                # Ask the question and get a user response.
                if q_type == 'pos' or q_type == 'neg':
                    self.io.say_to_user_with_referents(q, rvs)
                    sub_ur = -1
                    # Re-prompt until the user names an active-training object or shakes head (None).
                    while sub_ur is not None and sub_ur not in self.active_train_set:
                        sub_ur = self.io.get_oidx_from_user(self.active_train_set)
                else:  # i.e. q_type is a particular oidx atom we asked a yes/no about
                    sub_ur = self.get_yes_no_from_user(q, rvs)
                num_qs += 1

                # Update perceptual classifiers from user response.
                upidxs = []
                uoidxs = []
                ulabels = []
                if q_type == 'pos':  # response is expected to be an oidx or 'none' (e.g. None)
                    if sub_ur is None:  # None, so every object in active train is a negative example
                        upidxs = [perception_pidx] * len(self.active_train_set)
                        uoidxs = self.active_train_set
                        ulabels = [0] * len(self.active_train_set)
                        labeled_tuples.extend([(perception_pidx, oidx) for oidx in self.active_train_set])
                        self.new_perceptual_labels.extend([(pred, oidx, 0) for oidx in self.active_train_set])
                    else:  # an oidx of a positive example
                        upidxs = [perception_pidx]
                        uoidxs = [sub_ur]
                        ulabels = [1]
                        labeled_tuples.append((perception_pidx, sub_ur))
                        e = (pred, sub_ur, 1)
                        if e not in self.new_perceptual_labels:
                            self.new_perceptual_labels.append(e)
                        else:  # if the user gives the same answer twice, just move on.
                            num_qs += self.max_perception_subdialog_qs
                elif q_type == 'neg':  # response is expected to be an oidx or 'all' (e.g. None)
                    if sub_ur is None:  # None, so every object in active train set is a positive example
                        upidxs = [perception_pidx] * len(self.active_train_set)
                        uoidxs = self.active_train_set
                        ulabels = [1] * len(self.active_train_set)
                        labeled_tuples.extend([(perception_pidx, oidx) for oidx in self.active_train_set])
                        self.new_perceptual_labels.extend([(pred, oidx, 1) for oidx in self.active_train_set])
                    else:  # an oidx of a negative example
                        upidxs = [perception_pidx]
                        uoidxs = [sub_ur]
                        ulabels = [0]
                        labeled_tuples.append((perception_pidx, sub_ur))
                        e = (pred, sub_ur, 0)
                        if e not in self.new_perceptual_labels:
                            self.new_perceptual_labels.append(e)
                        else:  # if the user gives the same answer twice, just move on.
                            num_qs += self.max_perception_subdialog_qs
                else:  # response is expected to be a confirmation yes/no
                    if sub_ur == 'yes':
                        upidxs = [perception_pidx]
                        uoidxs = [q_type]
                        ulabels = [1]
                        labeled_tuples.append((perception_pidx, q_type))
                        self.new_perceptual_labels.append((pred, q_type, 1))
                    elif sub_ur == 'no':
                        upidxs = [perception_pidx]
                        uoidxs = [q_type]
                        ulabels = [0]
                        labeled_tuples.append((perception_pidx, q_type))
                        self.new_perceptual_labels.append((pred, q_type, 0))
                if debug:
                    print ("conduct_perception_subdialog: updating classifiers with upidxs " + str(upidxs) +
                           ", uoidxs " + str(uoidxs) + ", ulabels " + str(ulabels))
                self.grounder.kb.pc.update_classifiers([], upidxs, uoidxs, ulabels)

                # Re-ground with updated classifiers.
                gprs = self.ground_semantic_form(pr.node)
                if len(gprs) > 0:
                    top_conf = gprs[0][1]
            else:
                perception_above_threshold = True
    return num_qs
# Given a user utterance, pass over the tokens to identify potential new predicates. This can initiate
# a sub-dialog in which the user is asked whether a word requires perceiving the real world, and then whether
# it means the same thing as a few neighboring words. This dialog's length is limited linearly with respect
# to the number of words in the utterance, but could be long for many new predicates.
    def preprocess_utterance_for_new_predicates(self, u):
        """Scan utterance u for unknown tokens and learn perceptual lexical entries for them.

        For each whitespace token not yet in the parser's lexicon, word-embedding neighbors
        are examined; if any neighbor carries perceptual semantics, a sub-dialog asks the
        user (1) whether the new word refers to a perceivable property at all and, if so,
        (2) whether it is a synonym of each perceptual neighbor in turn (closest first,
        stopping at the first 'yes'). New entries (or a fresh ontological concept) are then
        added via add_new_perceptual_lexical_entries.

        u - the raw user utterance string
        """
        debug = False
        if debug:
            print ("preprocess_utterance_for_new_predicates: called with utterance " + u)

        tks = u.strip().split()
        for tkidx in range(len(tks)):
            tk = tks[tkidx]
            if tk not in self.parser.lexicon.surface_forms:  # this token hasn't been analyzed by the parser
                if debug:
                    print ("preprocess_utterance_for_new_predicates: token '" + tk + "' has not been " +
                           "added to the parser's lexicon yet")

                # Get all the neighbors in order based on word embedding distances for this word.
                nn = self.parser.lexicon.get_lexicon_word_embedding_neighbors(
                    tk, self.word_neighbors_to_consider_as_synonyms)

                # Beam through neighbors to determine which, if any, are perceptual.
                perceptual_neighbors = {}  # from surface form indices to parse subtrees
                for idx in range(len(nn)):
                    nsfidx, _ = nn[idx]
                    # Determine if lexical entries for the neighbor contain perceptual predicates.
                    for sem_idx in self.parser.lexicon.entries[nsfidx]:
                        psts = self.get_parse_subtrees(self.parser.lexicon.semantic_forms[sem_idx],
                                                       self.grounder.kb.perceptual_preds)
                        if len(psts) > 0:
                            if nsfidx not in perceptual_neighbors:
                                perceptual_neighbors[nsfidx] = []
                            perceptual_neighbors[nsfidx].extend(psts)
                if debug:
                    print ("preprocess_utterance_for_new_predicates: identified perceptual neighbors: " +
                           str(perceptual_neighbors))

                # If there are perceptual neighbors, confirm with the user that this new word requires perception.
                # If there were no neighbors at all, the word isn't in the embedding space and might be a brand name
                # (e.g. pringles) that we could consider perceptual by adding an "or len(nn) == 0".
                if len(perceptual_neighbors.keys()) > 0:
                    if self.use_shorter_utterances:
                        q = "Does '" + tk + "' refer to a property of an object?"
                    else:
                        q = ("I haven't heard the word '" + tk + "' before. Does it refer to properties of " +
                             "things, like a color, shape, or weight?")
                    c = self.get_yes_no_from_user(q)
                    if c == 'yes':
                        # Ask about each neighbor in the order we found them, corresponding to closest distances.
                        synonym_identified = None
                        for nsfidx in perceptual_neighbors.keys():
                            _q = ("Does '" + tk + "' mean the same thing as '" +
                                  self.parser.lexicon.surface_forms[nsfidx] + "'?")
                            _c = self.get_yes_no_from_user(_q)
                            # The new word tk is a synonym of the neighbor, so share lexical entries between them.
                            if _c == 'yes':
                                synonym_identified = [nsfidx, perceptual_neighbors[nsfidx]]
                                self.perceptual_pred_synonymy.append((tk, self.parser.lexicon.surface_forms[nsfidx],
                                                                     True))
                                break
                            # The new word is not a synonym according to this user.
                            else:
                                self.perceptual_pred_synonymy.append((tk, self.parser.lexicon.surface_forms[nsfidx],
                                                                     False))

                        # Whether we identified a synonym or not, we need to determine whether this word is being
                        # used as an adjective or a noun, which we can do based on its position in the utterance.
                        # TODO: make checking for adjective a flag depending on whether we're using a bare noun-only
                        # TODO: parser instead of the current hard fix to that.
                        # NOTE(review): tk_probably_adjective is computed but deliberately ignored below;
                        # the hard-coded False enforces the bare-noun-only parser fix described in the TODO.
                        tk_probably_adjective = self.is_token_adjective(tkidx, tks)
                        if debug:
                            # print ("preprocess_utterance_for_new_predicates: examined following token and guessed " +
                            #        " that '" + tk + "'s probably adjective value is " + str(tk_probably_adjective))
                            print ("preprocess_utterance_for_new_predicates: examined following token and guessed " +
                                   " that '" + tk + "'s probably adjective value is " + str(False))

                        # Add new lexical entries for this fresh perceptual token.
                        # self.add_new_perceptual_lexical_entries(tk, tk_probably_adjective, synonym_identified)
                        self.add_new_perceptual_lexical_entries(tk, False, synonym_identified)

                        # TODO: these are also part of the bare-noun hard fix.
                        self.parser.type_raise_bare_nouns()  # should only affect new nouns
                        self.parser.theta.update_probabilities()  # because the above adds new entries
# Add new lexical entries from a perceptual token.
# tk - the string token to be added
# tk_probably_adjective - whether the new token should be treated as an adjective entry
# synonym_identified - a tuple of the [surface_form_idx, semantic entries for that surface form] flagged syn w tk
    def add_new_perceptual_lexical_entries(self, tk, tk_probably_adjective, synonym_identified, known_ont_pred=None,
                                           debug=False):
        """Add lexical (and possibly ontological) entries for a new perceptual token.

        tk - the string token to be added
        tk_probably_adjective - whether the new token gets an adjective (N/N) entry rather than a noun (N) entry
        synonym_identified - [surface_form_idx, parse subtrees for that surface form] flagged as synonymous
                             with tk, or None
        known_ont_pred - an existing ontological predicate name to attach tk to, or None;
                         mutually exclusive with synonym_identified
        debug - when True, print tracing output
        Returns the new ontological concept string minted for tk, or None when a synonym
        supplied the semantics instead.
        """
        # Exactly one source of semantics may be supplied: a lexical synonym or a known ontology pred.
        assert synonym_identified is None or known_ont_pred is None

        # Prepare to add new entries.
        noun_cat_idx = self.parser.lexicon.categories.index('N')  # assumed to exist
        adj_cat_idx = self.parser.lexicon.categories.index([noun_cat_idx, 1, noun_cat_idx])  # i.e. N/N
        item_type_idx = self.parser.ontology.types.index('i')
        bool_type_idx = self.parser.ontology.types.index('t')
        pred_type_idx = self.parser.ontology.types.index([item_type_idx, bool_type_idx])  # i.e. <i,t>
        if tk_probably_adjective:
            # Adjective semantics wrap the predicate in a lambda conjoined with the modified noun.
            cat_to_match = adj_cat_idx
            sem_prefix = "lambda P:<i,t>.(and("
            sem_suffix = ", P))"
        else:
            # Noun semantics use the bare predicate.
            cat_to_match = noun_cat_idx
            sem_prefix = ""
            sem_suffix = ""

        # Add synonym lexical entry for appropriate form (adj or noun) of identified synonym,
        # or create one if necessary.
        # If the synonym has more than one N or N/N entry (as appropriate), both will be added.
        if synonym_identified is not None:
            nsfidx, psts = synonym_identified
            if debug:
                print ("add_new_perceptual_lexical_entries: " +
                       "searching for synonym category matches")

            # Borrow every entry of the synonym whose category matches the target category.
            synonym_had_category_match = False
            for sem_idx in self.parser.lexicon.entries[nsfidx]:
                if self.parser.lexicon.semantic_forms[sem_idx].category == cat_to_match:
                    if tk not in self.parser.lexicon.surface_forms:
                        self.parser.lexicon.surface_forms.append(tk)
                        self.parser.lexicon.entries.append([])
                    sfidx = self.parser.lexicon.surface_forms.index(tk)
                    if sfidx not in self.parser.theta._skipwords_given_surface_form:
                        # Inherit the synonym's skip-word weight for the new surface form.
                        self.parser.theta._skipwords_given_surface_form[sfidx] = \
                            self.parser.theta._skipwords_given_surface_form[nsfidx]
                    # NOTE(review): sfidx can be appended to neighbor_surface_forms once per matching
                    # entry — confirm duplicates are harmless downstream.
                    self.parser.lexicon.neighbor_surface_forms.append(sfidx)
                    self.parser.lexicon.entries[sfidx].append(sem_idx)
                    # Copy the synonym's learned entry weight rather than starting fresh.
                    self.parser.theta._lexicon_entry_given_token_counts[(sem_idx, sfidx)] = \
                        self.parser.theta._lexicon_entry_given_token_counts[(sem_idx, nsfidx)]
                    self.parser.theta.update_probabilities()
                    synonym_had_category_match = True
                    if debug:
                        print ("add_new_perceptual_lexical_entries: added a lexical entry" +
                               " due to category match: " +
                               self.parser.print_parse(self.parser.lexicon.semantic_forms[sem_idx],
                                                       True))

            # Create a new adjective entry N/N : lambda P.(synonympred, P) or N : synonympred
            # All perception predicates associated with entries in the chosen synonym generate entries.
            if not synonym_had_category_match:
                if debug:
                    print ("add_new_perceptual_lexical_entries: no category match for synonym")
                for pst in psts:  # trees with candidate synonym preds in them somewhere
                    candidate_preds = [p for p in self.scrape_preds_from_parse(pst)
                                       if p in self.grounder.kb.perceptual_preds]
                    for cpr in candidate_preds:
                        s = sem_prefix + cpr + sem_suffix
                        sem = self.parser.lexicon.read_semantic_form_from_str(s, cat_to_match, None, [])
                        if tk not in self.parser.lexicon.surface_forms:
                            self.parser.lexicon.surface_forms.append(tk)
                            self.parser.lexicon.entries.append([])
                        sfidx = self.parser.lexicon.surface_forms.index(tk)
                        self.parser.lexicon.neighbor_surface_forms.append(sfidx)
                        if sfidx not in self.parser.theta._skipwords_given_surface_form:
                            # Inherit the synonym's skip-word weight for the new surface form.
                            self.parser.theta._skipwords_given_surface_form[sfidx] = \
                                self.parser.theta._skipwords_given_surface_form[nsfidx]
                        if sem not in self.parser.lexicon.semantic_forms:
                            self.parser.lexicon.semantic_forms.append(sem)
                        sem_idx = self.parser.lexicon.semantic_forms.index(sem)
                        self.parser.lexicon.entries[sfidx].append(sem_idx)
                        self.parser.theta._lexicon_entry_given_token_counts[(sem_idx, sfidx)] = \
                            self.parser.theta.lexicon_weight  # fresh entry not borrowing neighbor value
                        if debug:
                            print ("add_new_perceptual_lexical_entries: created lexical entry " +
                                   "for candidate pred extracted from synonym trees: " +
                                   self.parser.print_parse(sem, True))
            # No new ontological concept is minted on the synonym path.
            ont_tk = None

        # No identified synonym, so we instead have to create a new ontological predicate
        # and then add a lexical entry pointing to it as a N or N/N entry, as appropriate.
        else:
            if debug:
                print ("add_new_perceptual_lexical_entries: no synonym found, so adding new " +
                       "ontological concept for '" + tk + "'")

            # Create a new ontological predicate to represent the new perceptual concept:
            # reuse the supplied name, or derive a collision-free name from the token itself.
            if known_ont_pred is not None:
                ont_tk = known_ont_pred
            elif tk in self.parser.ontology.preds:
                # The bare token name is taken; suffix with "_<i>" until a free name is found.
                i = 0
                while tk + "_" + str(i) in self.parser.ontology.preds:
                    i += 1
                ont_tk = tk + "_" + str(i)
            else:
                ont_tk = tk
            # Register the predicate in the ontology with type <i,t>.
            # NOTE(review): this registration also runs when known_ont_pred was supplied —
            # confirm callers never pass a predicate already present in the parser ontology.
            self.parser.ontology.preds.append(ont_tk)
            self.parser.ontology.entries.append(pred_type_idx)
            self.parser.ontology.num_args.append(self.parser.ontology.calc_num_pred_args(
                len(self.parser.ontology.preds) - 1))

            # Create a new perceptual predicate to represent the new perceptual concept.
            if ont_tk not in self.grounder.kb.pc.predicates:
                self.grounder.kb.pc.update_classifiers([ont_tk], [], [], [])  # blank concept
                if debug:
                    print ("add_new_perceptual_lexical_entries: updated perception classifiers with" +
                           " new concept '" + ont_tk + "'")

            # Create a lexical entry corresponding to the newly-acquired perceptual concept.
            s = sem_prefix + ont_tk + sem_suffix
            if debug:
                print ("add_new_perceptual_lexical_entries: adding lexical entry '" + s + "'")
            sem = self.parser.lexicon.read_semantic_form_from_str(s, cat_to_match, None, [])
            if tk not in self.parser.lexicon.surface_forms:
                self.parser.lexicon.surface_forms.append(tk)
                self.parser.lexicon.entries.append([])
            sfidx = self.parser.lexicon.surface_forms.index(tk)
            if sfidx not in self.parser.theta._skipwords_given_surface_form:
                self.parser.theta._skipwords_given_surface_form[sfidx] =\
                    - (self.parser.theta.lexicon_weight)  # * 2 new for demonstration
            if sem not in self.parser.lexicon.semantic_forms:
                self.parser.lexicon.semantic_forms.append(sem)
            sem_idx = self.parser.lexicon.semantic_forms.index(sem)
            self.parser.lexicon.entries[sfidx].append(sem_idx)
            self.parser.theta._lexicon_entry_given_token_counts[(sem_idx, sfidx)] = \
                self.parser.theta.lexicon_weight  # fresh entry not borrowing neighbor value
            if debug:
                print ("add_new_perceptual_lexical_entries: created lexical entry for new " +
                       "perceptual concept: " + self.parser.print_parse(sem, True))

        # Since entries may have been added, update probabilities before any more parsing is done.
        self.parser.theta.update_probabilities()

        # Return the new ontological concept string, if any, produced by this procedure.
        return ont_tk
# We assume if the token to the right of tk is the end of utterance or a non-perceptual
# word based on our lexicon (or appropriate beam search), it's a noun. Otherwise, it is an adjective.
# tkidx - the index of the token in question
# tks - the sequence of tokens
def is_token_adjective(self, tkidx, tks):
if tkidx < len(tks) - 1 and self.is_token_perceptual(tks[tkidx + 1]):
return True
else:
return False
# If word is last or has non-perceptual to the right and perceptual to the left, probably a noun.
# If the word to the left is 'the'/'a' that picks out an object, etc., also probably a noun.
# tkidx - the index of the token in question
# tks - the sequence of tokens
def is_token_noun(self, tkidx, tks):
if ((tkidx == len(tks) - 1 or not self.is_token_perceptual(tks[tkidx + 1])) and
(tkidx == 0 or self.is_token_perceptual(tks[tkidx - 1]))):
return True
elif tkidx > 0 and (tkidx == len(tks) - 1 or tks[tkidx + 1] in self.parser.lexicon.surface_forms):
# Check whether left of this is 'the'/'a' and right is a -known- word that isn't perceptual.
if tks[tkidx - 1] in self.parser.lexicon.surface_forms:
sts = []
for sf_idx in self.parser.lexicon.entries[self.parser.lexicon.surface_forms.index(tks[tkidx - 1])]:
sts.extend(self.get_parse_subtrees(self.parser.lexicon.semantic_forms[sf_idx], ['a_i']))
if len(sts) > 0:
return True
return False
# tk - the token in question
def is_token_perceptual(self, tk):
candidate_semantic_forms = []
if tk in self.parser.lexicon.surface_forms:
candidate_semantic_forms.extend([self.parser.lexicon.semantic_forms[sem_idx]
for sem_idx in self.parser.lexicon.entries[
self.parser.lexicon.surface_forms.index(tk)]])
else:
nnn = self.parser.lexicon.get_lexicon_word_embedding_neighbors(
tk, self.word_neighbors_to_consider_as_synonyms)
for nsfidx, _ in nnn:
candidate_semantic_forms.extend([self.parser.lexicon.semantic_forms[sem_idx]
for sem_idx in self.parser.lexicon.entries[nsfidx]])
psts = []
for ncsf in candidate_semantic_forms:
psts.extend(self.get_parse_subtrees(ncsf, self.grounder.kb.perceptual_preds))
# next word is probably not perceptual
if len(psts) > 0:
return True
else:
return False
# Given an initial query, keep pestering the user for a response we can parse into a yes/no confirmation
# until it's given.
def get_yes_no_from_user(self, q, rvs=None):
if rvs is None:
self.io.say_to_user(q)
else:
self.io.say_to_user_with_referents(q, rvs)
while True:
u = self.io.get_from_user()
gps, _ = self.parse_and_ground_utterance(u)
for g, _ in gps:
if type(g) is not bool and g.type == self.parser.ontology.types.index('c'):
if g.idx == self.parser.ontology.preds.index('yes'):
return 'yes'
elif g.idx == self.parser.ontology.preds.index('no'):
return 'no'
self.io.say_to_user("Sorry, I am confused and just need a 'yes' or 'no' response for now.")
if rvs is None:
self.io.say_to_user(q)
else:
self.io.say_to_user_with_referents(q, rvs)
# g is a string of values 'yes'|'no'
def update_action_belief_from_confirmation(self, g, action_confirmed, action_chosen, roles_in_q):
debug = False
if debug:
print ("update_action_belief_from_confirmation: confirmation response str " +
g + " with roles_in_q " + str(roles_in_q))
if g == 'yes':
for r in roles_in_q:
action_confirmed[r] = action_chosen[r][0]
if debug:
print ("update_action_belief_from_confirmation: confirmed role " + r + " with argument " +
action_chosen[r][0])
elif g == 'no':
if len(roles_in_q) > 0:
# Create a distribution from this negative confirmation with zero confidence in all
# members of mentioned roles, and uniform confidence in un-mentioned, to be interpolated
# with the existing belief state.
gd = {r: {a: 0.0 for a in self.action_belief_state[r]} for r in roles_in_q}
roles_to_dec = [r for r in roles_in_q if action_confirmed[r] is None]
for r in roles_to_dec:
to_inc = [arg for arg in self.action_belief_state[r] if arg != action_chosen[r][0]]
for arg in to_inc:
gd[r][arg] = 1.0 / len(to_inc)
if debug:
print ("update_action_belief_from_confirmation: neg conf distribution: " + str(gd))
# Update the primary belief distribution by interpolating it with this negative conf distribution.
for r in gd:
for a in gd[r]:
self.action_belief_state[r][a] = (self.action_belief_state[r][a] *
(1 - self.belief_update_rate) +
gd[r][a] * self.belief_update_rate)
else:
print("WARNING: confirmation update string was not yes/no; '" + str(g) + "'")
if debug:
print ("update_action_belief_from_groundings: interpolated, new belief distribution: " +
str(self.action_belief_state))
return action_confirmed
# Given a parse and a list of the roles felicitous in the dialog to update, update those roles' distributions
# Distribute portion of mass from everything not in confirmations to everything that is evenly.
def update_action_belief_from_groundings(self, gs, roles):
debug = False
if debug:
print ("update_action_belief_from_groundings called with gs\n" +
'\n'.join([str((self.parser.print_parse(g), conf)) for g, conf in gs]) +
" and roles " + str(roles))
# Form belief distribution based only on the incoming groundings.
gd = {r: {a: 0.0 for a in self.action_belief_state[r]} for r in roles}
for g, conf in gs:
if type(g) is bool:
continue
if debug:
print ("update_action_belief_from_groundings processing g:\n" +
str((self.parser.print_parse(g), conf)))
# Crawl parse for recognized actions.
based_on_action_trees = False
if 'action' in roles:
action_trees = self.get_parse_subtrees(g, self.actions)
if len(action_trees) > 0:
if debug:
print ("\tupdate_action_belief_from_groundings: action tree update")
based_on_action_trees = True
portion = conf / float(len(action_trees))
for at in action_trees:
a = self.parser.ontology.preds[at.idx]
gd['action'][a] += portion
# Update patient and recipient, if present, with action tree args.
# These disregard argument order in favor of finding matching argument types.
# This gives us more robustness to bad parses with incorrectly ordered args or incomplete args.
if self.parser.ontology.preds[at.idx] != 'move':
for r in ['goal', 'patient', 'recipient']:
if r in roles and at.children is not None:
for cn in at.children:
if (r in self.action_args[a] and
self.parser.ontology.types[cn.type] in self.action_args[a][r]):
c = self.parser.ontology.preds[cn.idx]
if c not in self.action_belief_state[r]:
continue
gd[r][c] += portion
# For 'move', order matters (source versus goal), so handle this separately
else:
role_order = ['patient', 'source', 'goal']
for idx in range(len(at.children)):
cn = at.children[idx]
r = role_order[idx]
if r in roles: # we might not have asked about each arg
if self.parser.ontology.types[cn.type] in self.action_args[a][r]:
c = self.parser.ontology.preds[cn.idx]
if c not in self.action_belief_state[r]:
continue
gd[r][c] += portion
# Else, just add counts as appropriate based on roles asked based on a trace of the whole tree.
# If we were trying to update an action but didn't find any trees, also take this route.
if not based_on_action_trees:
if debug:
print ("\tupdate_action_belief_from_groundings: crawling update")
for r in roles:
to_traverse = [g]
to_increment = []
while len(to_traverse) > 0:
cn = to_traverse.pop()
if self.parser.ontology.types[cn.type] in [t for a in self.actions
if r in self.action_args[a]
for t in self.action_args[a][r]]:
if not cn.is_lambda: # otherwise utterance isn't grounded
c = self.parser.ontology.preds[cn.idx]
if c not in self.action_belief_state[r]:
continue
to_increment.append(c)
if cn.children is not None:
to_traverse.extend(cn.children)
if len(to_increment) > 0:
portion = conf / float(len(to_increment))