diff --git a/rgoap/src/rgoap/common.py b/rgoap/src/rgoap/common.py index 40884d9..f54f156 100644 --- a/rgoap/src/rgoap/common.py +++ b/rgoap/src/rgoap/common.py @@ -99,11 +99,11 @@ def get_unsatisfied_conditions(self, worldstate): unsatisfied_conditions = {condition for condition in common_conditions_set if (self.get_condition_value(condition) != - worldstate.get_condition_value(condition)) - } + worldstate.get_condition_value(condition))} if _logger.isEnabledFor(logging.DEBUG): - _logger.debug("unsatisfied conditions:\n%s", + _logger.debug("unsatisfied conditions between world states: %d:\n%s", + len(unsatisfied_conditions), '\n'.join([str(c) for c in unsatisfied_conditions])) return unsatisfied_conditions @@ -265,7 +265,7 @@ def __init__(self, preconditions, usability=1): self.usability = usability def __str__(self): - return '%s (u=%s)' % (self.__class__.__name__, self.usability) + return '%s (usability=%s)' % (self.__class__.__name__, self.usability) def __repr__(self): return '<%s usability=%f preconditions=%s>' % ( @@ -378,43 +378,8 @@ def _generate_variable_preconditions(self, var_effects, worldstate, start_worlds Must be implemented if the action contains variable effects. """ # TODO: maybe implement a default behaviour, at least for variable effects that can reach any value - raise NotImplementedError - - - -class ActionBag(object): - - def __init__(self): - self._actions = set() - - def __repr__(self): - return '' % self._actions - - def add(self, action): - self._actions.add(action) - - # regressive planning - def generate_matching_actions(self, start_worldstate, node_worldstate): - """Generator providing actions that might help between - start_worldstate and current node_worldstate. - """ - # TODO: This solution does not work when there are actions that produce an empty - # common_states_set and no valid action is considered - is this actually possible? 
- the start_worldstate should contain every condition ever needed by an action or condition - - # check which conditions differ between start and current node - unsatisfied_conditions_set = node_worldstate.get_unsatisfied_conditions(start_worldstate) - - # check which action might satisfy those conditions - for action in self._actions: - if not action.check_freeform_context(): - # TODO: make warning appear once per planning cycle (not per runtime), if possible - _logger.warn("ignoring action with bad freeform context: %s", action) - elif action.has_satisfying_effects(node_worldstate, start_worldstate, unsatisfied_conditions_set): - _logger.debug("helping action: %s", action) - yield action - else: - _logger.debug("helpless action: %s", action) - + for effect in self._effects: + if isinstance(effect, VariableEffect): + raise NotImplementedError diff --git a/rgoap/src/rgoap/planning.py b/rgoap/src/rgoap/planning.py index d115036..7d2227e 100644 --- a/rgoap/src/rgoap/planning.py +++ b/rgoap/src/rgoap/planning.py @@ -147,9 +147,12 @@ def _calc_heuristic_distance_for_node(self, start_worldstate): self.heuristic_distance = min(len(unsatisfied_conditions_set), self.heuristic_distance) # regressive planning - def get_child_nodes_for_valid_actions(self, actions_generator, start_worldstate): - assert len(self.possible_prev_nodes) == 0, "Node.get_child_nodes_for_valid_actions is probably not safe to be called twice" - for action in actions_generator: + def get_child_nodes(self, actions, start_worldstate): + """Returns a list of nodes that are children of this node and + contain the given action and start worldstate. 
+ """ + assert len(self.possible_prev_nodes) == 0, "Node.get_child_nodes is probably not safe to be called twice" + for action in actions: nodes_path_list = self.parent_nodes_path_list[:] nodes_path_list.append(self) actions_path_list = self.parent_actions_path_list[:] @@ -164,10 +167,14 @@ def get_child_nodes_for_valid_actions(self, actions_generator, start_worldstate) class Planner(object): + """ + The given start_worldstate must contain every condition ever needed + by an action or condition. + """ # TODO: make ordering of actions possible (e.g. move before lookaround) - def __init__(self, actionbag, worldstate, goal): - self._actionbag = actionbag + def __init__(self, actions, worldstate, goal): + self._actions = actions self._start_worldstate = worldstate self._goal = goal @@ -180,18 +187,26 @@ def plan(self, start_worldstate=None, goal=None): If any parameter is not given the data given at initialisation is used. """ - + # store parameters in instance variables if start_worldstate is not None: self._start_worldstate = start_worldstate if goal is not None: self._goal = goal - _logger.info("Planning loop started\n""actionbag: %s\n" + # check input + checked_actions = set() + for action in self._actions: + if not action.check_freeform_context(): + _logger.warn("Ignoring action with bad freeform context: %s", action) + else: + checked_actions.add(action) + + _logger.info("Planner started\n""actions: %s\n" "start_worldstate: %s\n""goal: %s", - self._actionbag, self._start_worldstate, self._goal) + self._actions, self._start_worldstate, self._goal) + # setup goal and loop variables goal_worldstate = WorldState() - self._goal.apply_preconditions(goal_worldstate) _logger.debug("goal_worldstate: %s", goal_worldstate) @@ -222,9 +237,10 @@ def plan(self, start_worldstate=None, goal=None): _logger.info("plan actions: %s", current_node.parent_actions_path_list) return current_node - new_child_nodes = current_node.get_child_nodes_for_valid_actions( - 
self._actionbag.generate_matching_actions(self._start_worldstate, current_node.worldstate), - self._start_worldstate) + helpful_actions = self._filter_matching_actions(current_node.worldstate, + checked_actions) + new_child_nodes = current_node.get_child_nodes(helpful_actions, + self._start_worldstate) _logger.debug("new child nodes: %s", new_child_nodes) # add new nodes and sort. this is stable, so old nodes stay @@ -235,6 +251,24 @@ def plan(self, start_worldstate=None, goal=None): _logger.warn("No plan found.") return None + def _filter_matching_actions(self, node_worldstate, actions): + """Returns a list of actions that might help between + start_worldstate and current node_worldstate. + """ + # check which conditions differ between start and current node + unsatisfied_conditions_set = node_worldstate.get_unsatisfied_conditions(self._start_worldstate) + + helpful_actions = [] + # check which action might satisfy those conditions + for action in actions: + if action.has_satisfying_effects(node_worldstate, self._start_worldstate, unsatisfied_conditions_set): + _logger.debug("helping action: %s", action) + helpful_actions.append(action) + else: + _logger.debug("helpless action: %s", action) + + return helpful_actions + class PlanExecutor(object): diff --git a/rgoap/src/rgoap/runner.py b/rgoap/src/rgoap/runner.py index 2373528..8c98c29 100644 --- a/rgoap/src/rgoap/runner.py +++ b/rgoap/src/rgoap/runner.py @@ -37,7 +37,7 @@ import rgoap -from common import ActionBag, Condition, WorldState, stringify, stringify_dict +from common import Condition, WorldState, stringify, stringify_dict from memory import Memory from planning import Planner, PlanExecutor @@ -51,7 +51,7 @@ class Runner(object): """ self.memory: memory to be used for conditions and actions self.worldstate: the default/start worldstate - self.actionbag: the actions this runner uses + self.actions: the actions this runner uses self.planner: the planner this runner uses """ @@ -64,15 +64,15 @@ def 
__init__(self, config_module=None): """ self.memory = Memory() self.worldstate = WorldState() - self.actionbag = ActionBag() + self.actions = set() if config_module is not None: for condition in config_module.get_all_conditions(self.memory): Condition.add(condition) for action in config_module.get_all_actions(self.memory): - self.actionbag.add(action) + self.actions.add(action) - self.planner = Planner(self.actionbag, self.worldstate, None) + self.planner = Planner(self.actions, self.worldstate, None) self._last_goal = None self._preempt_requested = False # preemption mechanism @@ -80,7 +80,7 @@ def __init__(self, config_module=None): def __repr__(self): return '<%s memory=%s worldstate=%s actions=%s planner=%s>' % (self.__class__.__name__, - self.memory, self.worldstate, self.actionbag, self.planner) + self.memory, self.worldstate, self.actions, self.planner) def request_preempt(self): diff --git a/rgoap/test/TestMemory.py b/rgoap/test/TestMemory.py index 94b10c9..48543f8 100644 --- a/rgoap/test/TestMemory.py +++ b/rgoap/test/TestMemory.py @@ -56,13 +56,13 @@ def setUp(self): print Condition.print_dict() - self.actionbag = self.runner.actionbag - self.actionbag.add(MemoryChangeVarAction(self.memory, 'memory.counter', 2, 3)) - self.actionbag.add(MemoryChangeVarAction(self.memory, 'memory.counter', 0, 1)) - self.actionbag.add(MemoryChangeVarAction(self.memory, 'memory.counter', 1, 2)) - self.actionbag.add(MemoryChangeVarAction(self.memory, 'memory.counter', -2, 3)) + self.actions = self.runner.actions + self.actions.add(MemoryChangeVarAction(self.memory, 'memory.counter', 2, 3)) + self.actions.add(MemoryChangeVarAction(self.memory, 'memory.counter', 0, 1)) + self.actions.add(MemoryChangeVarAction(self.memory, 'memory.counter', 1, 2)) + self.actions.add(MemoryChangeVarAction(self.memory, 'memory.counter', -2, 3)) - print self.actionbag + print self.actions self.goal = Goal([Precondition(Condition.get('memory.counter'), 3)]) @@ -122,12 +122,12 @@ def setUp(self): 
Condition.initialize_worldstate(self.worldstate) - self.actionbag = self.runner.actionbag - self.actionbag.add(MemoryIncrementerAction(self.memory, 'memory.counter')) + self.actions = self.runner.actions + self.actions.add(MemoryIncrementerAction(self.memory, 'memory.counter')) print Condition.print_dict() - print self.actionbag + print self.actions self.goal = Goal([Precondition(Condition.get('memory.counter'), 3)]) @@ -166,7 +166,7 @@ def testPlannerNeg(self): def testPlannerNegPos(self): print '==', self.testPlannerNegPos.__name__ - self.actionbag.add(MemoryIncrementerAction(self.memory, 'memory.counter', -4)) + self.actions.add(MemoryIncrementerAction(self.memory, 'memory.counter', -4)) start_node = self.runner.update_and_plan(self.goal_inaccessible, introspection=True) print 'start_node found: ', start_node self.assertIsNotNone(start_node, 'There should be a plan') @@ -190,9 +190,9 @@ def testPlannerDeviation(self): def testPlannerBig(self): print '==', self.testPlannerBig.__name__ - self.actionbag.add(MemoryIncrementerAction(self.memory, 'memory.counter', -4)) - self.actionbag.add(MemoryIncrementerAction(self.memory, 'memory.counter', 11)) - self.actionbag.add(MemoryIncrementerAction(self.memory, 'memory.counter', 3)) + self.actions.add(MemoryIncrementerAction(self.memory, 'memory.counter', -4)) + self.actions.add(MemoryIncrementerAction(self.memory, 'memory.counter', 11)) + self.actions.add(MemoryIncrementerAction(self.memory, 'memory.counter', 3)) goal_big = Goal([Precondition(Condition.get('memory.counter'), 23)]) start_node = self.runner.update_and_plan(goal_big, introspection=True) print 'start_node found: ', start_node diff --git a/rgoap/test/TestPrecEffSym.py b/rgoap/test/TestPrecEffSym.py index 08bb0ea..9abb7aa 100644 --- a/rgoap/test/TestPrecEffSym.py +++ b/rgoap/test/TestPrecEffSym.py @@ -57,7 +57,7 @@ def run(self, next_worldstate): Condition.add(MemoryCondition(runner.memory, 'robot.bumpered', True)) - runner.actionbag.add(SymmetricAction()) + 
runner.actions.add(SymmetricAction()) Condition.initialize_worldstate(runner.worldstate) diff --git a/rgoap_smach/test/SmachRGOAPTest.py b/rgoap_smach/test/SmachRGOAPTest.py index 704ceca..673afb4 100644 --- a/rgoap_smach/test/SmachRGOAPTest.py +++ b/rgoap_smach/test/SmachRGOAPTest.py @@ -87,8 +87,8 @@ def testRunner(self): Condition.add(MemoryCondition(memory, 'awareness', 0)) Condition.add(MemoryCondition(memory, 'arm_can_move', True)) - self.runner.actionbag.add(LookAroundAction()) - print self.runner.actionbag + self.runner.actions.add(LookAroundAction()) + print self.runner.actions goal = Goal([Precondition(Condition.get('awareness'), 2)]) self.runner.update_and_plan_and_execute(goal) @@ -141,7 +141,7 @@ def translate_userdata_to_worldstate(self, userdata, next_worldstate): # next_worldstate.set_condition_value(Condition.get('memory.out'), userdata.o) - self.runner.actionbag.add(TranslateAction()) + self.runner.actions.add(TranslateAction()) goal = Goal([Precondition(Condition.get('memory.out'), NUMBER_OUT), # memory.in is added to goal to be available in goal/next_worldstate