# search.py
# ---------
# Licensing Information:  You are free to use or extend these projects for
# educational purposes provided that (1) you do not distribute or publish
# solutions, (2) you retain this notice, and (3) you provide clear
# attribution to UC Berkeley, including a link to http://ai.berkeley.edu.
#
# Attribution Information: The Pacman AI projects were developed at UC Berkeley.
# The core projects and autograders were primarily created by John DeNero
# (denero@cs.berkeley.edu) and Dan Klein (klein@cs.berkeley.edu).
# Student side autograding was added by Brad Miller, Nick Hay, and
# Pieter Abbeel (pabbeel@cs.berkeley.edu).

"""
In search.py, you will implement generic search algorithms which are called by
Pacman agents (in searchAgents.py).
"""
from copy import deepcopy

from game import Directions
from problems import PositionSearchProblem
from util import is_a_pos_set_tuple, manhattanDistance, PriorityQueueWithFunction
from typing import Set, Tuple


## ############################################### ##
##                   Heuristics                    ##
## ############################################### ##

def blobHeuristic(state, problem):
    """
    Your heuristic for the FoodSearchProblem goes here.

    This heuristic must be consistent to ensure correctness. First, try to
    come up with an admissible heuristic; almost all admissible heuristics
    will be consistent as well. If using A* ever finds a solution that is
    worse than what uniform cost search finds, your heuristic is *not*
    consistent, and probably not admissible! On the other hand, inadmissible
    or inconsistent heuristics may find optimal solutions, so be careful.

    The state is a tuple (pacmanPosition, blob) where blob is the collection
    of positions still to be reached (e.g. food coordinates). If you want
    access to info like walls, capsules, etc., you can query the problem.
    For example, problem.walls gives you a Grid of where the walls are.

    If you want to *store* information to be reused in other calls to the
    heuristic, there is a dictionary called problem.heuristicInfo that you
    can use. For example, if you only want to count the walls once and store
    that value, try: problem.heuristicInfo['wallCount'] = problem.walls.count()
    Subsequent calls to this heuristic can access
    problem.heuristicInfo['wallCount'].

    Implementation: over every pair of remaining targets, take the maze
    distance between the pair plus the maze distance from Pacman to the
    nearer member of the pair; return the maximum over all pairs. This is
    admissible because any tour must reach both members of the worst pair.
    """
    (position, blob) = state
    gameState = problem.startingGameState
    # Shared cache of maze distances so repeated pairs are computed only once.
    mazeDistCache = problem.maze_dist_cache
    blob_list = list(blob)
    blob_count = len(blob_list)
    # Edge cases: no targets left, or a single target.
    if blob_count == 0:
        return 0
    if blob_count == 1:
        return mazeDistance(position, blob_list[0], gameState, mazeDistCache)
    pair_mds = [mazeDistance(blob_list[i1], blob_list[i2], gameState, mazeDistCache) +
                min(mazeDistance(position, blob_list[i1], gameState, mazeDistCache),
                    mazeDistance(position, blob_list[i2], gameState, mazeDistCache))
                for i1 in range(blob_count - 1)
                for i2 in range(i1 + 1, blob_count)]
    return max(pair_mds)


# The corners and food problems use the same pairwise heuristic.
cornersHeuristic = foodHeuristic = blobHeuristic


def euclideanHeuristic(position, problem, info=None):
    """
    The Euclidean distance heuristic for a PositionSearchProblem.

    The `info` parameter is unused; its default is None rather than a
    mutable `{}` to avoid the shared-mutable-default pitfall.
    """
    xy1 = position
    xy2 = problem.goal
    return ((xy1[0] - xy2[0]) ** 2 + (xy1[1] - xy2[1]) ** 2) ** 0.5


def manhattanHeuristic(position, problem, info=None):
    """
    The Manhattan distance heuristic for a PositionSearchProblem.

    The `info` parameter is unused; its default is None rather than a
    mutable `{}` to avoid the shared-mutable-default pitfall.
    """
    return manhattanDistance(position, problem.goal)


def mazeDistance(point1, point2, gameState, mazeDistCache):
    """
    Returns the maze distance between any two points, using the search
    functions you have already built. The gameState can be any game state --
    Pacman's position in that state is ignored. Results are memoized in
    mazeDistCache, keyed by the sorted point pair (distance is symmetric).

    Example usage: mazeDistance((2, 4), (5, 6), gameState, cache)

    This might be a useful helper function for your ApproximateSearchAgent.
    """
    (x1, y1) = point1
    (x2, y2) = point2
    walls = gameState.getWalls()
    assert not walls[x1][y1], f'point1 is a wall: {point1}. '
    assert not walls[x2][y2], f'point2 is a wall: {point2}. '
    # Canonical (sorted) key so (a, b) and (b, a) share one cache entry.
    sorted_points = (point1, point2) if point1 <= point2 else (point2, point1)
    # Can't write mazeDistCache.setdefault(sorted_points, maze_dist(...))
    # unconditionally, because as an argument the BFS would always be
    # evaluated even when the cached value makes it unnecessary.
    if sorted_points in mazeDistCache:
        return mazeDistCache[sorted_points]
    prob = PositionSearchProblem(gameState, start=point1, goal=point2,
                                 warn=False, visualize=False)
    dist = mazeDistCache.setdefault(sorted_points, len(bfs(prob)))
    return dist


def nullHeuristic(state, problem=None):
    """
    A heuristic function estimates the cost from the current state to the
    nearest goal in the provided SearchProblem. This heuristic is trivial.
    """
    return 0


def visit_all_heuristic(current_xy: Tuple[int, int],
                        unvisited_xys: Set[Tuple[int, int]]) -> int:
    """
    Greedy nearest-neighbor estimate of the cost to visit every position in
    unvisited_xys, repeatedly walking (by Manhattan distance) to the closest
    remaining position.

    Bug fix: the original implementation removed elements from the caller's
    set in place (`unvisited_xys -= {current_xy}`), destroying it as a side
    effect. We now work on a copy; the returned value is unchanged.
    """
    remaining = set(unvisited_xys)
    total_dist = 0
    while remaining:
        (next_dist, current_xy) = min(
            ((manhattanDistance(current_xy, xy), xy) for xy in remaining),
            key=lambda dist_xy: dist_xy[0])
        total_dist += next_dist
        remaining.remove(current_xy)
    return total_dist


## ############################################### ##
##              Search strategies                  ##
## ############################################### ##

def graph_search(problem, priority_fn):
    """
    This general search function can be used for BFS, DFS, UCS, and
    A*-search. The way to differentiate one from another is with the
    priority function.

    :param problem: The searchProblem being solved. (See the abstract
        definition of a SearchProblem in problems.py.)
    :param priority_fn: A function that takes a (state, actions) pair --
        the state and the list of actions to get to that state -- and
        returns a value that can be used as a priority.
    :return: A list of actions: the actions to take from the start state
        to the goal. Returns [Directions.STOP] if no path is found.
    """
    # The elements in the priority queue are: (state, list_of_actions).
    frontier = PriorityQueueWithFunction(priority_fn)
    start_state = problem.getStartState()
    actions = []
    # The PriorityQueue calls priority_fn, which maps
    # (state, actions) -> priority when an item is pushed.
    frontier.push((start_state, actions))
    closed = {}
    while not frontier.isEmpty():
        (state, actions) = frontier.pop()
        # Goal test on pop, so the first goal popped has the best priority.
        if problem.isGoalState(state):
            return actions
        if not has_seen(state, closed):
            for (successor, action, _) in problem.getSuccessors(state):
                frontier.push((successor, actions + [action]))
    # If we get here, we didn't find a path to the goal.
    return [Directions.STOP]


def has_seen(state, closed):
    """
    Return True iff `state` is already covered by the closed collection;
    otherwise record it and return False.

    For (position, set) states, a state is "seen" when some previously
    recorded set at the same position is a subset of this one -- the new
    state has at least as much left to do, so it is dominated and can be
    skipped. Other states are tracked by simple membership.
    """
    if is_a_pos_set_tuple(state):
        (pos, sub_blob) = state
        blobs = closed.setdefault(pos, set())
        if any(sub_blob >= seen_set for seen_set in blobs):
            return True
        blobs.add(sub_blob)
        return False
    if state in closed:
        return True
    # Value is unused; only membership matters for plain states.
    closed[state] = []
    return False


def aStarSearch(problem, heuristic=nullHeuristic):
    """Search the node that has the lowest combined cost and heuristic first."""
    def priority_fn(state_actions):
        (state, actions) = state_actions
        cost_of_actions = problem.getCostOfActions(actions)
        h_value = heuristic(state, problem)
        # Is the state: (pos, remaining elements to retrieve)? If so, break
        # ties in f = g + h by preferring fewer remaining elements.
        if isinstance(state, tuple) and isinstance(state[0], tuple):
            return (cost_of_actions + h_value, len(state[1]))
        return cost_of_actions + h_value

    return graph_search(problem, priority_fn)


def breadthFirstSearch(problem):
    """Search the shallowest nodes in the search tree first."""
    # Priority = path length, so shallower nodes are expanded first.
    return graph_search(problem, lambda state_actions: len(state_actions[1]))


def depthFirstSearch(problem):
    """
    Search the deepest nodes in the search tree first.

    Your search algorithm needs to return a list of actions that reaches the
    goal. Make sure to implement a graph search algorithm.

    To get started, you might want to try some of these simple commands to
    understand the search problem that is being passed in:

    print("Start:", problem.getStartState())
    print("Is the start a goal?", problem.isGoalState(problem.getStartState()))
    print("Start's successors:", problem.getSuccessors(problem.getStartState()))
    """
    # Priority = negative path length, so deeper nodes are expanded first.
    return graph_search(problem, lambda state_actions: -len(state_actions[1]))


def tinyMazeSearch(problem):
    """
    Returns a sequence of moves that solves tinyMaze. For any other maze,
    the sequence of moves will be incorrect, so only use this for tinyMaze.
    """
    s = Directions.SOUTH
    w = Directions.WEST
    return [s, s, w, s, w, w, s, w]


def uniformCostSearch(problem):
    """Search the node of least total cost first."""
    # Priority = accumulated cost of the action sequence.
    return graph_search(problem,
                        lambda state_actions: problem.getCostOfActions(state_actions[1]))


def print_frontier(frontier: PriorityQueueWithFunction):
    """
    A utility debug function that prints the frontier. It makes a copy first
    so that the actual frontier is unchanged.
    """
    print(f'Frontier: ({frontier.count}): ')
    f_copy = deepcopy(frontier)
    while not f_copy.isEmpty():
        print(f'\t{f_copy.pop()}')
    print()


# Abbreviations
bfs = breadthFirstSearch
dfs = depthFirstSearch
astar = aStarSearch
ucs = uniformCostSearch