diff --git a/notes.md b/notes.md
index 86d7e35..ebae127 100644
--- a/notes.md
+++ b/notes.md
@@ -27,9 +27,16 @@ and it's your turn, which action would you take? Why?
 ---+---+---    ---+---+---    ---+---+---    ---+---+---
    |   |          |   | O        |   |          |   |
+For the first board I would play 5 because I would win.
+For the second board I would play 5 because I would block O from winning.
+For the third board I would play 0 because it guarantees a board where I can win on my next turn no matter how O responds.
+For the fourth board I would play 4 because it gets me closer to winning and blocks O from getting closer.
+
 ### Initial game state
 
 You can get the inital game state using game.get_initial_state(). What is the current and future reward for this state? What does this mean?
 
+The current reward for this state is 0, which means the game is either incomplete or has ended in a draw. In this case, the game is incomplete.
+The future reward for this state is 0, which means that if both players play optimally, the game will end in a draw.

diff --git a/play_ttt.py b/play_ttt.py
index ef5530a..92542ba 100644
--- a/play_ttt.py
+++ b/play_ttt.py
@@ -3,7 +3,7 @@ from ttt.view import TTTView
 from ttt.player import TTTHumanPlayer, TTTComputerPlayer
 
 player0 = TTTHumanPlayer("Player 1")
-player1 = TTTHumanPlayer("Player 2")
+player1 = TTTComputerPlayer("Robot")
 
 game = TTTGame()
 view = TTTView(player0, player1)

diff --git a/ttt/player.py b/ttt/player.py
index bfbbe15..0349fdf 100644
--- a/ttt/player.py
+++ b/ttt/player.py
@@ -1,5 +1,6 @@
 from click import Choice, prompt
 from strategy.random_strategy import RandomStrategy
+from strategy.lookahead_strategy import LookaheadStrategy
 from ttt.game import TTTGame
 import random
 
@@ -24,7 +25,7 @@ class TTTComputerPlayer:
     def __init__(self, name):
         "Sets up the player."
         self.name = name
-        self.strategy = RandomStrategy(TTTGame())
+        self.strategy = LookaheadStrategy(TTTGame(), deterministic=False)
 
     def choose_action(self, state):
         "Chooses a random move from the moves available."
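
The notes above say the future reward of the initial state is 0 because optimal play by both sides ends in a draw. The sketch below shows how such a value can be computed by exhaustive lookahead (plain minimax). It is not the repo's actual `LookaheadStrategy`; the game interface it assumes (`get_actions`, `get_next_state`, `get_reward`, `is_over`, and an `x_moves_next` flag on the state) is hypothetical and may not match `ttt/game.py`.

```python
# Minimal minimax sketch for the "future reward" of a state.
# NOTE: the interface used here (get_actions, get_next_state, get_reward,
# is_over, state.x_moves_next) is an assumption, not the repo's real API.

def future_reward(game, state):
    """Value of `state` under optimal play by both sides:
    +1 if X can force a win, -1 if O can force a win, 0 if best play draws."""
    if game.is_over(state):
        return game.get_reward(state)   # terminal value: +1, -1, or 0
    values = [
        future_reward(game, game.get_next_state(state, action))
        for action in game.get_actions(state)
    ]
    # X is the maximizer, O the minimizer.
    return max(values) if state.x_moves_next else min(values)
```

Run on the empty tic-tac-toe board, a search like this returns 0, which is the basis for the note that perfect play from the initial state ends in a draw. A non-deterministic variant (in the spirit of the `deterministic=False` flag passed to `LookaheadStrategy` above) would simply break ties among equally valued moves at random.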