SafariChess/SafariChess_AI.py


'''
Note that the engine uses two separate move-generation functions,
fox_moves and eagle_moves, because those two species move differently.
'''
import random
from math import inf
import collections
piece_score = {"7": 180, "1": 200, "6": 150, "5": 100, "4": 60, "3": 50, "2": 81}
mouse_score = [[13, 13, 13, 13, 12, 11, 10, 9, 8],
               [25, 20, 15, 18, 16, 13, 10, 9, 8],
               [50, 25, 20, 16, 15, 13, 10, 9, 8],
               [1919810, 50, 20, 11, 9, 9, 9, 9, 0],
               [50, 25, 20, 16, 14, 12, 8, 8, 8],
               [25, 20, 15, 17, 15, 12, 8, 8, 8],
               [11, 11, 10, 8, 8, 8, 8, 8, 8]]
# eagle_score = [[11, 12, 14, 13, 12, 11, 10, 8, 8],
# [15, 15, 14, 14, 0, 0, 0, 8, 8],
# [50, 20, 20, 0, 0, 0, 0, 8, 8],
# [100000, 50, 20, 13, 12, 11, 10, 8, 0],
# [50, 20, 20, 14, 0, 0, 0, 8, 8],
# [15, 12, 15, 14, 0, 0, 0, 8, 8],
# [11, 12, 14, 13, 12, 11, 10, 8, 8]]
eagle_score = [[14, 13, 10, 12, 14, 13, 10, 8, 7],
               [15, 15, 14, 0, 0, 0, 9, 8, 6],
               [50, 20, 20, 0, 0, 0, 7, 5, 4],
               [1919810, 50, 20, 14, 13, 11, 10, 8, 0],
               [50, 20, 20, 0, 0, 0, 7, 5, 4],
               [15, 12, 15, 0, 0, 0, 9, 8, 6],
               [14, 12, 10, 13, 14, 13, 10, 8, 7]]
fox_score = [[11, 12, 14, 13, 12, 11, 10, 8, 8],
             [15, 15, 14, 0, 0, 0, 10, 14, 11],
             [50, 20, 20, 0, 0, 0, 10, 8, 10],
             [1919810, 50, 20, 13, 12, 11, 11, 10, 0],
             [50, 20, 20, 0, 0, 0, 10, 8, 11],
             [15, 12, 15, 0, 0, 0, 10, 14, 11],
             [11, 12, 14, 13, 12, 11, 10, 8, 8]]
# wolf_score = [[11, 12, 14, 13, 12, 11, 10, 8, 8],
# [15, 15, 14, 0, 0, 0, 10, 8, 8],
# [50, 20, 20, 0, 0, 0, 10, 9, 10],
# [1919810, 50, 20, 13, 12, 11, 11, 10, 0],
# [50, 20, 20, 0, 0, 0, 10, 9, 10],
# [15, 12, 15, 0, 0, 0, 10, 8, 8],
# [11, 12, 14, 13, 12, 11, 10, 8, 8]]
wolf_score = [[11, 12, 14, 13, 12, 11, 10, 10, 8],
              [15, 15, 14, 0, 0, 0, 10, 14, 10],
              [50, 20, 20, 0, 0, 0, 10, 12, 10],
              [1919810, 50, 20, 12, 11, 10, 11, 12, 0],
              [50, 20, 20, 0, 0, 0, 10, 13, 10],
              [15, 12, 15, 0, 0, 0, 13, 14, 8],
              [11, 12, 14, 13, 12, 10, 11, 12, 8]]
leopard_score = [[11, 12, 14, 13, 12, 11, 10, 8, 8],
                 [15, 15, 14, 0, 0, 0, 10, 8, 8],
                 [50, 20, 20, 0, 0, 0, 10, 9, 10],
                 [1919810, 50, 20, 13, 12, 11, 11, 10, 0],
                 [50, 20, 20, 0, 0, 0, 10, 9, 10],
                 [15, 12, 15, 0, 0, 0, 10, 8, 8],
                 [11, 12, 14, 13, 12, 11, 10, 8, 8]]
lion_score = [[20, 20, 18, 15, 12, 11, 14, 12, 5],
              [40, 25, 30, 0, 0, 0, 16, 12, 12],
              [50, 40, 30, 0, 0, 0, 16, 12, 12],
              [1919810, 50, 20, 15, 15, 15, 9, 12, 0],
              [50, 40, 30, 0, 0, 0, 16, 12, 12],
              [40, 25, 30, 0, 0, 0, 16, 12, 12],
              [20, 20, 18, 15, 12, 11, 14, 12, 5]]
elephant_score = [[20, 20, 18, 15, 12, 11, 14, 12, 5],
                  [40, 25, 30, 0, 0, 0, 16, 12, 12],
                  [50, 40, 30, 0, 0, 0, 16, 12, 12],
                  [1919810, 50, 20, 15, 15, 15, 9, 12, 0],
                  [50, 40, 30, 0, 0, 0, 16, 12, 12],
                  [40, 25, 30, 0, 0, 0, 16, 12, 12],
                  [20, 20, 18, 15, 12, 11, 14, 12, 5]]
piece_position_scores = {"r1": mouse_score,
                         "b1": [line[::-1] for line in mouse_score[::-1]],
                         "r2": eagle_score,
                         "b2": [line[::-1] for line in eagle_score[::-1]],
                         "r3": fox_score,
                         "b3": [line[::-1] for line in fox_score[::-1]],
                         "r4": wolf_score,
                         "b4": [line[::-1] for line in wolf_score[::-1]],
                         "r5": leopard_score,
                         "b5": [line[::-1] for line in leopard_score[::-1]],
                         "r6": lion_score,
                         "b6": [line[::-1] for line in lion_score[::-1]],
                         "r7": elephant_score,
                         "b7": [line[::-1] for line in elephant_score[::-1]],
                         }
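# How the black tables relate to the red ones (my reading of the 7x9 board
# orientation, not stated explicitly above): each "b" table is the matching
# "r" table rotated 180 degrees, so the huge den-adjacent bonus sits on
# mirrored squares for the two sides, e.g.
#     piece_position_scores["r7"][3][0]  ==  1919810
#     piece_position_scores["b7"][3][8]  ==  1919810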

DEN_CONQUESTED = 10000  # score bound used for the alpha-beta search window
DRAW = 0
DEPTH = 4  # default search depth


def findRandomMove(valid_moves):
    return valid_moves[random.randint(0, len(valid_moves) - 1)]

# Is the greedy-move function used anywhere?
def find_GreadyMove(game_state, valid_moves):
    turnMultiplier = 1 if game_state.red_to_move else -1
    maxScore = -DEN_CONQUESTED
    bestMove = None
    for playerMove in valid_moves:
        game_state.makeMove(playerMove)
        score = turnMultiplier * scoreMaterial(game_state)
        if score > maxScore:
            maxScore = score
            bestMove = playerMove
        game_state.undoMove()
    return bestMove

def scoreMaterial(game_state):  # evaluate the board
    # Input: the current game_state; positive scores favour red, negative favour black.
    score = 0
    penalty_for_rep = 0
    for row in range(len(game_state.board)):
        for col in range(len(game_state.board[row])):  # walk the whole board
            piece = game_state.board[row][col]
            if piece != "00":
                piece_position_score = piece_position_scores[piece][row][col]  # positional score of this square
                if piece_position_scores[piece][row][col] in last_moves:  # repetition penalty (last_moves holds recent choices)
                    penalty_for_rep += 70
                if piece[0] == 'r':
                    score += piece_position_score + piece_score[piece[1]] - penalty_for_rep
                elif piece[0] == 'b':
                    score -= piece_position_score + piece_score[piece[1]] - penalty_for_rep  # note: does this ignore the possibility of "elimination"?
                    # To be checked.
    return score

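# Worked example of a single term in scoreMaterial (a sketch, assuming red's
# elephant "r7" stands on row 3, col 1 and no repetition penalty applies):
#     positional bonus: piece_position_scores["r7"][3][1]  ->  50
#     material value:   piece_score["7"]                   ->  180
#     contribution:     score += 50 + 180
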

def findMove_NegaMaxAlphaBeta(game_state, valid_moves, depth, DEPTH, alpha, beta, turn_multiplier):
    # Parameters:
    #   game_state      - current game state
    #   valid_moves     - list of legal moves for the side to move
    #   depth           - remaining search depth
    #   DEPTH           - the full (root) search depth
    #   alpha, beta     - alpha-beta pruning bounds
    #   turn_multiplier - +1 for red, -1 for black (the NegaMax sign trick)
    global next_move
    if depth == 0:
        return turn_multiplier * scoreMaterial(game_state)
    max_score = -inf
    for move in valid_moves:
        game_state.makeMove(move)
        next_moves = game_state.getAllMoves()
        score = -findMove_NegaMaxAlphaBeta(game_state, next_moves, depth - 1, DEPTH, -beta, -alpha, -turn_multiplier)
        if score > max_score:  # > or >= ??
            max_score = score
            if depth == DEPTH:
                next_move = move
        game_state.undoMove()
        if max_score > alpha:
            alpha = max_score
        if alpha >= beta:
            break
    return max_score

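# The NegaMax identity the routine above relies on (my summary, not an
# original comment): a position's value for the side to move is the negation
# of its value for the opponent,
#     value(state, turn_multiplier) == -value(state, -turn_multiplier)
# which is why each recursive call negates the result and swaps/negates the
# alpha-beta window.
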

def findMove_MiniMaxAlphaBeta(game_state, valid_moves, depth, alpha, beta, turn_multiplier):
    # Classic minimax with alpha-beta pruning; find_BestMove below uses the NegaMax version instead.
    global next_move

    def max_value(game_state, moves, alpha, beta, depth):
        if depth == 0:
            return turn_multiplier * scoreMaterial(game_state)
        v = -inf
        for move in moves:
            game_state.makeMove(move)
            next_moves = game_state.getAllMoves()
            v = max(v, min_value(game_state, next_moves, alpha, beta, depth - 1))
            game_state.undoMove()
            if v >= beta:
                return v
            alpha = max(alpha, v)
        return v

    def min_value(game_state, moves, alpha, beta, depth):
        if depth == 0:
            return turn_multiplier * scoreMaterial(game_state)
        v = inf
        for move in moves:
            game_state.makeMove(move)
            next_moves = game_state.getAllMoves()
            v = min(v, max_value(game_state, next_moves, alpha, beta, depth - 1))
            game_state.undoMove()
            if v <= alpha:
                return v
            beta = min(beta, v)
        return v

    # Body of alpha_beta_search:
    best_score = -inf
    beta = inf
    best_action = None
    for move in valid_moves:
        game_state.makeMove(move)
        next_moves = game_state.getAllMoves()
        v = min_value(game_state, next_moves, best_score, beta, depth)
        game_state.undoMove()
        if v > best_score:
            best_score = v
            best_action = move
    return best_action

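# Usage sketch (hypothetical; find_BestMove below only calls the NegaMax
# variant, and scoreMaterial needs the global last_moves deque to exist):
#     last_moves = collections.deque(maxlen=12)
#     best = findMove_MiniMaxAlphaBeta(gs, gs.getAllMoves(), 4, -inf, inf,
#                                      1 if gs.red_to_move else -1)
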

def find_BestMove(game_state, valid_moves, depth_p):
    global next_move
    DEPTH = depth_p
    next_move = None
    global last_moves
    last_moves = collections.deque(maxlen=12)
    random.shuffle(valid_moves)
    ordered_valid_moves = orderby_GreadyMove(game_state, valid_moves)
    # for i in valid_moves:
    #     print(f"Possible: {i}")
    # for i in ordered_valid_moves:
    #     print(f"New possible: {i}")
    findMove_NegaMaxAlphaBeta(game_state, ordered_valid_moves, depth_p, DEPTH, -DEN_CONQUESTED, DEN_CONQUESTED,
                              1 if game_state.red_to_move else -1)
    last_moves.append(next_move)
    return next_move

def orderby_GreadyMove(game_state, valid_moves):
    turnMultiplier = 1 if game_state.red_to_move else -1
    maxScore = -DEN_CONQUESTED
    de = collections.deque([])
    for playerMove in valid_moves:
        game_state.makeMove(playerMove)
        score = turnMultiplier * scoreMaterial(game_state)
        if score > maxScore:
            maxScore = score
            de.appendleft(playerMove)
        else:
            de.append(playerMove)
        game_state.undoMove()
    return de
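
# Example driver (a sketch only; SafariChess_Engine and its GameState API are
# assumptions based on how this module is used, not definitions from this file):
#
#     from SafariChess_Engine import GameState
#
#     gs = GameState()
#     ai_move = find_BestMove(gs, gs.getAllMoves(), depth_p=4)
#     gs.makeMove(ai_move)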