@@ -9,6 +9,8 @@ from pymoo.operators.crossover.sbx import SBX
 from pymoo.operators.mutation.pm import PM
 from pymoo.termination import get_termination
 from sklearn.gaussian_process import GaussianProcessRegressor
+from sklearn.gaussian_process.kernels import RBF
+from sklearn.linear_model import Ridge
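+# RBF is used for the tuned GP kernels; Ridge provides the linear constraint surrogates below.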
 
 # Dynamic model parameters
 alpha = 0.05  # glacier attraction coefficient
@@ -93,63 +95,97 @@ class SustainableTourismProblem(Problem):
 
 # MOEA/D-EGO algorithm
-def moead_ego(problem, n_init=50, n_iter=200):
-    ref_dirs = get_reference_directions("das-dennis", 3, n_partitions=18)
+def moead_ego(problem, n_init=50, n_iter=100):
+    ref_dirs = get_reference_directions("das-dennis", 3, n_partitions=24)
 
-    # Generate the initial samples
-    X_init = LHS().do(problem, n_init).get("X")
+    # Revised LHS sampling setup
+    sampling = LHS()  # removed the problematic parameters
+    X_init = sampling.do(problem, n_init).get("X")
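+    # Latin hypercube sampling spreads the initial design evenly across the variable bounds.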
     F_init, G_init = problem.evaluate(X_init, return_values_of=["F", "G"])
 
-    # Surrogate models
+    # Optimized surrogate models
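+    # Design note: full GPs (per-dimension RBF kernel, normalized targets) model the three
+    # objectives, while the six constraints get cheap linear surrogates.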
     class SurrogateModel:
         def __init__(self):
             self.models = {
-                'f1': GaussianProcessRegressor(),
-                'f2': GaussianProcessRegressor(),
-                'f3': GaussianProcessRegressor(),
-                'g1': GaussianProcessRegressor(),
-                'g2': GaussianProcessRegressor(),
-                'g3': GaussianProcessRegressor(),
-                'g4': GaussianProcessRegressor(),
-                'g5': GaussianProcessRegressor(),
-                'g6': GaussianProcessRegressor()
+                'f1': GaussianProcessRegressor(
+                    kernel=1.0 * RBF(length_scale=[1.0] * 4, length_scale_bounds=(1e-3, 1e3)),
+                    n_restarts_optimizer=2,
+                    normalize_y=True,
+                    random_state=42
+                ),
+                'f2': GaussianProcessRegressor(
+                    kernel=1.0 * RBF(length_scale=[1.0] * 4, length_scale_bounds=(1e-3, 1e3)),
+                    n_restarts_optimizer=2,
+                    normalize_y=True,
+                    random_state=42
+                ),
+                'f3': GaussianProcessRegressor(
+                    kernel=1.0 * RBF(length_scale=[1.0] * 4, length_scale_bounds=(1e-3, 1e3)),
+                    n_restarts_optimizer=2,
+                    normalize_y=True,
+                    random_state=42
+                ),
             }
+            # Use simple linear regression for the constraints
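+            # (a closed-form Ridge refit is far cheaper per iteration than re-optimizing six GP kernels)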
+            self.constraint_models = {
+                f'g{i+1}': Ridge(alpha=1.0) for i in range(6)
+            }
 
         def fit(self, X, F, G):
-            self.models['f1'].fit(X, F[:, 0])
-            self.models['f2'].fit(X, F[:, 1])
-            self.models['f3'].fit(X, F[:, 2])
+            # Standardize the inputs
+            self.X_mean = X.mean(axis=0)
+            self.X_std = X.std(axis=0)
+            X_norm = (X - self.X_mean) / self.X_std
+
+            for i, model in enumerate(['f1', 'f2', 'f3']):
+                self.models[model].fit(X_norm, F[:, i])
 
             for i in range(6):
-                self.models[f'g{i + 1}'].fit(X, G[:, i])
+                self.constraint_models[f'g{i+1}'].fit(X_norm, G[:, i])
 
         def predict(self, X):
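+            # Reuses the training mean/std computed in fit(), so fit() must be called first.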
-            f1_pred = self.models['f1'].predict(X)
-            f2_pred = self.models['f2'].predict(X)
-            f3_pred = self.models['f3'].predict(X)
-            g_pred = np.column_stack([self.models[f'g{i + 1}'].predict(X) for i in range(6)])
+            X_norm = (X - self.X_mean) / self.X_std
+            f1_pred = self.models['f1'].predict(X_norm)
+            f2_pred = self.models['f2'].predict(X_norm)
+            f3_pred = self.models['f3'].predict(X_norm)
+
+            g_pred = np.column_stack([
+                self.constraint_models[f'g{i+1}'].predict(X_norm)
+                for i in range(6)
+            ])
 
             return f1_pred, f2_pred, f3_pred, g_pred
 
     surrogate = SurrogateModel()
     surrogate.fit(X_init, F_init, G_init)
 
-    # MOEA/D configuration
+    # Optimized MOEA/D configuration
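+    # n_neighbors sets how many neighboring subproblems each weight vector draws mates from;
+    # prob_neighbor_mating is the probability that parents come from that neighborhood.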
     algorithm = MOEAD(
         ref_dirs=ref_dirs,
-        n_neighbors=15,
+        n_neighbors=25,
         decomposition="tchebi",
-        prob_neighbor_mating=0.9,
-        sampling=LHS(),
-        crossover=SBX(prob=0.9, eta=15),
-        mutation=PM(eta=20),
+        prob_neighbor_mating=0.95,
+        sampling=sampling,
+        crossover=SBX(prob=0.95, eta=20),
+        mutation=PM(eta=25),
     )
 
-    for _ in range(n_iter):
-        # Generate candidate solutions and predict them
-        X_candidate = LHS().do(problem, 100).get("X")
-        f1_pred, f2_pred, f3_pred, g_pred = surrogate.predict(X_candidate)
+    # Track the best-solution history
+    best_f = float('inf')
+    no_improve_count = 0
+
+    print("Starting optimization...")
+    for iter_num in range(n_iter):
+        X_candidate = LHS().do(problem, 50).get("X")  # reduced number of candidates
+        f1_pred, f2_pred, f3_pred, g_pred = surrogate.predict(X_candidate)
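+
+        # Acquisition score: a penalized scalarization of the surrogate predictions,
+        # used here as a cheap stand-in for a true expected-improvement criterion.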
+        # Consider all objectives together
+        improvement = -(f1_pred + f2_pred + f3_pred)  # negated because we are minimizing
+
+        # Add a constraint penalty
+        penalty = np.sum(np.maximum(g_pred, 0), axis=1)
+        improvement -= 100 * penalty  # large penalty coefficient
 
-        # Compute the expected improvement (EI)
-        improvement = (f1_pred.min() - f1_pred) + (f2_pred.min() - f2_pred) + (f3_pred.min() - f3_pred)
         best_idx = np.argmax(improvement)
         X_new = X_candidate[best_idx].reshape(1, -1)
@@ -160,6 +196,15 @@ def moead_ego(problem, n_init=50, n_iter=200):
         G_init = np.vstack([G_init, G_new])
         surrogate.fit(X_init, F_init, G_init)
 
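+        # NOTE: the stop test below is scale-dependent; if every penalized score is negative
+        # (e.g. strictly positive predicted objectives), it fires as soon as iter_num exceeds 10.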
+        # Early-stopping condition
+        if iter_num > 10 and np.max(improvement) < 1e-4:
+            print(f"Converged at iteration {iter_num}")
+            break
+
+        if (iter_num + 1) % 5 == 0:  # print progress more often
+            print(f"Completed iteration {iter_num + 1}/{n_iter}")
+
+    print("Optimization finished")
     return X_init, F_init, G_init