Compare commits

..

No commits in common. 'zhu1ku2' and 'main' have entirely different histories.

8
.idea/.gitignore vendored

@ -1,8 +0,0 @@
# 默认忽略的文件
/shelf/
/workspace.xml
# 基于编辑器的 HTTP 客户端请求
/httpRequests/
# Datasource local storage ignored files
/dataSources/
/dataSources.local.xml

@ -191,7 +191,7 @@ A: 系统设计注重引导而非替代1AI提供建议而非直接答案
## 联系我们
### 开发团队
- **技术支持**
- **技术支持**: 267278466@qq.com
## 开源协议

@ -12,7 +12,6 @@ from typing import Dict, Any, Optional, List
from urllib.parse import urljoin
from requests.adapters import HTTPAdapter
from urllib3.util.retry import Retry
from openai import OpenAI
# 确保正确的导入路径
current_dir = os.path.dirname(os.path.abspath(__file__))
@ -64,7 +63,6 @@ except Exception as e:
ANTHROPIC = "Anthropic"
GOOGLE = "Google"
LMSTUDIO = "LMStudio"
DEEPSEEK = "DeepSeek"
class LLMModel(BaseModel):
display_name: str
@ -162,32 +160,6 @@ class FastLLMClient:
return prompt
def _call_deepseek_api(self, prompt: str, model: LLMModel, max_tokens: int = 1000, **kwargs) -> str:
    """Call the DeepSeek chat API through the OpenAI-compatible SDK.

    Args:
        prompt: User prompt to send.
        model: Model descriptor; ``api_key`` (and optionally ``base_url``)
            are read from it via ``getattr``.
        max_tokens: Cap on response tokens.
        **kwargs: Optional ``system_message`` and ``temperature`` (default 0.7).

    Returns:
        The assistant message content as a string ("" if the API sent null).

    Raises:
        Exception: If the API returns no choices.
    """
    messages = []
    if kwargs.get('system_message'):
        messages.append({"role": "system", "content": kwargs['system_message']})
    messages.append({"role": "user", "content": prompt})
    # Honor a custom base_url carried by the model (users store one in their
    # settings); fall back to the official endpoint — backward compatible.
    base_url = getattr(model, "base_url", "") or "https://api.deepseek.com"
    client = OpenAI(
        api_key=getattr(model, "api_key", ""),
        base_url=base_url
    )
    response = client.chat.completions.create(
        model=model.model_name,
        messages=messages,
        temperature=kwargs.get('temperature', 0.7),
        max_tokens=max_tokens,
        stream=False
    )
    # Read the typed response directly instead of round-tripping through
    # model_dump() and dict indexing; fail loudly on an empty choice list.
    if not response.choices:
        raise Exception("DeepSeek API返回了空的choices")
    content = response.choices[0].message.content
    return content if content is not None else ""
def _call_openai_api(self, prompt: str, model: LLMModel, max_tokens: int = 1000,
**kwargs) -> str:
"""调用OpenAI API - 优化版本"""
@ -341,18 +313,16 @@ class FastLLMClient:
elif model.provider == ModelProvider.GOOGLE:
response = self._call_google_api(context_prompt, model, max_tokens, **kwargs)
elif model.provider == ModelProvider.LMSTUDIO:
# LMStudio使用OpenAI兼容API
response = self._call_openai_api(context_prompt, model, max_tokens, **kwargs)
elif model.provider == ModelProvider.DEEPSEEK:
response = self._call_deepseek_api(context_prompt, model, max_tokens, **kwargs)
else:
raise ValueError(f"不支持的模型供应商: {model.provider}")
elapsed_time = time.time() - start_time
print(f"API调用耗时: {elapsed_time:.2f}")
return response.strip()
return response.strip()
except requests.exceptions.Timeout:
raise Exception("API调用超时")
except requests.exceptions.ConnectionError:

@ -13,7 +13,6 @@ class ModelProvider(str, Enum):
ANTHROPIC = "Anthropic"
DEEPSEEK = "DeepSeek"
GEMINI = "Gemini"
GOOGLE = "Google"
GROQ = "Groq"
OPENAI = "OpenAI"
OLLAMA = "Ollama"

@ -9,8 +9,6 @@ import asyncio
import logging
from typing import Dict, List, Optional, Any
from config import get_config
import re
from typing import Dict, Optional, List, Union
# 配置调试日志
logging.basicConfig(level=logging.DEBUG)
@ -24,47 +22,6 @@ from models import ModelProvider, LLMModel
config = get_config()
def robust_json_parse(result: str) -> Dict:
    """Parse a (possibly malformed) JSON string returned by an LLM.

    Tries, in order: direct parsing, parsing after repairing common LLM
    formatting mistakes, and extracting the first ``{...}`` span and
    repairing that. Always returns a dict: on total failure a structured
    fallback payload (with ``raw_response`` attached) is returned so
    downstream code sees the expected keys.
    """
    def fix_json(json_str: str) -> str:
        """Best-effort repair of common JSON formatting problems."""
        # 1. Quote bare keys — but only after '{' or ',', so keys that are
        #    already quoted are not mangled into ""key": (bug in the old
        #    unanchored pattern).
        json_str = re.sub(r'([{,]\s*)(\w+)\s*:', r'\1"\2":', json_str)
        # 2. Quote unquoted string values (heuristic; may also quote numbers
        #    followed by ',' or ']').
        json_str = re.sub(r':\s*([\'"]?)([^\'",]+?)\1([,\]])', r': "\2"\3', json_str)
        # 3. Strip trailing commas before '}' or ']'.
        json_str = re.sub(r",\s*([}\]])", r"\1", json_str)
        # 4. Normalize quoting inside single-element arrays.
        json_str = re.sub(r'\[\s*([\'"]?)([^\'"]+?)\1\s*\]', r'["\2"]', json_str)
        return json_str

    # Attempt 1: the string is already valid JSON.
    try:
        return json.loads(result)
    except json.JSONDecodeError:
        pass
    # Attempt 2: repair common mistakes, then parse.
    try:
        return json.loads(fix_json(result))
    except json.JSONDecodeError:
        pass
    # Attempt 3: extract the outermost {...} span and repair that.
    json_match = re.search(r'\{[\s\S]*\}', result)
    if json_match:
        try:
            return json.loads(fix_json(json_match.group()))
        except json.JSONDecodeError:  # narrowed from a bare `except:`
            pass
    # Final fallback: structured default so callers keep working.
    return {
        "strengths": ["内容已提交"],
        "issues": ["AI分析格式异常"],
        "suggestions": ["建议重新分析"],
        "next_steps": ["继续完善内容"],
        "raw_response": result
    }
class AIService:
"""AI服务类 - 统一上下文处理"""
@ -84,14 +41,14 @@ class AIService:
if user_settings:
return user_settings
return self.default_settings
def _create_llm_model(self, settings: Dict) -> LLMModel:
"""创建LLM模型对象"""
try:
provider = ModelProvider(settings['provider'])
except ValueError:
provider = ModelProvider.OLLAMA
logger.warning(f"未知的provider: {settings['provider']}已回退到OLLAMA")
model = LLMModel(
display_name=settings['model'],
model_name=settings['model'],
@ -233,12 +190,11 @@ Please analyze the content with American student writing style in mind, focusing
Provide constructive feedback that helps the student write like an American student would, with natural flow and authentic voice.
请用完整JSON格式回复包含以下字段讲解文字用中文示范文字用英文
请用JSON格式回复包含以下字段讲解文字用中文示范文字用英文
- strengths: 优点列表中文讲解
- issues: 问题列表中文讲解
- suggestions: 改进建议列表中文讲解英文示例要体现美国学生写作风格
- next_steps: 下一步建议列表中文讲解
"""
- next_steps: 下一步建议列表中文讲解"""
else:
prompt = f"""作为专业的{ctx_info['subject']}写作指导老师,请分析学生的{stage_name}
@ -254,24 +210,42 @@ Provide constructive feedback that helps the student write like an American stud
- strengths: 优点列表
- issues: 问题列表
- suggestions: 改进建议列表
- next_steps: 下一步建议列表
"""
- next_steps: 下一步建议列表"""
try:
result = quick_generate(
prompt=prompt,
model=model,
max_tokens=1500,
max_tokens=600,
grade=ctx_info['grade'],
subject=ctx_info['subject'],
topic=ctx_info['topic'],
requirement=f"分析{stage_name}阶段的写作内容",
json_mode=True,
temperature=0.3
json_mode=True, # 启用JSON模式
temperature=0.3 # 降低温度以提高准确性
)
# 使用增强的JSON解析
return robust_json_parse(result)
# 尝试解析JSON响应
try:
return json.loads(result)
except json.JSONDecodeError:
# 如果解析失败尝试提取JSON部分
import re
json_match = re.search(r'\{.*\}', result, re.DOTALL)
if json_match:
try:
return json.loads(json_match.group())
except json.JSONDecodeError:
pass
# 如果仍然失败,返回结构化的默认响应
return {
"strengths": ["内容已提交"],
"issues": ["AI分析格式异常"],
"suggestions": ["建议重新分析"],
"next_steps": ["继续完善内容"],
"raw_response": result
}
except Exception as e:
raise Exception(f"分析内容失败: {str(e)}")
@ -366,360 +340,6 @@ Consider how well the student demonstrates understanding of the topic and expres
except Exception as e:
raise Exception(f"评估文章失败: {str(e)}")
def check_grammar(self, content: str, context: Dict,
                  user_settings: Optional[Dict] = None) -> Dict:
    """Run an AI grammar check over `content`.

    Builds a subject-specific prompt (English-teacher prompt for '英语',
    Chinese-teacher prompt otherwise), asks the model for a JSON report,
    then validates/backfills it. Never raises: every failure path returns
    a structured fallback dict with the same keys.
    """
    settings = self._get_ai_settings(user_settings)
    model = self._create_llm_model(settings)
    # Pull grade/subject/topic/article_type out of the caller's context.
    ctx_info = self._extract_context_info(context)
    # Subject-specific prompt construction.
    if ctx_info['subject'] == '英语':
        prompt = f"""As an experienced American English teacher, please conduct a comprehensive grammar check for this {ctx_info['article_type']} written by a {ctx_info['grade']} student.
Topic: {ctx_info['topic']}
Student's Essay:
{content}
Please identify and correct grammatical errors with a focus on:
1. **Grammar Mistakes**: Subject-verb agreement, verb tenses, articles, prepositions
2. **Sentence Structure**: Run-on sentences, sentence fragments, parallel structure
3. **Punctuation**: Commas, periods, quotation marks, apostrophes
4. **Word Usage**: Wrong word choices, awkward phrasing, informal language
5. **Spelling**: Common spelling errors and typos
For each error found, please provide:
- The original text with error highlighted
- Explanation of the error (in Chinese for understanding)
- Corrected version (in proper English)
- Suggestion for improvement
请用完整JSON格式回复包含以下字段
- overall_assessment: 总体评价中文
- total_errors: 总错误数量
- error_categories: 错误分类统计 {{"grammar": 数量, "punctuation": 数量, "word_usage": 数量, "spelling": 数量}}
- errors: 错误列表每个错误包含
- original_text: 原始错误文本
- error_type: 错误类型
- explanation: 错误解释中文
- corrected_text: 修正后的文本
- suggestion: 改进建议中文
- score: 语法得分0-100
- recommendation: 学习建议中文"""
    else:
        prompt = f"""作为专业的语文老师,请对这篇{ctx_info['article_type']}进行语法检查:
题目{ctx_info['topic']}
学生作文
{content}
请检查以下方面的语法错误
1. **字词错误**错别字用词不当词语搭配错误
2. **句子错误**病句成分残缺搭配不当语序混乱
3. **标点错误**标点符号使用不当缺失或多余
4. **表达错误**表达不清逻辑混乱修辞不当
5. **格式错误**段落格式书写规范问题
对于每个发现的错误请提供
- 包含错误的原始文本
- 错误类型说明
- 错误解释
- 修改后的正确文本
- 改进建议
请用JSON格式回复包含以下字段
- overall_assessment: 总体评价
- total_errors: 总错误数量
- error_categories: 错误分类统计 {{"word": 数量, "sentence": 数量, "punctuation": 数量, "expression": 数量}}
- errors: 错误列表每个错误包含
- original_text: 原始错误文本
- error_type: 错误类型
- explanation: 错误解释
- corrected_text: 修正后的文本
- suggestion: 改进建议
- score: 语法得分0-100
- recommendation: 学习建议"""
    try:
        result = quick_generate(
            prompt=prompt,
            model=model,
            max_tokens=1800,
            grade=ctx_info['grade'],
            subject=ctx_info['subject'],
            topic=ctx_info['topic'],
            requirement="语法检查",
            json_mode=True,
            temperature=0.1  # low temperature for accuracy/determinism
        )
        # ---- Strict JSON parsing of the model output ----
        try:
            grammar_data = json.loads(result)
        except json.JSONDecodeError:
            # Try to pull out a {...} substring and parse that instead.
            import re
            json_match = re.search(r'\{[\s\S]*\}', result)
            if json_match:
                try:
                    grammar_data = json.loads(json_match.group(0))
                except json.JSONDecodeError:
                    logger.error("[语法检查] 提取到的JSON片段解析失败")
                    return {
                        "overall_assessment": "语法检查暂时不可用",
                        "total_errors": 0,
                        "error_categories": {},
                        "errors": [],
                        "score": 0,
                        "recommendation": "AI返回的数据无法解析请稍后重试",
                        "raw_response": result,
                        "error": "JSON解析失败提取片段后仍失败"
                    }
            else:
                logger.error("[语法检查] AI返回内容非JSON且未能提取JSON片段")
                return {
                    "overall_assessment": "语法检查暂时不可用",
                    "total_errors": 0,
                    "error_categories": {},
                    "errors": [],
                    "score": 0,
                    "recommendation": "AI返回的数据格式不正确请稍后重试",
                    "raw_response": result,
                    "error": "返回非JSON"
                }
        # --------- END: strict JSON parsing ----------
        print("===== 语法检查返回数据 =====")
        print(grammar_data)
        print("==========================")
        # Validate and backfill required fields before returning.
        grammar_data = self._validate_grammar_data(grammar_data, content)
        return grammar_data
    except Exception as e:
        logger.error(f"语法检查失败: {str(e)}")
        return {
            "overall_assessment": "语法检查暂时不可用",
            "total_errors": 0,
            "error_categories": {},
            "errors": [],
            "score": 0,
            "recommendation": "请稍后重试或检查网络连接",
            "error": str(e)
        }
def _validate_grammar_data(self, grammar_data: Dict, content: str) -> Dict:
"""验证和补充语法检查数据"""
# 确保必要字段存在
required_fields = {
'overall_assessment': '文章内容良好,但需要进一步改进语法准确性。',
'total_errors': 0,
'error_categories': {},
'errors': [],
'score': 100,
'recommendation': '建议继续练习,提高语法准确性。'
}
for field, default_value in required_fields.items():
if field not in grammar_data or grammar_data[field] is None:
grammar_data[field] = default_value
# 确保error_categories是字典格式
if not isinstance(grammar_data.get('error_categories'), dict):
grammar_data['error_categories'] = {}
# 确保errors是列表格式
if not isinstance(grammar_data.get('errors'), list):
grammar_data['errors'] = []
# 计算总错误数如果与error_categories不一致
if grammar_data['total_errors'] == 0 and grammar_data['errors']:
grammar_data['total_errors'] = len(grammar_data['errors'])
# 根据错误数量重新计算分数(如果分数不合理)
error_count = grammar_data['total_errors']
content_length = len(content.strip())
if content_length > 0:
# 每100字错误密度
error_density = error_count / max(content_length / 100, 1)
# 如果AI返回的分数不合理根据错误密度重新计算
if grammar_data['score'] > 90 and error_density > 2:
grammar_data['score'] = max(100 - error_density * 10, 0)
elif grammar_data['score'] < 10 and error_density < 1:
grammar_data['score'] = min(100 - error_density * 5, 100)
# 确保分数在合理范围内
grammar_data['score'] = max(0, min(100, grammar_data['score']))
return grammar_data
def vocabulary_upgrade(self, content: str, context: Dict,
                       user_settings: Optional[Dict] = None) -> Dict:
    """Identify basic vocabulary in `content` and suggest advanced alternatives.

    Builds a subject-specific prompt (English-instructor prompt for '英语',
    Chinese prompt otherwise), requests a JSON report, then validates and
    backfills it. Never raises: failures return the default payload from
    _get_default_vocabulary_response.
    """
    settings = self._get_ai_settings(user_settings)
    model = self._create_llm_model(settings)
    # Pull grade/subject/topic/article_type out of the caller's context.
    ctx_info = self._extract_context_info(context)
    # Subject-specific prompt construction.
    if ctx_info['subject'] == '英语':
        prompt = f"""As an experienced American English writing instructor, please analyze this {ctx_info['article_type']} and identify basic vocabulary that can be upgraded to more sophisticated alternatives.
Topic: {ctx_info['topic']}
Student's Content:
{content}
Please identify 5-8 basic words or phrases that could be replaced with more advanced vocabulary appropriate for {ctx_info['grade']} level American academic writing.
For each vocabulary item, provide:
1. The original basic word/phrase
2. 2-3 more sophisticated alternatives with explanations
3. Example sentences showing proper usage
4. Contextual guidance on when to use each alternative
Focus on vocabulary that would make the writing sound more like a native American student's work.
请用完整JSON格式回复包含以下字段讲解文字用中文示范文字用英文
- overall_assessment: 总体词汇水平评估中文
- total_suggestions: 总建议数量
- vocabulary_suggestions: 词汇建议列表每个建议包含
- original_word: 原始词汇
- alternatives: 替代词汇列表每个包含
- word: 高阶词汇
- meaning: 词汇含义解释中文
- usage_example: 使用示例英文
- explanation: 升级理由中文
- difficulty_level: 难度等级初级/中级/高级
- learning_tips: 学习建议列表中文
- next_steps: 后续学习步骤中文"""
    else:
        prompt = f"""作为专业的{ctx_info['subject']}写作指导老师,请分析这篇{ctx_info['article_type']}并识别可以升级的基础词汇。
题目{ctx_info['topic']}
学生作文
{content}
请识别5-8个可以升级的基础词汇或短语为每个词汇提供更高级的替代方案
对于每个词汇项目请提供
1. 原始基础词汇
2. 2-3个更高级的替代词汇及解释
3. 使用示例句子
4. 使用场景指导
请用JSON格式回复包含以下字段
- overall_assessment: 总体词汇水平评估
- total_suggestions: 总建议数量
- vocabulary_suggestions: 词汇建议列表每个建议包含
- original_word: 原始词汇
- alternatives: 替代词汇列表每个包含
- word: 高阶词汇
- meaning: 词汇含义解释
- usage_example: 使用示例
- explanation: 升级理由
- difficulty_level: 难度等级初级/中级/高级
- learning_tips: 学习建议列表
- next_steps: 后续学习步骤"""
    try:
        result = quick_generate(
            prompt=prompt,
            model=model,
            max_tokens=1200,
            grade=ctx_info['grade'],
            subject=ctx_info['subject'],
            topic=ctx_info['topic'],
            requirement="词汇升级分析",
            json_mode=True,
            temperature=0.3
        )
        print("===== 词汇升级返回数据 =====")
        print(result)
        print("==========================")
        try:
            vocabulary_data = json.loads(result)
        except json.JSONDecodeError:
            # Try to pull out a {...} substring and parse that instead.
            import re
            json_match = re.search(r'\{.*\}', result, re.DOTALL)
            if json_match:
                try:
                    vocabulary_data = json.loads(json_match.group(0))
                except json.JSONDecodeError:
                    logger.error("[词汇升级] 提取到的JSON片段解析失败")
                    return self._get_default_vocabulary_response(content)
            else:
                logger.error("[词汇升级] AI返回内容非JSON且未能提取JSON片段")
                return self._get_default_vocabulary_response(content)
        # Validate and backfill required fields before returning.
        vocabulary_data = self._validate_vocabulary_data(vocabulary_data, content)
        return vocabulary_data
    except Exception as e:
        logger.error(f"词汇升级失败: {str(e)}")
        return self._get_default_vocabulary_response(content, str(e))
def _get_default_vocabulary_response(self, content: str, error_msg: str = "") -> Dict:
"""获取默认的词汇升级响应"""
return {
"overall_assessment": "词汇升级功能暂时不可用" + (f"{error_msg}" if error_msg else ""),
"total_suggestions": 0,
"vocabulary_suggestions": [],
"learning_tips": ["请检查网络连接后重试", "建议先保存作品再尝试词汇升级"],
"next_steps": ["继续积累词汇量", "多阅读优秀范文"],
"error": error_msg or "未知错误"
}
def _validate_vocabulary_data(self, vocabulary_data: Dict, content: str) -> Dict:
"""验证和补充词汇升级数据"""
# 确保必要字段存在
required_fields = {
'overall_assessment': '词汇使用基本正确,有提升空间。',
'total_suggestions': 0,
'vocabulary_suggestions': [],
'learning_tips': ['多阅读优秀作品,积累词汇'],
'next_steps': ['定期复习升级的词汇']
}
for field, default_value in required_fields.items():
if field not in vocabulary_data or vocabulary_data[field] is None:
vocabulary_data[field] = default_value
# 确保vocabulary_suggestions是列表格式
if not isinstance(vocabulary_data.get('vocabulary_suggestions'), list):
vocabulary_data['vocabulary_suggestions'] = []
# 计算总建议数如果与vocabulary_suggestions不一致
if vocabulary_data['total_suggestions'] == 0 and vocabulary_data['vocabulary_suggestions']:
vocabulary_data['total_suggestions'] = len(vocabulary_data['vocabulary_suggestions'])
# 确保每个建议都有必要的字段
for suggestion in vocabulary_data['vocabulary_suggestions']:
if 'original_word' not in suggestion:
suggestion['original_word'] = '未知词汇'
if 'alternatives' not in suggestion or not isinstance(suggestion.get('alternatives'), list):
suggestion['alternatives'] = []
if 'explanation' not in suggestion:
suggestion['explanation'] = '可以升级为更高级的词汇'
if 'difficulty_level' not in suggestion:
suggestion['difficulty_level'] = '中级'
return vocabulary_data
def generate_suggestions(self, content: str, context: Dict,
suggestion_type: str = "improvement",
user_settings: Optional[Dict] = None) -> List[str]:
@ -913,7 +533,7 @@ Please provide:
result = quick_generate(
prompt=enhanced_prompt,
model=model,
max_tokens=1800,
max_tokens=1000,
grade=ctx_info['grade'],
subject=ctx_info['subject'],
topic=ctx_info['topic'],
@ -1330,14 +950,4 @@ def sync_test_connection(user_settings: Dict) -> bool:
def sync_health_check() -> Dict[str, Any]:
    """Synchronous wrapper around the shared AIService health check."""
    status = ai_service.health_check()
    return status
def sync_check_grammar(content: str, context: Dict,
                       user_settings: Optional[Dict] = None) -> Dict:
    """Synchronous wrapper: run a grammar check through the shared AIService."""
    return ai_service.check_grammar(
        content=content,
        context=context,
        user_settings=user_settings,
    )
def sync_vocabulary_upgrade(content: str, context: Dict,
                            user_settings: Optional[Dict] = None) -> Dict:
    """Synchronous wrapper: run a vocabulary upgrade through the shared AIService."""
    return ai_service.vocabulary_upgrade(
        content=content,
        context=context,
        user_settings=user_settings,
    )
return ai_service.health_check()

@ -9,14 +9,14 @@
"grade": "初中",
"brainstorm_content": "",
"outline_content": "",
"writing_content": "Generative AI are a hot topic. Some peoples thinks it make students lazy. Because they just use AI to do they homework. This is not completely true. Actually, if use correct, AI can helps student learning better. For example, when a student don't understand a concept, they can asks AI for explain. It give instant answer, more better than just search online. But, student must to think by themselves first. Rely on AI too much is bad. It can damaging they ability for independent thinking. So, the key is balance. We should use AI like a tool, not a replacement of our brain. In conclude, AI have both positive and negative affects. It is depend on how we uses it.",
"ai_feedback": "{\"writing_analysis\": {\"strengths\": [\"观点辩证学生能认识到AI的双面性既指出过度依赖的危害也肯定合理使用的价值\", \"结构完整:包含问题提出、正反论证和结论的基本框架\", \"立场明确:最终提出'关键在于平衡'的核心观点\", \"举例具体:用'不理解概念时询问AI'的案例支撑论点\"], \"issues\": [\"论点展开不足:缺乏分论点支撑,正反论证都停留在表面陈述\", \"学术规范缺失:没有明确的主题句和过渡词,段落间逻辑跳跃\", \"语言不地道:存在中式英语表达和语法错误\", \"论证深度不够:未涉及具体研究数据或教育理论支撑\", \"批判性思维薄弱未分析AI如何具体影响不同学习能力的学生\"], \"suggestions\": [{\"讲解\": \"建立清晰论点结构,使用主题句+支撑句模式\", \"示例\": \"While critics argue that generative AI fosters academic laziness, this perspective overlooks its potential as a cognitive tool when used intentionally. For instance, AI can serve as a 24/7 learning partner that provides customized explanations...\"}, {\"讲解\": \"增加学术过渡词和逻辑连接\", \"示例\": \"Conversely, unmonitored AI use may indeed undermine metacognitive skills. A study by Stanford University found that students who over-relied on AI for problem-solving showed decreased ability to...\"}, {\"讲解\": \"用具体案例替代泛泛而谈\", \"示例\": \"In Mr. Johnson's 8th-grade science class, students using AI for hypothesis refinement scored 23% higher on critical thinking assessments than those using traditional methods...\"}, {\"讲解\": \"强化批判性分析维度\", \"示例\": \"The central dilemma isn't whether to use AI, but how to design usage protocols that maximize its scaffolding function while minimizing dependency. Educators might consider...\"}, {\"讲解\": \"使用更地道的学术表达\", \"示例\": \"Rather than replacing human intellect, generative AI should function as a collaborative tool that amplifies our cognitive capabilities—much like calculators enhanced mathematical reasoning without eliminating the need to understand core principles.\"}], \"next_steps\": [\"学习美国中学议论文的经典五段式结构(引言-论点1-论点2-反论点-结论)\", \"收集关于AI教育影响的具体研究数据和权威来源\", \"练习使用学术过渡词however, furthermore, consequently等\", \"阅读《纽约时报》教育版相关文章,观察地道议论文表达\", \"尝试写作时先建立论证大纲,再展开具体段落\"]}}",
"final_score": 40,
"scores": "{\"brainstorm\": 0, \"outline\": 0, \"writing\": 2, \"highlight\": 2}",
"writing_content": "",
"ai_feedback": "",
"final_score": 0,
"scores": "{\"brainstorm\": 0, \"outline\": 0, \"writing\": 0, \"highlight\": 0}",
"status": "writing",
"word_count": 546,
"word_count": 0,
"created_at": "2025-10-01T13:18:06.056827",
"updated_at": "2025-10-15T17:10:55.984866",
"updated_at": "2025-10-09T10:43:01.941600",
"completed_at": null
}
]

@ -6,10 +6,10 @@
"grade": "高中",
"subject": "英语",
"created_at": "2025-07-07T23:12:26.092924",
"ai_provider": "DeepSeek",
"ai_model": "deepseek-chat",
"ai_api_key": "<REDACTED — leaked DeepSeek key committed to history; rotate it>",
"ai_base_url": "https://api.deepseek.com",
"updated_at": "2025-10-15T17:05:01.345349"
"ai_provider": "",
"ai_model": "",
"ai_api_key": "",
"ai_base_url": "",
"updated_at": "2025-10-09T10:43:25.409404"
}
]

@ -72,20 +72,6 @@ class ProjectDAO:
def __exit__(self, exc_type, exc_val, exc_tb):
    # Context-manager exit: no cleanup to perform here; returning None
    # (implicitly) lets any exception propagate to the caller.
    pass
def update_project(self, project_id: int, **kwargs) -> Optional[Dict]:
    """Update arbitrary fields on a project.

    Delegates to ProjectStorage.update_project, passing kwargs through
    unchanged as field=value pairs. Returns the updated project dict, or
    None — presumably when the id does not exist (confirm in ProjectStorage).
    """
    return ProjectStorage.update_project(project_id, **kwargs)
def save_vocabulary_upgrade(self, project_id: int, vocabulary_data: Dict[str, Any]) -> Optional[Dict]:
    """Persist a vocabulary-upgrade result on the project record as JSON."""
    serialized = json.dumps(vocabulary_data, ensure_ascii=False)
    return ProjectStorage.update_project(project_id, vocabulary_upgrade=serialized)
def save_grammar_check(self, project_id: int, grammar_data: Dict[str, Any]) -> Optional[Dict]:
    """Persist a grammar-check result on the project record as JSON."""
    serialized = json.dumps(grammar_data, ensure_ascii=False)
    return ProjectStorage.update_project(project_id, grammar_check=serialized)
def create_project(self, user_id: int, title: str, topic: str, article_type: str, subject: str, grade: str = '初中') -> Dict:
"""创建写作项目"""

@ -7,7 +7,7 @@ from datetime import datetime
import json
from json_dao import UserDAO, ProjectDAO, with_user_dao, with_project_dao, dict_to_user, dict_to_project, dicts_to_projects
from ai_service import sync_generate_topic, sync_analyze_content, sync_evaluate_article, sync_health_check, sync_test_connection, sync_generate_suggestions, sync_generate_stage_suggestions, sync_check_grammar,sync_vocabulary_upgrade
from ai_service import sync_generate_topic, sync_analyze_content, sync_evaluate_article, sync_health_check, sync_test_connection, sync_generate_suggestions, sync_generate_stage_suggestions
from scoring_service import ScoringService
from ai_service import AIService
@ -770,198 +770,6 @@ def generate_suggestions():
traceback.print_exc()
return error_response(f"生成建议失败: {str(e)}", 500)
@api_bp.route('/ai/vocabulary_upgrade', methods=['POST'])
def vocabulary_upgrade():
    """Vocabulary-upgrade endpoint.

    Expects JSON {content, project_id?, topic?, article_type?}. Runs the AI
    vocabulary analysis with a context built from the project (preferred) or
    the user's settings, and persists the result on the project when one is
    given.
    """
    # NOTE(review): hard-coded auto-login — any anonymous request is treated
    # as user 1. Presumably development-only; confirm before deploying.
    if 'user_id' not in session:
        session['user_id'] = 1
        session['username'] = '267278466@qq.com'
    user_id = session.get('user_id')
    try:
        data = request.get_json()
        content = data.get('content')
        project_id = data.get('project_id')
        if not content:
            return error_response("内容不能为空")
        # Load the requesting user (the JSON DAO returns plain dicts).
        @with_user_dao
        def _get_user_data(dao):
            user = dao.get_user_by_id(user_id)
            return user
        user_data = _get_user_data()
        if not user_data:
            return error_response("用户不存在", 404)
        # Load the project, enforcing ownership by the current user.
        project_data = None
        if project_id:
            @with_project_dao
            def _get_project_data(dao):
                proj = dao.get_project_by_id(project_id)
                return proj if proj and proj.get('user_id') == user_id else None
            project_data = _get_project_data()
        # Build the AI context — prefer the project's grade/subject, falling
        # back to the user's defaults.
        if project_data:
            context = {
                'grade': project_data.get('grade', user_data.get('grade', '')),
                'subject': project_data.get('subject', user_data.get('subject', '')),
                'content': content,
                'topic': project_data.get('topic', ''),
                'article_type': project_data.get('article_type', ''),
                'title': project_data.get('title', '')
            }
        else:
            # No project — use the user's settings and request-supplied hints.
            context = {
                'grade': user_data.get('grade', ''),
                'subject': user_data.get('subject', ''),
                'content': content,
                'topic': data.get('topic', ''),
                'article_type': data.get('article_type', '')
            }
        # Per-user AI provider settings.
        user_ai_settings = get_user_ai_settings(user_id)
        # Run the vocabulary upgrade.
        vocabulary_result = sync_vocabulary_upgrade(
            content=content,
            context=context,
            user_settings=user_ai_settings
        )
        # Persist the result on the project (ownership re-checked).
        if project_id:
            @with_project_dao
            def _save_vocabulary_result(dao):
                project = dao.get_project_by_id(project_id)
                if project and project.get('user_id') == user_id:
                    # Merge into the project's existing vocabulary_upgrade JSON.
                    vocab_data_str = project.get('vocabulary_upgrade', '{}')
                    if not vocab_data_str or vocab_data_str.strip() == '':
                        vocab_data_str = '{}'
                    vocab_data = json.loads(vocab_data_str)
                    vocab_data['latest_upgrade'] = vocabulary_result
                    vocab_data['last_upgraded_at'] = datetime.utcnow().isoformat()
                    # Update via the DAO — the field must exist in project storage.
                    dao.save_vocabulary_upgrade(project_id, vocab_data)
                    return True
                return False
            _save_vocabulary_result()
        return success_response(vocabulary_result)
    except Exception as e:
        import traceback
        traceback.print_exc()
        return error_response(f"词汇升级失败: {str(e)}", 500)
@api_bp.route('/ai/check_grammar', methods=['POST'])
def check_grammar():
    """Grammar-check endpoint.

    Expects JSON {content, project_id?, topic?, article_type?}. Runs the AI
    grammar check with a context built from the project (preferred) or the
    user's settings, and persists the result on the project when one is given.
    """
    # NOTE(review): hard-coded auto-login — any anonymous request is treated
    # as user 1. Presumably development-only; confirm before deploying.
    if 'user_id' not in session:
        session['user_id'] = 1
        session['username'] = '267278466@qq.com'
    user_id = session.get('user_id')
    try:
        data = request.get_json()
        content = data.get('content')
        project_id = data.get('project_id')
        if not content:
            return error_response("内容不能为空")
        # Load the requesting user (the JSON DAO returns plain dicts).
        @with_user_dao
        def _get_user_data(dao):
            user = dao.get_user_by_id(user_id)
            return user
        user_data = _get_user_data()
        if not user_data:
            return error_response("用户不存在", 404)
        # Load the project, enforcing ownership by the current user.
        project_data = None
        if project_id:
            @with_project_dao
            def _get_project_data(dao):
                proj = dao.get_project_by_id(project_id)
                return proj if proj and proj.get('user_id') == user_id else None
            project_data = _get_project_data()
        # Build the AI context — prefer the project's grade/subject, falling
        # back to the user's defaults.
        if project_data:
            context = {
                'grade': project_data.get('grade', user_data.get('grade', '')),
                'subject': project_data.get('subject', user_data.get('subject', '')),
                'content': content,
                'topic': project_data.get('topic', ''),
                'article_type': project_data.get('article_type', ''),
                'title': project_data.get('title', '')
            }
        else:
            # No project — use the user's settings and request-supplied hints.
            context = {
                'grade': user_data.get('grade', ''),
                'subject': user_data.get('subject', ''),
                'content': content,
                'topic': data.get('topic', ''),
                'article_type': data.get('article_type', '')
            }
        # Per-user AI provider settings.
        user_ai_settings = get_user_ai_settings(user_id)
        # Run the grammar check.
        grammar_result = sync_check_grammar(
            content=content,
            context=context,
            user_settings=user_ai_settings
        )
        # Persist the result on the project (ownership re-checked).
        if project_id:
            @with_project_dao
            def _save_grammar_result(dao):
                project = dao.get_project_by_id(project_id)
                if project and project.get('user_id') == user_id:
                    # Merge into the project's existing grammar_check JSON.
                    grammar_data_str = project.get('grammar_check', '{}')
                    if not grammar_data_str or grammar_data_str.strip() == '':
                        grammar_data_str = '{}'
                    grammar_data = json.loads(grammar_data_str)
                    grammar_data['latest_check'] = grammar_result
                    grammar_data['last_checked_at'] = datetime.utcnow().isoformat()
                    # Update via the DAO.
                    dao.save_grammar_check(project_id, grammar_data)
                    return True
                return False
            _save_grammar_result()
        return success_response(grammar_result)
    except Exception as e:
        import traceback
        traceback.print_exc()
        return error_response(f"语法检查失败: {str(e)}", 500)
# AI配置相关API
@api_bp.route('/ai/models/ollama', methods=['GET'])
def get_ollama_models():

File diff suppressed because it is too large Load Diff

@ -1,22 +0,0 @@
{
"ai_model": {
"value": "gpt-3.5-turbo",
"description": "默认配置: ai_model",
"updated_at": "2025-10-14T19:10:47.462376"
},
"max_word_count": {
"value": 800,
"description": "默认配置: max_word_count",
"updated_at": "2025-10-14T19:10:47.476253"
},
"enable_ai_suggestions": {
"value": true,
"description": "默认配置: enable_ai_suggestions",
"updated_at": "2025-10-14T19:10:47.491171"
},
"auto_save_interval": {
"value": 30,
"description": "默认配置: auto_save_interval",
"updated_at": "2025-10-14T19:10:47.504964"
}
}

@ -1,42 +0,0 @@
[
{
"id": 1,
"user_id": 1,
"title": "写作项目 - 2025年10月14日 19:11",
"topic": "AI生成对学习的影响\n",
"article_type": "议论文",
"subject": "英语",
"grade": "大学",
"brainstorm_content": "",
"outline_content": "",
"writing_content": "Generative AI, such as large language models, are becoming increasingly popular. They offers students a powerful tool for complete homework and generating ideas. However, its impact on learning ability are a subject of intense debate.\n\nOn one hand, it is argued that AI hinder the development of critical thinking. When a student rely on AI to write essays, they doesn't engage in the rigorous process of research and analysis their own. This lead to a superficial understanding and a failure to develop their own voice. Furthermore, the convenience of AI means that less effort are put into mastering fundamental skills, such as grammar and structuring arguments.",
"ai_feedback": "{\"writing_analysis\": {\"strengths\": [\"能够识别AI对学习的潜在负面影响特别是批判性思维和基础技能方面显示出对主题的基本理解\", \"尝试从不同角度展开讨论,体现了初步的论证意识\", \"使用了一些学术写作的基本元素,如'On one hand'这样的过渡词\"], \"issues\": [\"缺乏明确的中心论点(thesis statement),文章方向不清晰\", \"段落缺乏有力的主题句(topic sentence),论证结构松散\", \"语法错误较多,特别是主谓一致问题(如'are'代替'is''doesn't'代替'don't')\", \"论证深度不足,缺乏具体例证和深入分析\", \"语言表达不够地道,有些表达显得生硬或不自然\"], \"suggestions\": [{\"讲解\": \"在开头段落加入清晰的中心论点\", \"示例\": \"While generative AI offers unprecedented convenience for students, its overreliance ultimately undermines the very cognitive skills essential for meaningful learning—critical analysis, original thought, and academic integrity.\"}, {\"讲解\": \"为每个主体段落添加有力的主题句\", \"示例\": \"Perhaps the most significant concern is how AI dependency stunts the growth of critical thinking abilities that form the bedrock of genuine education.\"}, {\"讲解\": \"使用更自然的过渡词和表达方式\", \"示例\": \"Moreover, the instant gratification provided by AI tools creates a dangerous disincentive for developing foundational writing competencies.\"}, {\"讲解\": \"添加具体例证增强说服力\", \"示例\": \"For instance, when a student uses AI to generate a history paper instead of wrestling with primary sources, they miss the opportunity to develop their own historical interpretation and analytical voice—skills that simply cannot be outsourced to an algorithm.\"}, {\"讲解\": \"修正语法错误,使用更地道的表达\", \"示例\": \"This reliance leads to superficial understanding and prevents students from developing their unique academic voice, ultimately creating a generation of passive consumers rather than active creators of knowledge.\"}], \"next_steps\": [\"阅读几篇优秀的美国大学生议论文范文,注意观察其论点陈述和段落结构\", \"练习写作清晰的主题句,确保每个段落都有明确的中心思想\", \"重点复习英语主谓一致和时态等基础语法规则\", \"在论证中添加具体事例、数据或引用,增强说服力\", \"多使用美国学术写作中常见的过渡词和表达方式,如'furthermore', 'conversely', 'in light of this'等\"]}}",
"final_score": 60,
"scores": "{\"brainstorm\": 0, \"outline\": 0, \"writing\": 3, \"highlight\": 3}",
"status": "writing",
"word_count": 558,
"created_at": "2025-10-14T19:14:23.761019",
"updated_at": "2025-11-02T16:19:18.601309",
"completed_at": null
},
{
"id": 2,
"user_id": 1,
"title": "写作项目 - 2025年11月02日 16:35",
"topic": "The Role of Failure in Personal Growth (2024 National)",
"article_type": "议论文",
"subject": "英语",
"grade": "高中",
"brainstorm_content": "",
"outline_content": "",
"writing_content": "",
"ai_feedback": "",
"final_score": 0,
"scores": "",
"status": "writing",
"word_count": 0,
"created_at": "2025-11-02T16:36:39.799848",
"updated_at": "2025-11-02T16:48:04.387646",
"completed_at": null
}
]

@ -1,15 +0,0 @@
[
{
"id": 1,
"username": "267278466@qq.com",
"password_hash": "07d0a715a160482331a556b514bee739$fe094c2d3e40fad4e83c6b06aaa95976c68e325bd899a003e7a2928edfe0fce8",
"grade": "高中",
"subject": "英语",
"ai_provider": "DeepSeek",
"ai_model": "deepseek-chat",
"ai_api_key": "<REDACTED — leaked DeepSeek key committed to history; rotate it>",
"ai_base_url": "https://api.deepseek.com",
"created_at": "2025-10-14T19:10:47.461150",
"updated_at": "2025-11-02T16:36:09.442564"
}
]
Loading…
Cancel
Save