@@ -1,5 +1,6 @@
 import os
 import json
+import re
 from typing import Iterator
 from openai import OpenAI
 
@@ -24,7 +25,7 @@ class EnglishEssayAnalyzer:
         用中文回答,保持专业但易懂的语气。"""
 
     def analyze_stream(self, essay: str) -> Iterator[str]:
-        """Analyze the essay as a stream."""
+        """Analyze the essay as a stream (newly added key method)."""
         try:
             stream = self.client.chat.completions.create(
                 model="deepseek-r1",
@@ -36,21 +37,9 @@
                 stream=True
             )
 
-            buffer = []
             for chunk in stream:
                 if chunk.choices and chunk.choices[0].delta.content:
-                    content = chunk.choices[0].delta.content
-                    buffer.append(content)
-
-                    # Emit a complete sentence when punctuation or a newline arrives
-                    if content in ('\n', '.', '!', '?', ';'):
-                        yield ''.join(buffer)
-                        buffer = []
-                    else:
-                        yield content
-
-            if buffer:
-                yield ''.join(buffer)
+                    yield chunk.choices[0].delta.content
 
         except Exception as e:
             yield f"\n分析中断:{str(e)}"
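With the sentence buffering removed, `analyze_stream` now simply forwards every non-empty `delta.content` it receives. The snippet below is a minimal standalone sketch of that same streaming pattern with the OpenAI SDK; the `DEEPSEEK_API_KEY` variable and `base_url` are illustrative assumptions, not values taken from this diff.

```python
import os
from openai import OpenAI

# Assumed OpenAI-compatible endpoint and credentials; adjust to however the
# analyzer's client is actually configured.
client = OpenAI(
    api_key=os.getenv("DEEPSEEK_API_KEY", "sk-..."),
    base_url="https://api.deepseek.com/v1",  # placeholder endpoint
)

stream = client.chat.completions.create(
    model="deepseek-r1",
    messages=[{"role": "user", "content": "Say hello in one short sentence."}],
    stream=True,
)

# Each chunk carries an incremental delta; content can be None (e.g. role-only
# chunks), hence the same guard used in analyze_stream above.
for chunk in stream:
    if chunk.choices and chunk.choices[0].delta.content:
        print(chunk.choices[0].delta.content, end="", flush=True)
print()
```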
@@ -65,22 +54,42 @@ class EnglishEssayAnalyzer:
         }
 
         current_category = None
-        for chunk in self.analyze_stream(essay):
-            # Print output in real time
+        buffer = ""
+        for chunk in self.analyze_stream(essay):  # can now be called correctly
             print(chunk, end='', flush=True)
 
-            # Parse the response structure
-            if chunk.strip().endswith(':'):
-                current_category = chunk.strip(' :')
-            elif current_category:
-                if '语法错误' in current_category:
-                    analysis['grammar_errors'].append(chunk.strip())
-                elif '用词不当' in current_category:
-                    analysis['vocabulary_issues'].append(chunk.strip())
-                elif '逻辑结构' in current_category:
-                    analysis['structure_problems'].append(chunk.strip())
-                elif '改进建议' in current_category:
-                    analysis['suggestions'].append(chunk.strip())
+            buffer += chunk
+
+            # Category detection logic
+            if re.match(r'^\d+\.\s', buffer):
+                if ':' in buffer:
+                    parts = buffer.split(':', 1)
+                    current_category = parts[0].strip()
+                    buffer = parts[1]
+
+                    # Initialize the current category
+                    if '语法' in current_category:
+                        analysis['grammar_errors'].append('')
+                    elif '用词' in current_category:
+                        analysis['vocabulary_issues'].append('')
+                    elif '逻辑' in current_category:
+                        analysis['structure_problems'].append('')
+                    elif '改进' in current_category:
+                        analysis['suggestions'].append('')
+
+            # Fill the current category with streamed content
+            if current_category:
+                if '语法' in current_category:
+                    analysis['grammar_errors'][-1] += chunk
+                elif '用词' in current_category:
+                    analysis['vocabulary_issues'][-1] += chunk
+                elif '逻辑' in current_category:
+                    analysis['structure_problems'][-1] += chunk
+                elif '改进' in current_category:
+                    analysis['suggestions'][-1] += chunk
+
+        # Post-processing: trim whitespace and drop empty entries
+        for key in analysis:
+            analysis[key] = [text.strip() for text in analysis[key] if text.strip()]
 
         return analysis
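The rewritten `full_analysis` keys everything off numbered headings such as `1. 语法错误:` in the streamed reply. A quick way to see the idea without a live model is to join canned chunks and extract the numbered sections in one pass; the sample chunks and regex below are illustrative only and not part of the analyzer, which instead updates its four lists incrementally as chunks arrive.

```python
import re

# Canned chunks standing in for a streamed review with numbered Chinese headings.
chunks = ["1. 语法错", "误:主谓不一致;时态混用\n", "2. 改进建议", ":多用连接词\n"]
text = "".join(chunks)

# Pair each heading ("语法错误", "改进建议", ...) with the text that follows it.
sections = {}
for m in re.finditer(r"(\d+)\.\s*([^::\n]+)[::]\s*(.*?)(?=\n\d+\.\s|\Z)", text, re.S):
    sections[m.group(2).strip()] = m.group(3).strip()

print(sections)
# {'语法错误': '主谓不一致;时态混用', '改进建议': '多用连接词'}
```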
@@ -92,11 +101,9 @@ if __name__ == "__main__":
     """
 
     analyzer = EnglishEssayAnalyzer()
 
     print("🔍 开始分析作文...\n")
     result = analyzer.full_analysis(essay)
 
     # Save the results
     with open("analysis_report.json", "w", encoding="utf-8") as f:
         json.dump(result, f, ensure_ascii=False, indent=2)
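Once the script finishes, `analysis_report.json` holds the four lists built by `full_analysis`. A small sketch for reading the report back and summarizing it, assuming the file name and keys used above:

```python
import json

with open("analysis_report.json", encoding="utf-8") as f:
    report = json.load(f)

# Each key maps to a list of strings, mirroring the analysis dict in full_analysis.
for key in ("grammar_errors", "vocabulary_issues", "structure_problems", "suggestions"):
    items = report.get(key, [])
    print(f"{key}: {len(items)} item(s)")
    for item in items:
        print("  -", item)
```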