import os
import uuid

from fastapi import FastAPI, Form, HTTPException
from openai import OpenAI

from TtsConfig import *
from WxMini.OssUtil import upload_mp3_to_oss
from WxMini.TTS import TTS

# Initialize the FastAPI application
app = FastAPI()

# Initialize the OpenAI client against the DashScope compatible-mode endpoint
client = OpenAI(
    api_key=MODEL_API_KEY,
    base_url="https://dashscope.aliyuncs.com/compatible-mode/v1",
)


@app.post("/reply")
async def reply(prompt: str = Form(...)):
    """
    Receive the user's prompt, call the large language model, synthesize the
    reply to speech, and return the audio URL.

    :param prompt: the prompt entered by the user
    :return: the OSS URL of the generated MP3 reply
    """
    try:
        # Call the large language model
        response = client.chat.completions.create(
            model=MODEL_NAME,
            messages=[
                {"role": "system", "content": "你是一个非常好的聊天伙伴,可以疏导用户,帮他解压,一句控制在20字以内。"},
                {"role": "user", "content": prompt}
            ],
            max_tokens=500
        )

        # Extract the generated reply
        if response.choices and response.choices[0].message.content:
            result = response.choices[0].message.content.strip()

            # Call TTS to synthesize the reply as an MP3
            # Use a UUID as the file name
            uuid_str = str(uuid.uuid4())
            tts_file = "audio/" + uuid_str + ".mp3"
            t = TTS(tts_file)
            t.start(result)

            # Upload the file to OSS
            upload_mp3_to_oss(tts_file, tts_file)

            # Delete the local temporary file
            try:
                os.remove(tts_file)
                print(f"临时文件 {tts_file} 已删除")
            except Exception as e:
                print(f"删除临时文件失败: {e}")

            # Full URL of the uploaded audio
            url = 'https://ylt.oss-cn-hangzhou.aliyuncs.com/' + tts_file
            return {"success": True, "url": url}
        else:
            raise HTTPException(status_code=500, detail="大模型未返回有效结果")
    except HTTPException:
        # Re-raise HTTP errors as-is so the generic handler below does not rewrap them
        raise
    except Exception as e:
        raise HTTPException(status_code=500, detail=f"调用大模型失败: {str(e)}")


# Run the FastAPI application
if __name__ == "__main__":
    import uvicorn

    uvicorn.run(app, host="0.0.0.0", port=5500)
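
# A minimal smoke test for the /reply endpoint (illustrative sketch; assumes the
# service is running locally on port 5500 and that the `requests` package is
# installed -- it is not used by the service itself):
#
#   import requests
#   resp = requests.post("http://localhost:5500/reply", data={"prompt": "今天有点累"})
#   print(resp.json())  # expected shape: {"success": True, "url": "https://ylt.oss-cn-hangzhou.aliyuncs.com/audio/<uuid>.mp3"}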