This commit is contained in:
2025-08-31 10:22:31 +08:00
parent 75a751aba0
commit 58e4e06d8a
6 changed files with 515 additions and 151 deletions

View File

@@ -4,7 +4,7 @@ import tempfile
import uuid import uuid
from datetime import datetime from datetime import datetime
from fastapi import APIRouter, Request, File, UploadFile from fastapi import APIRouter, Request, File, UploadFile, WebSocket, WebSocketDisconnect
from fastapi.responses import JSONResponse from fastapi.responses import JSONResponse
# 创建路由路由器 # 创建路由路由器
@@ -17,14 +17,14 @@ logger = logging.getLogger(__name__)
from Util.XueBanUtil import get_xueban_response_async from Util.XueBanUtil import get_xueban_response_async
from Util.ASRClient import ASRClient from Util.ASRClient import ASRClient
from Util.ObsUtil import ObsUploader from Util.ObsUtil import ObsUploader
# 新增导入TTSService # 导入TTS管道
from Util.TTSService import TTSService from Util.TTS_Pipeline import stream_and_split_text, StreamingVolcanoTTS
# 保留原有的HTTP接口用于向后兼容
@router.post("/xueban/upload-audio") @router.post("/xueban/upload-audio")
async def upload_audio(file: UploadFile = File(...)): async def upload_audio(file: UploadFile = File(...)):
""" """
上传音频文件并进行ASR处理 上传音频文件并进行ASR处理 - 原有接口,用于向后兼容
- 参数: file - 音频文件 - 参数: file - 音频文件
- 返回: JSON包含识别结果 - 返回: JSON包含识别结果
""" """
@@ -59,13 +59,32 @@ async def upload_audio(file: UploadFile = File(...)):
feedback_text += chunk feedback_text += chunk
logger.info(f"大模型反馈生成完成: {feedback_text}") logger.info(f"大模型反馈生成完成: {feedback_text}")
# 使用TTS生成语音 # 使用流式TTS生成语音
tts_service = TTSService() import io
tts_temp_file = os.path.join(tempfile.gettempdir(), f"tts_{timestamp}.mp3") audio_chunks = []
success = tts_service.synthesize(feedback_text, output_file=tts_temp_file)
if not success: # 定义音频回调函数,收集音频块
raise Exception("TTS语音合成失败") def audio_callback(audio_chunk):
logger.info(f"TTS语音合成成功文件保存至: {tts_temp_file}") audio_chunks.append(audio_chunk)
# 获取LLM流式输出并断句
text_stream = stream_and_split_text(asr_result['text'])
# 初始化TTS处理器
tts = StreamingVolcanoTTS(max_concurrency=2)
# 流式处理文本并生成音频
await tts.synthesize_stream(text_stream, audio_callback)
# 合并所有音频块
if audio_chunks:
tts_temp_file = os.path.join(tempfile.gettempdir(), f"tts_{timestamp}.mp3")
with open(tts_temp_file, "wb") as f:
for chunk in audio_chunks:
f.write(chunk)
logger.info(f"TTS语音合成成功文件保存至: {tts_temp_file}")
else:
raise Exception("TTS语音合成失败未生成音频数据")
# 上传TTS音频文件到OBS # 上传TTS音频文件到OBS
tts_audio_url = upload_file_to_obs(tts_temp_file) tts_audio_url = upload_file_to_obs(tts_temp_file)
@@ -90,7 +109,119 @@ async def upload_audio(file: UploadFile = File(...)):
"message": f"音频处理失败: {str(e)}" "message": f"音频处理失败: {str(e)}"
}, status_code=500) }, status_code=500)
# WebSocket endpoint for streaming voice chat: receives one base64-encoded
# audio message, runs ASR, streams the LLM answer through TTS and sends the
# synthesized audio chunks back to the client, followed by an end marker.
@router.websocket("/xueban/streaming-chat")
async def streaming_chat(websocket: WebSocket):
    """Handle one streaming voice-chat round over a WebSocket.

    Protocol (client -> server):
        one JSON message {"audio_data": <base64-encoded audio>}.
    Protocol (server -> client):
        {"type": "asr_result", "text": ...}  - recognized text
        binary frames                        - TTS audio chunks
        {"type": "end"}                      - processing finished
        {"type": "error", "message": ...}    - any failure
    """
    await websocket.accept()
    logger.info("WebSocket连接已接受")
    try:
        # ---- 1. Receive and validate the audio payload -------------------
        logger.info("等待接收音频数据...")
        data = await websocket.receive_json()
        if not isinstance(data, dict):
            logger.error(f"接收到的数据不是字典类型,而是: {type(data)}")
            await websocket.send_json({"type": "error", "message": "数据格式错误"})
            return

        audio_data = data.get("audio_data")
        if not audio_data:
            logger.error("未收到音频数据")
            await websocket.send_json({"type": "error", "message": "未收到音频数据"})
            return

        # ---- 2. Decode base64 audio into a temporary file ----------------
        import base64
        timestamp = datetime.now().strftime("%Y%m%d%H%M%S")
        temp_file_path = os.path.join(tempfile.gettempdir(), f"temp_audio_{timestamp}.wav")
        logger.info(f"保存临时音频文件到: {temp_file_path}")
        try:
            with open(temp_file_path, "wb") as f:
                f.write(base64.b64decode(audio_data))
        except Exception as e:
            logger.error(f"音频文件保存失败: {str(e)}")
            await websocket.send_json({"type": "error", "message": f"音频文件保存失败: {str(e)}"})
            return

        # ---- 3. ASR --------------------------------------------------------
        logger.info("开始ASR处理...")
        try:
            asr_result = await process_asr(temp_file_path)
            logger.info(f"ASR处理完成结果: {asr_result['text']}")
        except Exception as e:
            logger.error(f"ASR处理失败: {str(e)}")
            await websocket.send_json({"type": "error", "message": f"ASR处理失败: {str(e)}"})
            return
        finally:
            # Remove the temp file whether ASR succeeded or not.
            if os.path.exists(temp_file_path):
                os.remove(temp_file_path)

        # Push the recognized text to the client before synthesis starts.
        await websocket.send_json({
            "type": "asr_result",
            "text": asr_result['text']
        })

        # ---- 4. LLM -> sentence splitting -> streaming TTS ----------------
        async def audio_callback(audio_chunk):
            # Forward each synthesized chunk to the client as a binary frame;
            # failures propagate to the TTS error handler below.
            logger.info(f"发送音频块,大小: {len(audio_chunk)}")
            await websocket.send_bytes(audio_chunk)

        logger.info("开始LLM处理和TTS合成...")
        try:
            text_stream = stream_and_split_text(asr_result['text'])
            tts = StreamingVolcanoTTS(max_concurrency=2)
            await tts.synthesize_stream(text_stream, audio_callback)
            logger.info("TTS合成完成")
        except Exception as e:
            logger.error(f"TTS合成失败: {str(e)}")
            await websocket.send_json({"type": "error", "message": f"TTS合成失败: {str(e)}"})
            return

        # ---- 5. Signal the end of the stream -------------------------------
        logger.info("发送结束信号")
        await websocket.send_json({"type": "end"})
    except WebSocketDisconnect:
        logger.info("客户端断开连接")
    except Exception as e:
        logger.error(f"WebSocket处理失败: {str(e)}")
        try:
            await websocket.send_json({"type": "error", "message": str(e)})
        except Exception:
            # The socket may already be gone; nothing more we can do.
            logger.error("发送错误消息失败")
# 原有的辅助函数保持不变
async def process_asr(audio_path: str) -> dict: async def process_asr(audio_path: str) -> dict:
""" """
调用ASR服务处理音频文件 调用ASR服务处理音频文件

View File

@@ -2,6 +2,7 @@ import uvicorn
import asyncio import asyncio
from fastapi import FastAPI from fastapi import FastAPI
from starlette.staticfiles import StaticFiles from starlette.staticfiles import StaticFiles
from fastapi.middleware.cors import CORSMiddleware # 添加此导入
from Routes.TeachingModel.tasks.BackgroundTasks import train_document_task from Routes.TeachingModel.tasks.BackgroundTasks import train_document_task
from Util.PostgreSQLUtil import init_postgres_pool, close_postgres_pool from Util.PostgreSQLUtil import init_postgres_pool, close_postgres_pool
@@ -26,6 +27,7 @@ from Routes.MjRoute import router as mj_router
from Routes.QWenImageRoute import router as qwen_image_router from Routes.QWenImageRoute import router as qwen_image_router
from Util.LightRagUtil import * from Util.LightRagUtil import *
from contextlib import asynccontextmanager from contextlib import asynccontextmanager
import logging # 添加此导入
# 控制日志输出 # 控制日志输出
logger = logging.getLogger('lightrag') logger = logging.getLogger('lightrag')
@@ -52,6 +54,15 @@ async def lifespan(_: FastAPI):
app = FastAPI(lifespan=lifespan) app = FastAPI(lifespan=lifespan)
# Add CORS middleware so browser clients (including the WebSocket page) can call the API
app.add_middleware(
    CORSMiddleware,
    # NOTE(review): browsers ignore credentials when origins is the wildcard "*";
    # restrict allow_origins to specific domains in production if cookies/auth are needed.
    allow_origins=["*"],
    allow_credentials=True,
    allow_methods=["*"],  # allow all HTTP methods
    allow_headers=["*"],  # allow all request headers
)
# 挂载静态文件目录 # 挂载静态文件目录
app.mount("/static", StaticFiles(directory="Static"), name="static") app.mount("/static", StaticFiles(directory="Static"), name="static")

View File

@@ -128,7 +128,7 @@ class StreamingVolcanoTTS:
# 通过回调函数返回音频数据 # 通过回调函数返回音频数据
if audio_data: if audio_data:
audio_callback(audio_data) await audio_callback(audio_data)
finally: finally:
await websocket.close() await websocket.close()

View File

@@ -13,7 +13,12 @@ const AudioState = {
}, },
playback: { playback: {
audioElement: null, audioElement: null,
isPlaying: false isPlaying: false,
audioChunks: [] // 存储接收到的音频块
},
websocket: {
connection: null,
isConnected: false
} }
}; };
@@ -31,6 +36,16 @@ const Utils = {
const mins = Math.floor(seconds / 60).toString().padStart(2, '0'); const mins = Math.floor(seconds / 60).toString().padStart(2, '0');
const secs = Math.floor(seconds % 60).toString().padStart(2, '0'); const secs = Math.floor(seconds % 60).toString().padStart(2, '0');
return `${mins}:${secs}`; return `${mins}:${secs}`;
},
// 将Blob转换为Base64
blobToBase64(blob) {
return new Promise((resolve, reject) => {
const reader = new FileReader();
reader.onloadend = () => resolve(reader.result.split(',')[1]);
reader.onerror = reject;
reader.readAsDataURL(blob);
});
} }
}; };
@@ -89,153 +104,141 @@ const UIController = {
} }
}; };
// ==================== 录音管理模块 ==================== // ==================== WebSocket管理模块 ====================
const RecordingManager = { const WebSocketManager = {
// 初始化录音 // 初始化WebSocket连接
async initRecording() { initConnection() {
try { console.log('初始化WebSocket连接');
const stream = await navigator.mediaDevices.getUserMedia({ audio: true }); if (AudioState.websocket.connection &&
AudioState.recording.mediaRecorder = new MediaRecorder(stream); AudioState.websocket.connection.readyState === WebSocket.OPEN) {
AudioState.recording.audioChunks = []; console.log('WebSocket连接已存在');
return;
// 设置录音数据收集回调
AudioState.recording.mediaRecorder.ondataavailable = (event) => {
if (event.data.size > 0) {
AudioState.recording.audioChunks.push(event.data);
}
};
// 设置录音完成回调
AudioState.recording.mediaRecorder.onstop = () => {
const audioBlob = new Blob(AudioState.recording.audioChunks, { type: 'audio/wav' });
console.log('录音完成,音频数据大小:', audioBlob.size);
ASRProcessor.processAudio(audioBlob);
};
return true;
} catch (error) {
console.error('获取麦克风权限失败:', error);
alert('请授权麦克风权限以使用录音功能');
return false;
} }
},
// 开始录音 const protocol = window.location.protocol === 'https:' ? 'wss:' : 'ws:';
async startRecording() { const wsUrl = `${protocol}//${window.location.host}/api/xueban/streaming-chat`;
if (AudioState.recording.isRecording) return;
console.log('尝试开始录音'); console.log('正在建立WebSocket连接:', wsUrl);
const initialized = await this.initRecording(); AudioState.websocket.connection = new WebSocket(wsUrl);
if (initialized && AudioState.recording.mediaRecorder) { // 连接打开
AudioState.recording.mediaRecorder.start(); AudioState.websocket.connection.onopen = () => {
AudioState.recording.isRecording = true; console.log('WebSocket连接已建立');
UIController.updateRecordingButtons(true); AudioState.websocket.isConnected = true;
console.log('开始录音成功'); };
// 设置最长录音时间 // 连接关闭
setTimeout(() => this.stopRecording(), AudioState.recording.maxDuration); AudioState.websocket.connection.onclose = () => {
} console.log('WebSocket连接已关闭');
}, AudioState.websocket.isConnected = false;
};
// 停止录音 // 连接错误
stopRecording() { AudioState.websocket.connection.onerror = (error) => {
if (!AudioState.recording.isRecording || !AudioState.recording.mediaRecorder) return; console.error('WebSocket连接错误:', error);
AudioState.websocket.isConnected = false;
UIController.toggleElement('thinkingIndicator', false);
UIController.setStartRecordButtonEnabled(true);
alert('连接服务器失败,请稍后再试');
};
AudioState.recording.mediaRecorder.stop(); // 接收消息
AudioState.recording.isRecording = false; AudioState.websocket.connection.onmessage = (event) => {
UIController.updateRecordingButtons(false); console.log('收到WebSocket消息:', {
console.log('停止录音'); type: typeof event.data,
size: typeof event.data === 'string' ? event.data.length : event.data.size
// 停止音频流
if (AudioState.recording.mediaRecorder.stream) {
AudioState.recording.mediaRecorder.stream.getTracks().forEach(track => track.stop());
}
}
};
// ==================== ASR处理模块 ====================
const ASRProcessor = {
// 处理音频数据
async processAudio(audioBlob) {
console.log('开始上传音频到服务器');
UIController.toggleElement('thinkingIndicator', true);
// 禁用帮我讲题按钮,防止在思考过程中重复点击
UIController.setStartRecordButtonEnabled(false);
// 创建AbortController用于超时控制
const controller = new AbortController();
const timeoutId = setTimeout(() => controller.abort(), 120000); // 120秒超时
try {
const formData = new FormData();
formData.append('file', audioBlob, 'recording.wav');
const response = await fetch('/api/xueban/upload-audio', {
method: 'POST',
body: formData,
signal: controller.signal // 添加超时信号
}); });
this.handleMessage(event);
};
},
// 请求成功,清除超时定时器 // 处理接收到的消息
clearTimeout(timeoutId); async handleMessage(event) {
// 检查消息类型
if (typeof event.data === 'string') {
// JSON消息
try {
const data = JSON.parse(event.data);
console.log('解析JSON消息成功:', data);
if (!response.ok) throw new Error('服务器响应错误'); switch (data.type) {
case 'asr_result':
// 显示ASR识别结果
console.log('收到ASR结果:', data.text);
const asrTextElement = document.getElementById('asrResultText');
if (asrTextElement) {
asrTextElement.textContent = data.text || '未识别到内容';
}
break;
const data = await response.json(); case 'end':
console.log('处理结果:', data); // 处理结束
UIController.toggleElement('thinkingIndicator', false); console.log('流式处理完成');
// 思考结束,重新启用帮我讲题按钮 console.log('当前音频块数量:', AudioState.playback.audioChunks.length);
UIController.setStartRecordButtonEnabled(true); UIController.toggleElement('thinkingIndicator', false);
UIController.setStartRecordButtonEnabled(true);
if (data.success) { // 合并所有音频块并播放
ResultDisplay.showResults(data.data); if (AudioState.playback.audioChunks.length > 0) {
} else { console.log('开始合并和播放音频');
alert('音频处理失败: ' + data.message); this.combineAndPlayAudio();
} else {
console.warn('没有收到音频数据,无法播放');
}
break;
case 'error':
// 错误处理
console.error('收到错误消息:', data.message);
UIController.toggleElement('thinkingIndicator', false);
UIController.setStartRecordButtonEnabled(true);
alert('处理失败: ' + data.message);
break;
default:
console.log('未知消息类型:', data.type);
}
} catch (e) {
console.error('解析JSON消息失败:', e);
console.error('原始消息内容:', event.data);
} }
} else {
// 二进制音频数据
console.log('收到音频数据,大小:', event.data.size);
console.log('音频数据类型:', event.data.type);
AudioState.playback.audioChunks.push(event.data);
console.log('当前音频块数量:', AudioState.playback.audioChunks.length);
}
},
// 合并所有音频块并播放
combineAndPlayAudio() {
try {
console.log('开始合并音频块,数量:', AudioState.playback.audioChunks.length);
// 创建一个新的Blob包含所有音频块
const combinedBlob = new Blob(AudioState.playback.audioChunks, { type: 'audio/wav' });
console.log('合并后的Blob大小:', combinedBlob.size);
// 创建音频URL
const audioUrl = URL.createObjectURL(combinedBlob);
console.log('创建音频URL:', audioUrl);
// 初始化音频播放器
AudioPlayer.initPlayer(audioUrl);
} catch (error) { } catch (error) {
// 清除超时定时器 console.error('合并和播放音频失败:', error);
clearTimeout(timeoutId);
console.error('上传音频失败:', error);
UIController.toggleElement('thinkingIndicator', false);
// 发生错误时也要重新启用按钮
UIController.setStartRecordButtonEnabled(true);
// 判断是否是超时错误
if (error.name === 'AbortError') {
alert('请求超时,服务器响应时间过长,请稍后再试');
} else {
alert('上传音频失败: ' + error.message);
}
} }
} },
};
// ==================== 结果显示模块 ==================== // 关闭WebSocket连接
const ResultDisplay = { closeConnection() {
// 显示ASR识别结果和反馈 if (AudioState.websocket.connection) {
showResults(data) { AudioState.websocket.connection.close();
const resultContainer = document.getElementById('resultContainer'); AudioState.websocket.connection = null;
if (resultContainer) { AudioState.websocket.isConnected = false;
resultContainer.style.display = 'flex'; console.log('WebSocket连接已关闭');
}
// 显示识别文本
const asrTextElement = document.getElementById('asrResultText');
if (asrTextElement) {
asrTextElement.textContent = data.asr_text || '未识别到内容';
}
// 显示反馈文本
const feedbackTextElement = document.getElementById('feedbackResultText');
if (feedbackTextElement) {
feedbackTextElement.textContent = data.feedback_text || '无反馈内容';
}
// 如果有音频URL初始化音频播放器
if (data.audio_url) {
AudioPlayer.initPlayer(data.audio_url);
} }
} }
}; };
@@ -244,31 +247,49 @@ const ResultDisplay = {
const AudioPlayer = { const AudioPlayer = {
// 初始化音频播放器 // 初始化音频播放器
initPlayer(audioUrl) { initPlayer(audioUrl) {
console.log('AudioPlayer.initPlayer 被调用音频URL:', audioUrl);
// 停止当前播放的音频 // 停止当前播放的音频
if (AudioState.playback.audioElement) { if (AudioState.playback.audioElement) {
console.log('停止当前播放的音频');
AudioState.playback.audioElement.pause(); AudioState.playback.audioElement.pause();
} }
// 创建新的音频元素 // 创建新的音频元素
console.log('创建新的音频元素');
AudioState.playback.audioElement = new Audio(audioUrl); AudioState.playback.audioElement = new Audio(audioUrl);
AudioState.playback.isPlaying = false; AudioState.playback.isPlaying = false;
// 绑定音频事件 // 绑定音频事件
AudioState.playback.audioElement.onloadedmetadata = () => { AudioState.playback.audioElement.onloadedmetadata = () => {
console.log('音频元数据加载完成');
this.updateTimeDisplay(); this.updateTimeDisplay();
this.play(); // 自动播放 this.play(); // 自动播放
}; };
AudioState.playback.audioElement.onplay = () => {
console.log('音频开始播放');
};
AudioState.playback.audioElement.onpause = () => {
console.log('音频暂停');
};
AudioState.playback.audioElement.ontimeupdate = () => { AudioState.playback.audioElement.ontimeupdate = () => {
this.updateProgress(); this.updateProgress();
this.updateTimeDisplay(); this.updateTimeDisplay();
}; };
AudioState.playback.audioElement.onended = () => { AudioState.playback.audioElement.onended = () => {
console.log('音频播放结束');
AudioState.playback.isPlaying = false; AudioState.playback.isPlaying = false;
UIController.updatePlayButton(false); UIController.updatePlayButton(false);
}; };
AudioState.playback.audioElement.onerror = (error) => {
console.error('音频播放错误:', error);
};
// 绑定播放按钮点击事件 // 绑定播放按钮点击事件
const playBtn = document.getElementById('playAudioBtn'); const playBtn = document.getElementById('playAudioBtn');
if (playBtn) { if (playBtn) {
@@ -337,7 +358,7 @@ const AudioPlayer = {
} }
}; };
// ==================== 事件绑定 ==================== // ==================== 事件绑定模块 ====================
const EventBinder = { const EventBinder = {
// 绑定所有事件 // 绑定所有事件
bindEvents() { bindEvents() {
@@ -371,6 +392,202 @@ const EventBinder = {
} }
}; };
// ==================== Recording manager ====================
// Captures microphone audio with MediaRecorder and streams the finished
// recording to the server over the WebSocket managed by WebSocketManager.
const RecordingManager = {
    // Request microphone access and wire up MediaRecorder callbacks.
    // Returns true on success, false when permission is denied or setup fails.
    async initRecording() {
        try {
            const stream = await navigator.mediaDevices.getUserMedia({ audio: true });

            AudioState.recording.mediaRecorder = new MediaRecorder(stream, {
                mimeType: 'audio/webm;codecs=opus'
            });
            // Reset the chunk buffer so a previously aborted run cannot leak
            // stale audio into this recording.
            AudioState.recording.audioChunks = [];

            // Buffer encoded audio as it becomes available.
            AudioState.recording.mediaRecorder.ondataavailable = (event) => {
                if (event.data.size > 0) {
                    AudioState.recording.audioChunks.push(event.data);
                }
            };

            // When recording stops: package the chunks, make sure the WebSocket
            // is connected, then send the audio for processing. Uses the
            // RecordingManager receiver explicitly because `this` inside this
            // handler is not guaranteed to be the manager object.
            AudioState.recording.mediaRecorder.onstop = async () => {
                console.log('录音停止,开始处理音频数据');

                const audioBlob = new Blob(AudioState.recording.audioChunks, { type: 'audio/webm' });
                console.log('录音Blob大小:', audioBlob.size);

                UIController.toggleElement('thinkingIndicator', true);

                WebSocketManager.initConnection();
                await RecordingManager.waitForConnection();

                const success = await RecordingManager.sendAudio(audioBlob);
                if (!success) {
                    console.error('发送音频数据失败');
                    UIController.toggleElement('thinkingIndicator', false);
                    UIController.setStartRecordButtonEnabled(true);
                }

                // Release the buffered chunks and the microphone.
                AudioState.recording.audioChunks = [];
                stream.getTracks().forEach(track => track.stop());
            };

            console.log('录音初始化成功');
            return true;
        } catch (error) {
            console.error('录音初始化失败:', error);
            alert('录音初始化失败,请授予麦克风权限后重试');
            return false;
        }
    },

    // Start a new recording session (no-op when one is already running).
    async startRecording() {
        console.log('开始录音');
        if (AudioState.recording.isRecording) {
            console.warn('已经在录音中');
            return;
        }

        const initialized = await this.initRecording();
        if (!initialized) {
            console.error('录音初始化失败,无法开始录音');
            return;
        }

        AudioState.recording.isRecording = true;
        AudioState.recording.mediaRecorder.start();
        UIController.updateRecordingButtons(true);
        console.log('录音开始成功');

        // Automatically stop once the maximum duration is reached.
        setTimeout(() => {
            if (AudioState.recording.isRecording) {
                console.log('达到最大录音时长,自动停止录音');
                this.stopRecording();
            }
        }, AudioState.recording.maxDuration);
    },

    // Stop the current recording; the MediaRecorder onstop handler takes over.
    stopRecording() {
        console.log('停止录音');
        if (!AudioState.recording.isRecording || !AudioState.recording.mediaRecorder) {
            console.warn('当前没有在录音');
            return;
        }

        AudioState.recording.mediaRecorder.stop();
        AudioState.recording.isRecording = false;
        UIController.updateRecordingButtons(false);
        console.log('录音停止命令已发送');
    },

    // Resolve once the WebSocket connection is open (polls every 100 ms).
    waitForConnection() {
        return new Promise((resolve) => {
            const checkConnection = () => {
                console.log('检查WebSocket连接状态:', AudioState.websocket.isConnected);
                if (AudioState.websocket.isConnected &&
                    AudioState.websocket.connection &&
                    AudioState.websocket.connection.readyState === WebSocket.OPEN) {
                    console.log('WebSocket连接已建立可以发送数据');
                    resolve();
                } else {
                    console.log('WebSocket连接未建立等待...');
                    setTimeout(checkConnection, 100);
                }
            };
            checkConnection();
        });
    },

    // Encode the blob as base64 and send it as a JSON payload over the socket.
    // Returns true when the payload was handed to the socket, false otherwise.
    async sendAudio(audioBlob) {
        console.log('=== 开始执行sendAudio方法 ===');

        if (!audioBlob) {
            console.error('sendAudio方法参数错误: audioBlob为空');
            return false;
        }
        console.log('音频数据参数:', {
            exists: !!audioBlob,
            size: audioBlob.size,
            type: audioBlob.type
        });

        console.log('WebSocket连接状态:', {
            isConnected: AudioState.websocket.isConnected,
            connectionExists: !!AudioState.websocket.connection,
            readyState: AudioState.websocket.connection ? AudioState.websocket.connection.readyState : 'N/A'
        });
        if (!AudioState.websocket.isConnected ||
            !AudioState.websocket.connection ||
            AudioState.websocket.connection.readyState !== WebSocket.OPEN) {
            console.error('WebSocket连接未建立无法发送音频数据');
            return false;
        }

        try {
            console.log('将音频数据转换为Base64');
            const base64Audio = await Utils.blobToBase64(audioBlob);
            console.log('音频数据Base64长度:', base64Audio.length);

            const payload = {
                audio_data: base64Audio
            };
            console.log('准备发送的载荷:', {
                keys: Object.keys(payload),
                audioDataLength: payload.audio_data.length
            });

            console.log('发送音频数据到WebSocket');
            AudioState.websocket.connection.send(JSON.stringify(payload));
            console.log('=== 音频数据发送成功 ===');
            return true;
        } catch (error) {
            console.error('发送音频数据失败:', error);
            return false;
        }
    }
};
// ==================== 初始化 ==================== // ==================== 初始化 ====================
// 页面加载完成后初始化 // 页面加载完成后初始化
function initializeApp() { function initializeApp() {
@@ -403,3 +620,8 @@ window.addEventListener('load', () => {
EventBinder.bindEvents(); EventBinder.bindEvents();
console.log('学伴录音功能load事件初始化完成'); console.log('学伴录音功能load事件初始化完成');
}); });
// Close the WebSocket connection when the page unloads so the server is not
// left holding a dead connection.
function handleBeforeUnload() {
    WebSocketManager.closeConnection();
}
window.addEventListener('beforeunload', handleBeforeUnload);