# system_stats
import json
import urllib.parse
import urllib.request

from Util import ConfigUtil
from Util.ComfyUIUtil import *
from Util.CommonUtil import *


def queue_prompt(server_address, client_id, prompt):
    # Submit a workflow to ComfyUI's POST /prompt endpoint and return the parsed response.
    p = {"prompt": prompt, "client_id": client_id}
    data = json.dumps(p).encode('utf-8')
    req = urllib.request.Request("http://{}/prompt".format(server_address), data=data)
    try:
        # Open the request once; opening it a second time would queue the same prompt twice.
        return json.loads(urllib.request.urlopen(req).read())
    except Exception as err:
        print(err)
        return None


'''
Per-device fields returned by GET /system_stats:
vram_total        # total VRAM: 25756696576, i.e. ~24GB
vram_free         # free VRAM: 25756696576
torch_vram_total  # 16005464064
torch_vram_free   # 331041996
'''
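
# For reference, /system_stats returns roughly the shape sketched below. This is an
# abridged, illustrative example: the exact fields vary by ComfyUI version, the
# numbers reuse the sample values above, and the device name is made up.
#
# {
#     "system": {"os": "posix", "python_version": "3.10.x", ...},
#     "devices": [
#         {
#             "name": "cuda:0 NVIDIA GeForce RTX 4090",
#             "type": "cuda",
#             "index": 0,
#             "vram_total": 25756696576,
#             "vram_free": 25756696576,
#             "torch_vram_total": 16005464064,
#             "torch_vram_free": 331041996
#         }
#     ]
# }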


def getUse(server_address):
    # Query GET /system_stats and build a VRAM usage summary for the first device.
    req = urllib.request.Request("http://{}/system_stats".format(server_address))
    res = json.loads(urllib.request.urlopen(req).read())

    vram_total = res['devices'][0]['vram_total']
    vram_total_str = str(int(vram_total / 1024 / 1024 / 1024 + 0.5))
    vram_free = res['devices'][0]['vram_free']
    used_vram = vram_total - vram_free
    used_vram_str = str(int(used_vram / 1024 / 1024 / 1024 + 0.5))
    used_lv = round(100.0 * used_vram / vram_total, 2)
    # Return the summary rather than printing it here, so callers can add their own
    # "before/after clearing" prefixes.
    return "Total VRAM: " + vram_total_str + "GB, used: " + used_vram_str + "GB, usage: " + str(used_lv) + "%"


# GET /system_stats
# System statistics endpoint

# Load the configuration
config = ConfigUtil.getConfig()

server_address = config.get('comfyui', 'server_address')
print(getUse(server_address))

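
# ConfigUtil.getConfig() is assumed to return a configparser-style object, so a
# config file with a section like the following would satisfy the lookup above
# (the address is only an example; ComfyUI listens on 127.0.0.1:8188 by default):
#
# [comfyui]
# server_address = 127.0.0.1:8188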

# Open the file and read its contents
file_path = r'../JSON/clearGPU.json'
with open(file_path, 'r', encoding='utf-8') as file:
    prompt_data = json.load(file)
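
# clearGPU.json is expected to contain a ComfyUI "API format" workflow: a dict that
# maps node ids to {"class_type": ..., "inputs": {...}}, which is what queue_prompt()
# posts to /prompt. Illustrative shape only; the node name and inputs below are
# placeholders, not necessarily what the file really contains:
#
# {
#     "1": {"class_type": "easy clearCacheAll", "inputs": {}}
# }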

queue_prompt(server_address, "cleanGpuRam", prompt_data)

# Show GPU usage
print('Before clearing VRAM: ' + getUse(server_address))

# How to clear ComfyUI's GPU cache:
# https://wailikeji.blog.csdn.net/article/details/140035515

# Clear the GPU cache (clearGPU is provided by the wildcard imports above)
clearGPU(server_address)

# https://comfy.icu/node/easy-clearCacheAll
# https://github.com/comfyanonymous/ComfyUI/issues/3615
# https://github.com/yolain/ComfyUI-Easy-Use/issues/124

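
# A minimal sketch (not called above) of how a helper like clearGPU might free VRAM
# over the HTTP API, assuming the running ComfyUI build exposes the POST /free
# endpoint; the name clear_gpu_via_free is hypothetical and the real
# Util.ComfyUIUtil implementation may work differently.
def clear_gpu_via_free(server_address):
    # Ask the server to unload models and release cached memory.
    body = json.dumps({"unload_models": True, "free_memory": True}).encode('utf-8')
    req = urllib.request.Request("http://{}/free".format(server_address), data=body)
    req.add_header('Content-Type', 'application/json')
    # urlopen raises on HTTP errors; the endpoint is not expected to return a payload.
    urllib.request.urlopen(req)
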
# Show GPU usage
print('After clearing VRAM: ' + getUse(server_address))