# system_stats
import json
import urllib.parse
import urllib.request

from Util import ConfigUtil
from Util.ComfyUIUtil import *
from Util.CommonUtil import *

def queue_prompt(server_address, client_id, prompt):
    """POST a workflow to /prompt and return the parsed JSON response."""
    p = {"prompt": prompt, "client_id": client_id}
    data = json.dumps(p).encode('utf-8')
    req = urllib.request.Request("http://{}/prompt".format(server_address), data=data)
    try:
        # Send the request once and return the parsed response
        return json.loads(urllib.request.urlopen(req).read())
    except Exception as err:
        print(err)
        return None

def getUse(server_address):
    """GET /system_stats and print VRAM usage of the first device."""
    req = urllib.request.Request("http://{}/system_stats".format(server_address))
    res = json.loads(urllib.request.urlopen(req).read())
    vram_total = res['devices'][0]['vram_total']
    vram_total_str = str(int(vram_total / 1024 / 1024 / 1024 + 0.5))
    vram_free = res['devices'][0]['vram_free']
    used_vram = vram_total - vram_free
    used_vram_str = str(int((vram_total - vram_free) / 1024 / 1024 / 1024 + 0.5))
    used_lv = round(1.0 * used_vram / vram_total * 100, 2)
    print("Total VRAM: " + vram_total_str + "GB, used: " + used_vram_str + "GB, usage: " + str(used_lv) + "%")
    # vram_total        # total VRAM, e.g. 25756696576 bytes (i.e. 24GB)
    # vram_free         # free VRAM, e.g. 25756696576 bytes
    # torch_vram_total  16005464064
    # torch_vram_free   331041996

# GET /system_stats
# System statistics endpoint
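
# A minimal sketch (not part of the original script): print VRAM usage for every
# device reported by /system_stats instead of only devices[0]. It relies only on
# the vram_total / vram_free fields documented in the comments above.
def print_all_devices(server_address):
    req = urllib.request.Request("http://{}/system_stats".format(server_address))
    res = json.loads(urllib.request.urlopen(req).read())
    for i, dev in enumerate(res['devices']):
        total_gb = dev['vram_total'] / 1024 / 1024 / 1024
        free_gb = dev['vram_free'] / 1024 / 1024 / 1024
        print("Device {}: {:.1f} GB total, {:.1f} GB free".format(i, total_gb, free_gb))
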
# Load the configuration file
config = ConfigUtil.getConfig()
server_address = config.get('comfyui', 'server_address')
getUse(server_address)
# Open the workflow file and read its contents
file_path = r'../JSON/27.json'
with open(file_path, 'r', encoding='utf-8') as file:
    content = file.read()
# How to clear ComfyUI's GPU cache
# https://wailikeji.blog.csdn.net/article/details/140035515
# queue_prompt(server_address, "cleanGpuRam", content)
# https://comfy.icu/node/easy-clearCacheAll
# https://github.com/comfyanonymous/ComfyUI/issues/3615
# Is there a way to clear the memory (VRAM) after a workflow run?
# There's an API route for it (post /free with { "unload_models": true, "free_memory": true }, and Swarm has a button in the Server tab for that. I don't think comfy itself currently has a button for it
# https://github.com/yolain/ComfyUI-Easy-Use/issues/124
# POST /free with the payload format described in the GitHub comment quoted above
p = {"unload_models": True, "free_memory": True}
data = json.dumps(p).encode('utf-8')
req = urllib.request.Request("http://{}/free".format(server_address), data=data)
print(urllib.request.urlopen(req).read())
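
# Sketch (not part of the original script): query /system_stats again after the
# /free call to confirm that models were unloaded and VRAM was released.
# Only endpoints already used above are assumed; the 2-second delay is arbitrary.
import time

time.sleep(2)           # give the server a moment to unload models
getUse(server_address)  # the "used" figure should have dropped
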
# https://github.com/yolain/ComfyUI-Easy-Use
# ComfyUI-Easy-Use plugin
# https://www.runcomfy.com/comfyui-nodes/ComfyUI-Easy-Use/easy-cleanGpuUsed
# https://www.yunrobot.cn/showdoc/web/#/641840309/231516860
# https://www.reddit.com/r/comfyui/comments/1cdhz5v/best_way_to_clear_vram_after_each_generation/