From 1bb0b6abd7e52f88529cf6ad0f2ece0c17220b27 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?=E9=BB=84=E6=B5=B7?= <10402852@qq.com>
Date: Tue, 2 Jul 2024 14:04:06 +0800
Subject: [PATCH] 'commit'

---
 BaiHu/Doc/GPU监控.txt | 49 +++++++++++++++++++++++++++++++++++++++++
 BaiHu/Test/TestGPU.py | 11 ++++++++-
 2 files changed, 59 insertions(+), 1 deletion(-)
 create mode 100644 BaiHu/Doc/GPU监控.txt

diff --git a/BaiHu/Doc/GPU监控.txt b/BaiHu/Doc/GPU监控.txt
new file mode 100644
index 00000000..645a9c85
--- /dev/null
+++ b/BaiHu/Doc/GPU监控.txt
@@ -0,0 +1,49 @@
+https://github.com/AUTOMATIC1111/stable-diffusion-webui/blob/master/modules/api/api.py
+
+# How to install Python packages into the 秋叶 (Aki) all-in-one package
+https://www.cnblogs.com/bossma/p/17593474.html
+
+# Upgrade pip
+Open D:\sd-webui-aki-v4.7\python\Scripts, type cmd in the Explorer address bar, and press Enter:
+
+python -m pip install --upgrade pip
+
+# Install the package
+
+pip install pynvml
+
+# File to modify
+D:\sd-webui-aki-v4.7\modules\api\api.py
+
+
+# Addition 1: imports
+from pynvml import *
+import torch
+import gc
+
+# Addition 2: register the new routes
+self.add_api_route("/sdapi/v1/empty_cache", self.empty_cache, methods=["POST"])
+self.add_api_route("/sdapi/v1/get_vram", self.get_vram, methods=["POST"])
+
+
+# Addition 3: the handler methods
+def empty_cache(self):
+    if torch.cuda.is_available():
+        gc.collect()
+        torch.cuda.empty_cache()
+        torch.cuda.ipc_collect()
+        gc.collect()
+        return {"success": True, "message": "GPU cache cleared!"}
+    return {"success": False, "message": "CUDA is not available."}
+
+
+def get_vram(self):
+    nvmlInit()
+    h = nvmlDeviceGetHandleByIndex(0)
+    info = nvmlDeviceGetMemoryInfo(h)
+    res = []
+    res.append(f'total: {round(info.total / 1024 / 1024 / 1024, 1)} GB')
+    res.append(f'free : {round(info.free / 1024 / 1024 / 1024, 1)} GB')
+    res.append(f'used : {round(info.used / 1024 / 1024 / 1024, 1)} GB')
+    nvmlShutdown()
+    return res
\ No newline at end of file

diff --git a/BaiHu/Test/TestGPU.py b/BaiHu/Test/TestGPU.py
index 8a4d546e..35eeb327 100644
--- a/BaiHu/Test/TestGPU.py
+++ b/BaiHu/Test/TestGPU.py
@@ -18,7 +18,7 @@ def getVRam():
 
 
 # Print the current VRAM usage
-getVRam()
+# getVRam()
 
 # If this prints False, the current PyTorch build cannot use the GPU.
 if torch.cuda.is_available():
@@ -31,3 +31,12 @@ if torch.cuda.is_available():
     getVRam()
 else:
     print("This machine does not support GPU cache clearing!")
+
+
+def clean_vram():
+    if torch.cuda.is_available():
+        gc.collect()
+        torch.cuda.empty_cache()
+        torch.cuda.ipc_collect()
+        gc.collect()
+    return {}
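
# Calling the new endpoints
A minimal client-side sketch for exercising the two new routes, not part of the
patch itself. It assumes the webui was started with the --api flag and listens
on the default http://127.0.0.1:7860 (adjust BASE_URL to your setup); the
response shapes follow the handlers above.

import requests

BASE_URL = "http://127.0.0.1:7860"  # assumption: default local webui address

# Query current VRAM usage; get_vram returns a list of total/free/used strings.
vram = requests.post(f"{BASE_URL}/sdapi/v1/get_vram").json()
print(vram)

# Ask the server to release cached GPU memory between generations.
result = requests.post(f"{BASE_URL}/sdapi/v1/empty_cache").json()
print(result)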