From 93e1850a2c1eef8fe914bd020dde3e94d6b54f6c Mon Sep 17 00:00:00 2001
From: oobabooga <112222186+oobabooga@users.noreply.github.com>
Date: Thu, 15 May 2025 21:42:15 -0700
Subject: [PATCH] Only show the VRAM info for llama.cpp

---
 modules/loaders.py | 1 +
 1 file changed, 1 insertion(+)

diff --git a/modules/loaders.py b/modules/loaders.py
index 583b65c2..79a7a4a3 100644
--- a/modules/loaders.py
+++ b/modules/loaders.py
@@ -28,6 +28,7 @@ loaders_and_params = OrderedDict({
         'device_draft',
         'ctx_size_draft',
         'speculative_decoding_accordion',
+        'vram_info',
     ],
     'Transformers': [
         'gpu_split',
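
For context: `loaders_and_params` in `modules/loaders.py` maps each loader name to the list of UI element names shown when that loader is selected, so appending `'vram_info'` to the llama.cpp entry (and to no other loader) makes the VRAM info element visible only for llama.cpp. The sketch below illustrates how such a per-loader mapping can drive element visibility; the helper `visible_elements` and the `__main__` check are illustrative assumptions, not the project's actual API.

```python
from collections import OrderedDict

# Illustrative subset of the mapping patched above: each loader name
# maps to the UI elements displayed when that loader is selected.
loaders_and_params = OrderedDict({
    'llama.cpp': [
        'ctx_size_draft',
        'speculative_decoding_accordion',
        'vram_info',  # added by this patch, so only llama.cpp shows it
    ],
    'Transformers': [
        'gpu_split',
    ],
})


def visible_elements(loader: str) -> set[str]:
    """Return the set of UI element names to display for `loader`."""
    return set(loaders_and_params.get(loader, []))


if __name__ == '__main__':
    # 'vram_info' is visible for llama.cpp but not for Transformers.
    print('vram_info' in visible_elements('llama.cpp'))     # True
    print('vram_info' in visible_elements('Transformers'))  # False
```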