Only show the VRAM info for llama.cpp

This commit is contained in:
oobabooga 2025-05-15 21:42:15 -07:00
parent cbf4daf1c8
commit 93e1850a2c

View file

@@ -28,6 +28,7 @@ loaders_and_params = OrderedDict({
'device_draft',
'ctx_size_draft',
'speculative_decoding_accordion',
+    'vram_info',
],
'Transformers': [
'gpu_split',