diff --git a/modules/models_settings.py b/modules/models_settings.py
index e742e0d8..df5a8e8d 100644
--- a/modules/models_settings.py
+++ b/modules/models_settings.py
@@ -438,7 +438,7 @@ def update_gpu_layers_and_vram(loader, model, gpu_layers, ctx_size, cache_type,
- If for_ui=False: (vram_usage, adjusted_layers) or just vram_usage
"""
if loader != 'llama.cpp' or model in ["None", None] or not model.endswith(".gguf"):
-        vram_info = "<div id=\"vram-info\">Estimated VRAM to load the model:"
+        vram_info = "<div id=\"vram-info\">Estimated VRAM to load the model:</div>"
if for_ui:
return (vram_info, gr.update()) if auto_adjust else vram_info
else:
@@ -480,7 +480,7 @@ def update_gpu_layers_and_vram(loader, model, gpu_layers, ctx_size, cache_type,
vram_usage = estimate_vram(model, current_layers, ctx_size, cache_type)
if for_ui:
-            vram_info = f"<div id=\"vram-info\">Estimated VRAM to load the model:<br>{vram_usage:.0f} MiB"
+            vram_info = f"<div id=\"vram-info\">Estimated VRAM to load the model: {vram_usage:.0f} MiB</div>"
if auto_adjust:
return vram_info, gr.update(value=current_layers, maximum=max_layers)
else:
diff --git a/modules/ui_model_menu.py b/modules/ui_model_menu.py
index d361f692..862b3893 100644
--- a/modules/ui_model_menu.py
+++ b/modules/ui_model_menu.py
@@ -310,7 +310,7 @@ def get_initial_vram_info():
for_ui=True
)
-    return "<div id=\"vram-info\">Estimated VRAM to load the model:"
+    return "<div id=\"vram-info\">Estimated VRAM to load the model:</div>"
def get_initial_gpu_layers_max():
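
Reviewer note (not part of the patch): the old strings opened the #vram-info <div> without ever closing it, so everything rendered after the label sat inside an unterminated block, and the <br> in the f-string pushed the MiB value onto its own line. The patched strings close the tag and keep the label and value on one line. Below is a minimal, self-contained sketch of that invariant, assuming only the string shapes shown in the hunks above; the TagBalanceChecker helper is hypothetical and not part of this repository.

# Illustrative check only; TagBalanceChecker is a hypothetical helper,
# not part of the codebase.
from html.parser import HTMLParser

class TagBalanceChecker(HTMLParser):
    """Counts open tags, treating <br> as a void element."""
    def __init__(self):
        super().__init__()
        self.depth = 0

    def handle_starttag(self, tag, attrs):
        if tag != "br":
            self.depth += 1

    def handle_endtag(self, tag):
        self.depth -= 1

def is_balanced(markup):
    checker = TagBalanceChecker()
    checker.feed(markup)
    checker.close()
    return checker.depth == 0

vram_usage = 1234.0
old = f"<div id=\"vram-info\">Estimated VRAM to load the model:<br>{vram_usage:.0f} MiB"
new = f"<div id=\"vram-info\">Estimated VRAM to load the model: {vram_usage:.0f} MiB</div>"
assert not is_balanced(old)  # unclosed #vram-info div leaked into the page
assert is_balanced(new)      # patched markup closes the div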