llama.cpp: Add --no-webui to the llama-server command

oobabooga 2025-05-08 10:41:25 -07:00
parent 3bc2ec2b11
commit 9ea2a69210


@@ -261,6 +261,7 @@ class LlamaServer:
             "--gpu-layers", str(shared.args.gpu_layers),
             "--batch-size", str(shared.args.batch_size),
             "--port", str(self.port),
+            "--no-webui",
         ]
         if shared.args.flash_attn:
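
For context, --no-webui tells llama-server to skip serving its built-in web UI, which is redundant here because text-generation-webui provides its own frontend. A minimal Python sketch of the kind of launch this change produces; the model path, port, and argument values are illustrative, not taken from the repository:

    # Sketch only: binary name and argument values are assumptions for
    # illustration, not the repository's actual configuration.
    import subprocess

    cmd = [
        "llama-server",              # llama.cpp server binary
        "--model", "model.gguf",     # hypothetical model path
        "--gpu-layers", "32",
        "--batch-size", "512",
        "--port", "8080",
        "--no-webui",                # disable llama-server's built-in web UI
    ]
    process = subprocess.Popen(cmd)  # run the server as a background process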