diff --git a/modules/loaders.py b/modules/loaders.py
index b29679bd..4b76549b 100644
--- a/modules/loaders.py
+++ b/modules/loaders.py
@@ -84,7 +84,6 @@ loaders_and_params = OrderedDict({
         'no_flash_attn',
         'no_xformers',
         'no_sdpa',
-        'exllamav2_info',
         'model_draft',
         'draft_max',
         'ctx_size_draft',
diff --git a/modules/ui_model_menu.py b/modules/ui_model_menu.py
index 28b7222d..33e152a0 100644
--- a/modules/ui_model_menu.py
+++ b/modules/ui_model_menu.py
@@ -68,7 +68,6 @@ def create_ui():
         shared.gradio['enable_tp'] = gr.Checkbox(label="enable_tp", value=shared.args.enable_tp, info='Enable Tensor Parallelism (TP).')
         shared.gradio['cpp_runner'] = gr.Checkbox(label="cpp-runner", value=shared.args.cpp_runner, info='Enable inference with ModelRunnerCpp, which is faster than the default ModelRunner.')
         shared.gradio['trust_remote_code'] = gr.Checkbox(label="trust-remote-code", value=shared.args.trust_remote_code, info='Set trust_remote_code=True while loading the tokenizer/model. To enable this option, start the web UI with the --trust-remote-code flag.', interactive=shared.args.trust_remote_code)
-        shared.gradio['exllamav2_info'] = gr.Markdown("ExLlamav2_HF is recommended over ExLlamav2 for better integration with extensions and more consistent sampling behavior across loaders.")
         shared.gradio['tensorrt_llm_info'] = gr.Markdown('* TensorRT-LLM has to be installed manually in a separate Python 3.10 environment at the moment. For a guide, consult the description of [this PR](https://github.com/oobabooga/text-generation-webui/pull/5715). \n\n* `ctx_size` is only used when `cpp-runner` is checked.\n\n* `cpp_runner` does not support streaming at the moment.')
 
         # Speculative decoding