From 0a04d3fb2a84be2d716874d475d20bb7ccb2bd1e Mon Sep 17 00:00:00 2001
From: PanQiWei <594557445@qq.com>
Date: Sun, 13 Aug 2023 16:14:01 +0800
Subject: [PATCH] explicit set "base" value

---
 auto_gptq/modeling/llama.py | 1 +
 1 file changed, 1 insertion(+)

diff --git a/auto_gptq/modeling/llama.py b/auto_gptq/modeling/llama.py
index 8062d8f..f13d02b 100644
--- a/auto_gptq/modeling/llama.py
+++ b/auto_gptq/modeling/llama.py
@@ -40,6 +40,7 @@ class LlamaGPTQForCausalLM(BaseGPTQForCausalLM):
         rope_cache = build_rope_cache(
             rotary_dim=model_config.hidden_size // num_heads,
             max_position=model_config.max_position_embeddings,
+            base=10000,
             device=model.device,
             dtype=model.dtype
         )
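
Note (not part of the patch): `base` is the frequency base of the rotary position
embedding (RoPE); 10000 is the standard LLaMA value, so passing it explicitly keeps
the cache correct even if `build_rope_cache`'s default were to differ. As a rough
illustration of what the base controls, the sketch below is a hypothetical,
simplified cache builder, not AutoGPTQ's actual `build_rope_cache` implementation;
the function name and return layout are assumptions for illustration only.

    import torch

    def build_rope_cache_sketch(rotary_dim: int, max_position: int,
                                base: int = 10000, device=None,
                                dtype=torch.float32):
        # Hypothetical sketch: inverse frequencies 1 / base^(2i / rotary_dim)
        # for even channel indices i = 0, 2, 4, ...
        inv_freq = 1.0 / (base ** (
            torch.arange(0, rotary_dim, 2, device=device).float() / rotary_dim
        ))
        # Rotation angle for every (position, frequency) pair.
        positions = torch.arange(max_position, device=device).float()
        angles = torch.outer(positions, inv_freq)
        # Cache the cos/sin terms later used to rotate query/key vectors.
        return torch.cos(angles).to(dtype), torch.sin(angles).to(dtype)

A larger `base` stretches the wavelengths of the rotary frequencies, which is why
models that extend the context window sometimes change it; pinning it to 10000 here
matches the original LLaMA configuration.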