From c89bb6450c36ad425f3356a502d2238ae625c37c Mon Sep 17 00:00:00 2001
From: PanQiWei <594557445@qq.com>
Date: Wed, 24 May 2023 17:43:38 +0800
Subject: [PATCH] correct typo of function name

---
 auto_gptq/modeling/_base.py  | 2 +-
 auto_gptq/modeling/_utils.py | 4 ++--
 2 files changed, 3 insertions(+), 3 deletions(-)

diff --git a/auto_gptq/modeling/_base.py b/auto_gptq/modeling/_base.py
index c7e4ece..9a3969c 100644
--- a/auto_gptq/modeling/_base.py
+++ b/auto_gptq/modeling/_base.py
@@ -607,7 +607,7 @@ class BaseGPTQForCausalLM(nn.Module, PushToHubMixin):
         )
 
         if low_cpu_mem_usage:
-            make_sure_not_tensor_in_meta_device(model, use_triton, quantize_config.desc_act, quantize_config.group_size)
+            make_sure_no_tensor_in_meta_device(model, use_triton, quantize_config.desc_act, quantize_config.group_size)
 
         accelerate.utils.modeling.load_checkpoint_in_model(
             model,
diff --git a/auto_gptq/modeling/_utils.py b/auto_gptq/modeling/_utils.py
index d893dc7..cdc1120 100644
--- a/auto_gptq/modeling/_utils.py
+++ b/auto_gptq/modeling/_utils.py
@@ -163,7 +163,7 @@ def simple_dispatch_model(model, device_map):
     return model
 
 
-def make_sure_not_tensor_in_meta_device(model, use_triton, desc_act, group_size):
+def make_sure_no_tensor_in_meta_device(model, use_triton, desc_act, group_size):
     QuantLinear = dynamically_import_QuantLinear(use_triton, desc_act, group_size)
     for n, m in model.named_modules():
         if isinstance(m, QuantLinear) and m.bias.device == torch.device("meta"):
@@ -180,5 +180,5 @@ __all__ = [
     "pack_model",
     "check_and_get_model_type",
     "simple_dispatch_model",
-    "make_sure_not_tensor_in_meta_device"
+    "make_sure_no_tensor_in_meta_device"
 ]
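
Note (not part of the patch): the renamed helper guards the low_cpu_mem_usage path, where a model is first materialized on the "meta" device and must be fully moved to a real device before a checkpoint is loaded into it. Below is a minimal, generic sketch of that kind of check; assert_no_meta_tensors is a hypothetical stand-in for the project's QuantLinear-specific make_sure_no_tensor_in_meta_device, written against plain torch.nn modules.

import torch
import torch.nn as nn

def assert_no_meta_tensors(model: nn.Module) -> None:
    """Raise if any parameter or buffer is still on the 'meta' device.

    Tensors on the meta device carry shape/dtype metadata but no storage,
    so loading checkpoint weights into them later would fail.
    """
    meta = torch.device("meta")
    stranded = [
        name
        for name, tensor in list(model.named_parameters()) + list(model.named_buffers())
        if tensor.device == meta
    ]
    if stranded:
        raise ValueError(f"tensors still on meta device: {stranded}")

In the patched code the same idea is applied more narrowly: only QuantLinear modules are inspected, because those are the layers the quantized checkpoint will be loaded into.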