From f64c71e779bc5625b603d152cf08a27f0fb8c610 Mon Sep 17 00:00:00 2001
From: TheBloke
Date: Fri, 5 May 2023 13:21:13 +0100
Subject: [PATCH] Change references to 'group_size' to 'groupsize' to match rest of this file

---
 auto_gptq/modeling/_utils.py | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/auto_gptq/modeling/_utils.py b/auto_gptq/modeling/_utils.py
index 3984aaf..ddb27f9 100644
--- a/auto_gptq/modeling/_utils.py
+++ b/auto_gptq/modeling/_utils.py
@@ -82,7 +82,7 @@ def pack_model(
     model,
     quantizers,
     bits,
-    group_size,
+    groupsize,
     use_triton=False,
     use_cuda_fp16=True,
     desc_act=False,
@@ -103,7 +103,7 @@ def pack_model(
     logger.info('Packing model...')
     layers = find_layers(model)
     layers = {n: layers[n] for n in quantizers}
-    make_quant(model, quantizers, bits, group_size, use_triton=use_triton, use_cuda_fp16=use_cuda_fp16, desc_act=desc_act)
+    make_quant(model, quantizers, bits, groupsize, use_triton=use_triton, use_cuda_fp16=use_cuda_fp16, desc_act=desc_act)
     qlayers = find_layers(model, [QuantLinear])
     for name in qlayers:
         logger.info(name)