AutoGPTQ/auto_gptq/modeling/_const.py
LaaZa bf47892b81 Merge branch 'main' into MPT
# Conflicts:
#	auto_gptq/modeling/__init__.py
#	auto_gptq/modeling/_const.py
#	auto_gptq/modeling/auto.py
2023-06-02 15:01:10 +03:00

13 lines
465 B
Python

from packaging.version import parse as parse_version
from torch import device
from transformers import __version__ as transformers_version
# Canonical torch devices used by the quantization/inference code paths.
CPU = device("cpu")
CUDA_0 = device("cuda:0")

# HF `model_type` identifiers that GPTQ quantization supports.
SUPPORTED_MODELS = ["bloom", "gptj", "gpt2", "gpt_neox", "opt", "moss", "gpt_bigcode", "codegen", "RefinedWebModel", "RefinedWeb", "mpt"]
# LLaMA architecture was added to transformers in v4.28.0, so only
# advertise support when the installed transformers is new enough.
if parse_version(transformers_version) >= parse_version("v4.28.0"):
    SUPPORTED_MODELS.append("llama")

__all__ = ["CPU", "CUDA_0", "SUPPORTED_MODELS"]