| File | Last commit message | Last commit date |
| --- | --- | --- |
| __init__.py | support qwen | 2023-08-08 19:27:43 +09:00 |
| _base.py | extend to support qlinear_exllama's fusion | 2023-08-11 14:52:26 +08:00 |
| _const.py | Merge branch 'main' into xformers_integration | 2023-08-10 15:27:11 +08:00 |
| _utils.py | patch for transformers compatibility | 2023-08-09 14:23:59 +00:00 |
| auto.py | Merge branch 'main' into xformers_integration | 2023-08-10 15:27:11 +08:00 |
| baichuan.py | add baichuan model attention fusion logic | 2023-08-11 19:12:43 +08:00 |
| codegen.py | Add support for CodeGen/2 | 2023-05-08 17:34:00 +03:00 |
| gpt_bigcode.py | Add support for GPTBigCode | 2023-05-08 12:28:29 +03:00 |
| gptj.py | using transformers gptj rope implementation | 2023-08-11 18:26:23 +08:00 |
| internlm.py | Add support for InternLM | 2023-07-07 09:25:40 -07:00 |
| llama.py | explicit set "base" value | 2023-08-13 16:14:01 +08:00 |
| qwen.py | support qwen | 2023-08-08 19:27:43 +09:00 |
| rw.py | support falcon | 2023-05-27 07:53:39 +09:00 |