Unofficial implementation of PEFT methods (LoRA, QLoRA, and Prefix Tuning) in Python
References:
@article{hu2021lora,
  title={LoRA: Low-Rank Adaptation of Large Language Models},
  author={Hu, Edward J. and Shen, Yelong and Wallis, Phillip and Allen-Zhu, Zeyuan and Li, Yuanzhi and Wang, Shean and Wang, Lu and Chen, Weizhu},
  journal={arXiv preprint arXiv:2106.09685},
  year={2021}
}

@article{dettmers2023qlora,
  title={QLoRA: Efficient Finetuning of Quantized LLMs},
  author={Dettmers, Tim and Pagnoni, Artidoro and Holtzman, Ari and Zettlemoyer, Luke},
  journal={arXiv preprint arXiv:2305.14314},
  year={2023}
}

@article{li2021prefix,
  title={Prefix-Tuning: Optimizing Continuous Prompts for Generation},
  author={Li, Xiang Lisa and Liang, Percy},
  journal={arXiv preprint arXiv:2101.00190},
  year={2021}
}
Coming Soon!
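
Until the implementation is released, the sketch below illustrates the core LoRA idea from Hu et al. (2021): a frozen pretrained weight W plus a trainable low-rank update B A, scaled by alpha/r. It assumes a PyTorch environment; the class and parameter names (`LoRALinear`, `r`, `lora_alpha`) are illustrative placeholders, not this repository's actual API.

```python
# Minimal LoRA sketch (assumes PyTorch); names here are illustrative, not the repo's API.
import torch
import torch.nn as nn


class LoRALinear(nn.Module):
    """Frozen linear layer with a trainable low-rank update: W + (alpha / r) * B @ A."""

    def __init__(self, in_features: int, out_features: int, r: int = 8, lora_alpha: float = 16.0):
        super().__init__()
        # Frozen pretrained weight W (in practice, loaded from a checkpoint).
        self.weight = nn.Parameter(torch.randn(out_features, in_features), requires_grad=False)
        # Trainable low-rank factors: A starts as small noise, B as zeros,
        # so the update is zero at initialization and training starts from the pretrained model.
        self.lora_A = nn.Parameter(torch.randn(r, in_features) * 0.01)
        self.lora_B = nn.Parameter(torch.zeros(out_features, r))
        self.scaling = lora_alpha / r

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        base = x @ self.weight.T                      # frozen path: x W^T
        update = (x @ self.lora_A.T) @ self.lora_B.T  # low-rank path: x A^T B^T
        return base + self.scaling * update


# Usage: only lora_A and lora_B receive gradients; the base weight stays frozen.
layer = LoRALinear(768, 768, r=8)
y = layer(torch.randn(2, 768))
```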