@article{ART003063527,
  author  = {Kim, Daegeon and Kim, Namgyu},
  title   = {Enhancing {LoRA} Fine-tuning Performance Using Curriculum Learning},
  journal = {Journal of The Korea Society of Computer and Information},
  issn    = {1598-849X},
  year    = {2024},
  volume  = {29},
  number  = {3},
  pages   = {43--54},
  doi     = {10.9708/jksci.2024.29.03.043},
}
TY - JOUR
AU - Kim, Daegeon
AU - Kim, Namgyu
TI - Enhancing LoRA Fine-tuning Performance Using Curriculum Learning
JO - Journal of The Korea Society of Computer and Information
PY - 2024
VL - 29
IS - 3
PB - The Korean Society Of Computer And Information
SP - 43
EP - 54
SN - 1598-849X
AB - Recently, there has been a lot of research on utilizing Language Models, and Large Language Models have achieved innovative results in various tasks. However, the practical application faces limitations due to the constrained resources and costs required to utilize Large Language Models. Consequently, there has been recent attention towards methods to effectively utilize models within given resources. Curriculum Learning, a methodology that categorizes training data according to difficulty and learns sequentially, has been attracting attention, but it has the limitation that the method of measuring difficulty is complex or not universal. Therefore, in this study, we propose a methodology based on data heterogeneity-based Curriculum Learning that measures the difficulty of data using reliable prior information and facilitates easy utilization across various tasks. To evaluate the performance of the proposed methodology, experiments were conducted using 5,000 specialized documents in the field of information communication technology and 4,917 documents in the field of healthcare. The results confirm that the proposed methodology outperforms traditional fine-tuning in terms of classification accuracy in both LoRA fine-tuning and full fine-tuning.
KW - Pre-training Language Models;Large Language Models;Curriculum Learning;LoRA;Data heterogeneity
DO - 10.9708/jksci.2024.29.03.043
ER -
Daegeon Kim and Namgyu Kim. (2024). Enhancing LoRA Fine-tuning Performance Using Curriculum Learning. Journal of The Korea Society of Computer and Information, 29(3), 43-54.
Daegeon Kim and Namgyu Kim. 2024, "Enhancing LoRA Fine-tuning Performance Using Curriculum Learning", Journal of The Korea Society of Computer and Information, vol.29, no.3, pp.43-54. Available from: doi:10.9708/jksci.2024.29.03.043
Daegeon Kim, Namgyu Kim. "Enhancing LoRA Fine-tuning Performance Using Curriculum Learning." Journal of The Korea Society of Computer and Information 29.3 (2024): 43-54.
Daegeon Kim, Namgyu Kim. Enhancing LoRA Fine-tuning Performance Using Curriculum Learning. 2024; 29(3), 43-54. Available from: doi:10.9708/jksci.2024.29.03.043
Daegeon Kim and Namgyu Kim. "Enhancing LoRA Fine-tuning Performance Using Curriculum Learning" Journal of The Korea Society of Computer and Information 29, no.3 (2024) : 43-54. doi: 10.9708/jksci.2024.29.03.043
Daegeon Kim; Namgyu Kim. Enhancing LoRA Fine-tuning Performance Using Curriculum Learning. Journal of The Korea Society of Computer and Information, 29(3), 43-54. doi: 10.9708/jksci.2024.29.03.043
Daegeon Kim; Namgyu Kim. Enhancing LoRA Fine-tuning Performance Using Curriculum Learning. Journal of The Korea Society of Computer and Information. 2024; 29(3): 43-54. doi: 10.9708/jksci.2024.29.03.043
Daegeon Kim, Namgyu Kim. Enhancing LoRA Fine-tuning Performance Using Curriculum Learning. 2024; 29(3), 43-54. Available from: doi:10.9708/jksci.2024.29.03.043
Daegeon Kim and Namgyu Kim. "Enhancing LoRA Fine-tuning Performance Using Curriculum Learning" Journal of The Korea Society of Computer and Information 29, no.3 (2024) : 43-54. doi: 10.9708/jksci.2024.29.03.043