@article{ART003306279,
  author    = {Lee, Tae-O and Kim, Taekook},
  title     = {Development and Evaluation of a {LoRA}-Fine-Tuned Large Language Model for Elementary Economic Education},
  journal   = {Journal of Internet of Things and Convergence},
  publisher = {The Korea Internet of Things Society},
  issn      = {2466-0078},
  year      = {2026},
  volume    = {12},
  number    = {1},
  pages     = {17--23},
}
TY - JOUR
AU - Tae-O Lee
AU - KIM, TAEKOOK
TI - Development and Evaluation of a LoRA-Fine-Tuned Large Language Model for Elementary Economic Education
JO - Journal of Internet of Things and Convergence
PY - 2026
VL - 12
IS - 1
PB - The Korea Internet of Things Society
SP - 17
EP - 23
SN - 2466-0078
AB - This study aims to develop a conversational learning model designed to support elementary students’ understanding of economic concepts and to empirically validate the educational feasibility of Large Language Models (LLMs). To this end, a custom dataset comprising 1,000 question-and-answer pairs was constructed, capturing everyday economic scenarios closely related to the lived experiences of elementary students. A domain-specific model for economic education was then developed by applying Low-Rank Adaptation (LoRA)-based fine-tuning to the LLaMA 3.1 8B Instruct model.
Experimental results demonstrated that the fine-tuned model achieved a substantial performance gain, with the Bilingual Evaluation Understudy (BLEU) score increasing from 0.18 to 5.67—approximately a 31-fold improvement over the base model. Furthermore, the BERTScore (F1), which measures semantic similarity, rose from 0.6642 to 0.7743, confirming enhanced quality and accuracy in sentence generation. These findings suggest that Parameter-Efficient Fine-Tuning (PEFT) enables the effective construction of domain-specific language models even within environments characterized by compact datasets and limited GPU resources. This study establishes an empirical foundation for designing LLM-based economic education systems for primary learners and highlights the potential for evolving into personalized, adaptive educational tools through future user-centered evaluations and dataset expansion.
KW - Large Language Model (LLM);Economic Education;Fine-tuning;LoRA;Conversational Learning
DO -
UR -
ER -
Tae-O Lee and KIM, TAEKOOK. (2026). Development and Evaluation of a LoRA-Fine-Tuned Large Language Model for Elementary Economic Education. Journal of Internet of Things and Convergence, 12(1), 17-23.
Tae-O Lee and KIM, TAEKOOK. 2026, "Development and Evaluation of a LoRA-Fine-Tuned Large Language Model for Elementary Economic Education", Journal of Internet of Things and Convergence, vol.12, no.1, pp.17-23.
Tae-O Lee, KIM, TAEKOOK "Development and Evaluation of a LoRA-Fine-Tuned Large Language Model for Elementary Economic Education" Journal of Internet of Things and Convergence 12.1 (2026): 17-23.
Tae-O Lee, KIM, TAEKOOK. Development and Evaluation of a LoRA-Fine-Tuned Large Language Model for Elementary Economic Education. Journal of Internet of Things and Convergence. 2026; 12(1), 17-23.
Tae-O Lee and KIM, TAEKOOK. "Development and Evaluation of a LoRA-Fine-Tuned Large Language Model for Elementary Economic Education" Journal of Internet of Things and Convergence 12, no.1 (2026) : 17-23.
Tae-O Lee; KIM, TAEKOOK. Development and Evaluation of a LoRA-Fine-Tuned Large Language Model for Elementary Economic Education. Journal of Internet of Things and Convergence, 2026; 12(1), 17-23.
Tae-O Lee; KIM, TAEKOOK. Development and Evaluation of a LoRA-Fine-Tuned Large Language Model for Elementary Economic Education. Journal of Internet of Things and Convergence. 2026; 12(1) 17-23.
Tae-O Lee, KIM, TAEKOOK. Development and Evaluation of a LoRA-Fine-Tuned Large Language Model for Elementary Economic Education. Journal of Internet of Things and Convergence. 2026; 12(1), 17-23.
Tae-O Lee and KIM, TAEKOOK. "Development and Evaluation of a LoRA-Fine-Tuned Large Language Model for Elementary Economic Education" Journal of Internet of Things and Convergence 12, no.1 (2026) : 17-23.