@article{ART003187502,
  author  = {Cho, Wonjun and Yoo, Jaesung and Kim, Sang-Min and Jang, Jaeeun},
  title   = {{RAG}-Enhanced small Large Language Models: Enhancing Battlefield Analysis through Knowledge Distillation of Large Language Models},
  journal = {Journal of The Korea Society of Computer and Information},
  year    = {2025},
  volume  = {30},
  number  = {3},
  pages   = {43--57},
  issn    = {1598-849X},
}
TY - JOUR
AU - Wonjun Cho
AU - Jaesung Yoo
AU - Sang-Min Kim
AU - Jaeeun Jang
TI - RAG-Enhanced small Large Language Models: Enhancing Battlefield Analysis through Knowledge Distillation of Large Language Models
JO - Journal of The Korea Society of Computer and Information
PY - 2025
VL - 30
IS - 3
PB - The Korean Society Of Computer And Information
SP - 43
EP - 57
SN - 1598-849X
AB - The increasing complexity of modern battlefields and the importance of real-time data processing have heightened the need for effective battlefield situation analysis systems. This study proposes a battlefield analysis system utilizing Large Language Models (LLMs), specifically introducing an advanced approach that combines Retrieval-Augmented Generation (RAG) with Supervised Fine-Tuning (SFT). To address the hallucination problems and lost-in-the-middle phenomenon inherent in existing RAG systems, we introduce a triple-structured learning approach that incorporates reference documents in the SFT process. Based on synthetic battlefield datasets developed in collaboration with military experts, our experimental results demonstrate exceptional performance in source extraction accuracy and response quality evaluation. Notably, when applying triple-structured SFT to an 8B parameter model, we achieved comparable performance to a 405B parameter model, proving its practicality in actual battlefield environments. Furthermore, our lightweight model enhanced with specialized training strategies showed minimal performance degradation compared to larger models, suggesting its viability for deployment in resource-constrained environments. This research demonstrates the effective application of LLMs in battlefield situation analysis and presents a novel direction for military domains requiring real-time data processing and high reliability.
KW - Large Language Models;Retrieval-Augmented Generation;Supervised Fine-Tuning;Prompt Chaining;Battlefield Situation Analysis
DO -
UR -
ER -
Wonjun Cho, Jaesung Yoo, Sang-Min Kim and Jaeeun Jang. (2025). RAG-Enhanced small Large Language Models: Enhancing Battlefield Analysis through Knowledge Distillation of Large Language Models. Journal of The Korea Society of Computer and Information, 30(3), 43-57.
Wonjun Cho, Jaesung Yoo, Sang-Min Kim and Jaeeun Jang. 2025, "RAG-Enhanced small Large Language Models: Enhancing Battlefield Analysis through Knowledge Distillation of Large Language Models", Journal of The Korea Society of Computer and Information, vol.30, no.3, pp.43-57.
Wonjun Cho, Jaesung Yoo, Sang-Min Kim, Jaeeun Jang "RAG-Enhanced small Large Language Models: Enhancing Battlefield Analysis through Knowledge Distillation of Large Language Models" Journal of The Korea Society of Computer and Information 30.3 (2025): 43-57.
Wonjun Cho, Jaesung Yoo, Sang-Min Kim, Jaeeun Jang. RAG-Enhanced small Large Language Models: Enhancing Battlefield Analysis through Knowledge Distillation of Large Language Models. Journal of The Korea Society of Computer and Information. 2025; 30(3), 43-57.
Wonjun Cho, Jaesung Yoo, Sang-Min Kim and Jaeeun Jang. "RAG-Enhanced small Large Language Models: Enhancing Battlefield Analysis through Knowledge Distillation of Large Language Models" Journal of The Korea Society of Computer and Information 30, no.3 (2025) : 43-57.
Wonjun Cho; Jaesung Yoo; Sang-Min Kim; Jaeeun Jang. RAG-Enhanced small Large Language Models: Enhancing Battlefield Analysis through Knowledge Distillation of Large Language Models. Journal of The Korea Society of Computer and Information, 2025, 30(3), 43-57.
Wonjun Cho; Jaesung Yoo; Sang-Min Kim; Jaeeun Jang. RAG-Enhanced small Large Language Models: Enhancing Battlefield Analysis through Knowledge Distillation of Large Language Models. Journal of The Korea Society of Computer and Information. 2025; 30(3) 43-57.
Wonjun Cho, Jaesung Yoo, Sang-Min Kim, Jaeeun Jang. RAG-Enhanced small Large Language Models: Enhancing Battlefield Analysis through Knowledge Distillation of Large Language Models. Journal of The Korea Society of Computer and Information. 2025; 30(3), 43-57.
Wonjun Cho, Jaesung Yoo, Sang-Min Kim and Jaeeun Jang. "RAG-Enhanced small Large Language Models: Enhancing Battlefield Analysis through Knowledge Distillation of Large Language Models" Journal of The Korea Society of Computer and Information 30, no.3 (2025) : 43-57.