@article{ART003009725,
author={Jaesung Shim and Kyuri Jo},
title={Detecting Adversarial Examples Using Edge-based Classification},
journal={Journal of The Korea Society of Computer and Information},
issn={1598-849X},
year={2023},
volume={28},
number={10},
pages={67-76},
doi={10.9708/jksci.2023.28.10.067}
}
TY - JOUR
AU - Jaesung Shim
AU - Kyuri Jo
TI - Detecting Adversarial Examples Using Edge-based Classification
JO - Journal of The Korea Society of Computer and Information
PY - 2023
VL - 28
IS - 10
PB - The Korean Society Of Computer And Information
SP - 67
EP - 76
SN - 1598-849X
AB - Although deep learning models have achieved remarkable results in computer vision, their vulnerability to adversarial examples remains a persistent concern. Adversarial examples are attacks that inject subtle noise into images to induce misclassification, posing a serious threat to the real-world deployment of deep learning models. In this paper, we propose a model that detects adversarial examples using differences in predictions between an edge-trained classification model and the underlying classification model. The simple step of extracting object edges and incorporating them into training increases the robustness of the classification model, and detecting adversarial examples through differences in predictions between the models enables economical and efficient detection. In our experiments, the general model showed accuracy of {49.9%, 29.84%, 18.46%, 4.95%, 3.36%} on adversarial examples (eps = {0.02, 0.05, 0.1, 0.2, 0.3}), whereas the Canny edge model showed accuracy of {82.58%, 65.96%, 46.71%, 24.94%, 13.41%}, with other edge models at similar levels, indicating that edge models are more robust against adversarial examples. In addition, adversarial example detection based on differences in predictions between the models achieved detection rates of {85.47%, 84.64%, 91.44%, 95.47%, 87.61%} for the adversarial examples at each epsilon. We expect this study to contribute to improving the reliability of deep learning models in related research and application domains such as medicine, autonomous driving, security, and national defense.
KW - Deep Learning;Computer Vision;Convolutional Neural Network;Edge-based Classification;Adversarial Example Detection
DO - 10.9708/jksci.2023.28.10.067
ER -
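The abstract above describes the approach at a high level: train one classifier on raw images and another on edge maps of the same images, then flag inputs on which the two models' predictions diverge. The following Python sketch illustrates that idea only; it is not the authors' implementation, and the Canny thresholds, the probability-gap threshold, and the predict callables are assumptions made for demonstration.

# Illustrative sketch (not the paper's code): flag an input as a likely
# adversarial example when a classifier trained on raw images and a classifier
# trained on Canny edge maps disagree. Thresholds below are assumed values.

import cv2
import numpy as np

def to_canny_edges(image_bgr: np.ndarray, low: int = 100, high: int = 200) -> np.ndarray:
    """Extract a Canny edge map; the low/high thresholds are assumptions, not from the paper."""
    gray = cv2.cvtColor(image_bgr, cv2.COLOR_BGR2GRAY)
    edges = cv2.Canny(gray, low, high)
    # Replicate to three channels so the edge classifier can reuse an RGB architecture.
    return cv2.merge([edges, edges, edges])

def is_adversarial(image_bgr: np.ndarray,
                   base_predict,      # callable: raw image -> class probabilities
                   edge_predict,      # callable: edge map  -> class probabilities
                   max_prob_gap: float = 0.5) -> bool:
    """Detect by comparing the two models' predictions on the same input."""
    p_base = np.asarray(base_predict(image_bgr))
    p_edge = np.asarray(edge_predict(to_canny_edges(image_bgr)))
    labels_disagree = int(np.argmax(p_base)) != int(np.argmax(p_edge))
    prob_gap = float(np.max(np.abs(p_base - p_edge)))
    return labels_disagree or prob_gap > max_prob_gap

In use, base_predict and edge_predict would wrap the two trained classifiers; clean inputs tend to produce consistent predictions, while adversarial noise perturbs the raw-image model more than the edge-map model, so a disagreement or a large probability gap serves as the detection signal.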
Jaesung Shim and Kyuri Jo. (2023). Detecting Adversarial Examples Using Edge-based Classification. Journal of The Korea Society of Computer and Information, 28(10), 67-76.
Jaesung Shim and Kyuri Jo. 2023, "Detecting Adversarial Examples Using Edge-based Classification", Journal of The Korea Society of Computer and Information, vol. 28, no. 10, pp. 67-76. Available from: doi:10.9708/jksci.2023.28.10.067
Jaesung Shim, Kyuri Jo "Detecting Adversarial Examples Using Edge-based Classification" Journal of The Korea Society of Computer and Information 28.10 pp.67-76 (2023) : 67.
Jaesung Shim, Kyuri Jo. Detecting Adversarial Examples Using Edge-based Classification. Journal of The Korea Society of Computer and Information. 2023; 28(10): 67-76. Available from: doi:10.9708/jksci.2023.28.10.067
Jaesung Shim and Kyuri Jo. "Detecting Adversarial Examples Using Edge-based Classification" Journal of The Korea Society of Computer and Information 28, no.10 (2023) : 67-76.doi: 10.9708/jksci.2023.28.10.067
Jaesung Shim; Kyuri Jo. Detecting Adversarial Examples Using Edge-based Classification. Journal of The Korea Society of Computer and Information, 2023, 28(10), 67-76. doi: 10.9708/jksci.2023.28.10.067
Jaesung Shim; Kyuri Jo. Detecting Adversarial Examples Using Edge-based Classification. Journal of The Korea Society of Computer and Information. 2023; 28(10): 67-76. doi: 10.9708/jksci.2023.28.10.067