@inproceedings{a0199adf9228439bad76803852f30dbd,
title = "Dual Variational Knowledge Attention for Class Incremental Vision Transformer",
abstract = "Class incremental learning (CIL) strives to emulate the human cognitive process of continuously learning and adapting to new tasks while retaining knowledge from past experiences. Despite significant advancements in this field, Transformer-based models have not fully leveraged the potential of attention mechanisms to balance the transferable knowledge between tokens and the associated information. This paper addresses this gap by using a dual variational knowledge attention (DVKA) mechanism within a Transformer-based encoder-decoder framework, tailored for CIL. DVKA mechanism aims to manage the information flow through the attention maps, ensuring a balanced representation of all classes, and mitigating the risk of information dilution as new classes are incrementally introduced. This method, leverage the information bottleneck and mutual information principle, selectively filters less relevant information, directing the model's focus towards the most significant details for each class. The DVKA is designed with two distinct attentions: one focused on the feature level and the other on the token dimension. The feature-focused attention aims to purify the complex nature of various classification tasks, ensuring a comprehensive representation of both old and new tasks. The token-focused attention mechanism highlights specific tokens, facilitating local discrimination among disparate patches and fostering global coordination for a spectrum of task tokens. Our work is a major stride towards improving transformer models for class incremental learning, presenting a theoretical rationale and effective experimental results on three widely-used datasets.",
keywords = "Continual Learning, Transformers, Vision Transformer",
author = "Haoran Duan and Rui Sun and Varun Ojha and Tejal Shah and Zhuoxu Huang and Zizhou Ouyang and Yawen Huang and Yang Long and Rajiv Ranjan",
note = "Publisher Copyright: {\textcopyright} 2024 IEEE.; 2024 International Joint Conference on Neural Networks, IJCNN 2024 ; Conference date: 30-06-2024 Through 05-07-2024",
year = "2024",
month = jun,
day = "30",
doi = "10.1109/ijcnn60899.2024.10650317",
language = "English",
volume = "30",
series = "2024 International Joint Conference on Neural Networks (IJCNN)",
publisher = "IEEE Press",
booktitle = "2024 International Joint Conference on Neural Networks (IJCNN)",
address = "United States of America",
}