@article{BORN2019,
  author   = {Jan T{\"u}nnermann and Christian Born and B{\"a}rbel Mertsching},
  title    = {Saliency from Growing Neural Gas: Learning Pre-attentional Structures for a Flexible Attention System},
  journal  = {IEEE Transactions on Image Processing},
  volume   = {28},
  number   = {11},
  pages    = {5296--5307},
  month    = {September},
  year     = {2019},
  issn     = {1941-0042},
  abstract = {Artificial visual attention has been an active research area for over two decades. In particular, the concept of saliency has been implemented in many different ways. Early approaches aimed at closely modeling saliency processing with concepts from biological attention to provide (at least in the long run) general-purpose attention for technical systems. More recent approaches have departed from this agenda, turning to more specific attention-guided tasks, most notably the accurate extraction of salient objects, for which large-scale ground-truth datasets make it possible to quantify progress. While the first type of model is troubled by weak performance in these specific tasks, the second type, as we show with a new benchmark, has lost the ability to predict saliency in the original sense, which may be an important factor for future general-purpose attention systems. Here, we describe a new approach using growing neural gas to obtain pre-attentional structures for a scene at an early processing stage. On this basis, traditional saliency concepts can be applied while at the same time they can be linked to mechanisms that make models successful in salient object detection. The model shows high performance at predicting traditional saliency and makes substantial progress toward salient object detection, although it cannot reach the top-level performance of some specialized methods. We discuss the important implications of our findings.}
}