@article{TM_COGCOMP_2013,
  author   = {T{\"u}nnermann, Jan and Mertsching, B{\"a}rbel},
  title    = {Region-Based Artificial Visual Attention in Space and Time},
  journal  = {Cognitive Computation},
  year     = {2014},
  month    = mar,
  volume   = {6},
  number   = {1},
  pages    = {125--143},
  issn     = {1866-9964},
  abstract = {Mobile robots have to deal with an enormous amount of visual data containing static and dynamic stimuli. Depending on the task, only small portions of a scene are relevant. Artificial attention systems filter information at early stages. Among the various methods proposed to implement such systems, the region-based approach has proven to be robust and especially suited for integrating top-down influences. This concept was recently transferred to the spatiotemporal domain to obtain motion saliency. A full-featured integration of the spatial and spatiotemporal systems is presented here. We propose a biologically inspired two-stream system, which allows to use different spatial and temporal resolutions and to pick off spatiotemporal saliency at early stages. We compare the output to classic models and demonstrate the flexibility of the integrated approach in different experiments. These include online processing of continuous input, a task similar to thumbnail extraction and a top-down task of selecting specific moving and non-moving objects.},
}