@mastersthesis{ENNS17,
  author   = {Dieter Enns},
  title    = {Object Extraction Using the Motion Feature Saliency and Depth in GNG-based Attention System},
  month    = {January},
  year     = {2017},
  school   = {Paderborn University},
  type     = {Master's thesis},
  abstract = {Artificial visual attention systems mimic the ability of biological systems to select relevant from irrelevant information, thereby enhancing technical cognitive systems. The ``Growing Neural Gas'' (GNG), a self-organizing image representation, has proved to be a versatile framework for attention models that consider features such as color, size, orientation, symmetry, and eccentricity. These features enable the detection of salient image elements in static scenes. However, they do not allow the estimation of saliency (a measure of bottom-up conspicuity) in dynamic scenes, nor do they support contour-accurate extraction of the target objects, which is beneficial for many follow-up tasks. This thesis aims to improve on this with a novel approach: motion information is extracted from spatiotemporal GNG networks to estimate motion saliency. Furthermore, depth information, as provided by structured-light cameras, is included to segregate targets from background elements.}
}