@comment{GET Lab publication list, 2012. Conventions: names as "Last, First";
  month via standard three-letter macros; page/volume ranges with "--";
  classic-BibTeX accent escapes ({\"u} etc.); acronyms and German nouns braced
  in titles to survive sentence-casing styles.}

@proceedings{Avd12,
  editor    = {Avdelas, Aris},
  title     = {{SEFI} 40th Conference: Engineering Education 2020: Meet the Future, 23--26 Sept. 2012, {Thessaloniki}, {Greece}},
  publisher = {SEFI},
  address   = {Brussels},
  month     = oct,
  year      = {2012},
  isbn      = {978-2-87352-005-2},
}

@mastersthesis{Bo2012,
  author   = {Born, Christian},
  title    = {Adaptive Top-Down {Vorlagen} zur {Steuerung} {K{\"u}nstlicher} {Aufmerksamkeit}},
  school   = {Paderborn University},
  type     = {Bachelor's thesis},
  month    = may,
  year     = {2012},
  abstract = {Um f{\"u}r die maschinelle Wahrnehmung wichtige Bereiche in Bildern zu finden, werden diese durch k{\"u}nstliche Aufmerksamkeitssysteme analysiert. Diese sind an biologischer visueller Aufmerksamkeit orientiert und berechnen die Salienz eines Bildelementes, die Auff{\"a}lligkeit eines Bildelements im Vergleich zu seinen Nachbarn. Im GET Lab an der Universit{\"a}t Paderborn wird ein Ansatz verwendet, der auf einer Segmentierung des Eingabebildes basiert. Die entstehende Regionsliste wird dann verwendet um die Salienz f{\"u}r jede Region zu berechnen. Daf{\"u}r k{\"o}nnen sowohl Bottom-Up-Verfahren (datengetrieben) als auch Top-Down-Verfahren (Einfluss von Wissen) verwendet werden. In der aktuellen Umsetzung ist der Top-Down-Mechanismus darauf beschr{\"a}nkt, eine Vorgabe-Region (Template) zu ber{\"u}cksichtigen. In dieser Bachelorarbeit wird die aktuelle Implementierung um die Unterst{\"u}tzung multiregionaler Templates sowie das Fokussieren von Bildelementen erweitert um komplexeres Verhalten mit Top-Down-Aufmerksamkeit zu unterst{\"u}tzen. Die ``SICK robot day'' Aufgabe dient dabei als Beispielszenario, das multiregionale Templates erfordert. Dort m{\"u}ssen bestimmte B{\"a}lle in Tore bef{\"o}rdert werden. Wobei die B{\"a}lle einfache Objekte darstellen, sind die Tore komplexer und erfordern multiregionale Templates und der Fokus muss auf diesen komplexen Zielen gehalten werden. Die Implementierung wird mithilfe des Meta-Betriebssystems ROS (Robot Operating System) umgesetzt und mit der Simulationsumgebung SIMORE getestet.},
}

@book{But11,
  author    = {Butz, Tillmann},
  title     = {{Fouriertransformation} f{\"u}r {Fu{\ss}g{\"a}nger}},
  edition   = {7},
  publisher = {Vieweg {\&} Teubner},
  month     = oct,
  year      = {2012},
  note      = {(German)},
  isbn      = {978-3-8348-0946-9},
}

@book{CWHS12,
  author    = {Clausert, Horst and Wiesemann, Gunther and Hinrichsen, Volker and Stenzel, J{\"u}rgen},
  title     = {{Grundgebiete} der {Elektrotechnik} 1: {Gleichstromnetze}, {Operationsverst{\"a}rkerschaltungen}, elektrische und magnetische {Felder}},
  edition   = {11},
  publisher = {Oldenbourg Verlag},
  month     = jan,
  year      = {2012},
  isbn      = {978-3-486-59719-6},
}

@article{DATS_FK_2012,
  author  = {Domik, Gitta and Arens, Stephan and T{\"u}nnermann, Jan and Scharlau, Ingrid},
  title   = {{Evaluierung} medizinischer {Volumenrendering-Algorithmen} durch empirische {Studien}},
  journal = {FifF Kommunikation},
  number  = {3},
  pages   = {45--50},
  month   = sep,
  year    = {2012},
}

@mastersthesis{Ga2011,
  author   = {Gao, Yuan},
  title    = {Hierarchical Color Segmentation for Region-Based Visual Attention},
  school   = {Paderborn University},
  type     = {Bachelor's thesis},
  month    = feb,
  year     = {2012},
  abstract = {In control of mobile robots, computer vision plays an important role. The current saliency detecting approach used in GET Lab performs color-segmentation as an initial step and then determines saliency using region lists. This work extends the existing implementation by enabling support for hierarchical region-based segmentation. The input image is segmented in different levels. At first the image is segmented into a few regions with coarse granularity. In the next step each region produced is segmented in a finer granularity, so that the big regions are split into smaller ones. The process continues until a predefined level is reached, while all the parent-child (region-subregion) relations are stored as a region-tree. Experiments are conducted to test this approach applied to attention-related problems: Time pressure is simulated by limiting processing to certain levels; Fast scene classification based on the ``Gist'' obtained from low grain regions; and extracting objects by backtracking from salient subregions to parent regions.},
}

@book{HS12,
  author    = {Haas, Oliver and Spieker, Christian},
  title     = {{Aufgaben} zur {Elektrotechnik} 1},
  publisher = {Oldenbourg},
  month     = oct,
  year      = {2012},
  isbn      = {978-3-486-71680-1},
}

@inproceedings{HM_TL_2012,
  author    = {Hennig, Markus and Mertsching, B{\"a}rbel},
  title     = {{Vermittlung} von {Mathematikkenntnissen} im {Kontext} ingenieurwissenschaftlicher {Fachlehre}},
  booktitle = {TeachING-LearnING.EU Fachtagung},
  month     = may,
  year      = {2012},
}

@inproceedings{HM_SEFI_2012,
  author    = {Hennig, Markus and Mertsching, B{\"a}rbel},
  title     = {Situated Acquisition of Mathematical Knowledge -- Teaching Mathematics within Electrical Engineering Courses},
  booktitle = {40th Annual Conference of the European Society for Engineering Education},
  publisher = {SEFI},
  pages     = {264--265},
  month     = sep,
  year      = {2012},
  isbn      = {978-2-87352-005-2},
  abstract  = {In recent years, a number of studies have shown multifaceted issues in regard to the mathematical expertise of undergraduate engineering students. Referring to this, the students' increasingly diverse range of levels and types of qualifications and their continuing deterioration of mathematical competencies are often referred to as one of the major challenges. More critical concerns stem from the fact that the initial phase usually comprises lectures requiring mathematical expertise which in some cases clearly goes beyond school mathematics, but will be presented only later in mathematical courses. Taking into account that the first year is critical for student success and to avoid high dropout rates, revised concepts for teaching engineering mathematics in respect to individual students' demands are required. Furthermore, overloaded schedules lead to the conclusion that students would not engage in additional uncoupled academic programs in regard to mathematics. In this article, a new generalizable concept to address the above mentioned challenges within undergraduate engineering courses is introduced by way of example in a Fundamentals of Electrical Engineering lecture.},
}

@mastersthesis{Hol12,
  author   = {Holtmeier, Hedda},
  title    = {{Evaluierung} einer logarithmisch skalierten {Fensterfunktion} im {Rahmen} der stereoskopischen {Korrespondenzbestimmung}},
  school   = {Paderborn University},
  type     = {Studienarbeit},
  month    = jul,
  year     = {2012},
  abstract = {Der Mensch ist durch das Sehen und durch das Verarbeiten der aufgenommenen Informationen in der Lage r{\"a}umlich zu sehen. Dies versetzt ihn in die Position, gezielte Handlungen durchzuf{\"u}hren, wie beispielsweise das Greifen von Objekten oder das Reagieren auf ein Ereignis in seiner Umgebung. Die f{\"u}r das r{\"a}umliche Sehen erforderlichen Tiefeneindr{\"u}cke werden aus den zwei durch die Augen aufgenommenen Bilder konstruiert. Um die Wahrnehmung von Tiefeneindr{\"u}cken auf technische Systeme {\"u}bertragen zu k{\"o}nnen, ist es notwendig, die Abbildung eines Objektpunktes in einer Ansicht einem entsprechenden Bildpunkt in der anderen Ansicht zuzuordnen. Diese Zuordnung wird als Korrespondenzproblem bezeichnet und ist nicht eindeutig l{\"o}sbar. Der Kostenrelaxations-Stereoalgorithmus stellt einen von vielen Ans{\"a}tzen zur L{\"o}sung des Korrespondenzproblems dar. In dieser Arbeit wurde eine biologisch motivierte logarithmisch skalierte Fensterfunktion f{\"u}r den Kostenrelaxationsansatz entwickelt, um Fehlzuweisungen zu reduzieren. In einer Evaluierung wurde unter Verwendung des Kostenrelaxations-Stereoalgorithmus diese Fensterform den bisher h{\"a}ufig verwendeten quadratischen und runden Fensterformen gegen{\"u}bergestellt.},
}

@book{HMHB12,
  author    = {Hughes, Edward and McKenzie Smith, Ian and Hiley, John and Brown, Keith},
  title     = {Electrical and Electronic Technology},
  edition   = {11},
  publisher = {Prentice-Hall},
  month     = jan,
  year      = {2012},
  isbn      = {0-273-75510-2},
}

@comment{Key renamed from "J{"a}h12": braces and accent escapes are not valid
  inside a BibTeX citation key. Update any \cite{} accordingly.}
@book{Jaeh12,
  author    = {J{\"a}hne, Bernd},
  title     = {Digitale {Bildverarbeitung} und {Bildgewinnung}},
  edition   = {7},
  publisher = {Springer},
  month     = oct,
  year      = {2012},
  isbn      = {3-642-04951-6},
}

@inproceedings{LK_ICIUS_2012,
  author    = {Kleinmann, Ludmilla and Rabe, Jonathan and Mertsching, B{\"a}rbel},
  title     = {Spatio-temporal Learning for a Rescue Robot},
  booktitle = {International Conference on Intelligent Unmanned Systems},
  month     = oct,
  year      = {2012},
  abstract  = {The efficiency of autonomous rescue robots is characterized by their flexible adjustment to unpredictable situations. Considering the remarkable adaptability of mammals we have utilized the recent neuroscience insights about the mammalian brain's organization and related learning processes to realize a spatio-temporal learning rescue agent with flexible behavior.},
}

@inproceedings{LK_BM_IRAM_2012,
  author    = {Kleinmann, Ludmilla and Mertsching, B{\"a}rbel},
  title     = {Biologically Inspired Architecture for Spatiotemporal Learning of Mobile Robots},
  booktitle = {Communications in Computer and Information Science},
  series    = {Communications in Computer and Information Science},
  volume    = {330},
  publisher = {Springer},
  pages     = {174--182},
  month     = nov,
  year      = {2012},
  abstract  = {Biological systems can adapt excellently to the demands of a dynamic world and changing tasks. What kind of information processing and reasoning do they use? There are numerous studies in psychology, cognitive neuroscience and artificial intelligence which complement each other and help in getting a better understanding of this riddle. Our paper presents a biologically inspired architecture for a spatiotemporal learning system. Multiple interconnected memory structures are used to incorporate different learning paradigms. Concurrent inherent learning processes complete the functionality of corresponding memory types. Our architecture has been evaluated in the context of mobile rescue robots: The task consists of searching objects while navigating in an unknown maze.},
}

@inproceedings{kotthaeuser2012triangulat,
  author    = {Kotth{\"a}user, Tobias and Mertsching, B{\"a}rbel},
  title     = {Triangulation-Based Plane Extraction for {3D} Point Clouds},
  booktitle = {International Conference on Intelligent Robotics and Applications ({ICIRA})},
  month     = sep,
  year      = {2012},
  abstract  = {The processing of point clouds for extracting semantic knowledge plays a crucial role in state of the art mobile robot applications. In this work, we examine plane extraction methods that do not rely on additional point features such as normals, but rather on random triangulation in order to allow for a fast segmentation. When it comes to an implementation in this context, typically the following question arises: RANSAC or Hough transform? In this paper, we examine both methods and propose a novel plane extraction approach based on the randomized 3D Hough transform. Our main concerns for improvement are extraction time, accuracy, robustness as well as memory consumption.},
}

@inproceedings{MM_AIS12_2012,
  author    = {Mirabdollah, Hossein and Mertsching, B{\"a}rbel},
  editor    = {Kamel, Mohamed},
  title     = {Bearing Only {SLAM}: A New Particle Filter Based Approach},
  booktitle = {International Conference on Autonomous and Intelligent Systems ({AIS} 2012)},
  series    = {Lecture Notes in Computer Science},
  volume    = {7326},
  publisher = {Springer},
  pages     = {116--125},
  month     = mar,
  year      = {2012},
  isbn      = {978-3-642-31367-7},
  abstract  = {In this paper a new method to address bearing-only SLAM using particle filters is proposed. We use a set of line pieces to model the uncertainties of landmarks and derive a proper formulation to modify the joint robot and landmark assumptions in the context of a particle filter approach.},
}

@inproceedings{HM_BM_ROBIO_2012,
  author    = {Mirabdollah, Hossein and Mertsching, B{\"a}rbel},
  title     = {Monocular {SLAM}: Using Trapezoids to Model Landmark Uncertainties},
  booktitle = {2012 {IEEE} International Conference on Robotics and Biomimetics ({ROBIO} 2012)},
  pages     = {482--488},
  month     = dec,
  year      = {2012},
  isbn      = {978-1-4673-2125-9},
  abstract  = {In this paper we develop a new method to address the monocular SLAM problem based on particle filters. We already offered an approach to use line pieces attached to particles to model uncertainties of landmarks in case the measurement noise is ignorable. However, when using a consumer camera on a mobile robot, the noise resulting especially from the unavoidable vibration of the camera cannot be neglected. In this paper we present a modification demonstrating how the noise can be handled. The high performance of the algorithm is demonstrated at the end of this paper through simulations and by presenting the result of the implementation of this method on a real robot.},
}

@inproceedings{MM_ISVC_12,
  author    = {Mohamed, Mahmoud and Mertsching, B{\"a}rbel},
  title     = {{TV-L1} Optical Flow Estimation with Image Details Recovering Based on Modified Census Transform},
  booktitle = {International Symposium on Visual Computing},
  month     = jul,
  year      = {2012},
  abstract  = {This paper proposes an improved optical flow estimation approach based on the total variational L1 minimization technique with weighted median filter. Furthermore, recovering image details using modified census transform algorithm improves the overall accuracy of estimating large scale displacements optical flow. On the other hand, the use of the Taylor expansion approximation in most of the optical flow approaches limits the ability to estimate movement of fast objects. Hence, a coarse-to-fine scheme is used to overcome such a problem at the cost of losing small details in the interpolation process where initial values are propagated from the coarse level to the fine one. The proposed algorithm improves the accuracy of the estimation process by integrating the correspondence results of the modified census transform into the coarse-to-fine module in order to recover the lost details. The outcome of the proposed approach yields state-of-the-art results on the Middlebury optical flow evaluations.},
}

@inproceedings{MMO_BAVu_2012,
  author    = {Mohamed, Mahmoud and Mertsching, B{\"a}rbel},
  title     = {Application of Optical Flow in Automation},
  booktitle = {Bildverarbeitung in der Automation},
  month     = nov,
  year      = {2012},
  abstract  = {The optical flow problem is the process of estimating the pattern of apparent motion of objects. The motion estimation algorithm gives important information about the moving objects such as the velocities and the directions of objects, which can be used in the automation industry applications. The current position of the moving object is the main problem facing the robot while trying to detect and grasp the object. Recently, an algorithm [1] based on the total variation approach with the usage of the modified census transform has been implemented and produced accurate optical flow results. In this paper, an application of that algorithm will be discussed. Real data have been used in order to check the efficiency of this approach.},
}

@article{MJ2012,
  author  = {Mujahed, Muhannad and Jaddu, Hussein},
  title   = {Smooth and Safe Nearness-Diagram ({SSND}) Navigation for Autonomous Mobile Robots},
  journal = {International Journal of Advanced Materials Research, MEMS NANO and Smart Systems},
  volume  = {403--408},
  pages   = {4718--4726},
  month   = jan,
  year    = {2012},
  note    = {Scopus indexed},
}

@phdthesis{Sha12,
  author   = {Shafik, Mohamed},
  title    = {{3D} Motion Analysis for Mobile Robots},
  school   = {Paderborn University},
  type     = {Dissertation (PhD)},
  month    = dec,
  year     = {2012},
  abstract = {In this work, we present a fast approach to estimate the motion parameter coefficients, which results in a significant reduction of the computational time of the 3D motion segmentation approach as well as a decrease in the mean error of the estimated parameters even with highly noisy MVF. Furthermore, a saliency-based approach for estimating and segmenting 3D motions of multiple moving objects represented by 2D motion vector fields (MVF) was developed. A classification module has been implemented to define the global motion of the mounted camera in order to overcome typical problems in autonomous mobile robotic vision such as noise, occlusions, and inhibition of the ego-motion defects of a moving camera head. Moreover, we propose a fast depth-integrated 3D motion parameter estimation approach which takes into consideration the perspective transformation and the depth information to accurately estimate biologically motivated classifier cells in the 3D space using the geometrical information of the stereo camera head. The results show a successful detection and estimation of predefined 3D motion patterns such as movements toward the robot which is a vital milestone towards a successful prediction of possible collisions.},
}

@proceedings{SRL12,
  editor    = {Su, Chun-Yi and Rakheja, Subhash and Liu, Honghai},
  title     = {Intelligent Robotics and Applications -- {ICIRA} 2012, October 3--5, 2012, {Montreal}, {QC}, {Canada}},
  series    = {LNAI 7506--7508},
  publisher = {Springer},
  month     = oct,
  year      = {2012},
  isbn      = {978-3-642-33508-2},
  annote    = {weitere ISBN: 978-3-642-33514-3 978-3-642-33502-0},
}

@inproceedings{TM_VISAPP_2012,
  author    = {T{\"u}nnermann, Jan and Mertsching, B{\"a}rbel},
  title     = {Continuous Region-Based Processing of Spatiotemporal Saliency},
  booktitle = {International Conference on Computer Vision Theory and Applications},
  pages     = {230--239},
  month     = feb,
  year      = {2012},
  abstract  = {This paper describes a region-based attention approach on motion saliency, which is important for systems that perceive and interact with dynamic environments. Frames are collected to create volumes, which are sliced into stacks of spatiotemporal images. Color segmentation is applied to these images. The orientations of the resulting regions are used to calculate their prominence in a spatiotemporal context. Saliency is projected back into image space. Tests with different inputs produced results comparable with other state-of-the-art methods. We also demonstrate how top-down influence can affect the processing in order to attend objects that move in a particular direction. The model constitutes a framework for later integration of spatiotemporal and spatial saliency as independent streams, which respect different requirements in resolution and timing.},
}

@mastersthesis{la-0438,
  author = {Vogt, Robin},
  title  = {Evaluation von {QEMU} zur echtzeitf{\"a}higen {Emulation} der {PowerPC} {ISA} auf einem x86 {PC}},
  school = {Paderborn University},
  type   = {Bachelor's thesis},
  month  = dec,
  year   = {2012},
}

@mastersthesis{We2012,
  author   = {Werkner, Konstantin},
  title    = {Biologisch inspirierte {Objektverfolgung} f{\"u}r k{\"u}nstliche {Sehsysteme}},
  school   = {Paderborn University},
  type     = {Bachelor's thesis},
  month    = jan,
  year     = {2012},
  abstract = {Ein autonomer mobiler Roboter muss sich selbst{\"a}ndig in einer unbekannten, dynamischen Umgebung bewegen und mit dieser interagieren k{\"o}nnen. Ein Beispiel hierf{\"u}r ist der am GET Lab entwickelte und eingesetzte Rettungsroboter GETbot. In simulierten Katastrophenszenarios muss er auch sich bewegende Objekte erkennen, um zum Beispiel diesen auszuweichen oder diese zu verfolgen. Hierzu muss er die Daten seiner Sensoren m{\"o}glichst effizient auswerten und verarbeiten k{\"o}nnen. Durch die begrenzten Ressourcen wie Rechenleistung oder Verf{\"u}gbarkeit von Effektoren muss bereits auf dem Eingangslevel eine aufgabenbezogene Filterung der Sensordaten f{\"u}r die sp{\"a}tere Verarbeitung erfolgen. Bei dem aktuell eingesetzten spatiotemporalen Verfahren werden die Eingangsbilder zu einem Volumen gesammelt. Dieses wird anschlie{\ss}end in XT und YT Scheiben geschnitten und nach Farbkriterien segmentiert. Anhand der Ausrichtung der durch die Segmentierung entstandenen Regionen kann eine Bewegung der erfassten Objekte festgestellt werden. Um jedoch m{\"o}glichst pr{\"a}zise Ergebnisse zu erreichen, m{\"u}ssen die Volumen ann{\"a}hernd kubisch sein (100 Bilder bei 100x100 Pixel Aufl{\"o}sung), was in langer Aufnahme- und entsprechender Verarbeitungsdauer resultiert. Im Rahmen dieser Arbeit muss das aktuelle Verfahren so optimiert werden, dass unabh{\"a}ngig von der Aufl{\"o}sung der Eingangsbilder die Volumen klein (ca. 10-15 Eingangsbilder) gehalten werden k{\"o}nnen und somit die Verarbeitungszeit drastisch verk{\"u}rzt werden kann. Auch findet eine Integration einer Pan-Tilt-Zoom Netzwerkkamera in das bestehende System statt, so dass auf industrielle L{\"o}sung bei Hardware zur{\"u}ckgegriffen werden kann und eine Objektverfolgung mit der Kamera realisiert werden kann.},
}

@mastersthesis{Oes11,
  author   = {{\"O}stermann, Marc},
  title    = {Autonomous {3D} Mapping and Exploration in Interior Environments Based on a Next-Best-View Approach},
  school   = {Paderborn University},
  type     = {Master's thesis},
  month    = jan,
  year     = {2012},
  abstract = {Autonomous mobile robots have to handle the problem of exploring unknown environments. To resolve this problem different methods of simultaneous localization and mapping (SLAM) can be applied. The goal of these methods is to build a map of the environment and identify the robot's position in the map and in the environment, respectively. The approaches for representing environments depend highly on the sensors available on the robot. While a lot of research has been done on two dimensional mapping in the past, the field of three-dimensional mapping gained more and more importance during the last years. In many cases laser range finders are used to create three-dimensional maps in form of three-dimensional point clouds. Residing at a certain position the robot accomplishes a local scan that delivers a 360 degree representation of the environment within the sensor's range. Since the amount of collected data in three-dimensional scans is higher than in two-dimensional ones the chance of gathering more precise information rises. Thus methods working on these data might gain from a higher entropy. Although different methods were developed in the last years their utility is hard to estimate and therefore, a closer inspection is needed. To get a sufficient map resolution for later data evaluations many data points have to be collected during one scan resulting in a high data volume. Due to the high amount of data and the duration of one scan, it is not possible to take continuous scans and process the collected data in real time. When taking a local scan at a certain position in the environment, some objects might occlude other ones that are part of the scene. Furthermore, the laser range scanner has only a limited range and resolution. To get information about occluded objects and not yet covered regions of the environment, the robot has to navigate to further positions autonomously to take additional scans. Combining these scans should lead to a global map minimizing the level of occlusion and maximizing the mapped environment. To determine the next scan position in an intelligent manner a next-best-view (NBV) method can be applied. Many NBV approaches consist of several different procedures, mainly taking, registering and merging scans and computing the next scan position. The most interesting ones are the registration of different scans and the computation of the next scan position because they have high impact on the run time of the NBV procedure. Therefore, carefully selecting suitable algorithms leads to shorter run times and the robot is able to navigate to its next position earlier. Consequently, the exploration time of the whole environment is reduced.},
}