@mastersthesis{AFROZE19,
  author   = {Afroze, Mohammed},
  title    = {Self-supervised Estimation of Depth and Ego-motion to Detect Moving Objects in an Unconstrained Monocular Video},
  month    = may,
  year     = {2020},
  school   = {Paderborn University},
  type     = {Master's thesis},
  abstract = {Detection of moving objects within dynamic scenes is a challenging problem because the camera ego-motion and the motion of objects are involved. This thesis concentrates on classifying the detected objects, from an object detection system, to be moving or stationary while the camera itself is moving. To solve this task, depth and ego-motion information are first computed in an end-to-end self-supervised learning network from consecutive camera frames in a monocular video. The self-supervision comes from the video data itself without the need to have ground-truth data. Hence, in this thesis, different deep neural network architectures will be evaluated and an appropriate model for moving object detection will be selected. Furthermore, various spatio-temporal information from consecutive frames will be exploited to devise a proper loss objective function. Subsequently, the objects are classified as moving or stationary based on the 2D projection of the objects on the target image using the estimated depth and ego-motion of the scene. For training the proposed approach, KITTI and Cityscapes datasets will be used. Finally, the developed system will be evaluated using the KITTI2015 dataset as it provides ground-truth data for the segmentation of moving cars.},
}

@book{Alb20,
  author    = {Albach, Manfred},
  title     = {Elektrotechnik 1 - Erfahrungss{\"a}tze, Bauelemente, Gleichstromschaltungen},
  month     = jan,
  year      = {2020},
  edition   = {4., aktualisierte Auflage},
  publisher = {Pearson Studium},
  isbn      = {978-3-86894-399-3},
}

@book{BSMM20,
  author    = {Bronstein, Ilja N. and Semendjajew, Konstantin A. and Musiol, Gerhard and Muehlig, Heiner},
  title     = {Taschenbuch der Mathematik},
  month     = jan,
  year      = {2020},
  edition   = {11. Auflage},
  publisher = {Europa Lehrmittel Verlag},
  isbn      = {978-3808557921},
  abstract  = {Dieses Werk ist im deutschsprachigen Raum f{\"u}r viele Studierende der Ingenieur- und Naturwissenschaften ein unverzichtbares Buch geworden. Aber auch im Berufsalltag erf{\"u}llt das erprobte Standardwerk thematisch und methodisch die Erfordernisse der Zeit.},
}

@mastersthesis{Eng2020,
  author   = {Engelmeier, Thomas},
  title    = {Robuste Bestimmung charakteristischer Orientierungen f{\"u}r bin{\"a}re Bildstrukturen},
  month    = oct,
  year     = {2020},
  school   = {Paderborn University},
  type     = {Bachelor's thesis},
  abstract = {Im Rahmen dieser Bachelorarbeit werden Verfahren zur Bestimmung von charakteristischen Orientierungen f{\"u}r bin{\"a}re Bildstrukturen ausgew{\"a}hlt, implementiert, ggf. angepasst und schlie{\ss}lich bewertet. Bei den hier betrachteten bin{\"a}ren Bildstrukturen handelt es sich im Wesentlichen um Kontursegmente, die als lokale Bildmerkmale beispielsweise im Rahmen einer Objekterkennung eingesetzt werden k{\"o}nnen. Die charakteristischen Orientierungen erm{\"o}glichen unter anderem eine rotationsinvariante Beschreibung der Kontursegmente. In der Literatur werden charakteristische Orientierungen haupts{\"a}chlich auf Basis von Grauwertbildern bestimmt. Im Vergleich dazu verf{\"u}gen Kontursegmente {\"u}ber deutlich weniger Informationen. Es ist somit davon auszugehen, dass Transformationen dieser Kontursegmente, wie z. B. Skalierungen und Rotationen, eine robuste Bestimmung der Orientierungen erschweren.},
}

@mastersthesis{Frie2019,
  author   = {Frieling, Philip},
  title    = {Robust and Precise {3D} {SLAM} at Low Computational Cost Using Laser Scanner and Camera Data},
  month    = mar,
  year     = {2020},
  school   = {Paderborn University},
  type     = {Master's thesis},
  abstract = {Autonomous mobile robot applications require that the environment and the robot pose is precisely known. Currently, the GET Lab uses a height-map approach to map the environment and estimate the robot pose. This restricts the robot to only operate in an environment that can be mapped on a plane surface. To overcome this issue and allow autonomous operations in complex environments, the goal of this thesis is to develop a 3D localization and mapping system, which is designed to run on the rescue robot GETjag. Object detection as well as path planning in multi leveled environments should be supported by creating a colored 3D map of the environment. The new system has to run at real-time, keep resources free for other applications and may only use the existing sensors. Backwards compatibility must be ensured by offering a 2.5D map. For evaluation, the SLAM system will be tested in real world indoor and large urban outdoor environments at the Paderborn University.},
}

@book{HS20,
  author    = {Harriehausen, Thomas and Schwarzenau, Dieter},
  title     = {{Moeller} Grundlagen der Elektrotechnik},
  month     = jan,
  year      = {2020},
  edition   = {24. Auflage},
  publisher = {Springer Vieweg},
  isbn      = {978-3658278397},
}

@phdthesis{JH2020,
  author = {Heymann, Jahn},
  title  = {Robust multi-channel speech recognition with neural network supported statistical beamforming},
  month  = dec,
  year   = {2020},
  school = {Paderborn University},
  type   = {Dissertation (PhD)},
}

@phdthesis{MMu2020,
  author   = {Mujahed, Muhannad},
  title    = {Control of Mobile Robots Moving in Cluttered Environments},
  month    = dec,
  year     = {2020},
  school   = {Paderborn University},
  type     = {Dissertation (PhD)},
  abstract = {Over the past few decades, mobile robots have gained a lot of attention, particularly with the evolution of application fields such as search and rescue, cleaning, and exploration. Developing such robots requires to cope with different challenges such as perception, tracking, and mapping. Nevertheless, regardless of the mission to be performed or the application domain, robots must be able to plan their own motion. Hence, motion planning is at the heart of robotics and has been thoroughly addressed since the first mobile robot was developed. Usually, real-world environments are unknown and change over time. Therefore, traditional path planning methods that build upon a previously known map fail to work properly in these environments. Reactive collision avoidance approaches tackle this problem by incorporating the perceived information into the control system, bridging the gap between planning a path and executing a motion. Unfortunately, the majority of these methods undergo some classical drawbacks limiting their performance in cluttered environments. These include being prone to oscillations, failure of guiding a robot through narrow spaces, neglect of the robot constraints, and the tendency to generate longer paths and higher execution times. The work presented in this thesis aims to cope with the above mentioned problems. To this end, a novel collision avoidance approach was developed and implemented. The key idea is to analyze the environmental structure and find out the most promising gap, once determined, a subgoal is located in a collision-free area. It is located in such a way that the opening angle of the selected gap is considered, providing a safer and smoother bridge between collision avoidance and target approach. This also leads to shorter paths and less execution times. The proposed approach has been improved by considering the clearance to obstacles and by computing the steering angle in such a way that all surrounding obstacles are taken into account. This has been possible by introducing and integrating two concepts, called ``tangential'' and ``gap flow'' navigation. Another contribution is the computation of the motion command in such a way that the stability of the system is guaranteed in the Lyapunov sense. Furthermore, this work presents a new concept, the ``admissible gap'', which addresses the question of whether a given gap is traversable by performing an admissible collision-free motion control. This concept has been successfully employed to develop a collision avoidance approach, that directly respects the vehicle constraints rather than adapting a holonomic-based solution. Another contribution is the development of a new strategy for extracting gaps, which reduces the possibility of oscillation and improves the stability of navigation. Finally, experimental results along with performance assessment in highly cluttered scenarios are presented to verify that the proposed approaches outperform state-of-the-art techniques in terms of smoothness, efficiency, reliability, and safety.},
}

@mastersthesis{MUELLER19,
  author   = {M{\"u}ller, Arnold},
  title    = {Velocity Estimation from Monocular Video},
  month    = aug,
  year     = {2020},
  school   = {Paderborn University},
  type     = {Master's thesis},
  abstract = {Velocity estimation of automotive vehicles is a challenging problem in the field of Advanced Driver Assistance Systems (ADAS). Traditional methods used range sensors to estimate the velocity. However, these kinds of sensors are generally expensive. Therefore, this thesis focuses on the usage of neural networks for real-time velocity estimation of vehicles using only a monocular camera. The proposed approach is inspired by Kampelm{\"u}hler et al. [KMF18] method which involves a vehicle tracking module together with CNN models for depth estimation and optical flow data. The proposed model analyses the input data from the depth and the optical flow to extract features that can be used for velocity estimation. Afterwards, the problem is treated as a regression problem and a shallow neural network is designed to infer the velocity and position of detected vehicles. Hence, in this work, different deep neural network architectures will be evaluated and an appropriate model will be selected. For training and evaluating the proposed approach, datasets from the TuSimple velocity estimation challenge [tus17] will be used.},
}

@book{NHFS20,
  author    = {Nischwitz, Alfred and Haber{\"a}cker, Peter and Fischer, Max W. and Socher, Gudrun},
  title     = {Bildverarbeitung: Band {II} des Standardwerks Computergrafik und Bildverarbeitung},
  month     = jan,
  year      = {2020},
  annote    = {(German)},
  edition   = {4. edition},
  publisher = {Springer},
  isbn      = {978-3658287047},
}

@misc{PTB20,
  author = {{Physikalisch-Technische Bundesanstalt PTB}},
  title  = {Das neue Internationale Einheitensystem ({SI})},
  month  = may,
  year   = {2020},
  note   = {PDF Download (accessed: 06.10.2022)},
}

@mastersthesis{Rei2019,
  author   = {Reinke, Sebastian},
  title    = {Real-Time Detection of Object Contours using Artificial Visual Attention},
  month    = mar,
  year     = {2020},
  school   = {Paderborn University},
  type     = {Master's thesis},
  abstract = {The main objective of this work is to identify, implement and analyze methods for generating binary edge images using artificial visual attention. The edge images should only contain high quality and informative binary edges of objects so that they can be used for contour based object recognition systems. Generally, edge detection can be divided into gradient- and learning-based approaches. Gradient-based algorithms are quite fast, but also detect uninformative edges, e.g. background edges, which are not helpful to describe an object. In contrast, learning-based approaches have a high run time but can generate high-quality edge images. Additionally, artificial visual attention (AVA) models can be used for edge detection. Most of the AVA models are inspired by human visual attention and use different concepts to generate edge images. One possibility here is to analyze the mechanism behind AVA models and the way they highlight ``interesting'' areas in images. In this work it will be analyzed how such mechanisms can be used for edge detection. Other AVA methods highlight regions of interest (ROI) in an image, where objects are likely to appear. It will be investigated if a reduction of run time can be achieved if the ROIs are analyzed instead of the whole image while still generating appropriate edge images.},
}

@mastersthesis{tomeh2019,
  author   = {Tomeh, Bashar},
  title    = {Semantic-aware Lightweight Visual Place Recognition System for Severe Viewpoint and Appearance Variations},
  month    = jan,
  year     = {2020},
  school   = {Paderborn University},
  type     = {Master's thesis},
  abstract = {Visual Place Recognition has played a significant role in different computer vision and robotics tasks. It can be seen as an image retrieval task where the system has to match a query image with images in the previously created database. Such a system can be used in image searching engine, autonomous driving as well as in a loop-closure system where a mobile robot tries to compensate for the arising errors when creating a map of an unknown environment by detecting previously visited places. The challenging problem is that the images in the database and the query image can be taken under different perceptual conditions as well as different viewpoints. Besides this, the image has to be represented in a single compact vector for efficient searching. This master's thesis focuses on creating an image representation that is robust to the severe viewpoint and appearance changes while preserving a low computational cost to create a real-time system feasible for a resource-constrained mobile robot. Furthermore, the effect of including the semantic understanding of the place in the image representation will be explored in this study. In this manner, two systems are proposed using the knowledge from previous work with a vector of locally aggregated descriptors applied as a convolutional layer, where the two systems differ in using the semantic information from a segmentation mask. The overall goal is a semantic-aware lightweight convolutional neural network system, which can learn robust image representation to distinguish places in presence of high visual ambiguity and viewpoint variations. The performance of the proposed systems is evaluated on benchmark datasets, considering the precision-recall scores, computational efficiency and memory usage.},
}

@mastersthesis{Wie2018a,
  author   = {Wiebe, Sergej},
  title    = {Korrespondenzbestimmung lokaler Bildmerkmale zur Objekterkennung unter Ber{\"u}cksichtigung geometrischer Informationen},
  month    = jan,
  year     = {2020},
  school   = {Paderborn University},
  type     = {Master's thesis},
  abstract = {Zur Objekterkennung eignen sich neben globalen Bildmerkmalen besonders lokale Bildmerkmale. Lokale Bildmerkmale, die mittels Verfahren wie Scale-Invariant Feature Transform (SIFT) [Low04] detektiert werden, werden zur Beschreibung neben ihrer Position und einem Merkmalsvektor auch eine charakteristische Skalierung und eine oder mehrere Orientierung(en) zugeordnet. Damit lassen sich nicht nur fotometrische Vergleiche, sondern auch geometrische Beziehungen der lokalen Merkmalen zueinander vergleichen. Bestehende Verfahren zur Objekterkennung auf Basis lokaler Bildmerkmale arbeiten h{\"a}ufig nur auf Basis der Positionen und den Merkmalsvektoren von lokalen Merkmalen, um den Suchraum einzuschr{\"a}nken. Ein zus{\"a}tzlicher Schritt ist die {\"U}berpr{\"u}fung der charakteristischen Skalierung und der Orientierung der lokalen Merkmalen zueinander. Dieser Schritt tr{\"a}gt zu einer besseren Korrespondenzbestimmung bei, ist aber mit h{\"o}herem Rechenaufwand verbunden, so dass hier ein gewisser Kompromiss erforderlich ist. Ziel dieser Arbeit ist die Auswahl, Implementierung und ggf. Anpassung sowie die Evaluierung eines Verfahrens zur Objekterkennung auf Basis lokaler Bildmerkmale unter Ber{\"u}cksichtigung der zugeh{\"o}rigen geometrischen Beziehungen.},
}