@comment{
  GET Lab thesis bibliography, normalised during review:
  - repaired umlaut escapes: the file had {"a} (missing backslash, renders a
    literal quote); corrected to the BibTeX special-character form {\"a}
  - month fields use the predefined macros (jan/jun/mar), not quoted names
  - authors in unambiguous "Last, First" form
  - proper nouns/acronyms in titles brace-protected against style recasing
  - curly apostrophes replaced with ASCII ' (classic BibTeX is 8-bit)
  Citation keys are intentionally UNCHANGED so existing \cite commands keep
  working. NOTE(review): keys BansalAnshul2020, BansalAayush2020, Nut2020 and
  Shi2020 carry year 2020 but year = 2021 -- confirm which is intended before
  renaming; Nut2020 is typed as a Bachelor's thesis inside @mastersthesis,
  which is legal (the type field overrides the printed label).
}

@mastersthesis{BansalAnshul2020,
  author   = {Bansal, Anshul Suresh},
  title    = {{MultiFlow} - Multiple Frame Approach for Optical Flow Estimation Using Deep Learning},
  school   = {Paderborn University},
  type     = {Master's thesis},
  month    = jan,
  year     = {2021},
  abstract = {Motion detection, and activity recognition are some of the most prominent tasks in the domain of computer vision. With large scale developments taking place in the field of autonomous driving, and robotics, solving these tasks efficiently with accurate results becomes even more important. Taking inspiration from this challenge, in this thesis, a new architecture is proposed called as, MultiFlow, which aims at obtaining robust optical flow in terms of accuracy and smoothness. This model is based on the works of FlowNet-C and it estimates optical flow by taking consecutive multiple image frames as inputs and computing the correlation between these image frames. The model will be evaluated against the state-of-the-art neural network models in the field of optical flow estimation on the MPI-Sintel dataset.},
}

@mastersthesis{BansalAayush2020,
  author   = {Bansal, Aayush Suresh},
  title    = {{UFCFlow} - Optical Flow Estimation using Unsupervised Deep Learning},
  school   = {Paderborn University},
  type     = {Master's thesis},
  month    = jan,
  year     = {2021},
  abstract = {In recent times, deep learning is used to estimate accurate optical flow in a supervised or unsupervised setting. As opposed to an unsupervised approach, training a Convolutional Neural Network (CNN) for flow estimation in a supervised manner requires large amount of labelled data. However, due to shortage of labelled data such an approach is not feasible and cannot be applied to situations where labelled data is not present. To overcome the problem of labelled data, this thesis focuses on the development and implementation of a CNN model for optical flow estimation in an unsupervised setting. For this purpose, a CNN model will be trained using an unsupervised learning method with the help of proxy ground-truth to aid in the learning process. To guide the learning process, existing and new error functions will be studied and implemented to obtain robust and accurate optical flow. The developed model will be trained on the Sintel, FlyingChairs, and KITTI datasets for flow estimation. The developed model's performance will be evaluated and compared against state-of-the-art optical flow neural network models on the above datasets.},
}

@mastersthesis{Ilt2021,
  author   = {Iltner, Richard},
  title    = {Direkte {Bestimmung} lokaler {Konturmerkmale} mit {Convolutional Neural Networks}},
  school   = {Paderborn University},
  type     = {Master's thesis},
  month    = jan,
  year     = {2021},
  abstract = {Ein h{\"a}ufig verwendeter Vorverarbeitungsschritt zur Objekterkennung ist die Bestimmung informativer Bildmerkmale. Bei Verdeckungen und Deformationen von Objekten sind lokale Merkmale oft besser geeignet als globale. Zur Bestimmung dieser Merkmale existieren ausgereifte Algorithmen wie SIFT (Scale-Invariant Feature Transform), wobei den detektierten Schl{\"u}sselpunkten eine charakteristische Skalierung und eine (oder mehrere) charakteristische Orientierung(en) zugeordnet werden. In Anlehnung an SIFT wird im GET Lab ein Verfahren zur Bestimmung lokaler Konturmerkmale f{\"u}r diskrete Konturen entwickelt. F{\"u}r die Bestimmung dieser Merkmale sind mehrere Vorverarbeitungsschritte erforderlich. Insbesondere werden eindeutige und semantisch zusammenh{\"a}ngende Konturelemente ben{\"o}tigt, f{\"u}r die qualitativ hochwertige Kantenbilder erforderlich sind. Die Bestimmung solcher Bilder ist bisher kaum l{\"o}sbar. Die Kanten stellen idealerweise alle wesentlichen Objektkonturen dar, sind unterbrechungsfrei und weisen eine Breite von einem Pixel auf. Damit k{\"o}nnen dann Schl{\"u}sselpunkte als Kr{\"u}mmungsextrema der Konturen auf Basis einer Skalenraumanalyse bestimmt werden und somit wird ihnen eine charakteristische Skalierung zugeordnet. Im Bereich der Objekterkennung haben sich Deep Learning Verfahren bew{\"a}hrt, weil mit ihnen komplexe Bildstrukturen erfasst und mit abstrakten Merkmalen repr{\"a}sentiert werden k{\"o}nnen. Daher wird im Rahmen dieser Masterarbeit eine direkte Detektion der Merkmale mit Hilfe von Convolutional Neural Networks als geeignete Alternative untersucht, so dass die Bestimmung der Kantenbilder entf{\"a}llt.},
}

@mastersthesis{Jai2021,
  author   = {Jai, Satish},
  title    = {Autonomous Assembly of Pre-Defined Structures with a Robotic Arm Using Known Objects Detected in the Environment},
  school   = {Paderborn University},
  type     = {Master's thesis},
  month    = jun,
  year     = {2021},
  abstract = {The ability to assemble objects autonomously has a wide range of applications in the field of robotics. From performing industrial pick and place tasks to building a support structure during a disaster rescue operation. This thesis aimed at developing an autonomous system that can build predefined structures from simple and regular shaped objects. The system mainly consists of three subsystems: detection, arm control, and control unit. The detection system processes the 3D input data from a depth sensor camera to identify the objects of interest. Detection involves sub-processes like extraction of meaningful clusters, determining the 6D pose, and classification of the objects. The information of identified objects is further processed by a control unit. The control unit acts as a link between detection and arm control and is involved in assembly planning. The assembly planning is done using the information of the detected objects along with a construction plan which provides details of the objects that have to be used for a particular assembly. The assembly plan is executed through a state machine that communicates with the arm and takes control of different states of the assembly process. The current implementation integrates a real and a simulated arm. The simulated arm was used to perform the initial tests of the assembly system in a Gazebo simulation environment. The real-world evaluation and tests were done using a real RVM1 arm. The evaluation of the entire assembly system is done by verifying the detected object's pose and by determining the correctness of object's pose in an assembled structure.},
}

@mastersthesis{Nayak2021,
  author   = {Nayak, Anshuman},
  title    = {A Hierarchical Approach to Enhance Accuracy of Place Recognition},
  school   = {Paderborn University},
  type     = {Master's thesis},
  month    = jun,
  year     = {2021},
  abstract = {An intelligent autonomous navigation system must be able to accurately locate itself and recognize already visited places (loop-closure detection). In this regard, recent researches have put a lot of emphasis on appearance-based mapping. Kazmi et al. (2019) proposed a novel method which combines modified growing self-organizing maps (GSOM) with Bayesian framework to learn the representation of the environment incrementally using gist features. Gist features are global descriptor which are a single vector representation of an image. Images which are nearby and share similar global features are associated to a single entity called neuron. However, due to perceptual ambiguity, places which are far apart may appear similar and are mapped to a single neuron causing false detection of loop closure. In this master's thesis, we aim to build upon Kazmi et al. (2019) and improve the accuracy of place recognition (loop-closure) by using a hierarchical approach. To this end, local features in images have been used to retrieve the best match from a set of places mapped to a neuron by using geometric validation. A set of methods are formulated to improve accuracy using local descriptors and robust error estimation methods and data sets are used to evaluate the system's performance. The robustness of the proposed approach, in presence of perceptual ambiguity, is demonstrated by experimental results, where different evaluation measures like RANSAC, etc. are used. Various feature detector-descriptor combination are evaluated to find efficient ways of minimizing false detection.},
}

@mastersthesis{Neu2021,
  author   = {Neugebauer, Patrick},
  title    = {Optische {Bestimmung} der {Strohverteilparameter} bei der {Strohverteilung} des {M{\"a}hdreschers}},
  school   = {Paderborn University},
  type     = {Master's thesis},
  month    = jun,
  year     = {2021},
  abstract = {Harvesting the grain or hulled crop by the combine harvester is already the first step for cultivating the subsequent crop. The straw of the threshing process can be used in two ways. Firstly, the straw can be collected and laid down as a swath. Secondly, the combine can chop the straw and distribute it across the entire width of the cutterbar by an active distributor. Due to increasing cutterbar width and reduced tillage after harvesting, the spreaders have to spread the straw wider across and ensure an even distribution. However, there is no solution to measure this automatically. This thesis develops a measurement technology to determine the straw distribution in the field. For this purpose, images are automatically processed using machine learning methods. Different models are evaluated and a suitable model is selected. The new approach will be evaluated on several harvest experiments from different harvest seasons and will be compared to currently used measurement techniques.},
}

@mastersthesis{Nut2020,
  author   = {Nutt, Jan-Niclas},
  title    = {Robuste {Objekterkennung} auf {Basis} invarianter {Konturmerkmale}},
  school   = {Paderborn University},
  type     = {Bachelor's thesis},
  month    = jun,
  year     = {2021},
  abstract = {Zur Objekterkennung ist die Nutzung von lokalen und globalen Bildmerkmalen g{\"a}ngige Praxis. Dabei liefern lokale Bildmerkmale pr{\"a}zisere Informationen zu einzelnen Bildelementen und sind vor allem robust gegen{\"u}ber partieller Verdeckung. Ausgangspunkt dieser Arbeit ist ein neues Verfahren, dass skalierungs- und rotationsinvariante Merkmale in Form von lokalen Kontursegmenten extrahiert. Da Kontursegmente h{\"a}ufig eine gewisse {\"A}hnlichkeit aufweisen, ist die Verwendung geometrischer Informationen besonders wichtig. Der Detailgrad der Objektbeschreibung und die Laufzeit der Objekterkennung stehen in einem Kompromiss zueinander, der durch die Komplexit{\"a}t der modellierten Beziehungen bestimmt wird. Im Rahmen dieser Arbeit wird ein Verfahren ausgew{\"a}hlt, umgesetzt und getestet, das Objekte auf Basis der invarianten Konturmerkmale klassifiziert. Die Tests beziehen sich dabei speziell auf den MPEG-7 Datensatz und zeigen, dass der Erfolg des Verfahrens von der jeweiligen Klasse abh{\"a}ngt.},
}

@mastersthesis{Shi2020,
  author   = {Shivaswamy, Nikhitha},
  title    = {Energy Optimization vs. Deep Learning: Segmentation of Underwater Robot Imagery},
  school   = {Paderborn University},
  type     = {Master's thesis},
  month    = mar,
  year     = {2021},
  abstract = {Image segmentation of underwater images is an important and yet challenging field of research technique for machine vision. Due to the underwater environment, the segmentation of these images is more complex than that of common images. Our dataset consists of such deep-sea images of hydrothermal vent, black smokers, captured from multiple viewing angles. This master thesis explores novel Image Segmentation techniques to segment the black smoker images. Initially, we highlight the preparation of the dataset using the estimated depth maps of the images. Later, we discuss the use and modifications of convolutional neural networks which gives us the segmented mask of the images. These masks could be used to generate better and precise 3D model.},
}

@mastersthesis{Soh2020,
  author   = {Sohail, Asjad},
  title    = {Autonomous Robot Navigation in a Real Environment Using Reinforcement Learning},
  school   = {Paderborn University},
  type     = {Master's thesis},
  month    = mar,
  year     = {2021},
  abstract = {Autonomous robots are getting increasingly popular these days because of the applications they have from cleaning floor of a house to transporting goods from one place to another. In such environments, obstacles can cause damage to the robot and can also cause interrupt the robot from performing an operation. A robot should be able to navigate in an unknown environment by avoiding collisions with the obstacles. In this thesis, an algorithm for robot navigation in cluttered environment will be developed. Camera and LIDAR will be used to acquire the distance with obstacles which will help the robot to avoid collision. The problem of an unknown environment can be solved through Reinforcement Learning algorithms. The performance of the algorithm will be evaluated on how much time it takes before the first collision, using the robot in GET Lab.},
}