@mastersthesis{Avekar2023,
  author   = {Avekar, Pranav},
  title    = {Automated Planning for Behavior Control of Autonomous Rescue Robots},
  school   = {Paderborn University},
  type     = {Master's Thesis},
  month    = mar,
  year     = {2024},
  abstract = {Rescue robots working in unknown environments should have a robust behavior control system to effectively deal with unforeseen events using the available resources and capabilities of the robot. Traditionally, finite state machines are utilized to construct behavior control systems. It is difficult to develop a complex yet flexible behavior control system with FSMs. However, automated planner-based systems can overcome the drawbacks of FSMs and ease the development of flexible behavior control systems. In this thesis, a behavior control system for autonomous robots was developed based on the Problem Domain Definition Language (PDDL) and the PlanSys2 framework. The developed system efficiently addresses newly occurring changes in the working environment by generating new plans as required. Furthermore, the behavior control system includes a graphical user interface, enhancing user interaction and facilitating the visualization of ongoing operations.},
}

@mastersthesis{Bunse2023,
  author = {Bunse, Florian},
  title  = {Analyse von Verfahren zur Segmentierung von Videodaten},
  school = {Paderborn University},
  type   = {Bachelor's Thesis},
  month  = jan,
  year   = {2024},
}

@mastersthesis{Joshi2023,
  author   = {Joshi, Yaznik},
  title    = {Image Classification using Deep Learning and Attention Mechanisms},
  school   = {Paderborn University},
  type     = {Master's Thesis},
  month    = mar,
  year     = {2024},
  abstract = {This thesis aims to explore the role of different attention mechanisms in Vision Transformers to perform image classification as means of a pre-training to acquire rich visual features that can be used for other downstream tasks where annotated data may be scarce. One of the main objectives is developing an efficient Vision Transformer model for fast inference on resource-limited devices such as mobile rescue robots. To this end, the image classification will be done on disaster image datasets that are derived from the Incidents1M dataset. Based on the literature research, Vision Transformers such as SwiftFormer, MobileViTv2, and CrossFormer are selected as initial architectures for implementation. Their performance will be compared and evaluated in terms of accuracy, training time, model size, and computation cost. This evaluation will then be used to select an architecture for further evaluation and optimization. Furthermore, a proposed Transpose Additive Attention mechanism is integrated into the SwiftFormer architecture and, is experimented and analyzed with different datasets and configurations. The possible optimizations will be explored by compression of the attention mechanism in SwiftFormer and the proposed TAAFormer architectures to improve resource utilization and performance. In addition, visualizations of the attention maps will be created to provide interpretability of the architecture.},
}

@mastersthesis{Krue2023,
  author   = {Kr{\"u}ger, Kai Timo},
  title    = {Deep Learning-based {SIFT}-like Keypoint Detection for Object Contours in Real Images},
  school   = {Paderborn University},
  type     = {Master's Thesis},
  month    = jul,
  year     = {2024},
  abstract = {The objective of this work is to develop a Convolutional Neural Network (CNN)-based keypoint detection approach for object contours in real images. A hand-crafted detection method has been developed at GET Lab but requires given object contours. Like Scale-Invariant Feature Transform (SIFT), a scale-space analysis is performed to obtain scale and rotation-invariant keypoints and their characteristic scales. Another work at GET Lab developed a contour extraction method based on image segmentation to extract the object contours from real images. However, the sequential application of image segmentation and keypoint detection adds considerable difficulty and complexity to the overall process, thereby impeding its real-time capability. A CNN-based approach is investigated to address this drawback and aim for a better generalization. Examples of existing CNN-based keypoint detection methods are Key.Net and SobelNet, which are not only real-time capable but also achieve comparable results to traditional keypoint detection methods. However, they are not contour-based. In the context of this thesis, the entire process of contour-based keypoint extraction is replaced by a CNN-based approach. The approach detects keypoints' scale and position. A U-net-based architecture is trained using the weighted binary cross-entropy loss. It achieves a detection rate of 6.8 \% with a precision of 43.58 \%. In addition, the proposed network is executable as a single unit and real-time capable. The CNN is trained in a supervised manner using images from the Segment Anything 1 Billion (SA-1B) dataset. The ground truth data, consisting of keypoint positions and their characteristic scales, is generated using the detection method from GET Lab.},
}

@mastersthesis{Kaliath2023,
  author   = {Kaliath, Rensi Raghunath Nadakkakath},
  title    = {Improving Robot Dexterity with a Hybrid Visual Servoing Approach},
  school   = {Paderborn University},
  type     = {Master's Thesis},
  month    = sep,
  year     = {2024},
  abstract = {In the field of robotics, handling objects and interacting with the environment is important for a wide range of applications. The current setup at GET Lab for object handling tasks uses a camera mounted on the end-effector along with a closed loop control approach. The model-based tracking system fails as the end-effector gets closer to the object and the target object is not completely visible. This thesis tries to overcome this limitation by extending the current method so that tracking is possible until the end-effector can interact with the object. A solution with a fixed camera setup and a hybrid control approach integrating Image Based Visual Servoing (IBVS) into the current system is developed. Finally, the system is evaluated for its ability in performing dexterity tasks such as touch, inspect and grasp.},
}

@mastersthesis{Nietfeld2024,
  author = {Nietfeld, Oliver},
  title  = {Verfolgung von Objekten auf Basis Kontur-basierter Interest Points},
  school = {Paderborn University},
  type   = {Master's Thesis},
  month  = mar,
  year   = {2024},
}

@mastersthesis{Okonkwo2023,
  author = {Okonkwo, Frankline},
  title  = {Energy management system for a rescue robot platform},
  school = {Paderborn University},
  type   = {Master's Thesis},
  month  = jan,
  year   = {2024},
}

@mastersthesis{Schmidt2024,
  author = {Schmidt, Gerhardt},
  title  = {Matching Local Contour-based Image Features in Real Images using Deep Descriptors},
  school = {Paderborn University},
  type   = {Master's Thesis},
  month  = mar,
  year   = {2024},
}