@inproceedings{TriputenGopalWeberetal.2018,
  author    = {Triputen, Sergey and Gopal, Atmaraaj and Weber, Thomas and H{\"o}fert, Christian and R{\"a}tsch, Matthias and Schreve, Kristiaan},
  title     = {Methodology to analyze the accuracy of {3D} objects reconstructed with collaborative robot based monocular {LSD-SLAM}},
  booktitle = {Advancing the frontier of intelligent autonomous systems : proceedings 2018 International Conference on Intelligent Autonomous Systems ({ICoIAS} 2018), March 1-3, 2018, Singapore},
  series    = {Advancing the frontier of intelligent autonomous systems : proceedings 2018 International Conference on Intelligent Autonomous Systems ({ICoIAS} 2018), March 1-3, 2018, Singapore},
  publisher = {IEEE},
  address   = {Piscataway, NJ},
  isbn      = {978-1-5386-6331-8},
  doi       = {10.1109/ICoIAS.2018.8494109},
  pages     = {185--190},
  year      = {2018},
  abstract  = {SLAM systems are mainly applied for robot navigation while research on feasibility for motion planning with SLAM for tasks like bin-picking, is scarce. Accurate 3D reconstruction of objects and environments is important for planning motion and computing optimal gripper pose to grasp objects. In this work, we propose the methods to analyze the accuracy of a 3D environment reconstructed using a LSD-SLAM system with a monocular camera mounted onto the gripper of a collaborative robot. We discuss and propose a solution to the pose space conversion problem. Finally, we present several criteria to analyze the 3D reconstruction accuracy. These could be used as guidelines to improve the accuracy of 3D reconstructions with monocular LSD-SLAM and other SLAM based solutions.},
  language  = {en}
}

@inproceedings{WeberTriputenGopaletal.2019,
  author    = {Weber, Thomas and Triputen, Sergey and Gopal, Atmaraaj and Ei{\ss}ler, Steffen and H{\"o}fert, Christian and Schreve, Kristiaan and R{\"a}tsch, Matthias},
  title     = {Efficient and robust {3D} object reconstruction based on monocular {SLAM} and {CNN} semantic segmentation},
  booktitle = {RoboCup 2019: Robot World Cup XXIII},
  series    = {Lecture Notes in Artificial Intelligence},
  volume    = {11531},
  editor    = {Chalup, Stephan},
  publisher = {Springer},
  address   = {Cham},
  doi       = {10.1007/978-3-030-35699-6_27},
  pages     = {351--363},
  year      = {2019},
  abstract  = {Various applications implement SLAM technology, especially in the field of robot navigation. We show the advantage of SLAM technology for independent 3D object reconstruction. To receive a point cloud of every object of interest void of its environment, we leverage deep learning. We utilize recent CNN deep learning research for accurate semantic segmentation of objects. In this work, we propose two fusion methods for CNN-based semantic segmentation and SLAM for the 3D reconstruction of objects of interest in order to obtain a more robustness and efficiency. As a major novelty, we introduce a CNN-based masking to focus SLAM only on feature points belonging to every single object. Noisy, complex or even non-rigid features in the background are filtered out, improving the estimation of the camera pose and the 3D point cloud of each object. Our experiments are constrained to the reconstruction of industrial objects. We present an analysis of the accuracy and performance of each method and compare the two methods describing their pros and cons.},
  language  = {en}
}