@inproceedings{sprenger-spi2021,
  author       = {Sprenger, J. and Neidhardt, M. and Schl{\"u}ter, M. and Latus, S. and Gosau, T. and Kemmling, J. and Feldhaus, S. and Schumacher, U. and Schlaefer, A.},
  title        = {In-Vivo Markerless Motion Detection from Volumetric Optical Coherence Tomography Data Using {CNNs}},
  booktitle    = {Medical Imaging 2021: Image-Guided Procedures, Robotic Interventions, and Modeling},
  editor       = {Linte, Cristian A. and Siewerdsen, Jeffrey H.},
  year         = {2021},
  volume       = {11598},
  pages        = {345--350},
  publisher    = {SPIE},
  organization = {International Society for Optics and Photonics},
  doi          = {10.1117/12.2581023},
  keywords     = {deep learning, optical coherence tomography, markerless motion detection},
  abstract     = {Precise navigation is an important task in robot-assisted and minimally invasive surgery. The need for optical markers and a lack of distinct anatomical features on skin or organs complicate tissue tracking with commercial tracking systems. Previous work has shown the feasibility of a 3D optical coherence tomography based system for this purpose. Furthermore, convolutional neural networks have been proven to precisely detect shifts between volumes. However, most experiments have been performed with phantoms or ex-vivo tissue. We introduce an experimental setup and perform measurements on perfused and non-perfused (dead) tissue of in-vivo xenograft tumors. We train 3D siamese deep learning models and evaluate the precision of the motion prediction. The network's ability to predict shifts for different motion magnitudes and also the performance for the different volume axes are compared. The root-mean-square errors are 0.12mm and 0.08mm on perfused and non-perfused tumor tissue, respectively.},
}

@COMMENT{Bibtex file generated on 2026-5-15 with typo3 si_bibtex plugin. Data from https://www.tuhh.de/mtec/publications/2024-2020 }