% IMPORTANT: The following is UTF-8 encoded.  This means that in the presence
% of non-ASCII characters, it will not work with BibTeX 0.99 or older.
% Instead, you should use an up-to-date BibTeX implementation like “bibtex8” or
% “biber”.
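%
% For reference, the sketch below shows one way to load this file with biber
% via biblatex.  It is a minimal, hedged example, not part of the exported
% record: the file name "references.bib" is a placeholder for whatever this
% file is saved as; only the citation key Bhandary:276108 comes from the
% entry below.
%
%   \documentclass{article}
%   \usepackage[backend=biber]{biblatex}  % biber handles UTF-8 natively
%   \addbibresource{references.bib}       % placeholder name for this file
%   \begin{document}
%   U-Net benchmarking for prostate segmentation~\cite{Bhandary:276108}.
%   \printbibliography
%   \end{document}
%
% With a classic BibTeX workflow (\bibliographystyle + \bibliography), running
% bibtex8 instead of bibtex is the usual way to cope with the 8-bit characters
% mentioned above.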

@ARTICLE{Bhandary:276108,
      author       = {S. Bhandary and D. Kuhn and Z. Babaiee and T. Fechter and
                       M. Benndorf and C. Zamboglou and A.-L. Grosu and
                       R. Grosu},
      title        = {{I}nvestigation and benchmarking of {U}-{N}ets on prostate
                       segmentation tasks},
      journal      = {Computerized Medical Imaging and Graphics},
      volume       = {107},
      issn         = {0895-6111},
      address      = {Amsterdam [etc.]},
      publisher    = {Elsevier Science},
      reportid     = {DKFZ-2023-01008},
      pages        = {102241},
      year         = {2023},
      abstract     = {In healthcare, a growing number of physicians and support
                       staff are striving to facilitate personalized
                       radiotherapy regimens for patients with prostate cancer,
                       because each patient's biology is unique and employing a
                       single approach for all patients is inefficient. A
                       crucial step in customizing radiotherapy planning and
                       gaining fundamental information about the disease is the
                       identification and delineation of target structures.
                       However, accurate biomedical image segmentation is
                       time-consuming, requires considerable experience, and is
                       prone to observer variability. In the past decade, the
                       use of deep learning models in medical image
                       segmentation has increased significantly. At present, a
                       vast number of anatomical structures can be delineated
                       at a clinician's level by deep learning models. These
                       models not only reduce workload but can also offer an
                       unbiased characterization of the disease. The main
                       architectures used in segmentation are the U-Net and its
                       variants, which exhibit outstanding performance.
                       However, reproducing results or directly comparing
                       methods is often hindered by closed data sources and the
                       large heterogeneity among medical images. With this in
                       mind, our intention is to provide a reliable source for
                       assessing deep learning models. As an example, we chose
                       the challenging task of delineating the prostate gland
                       in multi-modal images. First, this paper provides a
                       comprehensive review of current state-of-the-art
                       convolutional neural networks for 3D prostate
                       segmentation. Second, utilizing public and in-house CT
                       and MR datasets of varying properties, we created a
                       framework for an objective comparison of automatic
                       prostate segmentation algorithms. The framework was used
                       for a rigorous evaluation of the models, highlighting
                       their strengths and weaknesses.},
      subtyp       = {Review Article},
      keywords     = {Automatic prostate segmentation (Other) / Comparison
                      framework (Other) / Medical imaging (Other) / U-net
                      variations (Other)},
      cin          = {FR01},
      ddc          = {610},
      cid          = {I:(DE-He78)FR01-20160331},
      pnm          = {899 - ohne Topic (POF4-899)},
      pid          = {G:(DE-HGF)POF4-899},
      typ          = {PUB:(DE-HGF)16},
      pubmed       = {pmid:37201475},
      doi          = {10.1016/j.compmedimag.2023.102241},
      url          = {https://inrepo02.dkfz.de/record/276108},
}