% IMPORTANT: The following is UTF-8 encoded. This means that in the presence
% of non-ASCII characters, it will not work with BibTeX 0.99 or older.
% Instead, you should use an up-to-date BibTeX implementation like “bibtex8” or
% “biber”.
@article{Konnerth:304593,
  author    = {Konnerth, D. and Altay-Langguth, A. and Dehelean, D.-C. and
               Maier, S. H. and Pazos, M. and Rogowski, P. and
               Schönecker, S. and Eze, C. and Corradini, S. and
               Belka, C. and Marschner, S. N.},
  title     = {{CHAT}-{RT} study: {ChatGPT} in radiation oncology-a
               survey on usage, perception, and impact among {DEGRO}
               members},
  journal   = {Radiation Oncology},
  volume    = {20},
  number    = {1},
  pages     = {140},
  year      = {2025},
  issn      = {1748-717X},
  address   = {London},
  publisher = {BioMed Central},
  abstract  = {Radiation oncology is increasingly turning to Artificial
               Intelligence (AI) - and in particular Chat Generative
               pre-trained transformer (ChatGPT) - for decision support,
               patient education, and workflow efficiency. Despite
               promising gains, questions about accuracy, General Data
               Protection Regulation (GDPR)-compliance and ethical use
               persist, especially in high-stakes cancer care. To clarify
               real-world attitudes and practices, we surveyed members of
               the German Society of Radiation Oncology (DEGRO) on their
               use, perceptions, and concerns regarding ChatGPT across
               clinical, research, communication, and administrative
               tasks. An anonymous online survey was implemented via
               LimeSurvey platform and distributed to all members of the
               DEGRO in Germany, Austria, and Switzerland between April and
               June 2024. The 40-item questionnaire-covering demographics,
               radiotherapy experience, and ChatGPT's clinical, research,
               communication, and administrative applications-was developed
               through a narrative literature review, ChatGPT-assisted
               drafting, back-translation, expert validation, and pilot
               testing. Fully completed responses were used for descriptive
               statistics and analysis. Of 213 respondents, 159 fully
               completed the survey. Participants were predominantly based
               in Germany (92.5\%), worked in university hospitals
               (74.2\%), and identified as radiation oncologists
               (54.7\%), with a broad range of radiotherapy experience
               ($<$ 1 year: 7.5\%; $>$15 years: 24.5\%). Awareness of
               ChatGPT was high (94.9\%), yet actual use varied: 32.1\%
               never used it, while 35.2\% employed it regularly for
               administrative tasks and 30.2\% for manuscript drafting.
               Mid-career clinicians (6-10 years' experience) showed the
               greatest enthusiasm-44\% agreed it saves time and 72\%
               planned further integration-though all career stages
               (71.7\% overall) expressed strong interest in formal
               training. Satisfaction was highest for administrative
               (94.6\%) and manuscript support (91.7\%) but lower for
               technical queries (66.7\%). Major concerns included
               misinformation (69.2\%), erosion of critical thinking
               (57.9\%), and data-privacy risks (57.2\%). Our survey
               demonstrates high awareness and adoption of ChatGPT for
               administrative and educational tasks, alongside more
               cautious use in clinical decision-making. Widespread
               concerns about misinformation, critical-thinking erosion,
               and data privacy-especially among early- and mid-career
               clinicians-underscore the need for targeted AI training,
               rigorous validation, and transparent governance to ensure
               safe, effective integration into patient care.},
  keywords  = {CHAT-GPT (Other) / DEGRO (Other) / LLM (Other) /
               Questionnaire (Other) / Radiation oncology (Other)},
  reportid  = {DKFZ-2025-01916},
  cin       = {MU01},
  ddc       = {610},
  cid       = {I:(DE-He78)MU01-20160331},
  pnm       = {899 - ohne Topic (POF4-899)},
  pid       = {G:(DE-HGF)POF4-899},
  typ       = {PUB:(DE-HGF)16},
  pubmed    = {pmid:40954471},
  doi       = {10.1186/s13014-025-02721-9},
  url       = {https://inrepo02.dkfz.de/record/304593},
}