<?xml version="1.0" encoding="iso-8859-1" standalone="no"?>
<!DOCTYPE GmsArticle SYSTEM "http://www.egms.de/dtd/2.0.34/GmsArticle.dtd">
<GmsArticle xmlns:xlink="http://www.w3.org/1999/xlink">
  <MetaData>
    <Identifier>zma001702</Identifier>
    <IdentifierDoi>10.3205/zma001702</IdentifierDoi>
    <IdentifierUrn>urn:nbn:de:0183-zma0017020</IdentifierUrn>
    <ArticleType language="en">commentary</ArticleType>
    <ArticleType language="de">Kommentar</ArticleType>
    <TitleGroup>
      <Title language="en">Legal aspects of generative artificial intelligence and large language models in examinations and theses</Title>
      <TitleTranslated language="de">Rechtliche Aspekte zu generativer K&#252;nstlicher Intelligenz und Large-Language-Modellen in Pr&#252;fungen und Abschlussarbeiten</TitleTranslated>
    </TitleGroup>
    <CreatorList>
      <Creator>
        <PersonNames>
          <Lastname>M&#228;rz</Lastname>
          <LastnameHeading>M&#228;rz</LastnameHeading>
          <Firstname>Maren</Firstname>
          <Initials>M</Initials>
        </PersonNames>
        <Address language="en">Charit&#233; &#8211; University Medicine Berlin, AG Progress Test Medicine, Teaching Division, Charit&#233;platz 1, D-10117 Berlin, Germany, Phone: &#43;49 (0)30&#47;450-576047<Affiliation>Charit&#233; &#8211; University Medicine Berlin, AG Progress Test Medicine, Teaching Division, Berlin, Germany</Affiliation></Address>
        <Address language="de">Charit&#233; &#8211; Universit&#228;tsmedizin Berlin, AG Progress Test Medizin, Gesch&#228;ftsbereich Lehre, Charit&#233;platz 1, 10117 Berlin, Deutschland, Tel.: &#43;49 (0)30&#47;450-576047<Affiliation>Charit&#233; &#8211; Universit&#228;tsmedizin Berlin, AG Progress Test Medizin, Gesch&#228;ftsbereich Lehre, Berlin, Deutschland</Affiliation></Address>
        <Email>maren.maerz&#64;charite.de</Email>
        <Creatorrole corresponding="yes" presenting="no">author</Creatorrole>
      </Creator>
      <Creator>
        <PersonNames>
          <Lastname>Himmelbauer</Lastname>
          <LastnameHeading>Himmelbauer</LastnameHeading>
          <Firstname>Monika</Firstname>
          <Initials>M</Initials>
        </PersonNames>
        <Address language="en">
          <Affiliation>Medical University of Vienna, Teaching Centre, Vienna, Austria</Affiliation>
        </Address>
        <Address language="de">
          <Affiliation>Medizinische Universit&#228;t Wien, Teaching Center, Wien, &#214;sterreich</Affiliation>
        </Address>
        <Email>monika.himmelbauer&#64;meduniwien.ac.at</Email>
        <Creatorrole corresponding="no" presenting="no">author</Creatorrole>
      </Creator>
      <Creator>
        <PersonNames>
          <Lastname>Boldt</Lastname>
          <LastnameHeading>Boldt</LastnameHeading>
          <Firstname>Kevin</Firstname>
          <Initials>K</Initials>
        </PersonNames>
        <Address language="en">
          <Affiliation>The State Commissioner for Data Protection and Freedom of Information Rhineland-Palatinate, Mainz, Germany</Affiliation>
        </Address>
        <Address language="de">
          <Affiliation>Der Landesbeauftragte f&#252;r den Datenschutz und die Informationsfreiheit Rheinland-Pfalz, Mainz, Deutschland</Affiliation>
        </Address>
        <Email>K.Boldt&#64;datenschutz.rlp.de</Email>
        <Creatorrole corresponding="no" presenting="no">author</Creatorrole>
      </Creator>
      <Creator>
        <PersonNames>
          <Lastname>Oksche</Lastname>
          <LastnameHeading>Oksche</LastnameHeading>
          <Firstname>Alexander</Firstname>
          <Initials>A</Initials>
        </PersonNames>
        <Address language="en">
          <Affiliation>Institut f&#252;r medizinische und pharmazeutische Pr&#252;fungsfragen (IMPP), Mainz, Germany</Affiliation>
          <Affiliation>Justus Liebig University Giessen, Rudolf Buchheim Institute for Pharmacology, Giessen, Germany</Affiliation>
        </Address>
        <Address language="de">
          <Affiliation>Institut f&#252;r medizinische und pharmazeutische Pr&#252;fungsfragen (IMPP), Mainz, Deutschland</Affiliation>
          <Affiliation>Justus-Liebig-Universit&#228;t Giessen, Rudolf-Buchheim-Institut f&#252;r Pharmakologie, Giessen, Deutschland</Affiliation>
        </Address>
        <Email>aoksche&#64;impp.de</Email>
        <Creatorrole corresponding="no" presenting="no">author</Creatorrole>
      </Creator>
    </CreatorList>
    <PublisherList>
      <Publisher>
        <Corporation>
          <Corporatename>German Medical Science GMS Publishing House</Corporatename>
        </Corporation>
        <Address>D&#252;sseldorf</Address>
      </Publisher>
    </PublisherList>
    <SubjectGroup>
      <SubjectheadingDDB>610</SubjectheadingDDB>
      <Keyword language="en">assessment</Keyword>
      <Keyword language="en">AI</Keyword>
      <Keyword language="en">large language models</Keyword>
      <Keyword language="en">legal framework</Keyword>
      <Keyword language="de">Pr&#252;fungen</Keyword>
      <Keyword language="de">KI</Keyword>
      <Keyword language="de">gro&#223;e Sprachmodelle</Keyword>
      <Keyword language="de">rechtliche Rahmenbedingungen</Keyword>
      <SectionHeading language="en">generative AI</SectionHeading>
      <SectionHeading language="de">generative KI</SectionHeading>
    </SubjectGroup>
    <DateReceived>20240411</DateReceived>
    <DateRevised>20240701</DateRevised>
    <DateAccepted>20240709</DateAccepted>
    <DatePublishedList>
      <DatePublished>20240916</DatePublished>
      <DateRepublished>20240920</DateRepublished>
    </DatePublishedList>
    <Language>engl</Language>
    <LanguageTranslation>germ</LanguageTranslation>
    <License license-type="open-access" xlink:href="http://creativecommons.org/licenses/by/4.0/">
      <AltText language="en">This is an Open Access article distributed under the terms of the Creative Commons Attribution 4.0 License.</AltText>
      <AltText language="de">Dieser Artikel ist ein Open-Access-Artikel und steht unter den Lizenzbedingungen der Creative Commons Attribution 4.0 License (Namensnennung).</AltText>
    </License>
    <SourceGroup>
      <Journal>
        <ISSN>2366-5017</ISSN>
        <Volume>41</Volume>
        <Issue>4</Issue>
        <JournalTitle>GMS Journal for Medical Education</JournalTitle>
        <JournalTitleAbbr>GMS J Med Educ</JournalTitleAbbr>
      </Journal>
    </SourceGroup>
    <ArticleNo>47</ArticleNo>
    <Correction><DateLastCorrection>20240920</DateLastCorrection>Under Notes: Word corrected, correct English</Correction>
  </MetaData>
  <OrigData>
    <Abstract language="de" linked="yes"><Pgraph>Die hohe Leistungsf&#228;higkeit von generativer K&#252;nstlicher Intelligenz (KI) und gro&#223;en Sprachmodellen (LLM) in Pr&#252;fungskontexten hat eine intensive Debatte &#252;ber ihre Anwendungen, Auswirkungen und Risiken ausgel&#246;st. Welche rechtlichen Aspekte sind beim Einsatz von LLM in Lehre und Pr&#252;fungen zu ber&#252;cksichtigen&#63; Welche Chancen bieten Sprachmodelle&#63; </Pgraph><Pgraph>F&#252;r die rechtliche Bewertung des Einsatzes von LLM finden Satzungen und Gesetze Anwendung:</Pgraph><Pgraph>&#8211; Universit&#228;re Satzungen, Hochschulgesetze der L&#228;nder, Approbationsordnung f&#252;r &#196;rzte</Pgraph><Pgraph>&#8211; Urheberrechtsgesetz (UrhG)</Pgraph><Pgraph>&#8211; Datenschutz-Grundverordnung (DS-GVO)</Pgraph><Pgraph>&#8211; KI-Verordnung (KI-VO)</Pgraph><Pgraph>LLM und KI bieten Chancen, erfordern aber klare universit&#228;re Rahmenbedingungen. Diese sollten den legitimen Einsatz und die Bereiche, in denen die Nutzung untersagt ist, definieren. T&#228;uschungen und Plagiate versto&#223;en gegen die wissenschaftliche Praxis und das UrhG. Eine T&#228;uschung ist schwer nachzuweisen. Plagiate durch KI sind m&#246;glich. Nutzer&#42;innen der Produkte sind in der Verantwortung.</Pgraph><Pgraph>LLM sind effektive Tools zur Generierung von Pr&#252;fungsfragen. Dennoch ist ein sorgf&#228;ltiges Review notwendig, da selbst qualitativ hochwertig scheinende Produkte Fehler enthalten k&#246;nnen. Das Risiko von Urheberrechtsverletzungen bei KI-generierten Pr&#252;fungsaufgaben ist hingegen gering, da das Urheberrecht den Einsatz gesch&#252;tzter Werke f&#252;r Lehre und Pr&#252;fungen bis zu 15&#37; erlaubt.</Pgraph><Pgraph>Die Bewertung von Pr&#252;fungsinhalten unterliegt Hochschulgesetzen und -ordnungen und der DSGVO. Eine ausschlie&#223;lich computergest&#252;tzte Bewertung ohne menschliche &#220;berpr&#252;fung ist nicht zul&#228;ssig. 
F&#252;r Hochrisiko-Anwendungen in der beruflichen Lehre findet k&#252;nftig die KI-VO der EU Anwendung.</Pgraph><Pgraph>Im Umgang mit LLM in Pr&#252;fungen k&#246;nnen Bewertungskriterien bestehender Pr&#252;fungen angepasst werden, aber auch Pr&#252;fungsprogramme, z.B. um die T&#228;uschungsmotivation zu reduzieren. LLM k&#246;nnen zudem selbst Gegenstand der Pr&#252;fung werden. Lehrende sollten sich in KI weiterbilden und LLM als Erg&#228;nzung betrachten.</Pgraph></Abstract>
    <Abstract language="en" linked="yes"><Pgraph>The high performance of generative artificial intelligence (AI) and large language models (LLM) in examination contexts has triggered an intense debate about their applications, effects and risks. What legal aspects need to be considered when using LLM in teaching and assessment&#63; What possibilities do language models offer&#63; </Pgraph><Pgraph>Statutes and laws are used to assess the use of LLM:</Pgraph><Pgraph>&#8211; University statutes, state higher education laws, licensing regulations for doctors</Pgraph><Pgraph>&#8211; Copyright Act (UrhG)</Pgraph><Pgraph>&#8211; General Data Protection Regulation (GDPR)</Pgraph><Pgraph>&#8211; AI Regulation (EU AI Act)</Pgraph><Pgraph>LLM and AI offer opportunities but require clear university frameworks. These should define legitimate uses and areas where use is prohibited. Cheating and plagiarism violate good scientific practice and copyright laws. Cheating is difficult to detect. Plagiarism by AI is possible. Users of the products are responsible. </Pgraph><Pgraph>LLM are effective tools for generating exam questions. Nevertheless, careful review is necessary as even apparently high-quality products may contain errors. However, the risk of copyright infringement with AI-generated exam questions is low, as copyright law allows up to 15&#37; of protected works to be used for teaching and exams. </Pgraph><Pgraph>The grading of exam content is subject to higher education laws and regulations and the GDPR. Exclusively computer-based assessment without human review is not permitted. For high-risk applications in education, the EU&#39;s AI Regulation will apply in the future. </Pgraph><Pgraph>When dealing with LLM in assessments, evaluation criteria for existing assessments can be adapted, as can assessment programmes, e.g. to reduce the motivation to cheat. LLM can also become the subject of the examination themselves. 
Teachers should undergo further training in AI and consider LLM as an addition.</Pgraph></Abstract>
    <TextBlock language="en" linked="yes" name="Introduction">
      <MainHeadline>Introduction</MainHeadline><Pgraph>Artificial Intelligence (AI) is one of the key technologies of the fourth industrial revolution, which has the potential to fundamentally change industries and societies through global networking, digitalisation and the merging of the physical, digital and biological worlds <TextLink reference="1"></TextLink>. </Pgraph><Pgraph>Generative Artificial Intelligence (GAI) such as Large Language Models (LLM) is reaching a level of maturity that will impact healthcare. It could soon contribute to medical practice and empower patients to systematically shape their healthcare <TextLink reference="2"></TextLink>, <TextLink reference="3"></TextLink>, <TextLink reference="4"></TextLink>, <TextLink reference="5"></TextLink>, <TextLink reference="6"></TextLink>, <TextLink reference="7"></TextLink>, <TextLink reference="8"></TextLink>. The rapid development, adoption and use of AI technologies in healthcare requires healthcare professionals to master experimental techniques, even if they are not yet recognised as standard <TextLink reference="9"></TextLink>. </Pgraph><Pgraph>GAI uses deep learning for content creation. LLM process natural language. They generate human-like text based on statistical principles that calculate the probability of a word or character depending on the context <TextLink reference="10"></TextLink>, <TextLink reference="11"></TextLink>, <TextLink reference="12"></TextLink>, <TextLink reference="13"></TextLink>. Models such as ChatGPT are optimised for dialogue using reinforcement learning with human feedback (RLHF) <TextLink reference="14"></TextLink>, <TextLink reference="15"></TextLink>, <TextLink reference="16"></TextLink>. 
LLM are used for translation and content production, automating literature reviews, identifying relevant studies, extracting key findings <TextLink reference="17"></TextLink>, <TextLink reference="18"></TextLink>, facilitating information retrieval and knowledge discovery, and providing decision support <TextLink reference="19"></TextLink>, <TextLink reference="20"></TextLink>.  </Pgraph><Pgraph>LLM achieve remarkable exam results: ChatGPT passed the United States Medical Licensing Examination <TextLink reference="21"></TextLink> and outperformed most students on the German progress test medizin <TextLink reference="22"></TextLink>. LLM outperformed first and second year students on free-text clinical reasoning exams <TextLink reference="23"></TextLink>, scored 75&#37; on the open-ended ENT and head and neck surgery specialist exam <TextLink reference="24"></TextLink>, 83&#37; on a simulated 500-question written neurosurgery exam <TextLink reference="25"></TextLink>, and around 60&#37; on the European core cardiology exam <TextLink reference="26"></TextLink>. GPT-4 significantly outperformed previous models such as GPT-3 and GPT-3.5 in all areas analysed, demonstrating the rapid evolution of LLM <TextLink reference="23"></TextLink>, <TextLink reference="24"></TextLink>, <TextLink reference="25"></TextLink>, <TextLink reference="27"></TextLink>, <TextLink reference="28"></TextLink>. GPT-3 was in the bottom 10&#37; of US uniform bar examination graduates, while GPT-4 was in the top 10&#37; <TextLink reference="15"></TextLink>, <TextLink reference="27"></TextLink>.   </Pgraph><Pgraph>Additionally, there are challenges and limitations. The quality of the underlying training data can lead to discriminatory, unfair, and inaccurate content <TextLink reference="29"></TextLink>. 
Training data should be accurate, complete, up-to-date, representative, and free from historical bias; however, these characteristics are often not fully known and therefore difficult to assess <TextLink reference="29"></TextLink>, <TextLink reference="30"></TextLink>. In rapidly developing areas, data may also have limited public availability. LLM then generate plausible-sounding but incorrect answers, which are known as &#8220;hallucination&#8221; <TextLink reference="31"></TextLink>. Previous measures such as retrieval LLM (RAG) reduce erroneous results, but do not completely prevent them <TextLink reference="17"></TextLink>, <TextLink reference="32"></TextLink>, <TextLink reference="33"></TextLink>. Therefore, it is essential to subject the generated content to careful scrutiny <TextLink reference="20"></TextLink>, <TextLink reference="34"></TextLink>, <TextLink reference="35"></TextLink>. Another weakness is the lack of transparency in LLM decision-making processes. These limitations have prompted a comprehensive debate about the applications, effects, and risks associated with these technologies <TextLink reference="23"></TextLink>, <TextLink reference="36"></TextLink>, <TextLink reference="37"></TextLink>, <TextLink reference="38"></TextLink>, <TextLink reference="39"></TextLink>.   </Pgraph><Pgraph>The issue of examinations is particularly prominent, especially where examination systems are centred on written forms <TextLink reference="40"></TextLink>, <TextLink reference="41"></TextLink>. With the rise of online examinations, there is growing concern about academic misuse, fuelled by anonymity, lack of supervision and access to electronic texts <TextLink reference="42"></TextLink>, <TextLink reference="43"></TextLink>, and LLM exacerbates existing challenges <TextLink reference="40"></TextLink>, <TextLink reference="41"></TextLink>, particularly for written work such as assignments, bachelor or master theses and dissertations. 
This is a complex issue, not only in terms of content, but also from a legal perspective. The following aspects need to be considered:  </Pgraph><SubHeadline>1. University statutes, higher education laws of the federal states, German Research Foundation guidelines, licensing regulations for doctors (AO) </SubHeadline><Pgraph>Universities regulate examination requirements and procedures in study and&#47;or examination regulations. They contain provisions on failures, breaches of regulations, performance assessment and grading, and can regulate the use of aids and define the use of unauthorised aids as cheating <TextLink reference="44"></TextLink>. The AO (2002) leaves the decision on the consequences of violations of regulations or attempts to cheat in examinations to the discretion of the relevant state examination office (cf. &#167;&#167; 14 para. 5, 15 para. 6). </Pgraph><Pgraph>The DFG guidelines for <Mark2>safeguarding</Mark2> good scientific practice apply to all researchers engaged in projects funded by the German Research Foundation (DFG). Furthermore, these guidelines are intended for implementation by universities and research institutions in Germany, which are expected to incorporate them into their own regulations <TextLink reference="45"></TextLink>.  </Pgraph><SubHeadline>2. Copyright act (UrhG) </SubHeadline><Pgraph>The legal framework governing copyright is based on an EU directive that has been transposed into national legislation by each member state. It protects personal intellectual creations, as set forth in Section 2 (2) of the German Copyright Act (UrhG). An author is always a natural person, that is to say, a human being. This confers upon them the exclusive right to use their work. The extent to which AI-generated output is protected by copyright is contingent upon the degree to which the individual utilises the computer as a technical aid. (cf. Dreier&#47;Schulze&#47;Schulze UrhG Section 2 para. 
8) <TextLink reference="41"></TextLink>, <TextLink reference="42"></TextLink>. </Pgraph><SubHeadline>3. General Data Protection Regulation (GDPR) </SubHeadline><Pgraph>The General Data Protection Regulation (GDPR) is directly applicable in all EU member states. It regulates the protection of personal data and the free movement of data, as well as protecting the fundamental rights and freedoms of natural persons. Data processing must be legally justified (Art. 1, Art. 5 para. 1 lit. A, 6 para. 1 GDPR). Individuals whose data is processed have certain rights, including the controller&#39;s obligation to provide information and the right to access that information (Art. 13, 14, 15 GDPR). Furthermore, the regulation applies to companies outside the EU that process the data of EU citizens, in accordance with the &#8220;marketplace principle&#8221; (Art. 3 para. 2 GDPR).  </Pgraph><SubHeadline>4. AI regulation (EU AI Act) </SubHeadline><Pgraph>The AI Regulation establishes a legal framework for trustworthy AI. Its objectives include ensuring security, transparency, traceability, non-discrimination, and environmental friendliness. It was adopted by the European Parliament on 13 March 2024 and will apply in all EU member states from 2026. AI systems are categorised into four risk classes: unacceptable risk (prohibited), high, low, and minimal risk. &#8220;General-purpose AI systems&#8221; (GPAI), which in principle include many LLM, are initially classified as limited risk and must fulfil transparency and documentation obligations and a copyright policy (Art. 52, 52c AI Regulation) <TextLink reference="46"></TextLink>. GPAI with systemic risk are subject to additional requirements and need to be registered <TextLink reference="47"></TextLink>, <TextLink reference="48"></TextLink>. 
In addition, high-risk AI systems must implement measures such as supervision, quality and risk management, extensive documentation, and rigorous data quality and system security standards <TextLink reference="49"></TextLink>.  </Pgraph></TextBlock>
    <TextBlock language="de" linked="yes" name="Einf&#252;hrung">
      <MainHeadline>Einf&#252;hrung</MainHeadline><Pgraph>K&#252;nstliche Intelligenz (KI) ist eine der Schl&#252;sseltechnologien der vierten industriellen Revolution, die  zur globalen Vernetzung, Digitalisierung und durch Verschmelzung der physischen, digitalen und biologischen Welt das Potenzial hat, Industrien und Gesellschaften grundlegend zu ver&#228;ndern <TextLink reference="1"></TextLink>.</Pgraph><Pgraph>Generative K&#252;nstliche Intelligenz (GKI) wie Large Language Models (LLM) erreicht einen Reifegrad, der das Gesundheitswesen beeinflussen wird. Sie k&#246;nnte bald zur medizinischen Praxis beitragen und Patient&#42;innen bef&#228;higen, ihre Gesundheitsversorgung systematisch mitzugestalten <TextLink reference="2"></TextLink>, <TextLink reference="3"></TextLink>, <TextLink reference="4"></TextLink>, <TextLink reference="5"></TextLink>, <TextLink reference="6"></TextLink>, <TextLink reference="7"></TextLink>, <TextLink reference="8"></TextLink>. Die rasche Entwicklung, Einf&#252;hrung und Nutzung von KI-Technologien im Gesundheitswesen erfordert, dass das Gesundheitspersonal experimentelle Techniken beherrscht, auch wenn diese noch nicht als Standard anerkannt sind <TextLink reference="9"></TextLink>.</Pgraph><Pgraph>GKI nutzt Deep Learning zur Inhaltserstellung. LLM verarbeiten die nat&#252;rliche Sprache. Sie erzeugen  menschen&#228;hnliche Texte basierend auf statistischen Prinzipien, die die Wahrscheinlichkeit eines Wortes oder Zeichens in Abh&#228;ngigkeit vom Kontext berechnen <TextLink reference="10"></TextLink>, <TextLink reference="11"></TextLink>, <TextLink reference="12"></TextLink>, <TextLink reference="13"></TextLink>. Modelle wie ChatGPT werden durch Reinforcement Learning mit menschlichem Feedback (RLHF) f&#252;r den Dialog optimiert <TextLink reference="14"></TextLink>, <TextLink reference="15"></TextLink>, <TextLink reference="16"></TextLink>. 
LLM werden f&#252;r &#220;bersetzung und Inhaltsproduktion eingesetzt, automatisieren Literatur&#252;bersichten, identifizieren relevante Studien, extrahieren Schl&#252;sselergebnisse <TextLink reference="17"></TextLink>, <TextLink reference="18"></TextLink>, f&#246;rdern Informationsbeschaffung, Wissensentdeckung und bieten Entscheidungsunterst&#252;tzung <TextLink reference="19"></TextLink>, <TextLink reference="20"></TextLink>. </Pgraph><Pgraph>LLM erzielen beachtliche Pr&#252;fungsergebnisse: ChatGPT bestand die United States Medical Licensing Examination <TextLink reference="21"></TextLink> und &#252;bertraf die meisten Studierenden im deutschsprachigen Progress Test Medizin <TextLink reference="22"></TextLink>. LLM &#252;bertrafen die Leistungen der Studierenden im ersten und zweiten Studienjahr in Freitextpr&#252;fungen im klinischen Denken <TextLink reference="23"></TextLink>, erreichten in der Fach&#228;rzt&#42;innenpr&#252;fung f&#252;r HNO und Kopf-Hals-Chirurgie mit offenen Fragen 75&#37; <TextLink reference="24"></TextLink>, in einer simulierten schriftlichen neurochirurgischen Pr&#252;fung mit 500 Fragen 83&#37; <TextLink reference="25"></TextLink> und im Europ&#228;ischen Examen in Core Cardiology ca. 60&#37; <TextLink reference="26"></TextLink>. GPT-4 &#252;bertraf fr&#252;here Modelle wie GPT-3 und GPT-3.5 in allen untersuchten Bereichen deutlich und zeigt die rasante Entwicklung von LLM <TextLink reference="23"></TextLink>, <TextLink reference="24"></TextLink>, <TextLink reference="25"></TextLink>, <TextLink reference="27"></TextLink>, <TextLink reference="28"></TextLink>. GPT-3 lag in den unteren 10&#37; der Absolvent&#42;innen der Uniform Bar Examination in den USA, GPT-4 hingegen in den oberen 10&#37; <TextLink reference="15"></TextLink>, <TextLink reference="27"></TextLink>. 
</Pgraph><Pgraph>Es gibt auch Herausforderungen und Einschr&#228;nkungen: Umfang und Qualit&#228;t der zugrunde liegenden Trainingsdaten k&#246;nnen zu diskriminierenden, unfairen und falschen Inhalten f&#252;hren <TextLink reference="29"></TextLink>. Die Trainingsdaten sollten korrekt, vollst&#228;ndig, aktuell, repr&#228;sentativ und frei von historischen Verzerrungen sein, sind aber oft nicht vollst&#228;ndig bekannt und daher schwer zu beurteilen <TextLink reference="29"></TextLink>, <TextLink reference="30"></TextLink>. In sich schnell entwickelnden Bereichen k&#246;nnen Daten zudem nur begrenzt &#246;ffentlich verf&#252;gbar sein. LLM erzeugen dann plausibel klingende, aber inhaltlich falsche Antworten (&#8222;Halluzination&#8220;) <TextLink reference="31"></TextLink>. Bisherige Ma&#223;nahmen wie abfragende LLM (RAG) reduzieren fehlerhafte Ergebnisse, verhindern sie jedoch nicht vollst&#228;ndig <TextLink reference="17"></TextLink>, <TextLink reference="32"></TextLink>, <TextLink reference="33"></TextLink>. Eine sorgf&#228;ltige Pr&#252;fung der generierten Inhalte bleibt daher unverzichtbar <TextLink reference="20"></TextLink>, <TextLink reference="34"></TextLink>, <TextLink reference="35"></TextLink>. Eine weitere Schw&#228;che liegt in der mangelnden Transparenz der Entscheidungsprozesse von LLM. Diese Einschr&#228;nkungen haben eine breite Debatte &#252;ber Anwendungen, Auswirkungen und Risiken ausgel&#246;st <TextLink reference="23"></TextLink>, <TextLink reference="36"></TextLink>, <TextLink reference="37"></TextLink>, <TextLink reference="38"></TextLink>, <TextLink reference="39"></TextLink>. </Pgraph><Pgraph>Das Thema Pr&#252;fungen nimmt einen besonders prominenten Platz ein, insbesondere wenn die Pr&#252;fungssysteme auf schriftliche Formen ausgerichtet sind <TextLink reference="40"></TextLink>, <TextLink reference="41"></TextLink>. 
Mit der Zunahme von Online-Pr&#252;fungen w&#228;chst die Sorge vor akademischem Missbrauch, der durch Anonymit&#228;t, mangelnde Aufsicht und Zugang zu elektronischen Texten beg&#252;nstigt wird <TextLink reference="42"></TextLink>, <TextLink reference="43"></TextLink> und LLM verst&#228;rken die bereits bestehenden Herausforderungen <TextLink reference="40"></TextLink>, <TextLink reference="41"></TextLink>, insbesondere bei schriftlichen Arbeiten wie Hausarbeiten, Bachelor- oder Masterarbeiten sowie Dissertationen. Dies ist nicht nur inhaltlich, sondern auch rechtlich komplex. Folgende Aspekte sind zu ber&#252;cksichtigen:</Pgraph><SubHeadline>1. Universit&#228;re Satzungen, Hochschulgesetze der L&#228;nder, Leitlinien der Deutschen Forschungsgemeinschaft, Approbationsordnung f&#252;r &#196;rzte (AO)</SubHeadline><Pgraph>Hochschulen regeln Pr&#252;fungsanforderungen und -verfahren in Studien- und&#47;oder Pr&#252;fungsordnungen. Sie enthalten Bestimmungen &#252;ber Vers&#228;umnisse, Ordnungsverst&#246;&#223;e, Leistungsbewertung und Notenbildung und k&#246;nnen die Verwendung von Hilfsmitteln regeln und die Benutzung nicht zugelassener Hilfsmittel als T&#228;uschung definieren <TextLink reference="44"></TextLink>. Die AO (2002) legt die Entscheidung &#252;ber Konsequenzen von Ordnungsverst&#246;&#223;en oder Betrugsversuchen bei Pr&#252;fungen in das Ermessen des jeweils zust&#228;ndigen Landespr&#252;fungsamtes (vgl. &#167;&#167; 14 Abs. 5, 15 Abs. 6).</Pgraph><Pgraph>Die DFG-Leitlinien zur Sicherung guter wissenschaftlicher Praxis gelten f&#252;r alle Wissenschaftler&#42;innen, die an von der Deutschen Forschungsgemeinschaft (DFG) gef&#246;rderten Projekten beteiligt sind. Sie richten sich auch an Hochschulen und Forschungseinrichtungen in Deutschland, die diese Leitlinien in ihren eigenen Regelwerken umsetzen sollen <TextLink reference="45"></TextLink>.</Pgraph><SubHeadline>2. 
Urheberrechtsgesetz (UrhG)</SubHeadline><Pgraph>Das Urheberrecht basiert auf einer EU-Richtlinie, die von den Mitgliedsstaaten in nationales Recht umgesetzt wurde. Es sch&#252;tzt pers&#246;nliche geistige Sch&#246;pfungen (vgl. &#167; 2 Abs. 2 UrhG DE). Urheber&#42;in ist grunds&#228;tzlich immer eine nat&#252;rliche Person, also ein Mensch. Die Person hat das ausschlie&#223;liche Recht zur Verwendung ihres Werkes. Ob KI-generierter Output urheberrechtlichen Schutz genie&#223;t, h&#228;ngt im Wesentlichen davon ab, inwieweit sich der Mensch des Computers als technisches Hilfsmittel bedient (vgl. Dreier&#47;Schulze&#47;Schulze UrhG &#167; 2 Rn. 8) <TextLink reference="41"></TextLink>, <TextLink reference="42"></TextLink>.</Pgraph><SubHeadline>3. Datenschutz-Grundverordnung (DS-GVO)</SubHeadline><Pgraph>Die Datenschutz-Grundverordnung (DSGVO) gilt unmittelbar in allen EU-Mitgliedsstaaten. Sie regelt den Schutz personenbezogener Daten und den freien Datenverkehr, sch&#252;tzt die Grundrechte und Grundfreiheiten nat&#252;rlicher Personen. Die Datenverarbeitung muss rechtlich begr&#252;ndet sein (Art. 1, Art. 5 Abs. 1 lit. a, 6 Abs. 1 DS-GVO). Betroffene haben Rechte wie Informationspflichten der Verantwortlichen und das Recht auf Auskunft (Art. 13, 14, 15 DS-GVO). Die Verordnung gilt auch f&#252;r Unternehmen au&#223;erhalb der EU, die Daten von EU-B&#252;rgern verarbeiten (Marktortprinzip - Art. 3 Abs. 2 DS-GVO). </Pgraph><SubHeadline>4. KI-Verordnung (KI-VO)</SubHeadline><Pgraph>Die KI-Verordnung schafft einen Rechtsrahmen f&#252;r vertrauensw&#252;rdige KI. Sie soll Sicherheit, Transparenz, Nachvollziehbarkeit, Nicht-Diskriminierung und Umweltfreundlichkeit gew&#228;hrleisten. Sie wurde am 13. M&#228;rz 2024 vom Europ&#228;ischen Parlament verabschiedet und gilt ab 2026 in allen EU-Mitgliedstaaten. KI-Systeme werden in vier Risikoklassen eingeteilt: unannehmbares Risiko (verboten), hohes, geringes und minimales Risiko. 
&#8222;General-purpose AI systems&#8220; (GPAI), zu denen grunds&#228;tzlich viele LLM geh&#246;ren, werden zun&#228;chst als begrenztes Risiko eingestuft und m&#252;ssen Transparenz-, Dokumentationspflichten und eine Urheberrechtspolitik erf&#252;llen (Art. 52, 52c KI-VO) <TextLink reference="46"></TextLink>. GPAI mit systemischem Risiko unterliegen zus&#228;tzlichen Anforderungen und werden registriert <TextLink reference="47"></TextLink>, <TextLink reference="48"></TextLink>. Hochrisiko-KI-Systeme erfordern zudem Ma&#223;nahmen wie Aufsicht, Qualit&#228;ts- und Risikomanagement, umfangreiche Dokumentation und hohe Anforderungen an Datenqualit&#228;t und Systemsicherheit <TextLink reference="49"></TextLink>.</Pgraph></TextBlock>
    <TextBlock language="en" linked="yes" name="Consideration of the legal aspects in detail">
      <MainHeadline>Consideration of the legal aspects in detail</MainHeadline><SubHeadline>Significance for examination candidates  </SubHeadline><Pgraph>The DFG guidelines ensure academic integrity in teaching and research <TextLink reference="45"></TextLink>. University statutes address academic offences such as plagiarism and cheating.</Pgraph><Pgraph>Plagiarism is an offence against good scientific practice and copyright law when works are used without appropriate attribution <TextLink reference="38"></TextLink>, <TextLink reference="44"></TextLink>, <TextLink reference="50"></TextLink>. Plagiarism occurs when copyrighted texts are included in the product <TextLink reference="40"></TextLink>, <TextLink reference="41"></TextLink>, <TextLink reference="51"></TextLink>. This can occur, for example, with &#8220;shake &#38; paste&#8221; plagiarism, in which text passages from different sources are combined <TextLink reference="38"></TextLink>. The use of AI-generated text certainly harbours the risk of plagiarism. The responsibility for these offences (i.e. the plagiarism) lies with the persons who adopt such texts without attribution <TextLink reference="52"></TextLink>. The providers of the LLM are held accountable for any infringements of copyright law (ongoing legal proceedings in the USA against OpenAI and Google) <TextLink reference="53"></TextLink>.</Pgraph><Pgraph>Cheating is defined as presenting a work produced with unauthorised resources as one&#39;s own. This is against good academic practice and the study or examination regulations <TextLink reference="38"></TextLink>.</Pgraph><Pgraph>In the case of digital or paper-based examinations supervised (without aids) in the presence of an examiner, or in the case of decentralised digital examinations with proctoring and (in some cases) the use of secure browsers, the risk of cheating through the use of LLM is reduced <TextLink reference="40"></TextLink>, <TextLink reference="44"></TextLink>. 
Oral and application-based examinations, such as OSCE, are less susceptible to the use of LLM. The design of such examinations is crucial to ensure that other forms of assessment error, such as subjectivity, do not become sources of error <TextLink reference="40"></TextLink>, <TextLink reference="41"></TextLink>, <TextLink reference="54"></TextLink>, <TextLink reference="55"></TextLink>, <TextLink reference="56"></TextLink>. LLM are particularly problematic for written assessments that are completed independently and without supervision, such as homework <TextLink reference="40"></TextLink>, <TextLink reference="41"></TextLink>. The reliability of synthetic text recognition remains variable, although it has improved significantly <TextLink reference="57"></TextLink>, <TextLink reference="58"></TextLink>, <TextLink reference="59"></TextLink>, <TextLink reference="60"></TextLink>, <TextLink reference="61"></TextLink>. In addition, human judgement can be crucial. One university successfully rejected a Master&#8217;s application because the essay was of unexpectedly high quality for a Bachelor&#8217;s graduate and appeared to be AI-generated <TextLink reference="62"></TextLink>.   </Pgraph><SubHeadline>Significance for universities and State Examination Office </SubHeadline><SubHeadline2>Creation of examination content and tasks </SubHeadline2><Pgraph>The use of LLM to create examination questions has been demonstrated <TextLink reference="63"></TextLink>, <TextLink reference="64"></TextLink>. For example, ChatGPT has the potential to generate MCQ of comparable quality for final medical examinations in a short time <TextLink reference="64"></TextLink>, <TextLink reference="65"></TextLink>, <TextLink reference="66"></TextLink>. However, it has been observed that questions that query higher levels of learning objectives show certain limitations <TextLink reference="63"></TextLink>. In general, it is worth experimenting with different prompts. 
Prompt engineering represents a systematic approach to effective communication with LLM, exerting a significant influence on the resulting output <TextLink reference="15"></TextLink>, <TextLink reference="66"></TextLink>, <TextLink reference="67"></TextLink>. Tasks created by AI must undergo a review process, as even linguistically well-constructed and plausible products, such as examination questions, can be erroneous <TextLink reference="66"></TextLink>. In contrast, the risk of copyright infringement when utilising AI to create examination tasks is relatively low. This is due to the fact that, in accordance with Section 60a of the German Copyright Act (UrhG), up to 15&#37; of a work may be made available for non-commercial purposes for illustration in class and for examinations.</Pgraph><SubHeadline2>Evaluation of examination content and tasks </SubHeadline2><Pgraph>The criteria for examinations are generally laid down in higher education legislation and are specified in examination and study regulations. In the event that independent assessment by the examiner is envisaged, the examiner must assess the examination performance independently. Furthermore, a language model can only be used for support purposes. It should be noted that there is also no independent assessment if the assessment is not adopted exactly but is based solely on the AI-generated result <TextLink reference="41"></TextLink>. From a data protection perspective, such an assessment is generally in violation of the ban on automated decision-making (Art. 22 GDPR). If a performance assessment is carried out by an AI, it is necessary to assume that a high-risk AI system is being used in accordance with the AI Regulation <TextLink reference="49"></TextLink>.  </Pgraph></TextBlock>
    <TextBlock language="de" linked="yes" name="Betrachtung der rechtlichen Aspekte im Einzelnen">
      <MainHeadline>Betrachtung der rechtlichen Aspekte im Einzelnen</MainHeadline><SubHeadline>Bedeutung f&#252;r Pr&#252;fungskandidat&#42;innen </SubHeadline><Pgraph>Die DFG-Leitlinien sichern die wissenschaftliche Integrit&#228;t in Lehre und Forschung <TextLink reference="45"></TextLink>,  universit&#228;re Satzungen adressieren akademische Verst&#246;&#223;e wie Plagiat und T&#228;uschung.</Pgraph><Pgraph>Plagiate sind Verst&#246;&#223;e gegen die gute wissenschaftliche Praxis und das Urheberrecht, wenn Werke ohne angemessene Zuordnung verwendet werden <TextLink reference="38"></TextLink>, <TextLink reference="44"></TextLink>, <TextLink reference="50"></TextLink>. </Pgraph><Pgraph>Plagiatserkennungssoftware hat Schwierigkeiten bei der Identifizierung von KI-generierten Texten, denn generative Modelle erzeugen auch bei identischen Eingabeaufforderungen unterschiedliche Texte <TextLink reference="40"></TextLink>, <TextLink reference="41"></TextLink>, <TextLink reference="51"></TextLink>. Ein Plagiat liegt vor, wenn urheberrechtlich gesch&#252;tzte Texte im Produkt enthalten sind. Dies kann zum Beispiel bei &#8222;Shake &#38; Paste&#8220;-Plagiaten auftreten, bei denen Textpassagen aus verschiedenen Quellen kombiniert werden <TextLink reference="38"></TextLink>. Die Nutzung von KI-generiertem Text birgt durchaus das Risiko des Plagiierens. Die Verantwortung f&#252;r diese Verst&#246;&#223;e (i.e. die Plagiate) liegt bei den Personen, die solche Texte ohne Zuordnung &#252;bernehmen <TextLink reference="52"></TextLink>. F&#252;r Verst&#246;&#223;e gegen Urheberrechte sind die Anbieter der LLM verantwortlich (laufende Verfahren in den USA gegen OpenAI und Google) <TextLink reference="53"></TextLink>.</Pgraph><Pgraph>Eine T&#228;uschung liegt vor, wenn jemand eine mit unerlaubten Hilfsmitteln erstellte Leistung als eigenst&#228;ndig vorgibt. 
Dies verst&#246;&#223;t gegen die gute wissenschaftliche Praxis und Studien- oder Pr&#252;fungsordnungen <TextLink reference="38"></TextLink>. </Pgraph><Pgraph>Bei digitalen oder papierbasierten Pr&#252;fungen unter Aufsicht (ohne Hilfsmittel) in Pr&#228;senz, oder bei dezentralen digitalen Pr&#252;fungen mit Proctoring und (teilweise) dem Einsatz sicherer Browser,  ist die Gefahr einer T&#228;uschung durch den Einsatz von LLM reduziert <TextLink reference="40"></TextLink>, <TextLink reference="44"></TextLink>. M&#252;ndliche und anwendungsorientierte Pr&#252;fungen, wie OSCEs, reduzieren die M&#246;glichkeit der LLM-Nutzung. Wichtig ist hier die Konzeption, damit nicht andere Beurteilungsfehler wie Subjektivit&#228;t zu Fehlerquellen f&#252;hren <TextLink reference="40"></TextLink>, <TextLink reference="41"></TextLink>, <TextLink reference="54"></TextLink>, <TextLink reference="55"></TextLink>, <TextLink reference="56"></TextLink>. Problematisch sind LLM vor allem f&#252;r schriftliche Pr&#252;fungen, die selbstst&#228;ndig und ohne Aufsicht absolviert werden, wie beispielsweise Hausarbeiten <TextLink reference="40"></TextLink>, <TextLink reference="41"></TextLink>. Die Erkennung von synthetischem Text ist noch unterschiedlich zuverl&#228;ssig, hat sich aber bereits deutlich verbessert <TextLink reference="57"></TextLink>, <TextLink reference="58"></TextLink>, <TextLink reference="59"></TextLink>, <TextLink reference="60"></TextLink>, <TextLink reference="61"></TextLink>. Auch menschliche Einsch&#228;tzungen k&#246;nnen ausschlaggebend sein. Eine Universit&#228;t lehnte erfolgreich eine Masterbewerbung ab, da der Essay unerwartet hochwertig f&#252;r einen Bachelorabsolventen erschien und augenscheinlich KI-generiert war <TextLink reference="62"></TextLink>. 
</Pgraph><SubHeadline>Bedeutung f&#252;r Hochschulen und LP&#196;</SubHeadline><SubHeadline2>Erstellung von Pr&#252;fungsinhalten und -aufgaben</SubHeadline2><Pgraph>LLM wurden zum Erstellen von Pr&#252;fungsfragen eingesetzt <TextLink reference="63"></TextLink>, <TextLink reference="64"></TextLink>. F&#252;r Wissensfragen hat z.B. ChatGPT das Potenzial, in kurzer Zeit MCQ von vergleichbarer Qualit&#228;t f&#252;r medizinische Abschlusspr&#252;fungen zu generieren <TextLink reference="64"></TextLink>, <TextLink reference="65"></TextLink>, <TextLink reference="66"></TextLink>. Fragen, die h&#246;here Lernzielebenen abfragen, zeigen dagegen gewisse Einschr&#228;nkungen <TextLink reference="63"></TextLink>. Grunds&#228;tzlich lohnt sich das Experimentieren mit unterschiedlichen Eingabeaufforderungen (&#8222;Prompts&#8220;). Prompt-Engineering ist ein systematischer Ansatz zur effektiven Kommunikation mit LLM mit gro&#223;em Einfluss auf das Ergebnis <TextLink reference="15"></TextLink>, <TextLink reference="66"></TextLink>, <TextLink reference="67"></TextLink>. Durch KI erstellte Aufgaben m&#252;ssen in einem Reviewprozess &#252;berpr&#252;ft werden, denn auch sprachlich gut und plausibel formulierte Produkte, z.B. Pr&#252;fungsfragen, k&#246;nnen fehlerhaft sein <TextLink reference="66"></TextLink>. Das Risiko einer Verletzung von Urheberrechten beim Einsatz von KI zur Erstellung von Pr&#252;fungsaufgaben ist dagegen eher gering, denn nach &#167; 60a UrhG d&#252;rfen bis zu 15&#37; eines Werkes f&#252;r nicht-kommerzielle Zwecke zur Veranschaulichung im Unterricht und f&#252;r Pr&#252;fungen zug&#228;nglich gemacht werden.</Pgraph><SubHeadline2>Bewertung von Pr&#252;fungsinhalten und -aufgaben</SubHeadline2><Pgraph>Bewertungskriterien f&#252;r Pr&#252;fungsleistungen sind grunds&#228;tzlich in Hochschulgesetzen festgelegt und werden in Pr&#252;fungs- und Studienordnungen konkretisiert. 
Ist eine eigenst&#228;ndige Bewertung durch die pr&#252;fende Person vorgesehen, muss diese die Pr&#252;fungsleistung eigenst&#228;ndig w&#252;rdigen. Ein Sprachmodell kann nur unterst&#252;tzend verwendet werden. Eine eigenst&#228;ndige Bewertung fehlt auch dann, wenn die Bewertung nicht exakt &#252;bernommen, aber allein auf Grundlage des KI-generierten Ergebnisses erfolgt <TextLink reference="41"></TextLink>. Aus datenschutzrechtlicher Sicht verst&#246;&#223;t eine solche Bewertung grunds&#228;tzlich gegen das Verbot der automatisierten Entscheidungsfindung (Art. 22 DSGVO). Erfolgt eine Leistungsbeurteilung durch eine KI, ist gem&#228;&#223; KI-VO zudem von einem Hochrisiko-KI-System auszugehen <TextLink reference="49"></TextLink>.</Pgraph></TextBlock>
    <TextBlock language="en" linked="yes" name="Options for action">
      <MainHeadline>Options for action</MainHeadline><Pgraph>LLM represent a challenge, but also an opportunity for examination procedures. Faculties can react to AI developments without necessarily reverting to restrictive formats <TextLink reference="41"></TextLink>. A general ban on AI applications is difficult to implement and hardly relevant; algorithms are already integrated into existing systems such as browsers or word processing programmes <TextLink reference="41"></TextLink>, <TextLink reference="68"></TextLink>. It may therefore make sense to specifically authorise or integrate AI applications.  </Pgraph><SubHeadline>Adaptation of regulations </SubHeadline><Pgraph>Framework conditions should define under which conditions or for which purposes the use is legitimate and authorised and in which areas the use is prohibited <TextLink reference="41"></TextLink>. Even if monitoring may be difficult, a clear explanation increases the binding nature and clarifies the consequences of rule violations, including in declarations of independence. Without these restrictions, no violation of the examination rules can be assumed <TextLink reference="28"></TextLink>, <TextLink reference="41"></TextLink>, <TextLink reference="44"></TextLink>, <TextLink reference="52"></TextLink>, <TextLink reference="62"></TextLink>. Students should be aware that they bear responsibility for errors such as copyright infringements <TextLink reference="28"></TextLink>, <TextLink reference="41"></TextLink>, <TextLink reference="52"></TextLink> (see attachment 1 <AttachmentLink attachmentNo="1"/> for sources of sample texts and checklists).  </Pgraph><SubHeadline>Adaptation of assessments </SubHeadline><Pgraph>Evaluation criteria can be adapted for existing assessments. 
The critical use of sources and positioning in the specialised discourse could be weighted more heavily, linguistic correctness and expression could lose importance <TextLink reference="41"></TextLink>, <TextLink reference="52"></TextLink>. LLM still produce incorrect or unweighted source references <TextLink reference="17"></TextLink>, <TextLink reference="69"></TextLink>. A review would therefore appear to be useful. </Pgraph><SubHeadline>Adaptation of assessment programmes </SubHeadline><Pgraph>The motivation to cheat can be reduced by changing the assessment programmes. Intrinsically motivated, performance-orientated students cheat less often than extrinsically motivated students who primarily want to pass <TextLink reference="43"></TextLink>, <TextLink reference="70"></TextLink>, <TextLink reference="71"></TextLink> or are stressed <TextLink reference="69"></TextLink>. One approach could be to reduce the stakes of individual examinations and at the same time increase the relevance of the content. Assessments should relate to real experiences and knowledge and therefore be authentic. Formats such as MC questions can certainly be used <TextLink reference="41"></TextLink>, <TextLink reference="52"></TextLink>, <TextLink reference="72"></TextLink>, <TextLink reference="73"></TextLink>. A &#8220;moral anchor&#8221; and moral awareness can reduce cheating, promoted by exemplary teachers and training in self-awareness, ethics and decision-making <TextLink reference="43"></TextLink>, <TextLink reference="68"></TextLink>. </Pgraph><SubHeadline>LLM as an examination subject, LLM as a learning aid  </SubHeadline><Pgraph>In the spirit of &#8220;AI literacy&#8221;, teachers should view LLM as a supplement and undergo continuous further training. 
In addition to knowledge about AI, the application, such as prompt generation, and the critical evaluation or validation of AI-generated texts could also be tested <TextLink reference="18"></TextLink>, <TextLink reference="67"></TextLink>, <TextLink reference="74"></TextLink>, <TextLink reference="75"></TextLink>. </Pgraph><Pgraph>LLM can identify knowledge gaps in formative testing environments through thematic text analysis of assessment data and provide individualised, timely and continuous feedback, similar to a constantly available tutor <TextLink reference="74"></TextLink>, <TextLink reference="76"></TextLink>. </Pgraph></TextBlock>
    <TextBlock language="de" linked="yes" name="Handlungsm&#246;glichkeiten">
      <MainHeadline>Handlungsm&#246;glichkeiten</MainHeadline><Pgraph>LLM stellen eine Herausforderung, aber auch eine Chance f&#252;r Pr&#252;fungsabl&#228;ufe dar. Fakult&#228;ten k&#246;nnen auf KI-Entwicklungen reagieren, ohne notwendigerweise zu restriktiven Formaten zur&#252;ckzukehren <TextLink reference="41"></TextLink>. Ein generelles Verbot von KI-Anwendungen ist schwer umsetzbar und kaum sachdienlich, Algorithmen sind bereits jetzt in bestehende Systeme, wie Browser oder Textverarbeitungsprogramme integriert <TextLink reference="41"></TextLink>, <TextLink reference="68"></TextLink>. Somit kann es sinnvoll sein, KI-Anwendungen gezielt zu erlauben oder zu integrieren. </Pgraph><SubHeadline>Anpassung von Ordnungen</SubHeadline><Pgraph>Rahmenbedingungen sollten definieren, unter welchen Bedingungen oder f&#252;r welche Zwecke der Einsatz legitim und freigegeben und in welchen Bereichen der Nutzen untersagt ist <TextLink reference="41"></TextLink>. Auch wenn die Kontrolle unter Umst&#228;nden schwierig ist, erh&#246;ht eine klare Erl&#228;uterung die Verbindlichkeit und verdeutlicht die Konsequenzen bei Regelverst&#246;&#223;en, auch in Eigenst&#228;ndigkeitserkl&#228;rungen. Ohne diese Einschr&#228;nkungen kann kein Versto&#223; gegen die Pr&#252;fungsregeln angenommen werden <TextLink reference="28"></TextLink>, <TextLink reference="41"></TextLink>, <TextLink reference="44"></TextLink>, <TextLink reference="52"></TextLink>, <TextLink reference="62"></TextLink>. Studierenden sollte bewusst sein, dass sie die Verantwortung f&#252;r Fehler, wie z.B. Urheberrechtsverletzungen tragen <TextLink reference="28"></TextLink>, <TextLink reference="41"></TextLink>, <TextLink reference="52"></TextLink> (siehe Anhang 1 <AttachmentLink attachmentNo="1"/> f&#252;r Quellen zu Mustertexten und Checklisten). </Pgraph><SubHeadline>Anpassung von Pr&#252;fungen</SubHeadline><Pgraph>Bei bestehenden Pr&#252;fungen k&#246;nnen Bewertungskriterien angepasst werden. 
Der kritische Umgang mit Quellen und die Positionierung im Fachdiskurs k&#246;nnten st&#228;rker gewichtet werden, sprachliche Korrektheit und Ausdruck an Bedeutung verlieren <TextLink reference="41"></TextLink>, <TextLink reference="52"></TextLink>. Noch generieren LLM auch fehlerhafte oder ungewichtete Quellenangaben <TextLink reference="17"></TextLink>, <TextLink reference="69"></TextLink>, eine &#220;berpr&#252;fung scheint daher sinnvoll.</Pgraph><SubHeadline>Anpassung von Pr&#252;fungsprogrammen</SubHeadline><Pgraph>Durch eine Ver&#228;nderung der Pr&#252;fungen kann die Motivation zum T&#228;uschen gesenkt werden. Intrinsisch motivierte, leistungsorientierte Studierende t&#228;uschen seltener als extrinsisch motivierte, die vor allem bestehen wollen <TextLink reference="43"></TextLink>, <TextLink reference="70"></TextLink>, <TextLink reference="71"></TextLink> oder gestresst sind <TextLink reference="69"></TextLink>. Als Ansatz k&#246;nnten die Anforderungen der Einzelpr&#252;fungen reduziert und gleichzeitig die inhaltliche Relevanz gesteigert werden. Pr&#252;fungen sollten einen Bezug zu realen Erfahrungen und Erkenntnissen aufweisen und somit authentisch sein. Formate, wie MC-Fragen k&#246;nnen durchaus eingesetzt werden <TextLink reference="41"></TextLink>, <TextLink reference="52"></TextLink>, <TextLink reference="72"></TextLink>, <TextLink reference="73"></TextLink>. Ein &#8222;moralischer Anker&#8220; und ein moralisches Bewusstsein k&#246;nnen T&#228;uschungen verringern, gef&#246;rdert durch vorbildliche Lehrende und Trainings in Selbstbewusstsein, Ethik und Entscheidungsfindung <TextLink reference="43"></TextLink>, <TextLink reference="69"></TextLink>.</Pgraph><SubHeadline>LLM als Pr&#252;fungsgegenstand, LLM als Lernhilfe </SubHeadline><Pgraph>Im Sinne der &#8222;AI-Literacy&#8220; sollten Lehrende LLM als Erg&#228;nzung betrachten und sich kontinuierlich weiterbilden. Neben Wissen &#252;ber KI k&#246;nnte auch die Anwendung, wie z.B. 
die Prompt-Generierung, und die kritische Bewertung oder Validierung von KI-generierten Texten gepr&#252;ft werden <TextLink reference="18"></TextLink>, <TextLink reference="67"></TextLink>, <TextLink reference="74"></TextLink>, <TextLink reference="75"></TextLink>.</Pgraph><Pgraph>LLM k&#246;nnen in formativen Testumgebungen Wissensl&#252;cken durch eine thematische Textanalyse von Bewertungsdaten identifizieren und individuelles, zeitnahes und kontinuierliches Feedback bereitstellen, &#228;hnlich einem st&#228;ndig verf&#252;gbaren Tutor <TextLink reference="74"></TextLink>, <TextLink reference="76"></TextLink>.</Pgraph></TextBlock>
    <TextBlock language="en" linked="yes" name="Limitations">
      <MainHeadline>Limitations</MainHeadline><Pgraph>The field of GAI is dynamic and developing rapidly. The performance capabilities and limitations mentioned could soon become obsolete. However, all developments must be in line with the applicable legal framework, including UrhG, DS-GVO and KI-VO. In the DACH region, this applies to Germany and Austria. For Switzerland, the legal framework is not known to the authors and must be checked before using GAIs.</Pgraph></TextBlock>
    <TextBlock language="de" linked="yes" name="Limitationen">
      <MainHeadline>Limitationen</MainHeadline><Pgraph>Der Bereich der GKI ist dynamisch und entwickelt sich rasant. Angesprochene Leistungsf&#228;higkeiten und Limitationen k&#246;nnten bald veraltet sein. Alle Entwicklungen m&#252;ssen jedoch im Einklang mit dem geltenden Rechtsrahmen stehen, einschlie&#223;lich UrhG, DS-GVO und KI-VO. Dies gilt in der DACH-Region f&#252;r Deutschland und &#214;sterreich. F&#252;r die Schweiz sind die rechtlichen Rahmenbedingungen den Autor&#42;innen nicht bekannt und m&#252;ssen vor dem Einsatz von GKI gepr&#252;ft werden.</Pgraph></TextBlock>
    <TextBlock language="en" linked="yes" name="Acknowledgements">
      <MainHeadline>Acknowledgements</MainHeadline><Pgraph>We would like to thank Daniel Bauer, Daniel Tolks and Katharina von der Wense for their critical reading and expert advice.</Pgraph></TextBlock>
    <TextBlock language="de" linked="yes" name="Danksagung">
      <MainHeadline>Danksagung</MainHeadline><Pgraph>Wir bedanken uns bei Daniel Bauer, Daniel Tolks und Katharina von der Wense f&#252;r das kritische Durchlesen und die fachlichen Hinweise.</Pgraph></TextBlock>
    <TextBlock language="en" linked="yes" name="Notes">
      <MainHeadline>Notes</MainHeadline><SubHeadline>Translation</SubHeadline><Pgraph>DeepL was used for English translation. For editing, DeepL, ChatGPT and Copilot were used (prompts: &#8220;Summarise the following text&#8221; and &#8220;Improve the following text&#8221;).</Pgraph><SubHeadline>Authors&#8217; ORCIDs</SubHeadline><Pgraph><UnorderedList><ListItem level="1">Maren M&#228;rz: &#91;<Hyperlink href="https:&#47;&#47;orcid.org&#47;0000-0002-2661-5076">0000-0002-2661-5076</Hyperlink>&#93;</ListItem><ListItem level="1">Monika Himmelbauer: &#91;<Hyperlink href="https:&#47;&#47;orcid.org&#47;0000-0001-5516-1993">0000-0001-5516-1993</Hyperlink>&#93;</ListItem><ListItem level="1">Alexander Oksche: &#91;<Hyperlink href="https:&#47;&#47;orcid.org&#47;0000-0003-4592-1770">0000-0003-4592-1770</Hyperlink>&#93;</ListItem></UnorderedList></Pgraph></TextBlock>
    <TextBlock language="de" linked="yes" name="Anmerkungen">
      <MainHeadline>Anmerkungen</MainHeadline><SubHeadline>&#220;bersetzung</SubHeadline><Pgraph>F&#252;r die englische &#220;bersetzung wurde DeepL genutzt. Zur Editierung wurden DeepL, ChatGPT und Copilot genutzt (Prompts: &#8222;Fasse folgenden Text zusammen&#8220; und &#8222;Verbessere folgenden Text&#8220;).</Pgraph><SubHeadline>ORCIDs der Autor&#42;innen</SubHeadline><Pgraph><UnorderedList><ListItem level="1">Maren M&#228;rz: &#91;<Hyperlink href="https:&#47;&#47;orcid.org&#47;0000-0002-2661-5076">0000-0002-2661-5076</Hyperlink>&#93;</ListItem><ListItem level="1">Monika Himmelbauer: &#91;<Hyperlink href="https:&#47;&#47;orcid.org&#47;0000-0001-5516-1993">0000-0001-5516-1993</Hyperlink>&#93;</ListItem><ListItem level="1">Alexander Oksche: &#91;<Hyperlink href="https:&#47;&#47;orcid.org&#47;0000-0003-4592-1770">0000-0003-4592-1770</Hyperlink>&#93;</ListItem></UnorderedList></Pgraph></TextBlock>
    <TextBlock language="en" linked="yes" name="Competing interests">
      <MainHeadline>Competing interests</MainHeadline><Pgraph>The authors declare that they have no competing interests. </Pgraph></TextBlock>
    <TextBlock language="de" linked="yes" name="Interessenkonflikt">
      <MainHeadline>Interessenkonflikt</MainHeadline><Pgraph>Die Autor&#42;innen erkl&#228;ren, dass sie keinen Interessenkonflikt im Zusammenhang mit diesem Artikel haben.</Pgraph></TextBlock>
    <References linked="yes">
      <Reference refNo="1">
        <RefAuthor>Majumdar D</RefAuthor>
        <RefAuthor>Banerji PK</RefAuthor>
        <RefAuthor>Chakrabarti S</RefAuthor>
        <RefTitle>Disruptive technology and disruptive innovation: ignore at your peril&#33;</RefTitle>
        <RefYear>2018</RefYear>
        <RefJournal>Technol Anal Strateg Manag</RefJournal>
        <RefPage>1247-1255</RefPage>
        <RefTotal>Majumdar D, Banerji PK, Chakrabarti S. Disruptive technology and disruptive innovation: ignore at your peril&#33; Technol Anal Strateg Manag. 2018;30(11):1247-1255. DOI: 10.1080&#47;09537325.2018.1523384</RefTotal>
        <RefLink>https:&#47;&#47;doi.org&#47;10.1080&#47;09537325.2018.1523384</RefLink>
      </Reference>
      <Reference refNo="2">
        <RefAuthor>Clusmann J</RefAuthor>
        <RefAuthor>Kolbinger FR</RefAuthor>
        <RefAuthor>Muti HS</RefAuthor>
        <RefAuthor>Carrero ZI</RefAuthor>
        <RefAuthor>Eckardt JN</RefAuthor>
        <RefAuthor>Laleh NG</RefAuthor>
        <RefAuthor>L&#246;ffler CM</RefAuthor>
        <RefAuthor>Schwarzkopf SC</RefAuthor>
        <RefAuthor>Unger M</RefAuthor>
        <RefAuthor>Veldhuizen GP</RefAuthor>
        <RefAuthor>Wagner SJ</RefAuthor>
        <RefAuthor>Kather JN</RefAuthor>
        <RefTitle>The future landscape of large language models in medicine</RefTitle>
        <RefYear>2023</RefYear>
        <RefJournal>Commun Med (Lond)</RefJournal>
        <RefPage>141</RefPage>
        <RefTotal>Clusmann J, Kolbinger FR, Muti HS, Carrero ZI, Eckardt JN, Laleh NG, L&#246;ffler CM, Schwarzkopf SC, Unger M, Veldhuizen GP, Wagner SJ, Kather JN. The future landscape of large language models in medicine. Commun Med (Lond). 2023;3(1):141. DOI: 10.1038&#47;s43856-023-00370-1</RefTotal>
        <RefLink>https:&#47;&#47;doi.org&#47;10.1038&#47;s43856-023-00370-1</RefLink>
      </Reference>
      <Reference refNo="3">
        <RefAuthor>Webster P</RefAuthor>
        <RefTitle>Six ways large language models are changing healthcare</RefTitle>
        <RefYear>2023</RefYear>
        <RefJournal>Nat Med</RefJournal>
        <RefPage>2969-2971</RefPage>
        <RefTotal>Webster P. Six ways large language models are changing healthcare. Nat Med. 2023;29(12):2969-2971. DOI: 10.1038&#47;s41591-023-02700-1</RefTotal>
        <RefLink>https:&#47;&#47;doi.org&#47;10.1038&#47;s41591-023-02700-1</RefLink>
      </Reference>
      <Reference refNo="4">
        <RefAuthor>Singhal K</RefAuthor>
        <RefAuthor>Azizi S</RefAuthor>
        <RefAuthor>Tu T</RefAuthor>
        <RefAuthor>Mahdavi SS</RefAuthor>
        <RefAuthor>Wei J</RefAuthor>
        <RefAuthor>Chung HW</RefAuthor>
        <RefAuthor>Scales N</RefAuthor>
        <RefAuthor>Tanwani A</RefAuthor>
        <RefAuthor>Cole-Lewis H</RefAuthor>
        <RefAuthor>Pfohl S</RefAuthor>
        <RefAuthor>Payne P</RefAuthor>
        <RefAuthor>Seneviratne M</RefAuthor>
        <RefAuthor>Gamble P</RefAuthor>
        <RefAuthor>Kelly C</RefAuthor>
        <RefAuthor>Babiker A</RefAuthor>
        <RefAuthor>Sch&#228;rli N</RefAuthor>
        <RefAuthor>Chowdhery A</RefAuthor>
        <RefAuthor>Mansfield P</RefAuthor>
        <RefAuthor>Demner-Fushman D</RefAuthor>
        <RefAuthor>Arcas BA</RefAuthor>
        <RefAuthor>Webster D</RefAuthor>
        <RefAuthor>Corrado GS</RefAuthor>
        <RefAuthor>Matias Y</RefAuthor>
        <RefAuthor>Chou K</RefAuthor>
        <RefAuthor>Gottweis J</RefAuthor>
        <RefAuthor>Tomasev N</RefAuthor>
        <RefAuthor>Liu Y</RefAuthor>
        <RefAuthor>Rajkomar A</RefAuthor>
        <RefAuthor>Barral J</RefAuthor>
        <RefAuthor>Semturs C</RefAuthor>
        <RefAuthor>Karthikesalingam A</RefAuthor>
        <RefAuthor>Natarajan V</RefAuthor>
        <RefTitle>Large language models encode clinical knowledge</RefTitle>
        <RefYear>2023</RefYear>
        <RefJournal>Nature</RefJournal>
        <RefPage>172-180</RefPage>
        <RefTotal>Singhal K, Azizi S, Tu T, Mahdavi SS, Wei J, Chung HW, Scales N, Tanwani A, Cole-Lewis H, Pfohl S, Payne P, Seneviratne M, Gamble P, Kelly C, Babiker A, Sch&#228;rli N, Chowdhery A, Mansfield P, Demner-Fushman D, Arcas BA, Webster D, Corrado GS, Matias Y, Chou K, Gottweis J, Tomasev N, Liu Y, Rajkomar A, Barral J, Semturs C, Karthikesalingam A, Natarajan V. Large language models encode clinical knowledge. Nature. 2023;620(7972):172-180. DOI: 10.1038&#47;s41586-023-06291-2</RefTotal>
        <RefLink>https:&#47;&#47;doi.org&#47;10.1038&#47;s41586-023-06291-2</RefLink>
      </Reference>
      <Reference refNo="5">
        <RefAuthor>Thirunavukarasu AJ</RefAuthor>
        <RefAuthor>Ting DSJ</RefAuthor>
        <RefAuthor>Elangovan K</RefAuthor>
        <RefAuthor>Gutierrez L</RefAuthor>
        <RefAuthor>Tan TF</RefAuthor>
        <RefAuthor>Ting DS</RefAuthor>
        <RefTitle>Large language models in medicine</RefTitle>
        <RefYear>2023</RefYear>
        <RefJournal>Nat Med</RefJournal>
        <RefPage>1930-1940</RefPage>
        <RefTotal>Thirunavukarasu AJ, Ting DSJ, Elangovan K, Gutierrez L, Tan TF, Ting DS. Large language models in medicine. Nat Med. 2023;29(8):1930-1940. DOI: 10.1038&#47;s41591-023-02448-8</RefTotal>
        <RefLink>https:&#47;&#47;doi.org&#47;10.1038&#47;s41591-023-02448-8</RefLink>
      </Reference>
      <Reference refNo="6">
        <RefAuthor>Chi EA</RefAuthor>
        <RefAuthor>Chi G</RefAuthor>
        <RefAuthor>Tsui CT</RefAuthor>
        <RefAuthor>Jiang Y</RefAuthor>
        <RefAuthor>Jarr K</RefAuthor>
        <RefAuthor>Kulkarni CV</RefAuthor>
        <RefAuthor>Zhang M</RefAuthor>
        <RefAuthor>Long J</RefAuthor>
        <RefAuthor>Ng AY</RefAuthor>
        <RefAuthor>Rajpurkar P</RefAuthor>
        <RefAuthor>Sinha SR</RefAuthor>
        <RefTitle>Development and Validation of an Artificial Intelligence System to Optimize Clinician Review of Patient Records</RefTitle>
        <RefYear>2021</RefYear>
        <RefJournal>JAMA Netw Open</RefJournal>
        <RefPage>e2117391</RefPage>
        <RefTotal>Chi EA, Chi G, Tsui CT, Jiang Y, Jarr K, Kulkarni CV, Zhang M, Long J, Ng AY, Rajpurkar P, Sinha SR. Development and Validation of an Artificial Intelligence System to Optimize Clinician Review of Patient Records. JAMA Netw Open. 2021;4(7):e2117391. DOI: 10.1001&#47;jamanetworkopen.2021.17391</RefTotal>
        <RefLink>https:&#47;&#47;doi.org&#47;10.1001&#47;jamanetworkopen.2021.17391</RefLink>
      </Reference>
      <Reference refNo="7">
        <RefAuthor>Savage T</RefAuthor>
        <RefAuthor>Nayak A</RefAuthor>
        <RefAuthor>Gallo R</RefAuthor>
        <RefAuthor>Rangan E</RefAuthor>
        <RefAuthor>Chen JH</RefAuthor>
        <RefTitle>Diagnostic Reasoning Prompts Reveal the Potential for Large Language Model Interpretability in Medicine</RefTitle>
        <RefYear>2024</RefYear>
        <RefJournal>NPJ Digit Med</RefJournal>
        <RefPage>20</RefPage>
        <RefTotal>Savage T, Nayak A, Gallo R, Rangan E, Chen JH. Diagnostic Reasoning Prompts Reveal the Potential for Large Language Model Interpretability in Medicine. NPJ Digit Med. 2024;7(1):20. DOI: 10.1038&#47;s41746-024-01010-1</RefTotal>
        <RefLink>https:&#47;&#47;doi.org&#47;10.1038&#47;s41746-024-01010-1</RefLink>
      </Reference>
      <Reference refNo="8">
        <RefAuthor>Li R</RefAuthor>
        <RefAuthor>Kumar A</RefAuthor>
        <RefAuthor>Chen JH</RefAuthor>
        <RefTitle>How Chatbots and Large Language Model Artificial Intelligence Systems Will Reshape Modern Medicine: Fountain of Creativity or Pandora&#8217;s Box&#63;</RefTitle>
        <RefYear>2023</RefYear>
        <RefJournal>JAMA Intern Med</RefJournal>
        <RefPage>596-597</RefPage>
        <RefTotal>Li R, Kumar A, Chen JH. How Chatbots and Large Language Model Artificial Intelligence Systems Will Reshape Modern Medicine: Fountain of Creativity or Pandora&#8217;s Box&#63; JAMA Intern Med. 2023;183(6):596-597. DOI: 10.1001&#47;jamainternmed.2023.1835</RefTotal>
        <RefLink>https:&#47;&#47;doi.org&#47;10.1001&#47;jamainternmed.2023.1835</RefLink>
      </Reference>
      <Reference refNo="9">
        <RefAuthor>Rampton V</RefAuthor>
        <RefAuthor>Mittelman M</RefAuthor>
        <RefAuthor>Goldhahn J</RefAuthor>
        <RefTitle>Implications of artificial intelligence for medical education</RefTitle>
        <RefYear>2020</RefYear>
        <RefJournal>Lancet Digit Health</RefJournal>
        <RefPage>e111-e122</RefPage>
        <RefTotal>Rampton V, Mittelman M, Goldhahn J. Implications of artificial intelligence for medical education. Lancet Digit Health. 2020;2(3):e111-e122. DOI: 10.1016&#47;S2589-7500(20)30023-6</RefTotal>
        <RefLink>https:&#47;&#47;doi.org&#47;10.1016&#47;S2589-7500(20)30023-6</RefLink>
      </Reference>
      <Reference refNo="10">
        <RefAuthor>Bender EM</RefAuthor>
        <RefAuthor>Gebru T</RefAuthor>
        <RefAuthor>McMillan-Major A</RefAuthor>
        <RefAuthor>Shmitchell S</RefAuthor>
        <RefTitle>On the dangers of stochastic parrots: Can language models be too big&#63;</RefTitle>
        <RefYear>2021</RefYear>
        <RefBookTitle>FAccT 2021: Proceedings of the 2021 ACM Conference on Fairness, Accountability, and Transparency. March 2021</RefBookTitle>
        <RefPage>610-623</RefPage>
        <RefTotal>Bender EM, Gebru T, McMillan-Major A, Shmitchell S. On the dangers of stochastic parrots: Can language models be too big&#63; In: FAccT 2021: Proceedings of the 2021 ACM Conference on Fairness, Accountability, and Transparency. March 2021. p.610-623. DOI: 10.1145&#47;3442188.3445922</RefTotal>
        <RefLink>https:&#47;&#47;doi.org&#47;10.1145&#47;3442188.3445922</RefLink>
      </Reference>
      <Reference refNo="11">
        <RefAuthor>Bhavya B</RefAuthor>
        <RefAuthor>Xiong J</RefAuthor>
        <RefAuthor>Zhai C</RefAuthor>
        <RefTitle>Analogy Generation by Prompting Large Language Models: A Case Study of InstructGPT</RefTitle>
        <RefYear>2022</RefYear>
        <RefJournal>arXiv</RefJournal>
        <RefPage></RefPage>
        <RefTotal>Bhavya B, Xiong J, Zhai C. Analogy Generation by Prompting Large Language Models: A Case Study of InstructGPT. arXiv. 2022. DOI: 10.48550&#47;arXiv.2210.04186</RefTotal>
        <RefLink>https:&#47;&#47;doi.org&#47;10.48550&#47;arXiv.2210.04186</RefLink>
      </Reference>
      <Reference refNo="12">
        <RefAuthor>Brown TB</RefAuthor>
        <RefAuthor>Mann B</RefAuthor>
        <RefAuthor>Ryder N</RefAuthor>
        <RefAuthor>Subbiah M</RefAuthor>
        <RefAuthor>Kaplan J</RefAuthor>
        <RefAuthor>Dhariwal P</RefAuthor>
        <RefAuthor>Neelakantan A</RefAuthor>
        <RefAuthor>Shyam P</RefAuthor>
        <RefAuthor>Sastry G</RefAuthor>
        <RefAuthor>Askell A</RefAuthor>
        <RefAuthor>Agarwal S</RefAuthor>
        <RefAuthor>Herbert-Voss A</RefAuthor>
        <RefAuthor>Krueger G</RefAuthor>
        <RefAuthor>Henighan T</RefAuthor>
        <RefAuthor>Child R</RefAuthor>
        <RefAuthor>Ramesh A</RefAuthor>
        <RefAuthor>Ziegler DM</RefAuthor>
        <RefAuthor>Wu J</RefAuthor>
        <RefAuthor>Winter C</RefAuthor>
        <RefAuthor>Hesse C</RefAuthor>
        <RefAuthor>Chen M</RefAuthor>
        <RefAuthor>Sigler E</RefAuthor>
        <RefAuthor>Litwin M</RefAuthor>
        <RefAuthor>Gray S</RefAuthor>
        <RefAuthor>Chess B</RefAuthor>
        <RefAuthor>Clark J</RefAuthor>
        <RefAuthor>Berner C</RefAuthor>
        <RefAuthor>McCandlish S</RefAuthor>
        <RefAuthor>Radford A</RefAuthor>
        <RefAuthor>Sutskever I</RefAuthor>
        <RefAuthor>Amodei D</RefAuthor>
        <RefTitle>Language Models are Few-Shot Learners</RefTitle>
        <RefYear>2020</RefYear>
        <RefJournal>arXiv</RefJournal>
        <RefPage></RefPage>
        <RefTotal>Brown TB, Mann B, Ryder N, Subbiah M, Kaplan J, Dhariwal P, Neelakantan A, Shyam P, Sastry G, Askell A, Agarwal S, Herbert-Voss A, Krueger G, Henighan T, Child R, Ramesh A, Ziegler DM, Wu J, Winter C, Hesse C, Chen M, Sigler E, Litwin M, Gray S, Chess B, Clark J, Berner C, McCandlish S, Radford A, Sutskever I, Amodei D. Language Models are Few-Shot Learners. arXiv. 2020. DOI: 10.48550&#47;arXiv.2005.14165</RefTotal>
        <RefLink>https:&#47;&#47;doi.org&#47;10.48550&#47;arXiv.2005.14165</RefLink>
      </Reference>
      <Reference refNo="13">
        <RefAuthor>Vaswani A</RefAuthor>
        <RefAuthor>Shazeer N</RefAuthor>
        <RefAuthor>Parmar N</RefAuthor>
        <RefAuthor>Uszkoreit J</RefAuthor>
        <RefAuthor>Jones L</RefAuthor>
        <RefAuthor>Gomez AN</RefAuthor>
        <RefAuthor>Kaiser L</RefAuthor>
        <RefAuthor>Polosukhin I</RefAuthor>
        <RefTitle>Attention is all you need</RefTitle>
        <RefYear>2017</RefYear>
        <RefJournal>Adv Neural Inf Process Syst</RefJournal>
        <RefPage>5999-6009</RefPage>
        <RefTotal>Vaswani A, Shazeer N, Parmar N, Uszkoreit J, Jones L, Gomez AN, Kaiser L, Polosukhin I. Attention is all you need. Adv Neural Inf Process Syst. 2017:5999-6009. DOI: 10.48550&#47;arXiv.1706.03762</RefTotal>
        <RefLink>https:&#47;&#47;doi.org&#47;10.48550&#47;arXiv.1706.03762</RefLink>
      </Reference>
      <Reference refNo="14">
        <RefAuthor>Bai Y</RefAuthor>
        <RefAuthor>Jones A</RefAuthor>
        <RefAuthor>Ndousse K</RefAuthor>
        <RefAuthor>Askell A</RefAuthor>
        <RefAuthor>Chen A</RefAuthor>
        <RefAuthor>DasSarma N</RefAuthor>
        <RefAuthor>Drain D</RefAuthor>
        <RefAuthor>Fort S</RefAuthor>
        <RefAuthor>Ganguli D</RefAuthor>
        <RefAuthor>Henighan T</RefAuthor>
        <RefAuthor>Joseph N</RefAuthor>
        <RefAuthor>Kadavath S</RefAuthor>
        <RefAuthor>Kernion J</RefAuthor>
        <RefAuthor>Conerly T</RefAuthor>
        <RefAuthor>El-Showk S</RefAuthor>
        <RefAuthor>Elhage N</RefAuthor>
        <RefAuthor>Hatfield-Dodds Z</RefAuthor>
        <RefAuthor>Hernandez D</RefAuthor>
        <RefAuthor>Hume T</RefAuthor>
        <RefAuthor>Johnston S</RefAuthor>
        <RefAuthor>Kravec S</RefAuthor>
        <RefAuthor>Lovitt L</RefAuthor>
        <RefAuthor>Nanda N</RefAuthor>
        <RefAuthor>Olsson C</RefAuthor>
        <RefAuthor>Amodei D</RefAuthor>
        <RefAuthor>Brown T</RefAuthor>
        <RefAuthor>Clark J</RefAuthor>
        <RefAuthor>McCandlish S</RefAuthor>
        <RefAuthor>Olah C</RefAuthor>
        <RefAuthor>Mann B</RefAuthor>
        <RefAuthor>Kaplan J</RefAuthor>
        <RefTitle>Training a Helpful and Harmless Assistant with Reinforcement Learning from Human Feedback</RefTitle>
        <RefYear>2022</RefYear>
        <RefJournal>arXiv</RefJournal>
        <RefPage></RefPage>
        <RefTotal>Bai Y, Jones A, Ndousse K, Askell A, Chen A, DasSarma N, Drain D, Fort S, Ganguli D, Henighan T, Joseph N, Kadavath S, Kernion J, Conerly T, El-Showk S, Elhage N, Hatfield-Dodds Z, Hernandez D, Hume T, Johnston S, Kravec S, Lovitt L, Nanda N, Olsson C, Amodei D, Brown T, Clark J, McCandlish S, Olah C, Mann B, Kaplan J. Training a Helpful and Harmless Assistant with Reinforcement Learning from Human Feedback. arXiv. 2022. DOI: 10.48550&#47;arXiv.2204.05862</RefTotal>
        <RefLink>https:&#47;&#47;doi.org&#47;10.48550&#47;arXiv.2204.05862</RefLink>
      </Reference>
      <Reference refNo="15">
        <RefAuthor>Open AI</RefAuthor>
        <RefAuthor>Achiam J</RefAuthor>
        <RefAuthor>Adler S</RefAuthor>
        <RefAuthor>Agarwal S</RefAuthor>
        <RefAuthor></RefAuthor>
        <RefTitle>GPT-4 Technical Report</RefTitle>
        <RefYear>2023</RefYear>
        <RefJournal>arXiv</RefJournal>
        <RefPage>1-100</RefPage>
        <RefTotal>Open AI, Achiam J, Adler S, Agarwal S, et al. GPT-4 Technical Report. arXiv. 2023;4:1-100. DOI: 10.48550&#47;arXiv.2303.08774</RefTotal>
        <RefLink>https:&#47;&#47;doi.org&#47;10.48550&#47;arXiv.2303.08774</RefLink>
      </Reference>
      <Reference refNo="16">
        <RefAuthor>Ouyang L</RefAuthor>
        <RefAuthor>Wu J</RefAuthor>
        <RefAuthor>Jiang X</RefAuthor>
        <RefAuthor>Almeida D</RefAuthor>
        <RefAuthor>Wainwright CL</RefAuthor>
        <RefAuthor>Mishkin P</RefAuthor>
        <RefAuthor>Zhang C</RefAuthor>
        <RefAuthor>Agarwal S</RefAuthor>
        <RefAuthor>Slama K</RefAuthor>
        <RefAuthor>Ray A</RefAuthor>
        <RefAuthor>Schulman J</RefAuthor>
        <RefAuthor>Hilton J</RefAuthor>
        <RefAuthor>Kelton F</RefAuthor>
        <RefAuthor>Miller L</RefAuthor>
        <RefAuthor>Simens M</RefAuthor>
        <RefAuthor>Askell A</RefAuthor>
        <RefAuthor>Welinder P</RefAuthor>
        <RefAuthor>Christiano P</RefAuthor>
        <RefAuthor>Leike J</RefAuthor>
        <RefAuthor>Lowe R</RefAuthor>
        <RefTitle>Training language models to follow instructions with human feedback</RefTitle>
        <RefYear>2022</RefYear>
        <RefJournal>arXiv</RefJournal>
        <RefPage></RefPage>
        <RefTotal>Ouyang L, Wu J, Jiang X, Almeida D, Wainwright CL, Mishkin P, Zhang C, Agarwal S, Slama K, Ray A, Schulman J, Hilton J, Kelton F, Miller L, Simens M, Askell A, Welinder P, Christiano P, Leike J, Lowe R. Training language models to follow instructions with human feedback. arXiv. 2022. DOI: 10.48550&#47;arXiv.2203.02155</RefTotal>
        <RefLink>https:&#47;&#47;doi.org&#47;10.48550&#47;arXiv.2203.02155</RefLink>
      </Reference>
      <Reference refNo="17">
        <RefAuthor>Sallam M</RefAuthor>
        <RefTitle>ChatGPT Utility in Healthcare Education, Research, and Practice: Systematic Review on the Promising Perspectives and Valid Concerns</RefTitle>
        <RefYear>2023</RefYear>
        <RefJournal>Healthcare (Basel)</RefJournal>
        <RefPage>887</RefPage>
        <RefTotal>Sallam M. ChatGPT Utility in Healthcare Education, Research, and Practice: Systematic Review on the Promising Perspectives and Valid Concerns. Healthcare (Basel). 2023;11(6):887. DOI: 10.3390&#47;healthcare11060887</RefTotal>
        <RefLink>https:&#47;&#47;doi.org&#47;10.3390&#47;healthcare11060887</RefLink>
      </Reference>
      <Reference refNo="18">
        <RefAuthor>Meyer JG</RefAuthor>
        <RefAuthor>Urbanowicz RJ</RefAuthor>
        <RefAuthor>Martin PCN</RefAuthor>
        <RefAuthor>O&#8217;Connor K</RefAuthor>
        <RefAuthor>Li R</RefAuthor>
        <RefAuthor>Peng PC</RefAuthor>
        <RefAuthor>Bright TJ</RefAuthor>
        <RefAuthor>Tatonetti N</RefAuthor>
        <RefAuthor>Won KJ</RefAuthor>
        <RefAuthor>Gonzalez-Hernandez G</RefAuthor>
        <RefAuthor>Moore JH</RefAuthor>
        <RefTitle>ChatGPT and large language models in academia: opportunities and challenges</RefTitle>
        <RefYear>2023</RefYear>
        <RefJournal>BioData Min</RefJournal>
        <RefPage>20</RefPage>
        <RefTotal>Meyer JG, Urbanowicz RJ, Martin PCN, O&#8217;Connor K, Li R, Peng PC, Bright TJ, Tatonetti N, Won KJ, Gonzalez-Hernandez G, Moore JH. ChatGPT and large language models in academia: opportunities and challenges. BioData Min. 2023;16(1):20. DOI: 10.1186&#47;s13040-023-00339-9</RefTotal>
        <RefLink>https:&#47;&#47;doi.org&#47;10.1186&#47;s13040-023-00339-9</RefLink>
      </Reference>
      <Reference refNo="19">
        <RefAuthor>Thapa S</RefAuthor>
        <RefAuthor>Adhikari S</RefAuthor>
        <RefTitle>ChatGPT, Bard, and Large Language Models for Biomedical Research: Opportunities and Pitfalls</RefTitle>
        <RefYear>2023</RefYear>
        <RefJournal>Ann Biomed Eng</RefJournal>
        <RefPage>2647-2651</RefPage>
        <RefTotal>Thapa S, Adhikari S. ChatGPT, Bard, and Large Language Models for Biomedical Research: Opportunities and Pitfalls. Ann Biomed Eng. 2023;51(12):2647-2651. DOI: 10.1007&#47;s10439-023-03284-0</RefTotal>
        <RefLink>https:&#47;&#47;doi.org&#47;10.1007&#47;s10439-023-03284-0</RefLink>
      </Reference>
      <Reference refNo="20">
        <RefAuthor>Watkins R</RefAuthor>
        <RefTitle>Guidance for researchers and peer-reviewers on the ethical use of Large Language Models (LLMs) in scientific research workflows</RefTitle>
        <RefYear>2023</RefYear>
        <RefJournal>AI Ethics</RefJournal>
        <RefPage></RefPage>
        <RefTotal>Watkins R. Guidance for researchers and peer-reviewers on the ethical use of Large Language Models (LLMs) in scientific research workflows. AI Ethics. 2023. DOI: 10.1007&#47;s43681-023-00294-5</RefTotal>
        <RefLink>https:&#47;&#47;doi.org&#47;10.1007&#47;s43681-023-00294-5</RefLink>
      </Reference>
      <Reference refNo="21">
        <RefAuthor>Kung TH</RefAuthor>
        <RefAuthor>Cheatham M</RefAuthor>
        <RefAuthor>Medenilla A</RefAuthor>
        <RefAuthor>Sillos C</RefAuthor>
        <RefAuthor>De Leon L</RefAuthor>
        <RefAuthor>Elepa&#241;o C</RefAuthor>
        <RefAuthor>Madriaga M</RefAuthor>
        <RefAuthor>Aggabao R</RefAuthor>
        <RefAuthor>Diaz-Candido G</RefAuthor>
        <RefAuthor>Maningo J</RefAuthor>
        <RefAuthor>Tseng V</RefAuthor>
        <RefTitle>Performance of ChatGPT on USMLE: Potential for AI-assisted medical education using large language models</RefTitle>
        <RefYear>2023</RefYear>
        <RefJournal>PLOS Digit Heal</RefJournal>
        <RefPage>e0000198</RefPage>
        <RefTotal>Kung TH, Cheatham M, Medenilla A, Sillos C, De Leon L, Elepa&#241;o C, Madriaga M, Aggabao R, Diaz-Candido G, Maningo J, Tseng V. Performance of ChatGPT on USMLE: Potential for AI-assisted medical education using large language models. PLOS Digit Heal. 2023;2(2):e0000198. DOI: 10.1371&#47;journal.pdig.0000198</RefTotal>
        <RefLink>https:&#47;&#47;doi.org&#47;10.1371&#47;journal.pdig.0000198</RefLink>
      </Reference>
      <Reference refNo="22">
        <RefAuthor>Friederichs H</RefAuthor>
        <RefAuthor>Friederichs WJ</RefAuthor>
        <RefAuthor>M&#228;rz M</RefAuthor>
        <RefTitle>ChatGPT in medical school: how successful is AI in progress testing&#63;</RefTitle>
        <RefYear>2023</RefYear>
        <RefJournal>Med Educ Online</RefJournal>
        <RefPage>2220920</RefPage>
        <RefTotal>Friederichs H, Friederichs WJ, M&#228;rz M. ChatGPT in medical school: how successful is AI in progress testing&#63; Med Educ Online. 2023;28(1):2220920. DOI: 10.1080&#47;10872981.2023.2220920</RefTotal>
        <RefLink>https:&#47;&#47;doi.org&#47;10.1080&#47;10872981.2023.2220920</RefLink>
      </Reference>
      <Reference refNo="23">
        <RefAuthor>Strong E</RefAuthor>
        <RefAuthor>DiGiammarino A</RefAuthor>
        <RefAuthor>Weng Y</RefAuthor>
        <RefAuthor>Kumar A</RefAuthor>
        <RefAuthor>Hosamani P</RefAuthor>
        <RefAuthor>Hom J</RefAuthor>
        <RefAuthor>Chen JH</RefAuthor>
        <RefTitle>Chatbot vs Medical Student Performance on Free-Response Clinical Reasoning Examinations</RefTitle>
        <RefYear>2023</RefYear>
        <RefJournal>JAMA Intern Med</RefJournal>
        <RefPage>1028-1030</RefPage>
        <RefTotal>Strong E, DiGiammarino A, Weng Y, Kumar A, Hosamani P, Hom J, Chen JH. Chatbot vs Medical Student Performance on Free-Response Clinical Reasoning Examinations. JAMA Intern Med. 2023;183(9):1028-1030. DOI: 10.1001&#47;jamainternmed.2023.2909</RefTotal>
        <RefLink>https:&#47;&#47;doi.org&#47;10.1001&#47;jamainternmed.2023.2909</RefLink>
      </Reference>
      <Reference refNo="24">
        <RefAuthor>Long C</RefAuthor>
        <RefAuthor>Lowe K</RefAuthor>
        <RefAuthor>Santos A dos</RefAuthor>
        <RefAuthor>Zhang J</RefAuthor>
        <RefAuthor>Alanazi A</RefAuthor>
        <RefAuthor>O&#8217;Brien D</RefAuthor>
        <RefAuthor>Wright E</RefAuthor>
        <RefAuthor>Cote D</RefAuthor>
        <RefTitle>Evaluating ChatGPT-4 in Otolaryngology&#8211;Head and Neck Surgery Board Examination using the CVSA Model</RefTitle>
        <RefYear>2023</RefYear>
        <RefJournal>medRxiv</RefJournal>
        <RefPage></RefPage>
        <RefTotal>Long C, Lowe K, Santos A dos, Zhang J, Alanazi A, O&#8217;Brien D, Wright E, Cote D. Evaluating ChatGPT-4 in Otolaryngology&#8211;Head and Neck Surgery Board Examination using the CVSA Model. medRxiv. 2023. DOI: 10.1101&#47;2023.05.30.23290758</RefTotal>
        <RefLink>https:&#47;&#47;doi.org&#47;10.1101&#47;2023.05.30.23290758</RefLink>
      </Reference>
      <Reference refNo="25">
        <RefAuthor>Ali R</RefAuthor>
        <RefAuthor>Tang OY</RefAuthor>
        <RefAuthor>Connolly ID</RefAuthor>
        <RefAuthor>Zadnik Sullivan PL</RefAuthor>
        <RefAuthor>Shin JH</RefAuthor>
        <RefAuthor>Fridley JS</RefAuthor>
        <RefAuthor>Asaad WF</RefAuthor>
        <RefAuthor>Cielo D</RefAuthor>
        <RefAuthor>Oyeles AA</RefAuthor>
        <RefAuthor>Doberstein CE</RefAuthor>
        <RefAuthor>Gokaslan ZL</RefAuthor>
        <RefAuthor>Telfeian AE</RefAuthor>
        <RefTitle>Performance of ChatGPT and GPT-4 on Neurosurgery Written Board Examinations</RefTitle>
        <RefYear>2023</RefYear>
        <RefJournal>Neurosurgery</RefJournal>
        <RefPage>1353-1365</RefPage>
        <RefTotal>Ali R, Tang OY, Connolly ID, Zadnik Sullivan PL, Shin JH, Fridley JS, Asaad WF, Cielo D, Oyeles AA, Doberstein CE, Gokaslan ZL, Telfeian AE. Performance of ChatGPT and GPT-4 on Neurosurgery Written Board Examinations. Neurosurgery. 2023;93(6):1353-1365. DOI: 10.1227&#47;neu.0000000000002632 </RefTotal>
        <RefLink>https:&#47;&#47;doi.org&#47;10.1227&#47;neu.0000000000002632</RefLink>
      </Reference>
      <Reference refNo="26">
        <RefAuthor>Skalidis I</RefAuthor>
        <RefAuthor>Cagnina A</RefAuthor>
        <RefAuthor>Luangphiphat W</RefAuthor>
        <RefAuthor>Mahendiran T</RefAuthor>
        <RefAuthor>Muller O</RefAuthor>
        <RefAuthor>Abbe E</RefAuthor>
        <RefAuthor>Fournier S</RefAuthor>
        <RefTitle>ChatGPT takes on the European Exam in Core Cardiology: an artificial intelligence success story&#63;</RefTitle>
        <RefYear>2023</RefYear>
        <RefJournal>Eur Hear J Digit Heal</RefJournal>
        <RefPage>279-281</RefPage>
        <RefTotal>Skalidis I, Cagnina A, Luangphiphat W, Mahendiran T, Muller O, Abbe E, Fournier S. ChatGPT takes on the European Exam in Core Cardiology: an artificial intelligence success story&#63; Eur Hear J Digit Heal. 2023;4(3):279-281. DOI: 10.1093&#47;ehjdh&#47;ztad029</RefTotal>
        <RefLink>https:&#47;&#47;doi.org&#47;10.1093&#47;ehjdh&#47;ztad029</RefLink>
      </Reference>
      <Reference refNo="27">
        <RefAuthor>Katz DM</RefAuthor>
        <RefAuthor>Bommarito MJ</RefAuthor>
        <RefAuthor>Gao S</RefAuthor>
        <RefAuthor>Arredondo P</RefAuthor>
        <RefTitle>GPT-4 Passes the Bar Exam</RefTitle>
        <RefYear>2023</RefYear>
        <RefJournal>SSRN Electron J</RefJournal>
        <RefPage>1-35</RefPage>
        <RefTotal>Katz DM, Bommarito MJ, Gao S, Arredondo P. GPT-4 Passes the Bar Exam. SSRN Electron J. 2023:1-35. DOI: 10.2139&#47;ssrn.4389233</RefTotal>
        <RefLink>https:&#47;&#47;doi.org&#47;10.2139&#47;ssrn.4389233</RefLink>
      </Reference>
      <Reference refNo="28">
        <RefAuthor>FernUni</RefAuthor>
        <RefTitle></RefTitle>
        <RefYear>2023</RefYear>
        <RefBookTitle>A teacher&#8217;s guide to ChatGPT and remote assessments</RefBookTitle>
        <RefPage></RefPage>
        <RefTotal>FernUni. A teacher&#8217;s guide to ChatGPT and remote assessments. FernUni.ch. 2023.</RefTotal>
      </Reference>
      <Reference refNo="29">
        <RefAuthor>Neutatz F</RefAuthor>
        <RefAuthor>Abedjan Z</RefAuthor>
        <RefTitle>What is &#8220;Good&#8221; Training Data</RefTitle>
        <RefYear>2022</RefYear>
        <RefBookTitle>K&#252;nstliche Intelligenz Wie gelingt eine vertrauensw&#252;rdige Verwendung</RefBookTitle>
        <RefPage></RefPage>
        <RefTotal>Neutatz F, Abedjan Z. What is &#8220;Good&#8221; Training Data. In: Bundesministerium f&#252;r Umwelt, Naturschutz, nukleare Sicherheit und Verbraucherschutz, Rostalski F, editors. K&#252;nstliche Intelligenz Wie gelingt eine vertrauensw&#252;rdige Verwendung. T&#252;bingen: Mohr Siebeck; 2022. DOI: 10.1628&#47;978-3-16-161299-2</RefTotal>
        <RefLink>https:&#47;&#47;doi.org&#47;10.1628&#47;978-3-16-161299-2</RefLink>
      </Reference>
      <Reference refNo="30">
        <RefAuthor>Stoyanovich J</RefAuthor>
        <RefAuthor>Howe B</RefAuthor>
        <RefAuthor>Jagadish HV</RefAuthor>
        <RefTitle>Responsible data management</RefTitle>
        <RefYear>2020</RefYear>
        <RefJournal>Proc VLDB Endow</RefJournal>
        <RefPage>3474-3488</RefPage>
        <RefTotal>Stoyanovich J, Howe B, Jagadish HV. Responsible data management. Proc VLDB Endow. 2020;13(12):3474-3488. DOI: 10.14778&#47;3415478.3415570</RefTotal>
        <RefLink>https:&#47;&#47;doi.org&#47;10.14778&#47;3415478.3415570</RefLink>
      </Reference>
      <Reference refNo="31">
        <RefAuthor>Heaven WD</RefAuthor>
        <RefTitle>Geoffrey Hinton tells us why he&#8217;s now scared of the tech he helped build</RefTitle>
        <RefYear>2023</RefYear>
        <RefJournal>MIT Technol Rev</RefJournal>
        <RefPage></RefPage>
        <RefTotal>Heaven WD. Geoffrey Hinton tells us why he&#8217;s now scared of the tech he helped build. MIT Technol Rev. 2023. Zug&#228;nglich unter&#47;available from: https:&#47;&#47;www.technologyreview.com&#47;2023&#47;05&#47;02&#47;1072528&#47;geoffrey-hinton-google-why-scared-ai&#47;</RefTotal>
        <RefLink>https:&#47;&#47;www.technologyreview.com&#47;2023&#47;05&#47;02&#47;1072528&#47;geoffrey-hinton-google-why-scared-ai&#47;</RefLink>
      </Reference>
      <Reference refNo="32">
        <RefAuthor>Goodman RS</RefAuthor>
        <RefAuthor>Patrinely JR</RefAuthor>
        <RefAuthor>Stone CA</RefAuthor>
        <RefAuthor>Zimmerman E</RefAuthor>
        <RefAuthor>Donald RR</RefAuthor>
        <RefAuthor>Chang SS</RefAuthor>
        <RefAuthor>Berkowitz ST</RefAuthor>
        <RefAuthor>Finn AP</RefAuthor>
        <RefAuthor>Jahangir E</RefAuthor>
        <RefAuthor>Scoville EA</RefAuthor>
        <RefAuthor>Reese TX</RefAuthor>
        <RefAuthor>Friedmann DL</RefAuthor>
        <RefAuthor>Bastarache JA</RefAuthor>
        <RefAuthor>van der Heijden YF</RefAuthor>
        <RefAuthor>Wright JJ</RefAuthor>
        <RefAuthor>Ye F</RefAuthor>
        <RefAuthor>Carter N</RefAuthor>
        <RefAuthor>Alexander MR</RefAuthor>
        <RefAuthor>Choe JH</RefAuthor>
        <RefAuthor>Chastain CA</RefAuthor>
        <RefAuthor>Zic JA</RefAuthor>
        <RefAuthor>Horst SN</RefAuthor>
        <RefAuthor>Turker I</RefAuthor>
        <RefAuthor>Agarwal R</RefAuthor>
        <RefAuthor>Osmundson E</RefAuthor>
        <RefAuthor>Idrees K</RefAuthor>
        <RefAuthor>Kiernan CM</RefAuthor>
        <RefAuthor>Padmanabhan C</RefAuthor>
        <RefAuthor>Bailey CE</RefAuthor>
        <RefAuthor>Schlegel CE</RefAuthor>
        <RefAuthor>Chabless LB</RefAuthor>
        <RefAuthor>Gibson MK</RefAuthor>
        <RefAuthor>Osterman TJ</RefAuthor>
        <RefAuthor>Wheless LE</RefAuthor>
        <RefAuthor>Johnson DB</RefAuthor>
        <RefTitle>Accuracy and Reliability of Chatbot Responses to Physician Questions</RefTitle>
        <RefYear>2023</RefYear>
        <RefJournal>JAMA Netw Open</RefJournal>
        <RefPage>e2336483</RefPage>
        <RefTotal>Goodman RS, Patrinely JR, Stone CA, Zimmerman E, Donald RR, Chang SS, Berkowitz ST, Finn AP, Jahangir E, Scoville EA, Reese TX, Friedmann DL, Bastarache JA, van der Heijden YF, Wright JJ, Ye F, Carter N, Alexander MR, Choe JH, Chastain CA, Zic JA, Horst SN, Turker I, Agarwal R, Osmundson E, Idrees K, Kiernan CM, Padmanabhan C, Bailey CE, Schlegel CE, Chabless LB, Gibson MK, Osterman TJ, Wheless LE, Johnson DB. Accuracy and Reliability of Chatbot Responses to Physician Questions. JAMA Netw Open. 2023;6(10):e2336483. DOI: 10.1001&#47;jamanetworkopen.2023.36483</RefTotal>
        <RefLink>https:&#47;&#47;doi.org&#47;10.1001&#47;jamanetworkopen.2023.36483</RefLink>
      </Reference>
      <Reference refNo="33">
        <RefAuthor>George C</RefAuthor>
        <RefAuthor>Stuhlm&#252;ller A</RefAuthor>
        <RefTitle>Factored Verification: Detecting and Reducing Hallucination in Summaries of Academic Papers</RefTitle>
        <RefYear>2023</RefYear>
        <RefJournal>arXiv</RefJournal>
        <RefPage></RefPage>
        <RefTotal>George C, Stuhlm&#252;ller A. Factored Verification: Detecting and Reducing Hallucination in Summaries of Academic Papers. arXiv. 2023. DOI: 10.48550&#47;arXiv.2310.10627</RefTotal>
        <RefLink>https:&#47;&#47;doi.org&#47;10.48550&#47;arXiv.2310.10627</RefLink>
      </Reference>
      <Reference refNo="34">
        <RefAuthor>Huang J</RefAuthor>
        <RefAuthor>Chen X</RefAuthor>
        <RefAuthor>Mishra S</RefAuthor>
        <RefAuthor>Zheng HS</RefAuthor>
        <RefAuthor>Yu AW</RefAuthor>
        <RefAuthor>Song X</RefAuthor>
        <RefAuthor>Zhou D</RefAuthor>
        <RefTitle>Large Language Models Cannot Self-Correct Reasoning Yet</RefTitle>
        <RefYear>2023</RefYear>
        <RefJournal>arXiv</RefJournal>
        <RefPage>1-19</RefPage>
        <RefTotal>Huang J, Chen X, Mishra S, Zheng HS, Yu AW, Song X, Zhou D. Large Language Models Cannot Self-Correct Reasoning Yet. arXiv. 2023;1:1-19. DOI: 10.48550&#47;arXiv.2310.01798</RefTotal>
        <RefLink>https:&#47;&#47;doi.org&#47;10.48550&#47;arXiv.2310.01798</RefLink>
      </Reference>
      <Reference refNo="35">
        <RefAuthor>Shen Y</RefAuthor>
        <RefAuthor>Heacock L</RefAuthor>
        <RefAuthor>Elias J</RefAuthor>
        <RefAuthor>Hentel KD</RefAuthor>
        <RefAuthor>Reig B</RefAuthor>
        <RefAuthor>Shih G</RefAuthor>
        <RefAuthor>Moy L</RefAuthor>
        <RefTitle>ChatGPT and Other Large Language Models Are Double-edged Swords</RefTitle>
        <RefYear>2023</RefYear>
        <RefJournal>Radiology</RefJournal>
        <RefPage>e230163</RefPage>
        <RefTotal>Shen Y, Heacock L, Elias J, Hentel KD, Reig B, Shih G, Moy L. ChatGPT and Other Large Language Models Are Double-edged Swords. Radiology. 2023;307(2):e230163. DOI: 10.1148&#47;radiol.230163</RefTotal>
        <RefLink>https:&#47;&#47;doi.org&#47;10.1148&#47;radiol.230163</RefLink>
      </Reference>
      <Reference refNo="36">
        <RefAuthor>Topol EJ</RefAuthor>
        <RefTitle>High-performance medicine: the convergence of human and artificial intelligence</RefTitle>
        <RefYear>2019</RefYear>
        <RefJournal>Nat Med</RefJournal>
        <RefPage>44-56</RefPage>
        <RefTotal>Topol EJ. High-performance medicine: the convergence of human and artificial intelligence. Nat Med. 2019;25(1):44-56. DOI: 10.1038&#47;s41591-018-0300-7</RefTotal>
        <RefLink>https:&#47;&#47;doi.org&#47;10.1038&#47;s41591-018-0300-7</RefLink>
      </Reference>
      <Reference refNo="37">
        <RefAuthor>Cotton DRE</RefAuthor>
        <RefAuthor>Cotton PA</RefAuthor>
        <RefAuthor>Shipway JR</RefAuthor>
        <RefTitle>Chatting and cheating: Ensuring academic integrity in the era of ChatGPT</RefTitle>
        <RefYear>2023</RefYear>
        <RefJournal>Innov Educ Teach Int</RefJournal>
        <RefPage>1-12</RefPage>
        <RefTotal>Cotton DRE, Cotton PA, Shipway JR. Chatting and cheating: Ensuring academic integrity in the era of ChatGPT. Innov Educ Teach Int. 2023;00(00):1-12. DOI: 10.1080&#47;14703297.2023.2190148</RefTotal>
        <RefLink>https:&#47;&#47;doi.org&#47;10.1080&#47;14703297.2023.2190148</RefLink>
      </Reference>
      <Reference refNo="38">
        <RefAuthor>Georg-August-Universit&#228;t G&#246;ttingen</RefAuthor>
        <RefTitle></RefTitle>
        <RefYear>2021</RefYear>
        <RefBookTitle>Handreichung zum Umgang mit Plagiaten f&#252;r Lehrende an der Sozialwissenschaftlichen Fakult&#228;t der Georg-August-Universit&#228;t G&#246;ttingen</RefBookTitle>
        <RefPage>1-2</RefPage>
        <RefTotal>Georg-August-Universit&#228;t G&#246;ttingen. Handreichung zum Umgang mit Plagiaten f&#252;r Lehrende an der Sozialwissenschaftlichen Fakult&#228;t der Georg-August-Universit&#228;t G&#246;ttingen. G&#246;ttingen: Georg-August-Universit&#228;t G&#246;ttingen; 2021. p.1-2.</RefTotal>
      </Reference>
      <Reference refNo="39">
        <RefAuthor>Hochschulforum Digitalisierung</RefAuthor>
        <RefTitle></RefTitle>
        <RefYear>2023</RefYear>
        <RefBookTitle>ChatGPT im Hochschulkontext &#8211; eine kommentierte Linksammlung</RefBookTitle>
        <RefPage></RefPage>
        <RefTotal>Hochschulforum Digitalisierung. ChatGPT im Hochschulkontext &#8211; eine kommentierte Linksammlung. Essen: Hochschulforum Digitalisierung; 2023. Zug&#228;nglich unter&#47;available from: https:&#47;&#47;hochschulforumdigitalisierung.de&#47;chatgpt-im-hochschulkontext-eine-kommentierte-linksammlung&#47;</RefTotal>
        <RefLink>https:&#47;&#47;hochschulforumdigitalisierung.de&#47;chatgpt-im-hochschulkontext-eine-kommentierte-linksammlung&#47;</RefLink>
      </Reference>
      <Reference refNo="40">
        <RefAuthor>Moritz S</RefAuthor>
        <RefAuthor>Romeike B</RefAuthor>
        <RefAuthor>Stosch C</RefAuthor>
        <RefAuthor>Tolks D</RefAuthor>
        <RefTitle>Generative AI (gAI) in medical education: Chat-GPT and co</RefTitle>
        <RefYear>2023</RefYear>
        <RefJournal>GMS J Med Educ</RefJournal>
        <RefPage>Doc54</RefPage>
        <RefTotal>Moritz S, Romeike B, Stosch C, Tolks D. Generative AI (gAI) in medical education: Chat-GPT and co. GMS J Med Educ. 2023;40(4):Doc54. DOI: 10.3205&#47;zma001636</RefTotal>
        <RefLink>https:&#47;&#47;doi.org&#47;10.3205&#47;zma001636</RefLink>
      </Reference>
      <Reference refNo="41">
        <RefAuthor>Salden P</RefAuthor>
        <RefAuthor>Leschke J</RefAuthor>
        <RefTitle></RefTitle>
        <RefYear>2023</RefYear>
        <RefBookTitle>Didaktische und rechtliche Perspektiven auf KI-gest&#252;tztes Schreiben in der Hochschulbildung</RefBookTitle>
        <RefPage>41</RefPage>
        <RefTotal>Salden P, Leschke J. Didaktische und rechtliche Perspektiven auf KI-gest&#252;tztes Schreiben in der Hochschulbildung. Bochum: RUB; 2023. p.41. Zug&#228;nglich unter&#47;available from: https:&#47;&#47;hss-opus.ub.ruhr-uni-bochum.de&#47;opus4&#47;frontdoor&#47;deliver&#47;index&#47;docId&#47;9734&#47;file&#47;2023&#95;03&#95;06&#95;Didaktik&#95;Recht&#95;KI&#95;Hochschulbildung.pdf</RefTotal>
        <RefLink>https:&#47;&#47;hss-opus.ub.ruhr-uni-bochum.de&#47;opus4&#47;frontdoor&#47;deliver&#47;index&#47;docId&#47;9734&#47;file&#47;2023&#95;03&#95;06&#95;Didaktik&#95;Recht&#95;KI&#95;Hochschulbildung.pdf</RefLink>
      </Reference>
      <Reference refNo="42">
        <RefAuthor>Stabsstelle IT-Recht der bayerischen staatlichen Universit&#228;ten und Hochschulen</RefAuthor>
        <RefTitle></RefTitle>
        <RefYear>2022</RefYear>
        <RefBookTitle>Pr&#252;fungsrechtliche Fragen zu ChatGPT</RefBookTitle>
        <RefPage></RefPage>
        <RefTotal>Stabsstelle IT-Recht der bayerischen staatlichen Universit&#228;ten und Hochschulen. Pr&#252;fungsrechtliche Fragen zu ChatGPT. W&#252;rzburg: Universit&#228;t W&#252;rzburg; 2022. Zug&#228;nglich unter&#47;available from: https:&#47;&#47;www.rz.uni-wuerzburg.de&#47;fileadmin&#47;42010000&#47;2023&#47;ChatGPT&#95;und&#95;Pruefungsrecht.pdf</RefTotal>
        <RefLink>https:&#47;&#47;www.rz.uni-wuerzburg.de&#47;fileadmin&#47;42010000&#47;2023&#47;ChatGPT&#95;und&#95;Pruefungsrecht.pdf</RefLink>
      </Reference>
      <Reference refNo="43">
        <RefAuthor>Simkin MG</RefAuthor>
        <RefAuthor>McLeod A</RefAuthor>
        <RefTitle>Why do college students cheat&#63;</RefTitle>
        <RefYear>2010</RefYear>
        <RefJournal>J Bus Ethics</RefJournal>
        <RefPage>441-453</RefPage>
        <RefTotal>Simkin MG, McLeod A. Why do college students cheat&#63; J Bus Ethics. 2010;94(3):441-453. DOI: 10.1007&#47;s10551-009-0275-x</RefTotal>
        <RefLink>https:&#47;&#47;doi.org&#47;10.1007&#47;s10551-009-0275-x</RefLink>
      </Reference>
      <Reference refNo="44">
        <RefAuthor>Radcke A</RefAuthor>
        <RefTitle></RefTitle>
        <RefYear>2023</RefYear>
        <RefBookTitle>Der Einsatz von KI in Hochschulpr&#252;fungen und dessen pr&#252;fungsrechtlichen Auswirkungen</RefBookTitle>
        <RefPage></RefPage>
        <RefTotal>Radcke A. Der Einsatz von KI in Hochschulpr&#252;fungen und dessen pr&#252;fungsrechtlichen Auswirkungen. Potsdam: Universit&#228;t Potsdam; 2023. Zug&#228;nglich unter&#47;available from: https:&#47;&#47;www.uni-potsdam.de&#47;fileadmin&#47;projects&#47;zfq&#47;Lehre&#95;und&#95;Medien&#47;E-Assessment&#47;Auswirkung&#95;KI&#95;auf&#95;Pruefungen&#95;20230524.pdf</RefTotal>
        <RefLink>https:&#47;&#47;www.uni-potsdam.de&#47;fileadmin&#47;projects&#47;zfq&#47;Lehre&#95;und&#95;Medien&#47;E-Assessment&#47;Auswirkung&#95;KI&#95;auf&#95;Pruefungen&#95;20230524.pdf</RefLink>
      </Reference>
      <Reference refNo="45">
        <RefAuthor>Deutsche Forschungsgemeinschaft</RefAuthor>
        <RefTitle></RefTitle>
        <RefYear>2019</RefYear>
        <RefBookTitle>Leitlinien zur Sicherung guter wissenschaftlicher Praxis</RefBookTitle>
        <RefPage></RefPage>
        <RefTotal>Deutsche Forschungsgemeinschaft. Leitlinien zur Sicherung guter wissenschaftlicher Praxis. Bonn: DFG; 2019. DOI: 10.5281&#47;zenodo.3923601</RefTotal>
        <RefLink>https:&#47;&#47;doi.org&#47;10.5281&#47;zenodo.3923601</RefLink>
      </Reference>
      <Reference refNo="46">
        <RefAuthor>European Commission</RefAuthor>
        <RefTitle></RefTitle>
        <RefYear>2023</RefYear>
        <RefBookTitle>Artificial Intelligence &#8211; Questions and Answers</RefBookTitle>
        <RefPage></RefPage>
        <RefTotal>European Commission. Artificial Intelligence &#8211; Questions and Answers. Brussels: European Commission; 2023. Zug&#228;nglich unter&#47;available from: https:&#47;&#47;ec.europa.eu&#47;commission&#47;presscorner&#47;detail&#47;en&#47;qanda&#95;21&#95;1683</RefTotal>
        <RefLink>https:&#47;&#47;ec.europa.eu&#47;commission&#47;presscorner&#47;detail&#47;en&#47;qanda&#95;21&#95;1683</RefLink>
      </Reference>
      <Reference refNo="47">
        <RefAuthor>Europ&#228;isches Parlament</RefAuthor>
        <RefTitle></RefTitle>
        <RefYear>2023</RefYear>
        <RefBookTitle>KI-Gesetz: erste Regulierung der k&#252;nstlichen Intelligenz</RefBookTitle>
        <RefPage></RefPage>
        <RefTotal>Europ&#228;isches Parlament. KI-Gesetz: erste Regulierung der k&#252;nstlichen Intelligenz. Br&#252;ssel: Europ&#228;isches Parlament; 2023. Zug&#228;nglich unter&#47;available from: https:&#47;&#47;www.europarl.europa.eu&#47;news&#47;de&#47;headlines&#47;society&#47;20230601STO93804&#47;ki-gesetz-erste-regulierung-der-kunstlichen-intelligenz</RefTotal>
        <RefLink>https:&#47;&#47;www.europarl.europa.eu&#47;news&#47;de&#47;headlines&#47;society&#47;20230601STO93804&#47;ki-gesetz-erste-regulierung-der-kunstlichen-intelligenz</RefLink>
      </Reference>
      <Reference refNo="48">
        <RefAuthor>Borkowsky M</RefAuthor>
        <RefTitle>Der EU AI Act: Was bedeutet er f&#252;r k&#252;nstliche Intelligenz in Unternehmen&#63;</RefTitle>
        <RefYear>2023</RefYear>
        <RefTotal>Borkowsky M. Der EU AI Act: Was bedeutet er f&#252;r k&#252;nstliche Intelligenz in Unternehmen&#63; 2023. Zug&#228;nglich unter&#47;available from: https:&#47;&#47;www.melibo.de&#47;blog&#47;der-eu-ai-act-was-bedeutet-er-fur-kunstliche-intelligenz-in-unternehmen</RefTotal>
        <RefLink>https:&#47;&#47;www.melibo.de&#47;blog&#47;der-eu-ai-act-was-bedeutet-er-fur-kunstliche-intelligenz-in-unternehmen</RefLink>
      </Reference>
      <Reference refNo="49">
        <RefAuthor>Madiega TA</RefAuthor>
        <RefTitle></RefTitle>
        <RefYear>2024</RefYear>
        <RefBookTitle>Artificial intelligence act</RefBookTitle>
        <RefPage></RefPage>
        <RefTotal>Madiega TA. Artificial intelligence act. Brussels: Europ&#228;isches Parlament; 2024. Zug&#228;nglich unter&#47;available from: https:&#47;&#47;www.europarl.europa.eu&#47;thinktank&#47;en&#47;document&#47;EPRS&#95;BRI(2021)698792</RefTotal>
        <RefLink>https:&#47;&#47;www.europarl.europa.eu&#47;thinktank&#47;en&#47;document&#47;EPRS&#95;BRI(2021)698792</RefLink>
      </Reference>
      <Reference refNo="50">
        <RefAuthor>Fishman T</RefAuthor>
        <RefTitle></RefTitle>
        <RefYear>2009</RefYear>
        <RefBookTitle>&#8220;We know it when we see it&#8221; is not good enough: toward a standard definition of plagiarism that transcends theft, fraud, and copyright</RefBookTitle>
        <RefPage>28-30</RefPage>
        <RefTotal>Fishman T. &#8220;We know it when we see it&#8221; is not good enough: toward a standard definition of plagiarism that transcends theft, fraud, and copyright. Wollongong (NSW, Aust): University of Wollongong; 2009. p.28-30.</RefTotal>
      </Reference>
      <Reference refNo="51">
        <RefAuthor>Khalil M</RefAuthor>
        <RefAuthor>Er E</RefAuthor>
        <RefTitle>Will ChatGPT get you caught&#63; Rethinking of Plagiarism Detection</RefTitle>
        <RefYear>2023</RefYear>
        <RefJournal>arXiv</RefJournal>
        <RefPage></RefPage>
        <RefTotal>Khalil M, Er E. Will ChatGPT get you caught&#63; Rethinking of Plagiarism Detection. arXiv. 2023. DOI: 10.48550&#47;arXiv.2302.04335</RefTotal>
        <RefLink>https:&#47;&#47;doi.org&#47;10.48550&#47;arXiv.2302.04335</RefLink>
      </Reference>
      <Reference refNo="52">
        <RefAuthor>Gimpel H</RefAuthor>
        <RefAuthor>Ruiner C</RefAuthor>
        <RefAuthor>Schoch M</RefAuthor>
        <RefAuthor>Schoop M</RefAuthor>
        <RefAuthor>Hall K</RefAuthor>
        <RefAuthor>Eymann T</RefAuthor>
        <RefAuthor>R&#246;glinger M</RefAuthor>
        <RefAuthor>Vandirk S</RefAuthor>
        <RefAuthor>L&#228;mmermann L</RefAuthor>
        <RefAuthor>Urbach N</RefAuthor>
        <RefAuthor>M&#228;dche A</RefAuthor>
        <RefAuthor>Decker S</RefAuthor>
        <RefTitle></RefTitle>
        <RefYear>2023</RefYear>
        <RefBookTitle>Unlocking the Power of Generative AI Models and Systems such as GPT-4 and ChatGPT for Higher Education</RefBookTitle>
        <RefPage>47</RefPage>
        <RefTotal>Gimpel H, Ruiner C, Schoch M, Schoop M, Hall K, Eymann T, R&#246;glinger M, Vandirk S, L&#228;mmermann L, Urbach N, M&#228;dche A, Decker S. Unlocking the Power of Generative AI Models and Systems such as GPT-4 and ChatGPT for Higher Education. Hohenheim: Universit&#228;t Hohenheim; 2023. p.47.</RefTotal>
      </Reference>
      <Reference refNo="53">
        <RefAuthor>Sokolov DJ</RefAuthor>
        <RefTitle></RefTitle>
        <RefYear>2023</RefYear>
        <RefBookTitle>Large Language Models: US-Autoren verklagen OpenAI wegen Copyright-Verletzung</RefBookTitle>
        <RefPage></RefPage>
        <RefTotal>Sokolov DJ. Large Language Models: US-Autoren verklagen OpenAI wegen Copyright-Verletzung. 2023. Zug&#228;nglich unter&#47;available from: https:&#47;&#47;www.heise.de&#47;news&#47;Large-Language-Models-US-Autoren-verklagen-OpenAI-wegen-Copyright-Verletzung-9301736.html</RefTotal>
        <RefLink>https:&#47;&#47;www.heise.de&#47;news&#47;Large-Language-Models-US-Autoren-verklagen-OpenAI-wegen-Copyright-Verletzung-9301736.html</RefLink>
      </Reference>
      <Reference refNo="54">
        <RefAuthor>Susnjak T</RefAuthor>
        <RefTitle>ChatGPT: The End of Online Exam Integrity&#63;</RefTitle>
        <RefYear>2022</RefYear>
        <RefJournal>arXiv</RefJournal>
        <RefPage></RefPage>
        <RefTotal>Susnjak T. ChatGPT: The End of Online Exam Integrity&#63; arXiv. 2022. DOI: 10.48550&#47;arXiv.2212.09292</RefTotal>
        <RefLink>https:&#47;&#47;doi.org&#47;10.48550&#47;arXiv.2212.09292</RefLink>
      </Reference>
      <Reference refNo="55">
        <RefAuthor>Davis MH</RefAuthor>
        <RefAuthor>Karunathilake I</RefAuthor>
        <RefTitle>The place of the oral examination in today&#8217;s assessment systems</RefTitle>
        <RefYear>2005</RefYear>
        <RefJournal>Med Teach</RefJournal>
        <RefPage>294-297</RefPage>
        <RefTotal>Davis MH, Karunathilake I. The place of the oral examination in today&#8217;s assessment systems. Med Teach. 2005;27(4):294-297. DOI: 10.1080&#47;01421590500126437</RefTotal>
        <RefLink>https:&#47;&#47;doi.org&#47;10.1080&#47;01421590500126437</RefLink>
      </Reference>
      <Reference refNo="56">
        <RefAuthor>Wass V</RefAuthor>
        <RefAuthor>Van Der Vleuten C</RefAuthor>
        <RefAuthor>Shatzer J</RefAuthor>
        <RefAuthor>Jones R</RefAuthor>
        <RefTitle>Assessment of clinical competence</RefTitle>
        <RefYear>2001</RefYear>
        <RefJournal>Lancet</RefJournal>
        <RefPage>945-949</RefPage>
        <RefTotal>Wass V, Van Der Vleuten C, Shatzer J, Jones R. Assessment of clinical competence. Lancet. 2001;357(9260):945-949. DOI: 10.1016&#47;S0140-6736(00)04221-5</RefTotal>
        <RefLink>https:&#47;&#47;doi.org&#47;10.1016&#47;S0140-6736(00)04221-5</RefLink>
      </Reference>
      <Reference refNo="57">
        <RefAuthor>Sadasivan VS</RefAuthor>
        <RefAuthor>Kumar A</RefAuthor>
        <RefAuthor>Balasubramanian S</RefAuthor>
        <RefAuthor>Wang W</RefAuthor>
        <RefAuthor>Feizi S</RefAuthor>
        <RefTitle>Can AI-Generated Text be Reliably Detected&#63;</RefTitle>
        <RefYear>2023</RefYear>
        <RefJournal>arXiv</RefJournal>
        <RefPage></RefPage>
        <RefTotal>Sadasivan VS, Kumar A, Balasubramanian S, Wang W, Feizi S. Can AI-Generated Text be Reliably Detected&#63; arXiv. 2023. DOI: 10.48550&#47;arXiv.2303.11156</RefTotal>
        <RefLink>https:&#47;&#47;doi.org&#47;10.48550&#47;arXiv.2303.11156</RefLink>
      </Reference>
      <Reference refNo="58">
        <RefAuthor>Krishna K</RefAuthor>
        <RefAuthor>Song Y</RefAuthor>
        <RefAuthor>Karpinska M</RefAuthor>
        <RefAuthor>Wieting J</RefAuthor>
        <RefAuthor>Iyyer M</RefAuthor>
        <RefTitle>Paraphrasing evades detectors of AI-generated text, but retrieval is an effective defense</RefTitle>
        <RefYear>2023</RefYear>
        <RefJournal>arXiv</RefJournal>
        <RefPage></RefPage>
        <RefTotal>Krishna K, Song Y, Karpinska M, Wieting J, Iyyer M. Paraphrasing evades detectors of AI-generated text, but retrieval is an effective defense. arXiv. 2023. DOI: 10.48550&#47;arXiv.2303.13408</RefTotal>
        <RefLink>https:&#47;&#47;doi.org&#47;10.48550&#47;arXiv.2303.13408</RefLink>
      </Reference>
      <Reference refNo="59">
        <RefAuthor>Wu J</RefAuthor>
        <RefAuthor>Yang S</RefAuthor>
        <RefAuthor>Zhan R</RefAuthor>
        <RefAuthor>Yuan Y</RefAuthor>
        <RefAuthor>Wong DF</RefAuthor>
        <RefAuthor>Chao LS</RefAuthor>
        <RefTitle>A Survey on LLM-Generated Text Detection: Necessity, Methods, and Future Directions</RefTitle>
        <RefYear>2023</RefYear>
        <RefJournal>arXiv</RefJournal>
        <RefPage></RefPage>
        <RefTotal>Wu J, Yang S, Zhan R, Yuan Y, Wong DF, Chao LS. A Survey on LLM-Generated Text Detection: Necessity, Methods, and Future Directions. arXiv. 2023. DOI: 10.48550&#47;arXiv.2310.14724</RefTotal>
        <RefLink>https:&#47;&#47;doi.org&#47;10.48550&#47;arXiv.2310.14724</RefLink>
      </Reference>
      <Reference refNo="60">
        <RefAuthor>Mo Y</RefAuthor>
        <RefAuthor>Qin H</RefAuthor>
        <RefAuthor>Dong Y</RefAuthor>
        <RefAuthor>Zhu Z</RefAuthor>
        <RefAuthor>Li Z</RefAuthor>
        <RefTitle>Large Language Model (LLM) AI Text Generation Detection based on Transformer Deep Learning Algorithm</RefTitle>
        <RefYear>2024</RefYear>
        <RefJournal>Int J Eng Manag Res</RefJournal>
        <RefPage>154-159</RefPage>
        <RefTotal>Mo Y, Qin H, Dong Y, Zhu Z, Li Z. Large Language Model (LLM) AI Text Generation Detection based on Transformer Deep Learning Algorithm. Int J Eng Manag Res. 2024;14(2):154-159. DOI: 10.5281&#47;zenodo.11124440</RefTotal>
        <RefLink>https:&#47;&#47;doi.org&#47;10.5281&#47;zenodo.11124440</RefLink>
      </Reference>
      <Reference refNo="61">
        <RefAuthor>Nguyen TT</RefAuthor>
        <RefAuthor>Hatua A</RefAuthor>
        <RefAuthor>Sung AH</RefAuthor>
        <RefTitle>How to Detect AI-Generated Texts&#63;</RefTitle>
        <RefYear>2023</RefYear>
        <RefBookTitle>2023 IEEE 14th Annual Ubiquitous Computing, Electronics &#38; Mobile Communication Conference (UEMCON)</RefBookTitle>
        <RefPage>0464-0471</RefPage>
        <RefTotal>Nguyen TT, Hatua A, Sung AH. How to Detect AI-Generated Texts&#63; In: 2023 IEEE 14th Annual Ubiquitous Computing, Electronics &#38; Mobile Communication Conference (UEMCON). IEEE; 2023. p.0464-0471. DOI: 10.1109&#47;UEMCON59035.2023.10316132</RefTotal>
        <RefLink>https:&#47;&#47;doi.org&#47;10.1109&#47;UEMCON59035.2023.10316132</RefLink>
      </Reference>
      <Reference refNo="62">
        <RefAuthor>Zenth&#246;fer J</RefAuthor>
        <RefTitle>Erstes Urteil zu ChatGPT an Hochschulen</RefTitle>
        <RefYear>2024</RefYear>
        <RefJournal>Frankfurter Allgemeine</RefJournal>
        <RefPage></RefPage>
        <RefTotal>Zenth&#246;fer J. Erstes Urteil zu ChatGPT an Hochschulen. Frankfurter Allgemeine. 2024. Zug&#228;nglich unter&#47;available from: https:&#47;&#47;www.faz.net&#47;aktuell&#47;karriere-hochschule&#47;erstes-urteil-zu-chatgpt-an-hochschulen-student-benutzte-ki-fuer-bewerbung-19564795.html</RefTotal>
        <RefLink>https:&#47;&#47;www.faz.net&#47;aktuell&#47;karriere-hochschule&#47;erstes-urteil-zu-chatgpt-an-hochschulen-student-benutzte-ki-fuer-bewerbung-19564795.html</RefLink>
      </Reference>
      <Reference refNo="63">
        <RefAuthor>Agarwal M</RefAuthor>
        <RefAuthor>Sharma P</RefAuthor>
        <RefAuthor>Goswami A</RefAuthor>
        <RefTitle>Analysing the Applicability of ChatGPT, Bard, and Bing to Generate Reasoning-Based Multiple-Choice Questions in Medical Physiology</RefTitle>
        <RefYear>2023</RefYear>
        <RefJournal>Cureus</RefJournal>
        <RefPage>e40977</RefPage>
        <RefTotal>Agarwal M, Sharma P, Goswami A. Analysing the Applicability of ChatGPT, Bard, and Bing to Generate Reasoning-Based Multiple-Choice Questions in Medical Physiology. Cureus. 2023;15(6):e40977. DOI: 10.7759&#47;cureus.40977</RefTotal>
        <RefLink>https:&#47;&#47;doi.org&#47;10.7759&#47;cureus.40977</RefLink>
      </Reference>
      <Reference refNo="64">
        <RefAuthor>Cheung BH</RefAuthor>
        <RefAuthor>Lau GK</RefAuthor>
        <RefAuthor>Wong GT</RefAuthor>
        <RefAuthor>Lee EY</RefAuthor>
        <RefAuthor>Kulkarni D</RefAuthor>
        <RefAuthor>Seow CS</RefAuthor>
        <RefAuthor>Wong R</RefAuthor>
        <RefAuthor>Co MT</RefAuthor>
        <RefTitle>ChatGPT versus human in generating medical graduate exam multiple choice questions&#8212;A multinational prospective study (Hong Kong S. A.R., Singapore, Ireland, and the United Kingdom)</RefTitle>
        <RefYear>2023</RefYear>
        <RefJournal>PLoS One</RefJournal>
        <RefPage>e0290691</RefPage>
        <RefTotal>Cheung BH, Lau GK, Wong GT, Lee EY, Kulkarni D, Seow CS, Wong R, Co MT. ChatGPT versus human in generating medical graduate exam multiple choice questions&#8212;A multinational prospective study (Hong Kong S. A.R., Singapore, Ireland, and the United Kingdom). PLoS One. 2023;18(8):e0290691. DOI: 10.1371&#47;journal.pone.0290691</RefTotal>
        <RefLink>https:&#47;&#47;doi.org&#47;10.1371&#47;journal.pone.0290691</RefLink>
      </Reference>
      <Reference refNo="65">
        <RefAuthor>Klang E</RefAuthor>
        <RefAuthor>Portugez S</RefAuthor>
        <RefAuthor>Gross R</RefAuthor>
        <RefAuthor>Kassif Lerner R</RefAuthor>
        <RefAuthor>Brenner A</RefAuthor>
        <RefAuthor>Gilboa M</RefAuthor>
        <RefAuthor>Ortal T</RefAuthor>
        <RefAuthor>Ron S</RefAuthor>
        <RefAuthor>Robinzon V</RefAuthor>
        <RefAuthor>Meiri H</RefAuthor>
        <RefAuthor>Segal G</RefAuthor>
        <RefTitle>Advantages and pitfalls in utilizing artificial intelligence for crafting medical examinations: a medical education pilot study with GPT-4</RefTitle>
        <RefYear>2023</RefYear>
        <RefJournal>BMC Med Educ</RefJournal>
        <RefPage>772</RefPage>
        <RefTotal>Klang E, Portugez S, Gross R, Kassif Lerner R, Brenner A, Gilboa M, Ortal T, Ron S, Robinzon V, Meiri H, Segal G. Advantages and pitfalls in utilizing artificial intelligence for crafting medical examinations: a medical education pilot study with GPT-4. BMC Med Educ. 2023;23(1):772. DOI: 10.1186&#47;s12909-023-04752-w</RefTotal>
        <RefLink>https:&#47;&#47;doi.org&#47;10.1186&#47;s12909-023-04752-w</RefLink>
      </Reference>
      <Reference refNo="66">
        <RefAuthor>Laupichler MC</RefAuthor>
        <RefAuthor>Rother JF</RefAuthor>
        <RefAuthor>Grunwald Kadow IC</RefAuthor>
        <RefAuthor>Ahmadi S</RefAuthor>
        <RefAuthor>Raupach T</RefAuthor>
        <RefTitle>Large Language Models in Medical Education: Comparing ChatGPT- to Human-Generated Exam Questions</RefTitle>
        <RefYear>2024</RefYear>
        <RefJournal>Acad Med</RefJournal>
        <RefPage>508-512</RefPage>
        <RefTotal>Laupichler MC, Rother JF, Grunwald Kadow IC, Ahmadi S, Raupach T. Large Language Models in Medical Education: Comparing ChatGPT- to Human-Generated Exam Questions. Acad Med. 2024;99(5):508-512. DOI: 10.1097&#47;ACM.0000000000005626</RefTotal>
        <RefLink>https:&#47;&#47;doi.org&#47;10.1097&#47;ACM.0000000000005626</RefLink>
      </Reference>
      <Reference refNo="67">
        <RefAuthor>Heston TF</RefAuthor>
        <RefAuthor>Khun C</RefAuthor>
        <RefTitle>Prompt Engineering in Medical Education</RefTitle>
        <RefYear>2023</RefYear>
        <RefJournal>Int Med Educ</RefJournal>
        <RefPage>198-205</RefPage>
        <RefTotal>Heston TF, Khun C. Prompt Engineering in Medical Education. Int Med Educ. 2023;2(3):198-205. DOI: 10.3390&#47;ime2030019</RefTotal>
        <RefLink>https:&#47;&#47;doi.org&#47;10.3390&#47;ime2030019</RefLink>
      </Reference>
      <Reference refNo="68">
        <RefAuthor>Crawford J</RefAuthor>
        <RefAuthor>Cowling M</RefAuthor>
        <RefAuthor>Allen KA</RefAuthor>
        <RefTitle>Leadership is needed for ethical ChatGPT: Character, assessment, and learning using artificial intelligence (AI)</RefTitle>
        <RefYear>2023</RefYear>
        <RefJournal>J Univ Teach Learn Pract</RefJournal>
        <RefPage></RefPage>
        <RefTotal>Crawford J, Cowling M, Allen KA. Leadership is needed for ethical ChatGPT: Character, assessment, and learning using artificial intelligence (AI). J Univ Teach Learn Pract. 2023;20(3). DOI: 10.53761&#47;1.20.3.02</RefTotal>
        <RefLink>https:&#47;&#47;doi.org&#47;10.53761&#47;1.20.3.02</RefLink>
      </Reference>
      <Reference refNo="69">
        <RefAuthor>Goodman RS</RefAuthor>
        <RefAuthor>Patrinely JR</RefAuthor>
        <RefAuthor>Osterman T</RefAuthor>
        <RefAuthor>Wheless L</RefAuthor>
        <RefAuthor>Johnson DB</RefAuthor>
        <RefTitle>On the cusp: Considering the impact of artificial intelligence language models in healthcare</RefTitle>
        <RefYear>2023</RefYear>
        <RefJournal>Med</RefJournal>
        <RefPage>139-140</RefPage>
        <RefTotal>Goodman RS, Patrinely JR, Osterman T, Wheless L, Johnson DB. On the cusp: Considering the impact of artificial intelligence language models in healthcare. Med. 2023;4(3):139-140. DOI: 10.1016&#47;j.medj.2023.02.008</RefTotal>
        <RefLink>https:&#47;&#47;doi.org&#47;10.1016&#47;j.medj.2023.02.008</RefLink>
      </Reference>
      <Reference refNo="70">
        <RefAuthor>Anderman EM</RefAuthor>
        <RefAuthor>Koenka AC</RefAuthor>
        <RefTitle>The Relation Between Academic Motivation and Cheating</RefTitle>
        <RefYear>2017</RefYear>
        <RefJournal>Theory Pract</RefJournal>
        <RefPage>95-102</RefPage>
        <RefTotal>Anderman EM, Koenka AC. The Relation Between Academic Motivation and Cheating. Theory Pract. 2017;56(2):95-102. DOI: 10.1080&#47;00405841.2017.1308172</RefTotal>
        <RefLink>https:&#47;&#47;doi.org&#47;10.1080&#47;00405841.2017.1308172</RefLink>
      </Reference>
      <Reference refNo="71">
        <RefAuthor>Hsiao YP</RefAuthor>
        <RefAuthor>Klijn N</RefAuthor>
        <RefAuthor>Chiu MS</RefAuthor>
        <RefTitle>Developing a framework to re-design writing assignment assessment for the era of Large Language Models</RefTitle>
        <RefYear>2023</RefYear>
        <RefJournal>Learn Res Pract</RefJournal>
        <RefPage>148-158</RefPage>
        <RefTotal>Hsiao YP, Klijn N, Chiu MS. Developing a framework to re-design writing assignment assessment for the era of Large Language Models. Learn Res Pract. 2023;9(2):148-158. DOI: 10.1080&#47;23735082.2023.2257234</RefTotal>
        <RefLink>https:&#47;&#47;doi.org&#47;10.1080&#47;23735082.2023.2257234</RefLink>
      </Reference>
      <Reference refNo="72">
        <RefAuthor>Gonsalves C</RefAuthor>
        <RefTitle>On ChatGPT: what promise remains for multiple choice assessment&#63;</RefTitle>
        <RefYear>2023</RefYear>
        <RefJournal>J Learn Dev High Educ</RefJournal>
        <RefPage></RefPage>
        <RefTotal>Gonsalves C. On ChatGPT: what promise remains for multiple choice assessment&#63; J Learn Dev High Educ. 2023;27. DOI: 10.47408&#47;jldhe.vi27.1009</RefTotal>
        <RefLink>https:&#47;&#47;doi.org&#47;10.47408&#47;jldhe.vi27.1009</RefLink>
      </Reference>
      <Reference refNo="73">
        <RefAuthor>Schuwirth LW</RefAuthor>
        <RefAuthor>Van Der Vleuten CP</RefAuthor>
        <RefTitle>Programmatic assessment: From assessment of learning to assessment for learning</RefTitle>
        <RefYear>2011</RefYear>
        <RefJournal>Med Teach</RefJournal>
        <RefPage>478-485</RefPage>
        <RefTotal>Schuwirth LW, Van Der Vleuten CP. Programmatic assessment: From assessment of learning to assessment for learning. Med Teach. 2011;33(6):478-485. DOI: 10.3109&#47;0142159X.2011.565828</RefTotal>
        <RefLink>https:&#47;&#47;doi.org&#47;10.3109&#47;0142159X.2011.565828</RefLink>
      </Reference>
      <Reference refNo="74">
        <RefAuthor>Boscardin CK</RefAuthor>
        <RefAuthor>Gin B</RefAuthor>
        <RefAuthor>Golde PB</RefAuthor>
        <RefAuthor>Hauer KE</RefAuthor>
        <RefTitle>ChatGPT and Generative Artificial Intelligence for Medical Education: Potential Impact and Opportunity</RefTitle>
        <RefYear>2023</RefYear>
        <RefJournal>Acad Med</RefJournal>
        <RefPage>22-27</RefPage>
        <RefTotal>Boscardin CK, Gin B, Golde PB, Hauer KE. ChatGPT and Generative Artificial Intelligence for Medical Education: Potential Impact and Opportunity. Acad Med. 2023;99(1):22-27. DOI: 10.1097&#47;acm.0000000000005439</RefTotal>
        <RefLink>https:&#47;&#47;doi.org&#47;10.1097&#47;acm.0000000000005439</RefLink>
      </Reference>
      <Reference refNo="75">
        <RefAuthor>Busse B</RefAuthor>
        <RefAuthor>Kleiber I</RefAuthor>
        <RefAuthor>Eickhoff FC</RefAuthor>
        <RefAuthor>Andree K</RefAuthor>
        <RefTitle></RefTitle>
        <RefYear>2023</RefYear>
        <RefBookTitle>Hinweise zu textgenerierenden KI-Systemen im Kontext von Lehre und Lernen</RefBookTitle>
        <RefPage></RefPage>
        <RefTotal>Busse B, Kleiber I, Eickhoff FC, Andree K. Hinweise zu textgenerierenden KI-Systemen im Kontext von Lehre und Lernen. K&#246;ln: Universit&#228;t zu K&#246;ln; 2023. Zug&#228;nglich unter&#47;available from: https:&#47;&#47;uni-koeln.sciebo.de&#47;s&#47;7uwYRX5a92eznVl&#35;pdfviewer. DOI: 10.13140&#47;RG.2.2.35392.61449&#47;1</RefTotal>
        <RefLink>https:&#47;&#47;uni-koeln.sciebo.de&#47;s&#47;7uwYRX5a92eznVl&#35;pdfviewer</RefLink>
      </Reference>
      <Reference refNo="76">
        <RefAuthor>Cardona MA</RefAuthor>
        <RefAuthor>Rodr&#237;guez RJ</RefAuthor>
        <RefAuthor>Ishmael K</RefAuthor>
        <RefTitle>Artificial Intelligence and Future of Teaching and Learning: Insights and Recommendations</RefTitle>
        <RefYear>2023</RefYear>
        <RefJournal>US Dep Educ Off Educ Technol</RefJournal>
        <RefPage>1-71</RefPage>
        <RefTotal>Cardona MA, Rodr&#237;guez RJ, Ishmael K. Artificial Intelligence and Future of Teaching and Learning: Insights and Recommendations. US Dep Educ Off Educ Technol. 2023;(1):1-71. Zug&#228;nglich unter&#47;available from: https:&#47;&#47;www2.ed.gov&#47;documents&#47;ai-report&#47;ai-report.pdf</RefTotal>
        <RefLink>https:&#47;&#47;www2.ed.gov&#47;documents&#47;ai-report&#47;ai-report.pdf</RefLink>
      </Reference>
    </References>
    <Media>
      <Tables>
        <NoOfTables>0</NoOfTables>
      </Tables>
      <Figures>
        <NoOfPictures>0</NoOfPictures>
      </Figures>
      <InlineFigures>
        <NoOfPictures>0</NoOfPictures>
      </InlineFigures>
      <Attachments>
        <Attachment>
          <MediaNo>1</MediaNo>
          <MediaID filename="zma001702.a1en.pdf" language="en" mimeType="application/pdf" origFilename="Attachment&#95;1.pdf" size="134272" url="">1en</MediaID>
          <MediaID filename="zma001702.a1de.pdf" language="de" mimeType="application/pdf" origFilename="Anhang&#95;1.pdf" size="136208" url="">1de</MediaID>
          <AttachmentTitle language="en">Model guidelines, link collections</AttachmentTitle>
          <AttachmentTitle language="de">Musterrichtlinien, Linksammlungen</AttachmentTitle>
        </Attachment>
        <NoOfAttachments>1</NoOfAttachments>
      </Attachments>
    </Media>
  </OrigData>
</GmsArticle>