<?xml version="1.0" encoding="iso-8859-1" standalone="no"?>
<!DOCTYPE GmsArticle SYSTEM "http://www.egms.de/dtd/2.0.34/GmsArticle.dtd">
<GmsArticle xmlns:xlink="http://www.w3.org/1999/xlink">
  <MetaData>
    <Identifier>zma001642</Identifier>
    <IdentifierDoi>10.3205/zma001642</IdentifierDoi>
    <IdentifierUrn>urn:nbn:de:0183-zma0016429</IdentifierUrn>
    <ArticleType language="en">article</ArticleType>
    <ArticleType language="de">Artikel</ArticleType>
    <TitleGroup>
      <Title language="en">Webcam-based eye-tracking to measure visual expertise of medical students during online histology training</Title>
      <TitleTranslated language="de">Webcam-basiertes Eye-Tracking zur Messung von visueller Expertise bei Medizinstudierenden in einem Online-Histologiekurs</TitleTranslated>
    </TitleGroup>
    <CreatorList>
      <Creator>
        <PersonNames>
          <Lastname>Darici</Lastname>
          <LastnameHeading>Darici</LastnameHeading>
          <Firstname>Dogus</Firstname>
          <Initials>D</Initials>
          <AcademicTitle>Dr.</AcademicTitle>
        </PersonNames>
        <Address language="en">Westf&#228;lische-Wilhelms-University, Institute of Anatomy and Neurobiology, Vesaliusweg 2-4, D-48149 M&#252;nster, Germany<Affiliation>Westf&#228;lische-Wilhelms-University, Institute of Anatomy and Neurobiology, M&#252;nster, Germany</Affiliation></Address>
        <Address language="de">Westf&#228;lische-Wilhelms-Universit&#228;t, Institut f&#252;r Anatomie and Neurobiologie, Vesaliusweg 2-4, 48149 M&#252;nster, Deutschland<Affiliation>Westf&#228;lische-Wilhelms-Universit&#228;t, Institut f&#252;r Anatomie and Neurobiologie, M&#252;nster, Deutschland</Affiliation></Address>
        <Email>darici&#64;uni-muenster.de</Email>
        <Creatorrole corresponding="yes" presenting="no">author</Creatorrole>
      </Creator>
      <Creator>
        <PersonNames>
          <Lastname>Reissner</Lastname>
          <LastnameHeading>Reissner</LastnameHeading>
          <Firstname>Carsten</Firstname>
          <Initials>C</Initials>
        </PersonNames>
        <Address language="en">
          <Affiliation>Westf&#228;lische-Wilhelms-University, Institute of Anatomy and Neurobiology, M&#252;nster, Germany</Affiliation>
        </Address>
        <Address language="de">
          <Affiliation>Westf&#228;lische-Wilhelms-Universit&#228;t, Institut f&#252;r Anatomie und Neurobiologie, M&#252;nster, Deutschland</Affiliation>
        </Address>
        <Creatorrole corresponding="no" presenting="no">author</Creatorrole>
      </Creator>
      <Creator>
        <PersonNames>
          <Lastname>Missler</Lastname>
          <LastnameHeading>Missler</LastnameHeading>
          <Firstname>Markus</Firstname>
          <Initials>M</Initials>
          <AcademicTitle>Prof. Dr.</AcademicTitle>
        </PersonNames>
        <Address language="en">Westf&#228;lische-Wilhelms-University, Institute of Anatomy and Neurobiology, Vesaliusweg 2-4, D-48149 M&#252;nster, Germany<Affiliation>Westf&#228;lische-Wilhelms-University, Institute of Anatomy and Neurobiology, M&#252;nster, Germany</Affiliation></Address>
        <Address language="de">Westf&#228;lische-Wilhelms-Universit&#228;t, Institut f&#252;r Anatomie and Neurobiologie, Vesaliusweg 2-4, 48149 M&#252;nster, Deutschland<Affiliation>Westf&#228;lische-Wilhelms-Universit&#228;t, Institut f&#252;r Anatomie and Neurobiologie, M&#252;nster, Deutschland</Affiliation></Address>
        <Email>markus.missler&#64;uni-muenster.de</Email>
        <Creatorrole corresponding="yes" presenting="no">author</Creatorrole>
      </Creator>
    </CreatorList>
    <PublisherList>
      <Publisher>
        <Corporation>
          <Corporatename>German Medical Science GMS Publishing House</Corporatename>
        </Corporation>
        <Address>D&#252;sseldorf</Address>
      </Publisher>
    </PublisherList>
    <SubjectGroup>
      <SubjectheadingDDB>610</SubjectheadingDDB>
      <Keyword language="en">digital histology</Keyword>
      <Keyword language="en">eye-tracking study</Keyword>
      <Keyword language="en">histology training</Keyword>
      <Keyword language="en">online education</Keyword>
      <Keyword language="en">visual expertise</Keyword>
      <Keyword language="en">visual expertise development</Keyword>
      <Keyword language="en">webcam eye-tracking</Keyword>
      <Keyword language="en">webcam eye-tracking methodology</Keyword>
      <Keyword language="de">digitale Histologie</Keyword>
      <Keyword language="de">Eye-Tracking-Studie</Keyword>
      <Keyword language="de">Histologietraining</Keyword>
      <Keyword language="de">Online-Bildung</Keyword>
      <Keyword language="de">visuelle Expertise</Keyword>
      <Keyword language="de">Entwicklung visueller Expertise</Keyword>
      <Keyword language="de">Webcam-Eye-Tracking</Keyword>
      <Keyword language="de">Webcam-Eye-Tracking-Methodik</Keyword>
      <SectionHeading language="en">expertise development</SectionHeading>
      <SectionHeading language="de">Expertiseentwicklung</SectionHeading>
    </SubjectGroup>
    <DateReceived>20220930</DateReceived>
    <DateRevised>20230606</DateRevised>
    <DateAccepted>20230707</DateAccepted>
    <DatePublishedList>
      
    <DatePublished>20230915</DatePublished></DatePublishedList>
    <Language>engl</Language>
    <LanguageTranslation>germ</LanguageTranslation>
    <License license-type="open-access" xlink:href="http://creativecommons.org/licenses/by/4.0/">
      <AltText language="en">This is an Open Access article distributed under the terms of the Creative Commons Attribution 4.0 License.</AltText>
      <AltText language="de">Dieser Artikel ist ein Open-Access-Artikel und steht unter den Lizenzbedingungen der Creative Commons Attribution 4.0 License (Namensnennung).</AltText>
    </License>
    <SourceGroup>
      <Journal>
        <ISSN>2366-5017</ISSN>
        <Volume>40</Volume>
        <Issue>5</Issue>
        <JournalTitle>GMS Journal for Medical Education</JournalTitle>
        <JournalTitleAbbr>GMS J Med Educ</JournalTitleAbbr>
      </Journal>
    </SourceGroup>
    <ArticleNo>60</ArticleNo>
    <Fundings>
      <Funding>Land Nordrhein-Westfalen</Funding>
    </Fundings>
  </MetaData>
  <OrigData>
    <Abstract language="de" linked="yes"><Pgraph><Mark1>Ziel: </Mark1>Visuelle Expertise spielt eine wichtige Rolle bei der Prozessierung von Bildern, wie sie h&#228;ufig in der Radiologie oder Histologie vorkommen. Studien zeigen, dass sich mit zunehmender visueller Expertise die Augenbewegungen der Untersuchenden ver&#228;ndern. Die Entwicklungen im Webcam-Eye-Tracking erm&#246;glichen seit einiger Zeit den kosteng&#252;nstigen und fl&#228;chendeckenden Einsatz dieser Methode. Die vorliegende Studie untersuchte die Qualit&#228;t dieser Technologie in einem Histologie-Kurs im Sommersemester 2021.</Pgraph><Pgraph><Mark1>Methoden:</Mark1> An zwei Zeitpunkten wurden Medizinstudierende im dritten Semester gebeten, eine Reihe histologischer Pr&#228;parate zu beurteilen. W&#228;hrenddessen wurden ihre Augenbewegungen aufgezeichnet und gemeinsam mit der Performanz, sowie behavioralen Parameter mittels Varianzanalysen und multipler Regressionsmodelle analysiert.</Pgraph><Pgraph><Mark1>Ergebnisse: </Mark1>Die Ergebnisse zeigten, dass Webcam-Eye-Tracking qualitativ hochwertige Daten liefern kann (mittlere Genauigkeit&#61;115.7 px&#177;31.1). Au&#223;erdem spiegelten Augenbewegungen die F&#228;higkeit der Teilnehmenden wider, relevante Bildbereiche zu finden (Fixationsanzahl relevanter Bereiche&#61;6.96&#177;1.56 vs. irrelevanter Bereiche &#61; 4.50&#177;1.25). Schlie&#223;lich konnten die Augenbewegungen der Teilnehmenden deren Leistung vorhersagen (R<Superscript>2</Superscript><Subscript>adj</Subscript>&#61;0.39, p&#60;0.001).</Pgraph><Pgraph><Mark1>Diskussion:</Mark1> Diese Studie unterst&#252;tzt den Einsatz von Webcam-Eye-Tracking Technologie zur Erfassung von visueller Expertise. Da auch die Akzeptanz hoch war, werden m&#246;gliche Implementierungsoptionen diskutiert.</Pgraph></Abstract>
    <Abstract language="en" linked="yes"><Pgraph><Mark1>Objectives:</Mark1> Visual expertise is essential for image-based tasks that rely on visual cues, such as in radiology or histology. Studies suggest that eye movements are related to visual expertise and can be measured by near-infrared eye-tracking. With the popularity of device-embedded webcam eye-tracking technology, cost-effective use in educational contexts has recently become amenable. This study investigated the feasibility of such methodology in a curricular online-only histology course during the 2021 summer term.</Pgraph><Pgraph><Mark1>Methods: </Mark1>At two timepoints (t1 and t2), third-semester medical students were asked to diagnose a series of histological slides while their eye movements were recorded. Students&#8217; eye metrics, performance and behavioral measures were analyzed using variance analyses and multiple regression models.</Pgraph><Pgraph><Mark1>Results: </Mark1>First, webcam-eye tracking provided eye movement data with satisfactory quality (<Mark2>mean accuracy</Mark2>&#61;115.7 px&#177;31.1). Second, the eye movement metrics reflected the students&#8217; proficiency in finding relevant image sections (<Mark2>fixation count on relevant areas</Mark2>&#61;6.96&#177;1.56 vs. irrelevant areas&#61;4.50&#177;1.25). Third, students&#8217; eye movement metrics successfully predicted their performance (R<Superscript>2</Superscript><Subscript>adj</Subscript>&#61;0.39, p&#60;0.001).</Pgraph><Pgraph><Mark1>Conclusion:</Mark1> This study supports the use of webcam-eye-tracking expanding the range of educational tools available in the (digital) classroom. As the students&#8217; interest in using the webcam eye-tracking was high, possible areas of implementation will be discussed.</Pgraph></Abstract>
    <TextBlock language="en" linked="yes" name="Introduction">
      <MainHeadline>Introduction</MainHeadline><Pgraph>To date, several studies have investigated the eye movement patterns of expert and novice diagnosticians <TextLink reference="4"></TextLink>, <TextLink reference="6"></TextLink>, <TextLink reference="23"></TextLink>, <TextLink reference="26"></TextLink>, <TextLink reference="27"></TextLink>, <TextLink reference="29"></TextLink>, <TextLink reference="30"></TextLink>, <TextLink reference="33"></TextLink>, <TextLink reference="35"></TextLink>, <TextLink reference="39"></TextLink>, <TextLink reference="49"></TextLink>. Differences in eye movement have been associated with levels of medical expertise and related to diagnostic accuracy <TextLink reference="12"></TextLink>, <TextLink reference="39"></TextLink>. For example, expert diagnosticians in histopathology direct their focus to relevant image sections more frequently, longer, and faster than novices <TextLink reference="4"></TextLink>, <TextLink reference="35"></TextLink>. These differences in visual behavior have been identified in a variety of domains, ranging from chess <TextLink reference="18"></TextLink> to pilot training <TextLink reference="62"></TextLink>, as well as medical applications such as radiography <TextLink reference="14"></TextLink>, <TextLink reference="33"></TextLink>, <TextLink reference="36"></TextLink>, electrocardiogram interpretation <TextLink reference="53"></TextLink>, diagnostic decision-making <TextLink reference="52"></TextLink> and histopathology <TextLink reference="4"></TextLink>, <TextLink reference="6"></TextLink>, <TextLink reference="26"></TextLink>, <TextLink reference="35"></TextLink>. 
Most of these studies have been carried out using modern near-infrared eye-tracking devices, which precisely record eye movements and make them available for an in-depth examination.</Pgraph><Pgraph>Although this methodology seems to have the potential for measuring visual expertise, its widespread use in medical education has been sparse due to several implementation hurdles <TextLink reference="16"></TextLink>, <TextLink reference="32"></TextLink>. First, modern eye trackers are expensive, i. e., often exceeding &#36;10,000 per device. Second, the application of such technology is very personnel intensive, as it is necessary to have a trained scientist present during both the calibration and the study procedure. Third, the analysis of the data requires special training since large amounts of data are collected and more advanced statistical methods are applied. As a result, such trackers have mostly been used in specialized laboratories in a controlled environment. To overcome these limitations, computer scientists have recently sought to develop new solutions. The most promising approach has been conducted by Papoutsaki and colleagues. They developed an open-source JavaScript code that captures eye movements using web cameras built into everyday devices such as laptops, tablets, and cell phones <TextLink reference="44"></TextLink>, <TextLink reference="45"></TextLink>. Since then, this technology has found its way into user-friendly online platforms with graphical user interfaces. In addition, execution and calibration were automated. This approach combines numerous advantages at the same time; i.e., it is less expensive, widely available, and easy to use. It enables participants to engage in online studies in a more naturalistic home environment at convenient times, hence increasing the likelihood of successful data collection. 
However, especially eye-tracking based investigations are vulnerable when unsupervised, because variations in participants&#8217; behavior greatly affect the data quality <TextLink reference="25"></TextLink>.</Pgraph><Pgraph>The present study attempts to address these limitations by examining the use of webcam eye trackers in a curricular histology course. During the online-only course &#8211; as necessitated by the COVID-19 pandemic &#8211; a large cohort of medical students was assessed. Conceptually, this study refers to visual expertise, a construct that has a long history in the eye-tracking literature <TextLink reference="24"></TextLink>, <TextLink reference="46"></TextLink> and has been studied in the field of histopathology training <TextLink reference="4"></TextLink>, <TextLink reference="26"></TextLink>. Visual expertise provides a suitable interpretive framework for the collected data, allows the metrics to be placed in a theoretical context, and turns abstract concepts into measurable observations.</Pgraph><SubHeadline>Visual expertise in histopathology</SubHeadline><Pgraph>Visual expertise can be defined as the complex interplay between perceptual and cognitive processes that evolves by training and leads to higher accuracy in image search, recognition, and decision-making <TextLink reference="15"></TextLink>. Originating in chess research, it is now assumed to be one of the main learning goals in courses that rely on visual cueing, such as histopathology, gross anatomy, and radiology <TextLink reference="47"></TextLink>, <TextLink reference="59"></TextLink>. Capturing eye movements is a promising method for measuring visual expertise, as it is considered to occur involuntarily and allows for the in-depth evaluation of visual pattern recognition competencies <TextLink reference="32"></TextLink>. Thus, the eye-tracking methodology enables us to gain a better understanding of the mechanisms underlying visual expertise development. 
This approach is supported by the eye-mind hypothesis, which postulates a direct connection between eye movements and &#8220;what the mind is engaged with&#8221; <TextLink reference="1"></TextLink>, <TextLink reference="28"></TextLink>.</Pgraph><Pgraph>Existing literature on visual expertise in histopathology studied the eye movement behavior of expert pathologists <TextLink reference="6"></TextLink>, <TextLink reference="26"></TextLink>. Usually, these measurements were compared with those of novices in an expert-novice paradigm. It is assumed that novices develop visual expertise when their visual behaviors reach the level of those of experts. These processes are assumed to occur unconsciously, thereby reflecting procedural pattern-recognition competencies and tacit knowledge <TextLink reference="8"></TextLink>, <TextLink reference="31"></TextLink>, <TextLink reference="32"></TextLink>, <TextLink reference="34"></TextLink>.</Pgraph><SubHeadline>Theoretical concepts of visual expertise</SubHeadline><Pgraph>The most influential and empirically supported theories that try to explain visual expertise are holistic processing theory and the information reduction model <TextLink reference="19"></TextLink>, <TextLink reference="36"></TextLink>, <TextLink reference="51"></TextLink>. Holistic processing postulates that visual experts show a more integrated image perception, which allows them to rapidly focus on diagnostically relevant areas of interest (<Mark2>dAOI</Mark2>) on the slide. The time it takes to direct the gaze on dAOIs (<Mark2>time to first fixate dAOIs</Mark2>) has thus been associated with a higher level of visual expertise <TextLink reference="51"></TextLink>. This visual behavior is neurobiologically facilitated through an increased parafoveal vision <TextLink reference="51"></TextLink>, which implies that expert diagnosticians capture a wider field of view of information when viewing images. 
This enables them to discover important areas earlier and move their gaze on them more quickly. It is therefore to be expected that the time it takes to solve a task (<Mark2>view time</Mark2>) will likewise shorten with an increase in expertise <TextLink reference="4"></TextLink>, <TextLink reference="6"></TextLink>. Another prominent theory is the information-reduction model, which closely relates to the idea of selective processing <TextLink reference="19"></TextLink>. Briefly, this theory assumes that expert diagnosticians &#8211; in order to save mental resources &#8211; neglect diagnostically irrelevant information, while shifting their focus to dAOIs. In contrast, novice learners fail to detect dAOIs but move their gaze toward visually salient yet diagnostically redundant areas (<Mark2>vAOI</Mark2>) <TextLink reference="6"></TextLink>. Thus, according to the information-reduction model, successful visual expertise development can be operationalized by more frequent and longer <Mark2>fixations of dAOIs</Mark2>, as well as less frequent and shorter <Mark2>fixations of vAOIs</Mark2>.</Pgraph><Pgraph>Many findings related to the differences between experts and novices can usefully be explained by an overlap of the abovementioned theories, in which eye-tracking enables the measurability of important propositions of these theories.</Pgraph><SubHeadline>Research questions</SubHeadline><SubHeadline2>Research question 1: How accurately can webcam eye-tracking detect eye movements&#63; </SubHeadline2><Pgraph>A suitable methodology would translate into good <Mark2>accuracy, precision</Mark2> measurements, high <Mark2>data integrity</Mark2> (&#61;little data loss), and acceptable <Mark2>sampling rates</Mark2> <TextLink reference="24"></TextLink>, <TextLink reference="25"></TextLink>. 
</Pgraph><SubHeadline2>Research question 2: How valid is webcam eye-tracking in regard to capturing changes in visual expertise&#63; </SubHeadline2><Pgraph>We expected that students would develop visual expertise in the histology course. According to holistic processing theory and information reduction model, this development would show up in increased <Mark2>test scores</Mark2> and reduced view times but also in changes in eye movements, such as a reduced<Mark2> time to first fixation of dAOI</Mark2>, as well as higher <Mark2>fixation counts on dAOI</Mark2>. We hypothesized an opposite trend with visually salient but task-redundant regions of <Mark2>vAOIs</Mark2>.</Pgraph><SubHeadline2>Research question 3: How reliably can webcam eye-tracking distinguish between low and high performances&#63; </SubHeadline2><Pgraph>As eye movements are a predictor of visual expertise <TextLink reference="4"></TextLink>, interindividual differences should be predictable based on eye movement. Thus, statistical models should predict the students&#8217; test scores based on their eye movement data.</Pgraph></TextBlock>
    <TextBlock language="de" linked="yes" name="Einleitung">
      <MainHeadline>Einleitung</MainHeadline><Pgraph>In zahlreichen Studien wurden die Augenbewegungsmuster erfahrener und unerfahrener Diagnostiker miteinander verglichen <TextLink reference="4"></TextLink>, <TextLink reference="6"></TextLink>, <TextLink reference="23"></TextLink>, <TextLink reference="26"></TextLink>, <TextLink reference="27"></TextLink>, <TextLink reference="29"></TextLink>, <TextLink reference="30"></TextLink>, <TextLink reference="33"></TextLink>, <TextLink reference="35"></TextLink>, <TextLink reference="39"></TextLink>, <TextLink reference="49"></TextLink>. Die Ergebnisse zeigen eindr&#252;cklich, dass Unterschiede in den Augenbewegungen mit dem Grad der Expertise in Verbindung stehen und die diagnostische Genauigkeit widerspiegeln k&#246;nnen <TextLink reference="12"></TextLink>, <TextLink reference="39"></TextLink>. Zum Beispiel richten erfahrene Histopathologen ihren Blick h&#228;ufiger, l&#228;nger und schneller auf relevante Bildbereiche als Novizen <TextLink reference="4"></TextLink>, <TextLink reference="35"></TextLink>. Solche Unterschiede im visuellen Verhalten wurden auch in anderen Bereichen festgestellt, wie dem Schachspiel <TextLink reference="18"></TextLink>, der Pilotenausbildung <TextLink reference="62"></TextLink>, in medizinischen Bereichen wie Radiologie <TextLink reference="14"></TextLink>, <TextLink reference="33"></TextLink>, <TextLink reference="36"></TextLink>, EKG-Interpretation <TextLink reference="53"></TextLink>, diagnostischer Entscheidungsfindung <TextLink reference="52"></TextLink> und Histopathologie <TextLink reference="4"></TextLink>, <TextLink reference="6"></TextLink>, <TextLink reference="26"></TextLink>, <TextLink reference="35"></TextLink>. 
F&#252;r die Durchf&#252;hrung dieser Studien wurden in der Regel moderne Nahinfrarot-Eye-Tracking-Ger&#228;te verwendet, die eine pr&#228;zise und zuverl&#228;ssige Erfassung der Augenbewegungen am Bildschirm erm&#246;glichen.</Pgraph><Pgraph>Trotz dieser vielversprechenden Vorbefunde wird Eye-Tracking in der medizinischen Ausbildung bisher aufgrund verschiedener Implementierungsh&#252;rden nur selten eingesetzt <TextLink reference="16"></TextLink>, <TextLink reference="32"></TextLink>. Zum einen sind moderne Eye-Tracking-Systeme relativ teuer, mit Kosten weit &#252;ber 10.000 US-Dollar pro Ger&#228;t. Zum anderen erfordert der Einsatz dieser Technologie betr&#228;chtliche personelle Ressourcen, da geschultes Personal f&#252;r die Kalibrierung und Durchf&#252;hrung erforderlich ist. Da zudem gro&#223;e Datenmengen gesammelt werden, m&#252;ssen komplexe statistische Methoden angewendet werden. Aus diesen Gr&#252;nden wurden Eye-Tracker bisher haupts&#228;chlich in spezialisierten Laboren und unter kontrollierten Laborbedingungen eingesetzt. Um diese H&#252;rden zu &#252;berwinden, arbeitet die Informatik kontinuierlich an neuen technischen L&#246;sungen. Ein vielversprechender Ansatz wurde von Papoutsaki und Kollegen vorgeschlagen <TextLink reference="44"></TextLink>, <TextLink reference="45"></TextLink>. Sie entwickelten einen OpenSource Programmiercode in JavaScript, mit dem die Augenbewegungen mithilfe von eingebauten Webcams in allt&#228;glichen Ger&#228;ten wie Laptops, Tablets und Mobiltelefonen erfasst werden k&#246;nnen. Diese Webcam-Eye-Tracking Technologie wurde schlie&#223;lich in benutzerfreundliche Online-Plattformen mit grafischen Benutzeroberfl&#228;chen integriert und die Ausf&#252;hrung sowie die Kalibrierung vollautomatisiert. Dieser Ansatz vereint mehrere Vorteile: Er ist kosteng&#252;nstiger, weit verbreitet und einfach zu bedienen. 
Dadurch k&#246;nnen Teilnehmer an Online-Eye-Tracking-Studien auch von zu Hause und zu passenden Zeiten teilnehmen, was die Wahrscheinlichkeit der Studienteilnahme erh&#246;ht. Es ist jedoch wichtig, bei Eye-Tracking-basierten Untersuchungen besonders auf die Datenqualit&#228;t zu achten, da Unterschiede im Verhalten der Teilnehmer w&#228;hrend der Durchf&#252;hrung die Datenqualit&#228;t beeinflussen k&#246;nnen <TextLink reference="25"></TextLink>.</Pgraph><Pgraph>Die vorliegende Studie untersuchte den Einsatz von Webcam-Eye-Tracking in einem curricularen Online-Histologiekurs, der aufgrund der COVID-19-Pandemie online durchgef&#252;hrt wurde. Als theoretisches Framework diente das Konstrukt der visuellen Expertise, das eine lange Tradition in der Eye-Tracking-Literatur hat <TextLink reference="24"></TextLink>, <TextLink reference="46"></TextLink> und zu dem bereits Vorbefunde in der Histopathologie vorliegen <TextLink reference="4"></TextLink>, <TextLink reference="26"></TextLink>. Visuelle Expertise bietet dabei vor allem einen interpretativen Rahmen f&#252;r die erhobenen Daten, erm&#246;glicht es, die Augenbewegungen in einen theoretischen Kontext zu stellen und abstrakte Konzepte zu operationalisieren.</Pgraph><SubHeadline>Visuelle Expertise in der Histopathologie</SubHeadline><Pgraph>Visuelle Expertise umfasst ein komplexes Zusammenspiel zwischen wahrnehmenden und kognitiven Prozessen <TextLink reference="15"></TextLink>. Sie ist domainspezifisch, entwickelt sich durch Training und f&#252;hrt zu einer h&#246;heren Pr&#228;zision bei der Bildsuche, -erkennung und -entscheidung <TextLink reference="15"></TextLink>. Urspr&#252;nglich aus der Schachforschung stammend, wird sie heute als eines der Hauptlernziele in Trainings angesehen, bei denen die Beurteilung visueller Informationen zentral ist, wie in der Histopathologie, makroskopischen Anatomie oder Radiologie <TextLink reference="47"></TextLink>, <TextLink reference="59"></TextLink>. 
Zur Erfassung von visueller Expertise bieten sich Augenbewegungen an, da sie oft unwillk&#252;rlich erfolgen und die F&#228;higkeit zur Erkennung von Mustern widerspiegeln <TextLink reference="32"></TextLink>. Dieser Ansatz wird vor allem durch die <Mark2>Eye-Mind-Hypothese</Mark2> gest&#252;tzt, die postuliert, dass es eine enge Verbindung gibt zwischen dem, was die Augen sehen und dem, womit sich das Gehirn in dem Moment &#8222;besch&#228;ftigt&#8220; <TextLink reference="1"></TextLink>, <TextLink reference="28"></TextLink>.</Pgraph><Pgraph>Bisherige Studien zur visuellen Expertise in der Histopathologie charakterisierten vor allem die Augenbewegungen erfahrener Pathologen <TextLink reference="6"></TextLink>, <TextLink reference="26"></TextLink>. In einem Experten-Novizen-Paradigma wurden die Augenbewegungen der Pathologen mit denen von Novizen verglichen. Es wird im Allgemeinen angenommen, dass Novizen visuelle Expertise entwickeln, wenn sich ihr visuelles Verhalten dem des Experten ann&#228;hert. Diese Entwicklungsprozesse sind gr&#246;&#223;tenteils unbewusst und spiegeln prozedurale Mustererkennungsf&#228;higkeiten und implizites Wissen (<Mark2>&#8222;tacit knowledge&#8220;</Mark2>) wider <TextLink reference="8"></TextLink>, <TextLink reference="31"></TextLink>, <TextLink reference="32"></TextLink>, <TextLink reference="34"></TextLink>.</Pgraph><SubHeadline>Theoretische Konzepte von visueller Expertise</SubHeadline><Pgraph>Einflussreiche und empirisch-gest&#252;tzte Theorien zur Erkl&#228;rung von visueller Expertise sind die holistische Verarbeitungstheorie und das Informationsreduktionsmodell <TextLink reference="19"></TextLink>, <TextLink reference="36"></TextLink>, <TextLink reference="51"></TextLink>. 
Die holistische Verarbeitungstheorie postuliert, dass Personen mit einer hohen visuellen Expertise eine st&#228;rker integrierte Bildwahrnehmung aufweisen, die es ihnen erm&#246;glicht, den Fokus schnell auf <Mark2>diagnostisch relevante Bereiche (dAOIs)</Mark2> auf dem Bild zu lenken. Die Zeit, die ben&#246;tigt wird, um den Blick erstmals auf ein dAOI zu lenken (<Mark2>Zeit bis zur ersten Fixation von dAOIs</Mark2>), wurde daher mit einem h&#246;heren Ma&#223; an visueller Expertise in Verbindung gebracht <TextLink reference="51"></TextLink>. Dieses visuelle Verhalten wird neurobiologisch vermutlich durch ein gr&#246;&#223;eres parafoveales Sichtfeld erm&#246;glicht <TextLink reference="51"></TextLink>, d.h., Experten erfassen und prozessieren Bildinformationen aus einem breiteren Sichtfeld. Dies erm&#246;glicht es ihnen, wichtige Bildbereiche fr&#252;her zu entdecken und ihren Blick schneller darauf zu lenken. Daher ist bei einer Zunahme von Expertise zu erwarten, dass sich die <Mark2>Zeit zur L&#246;sung einer Aufgabe (Betrachtungszeit)</Mark2> verk&#252;rzt <TextLink reference="4"></TextLink>, <TextLink reference="6"></TextLink>. Eine weitere prominente Theorie ist das Informationsreduktionsmodell, das eng mit der Idee der selektiven Verarbeitung verkn&#252;pft ist <TextLink reference="19"></TextLink>. Diese Theorie geht davon aus, dass Experten diagnostisch irrelevante Informationen vernachl&#228;ssigen und stattdessen ihren Fokus auf dAOIs lenken, um mentale Ressourcen zu sparen. Im Gegensatz dazu erkennen Novizen dAOIs nicht, sondern lenken ihren Blick vor allem auf visuell auff&#228;llige, jedoch <Mark2>diagnostisch irrelevante Bereiche (vAOIs)</Mark2> <TextLink reference="6"></TextLink>. 
Daher kann nach dem Informationsreduktionsmodell eine erfolgreiche Entwicklung visueller Expertise durch h&#228;ufigere und l&#228;ngere Fixationen von dAOIs, sowie weniger h&#228;ufige und k&#252;rzere Fixationen von vAOIs operationalisiert werden.</Pgraph><Pgraph>Viele Unterschiede zwischen Experten und Anf&#228;ngern k&#246;nnen durch eine Kombination der oben genannten Theorien erkl&#228;rt werden, wobei Eye-Tracking die Messbarkeit wichtiger Aussagen dieser Theorien &#252;berhaupt erm&#246;glicht.</Pgraph><SubHeadline>Forschungsfragen</SubHeadline><SubHeadline2>Forschungsfrage 1: Wie genau kann Webcam-Eye-Tracking Augenbewegungen erfassen&#63;</SubHeadline2><Pgraph>Eine hohe Genauigkeit w&#252;rde erreicht werden, wenn eine hohe Akkuratheit und Pr&#228;zision, sowie eine hohe Datenintegrit&#228;t (&#61;wenig Datenverlust), sowie eine hohe Bildrate erzielt werden <TextLink reference="24"></TextLink>, <TextLink reference="25"></TextLink>.</Pgraph><SubHeadline2>Forschungsfrage 2: Wie zuverl&#228;ssig kann Webcam-Eye-Tracking Unterschiede in der visuellen Expertise erfassen&#63;</SubHeadline2><Pgraph>Wir erwarteten, dass Studierende im Laufe des Histologiekurses nat&#252;rlicherweise visuelle Expertise entwickeln. Dies w&#252;rde sich in einer <Mark2>k&#252;rzeren Betrachtungszeit</Mark2> histologischer Bilder und <Mark2>h&#246;heren Testscores</Mark2> zeigen. Entsprechend der holistischen Verarbeitungstheorie, sowie dem Informationsreduktionsmodell sollten sich diese Unterschiede auch in den Augenbewegungen zeigen, wie z. B. in einer<Mark2> reduzierten Zeit bis zur ersten Fixation von dAOIs</Mark2>, sowie einer <Mark2>h&#246;heren Fixationsanzahl von dAOIs</Mark2>. 
Wir postulierten entgegengesetzte Effekte f&#252;r visuell saliente, jedoch diagnostisch irrelevante <Mark2>vAOIs.</Mark2></Pgraph><SubHeadline2>Forschungsfrage 3: Wie zuverl&#228;ssig kann Webcam-Eye-Tracking zwischen Performanzen unterscheiden&#63;</SubHeadline2><Pgraph>Da Augenbewegungen ein Surrogat f&#252;r visuelle Expertise sind <TextLink reference="4"></TextLink>, sollten interindividuelle Differenzen anhand von Augenbewegungen vorhersagbar sein. Daher sollten statistische Modelle die Testleistung der Studierenden anhand ihrer Augenbewegungsdaten vorhersagen k&#246;nnen.</Pgraph></TextBlock>
    <TextBlock language="en" linked="yes" name="Methods">
      <MainHeadline>Methods</MainHeadline><Pgraph>This study was conducted at the Westf&#228;lische Wilhelms-University in M&#252;nster during the summer term of 2021. At two particular timepoints, one third-semester preclinical cohort was evaluated longitudinally alongside an online-only histology course. More details can be retrieved from the supplementary files (see attachment 1 <AttachmentLink attachmentNo="1"/>).</Pgraph><SubHeadline>Participating students</SubHeadline><Pgraph>The first measurement (t1) was conducted after 10 three-hour sessions. Here, <Mark2>N</Mark2>&#61;51 students (age <Mark2>mean</Mark2> 21.56&#177;2.21 years; 35 females) were included for data analysis. The second measurement (t2) was conducted after 20 three-hour sessions immediately before a written examination, as an improvement in students&#8217; visual expertise can be expected at such a point. N&#61;77 students (age <Mark2>mean</Mark2> 21.97&#177;2.25 years; 59 females) were included in t2. Informed consent was received from all students. This study was carried out in accordance with the Declaration of Helsinki. The study protocol was reviewed by the ethics committee (&#8220;Ethik-Kommission der &#196;rztekammer Westfalen-Lippe und der Westf&#228;lischen Wilhelms-University&#8221;) and deemed not to require formal medical ethics approval.</Pgraph><SubHeadline>Study procedure of the webcam eye-tracking study</SubHeadline><Pgraph>The study design corresponds to a single-group pre-post intervention design with a measurement interval of 7 weeks (&#61;10 course sessions) (see attachment 1 <AttachmentLink attachmentNo="1"/>). Due to the COVID-19 pandemic, the complete semester cohort was obliged to participate in the synchronous online-only course. A run-through pilot study was performed with two participants to optimize the eye-tracking environment. Here, the main focus was to adjust the duration of the presentation time and to assess the behavior during the study. 
The actual study participants were recruited during the online course and received a hyperlink that led to the online study. They could perform the study at home anytime during a period of one week around the two timepoints. After starting the study, the students passed a 40-point eye-tracking calibration and a 4-point test for accuracy (see figure 1 <ImgLink imgNo="1" imgType="figure"/> and figure 2 a <ImgLink imgNo="2" imgType="figure"/>). The participants next looked at six histology slides for a maximum of 15 seconds each. After each slide, the participants were prompted to identify the organ on the slide. Meanwhile, the test score, view time, and eye movements were recorded.</Pgraph><SubHeadline>Online eye-tracking with web cameras</SubHeadline><Pgraph>An open-source JavaScript code (WebGazer) was used to record the binocular gaze position <TextLink reference="45"></TextLink>. The study took approximately 10-15 minutes and ran entirely on a web browser in full view mode; no additional software was needed. No personal image data were transmitted during the session, as the JavaScript code runs locally on the participant&#8217;s computer. The output provided the respective binocular X and Y coordinates with a timestamp, and subject IDs. We offered e-mail support for students with technical problems (<Mark2>n</Mark2>&#61;1 at t<Subscript>2</Subscript>).</Pgraph><SubHeadline>Description of the stimuli and instruction </SubHeadline><Pgraph>Six different histological slides were shown at each of the two timepoints, and care was taken to ensure that the level of difficulty was approximately the same (see figure 2 <ImgLink imgNo="2" imgType="figure"/>). Different slides were used at both time points to prevent the recognition of the slides based on nonspecific patterns (e.g., staining). These slides were instructed with a slide identification task: &#8220;identify the following organ&#8221;. 
From our own experience in oral examinations, we think that the rapid identification of histological slides is a selective task for novice students. Slides were presented in the same order and for a maximum duration of 15 seconds. The view time was deliberately kept short, both to increase the overall difficulty and to capture early search behavior and rapid pattern recognition competencies. Scrolling or zooming was disabled to reduce the complexity for the students and enhance comparability at the expense of authenticity. Students who finished the task in less than 15 seconds could skip to the questioning to prevent idle eye movements. Returning to an image was not possible.</Pgraph><SubHeadline>Description of the test score</SubHeadline><Pgraph>To reduce the probability of incidental answers <TextLink reference="21"></TextLink> and to make sure students did not simply guess the right multiple-choice answer by chance, the participants were asked free text questions (e.g., &#8220;Which organ did you identify&#63;&#8221;) after each slide. This approach meant that correct answers had to be actively produced by the students. The written answers were evaluated manually and blindly by the first author. Correct answers were rewarded with one point. The test score was calculated as the sum of all the correct answers (max. 6 points). At the end of the study, students received sample solutions as feedback to reward them for their participation (see table 1 <ImgLink imgNo="1" imgType="table"/>).</Pgraph><SubHeadline>Procedure for data analysis</SubHeadline><Pgraph>Visualizations of the eye-tracking data were performed using RStudio software (Version 1.3.1093, RStudio Team, 2020) with the scan path extension <TextLink reference="61"></TextLink>. Statistical analyses were performed with SPSS version 28 (IBM Corp., Armonk, NY). 
All statistics were performed under a significance value of &#945;&#61;0.05 and specified by a two-tailed <Mark2>p value</Mark2>, and an effect size (partial) &#951;<Superscript>2</Superscript>. An &#951;<Superscript>2 </Superscript>greater than 0.14 was considered a strong effect. To capture mean differences, a two-sided t-test or ANOVA (&#62;2 variables) was performed with Bonferroni correction for multiple testing to counteract the likelihood of incorrectly rejecting a null hypothesis. To identify the discrete predictive value of each eye movement variable (independent variables) with the test score (dependent variable), a multivariate regression analysis was performed for each timepoint. </Pgraph></TextBlock>
    <TextBlock language="de" linked="yes" name="Methoden">
      <MainHeadline>Methoden</MainHeadline><Pgraph>Diese Studie wurde im Sommersemester 2021 an der Westf&#228;lischen Wilhelms-Universit&#228;t in M&#252;nster durchgef&#252;hrt. An zwei Messzeitpunkten wurden Medizinstudierende im dritten Fachsemester longitudinal und parallel zu einem curricularen Online-Histologiekurs untersucht (siehe Anhang 1 <AttachmentLink attachmentNo="1"/>).</Pgraph><SubHeadline>Stichprobe</SubHeadline><Pgraph>Die erste Messung (t1) wurde nach 10 dreist&#252;ndigen Kursveranstaltungen durchgef&#252;hrt. F&#252;r die Datenanalyse wurden hierf&#252;r 51 Studierende einbezogen (Durchschnittsalter 21.56&#177;2.21 Jahre; 35 weiblich). Die zweite Messung (t2) wurde nach 20 dreist&#252;ndigen Sitzungen und unmittelbar vor einer schriftlichen Abschlusspr&#252;fung durchgef&#252;hrt, da wir an diesem sp&#228;ten Punkt eine deutliche Verbesserung der visuellen Expertise der Studierenden erwarten konnten. An t2 wurden 77 Studierende rekrutiert (Durchschnittsalter 21.97&#177;2.25 Jahre; 59 weiblich). Von allen Studierenden wurde eine informierte Einwilligung eingeholt. Diese Studie wurde gem&#228;&#223; der Deklaration von Helsinki durchgef&#252;hrt. Das Studienprotokoll wurde von der Ethikkommission (&#8222;Ethik-Kommission der &#196;rztekammer Westfalen-Lippe und der Westf&#228;lischen Wilhelms-Universit&#228;t&#8220;) akzeptiert.</Pgraph><SubHeadline>Studienablauf der Webcam-Eye-Tracking-Studie</SubHeadline><Pgraph>Das Studiendesign entspricht einem Ein-Gruppen Pr&#228;-Post-Interventionsdesign mit einem Messintervall von 7 Wochen (&#61; 10 Kurseinheiten) (siehe Anhang 1 <AttachmentLink attachmentNo="1"/>). Aufgrund der COVID-19-Pandemie nahmen alle Teilnehmenden online an dem Kurs teil. Es wurde zun&#228;chst eine Pilotstudie mit zwei Personen durchgef&#252;hrt, um die Eye-Tracking-Umgebung zu optimieren. Hier lag der Schwerpunkt auf der Anpassung der Dauer der Pr&#228;sentationszeit und der Beurteilung des Verhaltens w&#228;hrend der Studie. 
Die eigentlichen Studienteilnehmenden wurden w&#228;hrend des Online-Kurses rekrutiert und erhielten einen Hyperlink, der zur Online-Studie f&#252;hrte. Sie konnten die Studie flexibel in einem vier w&#246;chigen Zeitraum von zu Hause aus durchf&#252;hren. Nachdem der Hyperlink aufgerufen wurde, absolvierten die Teilnehmenden zun&#228;chst eine 40-Punkte Eye-Tracking-Kalibrierung und einen 4-Punkte-Genauigkeitstest (siehe Abbildung 1 <ImgLink imgNo="1" imgType="figure"/> und Abbildung 2 a <ImgLink imgNo="2" imgType="figure"/>). Anschlie&#223;end betrachteten die Teilnehmenden sechs histologische Pr&#228;parate, jeweils maximal 15 Sekunden lang. Nach jedem Pr&#228;parat wurden die Teilnehmenden aufgefordert, das Organ auf dem Pr&#228;parat zu identifizieren. Dabei wurden die Testergebnisse, die Betrachtungszeit und die Augenbewegungen erfasst.</Pgraph><SubHeadline>Online-Eye-Tracking mit Webcams</SubHeadline><Pgraph>F&#252;r die Aufzeichnung der binokularen Augenbewegungen wurde ein OpenSource JavaScript-Code namens WebGazer verwendet <TextLink reference="45"></TextLink>. Die Studie dauerte ungef&#228;hr 10-15 Minuten und wurde im Vollbildmodus eines Webbrowsers ausgef&#252;hrt. Die Installation von zus&#228;tzlicher Software war nicht erforderlich. W&#228;hrend der Sitzung wurden keine pers&#246;nlichen Bilddaten &#252;bertragen, da der JavaScript-Code lokal auf dem Computer der Teilnehmenden die Berechnungen durchf&#252;hrt. Die Ausgabe lieferte entsprechenden binokularen X- und Y-Koordinaten mit einem Zeitstempel sowie anonymisierte Teilnehmer-IDs. Wir boten per E-Mail technischen Support f&#252;r Studierende an, die Probleme hatten (<Mark2>n</Mark2>&#61;1 bei t2).</Pgraph><SubHeadline>Stimuli und Instruktion</SubHeadline><Pgraph>An beiden Zeitpunkten wurden jeweils sechs verschiedene histologische Pr&#228;parate gezeigt. 
Es wurde darauf geachtet, dass das Schwierigkeitsniveau zwischen den Messzeitpunkten gleich war (siehe Abbildung 2 <ImgLink imgNo="2" imgType="figure"/>). An beiden Messzeitpunkten wurden unterschiedliche Pr&#228;parate verwendet, um das Wiedererkennen anhand unspezifischer Muster (z. B. F&#228;rbung) zu verhindern. Die Instruktion lautete: &#8222;Identifizieren Sie das folgende Organ&#8220;. Aus unserer eigenen Erfahrung in zahlreichen m&#252;ndlichen Pr&#252;fungen halten wir die schnelle Identifikation histologischer Pr&#228;parate f&#252;r eine hochselektive und trennscharfe Aufgabe f&#252;r Novizen. Die Pr&#228;parate wurden in derselben Reihenfolge und f&#252;r eine maximale Dauer von 15 Sekunden pr&#228;sentiert. Die Betrachtungszeit wurde bewusst kurzgehalten, um die Gesamtschwierigkeit zu erh&#246;hen und um schnelle Mustererkennungsf&#228;higkeiten zu erfassen. Scrollen oder Zoomen wurde deaktiviert, um die Komplexit&#228;t f&#252;r die Teilnehmenden zu reduzieren und die Vergleichbarkeit zu verbessern. Teilnehmende, die die Aufgabe in weniger als 15 Sekunden abgeschlossen hatten, konnten direkt zur Abfrage springen, um &#8222;unn&#246;tige Augenbewegungen&#8220; zu vermeiden. Es war nicht m&#246;glich, zu den Pr&#228;paraten zur&#252;ckzukehren.</Pgraph><SubHeadline>Testergebnisse</SubHeadline><Pgraph>Um die Ratewahrscheinlichkeit zu reduzieren, wurden den Teilnehmenden Freitextfragen anstelle von Multiple-Choice Fragen gestellt (&#8222;Welches Organ haben Sie identifiziert&#63;&#8220;) <TextLink reference="21"></TextLink>. Somit mussten die Studierenden die korrekten Antworten aktiv formulieren. Die schriftlichen Antworten wurden manuell und blind vom Erstautor ausgewertet. Korrekte Antworten wurden mit einem Punkt benotet. Der Testscore wurde als Summe aller korrekten Antworten berechnet (max. 6 Punkte). 
Am Ende der Studie erhielten die Teilnehmenden Musterl&#246;sungen, um sie f&#252;r ihre Teilnahme zu belohnen (siehe Tabelle 1 <ImgLink imgNo="1" imgType="table"/>).</Pgraph><SubHeadline>Datenanalyse</SubHeadline><Pgraph>Die Visualisierung der Eye-Tracking-Daten wurde mit der Software RStudio (Version 1.3.1093, RStudio Team, 2020) unter Verwendung der Scanpath-Erweiterung <TextLink reference="61"></TextLink> erstellt. Statistische Analysen wurden mit SPSS, Version 28 (IBM Corp., Armonk, NY) durchgef&#252;hrt. Alle Testungen erfolgten unter einem Signifikanzniveau von &#945;&#61;0,05 und wurden durch einen zweiseitigen p-Wert sowie eine Effektst&#228;rke (partieller) &#951;<Superscript>2</Superscript> spezifiziert. Ein &#951;<Superscript>2</Superscript> gr&#246;&#223;er als 0.14 wurde dabei als starker Effekt angenommen. Um mittlere Unterschiede zu erfassen, wurde ein zweiseitiger t-Test oder eine ANOVA (&#62;2 Variablen) mit Bonferroni-Korrektur f&#252;r mehrfache Tests durchgef&#252;hrt. Um den diskreten Vorhersagewert der Augenbewegungen (unabh&#228;ngige Variable) auf den Testscore (abh&#228;ngige Variable) zu ermitteln, wurde eine multivariate Regressionsanalyse zu jedem Messzeitpunkt durchgef&#252;hrt.</Pgraph></TextBlock>
    <TextBlock language="en" linked="yes" name="Results">
      <MainHeadline>Results</MainHeadline><SubHeadline>RQ1: After strict preprocessing, the webcam eye-tracking shows an acceptable data quality</SubHeadline><Pgraph>The 4-points test for accuracy showed gaze clouds on all four dots (yellow circles) (see figure 3 a <ImgLink imgNo="3" imgType="figure"/>). The gaze intensity is illustrated by different colors (red&#62;yellow&#62;green&#62;blue&#62;black), while the gaze cloud in the center of the screen corresponds to the central fixation bias. At both timepoints, there was a small off-set located downward in the upper quadrants. The click-to-gaze accuracy was suitable across both time points, with a <Mark2>mean</Mark2>&#61;115.7 px&#177;31.1 for t1 and <Mark2>M</Mark2>&#61;116.9 px&#177;25.8 for t2 (see figure 3 b <ImgLink imgNo="3" imgType="figure"/>). This value represents the deviation of the target point and the actual gaze position with a smaller value indicating a higher level of accuracy. A sampling rate of the participants&#8217; webcams was in the range of 14-32 Hz (<Mark2>M</Mark2>&#61;28.8 Hz&#177;4.1) for t1, and 2-32 Hz (<Mark2>M</Mark2>&#61;28.3 Hz&#177;5.1) for t2 (see figure 3 c <ImgLink imgNo="3" imgType="figure"/>). The participants&#8217; gaze-on-screen rate ranged from 29-99&#37; (<Mark2>M</Mark2>&#61;88.8&#37;&#177;15.3) for t1 and 3-99&#37; (<Mark2>M</Mark2>&#61;86.0&#37;&#177;19.5) for t2 (see figure 3 d <ImgLink imgNo="3" imgType="figure"/>). The data integrity (completeness of the data) at t1 was <Mark2>M</Mark2>&#61;92.17&#37;&#177;5.98 and for t2 <Mark2>M</Mark2>&#61;93.35&#37; 6.01; thus, approximately 7-8&#37; of the data were lost in both timepoints (see figure 3 e <ImgLink imgNo="3" imgType="figure"/>). 
Reasons for this loss may include detection difficulties and eye blinking.</Pgraph><SubHeadline>RQ2: Webcam eye-tracking measures visual expertise development in curricular histology training</SubHeadline><Pgraph>The first analysis aimed to show whether the students improved in visual expertise over the course span (see figure 4 <ImgLink imgNo="4" imgType="figure"/>). The following analyses on eye-tracking metrics were conducted to show that the webcam eye-tracking can reflect this development (see figure 5 <ImgLink imgNo="5" imgType="figure"/>).</Pgraph><SubHeadline2>Testscore increases and view time decreases from t1 to t2</SubHeadline2><Pgraph>The test score increased (t(91)&#61;5.69, <Mark2>p</Mark2>&#60;0.001, &#951;<Superscript>2</Superscript>&#61;0.26) from<Mark2> M</Mark2>&#61;1.69 points&#177;.69 at t1 to <Mark2>M</Mark2>&#61;3.48 points&#177;1.31 at t2, which indicates an improvement in diagnostic accuracy in the slide identification task. The analysis of the total view time for t1 (<Mark2>M</Mark2>&#61;14.35 s&#177;1.09) and t2 (<Mark2>M</Mark2>&#61;12.38 s&#177;2.08) showed a decline (t(91)&#61;5.99, <Mark2>p</Mark2>&#60;0.001, &#951;<Superscript>2</Superscript>&#61;0.28) (see figure 4 <ImgLink imgNo="4" imgType="figure"/>). There was a ceiling effect for the view time and a floor effect for the test score at both timepoints (t1&#62;t2), which suggest that the task was challenging for the students who were under a high level of time pressure.</Pgraph><SubHeadline2>The fixation count on dAOI is higher in t2</SubHeadline2><Pgraph>There was no difference between the fixation counts of the dAOI and those of the vAOI at t1 (F(51)&#61;1.74, <Mark2>p</Mark2>&#62;0.999), which suggests that students could not effectively distinguish between visually salient but irrelevant areas and diagnostically important areas (<Mark2>mean</Mark2> fixation count for dAOI&#61;2.10&#177;0.69 vs. vAOI&#61;1.12&#177;0.37) (see figure 5 b <ImgLink imgNo="5" imgType="figure"/>). 
Students at t2 showed an increased orientation toward the dAOI (F(42)&#61;3.53, <Mark2>p</Mark2>&#61;0.003, &#951;<Superscript>2</Superscript>&#61;0.43), thereby affirming information-reduction theory (mean fixation counts for dAOI&#61;6.96&#177;1.56 vs. vAOI&#61;4.50&#177;1.25). </Pgraph><SubHeadline2>The fixation duration on dAOI is higher in t2</SubHeadline2><Pgraph>Students at t1 showed a lower fixation duration on the dAOI than on the vAOI (F(51)&#61;14.26, <Mark2>p</Mark2>&#60;0.001, &#951;<Superscript>2</Superscript>&#61;0.74), which indicates a lower interaction rate with the diagnostically relevant areas (see figure 5 c <ImgLink imgNo="5" imgType="figure"/>). However, in t2, the fixation duration on the dAOI increased so that the difference with the vAOI was no longer significant (F(42)&#61;1.28, <Mark2>p</Mark2>&#62;0.999); this indicated a higher detection rate of diagnostically relevant regions at t2. These results showed an increased ability of the trained students to interact with dAOIs; however, they were still occupied with vAOIs.</Pgraph><SubHeadline2>The time to first fixation of dAOI is lower in t2</SubHeadline2><Pgraph>At t1, there was no significant difference between the dAOI (M&#61;5394 ms&#177;1025) and the vAOI (<Mark2>M</Mark2>&#61;5696 ms&#177;1515) (F(51)&#61;1.29, <Mark2>p</Mark2>&#62;0.999), while at t2, the time to first fixation for the dAOI (<Mark2>M</Mark2>&#61;2862 ms&#177;965) was lower than that for the vAOI (<Mark2>M</Mark2>&#61;3557 ms&#177;1094) (F(51)&#61;2.69, <Mark2>p</Mark2>&#61;0.046, &#951;<Superscript>2</Superscript>&#61;0.10) (see figure 5 d <ImgLink imgNo="5" imgType="figure"/>). 
Together, students at t2 were able to detect dAOIs faster than vAOIs, which is in line with the holistic theory of visual expertise.</Pgraph><SubHeadline>RQ3: Eye movements predict test performance in the slide identification task</SubHeadline><Pgraph>Eye movements recorded by the webcam eye-tracking could predict the test scores at t1 (see table 2 <ImgLink imgNo="2" imgType="table"/>). In other words, the statistical model was able to predict the corresponding test scores of the students by their eye movements. Approximately 39&#37; of the test score variance (R<Superscript>2</Superscript><Subscript>adj</Subscript>&#61;0.392, <Mark2>p</Mark2>&#60;0.001) was explainable by nine eye-movement variables. At t2, the statistical test result narrowly misses significance (R<Superscript>2</Superscript><Subscript>adj</Subscript>&#61;0.103, <Mark2>p</Mark2>&#61;0.057), which indicates the lower predictive power of the model. The predictors had similar regression coefficients at both timepoints. These results suggest that eye metrics were robust predictors for early visual expertise. Moreover, we observed that the predictive power is higher at an early timepoint than later in the course.</Pgraph></TextBlock>
    <TextBlock language="de" linked="yes" name="Ergebnisse">
      <MainHeadline>Ergebnisse</MainHeadline><SubHeadline>Die Webcam-Eye-Tracking-Methode zeigt eine akzeptable Datenqualit&#228;t</SubHeadline><Pgraph>Eine Blickwolke zeigt die Ergebnisse des 4-Punkte-Genauigkeitstests (siehe Abbildung 3 a <ImgLink imgNo="3" imgType="figure"/>). Die Blickintensit&#228;t wird durch verschiedene Farben veranschaulicht (rot&#62;gelb&#62;gr&#252;n&#62;blau&#62;schwarz). Die Blickwolke in der Mitte des Bildschirms entspricht dem <Mark2>zentralen Fixationsbias</Mark2>. In den oberen beiden Quadranten gab es eine geringf&#252;gige Abweichung nach unten. Die Klick-zu-Blick-Genauigkeit war an beiden Zeitpunkten akzeptabel, mit einem Durchschnittswert von 115.7 px&#177;31.1 an t1 und M&#61;116.9 px&#177;25.8 an t2 (siehe Abbildung 3 b <ImgLink imgNo="3" imgType="figure"/>). Dieser Wert stellt die Abweichung des Ziel-Punktes von der tats&#228;chlichen Blickposition dar, wobei ein kleinerer Wert auf eine h&#246;here Genauigkeit hinweist. Die Webcam-Bildrate der Teilnehmenden lag im Bereich von 14-32 Hz (M&#61;28.8 Hz&#177;4.1) an t1 und 2-32 Hz (M&#61;28.3 Hz&#177;5.1) an t2 (siehe Abbildung 3 c <ImgLink imgNo="3" imgType="figure"/>). Die Blick-auf-Bildschirm-Rate der Teilnehmenden lag zwischen 29-99&#37; (M&#61;88.8&#37;&#177;15.3) an t1 und 3-99&#37; (M&#61;86.0&#37;&#177;19.5) an t2 (siehe Abbildung 3 d <ImgLink imgNo="3" imgType="figure"/>). Die Datenintegrit&#228;t (Vollst&#228;ndigkeit der Daten) betrug an t1 M&#61;92.17&#37;&#177;5.98 und bei t2 M&#61;93.35&#37;&#177;6.01; somit gingen in beiden Zeitpunkten etwa 7-8&#37; der Daten verloren (siehe Abbildung 3 e <ImgLink imgNo="3" imgType="figure"/>). 
Gr&#252;nde f&#252;r diesen Verlust k&#246;nnen Schwierigkeiten bei der Erkennung und Blinzeln sein.</Pgraph><SubHeadline>Webcam-Eye-Tracking misst die Entwicklung von visueller Expertise in der Histologie</SubHeadline><Pgraph>Die erste Analyse zielte darauf ab zu &#252;berpr&#252;fen, ob sich die Studierenden im Laufe des Kurses in ihrer visuellen Expertise &#252;berhaupt verbessert haben (siehe Abbildung 4 <ImgLink imgNo="4" imgType="figure"/>). Die anschlie&#223;enden Analysen zu den Augenbewegungen wurden durchgef&#252;hrt, um zu zeigen, dass Webcam-Eye-Tracking diese Unterschiede messen kann (siehe Abbildung 5 <ImgLink imgNo="5" imgType="figure"/>).</Pgraph><SubHeadline>Der Testscore steigt an und die Betrachtungszeit nimmt von t1 zu t2 ab</SubHeadline><Pgraph>Der Testscore verbesserte sich (t(91)&#61;5.69, p&#60;0.001, &#951;<Superscript>2</Superscript>&#61;0.26) von M&#61;1.69 Punkten&#177;0.69 an t1 auf M&#61;3.48 Punkten&#177;1.31 an t2, was auf eine Verbesserung der visuellen Expertise hinweist. Die Analyse der Gesamtbetrachtungszeit an t1 (M&#61;14.35 s&#177;1.09) und t2 (M&#61;12.38 s&#177;2.08) zeigte erwartungsgem&#228;&#223; einen R&#252;ckgang (t(91)&#61;5.99, p&#60;0.001, &#951;<Superscript>2</Superscript>&#61;0.28) (siehe Abbildung 4 <ImgLink imgNo="4" imgType="figure"/>). Es gab einen Deckeneffekt f&#252;r die Betrachtungszeit und einen Bodeneffekt f&#252;r den Testscore zu beiden Zeitpunkten (t1&#62;t2), was darauf hindeutet, dass die Aufgabe f&#252;r die Teilnehmenden sehr anspruchsvoll war.</Pgraph><SubHeadline>Die Fixation von dAOIs ist an t2 h&#246;her, als an t1</SubHeadline><Pgraph>An t1 gab es keinen Unterschied zwischen der Anzahl an Fixationen der dAOI und der vAOI (F(51)&#61;1.74, p&#62;0.999), was zeigt, dass die Teilnehmenden nicht effektiv zwischen irrelevanten und wichtigen Bereichen unterscheiden konnten (mittlere Fixationsanzahl f&#252;r dAOI&#61;2.10&#177;0.69 vs. 
vAOI&#61;1.12&#177;0.37) (siehe Abbildung 5 b <ImgLink imgNo="5" imgType="figure"/>). Studierende an t2 zeigten jedoch eine st&#228;rkere Orientierung zu dAOIs (F(42)&#61;3.53, p&#61;0.003, &#951;<Superscript>2</Superscript>&#61;0.43), (mittlere Fixationsanzahl f&#252;r dAOI&#61;6.96&#177;1.56 vs. vAOI&#61;4.50&#177;1.25), passend zum Informationsreduktionsmodell.</Pgraph><SubHeadline>Die Zeit bis zur ersten Fixation von dAOIs ist an t2 geringer</SubHeadline><Pgraph>An t1 gab es keinen signifikanten Unterschied zwischen der Zeit bis zur ersten Fixation von dAOIs (M&#61;5394 ms&#177;1025) und vAOIs (M&#61;5696 ms&#177;1515), (F(51)&#61;1.29, p&#62;0.999), w&#228;hrend an t2 die Zeit bis zur ersten Fixation f&#252;r dAOIs (M&#61;2862 ms&#177;965) geringer war als f&#252;r vAOIs (M&#61;3557 ms&#177;1094) (F(51)&#61;2.69, p&#61;0.046, &#951;<Superscript>2</Superscript>&#61;0.10) (siehe Abbildung 5 d <ImgLink imgNo="5" imgType="figure"/>). Zusammenfassend konnten die Teilnehmenden an t2 dAOIs schneller erkennen als vAOIs, was im Einklang mit der holistischen Verarbeitungstheorie von visueller Expertise steht.</Pgraph><SubHeadline>Augenbewegungen sagen die Leistung in der Histologie voraus</SubHeadline><Pgraph>Die Testergebnisse an t1 konnten anhand der Webcam-Eye-Tracking Daten vorhergesagt werden (siehe Tabelle 2 <ImgLink imgNo="2" imgType="table"/>). Dabei konnten etwa 39&#37; der Varianz (R<Superscript>2</Superscript><Subscript>adj</Subscript>&#61;0.392, p&#60;0.001) durch neun Augenbewegungsvariablen aufgekl&#228;rt werden. An t2 wurde das Signifikanzniveau knapp verfehlt (R<Superscript>2</Superscript><Subscript>adj</Subscript>&#61;0.103, p&#61;0.057), was auf eine geringere Vorhersagekraft des Modells hinweist. Die einzelnen Pr&#228;diktoren hatten vergleichbare Regressionskoeffizienten an beiden Messzeitpunkten. Diese Ergebnisse legen nahe, dass Augenbewegungen robuste Pr&#228;diktoren der fr&#252;hen visuellen Expertise sind.</Pgraph></TextBlock>
    <TextBlock language="en" linked="yes" name="Discussion">
      <MainHeadline>Discussion</MainHeadline><Pgraph>The aim of this study was to test the use of webcam eye-tracking in an (online) histology curriculum. </Pgraph><SubHeadline>How accurately can webcam eye-tracking detect eye movements&#63;</SubHeadline><Pgraph>To overcome some of the existing limitations of laboratory eye-tracking settings (expensive hardware, artificial laboratory environment, small sample sizes), open-source webcam-based eye-tracking has been refined over recent years <TextLink reference="44"></TextLink>, <TextLink reference="45"></TextLink>, <TextLink reference="50"></TextLink>. Our study supports the use of this methodology and demonstrates that the quality of data collected in a curricular online-only course is satisfactory. Setting up the test environment was convenient and did not require any programming skills. We were able to establish the research environments using the graphical interface, equivalent to a &#8220;drag-and-drop&#8221; principle. Care should be taken in future studies to ensure that the <Mark2>areas of interest</Mark2> are of sufficient size, i.e., large enough to compensate for potential accuracy errors. Due to the limited data quality of webcam eye trackers (see figure 3 <ImgLink imgNo="3" imgType="figure"/>), strict criteria must be applied to the data quality. We arrived at the comparatively high exclusion rate of approximately 30&#37; of participants, which means that a high number of subjects must be recruited for such studies in order to obtain valid results. This is a reasonable expectation given the ease with which this methodology can be used in a classroom context. We believe that future improvements in webcam technology itself will further resolve the data quality issues. The students&#8217; interest level in participating in the study was high, with even more students participating at the second timepoint compared to the first. 
Hence, implementation and use in (distant) classroom settings could become a practical possibility for medical educators.</Pgraph><SubHeadline>How valid is webcam eye-tracking in regard to capturing changes in visual expertise&#63;</SubHeadline><Pgraph>Our findings support the hypothesis that webcam eye-tracking can provide insights about temporal changes in visual expertise <TextLink reference="4"></TextLink>, <TextLink reference="26"></TextLink>, <TextLink reference="37"></TextLink>. We could show that with the progress of the online histology course, students enrolled showed:</Pgraph><Pgraph><OrderedList><ListItem level="1" levelPosition="1" numString="1.">better test scores, </ListItem><ListItem level="1" levelPosition="2" numString="2.">reduced slide view time, </ListItem><ListItem level="1" levelPosition="3" numString="3.">more frequent fixations of diagnostically relevant areas, </ListItem><ListItem level="1" levelPosition="4" numString="4.">longer fixation durations of diagnostically relevant areas, and </ListItem><ListItem level="1" levelPosition="5" numString="5.">faster detection of relevant image areas. </ListItem></OrderedList></Pgraph><Pgraph>Subsequent to <Mark2>holistic processing</Mark2>, the trained students focused on relevant areas indicating improved pattern recognition skills and expertise-related top-down control (see figure 5 d <ImgLink imgNo="5" imgType="figure"/>) <TextLink reference="38"></TextLink>, <TextLink reference="51"></TextLink>. Furthermore, the students were more confident in distinguishing relevant from irrelevant areas, which can be interpreted as an improvement in visual expertise according to the <Mark2>information reduction model</Mark2> (see figure 5 b-c <ImgLink imgNo="5" imgType="figure"/>) <TextLink reference="19"></TextLink>. The observable improvements occurred over the course span of ten course sessions, thereby highlighting the importance of early visual expertise development in histopathology training. 
To our knowledge, this study is the first to measure the development of visual expertise in histology training with a longitudinal study design. Given the paucity of literature, further research is needed to provide more insight on these important early stages.</Pgraph><SubHeadline>How reliably can webcam eye-tracking distinguish between low and high performances&#63;</SubHeadline><Pgraph>Combining several eye measurements, our linear models predicted up to 39&#37; of the test score variance, which is an unexpectedly high prediction level for such a complex cognitive task. A particularly interesting observation is that the predictive power of eye movements decreased with an increase in training duration (see table 2 <ImgLink imgNo="2" imgType="table"/>). Therefore, the use of the webcam eye-tracking might be of particular value at the beginning of the training to monitor early visual expertise development. Future eye-tracking studies will investigate the presence of different search profiles in histology, at what point they develop, and how they affect students&#8217; visual searching behavior and performance. Another source of methodological triangulation would be desirable to fully understand the various cognitive processes conducted during this development (e.g., qualitative think-aloud protocols) <TextLink reference="40"></TextLink>.</Pgraph><SubHeadline>Possible areas of implementation in medical education</SubHeadline><Pgraph>Although there are still many open questions, it is worth discussing implementation options early on. Medical educators may use this methodology to gain insight into learners&#8217; unconscious perceptual mechanisms in a range of professional settings, including histopathology, surgery, and radiology. For example, webcam eye-tracking technology could be used as a practical and cost-effective method for evaluating the effectiveness of curricula. 
This would allow subjective self-report data to be easily supplemented with objective performance data <TextLink reference="57"></TextLink>. The affordable purchase price would enable the equipment to be installed throughout whole classes. Thus, it would be possible to use webcam eye-tracking across the board to provide real-time feedback to the instructor regarding task difficulty <TextLink reference="7"></TextLink>, cognitive load <TextLink reference="43"></TextLink>, <TextLink reference="55"></TextLink>, or students&#8217; in-class attention <TextLink reference="37"></TextLink>. This information could help to evaluate which educational methods may be problematic and at what point a change in method is appropriate. These data could also be evaluated post hoc, for example, to revise lecture slides and identify particularly difficult parts during a lecture. With sufficiently large samples, valuable feedback on the level of visual expertise could be given both to the students themselves and to the lecturers <TextLink reference="20"></TextLink>. This method could be especially valuable in online learning environments, where feedback is more challenging due to technical limitations, as we experienced in the COVID-19 pandemic <TextLink reference="9"></TextLink>, <TextLink reference="10"></TextLink>. </Pgraph><Pgraph>This methodology opens up further opportunities for educational scientists. The ease of implementation enables researchers to measure a larger number of subjects in a short time. Thus, eye-tracking studies can recruit more participants. This approach consequently allows large (online) studies to detect smaller effects that have previously gone undetected due to small sample sizes or to conduct longitudinal studies <TextLink reference="16"></TextLink>. Another valuable source for methodological triangulation could be the use of scroll or zooming data during image inspection, which was successfully applied by van Montfort et al. 
<TextLink reference="60"></TextLink> and den Boer et al. <TextLink reference="11"></TextLink>. Finally, this methodology is open source, which allows countries and faculties with limited financial resources to benefit from it.</Pgraph><SubHeadline>Limitations</SubHeadline><Pgraph>Along with these findings, several limitations should be acknowledged. It is currently undisputed that the webcam eye tracking method provides lower quality data than that provided by conventional laboratory eye trackers <TextLink reference="50"></TextLink>. To anticipate this limitation, we recruited more participants and applied stringent procedures for data quality. Furthermore, as the study&#8217;s participation was voluntary, we cannot exclude selection bias in our cohorts. Even though we attentively inspected the time stamps and screen resolutions for such irregularities, we cannot rule out that certain students participated in the study multiple times. </Pgraph></TextBlock>
    <TextBlock language="de" linked="yes" name="Diskussion">
      <MainHeadline>Diskussion</MainHeadline><Pgraph>Diese Studie untersuchte den Einsatz von Webcam-Eye-Tracking in einem Online-Histologiekurs.</Pgraph><SubHeadline>Wie genau kann Webcam-Eye-Tracking Augenbewegungen erfassen&#63;</SubHeadline><Pgraph>Um einige der bestehenden Einschr&#228;nkungen von Eye-Tracking-Einstellungen im Labor zu &#252;berwinden (u.a. teure Hardware, k&#252;nstliche Laborumgebung, kleine Stichprobengr&#246;&#223;en), wurden in den letzten Jahren OpenSource Webcam-basierte Eye-Tracking Methoden entwickelt <TextLink reference="44"></TextLink>, <TextLink reference="45"></TextLink>, <TextLink reference="50"></TextLink>. Unsere Studie unterst&#252;tzt die Verwendung dieser Methode und zeigt, dass die Qualit&#228;t der in einem curricularen Online-Kurs gesammelten Daten zufriedenstellend ist. Die Einrichtung der Testumgebung war bequem und erforderte keine Programmierkenntnisse. Wir konnten die Forschungsumgebung mithilfe der grafischen Benutzeroberfl&#228;che aufbauen, &#228;hnlich einem &#8222;Drag-and-Drop&#8220;-Prinzip. In zuk&#252;nftigen Studien sollte darauf geachtet werden, dass die zu untersuchenden Bildbereiche (dAOIs oder vAOIs) eine ausreichende Gr&#246;&#223;e haben, um potenzielle Messungenauigkeiten auszugleichen. Aufgrund der begrenzten Datenqualit&#228;t von Webcam-Eye-Trackern (siehe Abbildung 3 <ImgLink imgNo="3" imgType="figure"/>) m&#252;ssen insgesamt strengere Kriterien f&#252;r die Datenqualit&#228;t angewendet werden, als f&#252;r Nahinfrarot Eye-Tracker. Um dies zu erreichen, kamen wir daher zu einer vergleichsweise hohen Ausschlussrate von etwa 30&#37; der Teilnehmenden, was bedeutet, dass eine hohe Anzahl an Probanden f&#252;r solche Untersuchungen rekrutiert werden m&#252;ssen. Wir glauben, dass zuk&#252;nftige Verbesserungen in der Webcam-Technologie die derzeitigen Limitationen in der Datenqualit&#228;t l&#246;sen werden. 
Wir haben au&#223;erdem festgestellt, dass die Studierenden ein starkes Interesse an der Nutzung dieser Technologie haben. Deshalb sehen wir praktische M&#246;glichkeiten, sie im Curriculum einzusetzen.</Pgraph><SubHeadline>Wie zuverl&#228;ssig kann Webcam-Eye-Tracking Unterschiede in der visuellen Expertise erfassen&#63;</SubHeadline><Pgraph>Unsere Ergebnisse unterst&#252;tzen die Hypothese, dass Webcam-Eye-Tracking zeitliche Ver&#228;nderungen der visuellen Expertise messen kann <TextLink reference="4"></TextLink>, <TextLink reference="26"></TextLink>, <TextLink reference="37"></TextLink>. Wir konnten zeigen, dass im Verlauf des Online-Histologiekurses die Studierenden:</Pgraph><Pgraph><OrderedList><ListItem level="1" levelPosition="1" numString="1.">bessere Testergebnisse erzielten,</ListItem><ListItem level="1" levelPosition="2" numString="2.">k&#252;rzere Betrachtungszeiten hatten,</ListItem><ListItem level="1" levelPosition="3" numString="3.">h&#228;ufiger Fixationen auf diagnostisch relevante Bildbereiche zeigten,</ListItem><ListItem level="1" levelPosition="4" numString="4.">l&#228;ngere Fixationsdauer auf diagnostisch relevante Bildbereiche hatten und</ListItem><ListItem level="1" levelPosition="5" numString="5.">diagnostisch relevante Bildbereiche schneller erkannten.</ListItem></OrderedList></Pgraph><Pgraph>Gem&#228;&#223; der holistischen Verarbeitungstheorie konzentrierten sich die trainierten Studierenden am Ende des Kurses auf relevante Bildbereiche, was auf verbesserte Mustererkennungsf&#228;higkeiten und top-down-Kontrolle hinweist (siehe Abbildung 5 d <ImgLink imgNo="5" imgType="figure"/>) <TextLink reference="38"></TextLink>, <TextLink reference="51"></TextLink>. 
Dar&#252;ber hinaus waren die Studierenden sicherer darin, diagnostisch relevante von irrelevanten Bereichen zu unterscheiden, was gem&#228;&#223; dem Informationsreduktionsmodell als Verbesserung der visuellen Expertise interpretiert werden kann (siehe Abbildung 5 b-c <ImgLink imgNo="5" imgType="figure"/>) <TextLink reference="19"></TextLink>. Die beobachtbaren Verbesserungen erfolgten &#252;ber den Zeitraum von zehn Kurseinheiten und unterstreichen damit die Bedeutung der fr&#252;hen Entwicklungsphasen der visuellen Expertise im histopathologischen Training. Soweit uns bekannt ist, ist diese Studie die erste, die die Entwicklung dieser fr&#252;hen visuellen Expertise im histologischen Training mit einem longitudinalen Studiendesign untersucht. Aufgrund der begrenzten Literatur sind daher weitere Untersuchungen dringend notwendig.</Pgraph><SubHeadline>Wie zuverl&#228;ssig kann Webcam-Eye-Tracking zwischen Performanzen unterscheiden&#63;</SubHeadline><Pgraph>Durch die Kombination mehrerer Augenbewegungen konnten unsere linearen Modelle bis zu 39&#37; der Varianz der Testergebnisse vorhersagen. Eine besonders interessante Beobachtung ist, dass die Vorhersagekraft der Augenbewegungen mit zunehmender Trainingsdauer abnahm (siehe Tabelle 2 <ImgLink imgNo="2" imgType="table"/>). Daher k&#246;nnte der Einsatz des Webcam-Eye-Trackings vor allem zu Beginn des Trainings wertvoll sein. Zuk&#252;nftige Eye-Tracking-Studien k&#246;nnen hier ansetzen und das Vorhandensein verschiedener Suchprofile in der Histologie untersuchen, zu welchem Zeitpunkt sie sich entwickeln und wie sie das visuelle Suchverhalten und die Leistung der Studierenden beeinflussen. Um die verschiedenen kognitiven Prozesse w&#228;hrend dieser Entwicklung besser zu verstehen, w&#228;ren weitere Methoden zur Triangulation w&#252;nschenswert (z. B. 
qualitative Denkprotokolle) <TextLink reference="40"></TextLink>.</Pgraph><SubHeadline>M&#246;glichkeiten zur Implementierung</SubHeadline><Pgraph>Obwohl noch viele offene Fragen bestehen, lohnt es sich, fr&#252;hzeitig &#252;ber Implementierungsm&#246;glichkeiten zu diskutieren. Lehrenden k&#246;nnen die Webcam-Eye-Tracking Technologie nutzen, um Einblicke in die unbewussten Wahrnehmungsmechanismen der Lernenden in verschiedenen professionellen Umgebungen wie Histopathologie, Chirurgie und Radiologie zu gewinnen. Zum Beispiel k&#246;nnte die Webcam Eye-Tracking-Technologie als praktische und kosteng&#252;nstige Methode zur Evaluation der Effektivit&#228;t von Lehrpl&#228;nen eingesetzt werden. Dadurch k&#246;nnten subjektive Evaluationsdaten leicht mit objektiven Daten erg&#228;nzt werden <TextLink reference="57"></TextLink>. Der g&#252;nstige Anschaffungspreis erm&#246;glicht es, die Technologie kurs&#252;bergreifend einzusetzen. So w&#228;re es m&#246;glich, den Lehrenden in Echtzeit-Feedback zur Aufgabenschwierigkeit <TextLink reference="7"></TextLink>, zur kognitiven Belastung <TextLink reference="43"></TextLink>, <TextLink reference="55"></TextLink> oder zur Aufmerksamkeit der Studierenden im Unterricht <TextLink reference="37"></TextLink> zu geben. Diese Informationen k&#246;nnten helfen, zu bewerten, welche didaktischen Ma&#223;nahmen problematisch sein k&#246;nnten und wann ggf. ein Methodenwechsel angebracht ist. Die Daten k&#246;nnten auch im Anschluss an die Veranstaltungen ausgewertet werden, zum Beispiel um Vorlesungsfolien zu &#252;berarbeiten und besonders schwierige Phasen einer Vorlesung zu identifizieren. Mit ausreichend gro&#223;en Stichproben k&#246;nnten wertvolle R&#252;ckmeldungen zum Grad der visuellen Expertise sowohl an die Studierenden selbst als auch an die Dozenten gegeben werden <TextLink reference="20"></TextLink>. 
Diese Methode entfaltet ihr volles Potential jedoch in Online-Lernumgebungen, wo Feedback aufgrund technischer Einschr&#228;nkungen, wie wir es w&#228;hrend der Covid-19-Pandemie erlebt haben, schwierig ist <TextLink reference="9"></TextLink>, <TextLink reference="10"></TextLink>.</Pgraph><Pgraph>Diese Technologie er&#246;ffnet auch neue M&#246;glichkeiten f&#252;r die Ausbildungsforschung. Die einfache Implementierung erm&#246;glicht die Messung einer gr&#246;&#223;eren Anzahl von Teilnehmenden in relativ kurzer Zeit. So k&#246;nnten in gro&#223;en (Online-)Studien kleinere Effektst&#228;rken nachgewiesen werden, die zuvor aufgrund kleiner Stichprobengr&#246;&#223;en unentdeckt geblieben sind. Auch k&#246;nnen so longitudinale Studien leichter durchgef&#252;hrt werden <TextLink reference="16"></TextLink>. Eine weitere Quelle f&#252;r methodische Triangulation k&#246;nnte die Verwendung von Scroll- oder Zoom-Daten w&#228;hrend der Bildbetrachtung sein, wie bereits erfolgreich von van Montfort et al. <TextLink reference="60"></TextLink> und den Boer et al. <TextLink reference="11"></TextLink> verwendet. Schlie&#223;lich ist diese Technologie OpenSource, was es L&#228;ndern und Fakult&#228;ten mit begrenzten finanziellen Ressourcen erm&#246;glicht, davon zu profitieren.</Pgraph><SubHeadline>Limitationen</SubHeadline><Pgraph>Es steht au&#223;er Frage, dass herk&#246;mmliche Eye-Tracker im Labor eine h&#246;here Datenqualit&#228;t erzielen k&#246;nnen, als per Webcam derzeit m&#246;glich <TextLink reference="50"></TextLink>. Um dieser Einschr&#228;nkung entgegenzuwirken, haben wir mehr Teilnehmende rekrutiert und strenge Verfahren zur Sicherung der Datenqualit&#228;t angewendet. Dar&#252;ber hinaus k&#246;nnen wir aufgrund der freiwilligen Teilnahme an der Studie einen <Mark2>Selektionsbias</Mark2> nicht ausschlie&#223;en. Ebenso ist es m&#246;glich, dass Studierende mehrmals an der Studie teilgenommen haben.</Pgraph></TextBlock>
    <TextBlock language="en" linked="yes" name="Conclusion">
      <MainHeadline>Conclusion</MainHeadline><Pgraph>This is the first study to examine the use of webcam eye trackers in an educational context, and on a larger sample of medical students pursuing an undergraduate histology training. The webcam eye-tracking suggested both accuracy in measuring visual expertise in histology, and value for the in-depth evaluation of (online) curricula. As technology continues to advance, the implementation of this methodology can be used to tap into previously unused potential that is likely to be leveraged in future variations of outcome-based course formats.</Pgraph></TextBlock>
    <TextBlock language="de" linked="yes" name="Fazit">
      <MainHeadline>Fazit</MainHeadline><Pgraph>Diese Studie ist die erste, die den Einsatz von Webcam-Eye-Trackern an einer gro&#223;en Kohorte von Medizinstudierenden in der Histologie untersucht hat. Diese Technologie erm&#246;glicht die kosteng&#252;nstige und zuverl&#228;ssige Messung der visuellen Expertise in der Histologie und er&#246;ffnet neue M&#246;glichkeiten zur Untersuchung von Online-Curricula. Webcam-Eye-Tracking besitzt somit ein bislang ungenutztes Potential zur evidenz-basierten Weiterentwicklung der medizinischen Ausbildung.</Pgraph></TextBlock>
    <TextBlock language="en" linked="yes" name="Funding">
      <MainHeadline>Funding</MainHeadline><Pgraph>This study did not receive any specific grant from funding agencies in the public, commercial, or not-for-profit sectors. Our work in the development of digital histology learning resources is funded by the Land Nordrhein-Westfalen under the grant &#8220;OERContent.NRW&#8221; (Projekt &#8220;Digital Histo NRW &#8211; Digitale Histologie in der Hochschulmedizin, Bio- und Gesundheitswissenschaften in NRW&#8221;). </Pgraph></TextBlock>
    <TextBlock language="de" linked="yes" name="F&#246;rderung">
      <MainHeadline>F&#246;rderung</MainHeadline><Pgraph>Diese Studie wurde von keinen spezifischen F&#246;rderagenturen im &#246;ffentlichen, kommerziellen oder gemeinn&#252;tzigen Bereich finanziell unterst&#252;tzt. Unsere Arbeit in der Entwicklung digitaler histologischer Lernressourcen wird vom Land Nordrhein-Westfalen im Rahmen des Projekts &#8222;OERContent.NRW&#8220; (Projekt &#8222;Digital Histo NRW &#8211; Digitale Histologie in der Hochschulmedizin, Bio- und Gesundheitswissenschaften in NRW&#8220;) gef&#246;rdert. </Pgraph></TextBlock>
    <TextBlock language="en" linked="yes" name="Competing interests">
      <MainHeadline>Competing interests</MainHeadline><Pgraph>The authors declare that they have no competing interests. </Pgraph></TextBlock>
    <TextBlock language="de" linked="yes" name="Interessenkonflikt">
      <MainHeadline>Interessenkonflikt</MainHeadline><Pgraph>Die Autoren erkl&#228;ren, dass sie keinen Interessenkonflikt im Zusammenhang mit diesem Artikel haben.</Pgraph></TextBlock>
    <References linked="yes">
      <Reference refNo="1">
        <RefAuthor>Anderson JR</RefAuthor>
        <RefAuthor>Bothell D</RefAuthor>
        <RefAuthor>Douglass S</RefAuthor>
        <RefTitle>Eye movements do not reflect retrieval processes: limits of the eye-mind hypothesis</RefTitle>
        <RefYear>2004</RefYear>
        <RefJournal>Psychol Sci</RefJournal>
        <RefPage>225-231</RefPage>
        <RefTotal>Anderson JR, Bothell D, Douglass S. Eye movements do not reflect retrieval processes: limits of the eye-mind hypothesis. Psychol Sci. 2004;15(4):225-231. DOI: 10.1111&#47;j.0956-7976.2004.00656.x</RefTotal>
        <RefLink>https:&#47;&#47;doi.org&#47;10.1111&#47;j.0956-7976.2004.00656.x</RefLink>
      </Reference>
      <Reference refNo="2">
        <RefAuthor>B&#225;nki A</RefAuthor>
        <RefAuthor>de Eccher M</RefAuthor>
        <RefAuthor>Falschlehner L</RefAuthor>
        <RefAuthor>Hoehl S</RefAuthor>
        <RefAuthor>Markova G</RefAuthor>
        <RefTitle>Comparing online webcam- and laboratory-based eye-tracking for the assessment of infants&#8217; audio-visual synchrony perception</RefTitle>
        <RefYear>2022</RefYear>
        <RefJournal>Front Psychol</RefJournal>
        <RefPage>733933</RefPage>
        <RefTotal>B&#225;nki A, de Eccher M, Falschlehner L, Hoehl S, Markova G. Comparing online webcam- and laboratory-based eye-tracking for the assessment of infants&#8217; audio-visual synchrony perception. Front Psychol. 2022;12:733933. DOI: 10.3389&#47;fpsyg.2021.733933</RefTotal>
        <RefLink>https:&#47;&#47;doi.org&#47;10.3389&#47;fpsyg.2021.733933</RefLink>
      </Reference>
      <Reference refNo="3">
        <RefAuthor>Bott N</RefAuthor>
        <RefAuthor>Madero EN</RefAuthor>
        <RefAuthor>Glenn J</RefAuthor>
        <RefAuthor>Lange A</RefAuthor>
        <RefAuthor>Anderson J</RefAuthor>
        <RefAuthor>Newton D</RefAuthor>
        <RefAuthor>Brennan A</RefAuthor>
        <RefAuthor>Buffalo EA</RefAuthor>
        <RefAuthor>Rentz D</RefAuthor>
        <RefAuthor>Zola S</RefAuthor>
        <RefTitle>Device-embedded cameras for eye tracking-based cognitive assessment: Validation with paper-pencil and computerized cognitive composites</RefTitle>
        <RefYear>2018</RefYear>
        <RefJournal>J Med Internet Res</RefJournal>
        <RefPage>e11143</RefPage>
        <RefTotal>Bott N, Madero EN, Glenn J, Lange A, Anderson J, Newton D, Brennan A, Buffalo EA, Rentz D, Zola S. Device-embedded cameras for eye tracking-based cognitive assessment: Validation with paper-pencil and computerized cognitive composites. J Med Internet Res. 2018;20(7):e11143. DOI: 10.2196&#47;11143</RefTotal>
        <RefLink>https:&#47;&#47;doi.org&#47;10.2196&#47;11143</RefLink>
      </Reference>
      <Reference refNo="4">
        <RefAuthor>Bruny&#233; TT</RefAuthor>
        <RefAuthor>Drew T</RefAuthor>
        <RefAuthor>Kerr KF</RefAuthor>
        <RefAuthor>Shucard H</RefAuthor>
        <RefAuthor>Weaver DL</RefAuthor>
        <RefAuthor>Elmore JG</RefAuthor>
        <RefTitle>Eye tracking reveals expertise-related differences in the time-course of medical image inspection and diagnosis</RefTitle>
        <RefYear>2020</RefYear>
        <RefJournal>J Med Imaging</RefJournal>
        <RefPage>051203</RefPage>
        <RefTotal>Bruny&#233; TT, Drew T, Kerr KF, Shucard H, Weaver DL, Elmore JG. Eye tracking reveals expertise-related differences in the time-course of medical image inspection and diagnosis. J Med Imaging. 2020;7(5):051203.</RefTotal>
      </Reference>
      <Reference refNo="5">
        <RefAuthor>Bruny&#233; TT</RefAuthor>
        <RefAuthor>Drew T</RefAuthor>
        <RefAuthor>Weaver DL</RefAuthor>
        <RefAuthor>Elmore JG</RefAuthor>
        <RefTitle>A review of eye tracking for understanding and improving diagnostic interpretation</RefTitle>
        <RefYear>2019</RefYear>
        <RefJournal>Cogn Res Princ Implic</RefJournal>
        <RefPage>7</RefPage>
        <RefTotal>Bruny&#233; TT, Drew T, Weaver DL, Elmore JG. A review of eye tracking for understanding and improving diagnostic interpretation. Cogn Res Princ Implic. 2019;4(1):7. DOI: 10.1186&#47;s41235-019-0159-2</RefTotal>
        <RefLink>https:&#47;&#47;doi.org&#47;10.1186&#47;s41235-019-0159-2</RefLink>
      </Reference>
      <Reference refNo="6">
        <RefAuthor>Bruny&#233; TT</RefAuthor>
        <RefAuthor>Mercan E</RefAuthor>
        <RefAuthor>Weaver DL</RefAuthor>
        <RefAuthor>Elmore JG</RefAuthor>
        <RefTitle>Accuracy is in the eyes of the pathologist: The visual interpretive process and diagnostic accuracy with digital whole slide images</RefTitle>
        <RefYear>2017</RefYear>
        <RefJournal>J Biomed Inform</RefJournal>
        <RefPage>171-179</RefPage>
        <RefTotal>Bruny&#233; TT, Mercan E, Weaver DL, Elmore JG. Accuracy is in the eyes of the pathologist: The visual interpretive process and diagnostic accuracy with digital whole slide images. J Biomed Inform. 2017;66:171-179. DOI: 10.1016&#47;j.jbi.2017.01.004</RefTotal>
        <RefLink>https:&#47;&#47;doi.org&#47;10.1016&#47;j.jbi.2017.01.004</RefLink>
      </Reference>
      <Reference refNo="7">
        <RefAuthor>Cho Y</RefAuthor>
        <RefTitle>Rethinking eye-blink: Assessing task difficulty through physiological representation of spontaneous blinking</RefTitle>
        <RefYear>2021</RefYear>
        <RefBookTitle>Proceedings of the 2021 CHI Conference on Human Factors in Computing Systems, Yokohama, Japan</RefBookTitle>
        <RefPage></RefPage>
        <RefTotal>Cho Y. Rethinking eye-blink: Assessing task difficulty through physiological representation of spontaneous blinking. In: Proceedings of the 2021 CHI Conference on Human Factors in Computing Systems, Yokohama, Japan. 2021.</RefTotal>
      </Reference>
      <Reference refNo="8">
        <RefAuthor>Cook DA</RefAuthor>
        <RefTitle>Much ado about differences: why expert-novice comparisons add little to the validity argument</RefTitle>
        <RefYear>2015</RefYear>
        <RefJournal>Adv Health Sci Educ Theory Pract</RefJournal>
        <RefPage>829-834</RefPage>
        <RefTotal>Cook DA. Much ado about differences: why expert-novice comparisons add little to the validity argument. Adv Health Sci Educ Theory Pract. 2015;20(3):829-834. DOI: 10.1007&#47;s10459-014-9551-3</RefTotal>
        <RefLink>https:&#47;&#47;doi.org&#47;10.1007&#47;s10459-014-9551-3</RefLink>
      </Reference>
      <Reference refNo="9">
        <RefAuthor>Darici D</RefAuthor>
        <RefAuthor>Missler M</RefAuthor>
        <RefAuthor>Schober A</RefAuthor>
        <RefAuthor>Masthoff M</RefAuthor>
        <RefAuthor>Schnittler H</RefAuthor>
        <RefAuthor>Schmitz M</RefAuthor>
        <RefTitle>&#8220;Fun slipping into the doctor&#8217;s role&#8221;&#8211;The relationship between sonoanatomy teaching and professional identity formation before and during the Covid-19 pandemic</RefTitle>
        <RefYear>2022</RefYear>
        <RefJournal>Anat Sci Educ</RefJournal>
        <RefPage>447-463</RefPage>
        <RefTotal>Darici D, Missler M, Schober A, Masthoff M, Schnittler H, Schmitz M. &#8220;Fun slipping into the doctor&#8217;s role&#8221;&#8211;The relationship between sonoanatomy teaching and professional identity formation before and during the Covid-19 pandemic. Anat Sci Educ. 2022;15(3):447-463. DOI: 10.1002&#47;ase.2178</RefTotal>
        <RefLink>https:&#47;&#47;doi.org&#47;10.1002&#47;ase.2178</RefLink>
      </Reference>
      <Reference refNo="10">
        <RefAuthor>Darici D</RefAuthor>
        <RefAuthor>Reissner C</RefAuthor>
        <RefAuthor>Brockhaus J</RefAuthor>
        <RefAuthor>Missler M</RefAuthor>
        <RefTitle>Implementation of a fully digital histology course in the anatomical teaching curriculum during COVID-19 pandemic</RefTitle>
        <RefYear>2021</RefYear>
        <RefJournal>Ann Anat</RefJournal>
        <RefPage>151718</RefPage>
        <RefTotal>Darici D, Reissner C, Brockhaus J, Missler M. Implementation of a fully digital histology course in the anatomical teaching curriculum during COVID-19 pandemic. Ann Anat. 2021;236:151718. DOI: 10.1016&#47;j.aanat.2021.151718</RefTotal>
        <RefLink>https:&#47;&#47;doi.org&#47;10.1016&#47;j.aanat.2021.151718</RefLink>
      </Reference>
      <Reference refNo="11">
        <RefAuthor>den Boer L</RefAuthor>
        <RefAuthor>van der Schaaf MF</RefAuthor>
        <RefAuthor>Vincken KL</RefAuthor>
        <RefAuthor>Mol CP</RefAuthor>
        <RefAuthor>Stuijfzand BG</RefAuthor>
        <RefAuthor>van der Gijp A</RefAuthor>
        <RefTitle>Volumetric image interpretation in radiology: scroll behavior and cognitive processes</RefTitle>
        <RefYear>2018</RefYear>
        <RefJournal>Adv Health Sci Educ Theory Pract</RefJournal>
        <RefPage>783-802</RefPage>
        <RefTotal>den Boer L, van der Schaaf MF, Vincken KL, Mol CP, Stuijfzand BG, van der Gijp A. Volumetric image interpretation in radiology: scroll behavior and cognitive processes. Adv Health Sci Educ Theory Pract. 2018;23(4):783-802. DOI: 10.1007&#47;s10459-018-9828-z</RefTotal>
        <RefLink>https:&#47;&#47;doi.org&#47;10.1007&#47;s10459-018-9828-z</RefLink>
      </Reference>
      <Reference refNo="12">
        <RefAuthor>Drew T</RefAuthor>
        <RefAuthor>Vo ML</RefAuthor>
        <RefAuthor>Olwal A</RefAuthor>
        <RefAuthor>Jacobson F</RefAuthor>
        <RefAuthor>Seltzer SE</RefAuthor>
        <RefAuthor>Wolfe JM</RefAuthor>
        <RefTitle>Scanners and drillers: Characterizing expert visual search through volumetric images</RefTitle>
        <RefYear>2013</RefYear>
        <RefJournal>J Vis</RefJournal>
        <RefPage>3</RefPage>
        <RefTotal>Drew T, Vo ML, Olwal A, Jacobson F, Seltzer SE, Wolfe JM. Scanners and drillers: Characterizing expert visual search through volumetric images. J Vis. 2013;13(10):3. DOI: 10.1167&#47;13.10.3</RefTotal>
        <RefLink>https:&#47;&#47;doi.org&#47;10.1167&#47;13.10.3</RefLink>
      </Reference>
      <Reference refNo="13">
        <RefAuthor>Duchowski A</RefAuthor>
        <RefTitle></RefTitle>
        <RefYear>2007</RefYear>
        <RefBookTitle>Eye tracking methodology: Theory &#38; practice</RefBookTitle>
        <RefPage></RefPage>
        <RefTotal>Duchowski A. Eye tracking methodology: Theory &#38; practice. London: Springer-Verlag; 2007.</RefTotal>
      </Reference>
      <Reference refNo="14">
        <RefAuthor>Eder TF</RefAuthor>
        <RefAuthor>Richter J</RefAuthor>
        <RefAuthor>Scheiter K</RefAuthor>
        <RefAuthor>Keutel C</RefAuthor>
        <RefAuthor>Castner N</RefAuthor>
        <RefAuthor>Kasneci E</RefAuthor>
        <RefTitle>How to support dental students in reading radiographs: effects of a gaze-based compare-and-contrast intervention</RefTitle>
        <RefYear>2021</RefYear>
        <RefJournal>Adv Health Sci Educ Theory Pract</RefJournal>
        <RefPage>159-181</RefPage>
        <RefTotal>Eder TF, Richter J, Scheiter K, Keutel C, Castner N, Kasneci E. How to support dental students in reading radiographs: effects of a gaze-based compare-and-contrast intervention. Adv Health Sci Educ Theory Pract. 2021;26(1):159-181. DOI: 10.1007&#47;s10459-020-09975-w</RefTotal>
        <RefLink>https:&#47;&#47;doi.org&#47;10.1007&#47;s10459-020-09975-w</RefLink>
      </Reference>
      <Reference refNo="15">
        <RefAuthor>Fox SE</RefAuthor>
        <RefAuthor>Faulkner-Jones BE</RefAuthor>
        <RefTitle>Eye-tracking in the study of visual expertise: Methodology and approaches in medicine</RefTitle>
        <RefYear>2017</RefYear>
        <RefJournal>Front Learn Res</RefJournal>
        <RefPage>29-40</RefPage>
        <RefTotal>Fox SE, Faulkner-Jones BE. Eye-tracking in the study of visual expertise: Methodology and approaches in medicine. Front Learn Res. 2017;5(3):29-40. DOI: 10.14786&#47;flr.v5i3.258</RefTotal>
        <RefLink>https:&#47;&#47;doi.org&#47;10.14786&#47;flr.v5i3.258</RefLink>
      </Reference>
      <Reference refNo="16">
        <RefAuthor>Gegenfurtner A</RefAuthor>
        <RefAuthor>Kok E</RefAuthor>
        <RefAuthor>van Geel K</RefAuthor>
        <RefAuthor>de Bruin A</RefAuthor>
        <RefAuthor>Jarodzka H</RefAuthor>
        <RefAuthor>Szulewski A</RefAuthor>
        <RefAuthor>van Merri&#235;nboer JJ</RefAuthor>
        <RefTitle>The challenges of studying visual expertise in medical image diagnosis</RefTitle>
        <RefYear>2017</RefYear>
        <RefJournal>Med Educ</RefJournal>
        <RefPage>97-104</RefPage>
        <RefTotal>Gegenfurtner A, Kok E, van Geel K, de Bruin A, Jarodzka H, Szulewski A, van Merri&#235;nboer JJ. The challenges of studying visual expertise in medical image diagnosis. Med Educ. 2017;51(1):97-104. DOI: 10.1111&#47;medu.13205</RefTotal>
        <RefLink>https:&#47;&#47;doi.org&#47;10.1111&#47;medu.13205</RefLink>
      </Reference>
      <Reference refNo="17">
        <RefAuthor>Gegenfurtner A</RefAuthor>
        <RefAuthor>Lehtinen E</RefAuthor>
        <RefAuthor>S&#228;lj&#246; R</RefAuthor>
        <RefTitle>Expertise differences in the comprehension of visualizations: A meta-analysis of eye-tracking research in professional domains</RefTitle>
        <RefYear>2011</RefYear>
        <RefJournal>Educ Psychol Rev</RefJournal>
        <RefPage>523-552</RefPage>
        <RefTotal>Gegenfurtner A, Lehtinen E, S&#228;lj&#246; R. Expertise differences in the comprehension of visualizations: A meta-analysis of eye-tracking research in professional domains. Educ Psychol Rev. 2011;23:523-552. DOI: 10.1007&#47;s10648-011-9174-7</RefTotal>
        <RefLink>https:&#47;&#47;doi.org&#47;10.1007&#47;s10648-011-9174-7</RefLink>
      </Reference>
      <Reference refNo="18">
        <RefAuthor>Gobet F</RefAuthor>
        <RefAuthor>Charness N</RefAuthor>
        <RefTitle>Expertise in chess</RefTitle>
        <RefYear>2018</RefYear>
        <RefBookTitle>Cambridge handbook of expertise and expert performance</RefBookTitle>
        <RefPage>523-538</RefPage>
        <RefTotal>Gobet F, Charness N. Expertise in chess. In: Ericsson K, Charness N, Feltovich P, Hoffmann R, editors. Cambridge handbook of expertise and expert performance. New York: Cambridge University Press; 2018. p.523-538.</RefTotal>
      </Reference>
      <Reference refNo="19">
        <RefAuthor>Haider H</RefAuthor>
        <RefAuthor>Frensch PA</RefAuthor>
        <RefTitle>The role of information reduction in skill acquisition</RefTitle>
        <RefYear>1996</RefYear>
        <RefJournal>Cogn Psychol</RefJournal>
        <RefPage>304-337</RefPage>
        <RefTotal>Haider H, Frensch PA. The role of information reduction in skill acquisition. Cogn Psychol. 1996;30(3):304-337. DOI: 10.1006&#47;cogp.1996.0009</RefTotal>
        <RefLink>https:&#47;&#47;doi.org&#47;10.1006&#47;cogp.1996.0009</RefLink>
      </Reference>
      <Reference refNo="20">
        <RefAuthor>Hattie J</RefAuthor>
        <RefAuthor>Timperley H</RefAuthor>
        <RefTitle>The power of feedback</RefTitle>
        <RefYear>2007</RefYear>
        <RefJournal>Rev Educ Res</RefJournal>
        <RefPage>81-112</RefPage>
        <RefTotal>Hattie J, Timperley H. The power of feedback. Rev Educ Res. 2007;77(1):81-112. DOI: 10.3102&#47;003465430298487</RefTotal>
        <RefLink>https:&#47;&#47;doi.org&#47;10.3102&#47;003465430298487</RefLink>
      </Reference>
      <Reference refNo="21">
        <RefAuthor>Hauer KE</RefAuthor>
        <RefAuthor>Boscardin C</RefAuthor>
        <RefAuthor>Brenner JM</RefAuthor>
        <RefAuthor>van Schaik SM</RefAuthor>
        <RefAuthor>Papp KK</RefAuthor>
        <RefTitle>Twelve tips for assessing medical knowledge with open-ended questions: Designing constructed response examinations in medical education</RefTitle>
        <RefYear>2020</RefYear>
        <RefJournal>Med Teach</RefJournal>
        <RefPage>880-885</RefPage>
        <RefTotal>Hauer KE, Boscardin C, Brenner JM, van Schaik SM, Papp KK. Twelve tips for assessing medical knowledge with open-ended questions: Designing constructed response examinations in medical education. Med Teach. 2020;42(8):880-885. DOI: 10.1080&#47;0142159X.2019.1629404</RefTotal>
        <RefLink>https:&#47;&#47;doi.org&#47;10.1080&#47;0142159X.2019.1629404</RefLink>
      </Reference>
      <Reference refNo="22">
        <RefAuthor>Hay J</RefAuthor>
        <RefAuthor>Troup E</RefAuthor>
        <RefAuthor>Clark I</RefAuthor>
        <RefAuthor>Pietsch J</RefAuthor>
        <RefAuthor>Zieli&#324;ski T</RefAuthor>
        <RefAuthor>Millar A</RefAuthor>
        <RefTitle>PyOmeroUpload: A Python toolkit for uploading images and metadata to OMERO</RefTitle>
        <RefYear>2020</RefYear>
        <RefJournal>Wellcome Open Res</RefJournal>
        <RefPage>96</RefPage>
        <RefTotal>Hay J, Troup E, Clark I, Pietsch J, Zieli&#324;ski T, Millar A. PyOmeroUpload: A Python toolkit for uploading images and metadata to OMERO. Wellcome Open Res. 2020;5:96. DOI: 10.12688&#47;wellcomeopenres.15853.2</RefTotal>
        <RefLink>https:&#47;&#47;doi.org&#47;10.12688&#47;wellcomeopenres.15853.2</RefLink>
      </Reference>
      <Reference refNo="23">
        <RefAuthor>Hirt L</RefAuthor>
        <RefAuthor>Leonard C</RefAuthor>
        <RefAuthor>Lee LM</RefAuthor>
        <RefTitle>Are you copying me&#63; Leveraging expert visual scan path to transmit visual literacy in novice histology students</RefTitle>
        <RefYear>2020</RefYear>
        <RefJournal>FASEB J</RefJournal>
        <RefPage>1</RefPage>
        <RefTotal>Hirt L, Leonard C, Lee LM. Are you copying me&#63; Leveraging expert visual scan path to transmit visual literacy in novice histology students. FASEB J. 2020;34(S1):1. DOI: 10.1096&#47;fasebj.2020.34.s1.04665</RefTotal>
        <RefLink>https:&#47;&#47;doi.org&#47;10.1096&#47;fasebj.2020.34.s1.04665</RefLink>
      </Reference>
      <Reference refNo="24">
        <RefAuthor>Holmqvist K</RefAuthor>
        <RefAuthor>Nystr&#246;m M</RefAuthor>
        <RefAuthor>Andersson R</RefAuthor>
        <RefAuthor>Dewhurst R</RefAuthor>
        <RefAuthor>Jarodzka H</RefAuthor>
        <RefAuthor>van de Weijer J</RefAuthor>
        <RefTitle></RefTitle>
        <RefYear>2011</RefYear>
        <RefBookTitle>Eye tracking: A comprehensive guide to methods and measures</RefBookTitle>
        <RefPage></RefPage>
        <RefTotal>Holmqvist K, Nystr&#246;m M, Andersson R, Dewhurst R, Jarodzka H, van de Weijer J. Eye tracking: A comprehensive guide to methods and measures. Oxford: University Press; 2011.</RefTotal>
      </Reference>
      <Reference refNo="25">
        <RefAuthor>Holmqvist K</RefAuthor>
        <RefAuthor>&#214;rbom S</RefAuthor>
        <RefAuthor>Hooge I</RefAuthor>
        <RefAuthor>Niehorster D</RefAuthor>
        <RefAuthor>Alexander R</RefAuthor>
        <RefAuthor>Andersson R</RefAuthor>
        <RefAuthor>Benjamins JS</RefAuthor>
        <RefAuthor>Blignaut P</RefAuthor>
        <RefAuthor>Brouwer AM</RefAuthor>
        <RefAuthor>Chuang LL</RefAuthor>
        <RefAuthor>Dalrymple KA</RefAuthor>
        <RefAuthor>Drieghe D</RefAuthor>
        <RefAuthor>Dunn MJ</RefAuthor>
        <RefAuthor>Ettinger U</RefAuthor>
        <RefAuthor>Fiedler S</RefAuthor>
        <RefAuthor>Foulsham T</RefAuthor>
        <RefAuthor>van der Geest JN</RefAuthor>
        <RefAuthor>Hansen DW</RefAuthor>
        <RefAuthor>Hutton SB</RefAuthor>
        <RefAuthor>Kasneci E</RefAuthor>
        <RefAuthor>Kingstone A</RefAuthor>
        <RefAuthor>Knox PC</RefAuthor>
        <RefAuthor>Kok EM</RefAuthor>
        <RefAuthor>Lee H</RefAuthor>
        <RefAuthor>Lee JY</RefAuthor>
        <RefAuthor>Lepp&#228;nen JM</RefAuthor>
        <RefAuthor>Macknik S</RefAuthor>
        <RefAuthor>Majaranta P</RefAuthor>
        <RefAuthor>Martinez-Conde S</RefAuthor>
        <RefAuthor>Nuthmann A</RefAuthor>
        <RefAuthor>Nystr&#246;m M</RefAuthor>
        <RefAuthor>Orquin JL</RefAuthor>
        <RefAuthor>Otero-Millan J</RefAuthor>
        <RefAuthor>Park SY</RefAuthor>
        <RefAuthor>Popelka S</RefAuthor>
        <RefAuthor>Proudlock F</RefAuthor>
        <RefAuthor>Renkewitz F</RefAuthor>
        <RefAuthor>Roorda A</RefAuthor>
        <RefAuthor>Schulte-Mecklenbeck M</RefAuthor>
        <RefAuthor>Sharif B</RefAuthor>
        <RefAuthor>Shic F</RefAuthor>
        <RefAuthor>Shovman M</RefAuthor>
        <RefAuthor>Thomas MG</RefAuthor>
        <RefAuthor>Venrooij W</RefAuthor>
        <RefAuthor>Zemblys R</RefAuthor>
        <RefAuthor>Hessels RS</RefAuthor>
        <RefTitle>Eye tracking: empirical foundations for a minimal reporting guideline</RefTitle>
        <RefYear>2022</RefYear>
        <RefJournal>Behav Res Methods</RefJournal>
        <RefPage>364-416</RefPage>
        <RefTotal>Holmqvist K, &#214;rbom S, Hooge I, Niehorster D, Alexander R, Andersson R, Benjamins JS, Blignaut P, Brouwer AM, Chuang LL, Dalrymple KA, Drieghe D, Dunn MJ, Ettinger U, Fiedler S, Foulsham T, van der Geest JN, Hansen DW, Hutton SB, Kasneci E, Kingstone A, Knox PC, Kok EM, Lee H, Lee JY, Lepp&#228;nen JM, Macknik S, Majaranta P, Martinez-Conde S, Nuthmann A, Nystr&#246;m M, Orquin JL, Otero-Millan J, Park SY, Popelka S, Proudlock F, Renkewitz F, Roorda A, Schulte-Mecklenbeck M, Sharif B, Shic F, Shovman M, Thomas MG, Venrooij W, Zemblys R, Hessels RS. Eye tracking: empirical foundations for a minimal reporting guideline. Behav Res Methods. 2022;55:364-416. DOI: 10.3758&#47;s13428-021-01762-8</RefTotal>
        <RefLink>https:&#47;&#47;doi.org&#47;10.3758&#47;s13428-021-01762-8</RefLink>
      </Reference>
      <Reference refNo="26">
        <RefAuthor>Jaarsma T</RefAuthor>
        <RefAuthor>Jarodzka H</RefAuthor>
        <RefAuthor>Nap M</RefAuthor>
        <RefAuthor>van Merrienboer JJ</RefAuthor>
        <RefAuthor>Boshuizen HP</RefAuthor>
        <RefTitle>Expertise under the microscope: processing histopathological slides</RefTitle>
        <RefYear>2014</RefYear>
        <RefJournal>Med Educ</RefJournal>
        <RefPage>292-300</RefPage>
        <RefTotal>Jaarsma T, Jarodzka H, Nap M, van Merrienboer JJ, Boshuizen HP. Expertise under the microscope: processing histopathological slides. Med Educ. 2014;48(3):292-300. DOI: 10.1111&#47;medu.12385</RefTotal>
        <RefLink>https:&#47;&#47;doi.org&#47;10.1111&#47;medu.12385</RefLink>
      </Reference>
      <Reference refNo="27">
        <RefAuthor>Jaarsma T</RefAuthor>
        <RefAuthor>Jarodzka H</RefAuthor>
        <RefAuthor>Nap M</RefAuthor>
        <RefAuthor>van Merri&#235;nboer JJ</RefAuthor>
        <RefAuthor>Boshuizen HP</RefAuthor>
        <RefTitle>Expertise in clinical pathology: combining the visual and cognitive perspective</RefTitle>
        <RefYear>2015</RefYear>
        <RefJournal>Adv Health Sci Educ Theory Pract</RefJournal>
        <RefPage>1089-1106</RefPage>
        <RefTotal>Jaarsma T, Jarodzka H, Nap M, van Merri&#235;nboer JJ, Boshuizen HP. Expertise in clinical pathology: combining the visual and cognitive perspective. Adv Health Sci Educ Theory Pract. 2015;20(4):1089-1106. DOI: 10.1007&#47;s10459-015-9589-x</RefTotal>
        <RefLink>https:&#47;&#47;doi.org&#47;10.1007&#47;s10459-015-9589-x</RefLink>
      </Reference>
      <Reference refNo="28">
        <RefAuthor>Just MA</RefAuthor>
        <RefAuthor>Carpenter PA</RefAuthor>
        <RefTitle>A theory of reading: From eye fixations to comprehension</RefTitle>
        <RefYear>1980</RefYear>
        <RefJournal>Psychol Rev</RefJournal>
        <RefPage>329-354</RefPage>
        <RefTotal>Just MA, Carpenter PA. A theory of reading: From eye fixations to comprehension. Psychol Rev. 1980;87(4):329-354.</RefTotal>
      </Reference>
      <Reference refNo="29">
        <RefAuthor>Kahl J</RefAuthor>
        <RefAuthor>Carry P</RefAuthor>
        <RefAuthor>Lee L</RefAuthor>
        <RefTitle>Quantification of pattern recognition skills acquisition in histology using an eye-tracking device: First-step toward evidence-based histology education</RefTitle>
        <RefYear>2015</RefYear>
        <RefJournal>FASEB J</RefJournal>
        <RefPage>689.4</RefPage>
        <RefTotal>Kahl J, Carry P, Lee L. Quantification of pattern recognition skills acquisition in histology using an eye-tracking device: First-step toward evidence-based histology education. FASEB J. 2015;29(S1):689.4. DOI: 10.1096&#47;fasebj.29.1&#95;supplement.689.4</RefTotal>
        <RefLink>https:&#47;&#47;doi.org&#47;10.1096&#47;fasebj.29.1&#95;supplement.689.4</RefLink>
      </Reference>
      <Reference refNo="30">
        <RefAuthor>Koh A</RefAuthor>
        <RefAuthor>Roy D</RefAuthor>
        <RefAuthor>Gale A</RefAuthor>
        <RefAuthor>Mihai R</RefAuthor>
        <RefAuthor>Atwal G</RefAuthor>
        <RefAuthor>Ellis I</RefAuthor>
        <RefAuthor>Snead D</RefAuthor>
        <RefAuthor>Chen Y</RefAuthor>
        <RefTitle>Understanding digital pathology performance: an eye tracking study</RefTitle>
        <RefYear>2020</RefYear>
        <RefBookTitle>Proc. SPIE 11316. Medical Imaging 2020: Image Perception, Observer Performance, and Technology Assessment. 1131607</RefBookTitle>
        <RefPage></RefPage>
        <RefTotal>Koh A, Roy D, Gale A, Mihai R, Atwal G, Ellis I, Snead D, Chen Y. Understanding digital pathology performance: an eye tracking study. In: Proc. SPIE 11316. Medical Imaging 2020: Image Perception, Observer Performance, and Technology Assessment. 1131607. 16 March 2020. DOI: 10.1117&#47;12.2550513</RefTotal>
        <RefLink>https:&#47;&#47;doi.org&#47;10.1117&#47;12.2550513</RefLink>
      </Reference>
      <Reference refNo="31">
        <RefAuthor>Kok E</RefAuthor>
        <RefTitle>Eye tracking: the silver bullet of competency assessment in medical image interpretation&#63;</RefTitle>
        <RefYear>2019</RefYear>
        <RefJournal>Perspect Med Educ</RefJournal>
        <RefPage>63-64</RefPage>
        <RefTotal>Kok E. Eye tracking: the silver bullet of competency assessment in medical image interpretation&#63; Perspect Med Educ. 2019;8(2):63-64. DOI: 10.1007&#47;s40037-019-0506-5</RefTotal>
        <RefLink>https:&#47;&#47;doi.org&#47;10.1007&#47;s40037-019-0506-5</RefLink>
      </Reference>
      <Reference refNo="32">
        <RefAuthor>Kok EM</RefAuthor>
        <RefAuthor>Jarodzka H</RefAuthor>
        <RefTitle>Before your very eyes: the value and limitations of eye tracking in medical education</RefTitle>
        <RefYear>2017</RefYear>
        <RefJournal>Med Educ</RefJournal>
        <RefPage>114-122</RefPage>
        <RefTotal>Kok EM, Jarodzka H. Before your very eyes: the value and limitations of eye tracking in medical education. Med Educ. 2017;51(1):114-122. DOI: 10.1111&#47;medu.13066</RefTotal>
        <RefLink>https:&#47;&#47;doi.org&#47;10.1111&#47;medu.13066</RefLink>
      </Reference>
      <Reference refNo="33">
        <RefAuthor>Kok EM</RefAuthor>
        <RefAuthor>Jarodzka H</RefAuthor>
        <RefAuthor>de Bruin AB</RefAuthor>
        <RefAuthor>Binamir HA</RefAuthor>
        <RefAuthor>Robben SG</RefAuthor>
        <RefAuthor>van Merri&#235;nboer JJ</RefAuthor>
        <RefTitle>Systematic viewing in radiology: seeing more, missing less&#63;</RefTitle>
        <RefYear>2016</RefYear>
        <RefJournal>Adv Health Sci Educ Theory Pract</RefJournal>
        <RefPage>189-205</RefPage>
        <RefTotal>Kok EM, Jarodzka H, de Bruin AB, Binamir HA, Robben SG, van Merri&#235;nboer JJ. Systematic viewing in radiology: seeing more, missing less&#63; Adv Health Sci Educ Theory Pract. 2016;21(1):189-205. DOI: 10.1007&#47;s10459-015-9624-y</RefTotal>
        <RefLink>https:&#47;&#47;doi.org&#47;10.1007&#47;s10459-015-9624-y</RefLink>
      </Reference>
      <Reference refNo="34">
        <RefAuthor>Krupinski EA</RefAuthor>
        <RefAuthor>Graham AR</RefAuthor>
        <RefAuthor>Weinstein RS</RefAuthor>
        <RefTitle>Characterizing the development of visual search expertise in pathology residents viewing whole slide images</RefTitle>
        <RefYear>2013</RefYear>
        <RefJournal>Hum Pathol</RefJournal>
        <RefPage>357-364</RefPage>
        <RefTotal>Krupinski EA, Graham AR, Weinstein RS. Characterizing the development of visual search expertise in pathology residents viewing whole slide images. Hum Pathol. 2013;44(3):357-364. DOI: 10.1016&#47;j.humpath.2012.05.024</RefTotal>
        <RefLink>https:&#47;&#47;doi.org&#47;10.1016&#47;j.humpath.2012.05.024</RefLink>
      </Reference>
      <Reference refNo="35">
        <RefAuthor>Krupinski EA</RefAuthor>
        <RefAuthor>Tillack AA</RefAuthor>
        <RefAuthor>Richter L</RefAuthor>
        <RefAuthor>Henderson JT</RefAuthor>
        <RefAuthor>Bhattacharyya AK</RefAuthor>
        <RefAuthor>Scott KM</RefAuthor>
        <RefAuthor>Graham AR</RefAuthor>
        <RefAuthor>Descour MR</RefAuthor>
        <RefAuthor>Davis JR</RefAuthor>
        <RefAuthor>Weinstein RS</RefAuthor>
        <RefTitle>Eye-movement study and human performance using telepathology virtual slides. Implications for medical education and differences with experience</RefTitle>
        <RefYear>2006</RefYear>
        <RefJournal>Hum Pathol</RefJournal>
        <RefPage>1543-1556</RefPage>
        <RefTotal>Krupinski EA, Tillack AA, Richter L, Henderson JT, Bhattacharyya AK, Scott KM, Graham AR, Descour MR, Davis JR, Weinstein RS. Eye-movement study and human performance using telepathology virtual slides. Implications for medical education and differences with experience. Hum Pathol. 2006;37(12):1543-1556. DOI: 10.1016&#47;j.humpath.2006.08.024</RefTotal>
        <RefLink>https:&#47;&#47;doi.org&#47;10.1016&#47;j.humpath.2006.08.024</RefLink>
      </Reference>
      <Reference refNo="36">
        <RefAuthor>Kundel HL</RefAuthor>
        <RefAuthor>Nodine CF</RefAuthor>
        <RefAuthor>Conant EF</RefAuthor>
        <RefAuthor>Weinstein SP</RefAuthor>
        <RefTitle>Holistic component of image perception in mammogram interpretation: gaze-tracking study</RefTitle>
        <RefYear>2007</RefYear>
        <RefJournal>Radiology</RefJournal>
        <RefPage>396-402</RefPage>
        <RefTotal>Kundel HL, Nodine CF, Conant EF, Weinstein SP. Holistic component of image perception in mammogram interpretation: gaze-tracking study. Radiology. 2007;242(2):396-402. DOI: 10.1148&#47;radiol.2422051997</RefTotal>
        <RefLink>https:&#47;&#47;doi.org&#47;10.1148&#47;radiol.2422051997</RefLink>
      </Reference>
      <Reference refNo="37">
        <RefAuthor>Madsen J</RefAuthor>
        <RefAuthor>J&#250;lio SU</RefAuthor>
        <RefAuthor>Gucik PJ</RefAuthor>
        <RefAuthor>Steinberg R</RefAuthor>
        <RefAuthor>Parra LC</RefAuthor>
        <RefTitle>Synchronized eye movements predict test scores in online video education</RefTitle>
        <RefYear>2021</RefYear>
        <RefJournal>Proc Natl Acad Sci U S A</RefJournal>
        <RefPage>e2016980118</RefPage>
        <RefTotal>Madsen J, J&#250;lio SU, Gucik PJ, Steinberg R, Parra LC. Synchronized eye movements predict test scores in online video education. Proc Natl Acad Sci U S A. 2021;118(5):e2016980118. DOI: 10.1073&#47;pnas.2016980118</RefTotal>
        <RefLink>https:&#47;&#47;doi.org&#47;10.1073&#47;pnas.2016980118</RefLink>
      </Reference>
      <Reference refNo="38">
        <RefAuthor>McMains S</RefAuthor>
        <RefAuthor>Kastner S</RefAuthor>
        <RefTitle>Interactions of top-down and bottom-up mechanisms in human visual cortex</RefTitle>
        <RefYear>2011</RefYear>
        <RefJournal>J Neurosci</RefJournal>
        <RefPage>587-597</RefPage>
        <RefTotal>McMains S, Kastner S. Interactions of top-down and bottom-up mechanisms in human visual cortex. J Neurosci. 2011;31(2):587-597. DOI: 10.1523&#47;JNEUROSCI.3766-10.2011</RefTotal>
        <RefLink>https:&#47;&#47;doi.org&#47;10.1523&#47;JNEUROSCI.3766-10.2011</RefLink>
      </Reference>
      <Reference refNo="39">
        <RefAuthor>Mercan E</RefAuthor>
        <RefAuthor>Shapiro LG</RefAuthor>
        <RefAuthor>Bruny&#233; TT</RefAuthor>
        <RefAuthor>Weaver DL</RefAuthor>
        <RefAuthor>Elmore JG</RefAuthor>
        <RefTitle>Characterizing diagnostic search patterns in digital breast pathology: Scanners and drillers</RefTitle>
        <RefYear>2018</RefYear>
        <RefJournal>J Digit Imaging</RefJournal>
        <RefPage>32-41</RefPage>
        <RefTotal>Mercan E, Shapiro LG, Bruny&#233; TT, Weaver DL, Elmore JG. Characterizing diagnostic search patterns in digital breast pathology: Scanners and drillers. J Digit Imaging. 2018;31(1):32-41. DOI: 10.1007&#47;s10278-017-9990-5</RefTotal>
        <RefLink>https:&#47;&#47;doi.org&#47;10.1007&#47;s10278-017-9990-5</RefLink>
      </Reference>
      <Reference refNo="40">
        <RefAuthor>Oh K</RefAuthor>
        <RefAuthor>Almarode J</RefAuthor>
        <RefAuthor>Tai R</RefAuthor>
        <RefTitle>An exploration of think-aloud protocols linked with eye-gaze tracking: Are they talking about what they are looking at&#63;</RefTitle>
        <RefYear>2013</RefYear>
        <RefJournal>Procedia Soc Behav Sci</RefJournal>
        <RefPage>184-189</RefPage>
        <RefTotal>Oh K, Almarode J, Tai R. An exploration of think-aloud protocols linked with eye-gaze tracking: Are they talking about what they are looking at&#63; Procedia Soc Behav Sci. 2013;93:184-189. DOI: 10.1016&#47;j.sbspro.2013.09.175</RefTotal>
        <RefLink>https:&#47;&#47;doi.org&#47;10.1016&#47;j.sbspro.2013.09.175</RefLink>
      </Reference>
      <Reference refNo="41">
        <RefAuthor>Olsen A</RefAuthor>
        <RefTitle></RefTitle>
        <RefYear>2012</RefYear>
        <RefBookTitle>The Tobii I-VT fixation filter</RefBookTitle>
        <RefPage></RefPage>
        <RefTotal>Olsen A. The Tobii I-VT fixation filter. Danderyd: Tobii Technology; 2012.</RefTotal>
      </Reference>
      <Reference refNo="42">
        <RefAuthor>Orquin JL</RefAuthor>
        <RefAuthor>Holmqvist K</RefAuthor>
        <RefTitle>Threats to the validity of eye-movement research in psychology</RefTitle>
        <RefYear>2018</RefYear>
        <RefJournal>Behav Res Methods</RefJournal>
        <RefPage>1645-1656</RefPage>
        <RefTotal>Orquin JL, Holmqvist K. Threats to the validity of eye-movement research in psychology. Behav Res Methods. 2018;50(4):1645-1656. DOI: 10.3758&#47;s13428-017-0998-z</RefTotal>
        <RefLink>https:&#47;&#47;doi.org&#47;10.3758&#47;s13428-017-0998-z</RefLink>
      </Reference>
      <Reference refNo="43">
        <RefAuthor>Palinko O</RefAuthor>
        <RefAuthor>Kun AL</RefAuthor>
        <RefAuthor>Shyrokov A</RefAuthor>
        <RefAuthor>Heeman P</RefAuthor>
        <RefTitle>Estimating cognitive load using remote eye tracking in a driving simulator</RefTitle>
        <RefYear>2010</RefYear>
        <RefBookTitle>Proceedings of the 2010 Symposium on Eye-Tracking Research, Austin, Texas</RefBookTitle>
        <RefPage></RefPage>
        <RefTotal>Palinko O, Kun AL, Shyrokov A, Heeman P. Estimating cognitive load using remote eye tracking in a driving simulator. In: Proceedings of the 2010 Symposium on Eye-Tracking Research, Austin, Texas. 2010.</RefTotal>
      </Reference>
      <Reference refNo="44">
        <RefAuthor>Papoutsaki A</RefAuthor>
        <RefAuthor>Laskey J</RefAuthor>
        <RefAuthor>Huang J</RefAuthor>
        <RefTitle>SearchGazer: Webcam eye tracking for remote studies of web search</RefTitle>
        <RefYear>2017</RefYear>
        <RefBookTitle>Proceedings of the 2017 Conference on Conference Human Information Interaction and Retrieval, New York</RefBookTitle>
        <RefPage></RefPage>
        <RefTotal>Papoutsaki A, Laskey J, Huang J. SearchGazer: Webcam eye tracking for remote studies of web search. In: Proceedings of the 2017 Conference on Conference Human Information Interaction and Retrieval, New York. 2017.</RefTotal>
      </Reference>
      <Reference refNo="45">
        <RefAuthor>Papoutsaki A</RefAuthor>
        <RefAuthor>Sangkloy P</RefAuthor>
        <RefAuthor>Laskey J</RefAuthor>
        <RefAuthor>Daskalova N</RefAuthor>
        <RefAuthor>Huang J</RefAuthor>
        <RefAuthor>Hays J</RefAuthor>
        <RefTitle>WebGazer: Scalable webcam eye tracking using user interactions</RefTitle>
        <RefYear>2016</RefYear>
        <RefBookTitle>Proceedings of the twenty-fifth international joint conference on artificial intelligence, New York</RefBookTitle>
        <RefPage></RefPage>
        <RefTotal>Papoutsaki A, Sangkloy P, Laskey J, Daskalova N, Huang J, Hays J. WebGazer: Scalable webcam eye tracking using user interactions. In: Proceedings of the twenty-fifth international joint conference on artificial intelligence, New York. 2016.</RefTotal>
      </Reference>
      <Reference refNo="46">
        <RefAuthor>Reingold E</RefAuthor>
        <RefAuthor>Sheridan H</RefAuthor>
        <RefTitle>Eye movements and visual expertise in chess and medicine</RefTitle>
        <RefYear>2011</RefYear>
        <RefBookTitle>Oxford handbook of eye movements</RefBookTitle>
        <RefPage>523-550</RefPage>
        <RefTotal>Reingold E, Sheridan H. Eye movements and visual expertise in chess and medicine. In: Liversedge SP, Gilchrist ID, Everling S, editors. Oxford handbook of eye movements. Oxford: Oxford University Press; 2011. p.523-550.</RefTotal>
      </Reference>
      <Reference refNo="47">
        <RefAuthor>Roach VA</RefAuthor>
        <RefAuthor>Fraser GM</RefAuthor>
        <RefAuthor>Kryklywy JH</RefAuthor>
        <RefAuthor>Mitchell DG</RefAuthor>
        <RefAuthor>Wilson TD</RefAuthor>
        <RefTitle>Guiding low spatial ability individuals through visual cueing: The dual importance of where and when to look</RefTitle>
        <RefYear>2019</RefYear>
        <RefJournal>Anat Sci Educ</RefJournal>
        <RefPage>32-42</RefPage>
        <RefTotal>Roach VA, Fraser GM, Kryklywy JH, Mitchell DG, Wilson TD. Guiding low spatial ability individuals through visual cueing: The dual importance of where and when to look. Anat Sci Educ. 2019;12(1):32-42. DOI: 10.1002&#47;ase.1783</RefTotal>
        <RefLink>https:&#47;&#47;doi.org&#47;10.1002&#47;ase.1783</RefLink>
      </Reference>
      <Reference refNo="48">
        <RefAuthor>Rosebrock A</RefAuthor>
        <RefTitle>OpenCV Saliency Detection</RefTitle>
        <RefYear>2021</RefYear>
        <RefTotal>Rosebrock A. OpenCV Saliency Detection. 2021 Oct 5. Zug&#228;nglich unter&#47;available from: https:&#47;&#47;www.pyimagesearch.com&#47;2018&#47;07&#47;16&#47;opencv-saliency-detection&#47;</RefTotal>
        <RefLink>https:&#47;&#47;www.pyimagesearch.com&#47;2018&#47;07&#47;16&#47;opencv-saliency-detection&#47;</RefLink>
      </Reference>
      <Reference refNo="49">
        <RefAuthor>Rosicke S</RefAuthor>
        <RefAuthor>Leonard CJ</RefAuthor>
        <RefAuthor>Carry P</RefAuthor>
        <RefAuthor>Lee LM</RefAuthor>
        <RefTitle>Does &#8220;See one and do one&#8221; work in histology&#63; A two-phase cross-over eye tracking study on complex histological pattern recognition skills acquisition</RefTitle>
        <RefYear>2019</RefYear>
        <RefJournal>FASEB J</RefJournal>
        <RefPage>444.33</RefPage>
        <RefTotal>Rosicke S, Leonard CJ, Carry P, Lee LM. Does &#8220;See one and do one&#8221; work in histology&#63; A two-phase cross-over eye tracking study on complex histological pattern recognition skills acquisition. FASEB J. 2019;33(S1):444.33. DOI: 10.1096&#47;fasebj.2019.33.1&#95;supplement.444.33</RefTotal>
        <RefLink>https:&#47;&#47;doi.org&#47;10.1096&#47;fasebj.2019.33.1&#95;supplement.444.33</RefLink>
      </Reference>
      <Reference refNo="50">
        <RefAuthor>Semmelmann K</RefAuthor>
        <RefAuthor>Weigelt S</RefAuthor>
        <RefTitle>Online webcam-based eye tracking in cognitive science: A first look</RefTitle>
        <RefYear>2018</RefYear>
        <RefJournal>Behav Res Methods</RefJournal>
        <RefPage>451-465</RefPage>
        <RefTotal>Semmelmann K, Weigelt S. Online webcam-based eye tracking in cognitive science: A first look. Behav Res Methods. 2018;50(2):451-465. DOI: 10.3758&#47;s13428-017-0913-7</RefTotal>
        <RefLink>https:&#47;&#47;doi.org&#47;10.3758&#47;s13428-017-0913-7</RefLink>
      </Reference>
      <Reference refNo="51">
        <RefAuthor>Sheridan H</RefAuthor>
        <RefAuthor>Reingold EM</RefAuthor>
        <RefTitle>The holistic processing account of visual expertise in medical image perception: A review</RefTitle>
        <RefYear>2017</RefYear>
        <RefJournal>Front Psychol</RefJournal>
        <RefPage>1620</RefPage>
        <RefTotal>Sheridan H, Reingold EM. The holistic processing account of visual expertise in medical image perception: A review. Front Psychol. 2017;8:1620. DOI: 10.3389&#47;fpsyg.2017.01620</RefTotal>
        <RefLink>https:&#47;&#47;doi.org&#47;10.3389&#47;fpsyg.2017.01620</RefLink>
      </Reference>
      <Reference refNo="52">
        <RefAuthor>Sibbald M</RefAuthor>
        <RefAuthor>de Bruin AB</RefAuthor>
        <RefAuthor>Yu E</RefAuthor>
        <RefAuthor>van Merri&#235;nboer JJ</RefAuthor>
        <RefTitle>Why verifying diagnostic decisions with a checklist can help: insights from eye tracking</RefTitle>
        <RefYear>2015</RefYear>
        <RefJournal>Adv Health Sci Educ Theory Pract</RefJournal>
        <RefPage>1053-1060</RefPage>
        <RefTotal>Sibbald M, de Bruin AB, Yu E, van Merri&#235;nboer JJ. Why verifying diagnostic decisions with a checklist can help: insights from eye tracking. Adv Health Sci Educ Theory Pract. 2015;20(4):1053-1060. DOI: 10.1007&#47;s10459-015-9585-1</RefTotal>
        <RefLink>https:&#47;&#47;doi.org&#47;10.1007&#47;s10459-015-9585-1</RefLink>
      </Reference>
      <Reference refNo="54">
        <RefAuthor>Stine R</RefAuthor>
        <RefTitle>An Introduction to Bootstrap methods: Examples and ideas</RefTitle>
        <RefYear>1989</RefYear>
        <RefJournal>Sociol Methods Res</RefJournal>
        <RefPage>243-291</RefPage>
        <RefTotal>Stine R. An Introduction to Bootstrap methods: Examples and ideas. Sociol Methods Res. 1989;18(2-3):243-291. DOI: 10.1177&#47;0049124189018002003</RefTotal>
        <RefLink>https:&#47;&#47;doi.org&#47;10.1177&#47;0049124189018002003</RefLink>
      </Reference>
      <Reference refNo="55">
        <RefAuthor>Szulewski A</RefAuthor>
        <RefAuthor>Roth N</RefAuthor>
        <RefAuthor>Howes D</RefAuthor>
        <RefTitle>The use of task-evoked pupillary response as an objective measure of cognitive load in novices and trained physicians a new tool for the assessment of expertise</RefTitle>
        <RefYear>2015</RefYear>
        <RefJournal>Acad Med</RefJournal>
        <RefPage>981-987</RefPage>
        <RefTotal>Szulewski A, Roth N, Howes D. The use of task-evoked pupillary response as an objective measure of cognitive load in novices and trained physicians a new tool for the assessment of expertise. Acad Med. 2015;90(7):981-987. DOI: 10.1097&#47;ACM.0000000000000677</RefTotal>
        <RefLink>https:&#47;&#47;doi.org&#47;10.1097&#47;ACM.0000000000000677</RefLink>
      </Reference>
      <Reference refNo="53">
        <RefAuthor>Sqalli MT</RefAuthor>
        <RefAuthor>Al-Thani D</RefAuthor>
        <RefAuthor>Elshazly MB</RefAuthor>
        <RefAuthor>Al-Hijji M</RefAuthor>
        <RefAuthor>Alahmadi A</RefAuthor>
        <RefAuthor>Houssaini YS</RefAuthor>
        <RefTitle>Understanding cardiology practitioners&#8217; interpretations of electrocardiograms: An eye-tracking study</RefTitle>
        <RefYear>2022</RefYear>
        <RefJournal>JMIR Human Factors</RefJournal>
        <RefPage>e34058</RefPage>
        <RefTotal>Sqalli MT, Al-Thani D, Elshazly MB, Al-Hijji M, Alahmadi A, Houssaini YS. Understanding cardiology practitioners&#8217; interpretations of electrocardiograms: An eye-tracking study. JMIR Human Factors. 2022;9(1):e34058. DOI: 10.2196&#47;34058</RefTotal>
        <RefLink>https:&#47;&#47;doi.org&#47;10.2196&#47;34058</RefLink>
      </Reference>
      <Reference refNo="56">
        <RefAuthor>Tatler B</RefAuthor>
        <RefAuthor>Wade N</RefAuthor>
        <RefAuthor>Kwan H</RefAuthor>
        <RefAuthor>Findlay J</RefAuthor>
        <RefAuthor>Velichkovsky B</RefAuthor>
        <RefTitle>Yarbus, eye movements, and vision</RefTitle>
        <RefYear>2010</RefYear>
        <RefJournal>Iperception</RefJournal>
        <RefPage>7-27</RefPage>
        <RefTotal>Tatler B, Wade N, Kwan H, Findlay J, Velichkovsky B. Yarbus, eye movements, and vision. Iperception. 2010;1(1):7-27. DOI: 10.1068&#47;i0382</RefTotal>
        <RefLink>https:&#47;&#47;doi.org&#47;10.1068&#47;i0382</RefLink>
      </Reference>
      <Reference refNo="57">
        <RefAuthor>Tempelaar D</RefAuthor>
        <RefAuthor>Rienties B</RefAuthor>
        <RefAuthor>Nguyen Q</RefAuthor>
        <RefTitle>Subjective data, objective data and the role of bias in predictive modelling: Lessons from a dispositional learning analytics application</RefTitle>
        <RefYear>2020</RefYear>
        <RefJournal>PLoS One</RefJournal>
        <RefPage>e0233977</RefPage>
        <RefTotal>Tempelaar D, Rienties B, Nguyen Q. Subjective data, objective data and the role of bias in predictive modelling: Lessons from a dispositional learning analytics application. PLoS One. 2020;15(6):e0233977. DOI: 10.1371&#47;journal.pone.0233977</RefTotal>
        <RefLink>https:&#47;&#47;doi.org&#47;10.1371&#47;journal.pone.0233977</RefLink>
      </Reference>
      <Reference refNo="58">
        <RefAuthor>Valliappan N</RefAuthor>
        <RefAuthor>Dai N</RefAuthor>
        <RefAuthor>Steinberg E</RefAuthor>
        <RefAuthor>He J</RefAuthor>
        <RefAuthor>Rogers K</RefAuthor>
        <RefAuthor>Ramachandran V</RefAuthor>
        <RefAuthor>Xu P</RefAuthor>
        <RefAuthor>Shojaeizadeh M</RefAuthor>
        <RefAuthor>Guo L</RefAuthor>
        <RefAuthor>Kohlhoff K</RefAuthor>
        <RefAuthor>Navalpakkam V</RefAuthor>
        <RefTitle>Accelerating eye movement research via accurate and affordable smartphone eye tracking</RefTitle>
        <RefYear>2020</RefYear>
        <RefJournal>Nat Commun</RefJournal>
        <RefPage>4553</RefPage>
        <RefTotal>Valliappan N, Dai N, Steinberg E, He J, Rogers K, Ramachandran V, Xu P, Shojaeizadeh M, Guo L, Kohlhoff K, Navalpakkam V. Accelerating eye movement research via accurate and affordable smartphone eye tracking. Nat Commun. 2020;11(1):4553. DOI: 10.1038&#47;s41467-020-18360-5</RefTotal>
        <RefLink>https:&#47;&#47;doi.org&#47;10.1038&#47;s41467-020-18360-5</RefLink>
      </Reference>
      <Reference refNo="59">
        <RefAuthor>van der Gijp A</RefAuthor>
        <RefAuthor>Ravesloot CJ</RefAuthor>
        <RefAuthor>Jarodzka H</RefAuthor>
        <RefAuthor>van der Schaaf MF</RefAuthor>
        <RefAuthor>van der Schaaf IC</RefAuthor>
        <RefAuthor>van Schaik JP</RefAuthor>
        <RefAuthor>Ten Cate TJ</RefAuthor>
        <RefTitle>How visual search relates to visual diagnostic performance: a narrative systematic review of eye-tracking research in radiology</RefTitle>
        <RefYear>2017</RefYear>
        <RefJournal>Adv Health Sci Educ Theory Pract</RefJournal>
        <RefPage>765-787</RefPage>
        <RefTotal>van der Gijp A, Ravesloot CJ, Jarodzka H, van der Schaaf MF, van der Schaaf IC, van Schaik JP, Ten Cate TJ. How visual search relates to visual diagnostic performance: a narrative systematic review of eye-tracking research in radiology. Adv Health Sci Educ Theory Pract. 2017;22(3):765-787. DOI: 10.1007&#47;s10459-016-9698-1</RefTotal>
        <RefLink>https:&#47;&#47;doi.org&#47;10.1007&#47;s10459-016-9698-1</RefLink>
      </Reference>
      <Reference refNo="60">
        <RefAuthor>van Montfort D</RefAuthor>
        <RefAuthor>Kok E</RefAuthor>
        <RefAuthor>Vincken K</RefAuthor>
        <RefAuthor>van der Schaaf M</RefAuthor>
        <RefAuthor>van der Gijp A</RefAuthor>
        <RefAuthor>Ravesloot C</RefAuthor>
        <RefAuthor>Rutgers R</RefAuthor>
        <RefTitle>Expertise development in volumetric image interpretation of radiology residents: what do longitudinal scroll data reveal&#63;</RefTitle>
        <RefYear>2021</RefYear>
        <RefJournal>Adv Health Sci Educ Theory Pract</RefJournal>
        <RefPage>437-466</RefPage>
        <RefTotal>van Montfort D, Kok E, Vincken K, van der Schaaf M, van der Gijp A, Ravesloot C, Rutgers R. Expertise development in volumetric image interpretation of radiology residents: what do longitudinal scroll data reveal&#63; Adv Health Sci Educ Theory Pract. 2021;26(2):437-466. DOI: 10.1007&#47;s10459-020-09995-6</RefTotal>
        <RefLink>https:&#47;&#47;doi.org&#47;10.1007&#47;s10459-020-09995-6</RefLink>
      </Reference>
      <Reference refNo="61">
        <RefAuthor>von der Malsburg T</RefAuthor>
        <RefAuthor>Vasishth S</RefAuthor>
        <RefTitle>What is the scanpath signature of syntactic reanalysis&#63;</RefTitle>
        <RefYear>2011</RefYear>
        <RefJournal>J Mem Lang</RefJournal>
        <RefPage>109-127</RefPage>
        <RefTotal>von der Malsburg T, Vasishth S. What is the scanpath signature of syntactic reanalysis&#63; J Mem Lang. 2011;65(2):109-127. DOI: 10.1016&#47;j.jml.2011.02.004</RefTotal>
        <RefLink>https:&#47;&#47;doi.org&#47;10.1016&#47;j.jml.2011.02.004</RefLink>
      </Reference>
      <Reference refNo="62">
        <RefAuthor>Ziv G</RefAuthor>
        <RefTitle>Gaze behavior and visual attention: A review of eye tracking studies in aviation</RefTitle>
        <RefYear>2016</RefYear>
        <RefJournal>Int J Aviat Psychol</RefJournal>
        <RefPage>75-104</RefPage>
        <RefTotal>Ziv G. Gaze behavior and visual attention: A review of eye tracking studies in aviation. Int J Aviat Psychol. 2016;26(3-4):75-104. DOI: 10.1080&#47;10508414.2017.1313096</RefTotal>
        <RefLink>https:&#47;&#47;doi.org&#47;10.1080&#47;10508414.2017.1313096</RefLink>
      </Reference>
    </References>
    <Media>
      <Tables>
        <Table format="png">
          <MediaNo>1</MediaNo>
          <MediaID language="en">1en</MediaID>
          <MediaID language="de">1de</MediaID>
          <Caption language="en"><Pgraph><Mark1>Table 1: Variables used in this study, their description, respective units, prediction, and descriptive statistics</Mark1></Pgraph></Caption>
          <Caption language="de"><Pgraph><Mark1>Tabelle 1: Variablen, die in dieser Studie genutzt wurden, deren Beschreibung, Einheiten, Erwartungen, sowie deskriptive Statistiken</Mark1></Pgraph></Caption>
        </Table>
        <Table format="png">
          <MediaNo>2</MediaNo>
          <MediaID language="en">2en</MediaID>
          <MediaID language="de">2de</MediaID>
          <Caption language="en"><Pgraph><Mark1>Table 2: Predictive value of eye metrics on the test scores at timepoints t1 and t2. Deviation of &#946; from zero is associated with a higher test score in the slide identification task</Mark1><LineBreak></LineBreak></Pgraph></Caption>
          <Caption language="de"><Pgraph><Mark1>Tabelle 2: Vorhersagebeitrag der Augenbewegungen auf den Testscore an Zeitpunkt 1 und 2. Eine Abweichung der &#946;-Werte von 0 ist mit einer h&#246;heren Performanz beim Erkennen von histologischen Pr&#228;paraten assoziiert</Mark1></Pgraph></Caption>
        </Table>
        <NoOfTables>2</NoOfTables>
      </Tables>
      <Figures>
        <Figure format="png" height="308" width="877">
          <MediaNo>1</MediaNo>
          <MediaID language="en">1en</MediaID>
          <MediaID language="de">1de</MediaID>
          <Caption language="en"><Pgraph><Mark1>Figure 1: The study procedure is shown schematically. </Mark1><LineBreak></LineBreak>At two timepoints during an online-only histology course, one cohort of third semester medical students passed both a 9-point eye-tracking calibration and a 4-point test for eye-tracking accuracy. Afterward, they were instructed to identify 6 histological slides each (<Mark2>slide identification task</Mark2>). The test score, view time and several eye movements were recorded. Data were preprocessed and analyzed.<LineBreak></LineBreak><LineBreak></LineBreak><LineBreak></LineBreak><LineBreak></LineBreak></Pgraph></Caption>
          <Caption language="de"><Pgraph><Mark1>Abbildung 1: Schematische Darstellung des Studienablaufs. </Mark1><LineBreak></LineBreak>Zu zwei Zeitpunkten eines online Histologie-Kurses absolvierten Medizinstudierende im dritten Semester eine 9-Punkte-Kalibrierung und einen 4-Punkte-Test zur Erfassung der Genauigkeit des Webcam-Eye-Trackings. Anschlie&#223;end wurden sie aufgefordert, jeweils 6 histologische Pr&#228;parate zu identifizieren. W&#228;hrenddessen wurden die Testergebnisse, Betrachtungszeit und die Augenbewegungen aufgezeichnet. Die Daten wurden vorverarbeitet und analysiert.<LineBreak></LineBreak><LineBreak></LineBreak><LineBreak></LineBreak><LineBreak></LineBreak><LineBreak></LineBreak></Pgraph></Caption>
        </Figure>
        <Figure format="png" height="688" width="848">
          <MediaNo>2</MediaNo>
          <MediaID language="en">2en</MediaID>
          <MediaID language="de">2de</MediaID>
          <Caption language="en"><Pgraph><Mark1>Figure 2: Histological slides used in this study (name&#43;staining). </Mark1><LineBreak></LineBreak>(a) Slides shown at timepoint one (t1): 1) Alveoli in Hematoxylin Eosin; 2) Adrenal glands in H.E.; 3) Peripheral nerve in Azan; 4) Thymus in H.E.; 5) Bone in H.E.; 6) Skin &#8211; Finger in Elastica. (b) Slides shown at timepoint two (t2): 1) Liver in van Gieson; 2) Thymus in Azan; 3) Ureter in H.E.; 4) Stomach in van Gieson; 5) Kidney in Azan; 6) Testis in Azan.<LineBreak></LineBreak><LineBreak></LineBreak><LineBreak></LineBreak></Pgraph></Caption>
          <Caption language="de"><Pgraph><Mark1>Abbildung 2: In dieser Studie verwendete histologische Pr&#228;parate (Name&#43;F&#228;rbung). </Mark1><LineBreak></LineBreak>(a) Pr&#228;parate, die zum Zeitpunkt eins (t1) gezeigt wurden: 1) Alveolen in H&#228;matoxylin-Eosin; 2) Nebenniere in H.E.; 3) Peripherer Nerv in Azan; 4) Thymus in H.E.; 5) Knochen in H.E.; 6) Fingerbeere in Elastica. (b) Pr&#228;parate, die zum Zeitpunkt zwei (t2) gezeigt wurden: 1) Leber in van-Gieson; 2) Thymus in Azan; 3) Ureter in H.E.; 4) Magen in van-Gieson; 5) Niere in Azan; 6) Hoden in Azan.<LineBreak></LineBreak><LineBreak></LineBreak><LineBreak></LineBreak><LineBreak></LineBreak></Pgraph></Caption>
        </Figure>
        <Figure format="png" height="574" width="878">
          <MediaNo>3</MediaNo>
          <MediaID language="en">3en</MediaID>
          <MediaID language="de">3de</MediaID>
          <Caption language="en"><Pgraph><Mark1>Figure 3: Webcam eye-tracking methodology &#8211; data quality. </Mark1><LineBreak></LineBreak>(a) Fixation-based heatmaps for testing accuracy for timepoints one and two. Data are aggregated per group. The color of the point cloud corresponds to the intensity of the subjects&#8217; gaze (red&#61;strong; blue&#61;weak). (b) Accuracy is quantified and expressed in pixels. (c) Sample rate in Hz and <Mark2>mean</Mark2> gaze on screen time in &#37; are shown as additional indicators for the data quality at t1 and t2, respectively. The red dotted line marks the cutoff for study inclusion. Participants with either a sample rate&#60;20 Hz or a gaze on screen rate&#60;70&#37; were excluded in the preprocessing phase.</Pgraph></Caption>
          <Caption language="de"><Pgraph><Mark1>Abbildung 3: Webcam-Eye-Tracking &#8211; Datenqualit&#228;t. </Mark1><LineBreak></LineBreak>(a) Fixationsbasierte Heatmaps zur Erfassung der Messgenauigkeit f&#252;r die Messzeitpunkte t1 und t2. Die Daten sind jeweils pro Gruppe aggregiert. Die Farbe der Punktewolke entspricht der Intensit&#228;t des Blicks der Probanden (rot&#61;stark; blau&#61;schwach). (b) Die Genauigkeit wird quantifiziert und in Pixeln ausgedr&#252;ckt. (c) Die Bildrate in Hz und die durchschnittliche Blick-auf-Bildschirm-Zeit in &#37; und die Vollst&#228;ndigkeit der Daten (Datenintegrit&#228;t) werden als zus&#228;tzliche Indikatoren f&#252;r die Datenqualit&#228;t bei t1 bzw. t2 angegeben. Die gestrichelte rote Linie markiert den Grenzwert f&#252;r den Studieneinschluss. Teilnehmer mit einer Bildrate von unter 20 Hz oder einer Blick-auf-Bildschirm-Rate von unter 70&#37; wurden in der Vorverarbeitungsphase ausgeschlossen.</Pgraph></Caption>
        </Figure>
        <Figure format="png" height="347" width="769">
          <MediaNo>4</MediaNo>
          <MediaID language="en">4en</MediaID>
          <MediaID language="de">4de</MediaID>
          <Caption language="en"><Pgraph><Mark1>Figure 4: Online histology course &#8211; performance data. </Mark1><LineBreak></LineBreak>The results for timepoint 1 are shown in blue, and those for timepoint 2 are shown in green. The <Mark2>P</Mark2>-value shows the result of the statistical testing. Each circle represents one participant. Columns with the <Mark2>mean</Mark2> and <Mark2>standard</Mark2> <Mark2>deviation</Mark2> show the <Mark2>mean</Mark2> view time on the slides in seconds (maximum 15 seconds) and the mean test score in points (x out of six questions). <Mark2>t1&#61;timepoint 1 after 10 course sessions; t2&#61;timepoint 2 after 20 course sessions</Mark2>.<LineBreak></LineBreak><LineBreak></LineBreak><LineBreak></LineBreak><LineBreak></LineBreak></Pgraph></Caption>
          <Caption language="de"><Pgraph><Mark1>Abbildung 4: Online-Kurs der Histologie &#8211; Leistungsdaten. </Mark1><LineBreak></LineBreak>Die Ergebnisse f&#252;r den Zeitpunkt 1 sind blau- und die Ergebnisse f&#252;r den Zeitpunkt 2 sind gr&#252;n dargestellt. Der P-Wert zeigt das Ergebnis der statistischen Testung an. Jeder Kreis repr&#228;sentiert einen Teilnehmer. Die Balken zeigen die durchschnittliche Betrachtungszeit auf den Pr&#228;paraten in Sekunden (maximal 15 Sekunden) und die durchschnittlichen Testergebnisse in Punkten (x von sechs Fragen) an. <Mark2>t1&#61;Zeitpunkt 1 nach 10 Kurseinheiten; t2&#61;Zeitpunkt 2 nach 20 Kurseinheiten</Mark2>.</Pgraph></Caption>
        </Figure>
        <Figure format="png" height="753" width="1009">
          <MediaNo>5</MediaNo>
          <MediaID language="en">5en</MediaID>
          <MediaID language="de">5de</MediaID>
          <Caption language="en"><Pgraph><Mark1>Figure 5: Webcam eye-tracking methodology &#8211; eye metrics. </Mark1><LineBreak></LineBreak>(a) An overlay of all participants&#8217; fixations of the range 0.5-5 seconds of image viewing are shown as a fixation-based heatmap for both timepoints, respectively. (b-d) The results for t1 are shown in blue, and t2 are shown in green. The <Mark2>P</Mark2>-value shows the result of the statistical testing. Each circle represents one participant. The median and 25<Superscript>th</Superscript>-75<Superscript>th</Superscript> percentile are shown for the violin plots. (b) Violin plots illustrate the fixation count on the dAOI and the vAOI for both timepoints, respectively. (c) Violin plots illustrate the fixation duration on dAOI and vAOI for both timepoints, respectively. (d) Violin plots illustrate the time to first fixation on dAOIs and vAOIs. <Mark2>Abbreviations: &#42;&#61;View time adjusted values were used; dAOI&#61;diagnostically most relevant areas of interest; vAOI&#61;visually most salient but diagnostically irrelevant areas of interest; t1&#61;timepoint 1 after 10 course sessions; t2&#61;timepoint 2 after 20</Mark2> <Mark2>course sessions</Mark2>.<LineBreak></LineBreak><LineBreak></LineBreak><LineBreak></LineBreak><LineBreak></LineBreak></Pgraph></Caption>
          <Caption language="de"><Pgraph><Mark1>Abbildung 5: Webcam-Eye-Tracking Technologie &#8211; Augenbewegungen. </Mark1><LineBreak></LineBreak>(a) Gezeigt sind die aggregierten Fixationen aller Teilnehmenden in den ersten 5 Sekunden als Punktewolke. (b-d) Die Ergebnisse f&#252;r t1 sind blau- und die Ergebnisse f&#252;r t2 sind gr&#252;n dargestellt. Der P-Wert zeigt das Ergebnis der statistischen Testung. Jeder Kreis repr&#228;sentiert einen Teilnehmer. F&#252;r die Violinplots sind der Median und das 25.-75. Perzentil dargestellt. (b) Violinplots zeigen die Fixationsanzahl auf dAOIs und vAOIs f&#252;r beide Zeitpunkte. (c) Violinplots zeigen die Fixationsdauer auf dAOIs und vAOIs f&#252;r beide Zeitpunkte. (d) Violinplots zeigen die Zeit bis zur ersten Fixation auf dAOIs und vAOIs f&#252;r beide Zeitpunkte. <Mark2>Abk&#252;rzungen: &#42;&#61;Es wurden die Blickzeit-angepasste Werte verwendet; dAOI&#61;diagnostisch relevante Bildbereiche, vAOI&#61;visuell auff&#228;llige, aber diagnostisch irrelevante Bildbereiche; t1&#61;Zeitpunkt 1 nach 10 Kurseinheiten; t2&#61;Zeitpunkt 2 nach 20 Kurseinheiten</Mark2>.<LineBreak></LineBreak><LineBreak></LineBreak></Pgraph></Caption>
        </Figure>
        <NoOfPictures>5</NoOfPictures>
      </Figures>
      <InlineFigures>
        <NoOfPictures>0</NoOfPictures>
      </InlineFigures>
      <Attachments>
        <Attachment>
          <MediaNo>1</MediaNo>
          <MediaID filename="zma001642.a1en.pdf" language="en" mimeType="application/pdf" origFilename="Attachment&#95;1.pdf" size="1477855" url="">1en</MediaID>
          <MediaID filename="zma001642.a1de.pdf" language="de" mimeType="application/pdf" origFilename="Anhang&#95;1.pdf" size="238321" url="">1de</MediaID>
          <AttachmentTitle language="en">Supplemental material</AttachmentTitle>
          <AttachmentTitle language="de">Erg&#228;nzendes Material</AttachmentTitle>
        </Attachment>
        <NoOfAttachments>1</NoOfAttachments>
      </Attachments>
    </Media>
  </OrigData>
</GmsArticle>