<?xml version="1.0" encoding="iso-8859-1" standalone="no"?>
<GmsArticle>
  <MetaData>
    <Identifier>mibe000115</Identifier>
    <IdentifierDoi>10.3205/mibe000115</IdentifierDoi>
    <IdentifierUrn>urn:nbn:de:0183-mibe0001150</IdentifierUrn>
    <ArticleType>Research Article</ArticleType>
    <TitleGroup>
      <Title language="en">Assessing qualitative usability in life-threatening, time-critical and unstable situations</Title>
    </TitleGroup>
    <CreatorList>
      <Creator>
        <PersonNames>
          <Lastname>Nestler</Lastname>
          <LastnameHeading>Nestler</LastnameHeading>
          <Firstname>Simon</Firstname>
          <Initials>S</Initials>
        </PersonNames>
        <Address>Technische Universit&#228;t M&#252;nchen, Fakult&#228;t f&#252;r Informatik, Fachgebiet Augmented Reality, Boltzmannstra&#223;e 3, 85748 Garching bei M&#252;nchen, Germany<Affiliation>Technische Universit&#228;t M&#252;nchen, Fakult&#228;t f&#252;r Informatik, Fachgebiet Augmented Reality, Garching bei M&#252;nchen, Germany</Affiliation></Address>
        <Email>simon&#64;nestlers.de</Email>
        <Creatorrole corresponding="yes" presenting="no">author</Creatorrole>
      </Creator>
      <Creator>
        <PersonNames>
          <Lastname>Artinger</Lastname>
          <LastnameHeading>Artinger</LastnameHeading>
          <Firstname>Eva</Firstname>
          <Initials>E</Initials>
        </PersonNames>
        <Address>Technische Universit&#228;t M&#252;nchen, Fakult&#228;t f&#252;r Informatik, Fachgebiet Augmented Reality, Boltzmannstra&#223;e 3, 85748 Garching bei M&#252;nchen, Germany<Affiliation>Technische Universit&#228;t M&#252;nchen, Fakult&#228;t f&#252;r Informatik, Fachgebiet Augmented Reality, Garching bei M&#252;nchen, Germany</Affiliation></Address>
        <Creatorrole corresponding="no" presenting="no">author</Creatorrole>
      </Creator>
      <Creator>
        <PersonNames>
          <Lastname>Coskun</Lastname>
          <LastnameHeading>Coskun</LastnameHeading>
          <Firstname>Tayfur</Firstname>
          <Initials>T</Initials>
        </PersonNames>
        <Address>Technische Universit&#228;t M&#252;nchen, Fakult&#228;t f&#252;r Informatik, Fachgebiet Augmented Reality, Boltzmannstra&#223;e 3, 85748 Garching bei M&#252;nchen, Germany<Affiliation>Technische Universit&#228;t M&#252;nchen, Fakult&#228;t f&#252;r Informatik, Fachgebiet Augmented Reality, Garching bei M&#252;nchen, Germany</Affiliation></Address>
        <Creatorrole corresponding="no" presenting="no">author</Creatorrole>
      </Creator>
      <Creator>
        <PersonNames>
          <Lastname>Yildirim-Krannig</Lastname>
          <LastnameHeading>Yildirim-Krannig</LastnameHeading>
          <Firstname>Yeliz</Firstname>
          <Initials>Y</Initials>
        </PersonNames>
        <Address>Friedrich-Schiller-Universit&#228;t Jena, Interkulturelle Wirtschaftskommunikation, Ernst-Abbe-Platz 8, 07743 Jena, Germany<Affiliation>Friedrich-Schiller-Universit&#228;t Jena, Interkulturelle Wirtschaftskommunikation, Jena, Germany</Affiliation></Address>
        <Creatorrole corresponding="no" presenting="no">author</Creatorrole>
      </Creator>
      <Creator>
        <PersonNames>
          <Lastname>Schumann</Lastname>
          <LastnameHeading>Schumann</LastnameHeading>
          <Firstname>Sandy</Firstname>
          <Initials>S</Initials>
        </PersonNames>
        <Address>Friedrich-Schiller-Universit&#228;t Jena, Interkulturelle Wirtschaftskommunikation, Ernst-Abbe-Platz 8, 07743 Jena, Germany<Affiliation>Friedrich-Schiller-Universit&#228;t Jena, Interkulturelle Wirtschaftskommunikation, Jena, Germany</Affiliation></Address>
        <Creatorrole corresponding="no" presenting="no">author</Creatorrole>
      </Creator>
      <Creator>
        <PersonNames>
          <Lastname>Maehler</Lastname>
          <LastnameHeading>Maehler</LastnameHeading>
          <Firstname>Mareike</Firstname>
          <Initials>M</Initials>
        </PersonNames>
        <Address>Friedrich-Schiller-Universit&#228;t Jena, Interkulturelle Wirtschaftskommunikation, Ernst-Abbe-Platz 8, 07743 Jena, Germany<Affiliation>Friedrich-Schiller-Universit&#228;t Jena, Interkulturelle Wirtschaftskommunikation, Jena, Germany</Affiliation></Address>
        <Creatorrole corresponding="no" presenting="no">author</Creatorrole>
      </Creator>
      <Creator>
        <PersonNames>
          <Lastname>Wucholt</Lastname>
          <LastnameHeading>Wucholt</LastnameHeading>
          <Firstname>Fabian</Firstname>
          <Initials>F</Initials>
        </PersonNames>
        <Address>Friedrich-Schiller-Universit&#228;t Jena, Interkulturelle Wirtschaftskommunikation, Ernst-Abbe-Platz 8, 07743 Jena, Germany<Affiliation>Friedrich-Schiller-Universit&#228;t Jena, Interkulturelle Wirtschaftskommunikation, Jena, Germany</Affiliation></Address>
        <Creatorrole corresponding="no" presenting="no">author</Creatorrole>
      </Creator>
      <Creator>
        <PersonNames>
          <Lastname>Strohschneider</Lastname>
          <LastnameHeading>Strohschneider</LastnameHeading>
          <Firstname>Stefan</Firstname>
          <Initials>S</Initials>
        </PersonNames>
        <Address>Friedrich-Schiller-Universit&#228;t Jena, Interkulturelle Wirtschaftskommunikation, Ernst-Abbe-Platz 8, 07743 Jena, Germany<Affiliation>Friedrich-Schiller-Universit&#228;t Jena, Interkulturelle Wirtschaftskommunikation, Jena, Germany</Affiliation></Address>
        <Email>stefan.strohschneider&#64;uni-jena.de</Email>
        <Creatorrole corresponding="no" presenting="no">author</Creatorrole>
      </Creator>
      <Creator>
        <PersonNames>
          <Lastname>Klinker</Lastname>
          <LastnameHeading>Klinker</LastnameHeading>
          <Firstname>Gudrun</Firstname>
          <Initials>G</Initials>
        </PersonNames>
        <Address>Technische Universit&#228;t M&#252;nchen, Fakult&#228;t f&#252;r Informatik, Fachgebiet Augmented Reality, Boltzmannstra&#223;e 3, 85748 Garching bei M&#252;nchen, Germany<Affiliation>Technische Universit&#228;t M&#252;nchen, Fakult&#228;t f&#252;r Informatik, Fachgebiet Augmented Reality, Garching bei M&#252;nchen, Germany</Affiliation></Address>
        <Email>klinker&#64;in.tum.de</Email>
        <Creatorrole corresponding="no" presenting="no">author</Creatorrole>
      </Creator>
    </CreatorList>
    <PublisherList>
      <Publisher>
        <Corporation>
          <Corporatename>German Medical Science GMS Publishing House</Corporatename>
        </Corporation>
        <Address>D&#252;sseldorf</Address>
      </Publisher>
    </PublisherList>
    <SubjectGroup>
      <SubjectheadingDDB>610</SubjectheadingDDB>
    </SubjectGroup>
    <DatePublishedList>
      <DatePublished>20111017</DatePublished>
    </DatePublishedList>
    <Language>engl</Language>
    <SourceGroup>
      <Journal>
        <ISSN>1860-9171</ISSN>
        <Volume>7</Volume>
        <Issue>1</Issue>
        <JournalTitle>GMS Medizinische Informatik, Biometrie und Epidemiologie</JournalTitle>
        <JournalTitleAbbr>GMS Med Inform Biom Epidemiol</JournalTitleAbbr>
        <IssueTitle>Sonderheft "Mobile Informationstechnologien in der Medizin"</IssueTitle>
      </Journal>
    </SourceGroup>
    <ArticleNo>01</ArticleNo>
  </MetaData>
  <OrigData>
    <Abstract language="en" linked="yes"><Pgraph>We developed a heuristic for assessing the usability of mobile user-i<TextGroup><PlainText>nte</PlainText></TextGroup>rfaces in life-threatening, time-critical and unstable situations in a qualitative way. The major advantages of our approach as opposed to standardized quantitative questionnaires are the independence from a baseline, the possibility to make absolute statements and the potential for adaptations. When creating a qualitative semi structured interview we adhere to the common modus operandi of the qualitative social research. On the basis of 17 common quantitative questionnaires on usability we identified the five major categories <Mark1>Utility</Mark1>, <Mark1>Intuitiveness</Mark1>, <Mark1>Memorability</Mark1>, <Mark1>Learnability</Mark1> and <Mark1>Personal Effect</Mark1>. We selected all questions from the questionnaires which are useful for assessing the usa<TextGroup><PlainText>bility of user</PlainText></TextGroup>-interfaces in emergencies. Furthermore, we rephrased the closed-ended questions to open-ended ones. The quantification of research results is possible by weighting the qualitative results in dependence to the research question.</Pgraph></Abstract>
    <TextBlock linked="yes" name="1 Introduction">
      <MainHeadline>1 Introduction</MainHeadline><Pgraph>Within the scope of the <Mark2>SpeedUp</Mark2> project &#91;The project SpeedUp is funded by the German Federal Ministry <TextGroup><PlainText>of Ed</PlainText></TextGroup>ucation and Research (BMBF) within the programme &#8220;Research for Civil Security&#8221; (May 1st, 2009&#8211;April 30th, 2012, FKZ: 13N10175). Website: <Hyperlink href="http:&#47;&#47;www.speedup-projekt.de&#47;">http:&#47;&#47;www.speedup-projekt.de&#47;</Hyperlink>.&#93; we found out that the sound evaluation of mobile user-interfaces for medical emergencies is challenging for three reasons: (1) Mobile user-i<TextGroup><PlainText>nterf</PlainText></TextGroup>aces replace paper based workflows, (2) evaluations take place in lifelike trainings and (3) stress is dominant in medical emergencies. </Pgraph><Pgraph><Mark1>Paper based workflows:</Mark1> When mobile user-interfaces are compared to paper based approaches, this comparison is inhomogeneous. Although mobile user-interfaces increase the quality of information &#91;Quality of information is enhanced by increased structuring.&#93;, entering high quality information is more laborious. By comparing the usability of the mobile user-interface to the usability of paper, the mobile user-interface is in an inferior position. It is essential to provide a possibility to evaluate mobile user-interfaces without needing a baseline (<Mark2>see A</Mark2>). </Pgraph><Pgraph><Mark1>Real life scenarios:</Mark1> Due to the fact that evaluations of mobile user-interfaces for medical emergencies take place in lifelike trainings, the repeatability of the evaluation is limited. The trainer cannot completely control the set of parameters in these scenarios. Consequently, the sequential, quantitative comparison of different design alternatives is subject to restrictions. 
Evaluating user-i<TextGroup><PlainText>nterfac</PlainText></TextGroup>es without requiring a quantitative comparison of different alternatives is essential (<Mark2>see B</Mark2>). </Pgraph><Pgraph><Mark1>Dominance of stress:</Mark1> The impact of stress on usability is not considered by the different standardized questionnaires. Questionnaires either focus on usability or on physical and mental demands. However, because usability depends on the task load and the mental demands are high in medical emergencies, considering the impact of stress is essential for gaining meaningful results. Furthermore, weighting the different categories is essential for evaluating mobile user-interfaces (<Mark2>see C</Mark2>). </Pgraph><Pgraph>Consequently, for the proper evaluation of user-interfaces in life-threatening, time-critical and unstable situations a new type of usability evaluation is required. We propose the conduction of a qualitative semi structured interview for three reasons: </Pgraph><Pgraph><UnorderedList><ListItem level="1">qualitative interviews <Mark1>do not depend on a baseline</Mark1> (A) &#91;The qualitative properties of user-interfaces are absolute.&#93; </ListItem><ListItem level="1">qualitative interviews <Mark1>do not require different alterna</Mark1><TextGroup><Mark1>tive</Mark1></TextGroup><Mark1>s</Mark1> (B) </ListItem><ListItem level="1">qualitative interviews <Mark1>allow the weighting of categories</Mark1> (C) </ListItem></UnorderedList></Pgraph><Pgraph>Furthermore, qualitative information is essential for improving the capabilities of mobile user-interfaces. By providing detailed qualitative information, the engineers and computer scientists can identify the weaknesses of mobile user-interfaces more easily and can improve them more effectively and efficiently. 
Consequently, the applic<TextGroup><PlainText>atio</PlainText></TextGroup>n of qualitative assessments in requirements analys<TextGroup><PlainText>is</PlainText></TextGroup>, interaction design, prototypical implementation and evaluation simplifies the overall process of developing mobile user-interfaces.</Pgraph></TextBlock>
    <TextBlock linked="yes" name="2 Related work">
      <MainHeadline>2 Related work</MainHeadline><Pgraph>In literature different standardized questionnaires are used for the evaluation of user-interfaces. In the following we present the 17 most common quantitative questionnaires with focus on <Mark2>Usability</Mark2>, <Mark2>Attractiveness</Mark2>, <Mark2>Satisfaction</Mark2>, <Mark2>Experience</Mark2> and <Mark2>Work Load</Mark2>. Some of these questionnaires categorize the different questions. All categories which are transferred to our qualitative semi structured interview are written in bold.</Pgraph><Pgraph>The <Mark2>After-Scenario Questionnaire</Mark2> consists of three questions on the user&#8217;s <Mark1>Satisfaction</Mark1> <TextLink reference="1"></TextLink>. The <Mark2>AttrakDiff</Mark2> questionnaire consists of 21 pairs of antithetic adjectives. The <Mark2>AttrakDiff</Mark2> focuses on <Mark1>Attractiveness</Mark1>, <Mark2>Hedonic Quality</Mark2> and <Mark2>Pragmatic Quality</Mark2> <TextLink reference="2"></TextLink>. The <Mark2>Computer Literacy Scale</Mark2> consists of different questions on the user&#8217;s experience with computers. The <Mark2>CLS</Mark2> focuses on <Mark2>Experience</Mark2>, <Mark2>Symbols</Mark2> and <Mark2>Terminology</Mark2> <TextLink reference="3"></TextLink>. The <Mark2>Computer System Usability Questionnaire</Mark2> consists of 19 questions on the system&#8217;s usability. The <Mark2>CSUQ</Mark2> is unstructured and does not use categories <TextLink reference="4"></TextLink>. The <Mark2>IsoMetrics</Mark2> focuses on usability in general. The <Mark2>IsoMetrics</Mark2> focuses on <Mark2>Adequacy of Tasks</Mark2>, <Mark2>Ability of Self-Characterization</Mark2>, <Mark2>Controllability</Mark2>, <Mark1>Compliance with Expectations</Mark1>, <Mark1>Error Robustness</Mark1>, <Mark1>Customizability</Mark1> and <Mark1>Learnability</Mark1> <TextLink reference="5"></TextLink>, <TextLink reference="6"></TextLink>. 
The <Mark2>IsoMetrics</Mark2> questionnaire is based on the <Mark2>Isonorm 9241-10</Mark2>, so the structure is quite similar. The categories are identical to <Mark2>IsoMetrics</Mark2> &#8211; except <Mark2>Fault Tolerance</Mark2> (instead of <Mark1>Error Robustness</Mark1>) and <Mark2>Ease of Learning</Mark2> (instead of <Mark1>Learnability</Mark1>) <TextLink reference="7"></TextLink>. The <Mark2>Nielsen&#8217;s Attrib</Mark2><TextGroup><Mark2>ute</Mark2></TextGroup><Mark2>s of Usability</Mark2> consist of 5 different categories: <Mark1>Learnability</Mark1>, <Mark1>Efficiency</Mark1>, <Mark1>Memorability</Mark1>, <Mark1>Errors</Mark1> and <Mark1>Subject</Mark1><TextGroup><Mark1>iv</Mark1></TextGroup><Mark1>e Satisfaction</Mark1> <TextLink reference="8"></TextLink>. The <Mark2>Nielsen&#8217;s Heuristic Evaluation</Mark2> consists of 10 questions which result in a heuristic guideline. The <Mark2>NHE</Mark2> is unstructured and does not use categories <TextLink reference="9"></TextLink>, <TextLink reference="10"></TextLink>, <TextLink reference="11"></TextLink>. The <Mark2>Practical Heuristics for Usability Evaluation</Mark2> consist of a heuristic guideline with 13 questions. The <Mark2>PHUE</Mark2> focus on <Mark1>Learning</Mark1>, <Mark2>Adapting to the user</Mark2>, <Mark1>Feedback</Mark1> and <Mark1>Errors</Mark1> <TextLink reference="12"></TextLink>. The <Mark2>Perceived Usefulness and Ease of Use</Mark2> questionnaire consists of 12 questions on <Mark2>Usefulness</Mark2> and <Mark1>Ease of Use</Mark1> <TextLink reference="13"></TextLink>. The <Mark2>Purdue Usability Testing Questionnaire</Mark2> consists of 100 questions in different categories. 
The <Mark2>PUTQ</Mark2> focuses on <Mark2>Compatibility</Mark2>, <Mark1>Consist</Mark1><TextGroup><Mark1>enc</Mark1></TextGroup><Mark1>y</Mark1>, <Mark1>Flexibility</Mark1>, <Mark1>Learnability</Mark1>, <Mark2>Minimal Action</Mark2>, <Mark2>Minimal Memory Load</Mark2>, <Mark2>Perceptual Limitation</Mark2> and <Mark1>User Guidance</Mark1> <TextLink reference="14"></TextLink>. The <Mark2>Questionnaire for User Interface Satisfaction</Mark2> consists of 27 questions on <Mark1>Satisfaction</Mark1>. The <Mark2>QUIS</Mark2> focuses on <Mark2>Overall Reaction</Mark2>, <Mark2>Screen</Mark2>, <Mark2>Terminology</Mark2>, <Mark2>System Information</Mark2>, <Mark1>Learning</Mark1> and <Mark2>System Capabilities</Mark2> <TextLink reference="15"></TextLink>. The <Mark2>Software Usability Measurement Inventory</Mark2> consists of 50 questions on usability in general. The <Mark2>SUMI</Mark2> is unstructured and does not use categories <TextLink reference="16"></TextLink>. The <Mark2>System Usa</Mark2><TextGroup><Mark2>bilit</Mark2></TextGroup><Mark2>y Scale</Mark2> consists of 10 questions on usability. The <Mark2>SUS</Mark2> is unstructured and does not use categories <TextLink reference="17"></TextLink>. The <Mark2>NASA Task Load Index</Mark2> consists of 6 questions on work load. The <Mark2>NASA-TLX</Mark2> is unstructured and does not use categories <TextLink reference="18"></TextLink>. The <Mark2>User Experience Questionnaire</Mark2> consists of 26 pairs of antithetic adjectives. The <Mark2>UEQ</Mark2> focuses on <Mark1>Attractiveness</Mark1>, <Mark1>Perspicuity</Mark1>, <Mark1>Novelty</Mark1>, <Mark2>Stimulation</Mark2> and <Mark1>Dependability</Mark1> <TextLink reference="19"></TextLink>. The <Mark2>USE Questionnaire</Mark2> consists of 30 questions on general usability. The <Mark2>USEQ</Mark2> focuses on <Mark2>Usefulness</Mark2>, <Mark1>Ease of Use</Mark1>, <Mark1>Ease of Learning</Mark1> and <Mark1>Satisfaction</Mark1> <TextLink reference="20"></TextLink>. 
</Pgraph><Pgraph>When taking a closer look at the categories from these questionnaires it becomes clear that these various questionnaires are not selective. Several aspects, however, such as <Mark1>Stress</Mark1>, <Mark1>Experience</Mark1> or <Mark1>User Guidance</Mark1>, are only considered by one questionnaire.</Pgraph></TextBlock>
    <TextBlock linked="yes" name="3 Method">
      <MainHeadline>3 Method</MainHeadline><Pgraph>For the qualitative evaluation of user-interfaces we make use of a method which comes from social science: qualitative interviews. We combined this method with a structured literature review. According to <TextLink reference="21"></TextLink> these qualitative interviews take the perspective of the subjects into consideration. Besides the parameters the underlying causes of decisions are subject of the evaluation. The used methods are legitimated by their contribution to the solution of a research question. The semi-structured interviews base on a set of problems, each of these problems consists of a set of questions. These questions are orally answered by the subject and the interview is documented on a voice recorder as described by <TextLink reference="22"></TextLink>, <TextLink reference="23"></TextLink>. The interviewer uses open-ended questions and avoids interrupting the subject. The aim of the interview is to discuss all problems with the subject. &#91;Usually it is not necessary to ask the complete set of questions to cover all problems.&#93; </Pgraph><Pgraph>Although this method is quite well known and successful in social sciences, it has not found its way into usability research. Therefore, we transferred this method from social sciences to usability research. Our qualitative interview on usability was developed on the basis of an extens<TextGroup><PlainText>iv</PlainText></TextGroup>e brainstorming process in combination with creating a detailed associagram. During the brainstorming we made use of the quantitative questionnaires on usability. Furthermore, the categories of the quantitative questionnaires were utilized in our associagram. Due to the fact that we started from the scratch we could design a method which is independent from a baseline (<Mark2>see A</Mark2>) and does not rely on different alternatives (<Mark2>see B</Mark2>). 
The adaption of the questionnaire to various fields of applic<TextGroup><PlainText>atio</PlainText></TextGroup>n can be guaranteed by a flexible weighting of different categories (<Mark2>see C</Mark2>). Consequently, this method can flexibly be adapted to specific requirements. </Pgraph><Pgraph>Qualitative evaluation is not limited to an effectivity control, assessing the value of components or the overall object is of equal importance. The evaluation has to conform with critical-rational demands as well as with ethic-moral standards. In general an evaluation can have four different aims <TextLink reference="24"></TextLink>: (R.1) facilitating insights, (R.2) reaching decisions, (R.3) legitimating decisions and (R.4) optimizing objects. In usability engineering the aspects (R.1) and (R.4) are of special importance during the p<TextGroup><PlainText>rotot</PlainText></TextGroup>yping phase. </Pgraph><Pgraph><TextLink reference="21"></TextLink> describes the general principles for preparing qualitative interviews: (1) The research question has to be concretized, (2) the questions have to be selected by a team of experts and (3) questions have to be formulated open-ended. Furthermore, the evaluation design has to consider the following questions: (D.1) How many subjects should be interviewed, (D.2) how are these subjects chosen, (D.3) when should the evaluation take place, (D.4) how are the interviews recorded and (D.5) how will the transliteration be done. The most popular literature on the number of subjects was published by <TextLink reference="25"></TextLink>, <TextLink reference="26"></TextLink>, <TextLink reference="27"></TextLink> and <TextLink reference="28"></TextLink>. In these publications a mathematical model is developed which enables the user-interface designer to calculate the optimal sample size: <Mark2>U</Mark2>&#61;1&#8211;(1&#8211;<Mark2>p</Mark2>)<Mark2><Superscript>n</Superscript></Mark2>. 
The probability to detect a problem (U) depends on the probability (p) that a subject identifies a problem and the number (n) of subjects. Due to the fact that the probability (p) is not known for qualitative usability evaluations, we have to rely on their general heuristics: (1) most usability problems are detected with three to five subjects, (2) it is unlikely that additional subjects reveal new information and (3) most severe usability problems are detected by the first few subjects. Consequently, we use three to five subjects for the qualitative usability evaluations (D.1). We choose this set of subjects randomly from the group of our end users (D.2). The evaluations take place in each iteration (D.3). &#91;An iteration consists of requirements analysis, interaction design, prototypic implementation and evaluation.&#93; The interviews are documented by voice recorders (D.4) and are manually transliterated (D.5).</Pgraph><Pgraph>After the transliteration of the interviews, the different statements are categorized according to the following rules: (1) categories are terms, (2) categories are deduced from the aims and research questions, (3) categories should neither be slender nor extensive and (4) categories have to be selective <TextLink reference="29"></TextLink>. According to <TextLink reference="24"></TextLink> the following requirements have to be taken into consideration in order to get methodically dependable and valid evaluation results: (1) The individual cases are part of the research process, (2) research process is open for revision and extension, (3) the general procedure is lead by a set rules, (4) research processes are seen as an interaction, (5) the objects are analyzed holistically and (6) generalization is demonstrated by arguments. Consequently, the pres<TextGroup><PlainText>ente</PlainText></TextGroup>d set of problems and questions is always subject of further research. </Pgraph></TextBlock>
    <TextBlock linked="yes" name="4 Results">
      <MainHeadline>4 Results</MainHeadline><Pgraph>The combination of existing quantitative questionnaires on usability and qualitative research methods leads to semi structured interviews on usability. In the following the resulting categories and questions of the qualitative interview are described. The process of generating categories and open-ended questions was performed according to the process from <TextLink reference="21"></TextLink> as presented above. </Pgraph><SubHeadline>4.1 Categories </SubHeadline><Pgraph>On the basis of these questionnaires three usability experts conjointly identified five main categories for the qualitative interview: <Mark1>(U) Utility</Mark1>, <Mark1>(J) Intuitiveness</Mark1>, <Mark1>(M) Memorability</Mark1> <TextLink reference="8"></TextLink>, <Mark1>(L) Learnability</Mark1> <TextLink reference="5"></TextLink>, <TextLink reference="6"></TextLink>, <TextLink reference="7"></TextLink>, <TextLink reference="8"></TextLink>, <TextLink reference="12"></TextLink>, <TextLink reference="14"></TextLink>, <TextLink reference="15"></TextLink>, <TextLink reference="20"></TextLink> and <Mark1>(P) Personal Effect</Mark1>. These main five categories are subdivided in four to five sub-categories each: The <Mark1>(U) Utility</Mark1> is classified in: (U-I) Dependability <TextLink reference="5"></TextLink>, <TextLink reference="6"></TextLink>, <TextLink reference="7"></TextLink>, <TextLink reference="19"></TextLink> (U-II) Ease of Use <TextLink reference="13"></TextLink>, <TextLink reference="20"></TextLink>, (U-III) Effect<TextGroup><PlainText>iven</PlainText></TextGroup>ess, (U-IV) Efficiency <TextLink reference="8"></TextLink> and (U-V) Productivity. 
The <Mark1>(J) Intuitiveness</Mark1> is classified in: (J-I) Affordance <TextLink reference="5"></TextLink>, <TextLink reference="6"></TextLink>, <TextLink reference="7"></TextLink>, (J-II) Expectations <TextLink reference="5"></TextLink>, <TextLink reference="6"></TextLink>, <TextLink reference="7"></TextLink>, (J-III) Conventions and (J-IV) Transparency. The <Mark1>(M) Memorability</Mark1> is classified in: (M-I) Consistency <TextLink reference="14"></TextLink>, (M-II) Customizability <TextLink reference="5"></TextLink>, <TextLink reference="6"></TextLink>, <TextLink reference="7"></TextLink>, <TextLink reference="14"></TextLink>, (M-III) Complexity and (M-IV) Perspicuity <TextLink reference="19"></TextLink>. The <Mark1>(L) Learnability</Mark1> is classified in: (L-I) Error handling <TextLink reference="5"></TextLink>, <TextLink reference="6"></TextLink>, <TextLink reference="7"></TextLink>, <TextLink reference="8"></TextLink>, <TextLink reference="12"></TextLink>, (L-II) Feedback <TextLink reference="12"></TextLink>, (L-III) Help and (L-IV) User Guidance <TextLink reference="14"></TextLink>. The <Mark1>(P) Personal Effect</Mark1> is classified in: (P-I) Attractiveness <TextLink reference="2"></TextLink>, <TextLink reference="19"></TextLink>, (P-II) Novelty <TextLink reference="19"></TextLink>, (P-III) Satisfaction <TextLink reference="1"></TextLink>, <TextLink reference="8"></TextLink>, <TextLink reference="15"></TextLink>, <TextLink reference="20"></TextLink> and (P-IV) Stress. </Pgraph><Pgraph>These categories fulfill the major requirements from <TextLink reference="21"></TextLink>: The categories are terms, are deduced from the research question and are selective. These terms are directly taken from the different questionnaires. Due to the fact that these usability questionnaires deal with our research question, we could prove that the categories are deduced from the research question. 
Furthermore, the concordant identification of the five main categories by three usability experts indicates the selectiveness of these categories. The question whether these categories are neither slender nor extensive has to be proven within the scope of an evaluation. Table 1 <ImgLink imgNo="1" imgType="table"/> gives an overview on all categories and sub-categories. </Pgraph><SubHeadline>4.2 Questions </SubHeadline><Pgraph>In the next step three usability experts conjointly assigned all questions from the qualitative usability questionnaires to the different categories. When the assignment was ambiguous, the question was removed from the qualitative semi structured interview. &#91;An unambiguous assignment requires at least the same categorization by two of the three experts.&#93;</Pgraph><Pgraph>The resulting semi structured interview is shown in the appendix (Attachment 1 <AttachmentLink attachmentNo="1"/>). We followed the principles from <TextLink reference="21"></TextLink>: The concrete research question is <Mark1>Usability</Mark1>, the questions were selected by a team of three experts with regard to the research question and finally the closed-ended questions were rephrased to open-ended questions. This rephrasing is necessary to be able to use these questions as a guideline for the semi-structured interview. In each of the categories several different questions are available and the interviewer is free to choose a subset of questions which fits best for the concrete user-interface. As an alternative the team of interviewers can pre-select a subset of questions in the run-up to the evaluation. </Pgraph></TextBlock>
    <TextBlock linked="yes" name="5 Discussion">
      <MainHeadline>5 Discussion</MainHeadline><Pgraph>The qualitative evaluation provides a detailed assessment of the quality of a mobile user-interface. According to <TextLink reference="30"></TextLink> a quantification of research results is important. A quantitative score, however, is not directly deduced by the proposed method. <TextLink reference="24"></TextLink> proposes a quantitative analy<TextGroup><PlainText>si</PlainText></TextGroup>s of the qualitative evaluation to receive the required quantitative data. In this analysis the transliterated statements from the interview are sorted by experts into the categories and sub-categories. This quantitative summarization of the qualitative evaluation is the basis for the quantification of the research results according to <TextLink reference="30"></TextLink>. The summarized data is adjusted on a 3-point scale: (a) positive comment (<Mark2>1.0</Mark2>), (b) neutral comment (<Mark2>0.5</Mark2>) and (c) negative comment (<Mark2>0.0</Mark2>). The mean value over all statements in the same sub-categories is calculated. As a result we receive a quantitative rating of all sub-categories on a scale from 0.0 to 1.0. </Pgraph><Pgraph>After calculating these scales, an usability scale which is application specific can be calculated by weighting these categories. The categories for the quantitative summariz<TextGroup><PlainText>atio</PlainText></TextGroup>n are shown in Table 1 <ImgLink imgNo="1" imgType="table"/>. The balanced weighting of the categories and sub-categories from Table 1 <ImgLink imgNo="1" imgType="table"/> is shown in Table 2 <ImgLink imgNo="2" imgType="table"/>. When performing general evaluations of user-interfaces this weighting leads to a quantitative one-dimensional usability value &#8211; besides the qualitative results. </Pgraph><Pgraph>We argued that weighting the different sub-categories is essential due to the dominance of stress in emergencies. 
Therefore, we included a weighting of the qualitative results in dependence to the research question. For the evaluation of mobile user-interfaces for emergencies the experts changed the weighting with regard to the research question as shown in Table 3 <ImgLink imgNo="3" imgType="table"/>. The <Mark1>Utility</Mark1> and the <Mark1>Personal Effect</Mark1> is of special importance in emergencies <TextLink reference="31"></TextLink>. Therefore, the weights for these categories where increased. <Mark1>Stress</Mark1> is dominant in emergencies, whereas <Mark1>Attractiveness</Mark1> and <Mark1>Novelty</Mark1> is of lower importance. Therefore, the weight for <Mark1>Stress</Mark1> was increased as well. From previous requirements analyses we know that <Mark1>Customization</Mark1> is difficult in emergencies <TextLink reference="31"></TextLink>, <TextLink reference="32"></TextLink>. Consequently, customization is not considered in the emergency specific usability value. </Pgraph><Pgraph>Finally, the usability score is calculated by multiplying all weights <Mark2>w(s)</Mark2> with the quantitative scores <Mark2>v(s)</Mark2> of the sub-categories <Mark2>s:</Mark2> see Figure 1 <ImgLink imgNo="1" imgType="figure"/>.</Pgraph><Pgraph></Pgraph><Pgraph>Besides the general usability score <Mark2>U</Mark2>, more specific scores <Mark2>U</Mark2><Mark2><Subscript>c</Subscript></Mark2> for each category <Mark2>c</Mark2> can be calculated &#8211; as described above. Each category <Mark2>c</Mark2> consists of a set of subcategories <Mark2>S(c)</Mark2>. For the calculation of these specific scores the weights <Mark2>w</Mark2> have to be normalized. The score <Mark2>U</Mark2> as well as the scores <Mark2>U</Mark2><Mark2><Subscript>c</Subscript></Mark2> go from 0 to 100, due to the fact that the sum of all weights is 100.</Pgraph></TextBlock>
    <TextBlock linked="yes" name="6 Conclusion and future work">
      <MainHeadline>6 Conclusion and future work</MainHeadline><Pgraph>The qualitative usability evaluation leads to a benefit for developers, because detailed qualitative information is provided. This qualitative information helps to identify the weaknesses of mobile user-interfaces more easily. Nevertheless a quantitative usability score can be provided as well and the qualitative usability evaluation has no disadvantages &#8211; but many benefits &#8211; as opposed to the quantitative usability evaluation. Furthermore, the quantitative score can flexibly be adapted to the concrete research question &#91;in our case: mobile user-interfaces for emergencies&#93;. </Pgraph><Pgraph>We will use our qualitative interviews to get deeper insights in the human-computer interaction in emergencies (R.1, see above). Furthermore, the existing research method is iteratively improved by all these qualitative evaluations due to the fact that a re-categorization of the answers is performed subsequent to every interview. The questionnaire can be simplified by reducing the number of questions and can be customized by weighting the different categories. Due to the fact that a small set of subjects (three to five) is used in qualitative evaluations, our evaluation efforts are reduced significantly. </Pgraph><Pgraph>In the future we expect an intensive use of qualitative usability evaluations in the ubiquitous computing domain because of the following reasons: </Pgraph><Pgraph><UnorderedList><ListItem level="1">ubiquitous applications <Mark1>are new and innovative</Mark1> </ListItem><ListItem level="1">ubiquitous applications <Mark1>are frequently evaluated in real-life scenarios</Mark1> </ListItem><ListItem level="1">ubiquitous applications <Mark1>are developed domain-specific</Mark1> </ListItem></UnorderedList></Pgraph><Pgraph>When building new and innovative ubiquitous applications, the comparison with existing applications is often difficult. 
On the one hand innovative applications exceed the capabilities and functionalities of existing ones and on the other hand subjects are more familiar with existing applications. Consequently, these qualitative, comparative evaluations are often inhomogeneous in the ubiquitous computing domain. In our impression, qualitative assessment of the attributes is more promising with regard to the effective improvement of the ubiquitous application. When evaluating in real-life or lifelike scenarios, the comparability of successive runs is limited. When the ubiquitous application focuses on a specific domain, the usage of standardized questionnaires is complicated. Consequently, the flexible customizability of the qualitative evaluation is a strong argument for its future, more intense application. </Pgraph></TextBlock>
    <TextBlock linked="yes" name="Notes">
      <MainHeadline>Notes</MainHeadline><SubHeadline>Competing interests</SubHeadline><Pgraph>The authors declare that they have no competing interests.</Pgraph></TextBlock>
    <References linked="yes">
      <Reference refNo="17">
        <RefAuthor>Brooke J</RefAuthor>
        <RefTitle>SUS: A &#34;quick and dirty&#34; usability scale</RefTitle>
        <RefYear>1996</RefYear>
        <RefBookTitle>Usability evaluation in industry</RefBookTitle>
        <RefPage>189-194</RefPage>
        <RefTotal>Brooke J. SUS: A &#34;quick and dirty&#34; usability scale. In: Jordan PW, Thomas B, Weerdmeester BA, McClelland AL, eds. Usability evaluation in industry. London: Taylor and Francis; 1996. p. 189-194.</RefTotal>
      </Reference>
      <Reference refNo="15">
        <RefAuthor>Chin JP</RefAuthor>
        <RefAuthor>Diehl VA</RefAuthor>
        <RefAuthor>Norman KL</RefAuthor>
        <RefTitle>Development of an instrument measuring user satisfaction of the human-computer interface</RefTitle>
        <RefYear>1988</RefYear>
        <RefBookTitle>Proceedings of the ACM CHI 88 Human Factors in Computing Systems Conference</RefBookTitle>
        <RefPage>213-218</RefPage>
        <RefTotal>Chin JP, Diehl VA, Norman KL. Development of an instrument measuring user satisfaction of the human-computer interface. In: Soloway E, Frye D, Sheppard SB, eds. Proceedings of the ACM CHI 88 Human Factors in Computing Systems Conference; 1988 June 15-19; Washington. New York: ACM; 1988. p. 213-218.</RefTotal>
      </Reference>
      <Reference refNo="13">
        <RefAuthor>Davis FD</RefAuthor>
        <RefTitle>Perceived usefulness, perceived ease of use, and user acceptance of information technology</RefTitle>
        <RefYear>1989</RefYear>
        <RefJournal>MIS quarterly</RefJournal>
        <RefPage>319-40</RefPage>
        <RefTotal>Davis FD. Perceived usefulness, perceived ease of use, and user acceptance of information technology. MIS quarterly. 1989;13(3):319-40. DOI: 10.2307&#47;249008</RefTotal>
        <RefLink>http:&#47;&#47;dx.doi.org&#47;10.2307&#47;249008</RefLink>
      </Reference>
      <Reference refNo="5">
        <RefAuthor>Gediga G</RefAuthor>
        <RefAuthor>Hamborg KC</RefAuthor>
        <RefTitle>IsoMetrics: An usability inventory supporting summative and formative evaluation of software systems</RefTitle>
        <RefYear>1999</RefYear>
        <RefBookTitle>Human-Computer Interaction: Communication, Cooperation, and Application Design, Proceedings of HCI International &#39;99</RefBookTitle>
        <RefPage>1018-1022</RefPage>
        <RefTotal>Gediga G, Hamborg KC. IsoMetrics: An usability inventory supporting summative and formative evaluation of software systems. In: Bullinger HJ, Ziegler J, eds. Human-Computer Interaction: Communication, Cooperation, and Application Design, Proceedings of HCI International &#39;99; 1999 August 22-26; Munich, Germany. Mahwah, NJ: Lawrence Erlbaum; 1999. p. 1018-1022.</RefTotal>
      </Reference>
      <Reference refNo="6">
        <RefAuthor>Gediga G</RefAuthor>
        <RefAuthor>Hamborg KC</RefAuthor>
        <RefAuthor>D&#252;ntsch I</RefAuthor>
        <RefTitle>The IsoMetrics usability inventory</RefTitle>
        <RefYear>1999</RefYear>
        <RefJournal>Behaviour &#38; Information Technology</RefJournal>
        <RefPage>151-64</RefPage>
        <RefTotal>Gediga G, Hamborg KC, D&#252;ntsch I. The IsoMetrics usability inventory. Behaviour &#38; Information Technology. 1999;18(3):151-64. DOI: 10.1080&#47;014492999119057</RefTotal>
        <RefLink>http:&#47;&#47;dx.doi.org&#47;10.1080&#47;014492999119057</RefLink>
      </Reference>
      <Reference refNo="2">
        <RefAuthor>Hassenzahl M</RefAuthor>
        <RefAuthor>Burmester M</RefAuthor>
        <RefAuthor>Koller F</RefAuthor>
        <RefTitle>AttrakDiff: Ein Fragebogen zur Messung wahrgenommener hedonischer und pragmatischer Qualit&#228;t</RefTitle>
        <RefYear>2003</RefYear>
        <RefBookTitle>Mensch &#38; Computer 2003: Interaktion in Bewegung</RefBookTitle>
        <RefPage>187-196</RefPage>
        <RefTotal>Hassenzahl M, Burmester M, Koller F. AttrakDiff: Ein Fragebogen zur Messung wahrgenommener hedonischer und pragmatischer Qualit&#228;t. In: Ziegler J, Szwillus G, eds. Mensch &#38; Computer 2003: Interaktion in Bewegung. Stuttgart: B.G. Teubner; 2003. p. 187-196.</RefTotal>
      </Reference>
      <Reference refNo="22">
        <RefAuthor>Holtzblatt K</RefAuthor>
        <RefAuthor>Jones S</RefAuthor>
        <RefTitle>Contextual inquiry: A participatory technique for system design</RefTitle>
        <RefYear>1993</RefYear>
        <RefBookTitle>Participatory design: Principles and practices</RefBookTitle>
        <RefPage>177-210</RefPage>
        <RefTotal>Holtzblatt K, Jones S. Contextual inquiry: A participatory technique for system design. In: Schuler D, Namioka A, eds. Participatory design: Principles and practices. Mahwah, NJ: Lawrence Erlbaum; 1993. p. 177-210.</RefTotal>
      </Reference>
      <Reference refNo="18">
        <RefAuthor>Hart SG</RefAuthor>
        <RefAuthor>Staveland LE</RefAuthor>
        <RefTitle>Development of NASA-TLX (Task Load Index): Results of empirical and theoretical research</RefTitle>
        <RefYear>1988</RefYear>
        <RefJournal>Adv Psychol</RefJournal>
        <RefPage>139-83</RefPage>
        <RefTotal>Hart SG, Staveland LE. Development of NASA-TLX (Task Load Index): Results of empirical and theoretical research. Adv Psychol. 1988;52:139-83. DOI: 10.1016&#47;S0166-4115(08)62386-9</RefTotal>
        <RefLink>http:&#47;&#47;dx.doi.org&#47;10.1016&#47;S0166-4115(08)62386-9</RefLink>
      </Reference>
      <Reference refNo="21">
        <RefAuthor>Kuckartz U</RefAuthor>
        <RefAuthor>Dresing T</RefAuthor>
        <RefAuthor>Stefer C</RefAuthor>
        <RefAuthor>R&#228;diker S</RefAuthor>
        <RefTitle></RefTitle>
        <RefYear>2007</RefYear>
        <RefBookTitle>Qualitative Evaluation: der Einstieg in die Praxis</RefBookTitle>
        <RefPage></RefPage>
        <RefTotal>Kuckartz U, Dresing T, Stefer C, R&#228;diker S. Qualitative Evaluation: der Einstieg in die Praxis. Wiesbaden: VS Verlag f&#252;r Sozialwissenschaft; 2007.</RefTotal>
      </Reference>
      <Reference refNo="24">
        <RefAuthor>Kinast EU</RefAuthor>
        <RefTitle></RefTitle>
        <RefYear>1998</RefYear>
        <RefBookTitle>Evaluation interkultureller Trainings</RefBookTitle>
        <RefPage></RefPage>
        <RefTotal>Kinast EU. Evaluation interkultureller Trainings. Lengerich: Pabst; 1998.</RefTotal>
      </Reference>
      <Reference refNo="16">
        <RefAuthor>Kirakowski J</RefAuthor>
        <RefTitle>The software usability measurement inventory: background and usage</RefTitle>
        <RefYear>1996</RefYear>
        <RefBookTitle>Usability evaluation in industry</RefBookTitle>
        <RefPage>169-178</RefPage>
        <RefTotal>Kirakowski J. The software usability measurement inventory: background and usage. In: Jordan P, Thomas B, Weerdmeester B, eds. Usability evaluation in industry. London: Taylor and Francis; 1996. p. 169-178.</RefTotal>
      </Reference>
      <Reference refNo="14">
        <RefAuthor>Lin HX</RefAuthor>
        <RefAuthor>Choong YY</RefAuthor>
        <RefAuthor>Salvendy G</RefAuthor>
        <RefTitle>A proposed index of usability: a method for comparing the relative usability of different software systems</RefTitle>
        <RefYear>1997</RefYear>
        <RefJournal>Behav Inf Technol</RefJournal>
        <RefPage>267-77</RefPage>
        <RefTotal>Lin HX, Choong YY, Salvendy G. A proposed index of usability: a method for comparing the relative usability of different software systems. Behav Inf Technol. 1997;16(4):267-77. DOI: 10.1080&#47;014492997119833</RefTotal>
        <RefLink>http:&#47;&#47;dx.doi.org&#47;10.1080&#47;014492997119833</RefLink>
      </Reference>
      <Reference refNo="1">
        <RefAuthor>Lewis JR</RefAuthor>
        <RefTitle>Psychometric evaluation of an after-scenario questionnaire for computer usability studies: the ASQ</RefTitle>
        <RefYear>1991</RefYear>
        <RefJournal>ACM SIGCHI Bulletin</RefJournal>
        <RefPage>78-81</RefPage>
        <RefTotal>Lewis JR. Psychometric evaluation of an after-scenario questionnaire for computer usability studies: the ASQ. ACM SIGCHI Bulletin. 1991;23(1):78-81. DOI: 10.1145&#47;122672.122692</RefTotal>
        <RefLink>http:&#47;&#47;dx.doi.org&#47;10.1145&#47;122672.122692</RefLink>
      </Reference>
      <Reference refNo="27">
        <RefAuthor>Lewis JR</RefAuthor>
        <RefTitle>Sample sizes for usability studies: Additional considerations</RefTitle>
        <RefYear>1994</RefYear>
        <RefJournal>Hum Factors</RefJournal>
        <RefPage>368-78</RefPage>
        <RefTotal>Lewis JR. Sample sizes for usability studies: Additional considerations. Hum Factors. 1994;36(2):368-78.</RefTotal>
      </Reference>
      <Reference refNo="4">
        <RefAuthor>Lewis JR</RefAuthor>
        <RefTitle>IBM computer usability satisfaction questionnaires: psychometric evaluation and instructions for use</RefTitle>
        <RefYear>1995</RefYear>
        <RefJournal>Int J Hum Comput Interact</RefJournal>
        <RefPage>57-78</RefPage>
        <RefTotal>Lewis JR. IBM computer usability satisfaction questionnaires: psychometric evaluation and instructions for use. Int J Hum Comput Interact. 1995;7(1):57-78. DOI: 10.1080&#47;10447319509526110</RefTotal>
        <RefLink>http:&#47;&#47;dx.doi.org&#47;10.1080&#47;10447319509526110</RefLink>
      </Reference>
      <Reference refNo="28">
        <RefAuthor>Lewis JR</RefAuthor>
        <RefTitle>Usability testing</RefTitle>
        <RefYear>2006</RefYear>
        <RefBookTitle>Handbook of human factors and ergonomics</RefBookTitle>
        <RefPage>1275-1316</RefPage>
        <RefTotal>Lewis JR. Usability testing. In: Salvendy G, ed. Handbook of human factors and ergonomics. New York: Wiley; 2006. p. 1275-1316. DOI: 10.1002&#47;0470048204.ch49</RefTotal>
        <RefLink>http:&#47;&#47;dx.doi.org&#47;10.1002&#47;0470048204.ch49</RefLink>
      </Reference>
      <Reference refNo="19">
        <RefAuthor>Laugwitz B</RefAuthor>
        <RefAuthor>Held T</RefAuthor>
        <RefAuthor>Schrepp M</RefAuthor>
        <RefTitle>Construction and Evaluation of a User Experience Questionnaire</RefTitle>
        <RefYear>2008</RefYear>
        <RefJournal>Lect Notes Comput Sci</RefJournal>
        <RefPage>63-76</RefPage>
        <RefTotal>Laugwitz B, Held T, Schrepp M. Construction and Evaluation of a User Experience Questionnaire. Lect Notes Comput Sci. 2008;5298:63-76. DOI: 10.1007&#47;978-3-540-89350-9&#95;6</RefTotal>
        <RefLink>http:&#47;&#47;dx.doi.org&#47;10.1007&#47;978-3-540-89350-9&#95;6</RefLink>
      </Reference>
      <Reference refNo="20">
        <RefAuthor>Lund A</RefAuthor>
        <RefTitle>Measuring Usability with the USE Questionnaire</RefTitle>
        <RefYear>2001</RefYear>
        <RefJournal>Usability &#38; User Experience</RefJournal>
        <RefPage></RefPage>
        <RefTotal>Lund A. Measuring Usability with the USE Questionnaire. Usability &#38; User Experience. 2001;8(2).</RefTotal>
      </Reference>
      <Reference refNo="30">
        <RefAuthor>Mayring P</RefAuthor>
        <RefTitle></RefTitle>
        <RefYear>1993</RefYear>
        <RefBookTitle>Einf&#252;hrung in die qualitative Sozialforschung: Eine Anleitung zu qualitativem Denken</RefBookTitle>
        <RefPage></RefPage>
        <RefTotal>Mayring P. Einf&#252;hrung in die qualitative Sozialforschung: Eine Anleitung zu qualitativem Denken. 2nd ed. Weinheim: Beltz, Psychologie-Verl.-Union; 1993.</RefTotal>
      </Reference>
      <Reference refNo="29">
        <RefAuthor>Mayring P</RefAuthor>
        <RefAuthor>Gl&#228;ser-Zikuda M</RefAuthor>
        <RefTitle></RefTitle>
        <RefYear>2008</RefYear>
        <RefBookTitle>Die Praxis der Qualitativen Inhaltsanalyse</RefBookTitle>
        <RefPage></RefPage>
        <RefTotal>Mayring P, Gl&#228;ser-Zikuda M, eds. Die Praxis der Qualitativen Inhaltsanalyse. 2nd ed. Weinheim: Beltz, Psychologie-Verl.-Union; 2008.</RefTotal>
      </Reference>
      <Reference refNo="32">
        <RefAuthor>Nestler S</RefAuthor>
        <RefTitle></RefTitle>
        <RefYear>2008</RefYear>
        <RefBookTitle>Einsatzorganisation in Katastrophen: Unterst&#252;tzung von Einsatzkr&#228;ften durch mobile User-Interfaces</RefBookTitle>
        <RefPage></RefPage>
        <RefTotal>Nestler S. Einsatzorganisation in Katastrophen: Unterst&#252;tzung von Einsatzkr&#228;ften durch mobile User-Interfaces. Saarbr&#252;cken: VDM Verlag Dr. M&#252;ller; 2008.</RefTotal>
      </Reference>
      <Reference refNo="31">
        <RefAuthor>Nestler S</RefAuthor>
        <RefTitle></RefTitle>
        <RefYear>2010</RefYear>
        <RefBookTitle>Design, Implementation and Evaluation of User-Interfaces for lifethreatening, time-critical and unstable Situations</RefBookTitle>
        <RefPage></RefPage>
        <RefTotal>Nestler S. Design, Implementation and Evaluation of User-Interfaces for lifethreatening, time-critical and unstable Situations &#91;dissertation&#93;. M&#252;nchen: Technische Universit&#228;t M&#252;nchen; 2010.</RefTotal>
      </Reference>
      <Reference refNo="8">
        <RefAuthor>Nielsen J</RefAuthor>
        <RefTitle></RefTitle>
        <RefYear>1993</RefYear>
        <RefBookTitle>Usability engineering</RefBookTitle>
        <RefPage></RefPage>
        <RefTotal>Nielsen J. Usability engineering. Boston: AP Professional; 1993.</RefTotal>
      </Reference>
      <Reference refNo="10">
        <RefAuthor>Nielsen J</RefAuthor>
        <RefTitle>Enhancing the explanatory power of usability heuristics</RefTitle>
        <RefYear>1994</RefYear>
        <RefBookTitle>Proceedings of the SIGCHI conference on Human factors in computing systems: Celebrating Interdependence</RefBookTitle>
        <RefPage>158</RefPage>
        <RefTotal>Nielsen J. Enhancing the explanatory power of usability heuristics. In: Adelson B, Dumais S, Olson JS, eds. Proceedings of the SIGCHI conference on Human factors in computing systems: Celebrating Interdependence; 1994 April 24-28; Boston. New York: ACM; 1994. p. 158. DOI: 10.1145&#47;191666.191729</RefTotal>
        <RefLink>http:&#47;&#47;dx.doi.org&#47;10.1145&#47;191666.191729</RefLink>
      </Reference>
      <Reference refNo="11">
        <RefAuthor>Nielsen J</RefAuthor>
        <RefTitle>Usability inspection methods</RefTitle>
        <RefYear>1994</RefYear>
        <RefBookTitle>Conference on Human factors in computing systems</RefBookTitle>
        <RefPage>413-414</RefPage>
        <RefTotal>Nielsen J. Usability inspection methods. In: Plaisant C, ed. Conference on Human factors in computing systems; 1994 April 24-28; Boston. Conference Companion. New York: ACM; 1994. p. 413-414.</RefTotal>
      </Reference>
      <Reference refNo="25">
        <RefAuthor>Nielsen J</RefAuthor>
        <RefAuthor>Landauer TK</RefAuthor>
        <RefTitle>A mathematical model of the finding of usability problems</RefTitle>
        <RefYear>1993</RefYear>
        <RefBookTitle>Human-Computer Interaction, INTERACT &#39;93, IFIP TC13 International Conference on Human-Computer Interaction, jointly organised with ACM Conference on Human Aspects in Computing Systems CHI&#39;93</RefBookTitle>
        <RefPage>206-213</RefPage>
        <RefTotal>Nielsen J, Landauer TK. A mathematical model of the finding of usability problems. In: Ashlund S, Mullet K, Henderson A, Hollnagel E, White TN, eds. Human-Computer Interaction, INTERACT &#39;93, IFIP TC13 International Conference on Human-Computer Interaction, jointly organised with ACM Conference on Human Aspects in Computing Systems CHI&#39;93; 1993 April 24-29; Amsterdam. New York: ACM; 1993. p. 206-213.</RefTotal>
      </Reference>
      <Reference refNo="9">
        <RefAuthor>Nielsen J</RefAuthor>
        <RefAuthor>Molich R</RefAuthor>
        <RefTitle>Heuristic evaluation of user interfaces</RefTitle>
        <RefYear>1990</RefYear>
        <RefBookTitle>Proceedings of the SIGCHI conference on Human factors in computing systems: Empowering people</RefBookTitle>
        <RefPage>249-256</RefPage>
        <RefTotal>Nielsen J, Molich R. Heuristic evaluation of user interfaces. In: Carrasco Chew J, Whiteside J, eds. Proceedings of the SIGCHI conference on Human factors in computing systems: Empowering people; 1990 April 1-5; Seattle. New York: ACM; 1990. p. 249-256.</RefTotal>
      </Reference>
      <Reference refNo="12">
        <RefAuthor>Perlman G</RefAuthor>
        <RefTitle>Practical usability evaluation</RefTitle>
        <RefYear>1994</RefYear>
        <RefBookTitle>Conference on Human factors in computing systems</RefBookTitle>
        <RefPage>407-408</RefPage>
        <RefTotal>Perlman G. Practical usability evaluation. In: Plaisant C, ed. Conference on Human factors in computing systems; 1994 April 24-28; Boston. Conference Companion. New York: ACM; 1994. p. 407-408.</RefTotal>
      </Reference>
      <Reference refNo="7">
        <RefAuthor>Pr&#252;mper J</RefAuthor>
        <RefTitle>Der Benutzungsfragebogen ISONORM 9241-10: Ergebnisse zur Reliabilit&#228;t und Validit&#228;t</RefTitle>
        <RefYear>1997</RefYear>
        <RefBookTitle>Software-Ergonomie &#39;97 &#8211; Usability Engineering: Integration von Mensch-Computer-Interaktion und Software-Entwicklung</RefBookTitle>
        <RefPage>253-262</RefPage>
        <RefTotal>Pr&#252;mper J. Der Benutzungsfragebogen ISONORM 9241-10: Ergebnisse zur Reliabilit&#228;t und Validit&#228;t. In: Software-Ergonomie &#39;97 &#8211; Usability Engineering: Integration von Mensch-Computer-Interaktion und Software-Entwicklung. Stuttgart: Teubner; 1997. p. 253-262.</RefTotal>
      </Reference>
      <Reference refNo="23">
        <RefAuthor>Sarodnick F</RefAuthor>
        <RefAuthor>Brau H</RefAuthor>
        <RefTitle></RefTitle>
        <RefYear>2006</RefYear>
        <RefBookTitle>Methoden der Usability Evaluation: Wissenschaftliche Grundlagen und praktische Anwendung</RefBookTitle>
        <RefPage></RefPage>
        <RefTotal>Sarodnick F, Brau H. Methoden der Usability Evaluation: Wissenschaftliche Grundlagen und praktische Anwendung. Bern: Verlag Hans Huber; 2006.</RefTotal>
      </Reference>
      <Reference refNo="3">
        <RefAuthor>Sengpiel M</RefAuthor>
        <RefAuthor>Dittberner D</RefAuthor>
        <RefTitle>The computer literacy scale (CLS) for older adults &#8211; development and validation</RefTitle>
        <RefYear>2008</RefYear>
        <RefBookTitle>Mensch &#38; Computer 2008: Viel Mehr Interaktion</RefBookTitle>
        <RefPage>7-16</RefPage>
        <RefTotal>Sengpiel M, Dittberner D. The computer literacy scale (CLS) for older adults &#8211; development and validation. In: Herczeg M, Kindsm&#252;ller MC, eds. Mensch &#38; Computer 2008: Viel Mehr Interaktion. M&#252;nchen: Oldenbourg Verlag; 2008. p. 7-16.</RefTotal>
      </Reference>
      <Reference refNo="26">
        <RefAuthor>Virzi RA</RefAuthor>
        <RefTitle>Refining the test phase of usability evaluation: How many subjects is enough&#63;</RefTitle>
        <RefYear>1992</RefYear>
        <RefJournal>Hum Factors</RefJournal>
        <RefPage>457-68</RefPage>
        <RefTotal>Virzi RA. Refining the test phase of usability evaluation: How many subjects is enough&#63; Hum Factors. 1992;34(4):457-68.</RefTotal>
      </Reference>
    </References>
    <Media>
      <Tables>
        <Table format="png">
          <MediaNo>1</MediaNo>
          <MediaID>1</MediaID>
          <Caption><Pgraph><Mark1>Table 1: Categories for quantitative summarization</Mark1></Pgraph></Caption>
        </Table>
        <Table format="png">
          <MediaNo>2</MediaNo>
          <MediaID>2</MediaID>
          <Caption><Pgraph><Mark1>Table 2: Balanced weighting (general user-interfaces)</Mark1></Pgraph></Caption>
        </Table>
        <Table format="png">
          <MediaNo>3</MediaNo>
          <MediaID>3</MediaID>
          <Caption><Pgraph><Mark1>Table 3: Focused weighting (user-interfaces for emergencies)</Mark1></Pgraph></Caption>
        </Table>
        <NoOfTables>3</NoOfTables>
      </Tables>
      <Figures>
        <Figure format="png" height="148" width="767">
          <MediaNo>1</MediaNo>
          <MediaID>1</MediaID>
          <Caption><Pgraph><Mark1>Figure 1</Mark1></Pgraph></Caption>
        </Figure>
        <NoOfPictures>1</NoOfPictures>
      </Figures>
      <InlineFigures>
        <NoOfPictures>0</NoOfPictures>
      </InlineFigures>
      <Attachments>
        <Attachment>
          <MediaNo>1</MediaNo>
          <MediaID filename="mibe000115.a1.pdf" mimeType="application/pdf" origFilename="GMS-mibe-Nestler-Att1.pdf" size="107630" url="">1</MediaID>
          <AttachmentTitle>Appendix: Questionnaire</AttachmentTitle>
        </Attachment>
        <NoOfAttachments>1</NoOfAttachments>
      </Attachments>
    </Media>
  </OrigData>
</GmsArticle>