@inproceedings {INPROC-2009-93,
   author = {Andreas Hub and Joachim Kizler},
   title = {{Integration of Voice-Operated Route Planning and Route Guidance into a Portable Navigation and Object Recognition System for the Blind and Visually Impaired}},
   booktitle = {Proceedings of the 2009 Biennial South Pacific Educators in Vision Impairment Conference (SPEVI 2009); January 5-9, Adelaide, Australia},
   publisher = {Online},
   institution = {Universit{\"a}t Stuttgart : Sonderforschungsbereich SFB 627 (Nexus: Umgebungsmodelle f{\"u}r mobile kontextbezogene Systeme), Germany},
   pages = {1--7},
   type = {Konferenz-Beitrag},
   month = {Januar},
   year = {2009},
   keywords = {Blind Navigation; Object Recognition},
   language = {Englisch},
   cr-category = {H.5.2 Information Interfaces and Presentation User Interfaces,
                   K.4.2 Computers and Society Social Issues},
   ee = {ftp://ftp.informatik.uni-stuttgart.de/pub/library/ncstrl.ustuttgart_fi/INPROC-2009-93/INPROC-2009-93.pdf},
   contact = {andreas.hub@vis.uni-stuttgart.de},
   department = {Universit{\"a}t Stuttgart, Institut f{\"u}r Visualisierung und Interaktive Systeme, Visualisierung und Interaktive Systeme},
   abstract = {This study describes the development of a multi-functional assistant system for
      the blind which combines localisation, real and virtual navigation within
      modelled environments and the identification and tracking of fixed and movable
      objects.},
   url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2009-93&engl=0}
}

@inproceedings {INPROC-2009-92,
   author = {Oliver Siemoneit and Christoph Hubig and Bernhard Schmitz and Thomas Ertl},
   title = {{Mobiquitous Devices and Perception of Reality. A Philosophical Enquiry into Mobile and Ubiquitous Computing Devices that Alter Perception Using the Example of TANIA - A Tactile Acoustical Indoor and Outdoor Navigation and Information Assistant for the Blind, Deafblind, and Visually-impaired Users.}},
   booktitle = {Proceedings of the 5th Asia-Pacific Computing and Philosophy Conference (APCAP 2009)},
   publisher = {Online},
   institution = {Universit{\"a}t Stuttgart : Sonderforschungsbereich SFB 627 (Nexus: Umgebungsmodelle f{\"u}r mobile kontextbezogene Systeme), Germany},
   pages = {123--130},
   type = {Konferenz-Beitrag},
   month = {Oktober},
   year = {2009},
   language = {Englisch},
   cr-category = {K.4.2 Computers and Society Social Issues,
                   H.5.2 Information Interfaces and Presentation User Interfaces,
                   H.1.2 User/Machine Systems},
   ee = {ftp://ftp.informatik.uni-stuttgart.de/pub/library/ncstrl.ustuttgart_fi/INPROC-2009-92/INPROC-2009-92.pdf,
      http://bentham.k2.t.u-tokyo.ac.jp/ap-cap09/proceedings.pdf},
   department = {Universit{\"a}t Stuttgart, Institut f{\"u}r Philosophie, Abteilung f{\"u}r Wissenschaftstheorie und Technikphilosophie (IP/WTTP);
                  Universit{\"a}t Stuttgart, Institut f{\"u}r Visualisierung und Interaktive Systeme, Visualisierung und Interaktive Systeme},
   abstract = {Philosophical research in mobiquitous devices still lacks important topics, in
      particular how mobiquitous devices change us and our perception of the world.
      It is the aim of this paper, to make a basic contribution to this debate and to
      shed some light on fundamental questions. Therefore, we introduce the TANIA
      system, a tactile acoustical navigation and information assistant for the
      blind, which uses - among other concepts - 1) a vibrating Wii remote mounted to
      a cane so as to indicate for the orientation of the blind towards a certain
      destination and 2) a stereo camera integrated into a bicycle helmet so as to
      compensate for the loss of vision. Alteration of perception is discussed in
      detail by distinguishing between perception enhancement, perception
      substitution, perception constriction and perception determination. Moreover we
      elaborate upon basic system design issues, thereby also justifying why we
      designed the TANIA system like it is. Finally, it is shown that technology
      itself has never been something else but an extension to man, and that
      technology - since technology is not only a means but also a medium - has
      always altered, still alters, and will always alter our perception of the
      world.},
   url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2009-92&engl=0}
}

@inproceedings {INPROC-2009-91,
   author = {Andreas Hub and Bernhard Schmitz},
   title = {{Addition of RFID-Based Initialization and Object Recognition to the Navigation System TANIA}},
   booktitle = {Proceedings of the California State University, Northridge Center on Disabilities' 24th Annual International Technology and Persons with Disabilities Conference (CSUN 2009)},
   publisher = {Online},
   institution = {Universit{\"a}t Stuttgart : Sonderforschungsbereich SFB 627 (Nexus: Umgebungsmodelle f{\"u}r mobile kontextbezogene Systeme), Germany},
   pages = {1--3},
   type = {Konferenz-Beitrag},
   month = {M{\"a}rz},
   year = {2009},
   language = {Englisch},
   cr-category = {K.4.2 Computers and Society Social Issues},
   ee = {ftp://ftp.informatik.uni-stuttgart.de/pub/library/ncstrl.ustuttgart_fi/INPROC-2009-91/INPROC-2009-91.pdf},
   department = {Universit{\"a}t Stuttgart, Institut f{\"u}r Visualisierung und Interaktive Systeme, Visualisierung und Interaktive Systeme},
   abstract = {The TANIA indoor and outdoor blind navigation system has been augmented with
      RFID technology, providing automatic initialization and recognition of tagged
      objects.},
   url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2009-91&engl=0}
}

@inproceedings {INPROC-2009-89,
   author = {Bernhard Schmitz and Andreas Hub},
   title = {{Combination of the Navigation System TANIA with RFID-Based Initialization and Object Recognition}},
   booktitle = {Proceedings from 7th European Conference of ICEVI},
   address = {Dublin},
   publisher = {Online},
   institution = {Universit{\"a}t Stuttgart : Sonderforschungsbereich SFB 627 (Nexus: Umgebungsmodelle f{\"u}r mobile kontextbezogene Systeme), Germany},
   pages = {1--2},
   type = {Konferenz-Beitrag},
   month = {Juli},
   year = {2009},
   language = {Englisch},
   cr-category = {K.4.2 Computers and Society Social Issues},
   ee = {ftp://ftp.informatik.uni-stuttgart.de/pub/library/ncstrl.ustuttgart_fi/INPROC-2009-89/INPROC-2009-89.pdf,
      http://www.icevi-europe.org/dublin2009/index.html},
   department = {Universit{\"a}t Stuttgart, Institut f{\"u}r Visualisierung und Interaktive Systeme, Visualisierung und Interaktive Systeme},
   abstract = {In order to initialize the user's location more accurately, the TANIA indoor
      and outdoor blind navigation system has been extended with an RFID reader. The
      system can also be used for the recognition of tagged objects.},
   url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2009-89&engl=0}
}

@inproceedings {INPROC-2009-84,
   author = {M. Eissele and D. Weiskopf and T. Ertl},
   title = {{Interactive Context-Aware Visualization for Mobile Devices}},
   booktitle = {SG '09: Proceedings of Smart Graphics},
   publisher = {-},
   institution = {Universit{\"a}t Stuttgart : Sonderforschungsbereich SFB 627 (Nexus: Umgebungsmodelle f{\"u}r mobile kontextbezogene Systeme), Germany},
   pages = {167--178},
   type = {Konferenz-Beitrag},
   month = {Januar},
   year = {2009},
   language = {Englisch},
   cr-category = {I.3 Computer Graphics},
   department = {Universit{\"a}t Stuttgart, Institut f{\"u}r Visualisierung und Interaktive Systeme, Visualisierung und Interaktive Systeme},
   abstract = {Utilizing context information---e.g. location, user aspects, or hardware
      capabilities---enables the presented generic framework to automatically
      control the selection and configuration of visualization techniques and
      therewith provide interactive illustrations, displayed on small mobile devices.
      For context-annotated data, provided by an underlying context-aware world model,
      the proposed system determines adequate visualization methods out of a
      database. Based on a novel analysis of a hierarchical data format definition
      and an evaluation of relevant context attributes, visualization templates are
      selected, configured, and instanced. This automatic, interactive process
      enables visualizations that smartly reconfigure according to changed context
      aspects. In addition to the generic concept, we present real-world applications
      that make use of this framework.},
   url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2009-84&engl=0}
}

@inproceedings {INPROC-2009-83,
   author = {H. Sanftmann and A. Blessing and H. Sch{\"u}tze and D. Weiskopf},
   title = {{Visual Exploration of Classifiers for Hybrid Textual and Geospatial Matching}},
   booktitle = {Proceedings of Vision, Modeling, and Visualization VMV '09},
   publisher = {-},
   institution = {Universit{\"a}t Stuttgart : Sonderforschungsbereich SFB 627 (Nexus: Umgebungsmodelle f{\"u}r mobile kontextbezogene Systeme), Germany},
   type = {Konferenz-Beitrag},
   month = {November},
   year = {2009},
   language = {Englisch},
   cr-category = {I.3 Computer Graphics},
   department = {Universit{\"a}t Stuttgart, Institut f{\"u}r Maschinelle Sprachverarbeitung;
                  Universit{\"a}t Stuttgart, Institut f{\"u}r Visualisierung und Interaktive Systeme, Visualisierung und Interaktive Systeme},
   abstract = {The availability of large geospatial data from different sources has
      dramatically increased, but for the usage of such data in geo-mashup or
      context-aware systems, a data fusion component is necessary. To solve the
      integration issue classifiers are obtained by supervised training, with feature
      vectors derived from textual and geospatial attributes. In an application
      example, a coherent part of Germany was annotated by humans and used for
      supervised learning. Annotation by humans is not free of errors, which
      decreases the performance of the classifier. We show how visual analytics
      techniques can be used to efficiently detect such false annotations. Especially
      the textual features introduce high-dimensional feature vectors, where visual
      analytics becomes important and helps to understand and improve the trained
      classifiers. Particular technical components used in our systems are
      scatterplots, multiple coordinated views, and interactive data drill-down.},
   url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2009-83&engl=0}
}

@article {ART-2009-22,
   author = {J. Chuang and D. Weiskopf and T. M{\"o}ller},
   title = {{Energy Aware Color Sets}},
   journal = {Computer Graphics Forum},
   publisher = {Wiley},
   volume = {28},
   number = {2},
   pages = {203--211},
   type = {Artikel in Zeitschrift},
   month = {Januar},
   year = {2009},
   language = {Englisch},
   cr-category = {I.3.3 Picture/Image Generation,
                   I.3.6 Computer Graphics Methodology and Techniques,
                   I.3.7 Three-Dimensional Graphics and Realism},
   department = {Universit{\"a}t Stuttgart, Institut f{\"u}r Visualisierung und Interaktive Systeme, Visualisierung und Interaktive Systeme},
   abstract = {We present a design technique for colors with the purpose of lowering the
      energy consumption of the display device. Our approach is based on a screen
      space variant energy model. The result of our design is a set of
      distinguishable iso-lightness colors guided by perceptual principles. We
      present two variations of our approach. One is based on a set of discrete
      user-named (categorical) colors, which are analyzed according to their energy
      consumption. The second is based on the constrained continuous optimization of
      color energy in the perceptually uniform CIELAB color space. We quantitatively
      compare our two approaches with a traditional choice of colors, demonstrating
      that we typically save approximately 40 percent of the energy. The color sets
      are applied to examples from the 2D visualization of nominal data and volume
      rendering of 3D scalar fields.},
   url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=ART-2009-22&engl=0}
}

@article {ART-2009-17,
   author = {H. Sanftmann and D. Weiskopf},
   title = {{Illuminated 3D Scatterplots}},
   journal = {Computer Graphics Forum (Proceedings of EuroVis 2009)},
   publisher = {Wiley},
   volume = {28},
   number = {3},
   pages = {751--758},
   type = {Artikel in Zeitschrift},
   month = {Juni},
   year = {2009},
   language = {Englisch},
   cr-category = {I.3.7 Three-Dimensional Graphics and Realism},
   department = {Universit{\"a}t Stuttgart, Institut f{\"u}r Visualisierung und Interaktive Systeme, Visualisierung und Interaktive Systeme},
   abstract = {In contrast to 2D scatterplots, the existing 3D variants have the advantage of
      showing one additional data dimension, but suffer from inadequate spatial and
      shape perception and therefore are not well suited to display structures of the
      underlying data. We improve shape perception by applying a new illumination
      technique to the pointcloud representation of 3D scatterplots. Points are
      classified as locally linear, planar, and volumetric structures---according to
      the eigenvalues of the inverse distance-weighted covariance matrix at each data
      element. Based on this classification, different lighting models are applied:
      codimension-2 illumination, surface illumination, and emissive volumetric
      illumination. Our technique lends itself to efficient GPU point rendering and
      can be combined with existing methods like semi-transparent rendering, halos,
      and depth or attribute based color coding. The user can interactively navigate
      in the dataset and manipulate the classification and other visualization
      parameters. We demonstrate our visualization technique by showing examples of
      multi-dimensional data and of generic pointcloud data.},
   url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=ART-2009-17&engl=0}
}

@article {ART-2009-16,
   author = {M. Eissele and H. Sanftmann and T. Ertl},
   title = {{Interactively Refining Object-Recognition System}},
   journal = {Journal of WSCG},
   publisher = {Online},
   volume = {17},
   number = {1},
   pages = {1--8},
   type = {Artikel in Zeitschrift},
   month = {Juni},
   year = {2009},
   issn = {1213-6972},
   language = {Englisch},
   cr-category = {I.3.7 Three-Dimensional Graphics and Realism},
   department = {Universit{\"a}t Stuttgart, Institut f{\"u}r Visualisierung und Interaktive Systeme, Visualisierung und Interaktive Systeme},
   internal-note = {NOTE(review): abstract below is identical to INPROC-2009-83 and does not match this title -- presumably a copy-paste error in the source database; replace with the correct abstract.},
   abstract = {The availability of large geospatial data from different sources has
      dramatically increased, but for the usage of such data in geo-mashup or
      context-aware systems, a data fusion component is necessary. To solve the
      integration issue classifiers are obtained by supervised training, with feature
      vectors derived from textual and geospatial attributes. In an application
      example, a coherent part of Germany was annotated by humans and used for
      supervised learning. Annotation by humans is not free of errors, which
      decreases the performance of the classifier. We show how visual analytics
      techniques can be used to efficiently detect such false annotations. Especially
      the textual features introduce high-dimensional feature vectors, where visual
      analytics becomes important and helps to understand and improve the trained
      classifiers. Particular technical components used in our systems are
      scatterplots, multiple coordinated views, and interactive data drill-down.},
   url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=ART-2009-16&engl=0}
}

@inbook {INBOOK-2009-05,
   author = {D. Weiskopf},
   title = {{Geo-spatial context-aware visualization}},
   series = {Eurographics 2009 Areas Papers Proceedings},
   publisher = {-},
   pages = {1--2},
   type = {Beitrag in Buch},
   month = {Juni},
   year = {2009},
   language = {Deutsch},
   internal-note = {NOTE(review): language is Deutsch but title and abstract are English -- confirm against the published paper.},
   cr-category = {I.3 Computer Graphics},
   department = {Universit{\"a}t Stuttgart, Institut f{\"u}r Visualisierung und Interaktive Systeme, Visualisierung und Interaktive Systeme},
   abstract = {Mobile computer systems equipped with wireless communication and sensor
      technology - such as mobile phones with cameras - have become widely available.
      Context information, for example the user's current location or their
      physical environment, plays an increasingly important role in simplifying the
      interaction between users and such mobile information systems. A generic
      framework for federating heterogeneous spatial context models is briefly
      described. The federated information serves as basis for the visualization of
      spatially referenced data. Visualization challenges include efficient rendering
      on mobile devices, automatic adaptation of visualization techniques to context
      information, as well as consideration of the quality of context in the form of
      uncertainty visualization.},
   url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INBOOK-2009-05&engl=0}
}

