@inproceedings{INPROC-2011-87,
   author = {Jorge Minguez and Florian Niedermann and Bernhard Mitschang},
   title = {{A provenance-aware service repository for EAI process modeling tools}},
   booktitle = {IEEE International Conference on Information Reuse and Integration 2011 (IRI '11)},
   address = {Las Vegas},
   publisher = {IEEE Press},
   institution = {University of Stuttgart, Faculty of Computer Science, Electrical Engineering, and Information Technology, Germany},
   pages = {42--47},
   type = {Conference Paper},
   month = aug,
   year = {2011},
   doi = {10.1109/IRI.2011.6009518},
   keywords = {EAI process modeling tool; business process; business service; data interoperability; enterprise application integration; functional interoperability; manufacturing domain; process lifecycle management; provenance aware service repository; provenance data model; provenance subscription capabilities; service engineering methods; service knowledge base; service reusability; business data processing; knowledge based systems; manufacturing industries; open systems},
   language = {English},
   cr-category = {D.2.11 Software Engineering Software Architectures,
                   D.2.13 Software Engineering Reusable Software},
   department = {University of Stuttgart, Institute of Parallel and Distributed Systems, Applications of Parallel and Distributed Systems},
   abstract = {One of the major challenges for Enterprise Application Integration (EAI)
      process modeling tools is the continuous adaptation of the business processes
      and services. Business and IT specialists are both confronted with a number of
      problems involved in the adaptation of such processes, such as the lack of
      support for process lifecycle management, data and functional interoperability
      problems or the appropriate service knowledge base. Currently, most service
      engineering methods adopt a lifecycle strategy for the design, implementation,
      deployment and evaluation of services. However, enterprises exploiting service
      reusability lack the knowledge on process dependencies across the entire
      service lifecycle. This knowledge is required by process modeling tools in
      order to keep EAI processes loosely-coupled. Using a provenance data model we
      describe the different types of service dependencies in EAI processes with
      regard to the service changes across its lifecycle. We present a
      provenance-aware service repository with provenance subscription capabilities
      and its adoption for different use cases in the manufacturing domain.},
   url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2011-87&engl=1}
}

@inproceedings{INPROC-2011-86,
   author = {Nazario Cipriani and Oliver Schiller and Bernhard Mitschang},
   title = {{M-TOP: Multi-target Operator Placement of Query Graphs for Data Streams}},
   booktitle = {Proceedings of the 15th International Database Engineering and Applications Symposium (IDEAS 2011)},
   publisher = {ACM},
   institution = {University of Stuttgart, Faculty of Computer Science, Electrical Engineering, and Information Technology, Germany},
   pages = {52--60},
   type = {Conference Paper},
   month = sep,
   year = {2011},
   language = {English},
   cr-category = {G.1.6 Numerical Analysis Optimization,
                   C.2.3 Network Operations},
   department = {University of Stuttgart, Institute of Parallel and Distributed Systems, Applications of Parallel and Distributed Systems},
   abstract = {Nowadays, many applications processes stream-based data, such as financial
      market analysis, network intrusion detection, or visualization applications. To
      process stream-based data in an application-independent manner, distributed
      stream processing systems emerged. They typically translate a query to an
      operator graph, place the operators to stream processing nodes, and execute
      them to process the streamed data. The operator placement is crucial in such
      systems, as it deeply influences query execution. Often, different stream-based
      applications require dedicated placement of query graphs according to their
      specific objectives, e.g. bandwidth not less than 500 MBit/s and costs not more
      that 1 cost unit. This fact constraints operator placement. Existing approaches
      do not take into account application-specific objectives, thus not reflecting
      application-specific placement decisions. As objectives might conflict among
      each other, operator placement is subject to delicate trade-offs, such as
      bandwidth maximization is more important than cost reduction. Thus, the
      challenge is to find a solution which considers the application-specific
      objectives and their trade-offs.
      
      We present M-TOP, an QoS-aware multi-target operator placement framework for
      data stream systems. Particularly, we propose an operator placement strategy
      considering application-specific targets consisting of objectives, their
      respective trade-offs specifications, bottleneck conditions, and ranking
      schemes to compute a suitable placement. We integrated M-TOP into NexusDS, our
      distributed data stream processing middleware, and provide an experimental
      evaluation to show the effectiveness of M-TOP.},
   url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2011-86&engl=1}
}

@inproceedings{INPROC-2011-85,
   author = {Nazario Cipriani and Matthias Grossmann and Harald Sanftmann and Bernhard Mitschang},
   title = {{Design Considerations of a Flexible Data Stream Processing Middleware}},
   booktitle = {Proceedings of the 15th East-European Conference on Advances in Databases and Information Systems (ADBIS 2011)},
   publisher = {CEUR-WS.org},
   institution = {University of Stuttgart, Faculty of Computer Science, Electrical Engineering, and Information Technology, Germany},
   pages = {222--231},
   type = {Conference Paper},
   month = sep,
   year = {2011},
   language = {English},
   cr-category = {K.6.1 Project and People Management},
   department = {University of Stuttgart, Institute of Parallel and Distributed Systems, Applications of Parallel and Distributed Systems},
   abstract = {Techniques for efficient and distributed processing of huge, unbound data
      streams have made some impact in the database community. Distributed data
      stream processing systems have emerged providing a distributed environment to
      process these potentially unbound streams of data by a set of processing nodes.
      A wide range of realtime applications process stream-based data. Sensors and
      data sources, such as position data of moving objects, continuously produce
      data that is consumed by, e.g., location-aware applications. Depending on the
      domain of interest, the processing of such data often depends on
      domain-specific functionality. For instance, an application which visualizes
      stream-based data has stringent timing constraints, or may even need a specific
      hardware environment to smoothly process the data. Furthermore, users may add
      additional constraints. E.g., for security reasons they may want to restrict
      the set of nodes that participates in processing.
      
      In this paper we review context-aware applications which, despite their
      different application fields, share common data processing principles. We
      analyse these applications and extract common requirements which data stream
      processing systems must meet to support these applications. Finally, we show
      how such applications are implemented using NexusDS, our extensible stream
      processing middleware.},
   url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2011-85&engl=1}
}

@inproceedings{INPROC-2011-64,
   author = {Michael Abel and Peter Klemm and Stefan Silcher and Jorge Minguez},
   title = {{Start-Up of Reconfigurable Production Machines with a Service-Oriented Architecture}},
   booktitle = {Proceedings of the 21st International Conference on Production Research},
   publisher = {Fraunhofer IAO},
   institution = {University of Stuttgart, Faculty of Computer Science, Electrical Engineering, and Information Technology, Germany},
   pages = {1--5},
   type = {Conference Paper},
   month = aug,
   year = {2011},
   language = {English},
   cr-category = {D.2.11 Software Engineering Software Architectures},
   department = {University of Stuttgart, Institute of Parallel and Distributed Systems, Applications of Parallel and Distributed Systems},
   abstract = {Starting-up production machines takes a considerable part of time and
      development expenses. Especially in the case of reconfigurable machines a short
      start-up phase is essential. Many activities, which are necessary during the
      start-up, can be automated. These are the configuration of mechatronic modules,
      control- and fieldbus systems as well as the extensive testing of functions.
      This paper presents an approach based on a service-oriented architecture (SOA)
      to automate the start-up of a reconfigurable production machine. Functionality
      for configuration and start-up is provided by an internal middleware system.
      The sequence control for the startup process is realised within a configuration
      system. A new approach to the combination of field-bus and middleware
      communication infrastructure is used to adopt SOA paradigms to existing
      automation technology. Thus, real-time communication can be combined with
      flexible communication for the automatic configuration of production machines.},
   url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2011-64&engl=1}
}

@inproceedings{INPROC-2011-57,
   author = {Andreas Brodt and Oliver Schiller and Bernhard Mitschang},
   title = {{Efficient resource attribute retrieval in RDF triple stores}},
   booktitle = {Proceedings of the 20th ACM conference on Information and knowledge management (CIKM)},
   publisher = {Association for Computing Machinery (ACM)},
   institution = {University of Stuttgart : Collaborative Research Center SFB 627 (Nexus: World Models for Mobile Context-Based Systems), Germany},
   type = {Conference Paper},
   month = oct,
   year = {2011},
   keywords = {RDF; SPARQL},
   language = {English},
   cr-category = {H.2.4 Database Management Systems,
                   H.2.2 Database Management Physical Design},
   contact = {andreas.brodt@ipvs.uni-stuttgart.de},
   department = {University of Stuttgart, Institute of Parallel and Distributed Systems, Applications of Parallel and Distributed Systems},
   abstract = {The W3C Resource Description Framework (RDF) is gaining popularity for its
      ability to manage semi-structured data without a predefined database schema. So
      far, most RDF query processors have concentrated on finding complex graph
      patterns in RDF, which typically involves a high number of joins. This works
      very well to query resources by the relations between them. Yet, obtaining a
      record-like view on the attributes of resources, as natively supported by
      RDBMS, imposes unnecessary performance burdens, as the individual attributes
      must be joined to assemble the final result records.
      
      We present an approach to retrieve the attributes of resources efficiently. We
      first determine the resources in question and then retrieve all their
      attributes efficiently at once, exploiting contiguous storage in RDF indexes.
      In addition, we present an index structure which is specifically designed for
      RDF attribute retrieval. In a performance evaluation we show that our approach
      is clearly superior for larger numbers of retrieved attributes.},
   url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2011-57&engl=1}
}

@inproceedings{INPROC-2011-42,
   author = {Jorge Minguez and Peter Reimann and Sema Zor},
   title = {{Event-driven Business Process Management in Engineer-to-Order Supply Chains}},
   booktitle = {Proceedings of the 15th International Conference on Computer Supported Cooperative Work in Design},
   publisher = {IEEE},
   institution = {University of Stuttgart, Faculty of Computer Science, Electrical Engineering, and Information Technology, Germany},
   pages = {1--8},
   type = {Conference Paper},
   month = jun,
   year = {2011},
   keywords = {Event-driven Architecture; Service-oriented Architecture; SOA; EDA; Engineer-to-Order; ETO; Supply chain},
   language = {English},
   cr-category = {D.2.11 Software Engineering Software Architectures,
                   D.2.13 Software Engineering Reusable Software},
   department = {University of Stuttgart, Institute of Parallel and Distributed Systems, Applications of Parallel and Distributed Systems;
                  University of Stuttgart, Institute of Architecture of Application Systems},
   abstract = {Integration efforts in today's manufacturing environments tend to enable
      service-based communication interfaces between enterprise and manufacturing
      systems. Constantly changing business conditions demand a high level of
      flexibility in business processes as well as an adaptive and fully
      interoperable IT infrastructure. The principles of reusability and
      loosely-coupled services have driven Service Oriented Architecture (SOA) to
      become the most used paradigm for software design at the business level. In a
      manufacturing environment, event-driven architectures (EDA) are often employed
      for managing information flows across different production systems. The timely
      propagation of business-relevant events is a fundamental requirement in
      Engineer-to-Order (ETO) enterprises, which require a high level of transparency
      in their supply chains. Agility is one of the top priorities for ETO
      manufacturers in order to react to turbulent scenarios. Therefore, the main
      challenge for ETO supply chains is to identify and propagate events across the
      ETO logistics network and integrate these into the manufacturer business
      processes. We present how an existing service-oriented integration platform for
      manufacturing can be used to fill the gap between EDA-based manufacturing
      environments of an ETO supply chain and SOA-based manufacturer business
      processes. In this paper, we discuss the benefits of the Business Process
      Execution Language (BPEL) as vehicle for this integration. The adoption of BPEL
      will enable an efficient and effective reaction to turbulent manufacturing
      scenarios in an ETO supply chain.},
   url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2011-42&engl=1}
}

@inproceedings{INPROC-2011-41,
   author = {Jorge Minguez and David Baureis and Donald Neumann},
   title = {{Providing Coordination and Goal Definition in Product-Service Systems through Service-oriented Computing}},
   booktitle = {Proceedings of the 44th CIRP International Conference on Manufacturing Systems},
   publisher = {CIRP},
   institution = {University of Stuttgart, Faculty of Computer Science, Electrical Engineering, and Information Technology, Germany},
   pages = {7--12},
   type = {Conference Paper},
   month = jun,
   year = {2011},
   keywords = {Manufacturing; Product-Service Systems; PSS; Service-oriented Architecture; SOA},
   language = {English},
   cr-category = {D.2.11 Software Engineering Software Architectures,
                   D.2.13 Software Engineering Reusable Software},
   department = {University of Stuttgart, Institute of Parallel and Distributed Systems, Applications of Parallel and Distributed Systems},
   abstract = {Product-Service Systems (PSS) are a strategic approach that offers
      manufacturing companies the possibility of long-term differentiation against
      competitors by integrating goods and services. The implementation of a PSS
      entails challenges for the resulting supply chain structure and the IT
      infrastructure supporting coordinated service offerings, such as conflicting
      goals and coordination in the integrated business processes. The
      Service-oriented Architecture (SOA) paradigm, based on loosely-coupled
      components, provides rapid reconfiguration of business processes, rapid
      integration of services and goal definition through service level agreements.
      This paper presents a PSS service analysis methodology, which supports
      coordination and definition of goals in heterogeneous supply chains.},
   url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2011-41&engl=1}
}

@inproceedings{INPROC-2011-40,
   author = {Jorge Minguez and Stefan Silcher and Bernhard Mitschang and Engelbert Westk{\"a}mper},
   title = {{Towards Intelligent Manufacturing: Equipping SOA-based Architectures with advanced SLM Services}},
   booktitle = {Proceedings of the 44th CIRP International Conference on Manufacturing Systems},
   publisher = {CIRP},
   institution = {University of Stuttgart, Faculty of Computer Science, Electrical Engineering, and Information Technology, Germany},
   pages = {1--8},
   type = {Conference Paper},
   month = jun,
   year = {2011},
   keywords = {Service Oriented Architecture; Manufacturing; Service Lifecycle Management; SOA; SLM; Adaptability; Wandlungsf{\"a}higkeit},
   language = {English},
   cr-category = {D.2.11 Software Engineering Software Architectures,
                   D.2.13 Software Engineering Reusable Software},
   department = {University of Stuttgart, Institute of Parallel and Distributed Systems, Applications of Parallel and Distributed Systems},
   abstract = {The vision of knowledge-based and intelligent manufacturing systems is driving
      the development of system architectures, which can seamlessly manage
      information flows across multiple heterogeneous manufacturing systems and
      provide the necessary services to support the execution of production
      processes. Constantly changing business conditions and turbulent scenarios
      force manufacturing companies to continuously adapt their business processes
      and manufacturing systems. In such a context, a flexible infrastructure that
      supports the full integration of processes and adapts its services is needed.
      This paper presents an innovative semantic service framework that enables the
      adoption of service lifecycle management (SLM) in an SOA-based integration
      framework.},
   url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2011-40&engl=1}
}

@inproceedings{INPROC-2011-39,
   author = {Stefan Silcher and Jorge Minguez and Bernhard Mitschang},
   title = {{Adopting the Manufacturing Service Bus in a Service-based Product Lifecycle Management Architecture}},
   booktitle = {Proceedings of the 44th International CIRP Conference on Manufacturing Systems: ICMS '11; Madison, Wisconsin, USA, May 31 - June 3, 2011},
   publisher = {Online},
   institution = {University of Stuttgart, Faculty of Computer Science, Electrical Engineering, and Information Technology, Germany},
   pages = {1--6},
   type = {Conference Paper},
   month = jun,
   year = {2011},
   keywords = {Information; System Architecture; Product Lifecycle Management; Service Oriented Architecture; Enterprise Service Bus},
   language = {English},
   cr-category = {D.2.11 Software Engineering Software Architectures,
                   D.2.13 Software Engineering Reusable Software},
   department = {University of Stuttgart, Institute of Parallel and Distributed Systems, Applications of Parallel and Distributed Systems},
   abstract = {Service-oriented computing is nowadays a rising technology to implement
      business processes in an efficient and flexible manner. This technology has a
      great impact on manufacturing environments. The realization of Product
      Lifecycle Management (PLM) with a Service Oriented Architecture (SOA) has many
      benefits. Some advantages are a seamless and flexible integration of all
      applications within PLM, including legacy systems, improved data provisioning
      and a reduced complexity by using a common service-based integration
      middleware, such as the Manufacturing Service Bus (MSB). In this paper the
      integration of the MSB into the service-oriented PLM approach will be described
      in detail.},
   url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2011-39&engl=1}
}

@inproceedings{INPROC-2011-37,
   author = {Sylvia Radesch{\"u}tz and Marko Vrhovnik and Holger Schwarz and Bernhard Mitschang},
   title = {{Exploiting the Symbiotic Aspects of Process and Operational Data for Optimizing Business Processes}},
   booktitle = {Proc. of the 12th IEEE International Conference on Information Reuse and Integration (IRI 2011)},
   address = {Las Vegas, USA},
   publisher = {IEEE},
   institution = {University of Stuttgart, Faculty of Computer Science, Electrical Engineering, and Information Technology, Germany},
   pages = {1--6},
   type = {Conference Paper},
   month = aug,
   year = {2011},
   language = {English},
   cr-category = {H.2.4 Database Management Systems},
   department = {University of Stuttgart, Institute of Parallel and Distributed Systems, Applications of Parallel and Distributed Systems},
   abstract = {A profound analysis of all relevant business data in a company is necessary for
      optimizing business processes effectively. Current analyses typically run
      either on business process execution data or on operational business data.
      Correlations among the separate data sets have to be found manually under big
      effort. However, to achieve a more informative analysis and to fully optimize a
      company's business, an efficient consolidation of all major data sources is
      indispensable. Recent matching algorithms are insufficient for this task since
      they are restricted either to schema or to process matching. We present a new
      matching framework to combine process data models and operational data models
      (semi-)automatically for performing such a profound business analysis. We
      describe this approach and its basic matching rules as well as an experimental
      study that shows the achieved high recall and precision.},
   url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2011-37&engl=1}
}

@inproceedings{INPROC-2011-36,
   author = {Oliver Schiller and Benjamin Schiller and Andreas Brodt and Bernhard Mitschang},
   title = {{Native support of multi-tenancy in RDBMS for software as a service}},
   booktitle = {EDBT},
   publisher = {ACM},
   institution = {University of Stuttgart, Faculty of Computer Science, Electrical Engineering, and Information Technology, Germany},
   pages = {117--128},
   type = {Conference Paper},
   month = jan,
   year = {2011},
   language = {English},
   cr-category = {H.2.1 Database Management Logical Design,
                   H.2 Database Management},
   ee = {http://doi.acm.org/10.1145/1951365.1951382},
   department = {University of Stuttgart, Institute of Parallel and Distributed Systems, Applications of Parallel and Distributed Systems},
   abstract = {Software as a Service (SaaS) facilitates acquiring a huge number of small
      tenants by providing low service fees. To achieve low service fees, it is
      essential to reduce costs per tenant. For this, consolidating multiple tenants
      onto a single relational schema instance turned out beneficial because of low
      overheads per tenant and scalable manageability. This approach implements
      data isolation between tenants, per-tenant schema extension and further
      tenant-centric data management features in application logic. This is complex,
      disables some optimization opportunities in the RDBMS and represents a
      conceptual misstep with Separation of Concerns in mind. Therefore, we
      contribute first features of a RDBMS to support tenant-aware data management
      natively. We introduce tenants as first-class database objects and propose the
      concept of a tenant context to isolate a tenant from other tenants. We
      present a schema inheritance concept that allows sharing a core application
      schema among tenants while enabling schema extensions per tenant. Finally, we
      evaluate a preliminary implementation of our approach.},
   url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2011-36&engl=1}
}

@inproceedings{INPROC-2011-27,
   author = {Nazario Cipriani and Carlos L{\"u}bbe and Oliver D{\"o}rler},
   title = {{NexusDSEditor - Integrated Tool Support for the Data Stream Processing Middleware NexusDS}},
   booktitle = {Datenbanksysteme f{\"u}r Business, Technologie und Web (BTW), 14. Fachtagung des GI-Fachbereichs ``Datenbanken und Informationssysteme'' (DBIS), 2.-4.3.2011 in Kaiserslautern, Germany},
   editor = {Theo H{\"a}rder and Wolfgang Lehner and Bernhard Mitschang and Harald Sch{\"o}ning and Holger Schwarz},
   publisher = {Lecture Notes in Informatics (LNI)},
   institution = {University of Stuttgart : Collaborative Research Center SFB 627 (Nexus: World Models for Mobile Context-Based Systems), Germany},
   series = {Gesellschaft f{\"u}r Informatik (GI)},
   volume = {180},
   pages = {714--717},
   type = {Conference Paper},
   month = mar,
   year = {2011},
   isbn = {978-3-88579-274-1},
   language = {English},
   cr-category = {D.2.6 Software Engineering Programming Environments,
                   H.5.2 Information Interfaces and Presentation User Interfaces},
   contact = {nazario.cipriani@ipvs.uni-stuttgart.de},
   department = {University of Stuttgart, Institute of Parallel and Distributed Systems, Applications of Parallel and Distributed Systems},
   abstract = {In this paper we present NexusDSEditor --- an integrated tool for the stream
      processing middleware NexusDS. NexusDSEditor is an extension module for the
      NexusEditor and supports developers with designing new streaming applications
      by providing an integrated tool for orchestrating stream query graphs, define
      the deployment of query graph fragments to execution nodes, and analyzing data
      streams. In this paper we demonstrate these single steps and show how
      NexusDSEditor supports developing streaming data applications for the NexusDS
      platform by hiding complexity and providing an intuitive user interface.},
   url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2011-27&engl=1}
}

@inproceedings{INPROC-2011-26,
   author = {Florian Niedermann and Sylvia Radesch{\"u}tz and Bernhard Mitschang},
   title = {{Design-Time Process Optimization through Optimization Patterns and Process Model Matching}},
   booktitle = {Proceedings of the 12th IEEE Conference on Commerce and Enterprise Computing (CEC)},
   publisher = {IEEE Computer Society},
   institution = {University of Stuttgart, Faculty of Computer Science, Electrical Engineering, and Information Technology, Germany},
   pages = {48--55},
   type = {Conference Paper},
   month = nov,
   year = {2011},
   keywords = {Business Process Analytics; Business Process Design; Business Process Management; Business Process Optimization; Process Model Matching},
   language = {English},
   cr-category = {H.4.1 Office Automation},
   ee = {http://ieeexplore.ieee.org/xpls/abs_all.jsp?arnumber=5708392},
   contact = {florian.niedermann@ipvs.uni-stuttgart.de},
   department = {University of Stuttgart, Institute of Parallel and Distributed Systems, Applications of Parallel and Distributed Systems},
   abstract = {The goal of process design is the construction of a process model that is a
      priori optimal w.r.t. the goal(s) of the business owning the process. Process
      design is therefore a major factor in determining the process performance and
      ultimately the success of a business. Despite this importance, the designed
      process is often less than optimal. This is due to two major challenges: First,
      since the design is an a priori ability, no actual execution data is available
      to provide the foundations for design decisions. Second, since modeling
      decision support is typically basic at best, the quality of the design largely
      depends on the ability of business analysts to make the ``right'' design choices.
      To address these challenges, we present in this paper our deep Business
      Optimization Platform that enables (semi-) automated process optimization
      during process design based on actual execution data. Our platform achieves
      this task by matching new processes to existing processes stored in a
      repository based on similarity metrics and by using a set of formalized
      best-practice process optimization patterns.},
   url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2011-26&engl=1}
}

@inproceedings{INPROC-2011-25,
   author = {Florian Niedermann and Holger Schwarz},
   title = {{Deep Business Optimization: Making Business Process Optimization Theory Work in Practice}},
   booktitle = {Proceedings of the Conference on Business Process Modeling, Development and Support (BPMDS 2011)},
   publisher = {Springer},
   institution = {University of Stuttgart, Faculty of Computer Science, Electrical Engineering, and Information Technology, Germany},
   pages = {1--10},
   type = {Conference Paper},
   month = jun,
   year = {2011},
   keywords = {Business Process Optimization, Optimization Techniques, Business Process Analytics, Data Mining, Tool Support},
   language = {English},
   cr-category = {H.4.1 Office Automation},
   ee = {ftp://ftp.informatik.uni-stuttgart.de/pub/library/ncstrl.ustuttgart_fi/INPROC-2011-25/INPROC-2011-25.pdf},
   department = {University of Stuttgart, Institute of Parallel and Distributed Systems, Applications of Parallel and Distributed Systems},
   abstract = {The success of most of today's businesses is tied to the efficiency and
      effectiveness of their core processes. This importance has been recognized in
      research, leading to a wealth of sophisticated process optimization and
      analysis techniques. Their use in practice is, however, often limited as both
      the selection and the application of the appropriate techniques are challenging
      tasks. Hence, many techniques are not considered causing potentially
      significant opportunities of improvement not to be implemented. This paper
      proposes an approach to addressing this challenge using our deep Business
      Optimization Platform. By integrating a catalogue of formalized optimization
      techniques with data analysis and integration capabilities, it assists analysts
      both with the selection and the application of the most fitting optimization
      techniques for their specific situation. The paper presents both the concepts
      underlying this platform as well as its prototypical implementation.},
   url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2011-25&engl=1}
}

@inproceedings {INPROC-2011-24,
   author = {Florian Niedermann and Bernhard Maier and Sylvia Radesch{\"u}tz and Holger Schwarz and Bernhard Mitschang},
   title = {{Automated Process Decision Making based on Integrated Source Data}},
   booktitle = {Proceedings of the 14th International Conference on Business Information Systems (BIS 2011)},
   editor = {Witold Abramowicz},
   publisher = {Springer},
   institution = {University of Stuttgart, Faculty of Computer Science, Electrical Engineering, and Information Technology, Germany},
   series = {Lecture Notes in Business Information Processing},
   pages = {1--10},
   type = {Conference Paper},
   month = {June},
   year = {2011},
   keywords = {Data Mining, Decision Automation, Data Integration, Business Process Management, Data-driven Processes},
   language = {English},
   cr-category = {H.4.1 Office Automation,
                    H.2.8 Database Applications,
                    H.5.2 Information Interfaces and Presentation User Interfaces},
   internal-note = {NOTE(review): the abstract below is identical to that of entry INPROC-2011-23 and describes formalized process optimization patterns, i.e. the topic of that other paper -- likely a copy-paste error; also, series (LNBIP) is given without a volume number and pages 1--10 look like a placeholder. Verify all three against the publisher record.},
   department = {University of Stuttgart, Institute of Parallel and Distributed Systems, Applications of Parallel and Distributed Systems},
   abstract = {The success of most of today's businesses is tied to the efficiency and
      effectiveness of their core processes. Yet, two major challenges often prevent
      optimal processes: First, the analysis techniques applied during the
      optimization are inadequate and fail to include all relevant data sources.
      Second, the success depends on the abilities of the individual analysts to spot
      the right designs amongst a plethora of choices. Our deep Business Optimization
      Platform addresses these challenges through specialized data integration,
      analysis and optimization facilities. In this paper, we focus on how it uses
      formalized process optimization patterns for detecting and implementing process
      improvements.},
   url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2011-24&amp;engl=1}
}

@inproceedings {INPROC-2011-23,
   author = {Florian Niedermann and Sylvia Radesch{\"u}tz and Bernhard Mitschang},
   title = {{Business Process Optimization using Formalized Optimization Patterns}},
   booktitle = {Proceedings of the 14th International Conference on Business Information Systems (BIS 2011)},
   editor = {Witold Abramowicz},
   publisher = {Springer},
   institution = {University of Stuttgart, Faculty of Computer Science, Electrical Engineering, and Information Technology, Germany},
   pages = {1--10},
   type = {Conference Paper},
   month = {June},
   year = {2011},
   keywords = {Business Process Management; Business Process Optimization},
   language = {English},
   cr-category = {H.4.1 Office Automation},
   contact = {florian.niedermann@ipvs.uni-stuttgart.de},
   internal-note = {NOTE(review): pages 1--10 look like a placeholder (several 2011 conference entries in this file share the same range), and unlike the sibling entry INPROC-2011-24 no series (LNBIP) is given although both papers appeared at BIS 2011 -- verify against the publisher record.},
   department = {University of Stuttgart, Institute of Parallel and Distributed Systems, Applications of Parallel and Distributed Systems},
   abstract = {The success of most of today's businesses is tied to the efficiency and
      effectiveness of their core processes. Yet, two major challenges often prevent
      optimal processes: First, the analysis techniques applied during the
      optimization are inadequate and fail to include all relevant data sources.
      Second, the success depends on the abilities of the individual analysts to spot
      the right designs amongst a plethora of choices. Our deep Business Optimization
      Platform addresses these challenges through specialized data integration,
      analysis and optimization facilities. In this paper, we focus on how it uses
      formalized process optimization patterns for detecting and implementing process
      improvements.},
   url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2011-23&amp;engl=1}
}

@inproceedings {INPROC-2011-20,
   author = {Carlos L{\"u}bbe and Andreas Brodt and Nazario Cipriani and Matthias Gro{\ss}mann and Bernhard Mitschang},
   title = {{DiSCO: A Distributed Semantic Cache Overlay for Location-based Services}},
   booktitle = {Proceedings of the 2011 Twelfth International Conference on Mobile Data Management},
   address = {Washington, DC, USA},
   publisher = {IEEE Computer Society},
   institution = {University of Stuttgart : Collaborative Research Center SFB 627 (Nexus: World Models for Mobile Context-Based Systems), Germany},
   pages = {1--10},
   type = {Conference Paper},
   month = {June},
   year = {2011},
   keywords = {peer-to-peer; semantic caching},
   language = {English},
   cr-category = {C.2.4 Distributed Systems,
                    H.2.4 Database Management Systems},
   contact = {carlos.luebbe@ipvs.uni-stuttgart.de},
   internal-note = {NOTE(review): corrected language from German to English (title, keywords, and abstract are all English) and month from January to June, matching entry INPROC-2011-16 from the same MDM 2011 proceedings.},
   department = {University of Stuttgart, Institute of Parallel and Distributed Systems, Applications of Parallel and Distributed Systems},
   abstract = {Location-based services (LBS) have gained tremendous popularity with millions
      of simultaneous users daily. LBS handle very large data volumes and face
      enormous query loads. Both the data and the queries possess high locality:
      spatial data is distributed very unevenly around the globe, query load is
      different throughout the day, and users often search for similar things in the
      same places. This causes high load peaks at the data tier of LBS, which may
      seriously degrade performance. To cope with these load peaks, we present DiSCO,
      a distributed semantic cache overlay for LBS. DiSCO exploits the spatial,
      temporal and semantic locality in the queries of LBS and distributes frequently
      accessed data over many nodes. Based on the Content-Addressable Network (CAN)
      peer-to-peer approach, DiSCO achieves high scalability by partitioning data
      using spatial proximity. Our evaluation shows that DiSCO significantly reduces
      queries to the underlying data tier.},
   url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2011-20&amp;engl=1}
}

@inproceedings {INPROC-2011-17,
   author = {Christoph Stach and Andreas Brodt},
   title = {{vHike -- A Dynamic Ride-sharing Service for Smartphones}},
   booktitle = {Proceedings of the 12th International Conference on Mobile Data Management},
   address = {Lule{\aa}, Sweden},
   publisher = {ACM},
   institution = {University of Stuttgart, Faculty of Computer Science, Electrical Engineering, and Information Technology, Germany},
   pages = {1--4},
   type = {Conference Paper},
   month = {June},
   year = {2011},
   keywords = {ride-sharing; trust; security; location-based},
   language = {English},
   cr-category = {K.4 Computers and Society},
   contact = {Senden Sie eine E-Mail an christoph.stach@ipvs.uni-stuttgart.de},
   internal-note = {NOTE(review): removed stray Unicode en dashes that preceded and followed ``vHike'' in the title (non-ASCII in an otherwise LaTeX-escaped file), normalized booktitle capitalization to match the other MDM entries, and dropped the trailing period in keywords for consistency with INPROC-2011-06. The contact field is German text in an English-language entry -- verify whether that is intended.},
   department = {University of Stuttgart, Institute of Parallel and Distributed Systems, Applications of Parallel and Distributed Systems},
   abstract = {In times of lacking natural resources and increasing environmental pollution at
      the same time, modern resource efficient kinds of personal transportation have
      to be considered. Ride-sharing is maybe one of the most economical ways to
      avouch permanent mobility without losing too much comfort. However, especially
      dynamic ride-sharing is laden with many resistances including a lack of
      security and a heavy scheduling and coordinating burden. Hence this paper
      introduces an implementation of a system for dynamic ride-sharing called vHike
      which should eliminate these barriers. With our demonstrator every interested
      participant may test whether or not such a system can be viable and effective.},
   url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2011-17&amp;engl=1}
}

@inproceedings {INPROC-2011-16,
   author = {Andreas Brodt and Oliver Schiller and Sailesh Sathish and Bernhard Mitschang},
   title = {{A mobile data management architecture for interoperability of resource and context data}},
   booktitle = {Proceedings of the 2011 Twelfth International Conference on Mobile Data Management},
   publisher = {IEEE Computer Society},
   institution = {University of Stuttgart : Collaborative Research Center SFB 627 (Nexus: World Models for Mobile Context-Based Systems), Germany},
   pages = {1--6},
   type = {Conference Paper},
   month = {June},
   year = {2011},
   language = {English},
   cr-category = {H.3.4 Information Storage and Retrieval Systems and Software,
                    H.3.5 Online Information Services,
                    H.2.4 Database Management Systems},
   ee = {ftp://ftp.informatik.uni-stuttgart.de/pub/library/ncstrl.ustuttgart_fi/INPROC-2011-16/INPROC-2011-16.pdf},
   contact = {brodt@ipvs.uni-stuttgart.de},
   internal-note = {NOTE(review): fixed misspelling ``Twelveth'' to ``Twelfth'' in booktitle, matching entry INPROC-2011-20 from the same proceedings.},
   department = {University of Stuttgart, Institute of Parallel and Distributed Systems, Applications of Parallel and Distributed Systems},
   abstract = {Mobile devices have become general-purpose computers that are equipped with
      sensors, constantly access the internet, and almost always accompany the user.
      Consequently, devices manage many different kinds of data about the user's life
      and context. There is considerable overlap in this data, as different
      applications handle similar data domains. Applications often keep this data in
      separated data silos. Web applications, which manage large amounts of personal
      data, hardly share this data with other applications at all. This lack of
      interoperability creates redundancy and impacts usability of mobile devices. We
      present a data management architecture for mobile devices to support
      interoperability between applications, devices and web applications at the data
      management level. We propose a central on-device repository for applications to
      share resource and context data in an integrated, extensible data model which
      uses semantic web technologies and supports location data. A web browser
      interface shares data with web applications, as controlled by a general
      security model.},
   url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2011-16&amp;engl=1}
}

@inproceedings {INPROC-2011-07,
   author = {Peter Reimann and Michael Reiter and Holger Schwarz and Dimka Karastoyanova and Frank Leymann},
   title = {{SIMPL - A Framework for Accessing External Data in Simulation Workflows}},
   booktitle = {Datenbanksysteme f{\"u}r Business, Technologie und Web (BTW 2011), 14. Fachtagung des GI-Fachbereichs ``Datenbanken und Informationssysteme'' (DBIS), Proceedings, 02.-04. M{\"a}rz 2011, Kaiserslautern, Germany},
   editor = {Gesellschaft f{\"u}r Informatik (GI)},
   publisher = {Lecture Notes in Informatics (LNI)},
   institution = {University of Stuttgart, Faculty of Computer Science, Electrical Engineering, and Information Technology, Germany},
   series = {Series of the Gesellschaft f{\"u}r Informatik (GI)},
   volume = {180},
   pages = {534--553},
   type = {Conference Paper},
   month = {March},
   year = {2011},
   isbn = {978-3-88579-274-1},
   keywords = {Data Provisioning; Workflow; Scientific Workflow; Simulation Workflow; BPEL; WS-BPEL; SIMPL},
   language = {English},
   cr-category = {H.2.8 Database Applications,
                    H.4.1 Office Automation},
   contact = {Peter Reimann Peter.Reimann@ipvs.uni-stuttgart.de},
   internal-note = {NOTE(review): replaced raw Unicode German quotation marks in booktitle and a Unicode en dash in the abstract with TeX markup; classic BibTeX is 8-bit and this file otherwise uses LaTeX escapes for non-ASCII characters.},
   department = {University of Stuttgart, Institute of Parallel and Distributed Systems, Applications of Parallel and Distributed Systems;
                  University of Stuttgart, Institute of Architecture of Application Systems},
   abstract = {Adequate data management and data provisioning are among the most important
      topics to cope with the information explosion intrinsically associated with
      simulation applications. Today, data exchange with and between simulation
      applications is mainly accomplished in a file-style manner. These files show
      proprietary formats and have to be transformed according to the specific needs
      of simulation applications. Lots of effort has to be spent to find appropriate
      data sources and to specify and implement data transformations. In this paper,
      we present SIMPL -- an extensible framework that provides a generic and
      consolidated abstraction for data management and data provisioning in
      simulation workflows. We introduce extensions to workflow languages and show
      how they are used to model the data provisioning for simulation workflows based
      on data management patterns. Furthermore, we show how the framework supports a
      uniform access to arbitrary external data in such workflows. This removes the
      burden from engineers and scientists to specify low-level details of data
      management for their simulation applications and thus boosts their
      productivity.},
   url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2011-07&amp;engl=1}
}

@inproceedings {INPROC-2011-06,
   author = {Christoph Stach},
   title = {{Saving time, money and the environment - vHike a dynamic ride-sharing service for mobile devices}},
   booktitle = {Work in Progress workshop at PerCom 2011 (WIP of PerCom 2011)},
   address = {Seattle, USA},
   publisher = {IEEE},
   institution = {University of Stuttgart : Collaborative Research Center SFB 627 (Nexus: World Models for Mobile Context-Based Systems), Germany},
   pages = {1--4},
   type = {Conference Paper},
   month = {March},
   year = {2011},
   keywords = {ride-sharing; trust; security; location-based},
   language = {English},
   cr-category = {K.4 Computers and Society},
   internal-note = {NOTE(review): the abstract spells ``ridesharing'' once while the keywords and the rest of the abstract use ``ride-sharing'' -- verify against the published abstract before normalizing.},
   department = {University of Stuttgart, Institute of Parallel and Distributed Systems, Applications of Parallel and Distributed Systems},
   abstract = {In times of increasing traffic-related problems, such as air-pollution or
      traffic jams, ride-sharing is one of the most environmentally friendly and
      pleasantest ways to travel. The many benefits are offset by a multitude of
      prejudices and fears, including security concerns and a heavy scheduling and
      coordinating burden. For this reason this paper introduces vHike an easy-to-use
      management system for dynamic ridesharing running on modern Smartphones. By the
      use of techniques well-known from Web 2.0 social networks the threats and
      social discomfort emanated by ride-sharing is mitigated. With vHike we want to
      show that a proper designed social dynamic ride-sharing system can be feasible
      and viable.},
   url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=INPROC-2011-06&amp;engl=1}
}

@article {ART-2011-19,
   author = {Bernhard Mitschang and Holger Schwarz},
   title = {{Der Lehrstuhl ``Datenbanken und Informationssysteme'' an der Universit{\"a}t Stuttgart stellt sich vor}},
   journal = {Datenbank-Spektrum},
   publisher = {Springer},
   volume = {11},
   number = {3},
   pages = {213--217},
   type = {Article in Journal},
   month = {November},
   year = {2011},
   language = {German},
   cr-category = {H.2 Database Management},
   internal-note = {NOTE(review): replaced raw Unicode double quotes around ``Datenbanken und Informationssysteme'' in the title with TeX quote markup; classic BibTeX is 8-bit and this file otherwise uses LaTeX escapes for non-ASCII characters.},
   department = {University of Stuttgart, Institute of Parallel and Distributed Systems, Applications of Parallel and Distributed Systems},
   abstract = {In diesem Beitrag stellen wir den Lehrstuhl f{\"u}r Datenbanken und
      Informationssysteme der Universit{\"a}t Stuttgart unter der Leitung von Prof. Dr.
      Bernhard Mitschang vor. Nach einem {\"U}berblick {\"u}ber die Forschungsschwerpunkte
      des Lehrstuhls gehen wir auf ausgew{\"a}hlte aktuelle Forschungsprojekte ein und
      erl{\"a}utern die Beteiligung an der Lehre in Bachelor- und Masterstudieng{\"a}ngen.},
   url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=ART-2011-19&amp;engl=1}
}

@article {ART-2011-14,
   author = {Peter Reimann and Holger Schwarz and Bernhard Mitschang},
   title = {{Design, Implementation, and Evaluation of a Tight Integration of Database and Workflow Engines}},
   journal = {Journal of Information and Data Management},
   editor = {Alberto H. F. Laender and Mirella M. Moro},
   publisher = {SBC - Brazilian Computer Society},
   volume = {2},
   number = {3},
   pages = {353--368},
   type = {Article in Journal},
   month = {October},
   year = {2011},
   issn = {2178-7107},
   keywords = {Data-Intensive Workflow; Improved Local Data Processing; Scientific Workflow; Simulation Workflow},
   language = {English},
   cr-category = {D.2.11 Software Engineering Software Architectures,
                    H.2.8 Database Applications,
                    H.4.1 Office Automation},
   contact = {Peter Reimann Peter.Reimann@ipvs.uni-stuttgart.de},
   internal-note = {NOTE(review): the contact field runs the author name and the e-mail address together with no separator -- consider keeping only the address, as in other entries in this file; the entry is otherwise complete and consistent.},
   department = {University of Stuttgart, Institute of Parallel and Distributed Systems, Applications of Parallel and Distributed Systems},
   abstract = {Accessing and processing huge amounts of heterogeneous and distributed data are
      some of the major challenges of data-intensive workflows. Traditionally, the
      descriptions of such workflows focus on their data flow. Nevertheless,
      control-flow-oriented workflow languages are increasingly adapted to the needs
      of data-intensive workflows. This provides a common level of abstraction for
      both data-intensive workflows and classical orchestration workflows, e.g.,
      business workflows, which then enables a comprehensive optimization across all
      workflows. However, the problem still remains that workflows described in
      control-flow-oriented languages tend to be less efficient for data-intensive
      processes compared to specialized data-flow-oriented approaches. In this paper,
      we propose a new kind of optimization targeted at data-intensive workflows that
      are described in control-flow-oriented languages. We show how to improve
      efficiency of such workflows by introducing various techniques that partition
      the local data processing tasks to be performed during workflow execution in an
      improved way. These data processing tasks are either assigned to the workflow
      engine or to the tightly integrated local database engine. We evaluate the
      effectiveness of these techniques by means of various test scenarios.},
   url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=ART-2011-14&amp;engl=1}
}

@article {ART-2011-12,
   author = {Jorge Minguez and Stefan Silcher and Philipp Riffelmacher and Bernhard Mitschang},
   title = {{A Service Bus Architecture for Application Integration in the Planning and Production Phases of a Product Lifecycle}},
   journal = {International Journal of Systems and Service-Oriented Engineering},
   publisher = {IGI Global},
   volume = {2},
   number = {2},
   pages = {21--36},
   type = {Article in Journal},
   month = {June},
   year = {2011},
   issn = {1947-3052},
   keywords = {Manufacturing Service Bus; Service-oriented Architecture; Product Lifecycle Management; SOA; MSB; PLM},
   language = {English},
   cr-category = {D.2.11 Software Engineering Software Architectures,
                    D.2.13 Software Engineering Reusable Software},
   internal-note = {NOTE(review): replaced a Unicode curly apostrophe in the abstract (today's) with an ASCII apostrophe; classic BibTeX is 8-bit and this file otherwise uses LaTeX escapes for non-ASCII characters.},
   department = {University of Stuttgart, Institute of Parallel and Distributed Systems, Applications of Parallel and Distributed Systems},
   abstract = {Constantly changing business conditions require a high level of flexibility in
      business processes as well as an adaptive and fully interoperable IT
      infrastructure in today's manufacturing environments. The lack of flexibility
      prevents manufacturing companies from improving their responsiveness and
      adapting their workflows to turbulent scenarios. In order to achieve highly
      flexible and adaptive workflows, information systems in digital factories and
      shop floors need to be integrated. The most challenging problem in such
      manufacturing environments is the high heterogeneity of the IT landscape, where
      the integration of legacy systems and information silos has led to chaotic
      architectures over the last two decades. In order to overcome this issue, the
      authors present a flexible integration platform that allows a loose coupling of
      distributed services in event-driven manufacturing environments. The proposed
      approach enables a flexible communication between digital factory and shop
      floor components by introducing a service bus architecture. This solution
      integrates an application-independent canonical message format for
      manufacturing events, content-based routing and transformation services as well
      as event processing workflows.},
   url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=ART-2011-12&amp;engl=1}
}

@article {ART-2011-07,
   author = {Holger Schwarz},
   title = {{Generierung des Datenzugriffs in Anwendungsprogrammen: Anwendungsbereiche und Implementierungstechniken}},
   journal = {Datenbank-Spektrum},
   address = {Heidelberg},
   publisher = {Springer},
   volume = {11},
   number = {1},
   pages = {5--14},
   type = {Article in Journal},
   month = {April},
   year = {2011},
   language = {German},
   cr-category = {H.4 Information Systems Applications},
   internal-note = {NOTE(review): journal name normalized to ``Datenbank-Spektrum'' (the hyphenated form used by entry ART-2011-19 for the same Springer journal) to avoid two spellings of one venue in this file.},
   department = {University of Stuttgart, Institute of Parallel and Distributed Systems, Applications of Parallel and Distributed Systems},
   abstract = {Datenzugriffe auf externe und heterogene Datenbest{\"a}nde sind ein grundlegender
      Bestandteil von Anwendungsprogrammen in ganz unterschiedlichen
      Anwendungsbereichen. Vielfach k{\"o}nnen diese Datenzugriffe nicht {\"u}ber statisch
      eingebettete Anweisungen realisiert werden, sondern m{\"u}ssen dynamisch generiert
      werden. In diesem Beitrag wird das Spektrum relevanter Anwendungsbereiche
      vorgestellt. Ausgehend von einzelnen Systembeispielen werden wichtige Aspekte
      anfragegenerierender Systeme verallgemeinert. Hierzu wird eine
      Systemklassifikation vorgestellt und die Bedeutung der Klassifikation
      insbesondere f{\"u}r Optimierungsaspekte erl{\"a}utert. Ferner werden drei grundlegende
      Implementierungskonzepte f{\"u}r anfragegenerierende Systeme vorgestellt und deren
      Eignung f{\"u}r einzelne Anwendungsklassen diskutiert.},
   url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=ART-2011-07&amp;engl=1}
}

@article {ART-2011-04,
   author = {Jorge Minguez and Philipp Riffelmacher and Bernhard Mitschang and Engelbert Westk{\"a}mper},
   title = {{Servicebasierte Integration von Produktionsanwendungen}},
   journal = {Werkstattstechnik online},
   publisher = {Springer-VDI Verlag},
   volume = {3-2011},
   pages = {128--133},
   type = {Article in Journal},
   month = {March},
   year = {2011},
   keywords = {service-oriented architecture; SOA; ESB; manufacturing; produktion; lernfabrik; produktionsanwendungen; servicebasierte integration},
   language = {German},
   cr-category = {D.2.11 Software Engineering Software Architectures,
                    D.2.13 Software Engineering Reusable Software},
   ee = {http://www.technikwissen.de/wt/currentarticle.php?data[article_id]=59574},
   contact = {jorge.minguez@gsame.uni-stuttgart.de},
   internal-note = {NOTE(review): volume = 3-2011 appears to pack the issue number and the year into the volume field (the year is already given separately) -- verify against the journal record and split into volume/number if possible.},
   department = {University of Stuttgart, Institute of Parallel and Distributed Systems, Applications of Parallel and Distributed Systems},
   abstract = {In einem modernen Produktionsumfeld soll es m{\"o}glich sein,
      informationstechnische Prozesse an die sich zunehmend {\"a}ndernden
      Gesch{\"a}ftsbedingungen anzupassen. Um eine schnelle Anpassung zu realisieren, ist
      eine flexible Integration unterschiedlicher Informationssysteme erforderlich,
      da die Informationsfl{\"u}sse durch system{\"u}bergreifende Datenbearbeitungsprozesse
      gesteuert werden. Die heterogene Landschaft der digitalen Werkzeuge stellt
      dabei eine enorme Herausforderung dar. Der vorgestellte servicebasierte Ansatz
      adressiert diese Problematik.},
   url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=ART-2011-04&amp;engl=1}
}

@article {ART-2011-03,
   author = {Nazario Cipriani and Matthias Wieland and Matthias Gro{\ss}mann and Daniela Nicklas},
   title = {{Tool support for the design and management of context models}},
   journal = {Information Systems},
   editor = {Gottfried Vossen and Tadeusz Morzy},
   address = {Oxford, UK, UK},
   publisher = {Elsevier Science Ltd.},
   volume = {36},
   number = {1},
   pages = {99--114},
   type = {Article in Journal},
   month = {March},
   year = {2011},
   issn = {0306-4379},
   language = {English},
   cr-category = {H.2.4 Database Management Systems},
   ee = {http://www.sciencedirect.com/science?_ob=PublicationURL&_tockey=%23TOC%235646%232011%23999639998%232475749%23FLA%23&_cdi=5646&_pubType=J&_auth=y&_acct=C000022964&_version=1&_urlVersion=0&_userid=479010&md5=90fcaef40ac5285da3d69e894c214388,
      http://www.sciencedirect.com/science?_ob=MImg&_imagekey=B6V0G-50GMMMG-4-1K&_cdi=5646&_user=479010&_pii=S0306437910000669&_origin=browse&_zone=rslt_list_item&_coverDate=03%2F31%2F2011&_sk=999639998&wchp=dGLbVtb-zSkzk&md5=aac6f0561c2464d528bcce117970acff&ie=/sdarticle.pdf},
   internal-note = {NOTE(review): renamed isbn to issn -- 0306-4379 is an 8-digit serial number (the journal's ISSN), not a book number; author spelling normalized to Gro{\ss}mann as in entry INPROC-2011-20.},
   department = {University of Stuttgart, Institute of Architecture of Application Systems;
                  University of Stuttgart, Institute of Parallel and Distributed Systems, Applications of Parallel and Distributed Systems},
   abstract = {A central task in the development of context-aware applications is the modeling
      and management of complex context information. In this paper, we present the
      NexusEditor, which can ease this task by providing a graphical user interface
      to design schemas for spatial and technical context models, interactively
      create queries, send them to a server and visualize the results. One main
      contribution is to show how schema awareness can improve such a tool: The
      NexusEditor dynamically parses the underlying data model and provides
      additional syntactic checks, semantic checks, and short-cuts based on the
      schema information. Furthermore, the tool helps to design new schema
      definitions based on the existing ones, which is crucial for an iterative and
      user-centric development of context-aware applications. Finally, it provides
      interfaces to existing information spaces and visualization tools for spatial
      data like GoogleEarth.},
   url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=ART-2011-03&amp;engl=1}
}

@proceedings {PROC-2011-01,
   editor = {Theo H{\"a}rder and Wolfgang Lehner and Bernhard Mitschang and Harald Sch{\"o}ning and Holger Schwarz},
   title = {{Datenbanksysteme f{\"u}r Business, Technologie und Web (BTW 2011)}},
   publisher = {GI},
   institution = {University of Stuttgart, Faculty of Computer Science, Electrical Engineering, and Information Technology, Germany},
   pages = {760},
   type = {Proceedings},
   month = {February},
   year = {2011},
   isbn = {978-3-88579-274-1},
   language = {German},
   cr-category = {H.2 Database Management},
   internal-note = {NOTE(review): replaced raw Unicode quotes around BTW in the abstract with TeX quote markup (classic BibTeX is 8-bit). Also note: month is February although the abstract says the conference took place March 2nd to 4th -- possibly the publication month of the volume; verify.},
   department = {University of Stuttgart, Institute of Parallel and Distributed Systems, Applications of Parallel and Distributed Systems;
                  University of Stuttgart, Institute of Parallel and Distributed Systems},
   abstract = {The ``BTW'' is a biennial conference series focusing on a broad range of topics
      addressing database management for Business, Technology, and Web. BTW 2011 as
      its 14th event took place in Kaiserslautern from March 2nd to 4th. This volume
      contains 24 long and 6 short papers selected for presentation at the
      conference, 9 industrial contributions, 3 papers or abstracts for the invited
      talks, 12 demonstration proposals, a panel description, and a paper written by
      the winner of the dissertation award. The subject areas include core database
      technology such as query optimization and indexing, DBMS-related prediction
      models, data streams, processing of large data sets, Web-based information
      extraction, benchmarking and simulation, and others.},
   url = {http://www2.informatik.uni-stuttgart.de/cgi-bin/NCSTRL/NCSTRL_view.pl?id=PROC-2011-01&amp;engl=1}
}

